reprepro-4.13.1/0000755000175100017510000000000012152655346010452 500000000000000reprepro-4.13.1/config.h.in0000644000175100017510000000751012152655344012416 00000000000000/* config.h.in. Generated from configure.ac by autoheader. */ /* Define if building universal (internal helper macro) */ #undef AC_APPLE_UNIVERSAL_BUILD /* Define to 1 if you have the `closefrom' function. */ #undef HAVE_CLOSEFROM /* Define to 1 if you have the `dprintf' function. */ #undef HAVE_DPRINTF /* Define to 1 if you have the header file. */ #undef HAVE_INTTYPES_H /* Defined if libarchive is available */ #undef HAVE_LIBARCHIVE /* Define to 1 if you have the `bz2' library (-lbz2). */ #undef HAVE_LIBBZ2 /* Define to 1 if you have the `gpgme' library (-lgpgme). */ #undef HAVE_LIBGPGME /* Define to 1 if you have the `gpg-error' library (-lgpg-error). */ #undef HAVE_LIBGPG_ERROR /* Define to 1 if you have the `z' library (-lz). */ #undef HAVE_LIBZ /* Define to 1 if you have the header file. */ #undef HAVE_MEMORY_H /* Define to 1 if you have the `mkostemp' function. */ #undef HAVE_MKOSTEMP /* Define to 1 if you have the `mkstemp' function. */ #undef HAVE_MKSTEMP /* Define to 1 if stdbool.h conforms to C99. */ #undef HAVE_STDBOOL_H /* Define to 1 if you have the header file. */ #undef HAVE_STDINT_H /* Define to 1 if you have the header file. */ #undef HAVE_STDLIB_H /* Define to 1 if you have the header file. */ #undef HAVE_STRINGS_H /* Define to 1 if you have the header file. */ #undef HAVE_STRING_H /* Define to 1 if you have the `strndup' function. */ #undef HAVE_STRNDUP /* Define to 1 if you have the header file. */ #undef HAVE_SYS_STAT_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_TYPES_H /* Define to 1 if you have the `tdestroy' function. */ #undef HAVE_TDESTROY /* Define to 1 if you have the header file. */ #undef HAVE_UNISTD_H /* Define to 1 if the system has the type `_Bool'. 
*/ #undef HAVE__BOOL /* Name of package */ #undef PACKAGE /* Define to the address where bug reports for this package should be sent. */ #undef PACKAGE_BUGREPORT /* Define to the full name of this package. */ #undef PACKAGE_NAME /* Define to the full name and version of this package. */ #undef PACKAGE_STRING /* Define to the one symbol short name of this package. */ #undef PACKAGE_TARNAME /* Define to the home page for this package. */ #undef PACKAGE_URL /* Define to the version of this package. */ #undef PACKAGE_VERSION /* Define to 1 if you have the ANSI C header files. */ #undef STDC_HEADERS /* Enable extensions on AIX 3, Interix. */ #ifndef _ALL_SOURCE # undef _ALL_SOURCE #endif /* Enable GNU extensions on systems that have them. */ #ifndef _GNU_SOURCE # undef _GNU_SOURCE #endif /* Enable threading extensions on Solaris. */ #ifndef _POSIX_PTHREAD_SEMANTICS # undef _POSIX_PTHREAD_SEMANTICS #endif /* Enable extensions on HP NonStop. */ #ifndef _TANDEM_SOURCE # undef _TANDEM_SOURCE #endif /* Enable general extensions on Solaris. */ #ifndef __EXTENSIONS__ # undef __EXTENSIONS__ #endif /* Version number of package */ #undef VERSION /* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most significant byte first (like Motorola and SPARC, unlike Intel). */ #if defined AC_APPLE_UNIVERSAL_BUILD # if defined __BIG_ENDIAN__ # define WORDS_BIGENDIAN 1 # endif #else # ifndef WORDS_BIGENDIAN # undef WORDS_BIGENDIAN # endif #endif /* Enable large inode numbers on Mac OS X 10.5. */ #ifndef _DARWIN_USE_64_BIT_INODE # define _DARWIN_USE_64_BIT_INODE 1 #endif /* Number of bits in a file offset, on hosts where this is settable. */ #undef _FILE_OFFSET_BITS /* Define for large files, on AIX-style hosts. */ #undef _LARGE_FILES /* Define to 1 if on MINIX. */ #undef _MINIX /* Define to 2 if the system does not provide POSIX.1 features except with this defined. */ #undef _POSIX_1_SOURCE /* Define to 1 if you need to in order for `stat' and other things to work. 
*/ #undef _POSIX_SOURCE reprepro-4.13.1/strlist.h0000644000175100017510000000377412152651661012256 00000000000000#ifndef REPREPRO_STRLIST_H #define REPREPRO_STRLIST_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" #endif #ifndef REPREPRO_GLOBALS_H #include "globals.h" #warning "What's hapening here?" #endif struct strlist { char **values; int count, size; }; void strlist_init(/*@out@*/struct strlist *); retvalue strlist_init_n(int /*startsize*/, /*@out@*/struct strlist *); retvalue strlist_init_singleton(/*@only@*/char *, /*@out@*/struct strlist *); void strlist_done(/*@special@*/struct strlist *strlist) /*@releases strlist->values @*/; /* add a string, will get property of the strlist and free'd by it */ retvalue strlist_add(struct strlist *, /*@only@*/char *); /* include a string at the beginning, otherwise like strlist_add */ retvalue strlist_include(struct strlist *, /*@only@*/char *); /* add a string alphabetically, discarding if already there. */ retvalue strlist_adduniq(struct strlist *, /*@only@*/char *); /* like strlist_add, but strdup it first */ retvalue strlist_add_dup(struct strlist *strlist, const char *todup); /* print a space separated list of elements */ retvalue strlist_fprint(FILE *, const struct strlist *); /* replace the contents of dest with those from orig, which get emptied */ void strlist_move(/*@out@*/struct strlist *dest, /*@special@*/struct strlist *orig) /*@releases orig->values @*/; bool strlist_in(const struct strlist *, const char *); int strlist_ofs(const struct strlist *, const char *); bool strlist_intersects(const struct strlist *, const struct strlist *); /* if missing != NULL And subset no subset of strlist, set *missing to the first missing one */ bool strlist_subset(const struct strlist *, const struct strlist * /*subset*/, const char ** /*missing_p*/); /* concatenate */ char *strlist_concat(const struct strlist *, const char * /*prefix*/, const char * /*infix*/, const char * /*suffix*/); /* 
remove all strings equal to the argument */ void strlist_remove(struct strlist *, const char *); #endif reprepro-4.13.1/globmatch.h0000644000175100017510000000020612152651661012475 00000000000000#ifndef REPREPRO_GLOBMATCH_H #define REPREPRO_GLOBMATCH_H bool globmatch(const char * /*string*/, const char */*pattern*/); #endif reprepro-4.13.1/sourceextraction.h0000644000175100017510000000132312152651661014137 00000000000000#ifndef REPREPRO_SOURCEEXTRACTION_H #define REPREPRO_SOURCEEXTRACTION_H struct sourceextraction; /*@NULL@*/struct sourceextraction *sourceextraction_init(/*@null@*/char **section_p, /*@null@*/char **priority_p); void sourceextraction_abort(/*@only@*/struct sourceextraction *); /* register a file part of this source */ void sourceextraction_setpart(struct sourceextraction *, int , const char *); /* return the next needed file */ bool sourceextraction_needs(struct sourceextraction *, /*@out@*/int *); /* full file name of requested files ready to analyse */ retvalue sourceextraction_analyse(struct sourceextraction *, const char *); retvalue sourceextraction_finish(/*@only@*/struct sourceextraction *); #endif reprepro-4.13.1/ignore.c0000644000175100017510000000471312152651661012022 00000000000000/* This file is part of "reprepro" * Copyright (C) 2005 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include "ignore.h" int ignored[IGN_COUNT]; bool ignore[IGN_COUNT]; enum config_option_owner owner_ignore[IGN_COUNT]; static const char * const ignores[] = { #define IGN(what) #what , VALID_IGNORES #undef IGN }; bool print_ignore_type_message(bool i, enum ignore what) { ignored[what]++; if (ignore[what]) fprintf(stderr, "%s as --ignore=%s given.\n", i ? "Ignoring" : "Not rejecting", ignores[what]); else fprintf(stderr, "To ignore use --ignore=%s.\n", ignores[what]); return ignore[what]; } static retvalue set(const char *given, size_t len, bool newvalue, enum config_option_owner newowner) { int i; //TODO: allow multiple values sperated by some sign here... for (i = 0 ; i < IGN_COUNT ; i++) { if (strncmp(given, ignores[i], len) == 0 && ignores[i][len] == '\0') { if (owner_ignore[i] <= newowner) { ignore[i] = newvalue; owner_ignore[i] = newowner; } break; } } if (i == IGN_COUNT) { char *str = strndup(given, len); if (IGNORING(ignore, "Unknown --ignore value: '%s'!\n", (str!=NULL)?str:given)) { free(str); return RET_NOTHING; } else { free(str); return RET_ERROR; } } else return RET_OK; } retvalue set_ignore(const char *given, bool newvalue, enum config_option_owner newowner) { const char *g, *p; retvalue r; assert (given != NULL); g = given; while (true) { p = g; while (*p != '\0' && *p != ',') p++; if (p == g) { fprintf(stderr, "Empty ignore option in --ignore='%s'!\n", given); return RET_ERROR_MISSING; } r = set(g, p - g, newvalue, newowner); if (RET_WAS_ERROR(r)) return r; if (*p == '\0') return RET_OK; g = p+1; } } reprepro-4.13.1/reference.c0000644000175100017510000001274112152651661012475 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003,2004,2005,2007 Bernhard R. 
Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include "error.h" #include "strlist.h" #include "names.h" #include "dirs.h" #include "database_p.h" #include "pool.h" #include "reference.h" retvalue references_isused( const char *what) { return table_gettemprecord(rdb_references, what, NULL, NULL); } retvalue references_check(const char *referee, const struct strlist *filekeys) { int i; retvalue result, r; result = RET_NOTHING; for (i = 0 ; i < filekeys->count ; i++) { r = table_checkrecord(rdb_references, filekeys->values[i], referee); if (r == RET_NOTHING) { fprintf(stderr, "Missing reference to '%s' by '%s'\n", filekeys->values[i], referee); r = RET_ERROR; } RET_UPDATE(result, r); } return result; } /* add an reference to a file for an identifier. 
multiple calls */ retvalue references_increment(const char *needed, const char *neededby) { retvalue r; r = table_addrecord(rdb_references, needed, neededby, strlen(neededby), false); if (RET_IS_OK(r) && verbose > 8) printf("Adding reference to '%s' by '%s'\n", needed, neededby); return r; } /* remove reference for a file from a given reference */ retvalue references_decrement(const char *needed, const char *neededby) { retvalue r; r = table_removerecord(rdb_references, needed, neededby); if (r == RET_NOTHING) return r; if (RET_WAS_ERROR(r)) { fprintf(stderr, "Error while trying to removing reference to '%s' by '%s'\n", needed, neededby); return r; } if (verbose > 8) fprintf(stderr, "Removed reference to '%s' by '%s'\n", needed, neededby); if (RET_IS_OK(r)) { retvalue r2; r2 = pool_dereferenced(needed); RET_UPDATE(r, r2); } return r; } /* Add an reference by for the given , * excluding , if it is nonNULL. */ retvalue references_insert(const char *identifier, const struct strlist *files, const struct strlist *exclude) { retvalue result, r; int i; result = RET_NOTHING; for (i = 0 ; i < files->count ; i++) { const char *filename = files->values[i]; if (exclude == NULL || !strlist_in(exclude, filename)) { r = references_increment(filename, identifier); RET_UPDATE(result, r); } } return result; } /* add possible already existing references */ retvalue references_add(const char *identifier, const struct strlist *files) { int i; retvalue r; for (i = 0 ; i < files->count ; i++) { const char *filekey = files->values[i]; r = table_addrecord(rdb_references, filekey, identifier, strlen(identifier), true); if (RET_WAS_ERROR(r)) return r; } return RET_OK; } /* Remove reference by for the given , * excluding , if it is nonNULL. 
*/ retvalue references_delete(const char *identifier, struct strlist *files, const struct strlist *exclude) { retvalue result, r; int i; assert (files != NULL); result = RET_NOTHING; for (i = 0 ; i < files->count ; i++) { const char *filekey = files->values[i]; if (exclude == NULL || !strlist_in(exclude, filekey)) { r = references_decrement(filekey, identifier); RET_UPDATE(result, r); } } return result; } /* remove all references from a given identifier */ retvalue references_remove(const char *neededby) { struct cursor *cursor; retvalue result, r; const char *found_to, *found_by; size_t datalen, l; r = table_newglobalcursor(rdb_references, &cursor); if (!RET_IS_OK(r)) return r; l = strlen(neededby); result = RET_NOTHING; while (cursor_nexttempdata(rdb_references, cursor, &found_to, &found_by, &datalen)) { if (datalen >= l && strncmp(found_by, neededby, l) == 0 && (found_by[l] == '\0' || found_by[l] == ' ')) { if (verbose > 8) fprintf(stderr, "Removing reference to '%s' by '%s'\n", found_to, neededby); r = cursor_delete(rdb_references, cursor, found_to, NULL); RET_UPDATE(result, r); if (RET_IS_OK(r)) { r = pool_dereferenced(found_to); RET_ENDUPDATE(result, r); } } } r = cursor_close(rdb_references, cursor); RET_ENDUPDATE(result, r); return result; } /* dump all references to stdout */ retvalue references_dump(void) { struct cursor *cursor; retvalue result, r; const char *found_to, *found_by; r = table_newglobalcursor(rdb_references, &cursor); if (!RET_IS_OK(r)) return r; result = RET_OK; while (cursor_nexttemp(rdb_references, cursor, &found_to, &found_by)) { if (fputs(found_by, stdout) == EOF || putchar(' ') == EOF || puts(found_to) == EOF) { result = RET_ERROR; break; } result = RET_OK; if (interrupted()) { result = RET_ERROR_INTERRUPTED; break; } } r = cursor_close(rdb_references, cursor); RET_ENDUPDATE(result, r); return result; } reprepro-4.13.1/NEWS0000644000175100017510000007025712152655314011077 00000000000000Updates between 4.13.0 and 4.13.1: - fix bug in 
restore - fix percomponent udeb Contents filenames - add support for sources listing architecture wildcards to build-needing Updates between 4.12.5 and 4.13.0: - new commands: deleteifunreferenced repairdescriptions lsbycomponent - add ${$basename}, ${$filekey} and ${$fullfilename} to --listformat - reject absurd large values in ValidFor header - add --endhook, --outhook - SignWith: can now also contain external scripts for signing - several small cleanups and fixes Updates between 4.12.4 and 4.12.5: - various documentation improvements - fix bitrot in non-libarchive code Updates between 4.12.3 and 4.12.4: - fix bug when only generating .bz2 indices - ignore diff comments about unterminated lines when parsing .diff files Updates between 4.12.2 and 4.12.3: - actually set REPREPRO_CONFIG_DIR in hooks as documented in manpage - support 103 redirect message from apt's http method. (works best with apt >= 0.9.4) Updates between 4.12.1 and 4.12.2: - fix error with uploader files with more than 16 group members Updates between 4.12.0 and 4.12.1: - fix bash and zsh completion to work with config directories - add experimental -A, -C, -T support to the pull/update family of commands Updates between 4.11.0 and 4.12.0: - get InRelease from remote repositories (to disable use new GetInRelease: no) - always put Package field first in indices - support getting packages from remote repositories without md5sums. Updates between 4.10.0 and 4.11.0: - Contents files default location is now "percomponent compatsymlink". - unify handling of "unknown" section. 
Updates between 4.9.0 and 4.10.0: - allow "!include:" in conf/{distributions,updates,pulls,incoming} - conf/{distributions,updates,pulls,incoming} can be directories - add FilterList keyword 'supersede' to remove if upstream has newer pkgs - improve changelogs.example (CHANGELOGDIR empty stored directly in pool/) Updates between 4.8.2 and 4.9.0: - build-needing now allows to look for things for 'all' and 'any' - improve error messages when parsing config files - uploader files now can 'include' other files. Updates between 4.8.1 and 4.8.2: - rredtool: produce .diff/Index files that reprepro can understand. - warn if uploader files contains key ids too long to handle - make .diff/Index parsing errors non-fatal Updates between 4.8.0 and 4.8.1: - fix NULL-dereference with broken Packages.diff/Index files Updates between 4.7.0 and 4.8.0: - add compatsymlink nocompatsymlink Contents: options (and document that the default will change in the future) Updates between 4.6.1 and 4.7.0: - add 'redochecksums' command - add percomponent and allcomponents to Contents: flags Updates between 4.6.0 and 4.6.1: - fix message given when replacing a package with the same version - fix bug not deleting packages if none added in update Updates between 4.5.1 and 4.6.0: - add 'FilterSrcList' for update and pull - ignore leading comments in control files Updates between 4.5.0 and 4.5.1: - 'check' also checks if architectures match - buffix in 'sourcemissing', 'unusedsources' and 'reportcruft' without tracking - fix 'pull' copying packages with wrong architecture - compile with libdb5 Updates between 4.4.0 and 4.5.0: - support reading of Release files without MD5Sum - add all missing Checksums-* when importing from remote repositories - add 'reportcruft' command Updates between 4.3.0 and 4.4.0: - SignWith allows multiple arguments to denote multiple keys to sign wtih - add removesrcs command (like removesrc can you can remove more at once) - uploaders files can have groups of uploaders and 
depend on the distribution to upload to (to share uploaders file between distributions) - add 'sizes' command. Updates between 4.2.0 and 4.3.0: - add special "$Delete" override field to get rid of fields - add support for ButAtuomaticUpgrades - add 'unusedsources' and 'sourcemissing' commands - add support for lzip compressed files - bugfixes ($component overrides, compiling without libbz2, with gcc4.5) Updates between 4.1.1 and 4.2.0: - allow patterns in override files - apply override files when doing 'pull' and 'update' - add special '$Component' override field - create InRelease file additionally to Release.gpg Updates between 4.1.1 and 4.1.2: - fix parsing of .orig-*.tar.* lines in .changes files, especially do not choke on _ characters. - add --onlysmalldeletes option to limit scope of update and pull Updates between 4.1.0 and 4.1.1: - fix calling --changes Log:-notifiers from processincoming - add '${$source}' and '${$sourceversion}' to --list-format Updates between 4.0.2 and 4.1.0: - rredtool can be used as index hook to maintain a .diff/Index file. - properly handle relative LogDir in conf/incoming - add ByHandHooks to conf/distributions (only used by processincoming yet) - fix extraction of exactly one of section or priority from a tar file. - new byhand statement for uploaders files and ByHandHook for configuration Updates between 4.0.1 and 4.0.2: - add support for xz de-compression - fix regression (since 3.8.0) breaking arch1>arch2 update rules. - some small warning output fixes in update code Updates between 4.0.0 and 4.0.1: - strip the last '/' from Method and Fallback in conf/updates to work around problems in some apt methods. (to get old behaviour, use "//") - 'check' now warns if a file was missing but could be readded - much more permissive check for libdb. You are on your own now to check what version to build against. 
Updates between 3.12.1 and 4.0.0: - disable old files.db handling (remove all support but translatelegacyfilelists), remove --oldfilesdb options - remove --overridedir - bugfixes in documentation and bash/zsh completion Updates between 3.12.0 and 3.12.1: - fix problems with libgpgme 1.2.0 Updates between 3.11.1 and 3.12.0: - warn if directories are relative to the currect directory but do not start with './' - directories starting '+b/' '+o/' and '+c/' are relative to basedir, outdir or confdir. - FakeComponentPrefix now no longer adds its arguments to components already having it and shortens their dist directories to not duplicate that either. - -A, -C and -T can have multiple arguments now, separated with '|'. - new 'flood' action to align architecture all packages - new '--show-percent' option - warn if old legacy files.db is still used - add new translatelegacyfilelists command for easier migration. (just a collectnewchecksums and deleting that file was enough, though) Updates between 3.11.0 and 3.11.1: - new changestool option --create-with-all-fields - new --morguedir option (or morguedir in conf/options, of course) - new $Version, $Source, $SourceVersion et al on formulas - bugfixes Updates between 3.10.0 and 3.11.0: - new --list-max and --list-skip options - new glob-matching in formulas (e.g. "reprepro listfilter codename 'Package (% linux-image-*)'") - new listmatched, removematched, copymatched and restorematched - new build-needing command to list source packages likely to need a build for a given architecture. - pull, predelete and update call retrack on distributions with tracking enabled. Updates between 3.9.2 and 3.10.0: - fix bug of ListHook not used if in the From: rule of a rule. - add ListShelllHook - add _listdbidentifers and _listconfidentifiers - add --list-format to change format of list and listfilter + rewrite Release.gpg verification code: - more hops needed to use expired or revoked keys - earlier check of keys. 
now all keys in VerifyRelease must be known to gpg - subkeys are accepted if the key-id is appended with '+'. * improve uploader lists: - subkeys are accepted if key-if is appended with '+' - new 'anybody' while 'unsigned' now means really unsigned - new conditions to look as sourcename, binary names and sections Updates between 3.9.1 and 3.9.2: + fix bug (catched by assertion if there is no old index file) that inverts the logic of downloading .diff files when there is no DownLoadListsAs line. Updates between 3.9.0 and 3.9.1: + fix error of misinterpreting newer libz return value when extracting section from a .dsc. Updates between 3.8.2 and 3.9.0: + deprecate old (pre 3.3) file database format. Warn loudly when the database is still using that format. + new features - support Sources/Package.diff downloading (Use DownloadListsAs if you want to force .gz downloading instead) - support falling back to other compressions of index files when not downloadable at first. - changestool can now also look in .lzma files for .dsc section/priority - delete .new files in dists/ on error unless --keeptemporaries - new 'warning' state for FilterList - set REPREPRO_FROM and REPREPRO_CAUSING_RULE in some log notifiers + bug fixes: - reenable workaround for apt-methods having problem with existing files which got lost in 3.8 - fix bug not looking at DownloadListsAs in all cases - bugfix in misparsing some .diff files for section/priority retrieval - do not stop when incomplete downloads or other stray files are in the pool Updates between 3.8.1 and 3.8.2: - add ReadOnly option for conf/distributions - support byhand and raw-* files in include and processincoming - allow uploading log files with .changes files - new LogDir in conf/incoming to store changes and log files. 
Updates between 3.8.0 and 3.8.1: - make filtercopy work again - fix bug not allowing source packages from flat repositories without Directory fields to be imported - add gnupghome option to make GNUPGHOME setable via conf/options Updates between 3.8.0~alpha and 3.8.0: - add support for generating Valid-Until fields in Release files Updates between 3.6.3 and 3.8.0~alpha: + different small improvements: - log notifiers can be limited to a specific command with --via - upgradeonly value for FilterList to only include a package if an older one is already there. - new --keepunusednewfiles to keep files just added to the pool but later in the same run decided to no longer be needed (for example because a package was not added because of later detected errors). - --keepunreferenced and actions implying this now print the number of files that lost their last reference - new dumpupdate and dumppull actions that are like checkupdate and checkpull put with output easier parseable - new ls action to list a package in all distributions + bugfixes - if FilterFormula excludes a package, FilterList can no longer put a package on hold. + improved decompression support - support looking into lzma compressed .deb, .diff and .tar files. - support for external helpers for uncompression (to speed up uncompression on multiple processors, also reprepro can now be compiled without libbz2 and zlib if needed) - support for downloading and using bz2 and lzma index files in updates + major changes to index file retrieval on updates: - iteratedupdate action was removed - update-rules can inherit settings from others - ListHooks are now called once per usage (mostly only makes a difference for flat upstream repositories) - --nolistsdownload no longer includes --noskipold and checks checksums of the lists files. 
- format of lists/ directory contents changed (I doubt anyone cares for the files in there, but if you do, you have been informed hereby that it looks differently) - lists/ directory no longer auto-cleaned, thus --(no)keepuneeded longer exists and new action cleanlists to clean files no longer usable... + visible effects of internal refactorisations: - multiple checks for identifiers more strict now - some fields in conf/distributions need a specific order now (Architectures and Components before things using the values defined by those) Updates between 3.6.2 and 3.6.3: - fix sha256 generation of very large files, thanks to Max Bowsher - allow multiple export hooks at once - use libgpg-error directly (to avoid some warnings in dependency analysis) Updates between 3.6.1 and 3.6.2: - --nooldfilesdb is the default now, create new repositories with --oldfilesdb if you do not want to destroy them by accidentially running reprepro versions before 3.0.0 on them... - fix content reading of overlong .deb files - fix parsing of flat repositories without Directory in Sources - fix tracking database corruption in removesrc with outdated tracking data [previously believed hard to trigger, but outdated tracking data suffices] - many improvements and less spelling errors in manpage Updates between 3.6.0 and 3.6.1: - fix reoverride - fix bz2 compression (newer libbz2 sometimes uses more return codes than previous versions, triggering a bug in reprepro) Updates between 3.5.2 and 3.6.0: - add IgnoreHashes option - allow list to list all packages if not package name specified. - support retrieving packages from flat repositories - speed up updating by buffering zlib's reading of index files - remove iteratedupdate - multiple little but nasty bugs fixed Updates between 3.5.1 and 3.5.2: - fix bug in optionsfilename generation introduced in 3.5.1 - add FakeComponentPrefix to cope with apt's problems with / in distribution names. 
Updates between 3.5.0 and 3.5.1: - support upcoming version 3 format source packages (priority and section extraction only for wig&pen and quilt format) - set environment variables REPREPRO_*_DIR when calling hooks. (note that those are set to the last set values, so for example REPREPRO_CONF_DIR will be the directory with 'distributions' in it, not necessarily the one with 'options' in it that was parsed). - other minor bugfixes Updates between 3.4.2 and 3.5.0: - allow suite names as command line arguments (when there is not codename of this name and only one distribution has this suite name) - generate and check Sha256, too. - changestool puts Files: last in .changes files so etch's dupload works. Updates between 3.4.1 and 3.4.2: now really fix the nasty bug with notifiers 3.4.1 should have fixed and be more verbose when rejecting packages because of problems with a key Updates between 3.4.0 and 3.4.1: bugfixes only (though of the ugly segfaults kind) Updates between 3.3.2 and 3.4.0: + bugfixes: - no longer mix up -S and -P command line arguments (introduced in 3.0.1) - some field overriding was erroneously case dependent. - many spelling corrections + improvements: - more support for Checksums-Sha1 - add copysrc and copyfilter commands (improve copy w.r.t tracking) - add restore restoresrc restorefilter and _addpackage commands - warn about some impossible -A -T combinations. - set fake Suite: in snapshots to quiet apt's signature checks. - add REPREPRO_CAUSING_FILE environment variable in log notifiers. - update expected fields to new dpkg-dev - try to extract missing section and priority of .dsc files from .diff.gz and .tar.gz. Updates between 3.3.1 and 3.3.2: - bugfix in includedeb and a little bit code cleanup Updates between 3.3.0 and 3.3.1: - multiple bugfixes Updates between 3.1.0 and 3.3.0: - add support for different checksums. 
The new checksums.db which stores all the checksums, while files.db still only stores md5sum and is the canonical information, when it exists. This way repositories keep backward compatible. A repository generated with --nooldfilesdb only has checksums.db and will not work with reprepro version prior to 3.3. New command collectnewchecksums to calculate checksums missing in the database. Updates between 3.0.1 and 3.1.0: - add sha1 hashes to the generated Release files. the changes semantics needed in the release.caches.db file for this should be transient. This will only cause index files without uncompressed variants to be regenerated once upon upgrade, but switching back and forth between previous versions and this or later versions will cause regenerating of unchanged files. - internal changes of reading of text files (.dsc/.changes/Release/ control from .deb). Should not make any difference with normal input, and make the situation better with strange input. - source packages now can have .tar und .diff lzma compressed (still missing is support for lzma compressed binary packages) Updates between 3.0.0 and 3.0.1: - the default for --export is now "changed", as the old default was just too confusing most of the time. - translatefilelist know also can convert databases with old and new style entries Updates between 2.2.4 and 3.0.0: - new config file parser: * many error messages now with line numbers * native support of comments (i.e. lines starting with # are now ignored, instead of treated as ignored headers, # within lines is now comment, too) * better support of tabs * meaning of empty fields changed, empty now means nothing and not all. - always parse the whole distributions file first before doing anything else (avoids actions started in the wrong base directory and helps to catch more disambiguities, may lead to the need of a valid config file for some actions not needing one, though). 
- check pull and update rules to not list any architectures or components that will never be used, so typos won't go unnoticed. - obsolete --overridedir and searching files in overrides/ directory by default. This places are still search, but so is the configuration directory now and future version will stop accepting --overridedir and not search in that directory. - added db/version file to document database format (so future versions can warn about incompatibilities) - cleaned up tracking handling a bit: * retrack no longer created tracking data for distribtions without tracking * retrack only recreates usage data, not all data (so .changes files and old versions are no longer lost when run) also references from tracking data are now refreshed by rereferences instead * removealltracks now needs explicitly needs distribution names * tidytracks now removes all tracking data from a distribution without tracking * clearvanished removes tracking data from vanished distributions. - make update's ListHook relative to confdir (unless absolute) - added removesrc and removefilter - new format for contents.cache.db. Only needs half of the disk space and runtime to generate Contents files, but you need to run translatefilelists to translate the cached items (or delete your contents.cache.db and let reprepro reread all your .deb files). Also format and meaning of the Contents-fields changed, a rate no longer can be specified. Updates between 2.2.3 and 2.2.4: - [SECURITY] fix bug causing a Release.gpg with only unknown signatures considered as properly signed. Updates between 2.2.2 and 2.2.3: - add support for binNMUs (i.e. .changes files having a Version: that is not the source version). 
- add zsh auto-completions script Updates between 2.2.1 and 2.2.2: - processincoming can be limited to a single .changes file - fix to support apt-methods stating Send-Config: false - set GPG_TTY when stdin is a terminal to ease usage of pinentry-curses Updates between 2.2.0 and 2.2.1: - fix mixup of the name of the --spacecheck option - fix missing options in bash completions - fix segfault when including changes without notificators Updates between 2.1.0 and 2.2.0: - renamed cleartracks to removealltracks - new notifier type for accepted changes files - bugs fixed: * did not tidy tracking dependencies on package remove * forgot to call some slow notifiers in processincoming - new --wait-for-lock option - check free space on update (new --spacecheck option to switch this off) - extended the changestool helper (add, adddsc, addrawfile, setdistribution) - processincoming changes: * reports an error if a package is not included due to an already existing newer version. * allow ignoring of unused files and newer versions (Permit:) * option when to delete rejected or faulty package (Cleanup:) - include command names included .changes files like processincoming does Updates between 2.0.0 and 2.1.0: - add --silent option - change some status output to stdout instead of stderr. - fix some unnecessary exporting of index files - fix bug in term parsing (for FilterFormula and the like) - add Log: mechanism to log to file and execute external helpers - example-script to generate a packages.debian.org/changelogs like hierarchy with changelog and copyright files. 
Updates between 1.3.1 and 2.0.0: - add "adddeb" action to changestool - fix bug in manpage ("accept" should have been "allow" for uploaders) - new AlsoAcceptFor:-header for conf/distributions to allow more fine controled which distributions to allow than just codename/suite or everything (via --ignore=wrongdistribution) - fail cleanly when getting a .dsc without Format header - fix bug in non-libarchive filelist extraction on large lists - add processincoming command to scan an incoming directory and add packages from there. (this needed some refactorisations of other code, so beware) - add gensnapshot command Updates between 1.3.0 and 1.3.1: - bugfix in changestool updatechecksums Updates between 1.2.0 and 1.3.0: - now uses libgpgme11 instead of libgpgme6. - remove --onlyacceptsigned switch (soon to be be replaced by something useable, hopefully) - only reject a package because of signatures if it only has bad signatures and no good one. (Rejecting a package because of a missing key when it would have processed without signature did not really make sense) - new --ignore=brokensignatures to also accept packages with broken signatures without any valid signature. - Now looks at the Binary: and Version: fields of a .changes file. Unless the new --ignore=wrongversion is specified, a dsc must have the same version, and a .deb must have this source version unless --ignore=wrongsourceversion is given. A .deb must also contain a package listed in the Binary: header unless --ignore=surprisingbinary is given. (A .dsc with an other name or a .deb with an other Source than the Source-header if the .changes file is still not ignoreable due to file naming issues) - FilterList in update and pull rules now has a space separated list of filenames instead of only a single filename. - new Uploaders field in conf/distributions: allows to specify what a .changes file has to be signed with to be allowed in - new helper program "changestool" to preprocess .changes files. 
Updates between 1.1.0 and 1.2.0: - improve message of missing files - checkin now support .tar.bz2, .diff.bz2 and .tar.bz2 (checkindsc did not care, binaries may contain tar.bz2 if reprepro is compiled with libarchive and libbz2) - fix bug delaying full Contents- generation Updates between 1.0.2 and 1.1.0: - extended the (experimental) package tracking feature - cleartracks removes files losing their last reference (unless --keepunreferenced as usual) - fix bug of not generating a uncompressed Sources line in Release when no uncompressed Sources file is generated. Updates between 1.0.1 and 1.0.2: - fix segfault in non-libarchive code introduced with 1.0.0 Updates between 1.0.0 and 1.0.1: - add clearvanished command - cope with GNU ar style .deb files (when using libarchive) - cope with strange control.tar.gz files (when not using libarchive) Updates between 0.9.1 and 1.0.0: - reject some .changes earlier, delete added files when checks after copying files to the pool failed. - handle some signals (TERM, ABRT, INT and QUIT) a bit more gracefully - some little fixes in the documentation - add predelete action to delete packages that would be deleted or replaced in an update - add new copy command to copy a single package from one distribution to another. Updates between 0.9.0 and 0.9.1: - fix bug in post-export script handling. - fixed documentation in tiffany.example how to generate .diff directories the new apt can read. Updates between 0.8.2 and 0.9.0: - added --export= option and harmonized exporting of distributions. (Now every distribution processed without errors is exported by default, with options for always, never or only export it when changed) - added pull and checkpull actions. 
Those are roughly equivalent to upgrade rules with file:/path/to/basedir Method, but faster and a bit more limited (files cannot change components) - fix segfault of checkupdate - fix including a changes file with source and restricting to some binary distribution or to binary package type. - add support to use libarchive instead of calling ar and tar - added Contents file generation support - now supporting libdb-4.4, libdb-4.3 and libdb3 Updates between 0.8.1 and 0.8.2: - mark process list files and only skip those not marked as processed instead those not newly downloaded. - change the wording of some warnings, add some new - new WORKAROUND part in the manpage - add example bash_completion script Updates between 0.8 and 0.8.1: - some bugfixes (segfault, memmory leak, manpage typos) - enforcement of extensions of include{,dsc,deb,udeb} files to .changes,.dsc,.deb,.udeb and new --ignore=extension to circumvent it. - support generation of the NotAutomatic field. - added --ignore=missingfile to ignore files missing in a .changes file, but lying around and requested by a .dsc file. Updates between 0.7 and 0.8: - unless the new --keepdirectories option is given, try to remove pool/ directories that got empty by removing things from them. (To be exact, try to rmdir(2) them every time, which will only work if they are empty). - Unless the new --noskipold is used, only targets with newly downloaded index files are updated. (new = downloaded by the instance of reprepro currently running) - reprepro now always puts the checksums of the uncompressed index files into the Release file, even if it is not written to disk. This fixes some problems with newer versions of apt. (Take a look at DscIndices to get older versions of reprepro to please them, too). - The export hooks (the programs specified as DebIndices, UDebIndices and DscIndices) are now always called once with the uncompressed names. - to compile reprepro with woody without a backported zlib use the -DOLDZLIB switch. 
- reprepro now supports bzip2 output natively. (You can still use the example if you want to call bzip2 yourself instead of using the libbz2 library) - new db/release.cache.db file storing md5sums of written index and Release files there. (This can cause Release file give old md5sums when the files are not what it expects, but unless you manually changed them that is a good way to find errors, and manually changing if fragile anyway, so better do not do it but ask me if some feature is missing overrides cannot offer yet). Updates between 0.6 and 0.7: - new --ignore=missingfield,brokenold,brokenversioncmp, unusedarch,surpisingarch - Fix segfault when update file is empty. (Thanks to Gianluigi Tiesi for noticing this.) - improve manpage a little bit - many little tidy ups Updates between 0.5 and 0.6: - no longer set execute bit of generated Release.gpg files - use REPREPRO_BASE_DIR for default basedir, parse conf/options for further default options. (and add --no options to disable boolean options again, same for ignore) - new command createsymlinks (for symlinks like "stable"->"sarge") - parse FilterList default action correctly - putting .changes in a distribution not listed is now an error without --ignore=wrongdistribution (and without "ignore wrongdistributions" in conf/options) Updates between 0.4 and 0.5: - starts of source package tracking - add quick&dirty --ask-passphrase option - SignWith's argument is now used, use "yes" or "default" to get old behaviour - allow ~ in versions listed in .changes files Updates between 0.3 and 0.4: - minor bugfix: no longer readd existing packages, when after a delete rule a old package was found first. - adopt short-howto to changes in keywords. - many tidy ups and little bugfixes - add Fallback option to specify another host to get mirrored files from - default basedir is now "." i.e. the current directory. 
Updates between 0.2 and 0.3: - Override: SourceOverride: replaced by (Deb|UDeb|Dsc)Override - new command reoverride to reapply override information. - sometimes be a bit more verbose - new experimental iteratedupdate command , which is a variant of update but needs less memory. - to ignore Release signature failures two --force's are needed now. Updates between 0.1.1 and 0.2: - _md5sums command got removed. New command to dump the contents of the files database is _listmd5sums - --basedir (alias -b) will no longer override prior given values to --confdir, --listdir, .... - fix nasty overflow bug - write Release, Packages, and Sources files first to .new variants and move then all at once. - new Options DebIndices DscIndices UDebIndices reprepro-4.13.1/Makefile.am0000644000175100017510000000635712152651661012435 00000000000000SUBDIRS = docs tests EXTRA_DIST = autogen.sh bin_PROGRAMS = reprepro changestool rredtool if HAVE_LIBARCHIVE ARCHIVE_USED = ar.c debfile.c ARCHIVE_CONTENTS = debfilecontents.c ARCHIVE_UNUSED = extractcontrol.c else ARCHIVE_USED = extractcontrol.c ARCHIVE_CONTENTS = ARCHIVE_UNUSED = ar.c debfile.c debfilecontents.c endif AM_CPPFLAGS = $(ARCHIVECPP) $(DBCPPFLAGS) reprepro_LDADD = $(ARCHIVELIBS) $(DBLIBS) changestool_LDADD = $(ARCHIVELIBS) reprepro_SOURCES = outhook.c descriptions.c sizes.c sourcecheck.c byhandhook.c archallflood.c needbuild.c globmatch.c printlistformat.c diffindex.c rredpatch.c pool.c atoms.c uncompression.c remoterepository.c indexfile.c copypackages.c sourceextraction.c checksums.c readtextfile.c filecntl.c sha1.c sha256.c configparser.c database.c freespace.c hooks.c log.c changes.c incoming.c uploaderslist.c guesscomponent.c files.c md5.c dirs.c chunks.c reference.c binaries.c sources.c checks.c names.c dpkgversions.c release.c mprintf.c updates.c strlist.c signature_check.c signedfile.c signature.c distribution.c checkindeb.c checkindsc.c checkin.c upgradelist.c target.c aptmethod.c downloadcache.c main.c override.c 
terms.c termdecide.c ignore.c filterlist.c exports.c tracking.c optionsfile.c donefile.c pull.c contents.c filelist.c $(ARCHIVE_USED) $(ARCHIVE_CONTENTS) EXTRA_reprepro_SOURCE = $(ARCHIVE_UNUSED) changestool_SOURCES = uncompression.c sourceextraction.c readtextfile.c filecntl.c tool.c chunkedit.c strlist.c checksums.c sha1.c sha256.c md5.c mprintf.c chunks.c signature.c dirs.c names.c $(ARCHIVE_USED) rredtool_SOURCES = rredtool.c rredpatch.c mprintf.c filecntl.c sha1.c noinst_HEADERS = outhook.h descriptions.h sizes.h sourcecheck.h byhandhook.h archallflood.h needbuild.h globmatch.h printlistformat.h pool.h atoms.h uncompression.h remoterepository.h copypackages.h sourceextraction.h checksums.h readtextfile.h filecntl.h sha1.h sha256.h configparser.h database_p.h database.h freespace.h hooks.h log.h changes.h incoming.h guesscomponent.h md5.h dirs.h files.h chunks.h reference.h binaries.h sources.h checks.h names.h release.h error.h mprintf.h updates.h strlist.h signature.h signature_p.h distribution.h debfile.h checkindeb.h checkindsc.h upgradelist.h target.h aptmethod.h downloadcache.h override.h terms.h termdecide.h ignore.h filterlist.h dpkgversions.h checkin.h exports.h globals.h tracking.h trackingt.h optionsfile.h donefile.h pull.h ar.h filelist.h contents.h chunkedit.h uploaderslist.h indexfile.h rredpatch.h diffindex.h MAINTAINERCLEANFILES = $(srcdir)/Makefile.in $(srcdir)/configure $(srcdir)/stamp-h.in $(srcdir)/aclocal.m4 $(srcdir)/config.h.in clean-local: -rm -rf autom4te.cache $(srcdir)/autom4te.cache maintainer-clean-local: -rm -rf $(srcdir)/ac # Some things for my private laziness strictbooleancheck: /home/brl/gcc/b/gcc/cc1 -DHAVE_CONFIG_H -I/home/brl/gcc/b/gcc/include -I/usr/include -I. 
-Wall -DAVOID_CHECKPROBLEMS=1 -g -W -O2 *.c SPLINT=splint SPLITFLAGSFORVIM= -linelen 10000 -locindentspaces 0 SPLINTFLAGS= +posixlib -booltype bool -numabstractcast -fixedformalarray -enumint +enumindex +charint $(SPLITFLAGSFORVIM) $(EXTRASPLINTFLAGS) splint: $(SPLINT) -DSPLINT=1 $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) -D_GNU_SOURCE=1 $(SPLINTFLAGS) $(foreach file,$(reprepro_SOURCES),$(srcdir)/$(file)) reprepro-4.13.1/release.c0000644000175100017510000013470112152655314012157 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003,2004,2005,2007,2009,2012 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_LIBBZ2 #include #endif #define CHECKSUMS_CONTEXT visible #include "error.h" #include "ignore.h" #include "mprintf.h" #include "strlist.h" #include "filecntl.h" #include "chunks.h" #include "checksums.h" #include "dirs.h" #include "names.h" #include "signature.h" #include "distribution.h" #include "outhook.h" #include "release.h" #define INPUT_BUFFER_SIZE 1024 #define GZBUFSIZE 40960 #define BZBUFSIZE 40960 struct release { /* The base-directory of the distribution we are exporting */ char *dirofdist; /* anything new yet added */ bool new; /* NULL if no snapshot */ /*@null@*/char *snapshotname; /* specific overrides for fakeprefixes or snapshots: */ /*@null@*/char *fakesuite; /*@null@*/char *fakecodename; /*@null@*/const char *fakecomponentprefix; size_t fakecomponentprefixlen; /* the files yet for the list */ struct release_entry { struct release_entry *next; char *relativefilename; struct checksums *checksums; char *fullfinalfilename; char *fulltemporaryfilename; char *symlinktarget; /* name chks NULL NULL NULL: add old filename or virtual file * name chks file file NULL: rename new file and publish * name NULL file file NULL: rename new file * name NULL file NULL NULL: delete if done * name NULL file NULL file: create symlink */ } *files; /* the Release file in preperation * (only valid between _prepare and _finish) */ struct signedfile *signedfile; /* the cache database for old files */ struct table *cachedb; }; static void release_freeentry(struct release_entry *e) { free(e->relativefilename); checksums_free(e->checksums); free(e->fullfinalfilename); if (!global.keeptemporaries && e->fulltemporaryfilename != 
NULL) (void)unlink(e->fulltemporaryfilename); free(e->fulltemporaryfilename); free(e->symlinktarget); free(e); } void release_free(struct release *release) { struct release_entry *e; free(release->snapshotname); free(release->dirofdist); free(release->fakesuite); free(release->fakecodename); while ((e = release->files) != NULL) { release->files = e->next; release_freeentry(e); } if (release->signedfile != NULL) signedfile_free(release->signedfile); if (release->cachedb != NULL) { table_close(release->cachedb); } free(release); } const char *release_dirofdist(struct release *release) { return release->dirofdist; } static retvalue newreleaseentry(struct release *release, /*@only@*/ char *relativefilename, /*@only@*/ struct checksums *checksums, /*@only@*/ /*@null@*/ char *fullfinalfilename, /*@only@*/ /*@null@*/ char *fulltemporaryfilename, /*@only@*/ /*@null@*/ char *symlinktarget) { struct release_entry *n, *p; /* everything has a relative name */ assert (relativefilename != NULL); /* it's either something to do or to publish */ assert (fullfinalfilename != NULL || checksums != NULL); /* if there is something temporary, it has a final place */ assert (fulltemporaryfilename == NULL || fullfinalfilename != NULL); /* a symlink cannot be published (Yet?) 
*/ assert (symlinktarget == NULL || checksums == NULL); /* cannot place a file and a symlink */ assert (symlinktarget == NULL || fulltemporaryfilename == NULL); /* something to publish cannot be a file deletion */ assert (checksums == NULL || fullfinalfilename == NULL || fulltemporaryfilename != NULL || symlinktarget != NULL); n = NEW(struct release_entry); if (FAILEDTOALLOC(n)) { checksums_free(checksums); free(fullfinalfilename); free(fulltemporaryfilename); free(symlinktarget); return RET_ERROR_OOM; } n->next = NULL; n->relativefilename = relativefilename; n->checksums = checksums; n->fullfinalfilename = fullfinalfilename; n->fulltemporaryfilename = fulltemporaryfilename; n->symlinktarget = symlinktarget; if (release->files == NULL) release->files = n; else { p = release->files; while (p->next != NULL) p = p->next; p->next = n; } return RET_OK; } retvalue release_init(struct release **release, const char *codename, const char *suite, const char *fakecomponentprefix) { struct release *n; size_t len, suitelen, codenamelen; retvalue r; n = zNEW(struct release); if (FAILEDTOALLOC(n)) return RET_ERROR_OOM; n->dirofdist = calc_dirconcat(global.distdir, codename); if (FAILEDTOALLOC(n->dirofdist)) { free(n); return RET_ERROR_OOM; } if (fakecomponentprefix != NULL) { len = strlen(fakecomponentprefix); codenamelen = strlen(codename); n->fakecomponentprefix = fakecomponentprefix; n->fakecomponentprefixlen = len; if (codenamelen > len && codename[codenamelen - len - 1] == '/' && memcmp(codename + (codenamelen - len), fakecomponentprefix, len) == 0) { n->fakecodename = strndup(codename, codenamelen - len - 1); if (FAILEDTOALLOC(n->fakecodename)) { free(n->dirofdist); free(n); return RET_ERROR_OOM; } } if (suite != NULL && (suitelen = strlen(suite)) > len && suite[suitelen - len - 1] == '/' && memcmp(suite + (suitelen - len), fakecomponentprefix, len) == 0) { n->fakesuite = strndup(suite, suitelen - len - 1); if (FAILEDTOALLOC(n->fakesuite)) { free(n->fakecodename); 
free(n->dirofdist); free(n); return RET_ERROR_OOM; } } } r = database_openreleasecache(codename, &n->cachedb); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { n->cachedb = NULL; free(n->fakecodename); free(n->fakesuite); free(n->dirofdist); free(n); return r; } *release = n; return RET_OK; } retvalue release_initsnapshot(const char *codename, const char *name, struct release **release) { struct release *n; n = zNEW(struct release); if (FAILEDTOALLOC(n)) return RET_ERROR_OOM; n->dirofdist = calc_snapshotbasedir(codename, name); if (FAILEDTOALLOC(n->dirofdist)) { free(n); return RET_ERROR_OOM; } /* apt only removes the last /... part but we create two, * so stop it generating warnings by faking a suite */ n->fakesuite = mprintf("%s/snapshots/%s", codename, name); if (FAILEDTOALLOC(n->fakesuite)) { free(n->dirofdist); free(n); return RET_ERROR_OOM; } n->fakecodename = NULL; n->fakecomponentprefix = NULL; n->fakecomponentprefixlen = 0; n->cachedb = NULL; n->snapshotname = strdup(name); if (n->snapshotname == NULL) { free(n->fakesuite); free(n->dirofdist); free(n); return RET_ERROR_OOM; } *release = n; return RET_OK; } retvalue release_adddel(struct release *release, /*@only@*/char *reltmpfile) { char *filename; filename = calc_dirconcat(release->dirofdist, reltmpfile); if (FAILEDTOALLOC(filename)) { free(reltmpfile); return RET_ERROR_OOM; } return newreleaseentry(release, reltmpfile, NULL, filename, NULL, NULL); } retvalue release_addnew(struct release *release, /*@only@*/char *reltmpfile, /*@only@*/char *relfilename) { retvalue r; char *filename, *finalfilename; struct checksums *checksums; filename = calc_dirconcat(release->dirofdist, reltmpfile); if (FAILEDTOALLOC(filename)) { free(reltmpfile); free(relfilename); return RET_ERROR_OOM; } free(reltmpfile); r = checksums_read(filename, &checksums); if (!RET_IS_OK(r)) { free(relfilename); free(filename); return r; } finalfilename = calc_dirconcat(release->dirofdist, relfilename); if (FAILEDTOALLOC(finalfilename)) { 
free(relfilename); free(filename); checksums_free(checksums); return RET_ERROR_OOM; } release->new = true; return newreleaseentry(release, relfilename, checksums, finalfilename, filename, NULL); } retvalue release_addsilentnew(struct release *release, /*@only@*/char *reltmpfile, /*@only@*/char *relfilename) { char *filename, *finalfilename; filename = calc_dirconcat(release->dirofdist, reltmpfile); if (FAILEDTOALLOC(filename)) { free(reltmpfile); free(relfilename); return RET_ERROR_OOM; } free(reltmpfile); finalfilename = calc_dirconcat(release->dirofdist, relfilename); if (FAILEDTOALLOC(finalfilename)) { free(relfilename); free(filename); return RET_ERROR_OOM; } release->new = true; return newreleaseentry(release, relfilename, NULL, finalfilename, filename, NULL); } retvalue release_addold(struct release *release, /*@only@*/char *relfilename) { retvalue r; char *filename; struct checksums *checksums; filename = calc_dirconcat(release->dirofdist, relfilename); if (FAILEDTOALLOC(filename)) { free(filename); return RET_ERROR_OOM; } r = checksums_read(filename, &checksums); free(filename); if (!RET_IS_OK(r)) { free(relfilename); return r; } return newreleaseentry(release, relfilename, checksums, NULL, NULL, NULL); } static retvalue release_addsymlink(struct release *release, /*@only@*/char *relfilename, /*@only@*/ char *symlinktarget) { char *fullfilename; fullfilename = calc_dirconcat(release->dirofdist, relfilename); if (FAILEDTOALLOC(fullfilename)) { free(symlinktarget); free(relfilename); return RET_ERROR_OOM; } release->new = true; return newreleaseentry(release, relfilename, NULL, fullfilename, NULL, symlinktarget); } static char *calc_compressedname(const char *name, enum indexcompression ic) { switch (ic) { case ic_uncompressed: return strdup(name); case ic_gzip: return calc_addsuffix(name, "gz"); #ifdef HAVE_LIBBZ2 case ic_bzip2: return calc_addsuffix(name, "bz2"); #endif default: assert ("Huh?" 
== NULL); return NULL; } } static retvalue release_usecached(struct release *release, const char *relfilename, compressionset compressions) { retvalue result, r; enum indexcompression ic; char *filename[ic_count]; struct checksums *checksums[ic_count]; memset(filename, 0, sizeof(filename)); memset(checksums, 0, sizeof(checksums)); result = RET_OK; for (ic = ic_uncompressed ; ic < ic_count ; ic++) { if (ic != ic_uncompressed && (compressions & IC_FLAG(ic)) == 0) continue; filename[ic] = calc_compressedname(relfilename, ic); if (FAILEDTOALLOC(filename[ic])) { result = RET_ERROR_OOM; break; } } if (RET_IS_OK(result)) { /* first look if the there are actual files, in case * the cache still lists them but they got lost */ for (ic = ic_uncompressed ; ic < ic_count ; ic++) { char *fullfilename; if ((compressions & IC_FLAG(ic)) == 0) continue; assert (filename[ic] != NULL); fullfilename = calc_dirconcat(release->dirofdist, filename[ic]); if (FAILEDTOALLOC(fullfilename)) { result = RET_ERROR_OOM; break; } if (!isregularfile(fullfilename)) { free(fullfilename); result = RET_NOTHING; break; } free(fullfilename); } } if (RET_IS_OK(result) && release->cachedb == NULL) result = RET_NOTHING; if (!RET_IS_OK(result)) { for (ic = ic_uncompressed ; ic < ic_count ; ic++) free(filename[ic]); return result; } /* now that the files are there look into the cache * what checksums they have. */ for (ic = ic_uncompressed ; ic < ic_count ; ic++) { char *combinedchecksum; if (filename[ic] == NULL) continue; r = table_getrecord(release->cachedb, filename[ic], &combinedchecksum); if (!RET_IS_OK(r)) { result = r; break; } r = checksums_parse(&checksums[ic], combinedchecksum); // TODO: handle malformed checksums better? 
free(combinedchecksum); if (!RET_IS_OK(r)) { result = r; break; } } /* some files might not yet have some type of checksum available, * so calculate them (checking the other checksums match...): */ if (RET_IS_OK(result)) { for (ic = ic_uncompressed ; ic < ic_count ; ic++) { char *fullfilename; if (filename[ic] == NULL) continue; fullfilename = calc_dirconcat(release->dirofdist, filename[ic]); if (FAILEDTOALLOC(fullfilename)) r = RET_ERROR_OOM; else r = checksums_complete(&checksums[ic], fullfilename); if (r == RET_ERROR_WRONG_MD5) { fprintf(stderr, "WARNING: '%s' is different from recorded checksums.\n" "(This was only catched because some new checksum type was not yet available.)\n" "Triggering recreation of that file.\n", fullfilename); r = RET_NOTHING; } free(fullfilename); if (!RET_IS_OK(r)) { result = r; break; } } } if (!RET_IS_OK(result)) { for (ic = ic_uncompressed ; ic < ic_count ; ic++) { if (filename[ic] == NULL) continue; free(filename[ic]); checksums_free(checksums[ic]); } return result; } /* everything found, commit it: */ result = RET_OK; for (ic = ic_uncompressed ; ic < ic_count ; ic++) { if (filename[ic] == NULL) continue; r = newreleaseentry(release, filename[ic], checksums[ic], NULL, NULL, NULL); RET_UPDATE(result, r); } return result; } struct filetorelease { retvalue state; struct openfile { int fd; struct checksumscontext context; char *relativefilename; char *fullfinalfilename; char *fulltemporaryfilename; char *symlinkas; } f[ic_count]; /* input buffer, to checksum/compress data at once */ unsigned char *buffer; size_t waiting_bytes; /* output buffer for gzip compression */ unsigned char *gzoutputbuffer; size_t gz_waiting_bytes; z_stream gzstream; #ifdef HAVE_LIBBZ2 /* output buffer for bzip2 compression */ char *bzoutputbuffer; size_t bz_waiting_bytes; bz_stream bzstream; #endif }; void release_abortfile(struct filetorelease *file) { enum indexcompression i; for (i = ic_uncompressed ; i < ic_count ; i++) { if (file->f[i].fd >= 0) { 
			(void)close(file->f[i].fd);
			/* remove the half-written temporary file */
			if (file->f[i].fulltemporaryfilename != NULL)
				(void)unlink(file->f[i].fulltemporaryfilename);
		}
		free(file->f[i].relativefilename);
		free(file->f[i].fullfinalfilename);
		free(file->f[i].fulltemporaryfilename);
		free(file->f[i].symlinkas);
	}
	free(file->buffer);
	free(file->gzoutputbuffer);
	/* a non-NULL next_out marks an initialized zlib stream */
	if (file->gzstream.next_out != NULL) {
		(void)deflateEnd(&file->gzstream);
	}
#ifdef HAVE_LIBBZ2
	free(file->bzoutputbuffer);
	/* likewise for the bzip2 stream */
	if (file->bzstream.next_out != NULL) {
		(void)BZ2_bzCompressEnd(&file->bzstream);
	}
#endif
}

/* Check whether all final files this index would produce already exist.
 * Returns true only if every variant that has a final filename is
 * present as a regular file; returns false as soon as one is missing
 * (so a partial set forces regeneration), and false if no variant had
 * a final filename at all. */
bool release_oldexists(struct filetorelease *file) {
	enum indexcompression ic;
	bool hadanything = false;

	for (ic = ic_uncompressed ; ic < ic_count ; ic++) {
		char *f = file->f[ic].fullfinalfilename;

		if (f != NULL) {
			if (isregularfile(f))
				hadanything = true;
			else
				return false;
		}
	}
	return hadanything;
}

/* Open the temporary output file ("<final name>.new") for one
 * compression variant; fills in f->fullfinalfilename,
 * f->fulltemporaryfilename and f->fd. */
static retvalue openfile(const char *dirofdist, struct openfile *f) {

	f->fullfinalfilename = calc_dirconcat(dirofdist, f->relativefilename);
	if (FAILEDTOALLOC(f->fullfinalfilename))
		return RET_ERROR_OOM;
	f->fulltemporaryfilename = calc_addsuffix(f->fullfinalfilename, "new");
	if (FAILEDTOALLOC(f->fulltemporaryfilename))
		return RET_ERROR_OOM;
	/* unlink first so the O_EXCL open cannot fail on a leftover file */
	(void)unlink(f->fulltemporaryfilename);
	f->fd = open(f->fulltemporaryfilename,
			O_WRONLY|O_CREAT|O_EXCL|O_NOCTTY, 0666);
	if (f->fd < 0) {
		int e = errno;
		fprintf(stderr, "Error %d opening file %s for writing: %s\n",
				e, f->fulltemporaryfilename, strerror(e));
		return RET_ERRNO(e);
	}
	return RET_OK;
}

/* Feed len bytes into the variant's checksum context and, if it has an
 * open file descriptor, write them out, retrying on EINTR/EAGAIN and
 * on short writes.  Returns RET_NOTHING when only the checksum was
 * updated (no file open for this variant).
 * NOTE(review): the error message names fullfinalfilename although the
 * temporary file is what is being written — confirm intended. */
static retvalue writetofile(struct openfile *file, const unsigned char *data, size_t len) {

	checksumscontext_update(&file->context, data, len);

	if (file->fd < 0)
		return RET_NOTHING;

	while (len > 0) {
		ssize_t written = write(file->fd, data, len);

		if (written >= 0) {
			len -= written;
			data += written;
		} else {
			int e = errno;

			if (e == EAGAIN || e == EINTR)
				continue;
			fprintf(stderr, "Error %d writing to %s: %s\n",
					e, file->fullfinalfilename,
					strerror(e));
			return RET_ERRNO(e);
		}
	}
	return RET_OK;
}

static retvalue
/* Initialize the zlib deflate stream of *f to produce gzip-format
 * output into f->gzoutputbuffer. */
initgzcompression(struct filetorelease *f) {
	int zret;

	/* bit 17 of zlibCompileFlags is set when zlib was built without
	 * gzip stream support */
	if ((zlibCompileFlags() & (1<<17)) !=0) {
		fprintf(stderr, "libz compiled without .gz supporting code\n");
		return RET_ERROR;
	}
	f->gzoutputbuffer = malloc(GZBUFSIZE);
	if (FAILEDTOALLOC(f->gzoutputbuffer))
		return RET_ERROR_OOM;
	f->gzstream.next_in = NULL;
	f->gzstream.avail_in = 0;
	f->gzstream.next_out = f->gzoutputbuffer;
	f->gzstream.avail_out = GZBUFSIZE;
	f->gzstream.zalloc = NULL;
	f->gzstream.zfree = NULL;
	f->gzstream.opaque = NULL;
	zret = deflateInit2(&f->gzstream,
			/* Level: 0-9 or Z_DEFAULT_COMPRESSION: */
			Z_DEFAULT_COMPRESSION,
			/* only possibility yet: */
			Z_DEFLATED,
			/* +16 to generate gzip header */
			16 + MAX_WBITS,
			/* how much memory to use 1-9 */
			8,
			/* default or Z_FILTERED or Z_HUFFMAN_ONLY or Z_RLE */
			Z_DEFAULT_STRATEGY
			);
	f->gz_waiting_bytes = GZBUFSIZE - f->gzstream.avail_out;
	if (zret == Z_MEM_ERROR)
		return RET_ERROR_OOM;
	if (zret != Z_OK) {
		if (f->gzstream.msg == NULL) {
			fprintf(stderr, "Error from zlib's deflateInit2: "
					"unknown(%d)\n", zret);
		} else {
			fprintf(stderr,
				"Error from zlib's deflateInit2: %s\n",
				f->gzstream.msg);
		}
		return RET_ERROR;
	}
	return RET_OK;
}

#ifdef HAVE_LIBBZ2
/* Initialize the libbz2 compression stream of *f writing into
 * f->bzoutputbuffer. */
static retvalue initbzcompression(struct filetorelease *f) {
	int bzret;

	f->bzoutputbuffer = malloc(BZBUFSIZE);
	if (FAILEDTOALLOC(f->bzoutputbuffer))
		return RET_ERROR_OOM;
	f->bzstream.next_in = NULL;
	f->bzstream.avail_in = 0;
	f->bzstream.next_out = f->bzoutputbuffer;
	f->bzstream.avail_out = BZBUFSIZE;
	f->bzstream.bzalloc = NULL;
	f->bzstream.bzfree = NULL;
	f->bzstream.opaque = NULL;
	bzret = BZ2_bzCompressInit(&f->bzstream,
			/* blocksize (1-9) */
			9,
			/* verbosity */
			0,
			/* workFactor (1-250, 0 = default(30)) */
			0
			);
	if (bzret == BZ_MEM_ERROR)
		return RET_ERROR_OOM;
	if (bzret != BZ_OK) {
		fprintf(stderr, "Error from libbz2's bzCompressInit: "
				"%d\n", bzret);
		return RET_ERROR;
	}
	return RET_OK;
}
#endif

/* filename suffix per compression variant, indexed by
 * enum indexcompression */
static const char * const ics[ic_count] = { "", ".gz"
#ifdef HAVE_LIBBZ2
	, ".bz2"
#endif
};

static inline retvalue
/* Store the variant's relative filename (and, if symlinkas is given,
 * the symlink name), both with the compression suffix appended. */
setfilename(struct filetorelease *n, const char *relfilename, /*@null@*/const char *symlinkas, enum indexcompression ic) {
	n->f[ic].relativefilename = mprintf("%s%s", relfilename, ics[ic]);
	if (FAILEDTOALLOC(n->f[ic].relativefilename))
		return RET_ERROR_OOM;
	if (symlinkas == NULL)
		return RET_OK;
	/* symlink creation fails horribly if the symlink is not in the base
	 * directory */
	assert (strchr(symlinkas, '/') == NULL);
	n->f[ic].symlinkas = mprintf("%s%s", symlinkas, ics[ic]);
	if (FAILEDTOALLOC(n->f[ic].symlinkas))
		return RET_ERROR_OOM;
	return RET_OK;
}

/* Warn about a possibly left-over file in the distribution directory;
 * silenced by --ignore=oldfile, and the hint line is printed only once
 * per run (tracked via ignored[IGN_oldfile]). */
static inline void warnfilename(struct release *release, const char *relfilename, enum indexcompression ic) {
	char *fullfilename;

	if (IGNORABLE(oldfile))
		return;
	fullfilename = mprintf("%s/%s%s", release->dirofdist,
			relfilename, ics[ic]);
	if (FAILEDTOALLOC(fullfilename))
		return;
	if (isanyfile(fullfilename)) {
		fprintf(stderr, "Possibly left over file '%s'.\n",
				fullfilename);
		if (!ignored[IGN_oldfile]) {
			fputs("You might want to delete it or use --ignore=oldfile to no longer get this message.\n", stderr);
			ignored[IGN_oldfile] = true;
		}
	}
	free(fullfilename);
}

/* Common backend of release_startfile and release_startlinkedfile:
 * if usecache and the cached checksums still match the files on disk,
 * nothing is opened (caller sees RET_NOTHING); otherwise one output
 * file per requested compression variant is created. */
static retvalue startfile(struct release *release, const char *filename, /*@null@*/const char *symlinkas, compressionset compressions, bool usecache, struct filetorelease **file) {
	struct filetorelease *n;
	enum indexcompression i;

	if (usecache) {
		retvalue r = release_usecached(release, filename,
				compressions);
		if (r != RET_NOTHING) {
			/* cached data was valid: tell the caller there is
			 * nothing to write by returning RET_NOTHING */
			if (RET_IS_OK(r))
				return RET_NOTHING;
			return r;
		}
	}
	n = zNEW(struct filetorelease);
	if (FAILEDTOALLOC(n))
		return RET_ERROR_OOM;
	n->buffer = malloc(INPUT_BUFFER_SIZE);
	if (FAILEDTOALLOC(n->buffer)) {
		release_abortfile(n);
		return RET_ERROR_OOM;
	}
	/* mark all descriptors closed so release_abortfile is safe */
	for (i = ic_uncompressed ; i < ic_count ; i ++) {
		n->f[i].fd = -1;
	}
	if ((compressions & IC_FLAG(ic_uncompressed)) != 0) {
		retvalue r;
		r = setfilename(n, filename, symlinkas, ic_uncompressed);
		if (!RET_WAS_ERROR(r))
			r = openfile(release->dirofdist,
					&n->f[ic_uncompressed]);
		if (RET_WAS_ERROR(r)) {
release_abortfile(n); return r; } } else { /* the uncompressed file always shows up in Release */ n->f[ic_uncompressed].relativefilename = strdup(filename); if (FAILEDTOALLOC(n->f[ic_uncompressed].relativefilename)) { release_abortfile(n); return RET_ERROR_OOM; } } if ((compressions & IC_FLAG(ic_gzip)) != 0) { retvalue r; r = setfilename(n, filename, symlinkas, ic_gzip); if (!RET_WAS_ERROR(r)) r = openfile(release->dirofdist, &n->f[ic_gzip]); if (RET_WAS_ERROR(r)) { release_abortfile(n); return r; } checksumscontext_init(&n->f[ic_gzip].context); r = initgzcompression(n); if (RET_WAS_ERROR(r)) { release_abortfile(n); return r; } } #ifdef HAVE_LIBBZ2 if ((compressions & IC_FLAG(ic_bzip2)) != 0) { retvalue r; r = setfilename(n, filename, symlinkas, ic_bzip2); if (!RET_WAS_ERROR(r)) r = openfile(release->dirofdist, &n->f[ic_bzip2]); if (RET_WAS_ERROR(r)) { release_abortfile(n); return r; } checksumscontext_init(&n->f[ic_bzip2].context); r = initbzcompression(n); if (RET_WAS_ERROR(r)) { release_abortfile(n); return r; } } #endif checksumscontext_init(&n->f[ic_uncompressed].context); *file = n; return RET_OK; } retvalue release_startfile(struct release *release, const char *filename, compressionset compressions, bool usecache, struct filetorelease **file) { return startfile(release, filename, NULL, compressions, usecache, file); } retvalue release_startlinkedfile(struct release *release, const char *filename, const char *symlinkas, compressionset compressions, bool usecache, struct filetorelease **file) { return startfile(release, filename, symlinkas, compressions, usecache, file); } void release_warnoldfileorlink(struct release *release, const char *filename, compressionset compressions) { enum indexcompression i; for (i = ic_uncompressed ; i < ic_count ; i ++) if ((compressions & IC_FLAG(i)) != 0) warnfilename(release, filename, i); } static inline char *calc_relative_path(const char *target, const char *linkname) { size_t t_len, l_len, common_len, len; const char *t, 
*l; int depth; char *n, *p; t_len = strlen(target); l_len = strlen(linkname); t = target; l = linkname; common_len = 0; while (*t == *l && *t != '\0') { if (*t == '/') common_len = (t - target) + 1; t++; l++; } depth = 0; while (*l != '\0') { if (*l++ == '/') depth++; } assert (common_len <= t_len && common_len <= l_len && memcmp(target, linkname, common_len) == 0); len = 3 * depth + t_len - common_len; n = malloc(len + 1); if (FAILEDTOALLOC(n)) return NULL; p = n; while (depth > 0) { memcpy(p, "../", 3); p += 3; } memcpy(p, target + common_len, 1 + t_len - common_len); p += t_len - common_len; assert ((size_t)(p-n) == len); return n; } static retvalue releasefile(struct release *release, struct openfile *f) { struct checksums *checksums; retvalue r; if (f->relativefilename == NULL) { assert (f->fullfinalfilename == NULL); assert (f->fulltemporaryfilename == NULL); return RET_NOTHING; } assert((f->fullfinalfilename == NULL && f->fulltemporaryfilename == NULL) || (f->fullfinalfilename != NULL && f->fulltemporaryfilename != NULL)); r = checksums_from_context(&checksums, &f->context); if (RET_WAS_ERROR(r)) return r; if (f->symlinkas) { char *symlinktarget = calc_relative_path(f->relativefilename, f->symlinkas); if (FAILEDTOALLOC(symlinktarget)) return RET_ERROR_OOM; r = release_addsymlink(release, f->symlinkas, symlinktarget); f->symlinkas = NULL; if (RET_WAS_ERROR(r)) return r; } r = newreleaseentry(release, f->relativefilename, checksums, f->fullfinalfilename, f->fulltemporaryfilename, NULL); f->relativefilename = NULL; f->fullfinalfilename = NULL; f->fulltemporaryfilename = NULL; return r; } static retvalue writegz(struct filetorelease *f) { int zret; assert (f->f[ic_gzip].fd >= 0); f->gzstream.next_in = f->buffer; f->gzstream.avail_in = INPUT_BUFFER_SIZE; do { f->gzstream.next_out = f->gzoutputbuffer + f->gz_waiting_bytes; f->gzstream.avail_out = GZBUFSIZE - f->gz_waiting_bytes; zret = deflate(&f->gzstream, Z_NO_FLUSH); f->gz_waiting_bytes = GZBUFSIZE - 
f->gzstream.avail_out; if ((zret == Z_OK && f->gz_waiting_bytes >= GZBUFSIZE / 2) || zret == Z_BUF_ERROR) { retvalue r; /* there should be anything to write, otherwise * better break to avoid an infinite loop */ if (f->gz_waiting_bytes == 0) break; r = writetofile(&f->f[ic_gzip], f->gzoutputbuffer, f->gz_waiting_bytes); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; f->gz_waiting_bytes = 0; } /* as we start with some data to process, Z_BUF_ERROR * should only happend when no output is possible, as that * gets possible again it should finally produce more output * and return Z_OK and always terminate. Hopefully... */ } while (zret == Z_BUF_ERROR || (zret == Z_OK && f->gzstream.avail_in != 0)); f->gzstream.next_in = NULL; f->gzstream.avail_in = 0; if (zret != Z_OK) { if (f->gzstream.msg == NULL) { fprintf(stderr, "Error from zlib's deflate: " "unknown(%d)\n", zret); } else { fprintf(stderr, "Error from zlib's deflate: %s\n", f->gzstream.msg); } return RET_ERROR; } return RET_OK; } static retvalue finishgz(struct filetorelease *f) { int zret; assert (f->f[ic_gzip].fd >= 0); f->gzstream.next_in = f->buffer; f->gzstream.avail_in = f->waiting_bytes; do { f->gzstream.next_out = f->gzoutputbuffer + f->gz_waiting_bytes; f->gzstream.avail_out = GZBUFSIZE - f->gz_waiting_bytes; zret = deflate(&f->gzstream, Z_FINISH); f->gz_waiting_bytes = GZBUFSIZE - f->gzstream.avail_out; if (zret == Z_OK || zret == Z_STREAM_END || zret == Z_BUF_ERROR) { retvalue r; if (f->gz_waiting_bytes == 0) { if (zret != Z_STREAM_END) { fprintf(stderr, "Unexpected buffer error after deflate (%d)\n", zret); return RET_ERROR; } break; } r = writetofile(&f->f[ic_gzip], f->gzoutputbuffer, f->gz_waiting_bytes); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; f->gz_waiting_bytes = 0; } /* see above */ } while (zret == Z_BUF_ERROR || zret == Z_OK); if (zret != Z_STREAM_END) { if (f->gzstream.msg == NULL) { fprintf(stderr, "Error from zlib's deflate: " "unknown(%d)\n", zret); } else { 
fprintf(stderr, "Error from zlib's deflate: %s\n", f->gzstream.msg); } return RET_ERROR; } zret = deflateEnd(&f->gzstream); /* to avoid deflateEnd called again */ f->gzstream.next_out = NULL; if (zret != Z_OK) { if (f->gzstream.msg == NULL) { fprintf(stderr, "Error from zlib's deflateEnd: " "unknown(%d)\n", zret); } else { fprintf(stderr, "Error from zlib's deflateEnd: %s\n", f->gzstream.msg); } return RET_ERROR; } return RET_OK; } #ifdef HAVE_LIBBZ2 static retvalue writebz(struct filetorelease *f) { int bzret; assert (f->f[ic_bzip2].fd >= 0); f->bzstream.next_in = (char*)f->buffer; f->bzstream.avail_in = INPUT_BUFFER_SIZE; do { f->bzstream.next_out = f->bzoutputbuffer + f->bz_waiting_bytes; f->bzstream.avail_out = BZBUFSIZE - f->bz_waiting_bytes; bzret = BZ2_bzCompress(&f->bzstream, BZ_RUN); f->bz_waiting_bytes = BZBUFSIZE - f->bzstream.avail_out; if (bzret == BZ_RUN_OK && f->bz_waiting_bytes >= BZBUFSIZE / 2) { retvalue r; assert (f->bz_waiting_bytes > 0); r = writetofile(&f->f[ic_bzip2], (const unsigned char *)f->bzoutputbuffer, f->bz_waiting_bytes); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; f->bz_waiting_bytes = 0; } } while (bzret == BZ_RUN_OK && f->bzstream.avail_in != 0); f->bzstream.next_in = NULL; f->bzstream.avail_in = 0; if (bzret != BZ_RUN_OK) { fprintf(stderr, "Error from libbz2's bzCompress: " "%d\n", bzret); return RET_ERROR; } return RET_OK; } static retvalue finishbz(struct filetorelease *f) { int bzret; assert (f->f[ic_bzip2].fd >= 0); f->bzstream.next_in = (char*)f->buffer; f->bzstream.avail_in = f->waiting_bytes; do { f->bzstream.next_out = f->bzoutputbuffer + f->bz_waiting_bytes; f->bzstream.avail_out = BZBUFSIZE - f->bz_waiting_bytes; bzret = BZ2_bzCompress(&f->bzstream, BZ_FINISH); f->bz_waiting_bytes = BZBUFSIZE - f->bzstream.avail_out; /* BZ_RUN_OK most likely is not possible here, but BZ_FINISH_OK * is returned when it cannot be finished in one step. * but better safe then sorry... 
*/ if ((bzret == BZ_RUN_OK || bzret == BZ_FINISH_OK || bzret == BZ_STREAM_END) && f->bz_waiting_bytes > 0) { retvalue r; r = writetofile(&f->f[ic_bzip2], (const unsigned char*)f->bzoutputbuffer, f->bz_waiting_bytes); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; f->bz_waiting_bytes = 0; } } while (bzret == BZ_RUN_OK || bzret == BZ_FINISH_OK); if (bzret != BZ_STREAM_END) { fprintf(stderr, "Error from bzlib's bzCompress: " "%d\n", bzret); return RET_ERROR; } bzret = BZ2_bzCompressEnd(&f->bzstream); /* to avoid bzCompressEnd called again */ f->bzstream.next_out = NULL; if (bzret != BZ_OK) { fprintf(stderr, "Error from libbz2's bzCompressEnd: " "%d\n", bzret); return RET_ERROR; } return RET_OK; } #endif retvalue release_finishfile(struct release *release, struct filetorelease *file) { retvalue result, r; enum indexcompression i; if (RET_WAS_ERROR(file->state)) { r = file->state; release_abortfile(file); return r; } r = writetofile(&file->f[ic_uncompressed], file->buffer, file->waiting_bytes); if (RET_WAS_ERROR(r)) { release_abortfile(file); return r; } if (file->f[ic_uncompressed].fd >= 0) { if (close(file->f[ic_uncompressed].fd) != 0) { int e = errno; file->f[ic_uncompressed].fd = -1; release_abortfile(file); return RET_ERRNO(e); } file->f[ic_uncompressed].fd = -1; } if (file->f[ic_gzip].fd >= 0) { r = finishgz(file); if (RET_WAS_ERROR(r)) { release_abortfile(file); return r; } if (close(file->f[ic_gzip].fd) != 0) { int e = errno; file->f[ic_gzip].fd = -1; release_abortfile(file); return RET_ERRNO(e); } file->f[ic_gzip].fd = -1; } #ifdef HAVE_LIBBZ2 if (file->f[ic_bzip2].fd >= 0) { r = finishbz(file); if (RET_WAS_ERROR(r)) { release_abortfile(file); return r; } if (close(file->f[ic_bzip2].fd) != 0) { int e = errno; file->f[ic_bzip2].fd = -1; release_abortfile(file); return RET_ERRNO(e); } file->f[ic_bzip2].fd = -1; } #endif release->new = true; result = RET_OK; for (i = ic_uncompressed ; i < ic_count ; i++) { r = releasefile(release, &file->f[i]); if 
(RET_WAS_ERROR(r)) { release_abortfile(file); return r; } RET_UPDATE(result, r); } free(file->buffer); free(file->gzoutputbuffer); #ifdef HAVE_LIBBZ2 free(file->bzoutputbuffer); #endif free(file); return result; } static retvalue release_processbuffer(struct filetorelease *file) { retvalue result, r; result = RET_OK; assert (file->waiting_bytes == INPUT_BUFFER_SIZE); /* always call this - even if there is no uncompressed file * to generate - so that checksums are calculated */ r = writetofile(&file->f[ic_uncompressed], file->buffer, INPUT_BUFFER_SIZE); RET_UPDATE(result, r); if (file->f[ic_gzip].relativefilename != NULL) { r = writegz(file); RET_UPDATE(result, r); } RET_UPDATE(file->state, result); #ifdef HAVE_LIBBZ2 if (file->f[ic_bzip2].relativefilename != NULL) { r = writebz(file); RET_UPDATE(result, r); } RET_UPDATE(file->state, result); #endif return result; } retvalue release_writedata(struct filetorelease *file, const char *data, size_t len) { retvalue result, r; size_t free_bytes; result = RET_OK; /* move stuff into buffer, so stuff is not processed byte by byte */ free_bytes = INPUT_BUFFER_SIZE - file->waiting_bytes; if (len < free_bytes) { memcpy(file->buffer + file->waiting_bytes, data, len); file->waiting_bytes += len; assert (file->waiting_bytes < INPUT_BUFFER_SIZE); return RET_OK; } memcpy(file->buffer + file->waiting_bytes, data, free_bytes); len -= free_bytes; data += free_bytes; file->waiting_bytes += free_bytes; r = release_processbuffer(file); RET_UPDATE(result, r); while (len >= INPUT_BUFFER_SIZE) { /* should not hopefully not happen, as all this copying * is quite slow... 
*/ memcpy(file->buffer, data, INPUT_BUFFER_SIZE); len -= INPUT_BUFFER_SIZE; data += INPUT_BUFFER_SIZE; r = release_processbuffer(file); RET_UPDATE(result, r); } memcpy(file->buffer, data, len); file->waiting_bytes = len; assert (file->waiting_bytes < INPUT_BUFFER_SIZE); return result; } /* Generate a "Release"-file for arbitrary directory */ retvalue release_directorydescription(struct release *release, const struct distribution *distribution, const struct target *target, const char *releasename, bool onlyifneeded) { retvalue r; struct filetorelease *f; char *relfilename; relfilename = calc_dirconcat(target->relativedirectory, releasename); if (FAILEDTOALLOC(relfilename)) return RET_ERROR_OOM; r = startfile(release, relfilename, NULL, IC_FLAG(ic_uncompressed), onlyifneeded, &f); free(relfilename); if (RET_WAS_ERROR(r) || r == RET_NOTHING) return r; #define release_writeheader(name, data) \ if (data != NULL) { \ (void)release_writestring(f, name ": "); \ (void)release_writestring(f, data); \ (void)release_writestring(f, "\n"); \ } release_writeheader("Archive", distribution->suite); release_writeheader("Version", distribution->version); release_writeheader("Component", atoms_components[target->component]); release_writeheader("Origin", distribution->origin); release_writeheader("Label", distribution->label); release_writeheader("Architecture", atoms_architectures[target->architecture]); release_writeheader("NotAutomatic", distribution->notautomatic); release_writeheader("ButAutomaticUpgrades", distribution->butautomaticupgrades); release_writeheader("Description", distribution->description); #undef release_writeheader r = release_finishfile(release, f); return r; } static retvalue storechecksums(struct release *release) { struct release_entry *file; retvalue result, r; const char *combinedchecksum; /* size including trailing '\0' character: */ size_t len; result = RET_OK; for (file = release->files ; file != NULL ; file = file->next) { assert (file->relativefilename 
!= NULL); r = table_deleterecord(release->cachedb, file->relativefilename, true); if (RET_WAS_ERROR(r)) return r; if (file->checksums == NULL) continue; r = checksums_getcombined(file->checksums, &combinedchecksum, &len); RET_UPDATE(result, r); if (!RET_IS_OK(r)) continue; r = table_adduniqsizedrecord(release->cachedb, file->relativefilename, combinedchecksum, len+1, false, false); RET_UPDATE(result, r); } return result; } static inline bool componentneedsfake(const char *cn, const struct release *release) { if (release->fakecomponentprefix == NULL) return false; if (strncmp(cn, release->fakecomponentprefix, release->fakecomponentprefixlen) != 0) return true; return cn[release->fakecomponentprefixlen] != '/'; } static struct release_entry *newspecialreleaseentry(struct release *release, const char *relativefilename) { struct release_entry *n, *p; assert (relativefilename != NULL); n = zNEW(struct release_entry); if (FAILEDTOALLOC(n)) return NULL; n->relativefilename = strdup(relativefilename); n->fullfinalfilename = calc_dirconcat(release->dirofdist, relativefilename); if (!FAILEDTOALLOC(n->fullfinalfilename)) n->fulltemporaryfilename = mprintf("%s.new", n->fullfinalfilename); if (FAILEDTOALLOC(n->relativefilename) || FAILEDTOALLOC(n->fullfinalfilename) || FAILEDTOALLOC(n->fulltemporaryfilename)) { release_freeentry(n); return NULL; } if (release->files == NULL) release->files = n; else { p = release->files; while (p->next != NULL) p = p->next; p->next = n; } return n; } static void omitunusedspecialreleaseentry(struct release *release, struct release_entry *e) { struct release_entry **p; if (e->fulltemporaryfilename != NULL) /* new file available, nothing to omit */ return; if (isregularfile(e->fullfinalfilename)) /* this will be deleted, everything fine */ return; p = &release->files; while (*p != NULL && *p != e) p = &(*p)->next; if (*p != e) { assert (*p == e); return; } *p = e->next; release_freeentry(e); } /* Generate a main "Release" file for a distribution 
*/
retvalue release_prepare(struct release *release, struct distribution *distribution, bool onlyifneeded) {
	size_t s;
	retvalue r;
	char buffer[100], untilbuffer[100];
	time_t t;
	struct tm *gmt;
	struct release_entry *file;
	enum checksumtype cs;
	int i;
	static const char * const release_checksum_headers[cs_hashCOUNT] =
		{ "MD5Sum:\n", "SHA1:\n", "SHA256:\n" };
	struct release_entry *plainentry, *signedentry, *detachedentry;

	// TODO: check for existence of Release file here first?
	if (onlyifneeded && !release->new) {
		return RET_NOTHING;
	}

	/* format the Date: (and possibly Valid-Until:) header values */
	(void)time(&t);
	gmt = gmtime(&t);
	if (FAILEDTOALLOC(gmt))
		return RET_ERROR_OOM;
	s = strftime(buffer, 99, "%a, %d %b %Y %H:%M:%S UTC", gmt);
	if (s == 0 || s >= 99) {
		fprintf(stderr, "strftime is doing strange things...\n");
		return RET_ERROR;
	}
	if (distribution->validfor > 0) {
		t += distribution->validfor;
		gmt = gmtime(&t);
		if (FAILEDTOALLOC(gmt))
			return RET_ERROR_OOM;
		s = strftime(untilbuffer, 99, "%a, %d %b %Y %H:%M:%S UTC", gmt);
		if (s == 0 || s >= 99) {
			fprintf(stderr,
"strftime is doing strange things...\n");
			return RET_ERROR;
		}
	}
	/* reserve entries for the three top-level files to be created */
	plainentry = newspecialreleaseentry(release, "Release");
	if (FAILEDTOALLOC(plainentry))
		return RET_ERROR_OOM;
	signedentry = newspecialreleaseentry(release, "InRelease");
	if (FAILEDTOALLOC(signedentry))
		return RET_ERROR_OOM;
	detachedentry = newspecialreleaseentry(release, "Release.gpg");
	/* BUGFIX: this check previously re-tested signedentry, so an
	 * allocation failure for the "Release.gpg" entry went unnoticed
	 * and detachedentry was dereferenced later while NULL */
	if (FAILEDTOALLOC(detachedentry))
		return RET_ERROR_OOM;
	r = signature_startsignedfile(&release->signedfile);
	if (RET_WAS_ERROR(r))
		return r;
#define writestring(s) signedfile_write(release->signedfile, s, strlen(s))
#define writechar(c) {char __c = c ; signedfile_write(release->signedfile, &__c, 1); }
	if (distribution->origin != NULL) {
		writestring("Origin: ");
		writestring(distribution->origin);
		writechar('\n');
	}
	if (distribution->label != NULL) {
		writestring("Label: ");
		writestring(distribution->label);
		writechar('\n');
	}
	if (release->fakesuite != NULL) {
		writestring("Suite: ");
		writestring(release->fakesuite);
		writechar('\n');
	}
else if (distribution->suite != NULL) { writestring("Suite: "); writestring(distribution->suite); writechar('\n'); } writestring("Codename: "); if (release->fakecodename != NULL) writestring(release->fakecodename); else writestring(distribution->codename); if (distribution->version != NULL) { writestring("\nVersion: "); writestring(distribution->version); } writestring("\nDate: "); writestring(buffer); if (distribution->validfor > 0) { writestring("\nValid-Until: "); writestring(untilbuffer); } writestring("\nArchitectures:"); for (i = 0 ; i < distribution->architectures.count ; i++) { architecture_t a = distribution->architectures.atoms[i]; /* Debian's topmost Release files do not list it, * so we won't either */ if (a == architecture_source) continue; writechar(' '); writestring(atoms_architectures[a]); } writestring("\nComponents:"); for (i = 0 ; i < distribution->components.count ; i++) { component_t c = distribution->components.atoms[i]; const char *cn = atoms_components[c]; writechar(' '); if (componentneedsfake(cn, release)) { writestring(release->fakecomponentprefix); writechar('/'); } writestring(cn); } if (distribution->description != NULL) { writestring("\nDescription: "); writestring(distribution->description); } if (distribution->notautomatic != NULL) { writestring("\nNotAutomatic: "); writestring(distribution->notautomatic); } if (distribution->butautomaticupgrades != NULL) { writestring("\nButAutomaticUpgrades: "); writestring(distribution->butautomaticupgrades); } writechar('\n'); for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) { assert (release_checksum_headers[cs] != NULL); writestring(release_checksum_headers[cs]); for (file = release->files ; file != NULL ; file = file->next) { const char *hash, *size; size_t hashlen, sizelen; if (file->checksums == NULL) continue; if (!checksums_gethashpart(file->checksums, cs, &hash, &hashlen, &size, &sizelen)) continue; writechar(' '); signedfile_write(release->signedfile, hash, hashlen); writechar(' '); 
signedfile_write(release->signedfile, size, sizelen); writechar(' '); writestring(file->relativefilename); writechar('\n'); } } r = signedfile_create(release->signedfile, plainentry->fulltemporaryfilename, &signedentry->fulltemporaryfilename, &detachedentry->fulltemporaryfilename, &distribution->signwith, !global.keeptemporaries); if (RET_WAS_ERROR(r)) { signedfile_free(release->signedfile); release->signedfile = NULL; return r; } omitunusedspecialreleaseentry(release, signedentry); omitunusedspecialreleaseentry(release, detachedentry); return RET_OK; } static inline void release_toouthook(struct release *release, struct distribution *distribution) { struct release_entry *file; char *reldir; if (release->snapshotname != NULL) { reldir = mprintf("dists/%s/snapshots/%s", distribution->codename, release->snapshotname); if (FAILEDTOALLOC(reldir)) return; outhook_send("BEGIN-SNAPSHOT", distribution->codename, reldir, release->snapshotname); } else { reldir = mprintf("dists/%s", distribution->codename); if (FAILEDTOALLOC(reldir)) return; outhook_send("BEGIN-DISTRIBUTION", distribution->codename, reldir, distribution->suite); } for (file = release->files ; file != NULL ; file = file->next) { /* relf chks ffn ftfn symt * name chks NULL NULL NULL: added old filename or virtual file * name chks file NULL NULL: renamed new file and published * name NULL file NULL NULL: renamed new file * name NULL NULL NULL NULL: deleted file * name NULL NULL NULL file: created symlink */ /* should already be in place: */ assert (file->fulltemporaryfilename == NULL); /* symlinks are special: */ if (file->symlinktarget != NULL) { outhook_send("DISTSYMLINK", reldir, file->relativefilename, file->symlinktarget); } else if (file->fullfinalfilename != NULL) { outhook_send("DISTFILE", reldir, file->relativefilename, file->fullfinalfilename); } else if (file->checksums == NULL){ outhook_send("DISTDELETE", reldir, file->relativefilename, NULL); } /* would be nice to distinguish kept and virtual 
files... */ } if (release->snapshotname != NULL) { outhook_send("END-SNAPSHOT", distribution->codename, reldir, release->snapshotname); } else { outhook_send("END-DISTRIBUTION", distribution->codename, reldir, distribution->suite); } free(reldir); } /* Generate a main "Release" file for a distribution */ retvalue release_finish(/*@only@*/struct release *release, struct distribution *distribution) { retvalue result, r; int e; struct release_entry *file; bool somethingwasdone; somethingwasdone = false; result = RET_OK; for (file = release->files ; file != NULL ; file = file->next) { assert (file->relativefilename != NULL); if (file->checksums == NULL && file->fullfinalfilename != NULL && file->fulltemporaryfilename == NULL && file->symlinktarget == NULL) { e = unlink(file->fullfinalfilename); if (e < 0) { e = errno; fprintf(stderr, "Error %d deleting %s: %s. (Will be ignored)\n", e, file->fullfinalfilename, strerror(e)); } free(file->fullfinalfilename); file->fullfinalfilename = NULL; } else if (file->fulltemporaryfilename != NULL) { assert (file->fullfinalfilename != NULL); assert (file->symlinktarget == NULL); e = rename(file->fulltemporaryfilename, file->fullfinalfilename); if (e < 0) { e = errno; fprintf(stderr, "Error %d moving %s to %s: %s!\n", e, file->fulltemporaryfilename, file->fullfinalfilename, strerror(e)); r = RET_ERRNO(e); /* after something was done, do not stop * but try to do as much as possible */ if (!somethingwasdone) { release_free(release); return r; } RET_UPDATE(result, r); } else { somethingwasdone = true; free(file->fulltemporaryfilename); file->fulltemporaryfilename = NULL; } } else if (file->symlinktarget != NULL) { assert (file->fullfinalfilename != NULL); (void)unlink(file->fullfinalfilename); e = symlink(file->symlinktarget, file->fullfinalfilename); if (e != 0) { e = errno; fprintf(stderr, "Error %d creating symlink '%s' -> '%s': %s.\n", e, file->fullfinalfilename, file->symlinktarget, strerror(e)); r = RET_ERRNO(e); /* after something 
was done, do not stop * but try to do as much as possible */ if (!somethingwasdone) { release_free(release); return r; } RET_UPDATE(result, r); } } } if (RET_WAS_ERROR(result) && somethingwasdone) { fprintf(stderr, "ATTENTION: some files were already moved to place, some could not be.\n" "The generated index files for %s might be in a inconsistent state\n" "and currently not useable! You should remove the reason for the failure\n" "(most likely bad access permissions) and export the affected distributions\n" "manually (via reprepro export codenames) as soon as possible!\n", distribution->codename); } if (release->cachedb != NULL) { // TODO: split this in removing before and adding later? // remember which file were changed in case of error, so // only those are changed... /* now update the cache database, * so we find those the next time */ r = storechecksums(release); RET_UPDATE(result, r); r = table_close(release->cachedb); release->cachedb = NULL; RET_ENDUPDATE(result, r); } release_toouthook(release, distribution); /* free everything */ release_free(release); return result; } retvalue release_mkdir(struct release *release, const char *relativedirectory) { char *dirname; retvalue r; dirname = calc_dirconcat(release->dirofdist, relativedirectory); if (FAILEDTOALLOC(dirname)) return RET_ERROR_OOM; // TODO: in some far future, remember which dirs were created so that r = dirs_make_recursive(dirname); free(dirname); return r; } reprepro-4.13.1/diffindex.h0000644000175100017510000000065112152651661012501 00000000000000#ifndef REPREPRO_DIFFINDEX_H #define REPREPRO_DIFFINDEX_H struct diffindex { struct checksums *destination; int patchcount; struct diffindex_patch { struct checksums *frompackages; char *name; struct checksums *checksums; /* safe-guard against cycles */ bool done; } patches[]; }; void diffindex_free(/*@only@*/struct diffindex *); retvalue diffindex_read(const char *, /*@out@*/struct diffindex **); #endif 
reprepro-4.13.1/terms.c0000644000175100017510000002170612152651661011672 00000000000000/* This file is part of "reprepro" * Copyright (C) 2004,2005,2007,2009 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include "error.h" #include "mprintf.h" #include "strlist.h" #include "names.h" #include "chunks.h" #include "globmatch.h" #include "terms.h" void term_free(term *t) { while (t != NULL) { struct term_atom *next = t->next; if (t->isspecial) { if (t->special.type != NULL && t->special.type->done != NULL) t->special.type->done(t->comparison, &t->special.comparewith); } else { free(t->generic.key); free(t->generic.comparewith); } strlist_done(&t->architectures); free(t); t = next; } } static retvalue parseatom(const char **formula, /*@out@*/struct term_atom **atom, int options, const struct term_special *specials) { struct term_atom *a; const char *f = *formula; #define overspace() while (*f != '\0' && xisspace(*f)) f++ const char *keystart, *keyend; const char *valuestart, *valueend; enum term_comparison comparison = tc_none; bool negated = false; const struct term_special *s; overspace(); if (*f == '!' && ISSET(options, T_NEGATION)) { negated = true; f++; } keystart = f; // TODO: allow more strict checking again with some option? 
while (*f != '\0' && *f != '(' && !xisspace(*f) && *f != ',' && *f != '|' && *f !='(' && *f != ')' && *f != '[' && *f != '!') f++; keyend = f; if (keystart == keyend) { *formula = f; return RET_NOTHING; } overspace(); if (ISSET(options, T_VERSION) && *f == '(') { f++; overspace(); switch (*f) { case '>': f++; if (*f == '=') { comparison = tc_moreorequal; f++; } else if (*f == '>') { comparison = tc_strictmore; f++; } else { comparison = tc_moreorequal; fprintf(stderr, "Warning: Found a '(>' without '=' or '>' in '%s'(beginning cut), will be treated as '>='.\n", *formula); } break; case '<': f++; if (*f == '=') { comparison = tc_lessorequal; f++; } else if (*f == '<') { comparison = tc_strictless; f++; } else { comparison = tc_lessorequal; fprintf(stderr, "Warning: Found a '(<' without '=' or '<' in '%s'(begin cut), will be treated as '<='.\n", *formula); } break; case '=': f++; if (*f == '=') f++; else if (*f != ' ') { *formula = f; return RET_NOTHING; } comparison = tc_equal; break; case '%': if (ISSET(options, T_GLOBMATCH)) { f++; comparison = tc_globmatch; break; } *formula = f; return RET_NOTHING; case '!': if (f[1] == '%' && ISSET(options, T_GLOBMATCH)) { f += 2; comparison = tc_notglobmatch; break; } if (ISSET(options, T_NOTEQUAL)) { f++; if (*f != '=') { *formula = f; return RET_NOTHING; } f++; comparison = tc_notequal; break; } // no break here... default: *formula = f; return RET_NOTHING; } overspace(); valueend = valuestart = f; while (*f != '\0' && *f != ')') { valueend = f+1; f++; while (*f != '\0' && xisspace(*f)) f++; } if (*f != ')' || valueend == valuestart) { *formula = f; return RET_NOTHING; } f++; } else { comparison = tc_none; valuestart = valueend = NULL; } overspace(); if (ISSET(options, T_ARCHITECTURES) && *f == '[') { //TODO: implement this one... assert ("Not yet implemented!" 
== NULL); } for (s = specials ; s->name != NULL ; s++) { if (strncasecmp(s->name, keystart, keyend-keystart) == 0 && s->name[keyend-keystart] == '\0') break; } a = zNEW(struct term_atom); if (FAILEDTOALLOC(a)) return RET_ERROR_OOM; a->negated = negated; a->comparison = comparison; if (s->name != NULL) { retvalue r; a->isspecial = true; a->special.type = s; r = s->parse(comparison, valuestart, valueend-valuestart, &a->special.comparewith); if (RET_WAS_ERROR(r)) { term_free(a); return r; } } else { a->isspecial = false; a->generic.key = strndup(keystart, keyend - keystart); if (FAILEDTOALLOC(a->generic.key)) { term_free(a); return RET_ERROR_OOM; } if (comparison != tc_none) { if (valueend - valuestart > 2048 && (comparison == tc_globmatch || comparison == tc_notglobmatch)) { fprintf(stderr, "Ridicilous long globmatch '%.10s...'!\n", valuestart); term_free(a); return RET_ERROR; } a->generic.comparewith = strndup(valuestart, valueend - valuestart); if (FAILEDTOALLOC(a->generic.comparewith)) { term_free(a); return RET_ERROR_OOM; } } } //TODO: here architectures, too *atom = a; *formula = f; return RET_OK; #undef overspace } /* as this are quite special BDDs (a atom being false cannot make it true), * the places where True and False can be found are * quite easy and fast to find: */ static void orterm(term *termtochange, /*@dependent@*/term *termtoor) { struct term_atom *p = termtochange; while (p != NULL) { while (p->nextiffalse != NULL) p = p->nextiffalse; p->nextiffalse= termtoor; p = p->nextiftrue; } } static void andterm(term *termtochange, /*@dependent@*/term *termtoand) { struct term_atom *p = termtochange; while (p != NULL) { while (p->nextiftrue != NULL) p = p->nextiftrue; p->nextiftrue = termtoand; p = p->nextiffalse; } } retvalue term_compile(term **term_p, const char *origformula, int options, const struct term_special *specials) { const char *formula = origformula; /* for the global list */ struct term_atom *first, *last; /* the atom just read */ struct 
term_atom *atom; struct { /*@dependent@*/struct term_atom *firstinand, *firstinor; } levels[50]; int lastinitializeddepth=-1; int depth=0; retvalue r; int i; //TODO: ??? char junction = '\0'; if (ISSET(options, T_ARCHITECTURES)) { //TODO: implement this one... assert ("Not yet implemented!" == NULL); } #define overspace() while (*formula!='\0' && xisspace(*formula)) formula++ lastinitializeddepth=-1; depth=0; first = last = NULL; while (true) { overspace(); while (*formula == '(' && ISSET(options, T_BRACKETS)) { depth++; formula++; overspace(); } if (depth >= 50) { term_free(first); fprintf(stderr, "Nested too deep: '%s'!\n", origformula); return RET_ERROR; } r = parseatom(&formula, &atom, options, specials); if (r == RET_NOTHING) { if (*formula == '\0') fprintf(stderr, "Unexpected end of string parsing formula '%s'!\n", origformula); else fprintf(stderr, "Unexpected character '%c' parsing formula '%s'!\n", *formula, origformula); r = RET_ERROR; } if (RET_WAS_ERROR(r)) { term_free(first); return r; } for (i=lastinitializeddepth+1 ; i <= depth ; i ++) { levels[i].firstinand = atom; levels[i].firstinor = atom; } if (junction != '\0') { assert(lastinitializeddepth >= 0); assert (first != NULL); last->next = atom; last = atom; if (junction == ',') { andterm(levels[lastinitializeddepth].firstinand, atom); levels[lastinitializeddepth].firstinand = atom; levels[lastinitializeddepth].firstinor = atom; } else { assert (junction == '|'); orterm(levels[lastinitializeddepth].firstinor, atom); levels[lastinitializeddepth].firstinor = atom; } } else { assert(lastinitializeddepth == -1); assert (first == NULL); first = last = atom; } lastinitializeddepth = depth; overspace(); if (*formula == ')' && ISSET(options, T_BRACKETS)) { formula++; if (depth > 0) { depth--; lastinitializeddepth = depth; } else { fprintf(stderr, "Too many ')'s in '%s'!\n", origformula); term_free(first); return RET_ERROR; } overspace(); } overspace(); if (*formula == '\0') break; if (*formula != ',' && 
(*formula != '|' || NOTSET(options, T_OR))) { fprintf(stderr, "Unexpected character '%c' within '%s'!\n", *formula, origformula); term_free(first); return RET_ERROR; } junction = *formula; formula++; } if (depth > 0) { fprintf(stderr, "Missing ')' at end of formula '%s'!\n", origformula); term_free(first); return RET_ERROR; } if (*formula != '\0') { fprintf(stderr, "Trailing garbage at end of term: '%s'\n", formula); term_free(first); return RET_ERROR; } *term_p = first; return RET_OK; } reprepro-4.13.1/configparser.h0000644000175100017510000003463412152651661013233 00000000000000#ifndef REPREPRO_CONFIGPARSER_H #define REPREPRO_CONFIGPARSER_H #ifndef REPREPRO_STRLIST_H #include "strlist.h" #endif #ifndef REPREPRO_CHECKS_H #include "checks.h" #endif #ifndef REPREPRO_ATOMS_H #include "atoms.h" #endif struct configiterator; typedef retvalue configsetfunction(void *, const char *, void *, struct configiterator *); typedef retvalue configinitfunction(void *, void *, void **); typedef retvalue configfinishfunction(void *, void *, void **, bool, struct configiterator *); retvalue linkedlistfinish(void *, void *, void **, bool, struct configiterator *); struct configfield { const char *name; size_t namelen; /* privdata, allocated struct, iterator */ configsetfunction *setfunc; bool required; }; struct constant { const char *name; int value; }; #define CFr(name, sname, field) {name, sizeof(name)-1, configparser_ ## sname ## _set_ ## field, true} #define CF(name, sname, field) {name, sizeof(name)-1, configparser_ ## sname ## _set_ ## field, false} /*@observer@*/const char *config_filename(const struct configiterator *) __attribute__((pure)); unsigned int config_line(const struct configiterator *) __attribute__((pure)); unsigned int config_column(const struct configiterator *) __attribute__((pure)); unsigned int config_firstline(const struct configiterator *) __attribute__((pure)); unsigned int config_markerline(const struct configiterator *) __attribute__((pure)); unsigned 
int config_markercolumn(const struct configiterator *) __attribute__((pure)); retvalue config_getflags(struct configiterator *, const char *, const struct constant *, bool *, bool, const char *); int config_nextnonspaceinline(struct configiterator *iter); retvalue config_getlines(struct configiterator *, struct strlist *); retvalue config_getwords(struct configiterator *, struct strlist *); retvalue config_getall(struct configiterator *iter, /*@out@*/char **result_p); retvalue config_getword(struct configiterator *, /*@out@*/char **); retvalue config_getwordinline(struct configiterator *, /*@out@*/char **); retvalue config_geturl(struct configiterator *, const char *, /*@out@*/char **); retvalue config_getonlyword(struct configiterator *, const char *, checkfunc, /*@out@*/char **); retvalue config_getuniqwords(struct configiterator *, const char *, checkfunc, struct strlist *); retvalue config_getinternatomlist(struct configiterator *, const char *, enum atom_type, checkfunc, struct atomlist *); retvalue config_getatom(struct configiterator *, const char *, enum atom_type, atom_t *); retvalue config_getatomlist(struct configiterator *, const char *, enum atom_type, struct atomlist *); retvalue config_getatomsublist(struct configiterator *, const char *, enum atom_type, struct atomlist *, const struct atomlist *, const char *); retvalue config_getsplitatoms(struct configiterator *, const char *, enum atom_type, struct atomlist *, struct atomlist *); retvalue config_getsplitwords(struct configiterator *, const char *, struct strlist *, struct strlist *); retvalue config_gettruth(struct configiterator *, const char *, bool *); retvalue config_getnumber(struct configiterator *, const char *, long long *, long long /*minvalue*/, long long /*maxvalue*/); retvalue config_getconstant(struct configiterator *, const struct constant *, int *); #define config_getenum(iter, type, constants, result) ({int _val;retvalue _r = config_getconstant(iter, type ## _ ## constants, 
&_val);*(result) = (enum type)_val;_r;}) retvalue config_completeword(struct configiterator *, char, /*@out@*/char **); retvalue config_gettimespan(struct configiterator *, const char *, /*@out@*/unsigned long *); retvalue config_getscript(struct configiterator *, const char *, /*@out@*/char **); retvalue config_getsignwith(struct configiterator *, const char *, struct strlist *); void config_overline(struct configiterator *); bool config_nextline(struct configiterator *); retvalue configfile_parse(const char * /*filename*/, bool /*ignoreunknown*/, configinitfunction, configfinishfunction, const char *chunkname, const struct configfield *, size_t, void *); #define CFlinkedlistinit(sname) \ static retvalue configparser_ ## sname ## _init(void *rootptr, void *lastitem, void **newptr) { \ struct sname *n, **root_p = rootptr, *last = lastitem; \ n = calloc(1, sizeof(struct sname)); \ if (n == NULL) \ return RET_ERROR_OOM; \ if (last == NULL) \ *root_p = n; \ else \ last->next = n; \ *newptr = n; \ return RET_OK; \ } #define CFtimespanSETPROC(sname, field) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \ struct sname *item = data; \ return config_gettimespan(iter, name, &item->field); \ } #define CFcheckvalueSETPROC(sname, field, checker) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \ struct sname *item = data; \ return config_getonlyword(iter, name, checker, &item->field); \ } #define CFvalueSETPROC(sname, field) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \ struct sname *item = data; \ return config_getonlyword(iter, name, NULL, &item->field); \ } #define CFurlSETPROC(sname, field) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void 
*data, struct configiterator *iter) { \ struct sname *item = data; \ return config_geturl(iter, name, &item->field); \ } #define CFscriptSETPROC(sname, field) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \ struct sname *item = data; \ return config_getscript(iter, name, &item->field); \ } #define CFlinelistSETPROC(sname, field) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), UNUSED(const char *name), void *data, struct configiterator *iter) { \ struct sname *item = data; \ item->field ## _set = true; \ return config_getlines(iter, &item->field); \ } #define CFstrlistSETPROC(sname, field) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), UNUSED(const char *name), void *data, struct configiterator *iter) { \ struct sname *item = data; \ return config_getwords(iter, &item->field); \ } #define CFsignwithSETPROC(sname, field) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \ struct sname *item = data; \ return config_getsignwith(iter, name, &item->field); \ } #define CFcheckuniqstrlistSETPROC(sname, field, checker) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \ struct sname *item = data; \ retvalue r; \ r = config_getuniqwords(iter, name, checker, &item->field); \ if (r == RET_NOTHING) { \ fprintf(stderr, \ "Error parsing %s, line %d, column %d:\n" \ " An empty %s-field is not allowed.\n", config_filename(iter), \ config_line(iter), \ config_column(iter), \ name); \ r = RET_ERROR; \ } \ return r; \ } #define CFinternatomsSETPROC(sname, field, checker, type) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \ struct sname *item = data; \ 
retvalue r; \ r = config_getinternatomlist(iter, name, type, checker, &item->field); \ if (r == RET_NOTHING) { \ fprintf(stderr, \ "Error parsing %s, line %d, column %d:\n" \ " An empty %s-field is not allowed.\n", config_filename(iter), \ config_line(iter), \ config_column(iter), \ name); \ r = RET_ERROR; \ } \ return r; \ } #define CFatomlistSETPROC(sname, field, type) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \ struct sname *item = data; \ retvalue r; \ item->field ## _set = true; \ r = config_getatomlist(iter, name, type, &item->field); \ if (r == RET_NOTHING) { \ fprintf(stderr, \ "Error parsing %s, line %d, column %d:\n" \ " An empty %s-field is not allowed.\n", config_filename(iter), \ config_line(iter), \ config_column(iter), \ name); \ r = RET_ERROR; \ } \ return r; \ } #define CFatomSETPROC(sname, field, type) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \ struct sname *item = data; \ return config_getatom(iter, name, type, &item->field); \ } #define CFatomsublistSETPROC(sname, field, type, superset, superset_header) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \ struct sname *item = data; \ retvalue r; \ item->field ## _set = true; \ if (item->superset.count == 0) { \ fprintf(stderr, \ "Error parsing %s, line %d, column %d:\n" \ " A '%s'-field is only allowed after a '%s'-field.\n", config_filename(iter), \ config_line(iter), \ config_column(iter), \ name, superset_header); \ return RET_ERROR; \ } \ r = config_getatomsublist(iter, name, type, &item->field, \ &item->superset, superset_header); \ if (r == RET_NOTHING) { \ fprintf(stderr, \ "Error parsing %s, line %d, column %d:\n" \ " An empty %s-field is not allowed.\n", config_filename(iter), \ config_line(iter), \ 
config_column(iter), \ name); \ r = RET_ERROR; \ } \ return r; \ } #define CFuniqstrlistSETPROC(sname, field) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \ struct sname *item = data; \ return config_getuniqwords(iter, name, NULL, &item->field); \ } #define CFuniqstrlistSETPROCset(sname, name) \ static retvalue configparser_ ## sname ## _set_ ## name (UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \ struct sname *item = data; \ item->name ## _set = true; \ return config_getuniqwords(iter, name, NULL, &item->name); \ } #define CFtruthSETPROC(sname, field) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \ struct sname *item = data; \ item->field ## _set = true; \ return config_gettruth(iter, name, &item->field); \ } #define CFtruthSETPROC2(sname, name, field) \ static retvalue configparser_ ## sname ## _set_ ## name(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \ struct sname *item = data; \ return config_gettruth(iter, name, &item->field); \ } #define CFallSETPROC(sname, field) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), UNUSED(const char *name), void *data, struct configiterator *iter) { \ struct sname *item = data; \ return config_getall(iter, &item->field); \ } #define CFfilterlistSETPROC(sname, field) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), UNUSED(const char *name), void *data, struct configiterator *iter) { \ struct sname *item = data; \ return filterlist_load(&item->field, iter); \ } #define CFexportmodeSETPROC(sname, field) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), UNUSED(const char *name), void *data, struct configiterator *iter) { \ struct sname *item = data; \ return 
exportmode_set(&item->field, iter); \ } #define CFUSETPROC(sname, field) static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), UNUSED(const char *name), void *thisdata_ ## sname, struct configiterator *iter) #define CFuSETPROC(sname, field) static retvalue configparser_ ## sname ## _set_ ## field(void *privdata_ ## sname, UNUSED(const char *name), void *thisdata_ ## sname, struct configiterator *iter) #define CFSETPROC(sname, field) static retvalue configparser_ ## sname ## _set_ ## field(void *privdata_ ## sname, const char *headername, void *thisdata_ ## sname, struct configiterator *iter) #define CFSETPROCVARS(sname, item, mydata) struct sname *item = thisdata_ ## sname; struct read_ ## sname ## _data *mydata = privdata_ ## sname #define CFSETPROCVAR(sname, item) struct sname *item = thisdata_ ## sname #define CFstartparse(sname) static retvalue startparse ## sname(UNUSED(void *dummyprivdata), UNUSED(void *lastdata), void **result_p_ ##sname) #define CFstartparseVAR(sname, r) struct sname **r = (void*)result_p_ ## sname #define CFfinishparse(sname) static retvalue finishparse ## sname(void *privdata_ ## sname, void *thisdata_ ## sname, void **lastdata_p_ ##sname, bool complete, struct configiterator *iter) #define CFfinishparseVARS(sname, this, last, mydata) struct sname *this = thisdata_ ## sname, **last = (void*)lastdata_p_ ## sname; struct read_ ## sname ## _data *mydata = privdata_ ## sname #define CFUfinishparseVARS(sname, this, last, mydata) struct sname *this = thisdata_ ## sname #define CFhashesSETPROC(sname, field) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), const char *name, void *data, struct configiterator *iter) { \ struct sname *item = data; \ retvalue r; \ item->field ## _set = true; \ r = config_getflags(iter, name, hashnames, item->field, false, \ "(allowed values: md5, sha1 and sha256)"); \ if (!RET_IS_OK(r)) \ return r; \ return RET_OK; \ } // TODO: better error reporting: #define 
CFtermSETPROC(sname, field) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), UNUSED(const char *name), void *data, struct configiterator *iter) { \ struct sname *item = data; \ char *formula; \ retvalue r; \ r = config_getall(iter, &formula); \ if (! RET_IS_OK(r)) \ return r; \ r = term_compilefortargetdecision(&item->field, formula); \ free(formula); \ return r; \ } #define CFtermSSETPROC(sname, field) \ static retvalue configparser_ ## sname ## _set_ ## field(UNUSED(void *dummy), UNUSED(const char *name), void *data, struct configiterator *iter) { \ struct sname *item = data; \ char *formula; \ retvalue r; \ r = config_getall(iter, &formula); \ if (! RET_IS_OK(r)) \ return r; \ r = term_compilefortargetdecision(&item->field, formula); \ free(formula); \ item->field ## _set = true; \ return r; \ } // TODO: decide which should get better checking, which might allow escaping spaces: #define CFdirSETPROC CFvalueSETPROC #define CFfileSETPROC CFvalueSETPROC #define config_getfileinline config_getwordinline char *configfile_expandname(const char *, /*@only@*//*@null@*/char *); #endif /* REPREPRO_CONFIGPARSER_H */ reprepro-4.13.1/checkin.c0000644000175100017510000012736012152651661012147 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003,2004,2005,2006,2007,2009,2012 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "ignore.h" #include "strlist.h" #include "mprintf.h" #include "atoms.h" #include "checksums.h" #include "names.h" #include "filecntl.h" #include "dirs.h" #include "chunks.h" #include "reference.h" #include "signature.h" #include "sources.h" #include "files.h" #include "tracking.h" #include "guesscomponent.h" #include "override.h" #include "checkindsc.h" #include "checkindeb.h" #include "checkin.h" #include "uploaderslist.h" #include "log.h" #include "dpkgversions.h" #include "changes.h" /* Things to do when including a .changes-file: * - Read in the chunk of the possible signed file. * (In later versions possibly checking the signature) * - Parse it, extracting: * + Distribution * + Source * + Architecture * + Binary * + Version * + ... * + Files * - Calculate what files are expectable... * - Compare supplied filed with files expected. 
* - (perhaps: write what was done and changes to some logfile) * - add supplied files to the pool and register them in files.db * - add the .dsc-files via checkindsc.c * - add the .deb-filed via checkindeb.c * */ struct fileentry { struct fileentry *next; char *basename; filetype type; struct checksums *checksums; char *section; char *priority; architecture_t architecture_into; char *name; /* this might be different for different files, * (though this is only allowed in rare cases), * will be set by _fixfields */ component_t component; /* only set after changes_includefiles */ char *filekey; /* was already found in the pool before */ bool wasalreadythere; /* set between checkpkg and includepkg */ struct strlist needed_filekeys; union { struct dsc_headers dsc; struct debpackage *deb;} pkg; /* only valid while parsing: */ struct hashes hashes; }; struct changes { /* Things read by changes_read: */ char *source, *sourceversion, *changesversion; struct strlist distributions, architectures, binaries; struct fileentry *files; char *control; struct signatures *signatures; /* Things to be set by changes_fixfields: */ /* the component source files are put into */ component_t srccomponent; /* != NULL if changesfile was put into pool/ */ /*@null@*/ char *changesfilekey; /* the directory where source files are put into */ char *srcdirectory; /* (only to warn if multiple are used) */ component_t firstcomponent; /* the directory the .changes file resides in */ char *incomingdirectory; /* the Version: and the version in Source: differ */ bool isbinnmu; }; static void freeentries(/*@only@*/struct fileentry *entry) { struct fileentry *h; while (entry != NULL) { h = entry->next; free(entry->filekey); free(entry->basename); checksums_free(entry->checksums); free(entry->section); free(entry->priority); free(entry->name); if (entry->type == fe_DEB || entry->type == fe_UDEB) deb_free(entry->pkg.deb); else if (entry->type == fe_DSC) { strlist_done(&entry->needed_filekeys); 
sources_done(&entry->pkg.dsc); } free(entry); entry = h; } } static void changes_free(/*@only@*/struct changes *changes) { if (changes != NULL) { free(changes->source); free(changes->sourceversion); free(changes->changesversion); strlist_done(&changes->architectures); strlist_done(&changes->binaries); freeentries(changes->files); strlist_done(&changes->distributions); free(changes->control); free(changes->srcdirectory); free(changes->changesfilekey); // trackedpackage_free(changes->trackedpkg); free(changes->incomingdirectory); signatures_free(changes->signatures); } free(changes); } static retvalue newentry(struct fileentry **entry, const char *fileline, const struct atomlist *packagetypes, const struct atomlist *forcearchitectures, const char *sourcename, bool includebyhand, bool includelogs, bool *ignoredlines_p, bool skip_binaries) { struct fileentry *e; retvalue r; e = zNEW(struct fileentry); if (FAILEDTOALLOC(e)) return RET_ERROR_OOM; r = changes_parsefileline(fileline, &e->type, &e->basename, &e->hashes.hashes[cs_md5sum], &e->hashes.hashes[cs_length], &e->section, &e->priority, &e->architecture_into, &e->name); if (RET_WAS_ERROR(r)) { free(e); return r; } assert (RET_IS_OK(r)); if (e->type == fe_BYHAND) { if (!includebyhand) { // TODO: at least check them and fail if wrong? 
fprintf(stderr, "Ignoring byhand file: '%s'!\n", e->basename); freeentries(e); *ignoredlines_p = true; return RET_NOTHING; } e->next = *entry; *entry = e; return RET_OK; } if (FE_SOURCE(e->type) && limitations_missed(packagetypes, pt_dsc)) { freeentries(e); *ignoredlines_p = true; return RET_NOTHING; } if (e->type == fe_DEB && limitations_missed(packagetypes, pt_deb)) { freeentries(e); *ignoredlines_p = true; return RET_NOTHING; } if (e->type == fe_UDEB && limitations_missed(packagetypes, pt_udeb)) { freeentries(e); *ignoredlines_p = true; return RET_NOTHING; } if (e->type != fe_LOG && e->architecture_into == architecture_source && strcmp(e->name, sourcename) != 0) { fprintf(stderr, "Warning: File '%s' looks like source but does not start with '%s_'!\n", e->basename, sourcename); } if (e->type == fe_LOG) { if (strcmp(e->name, sourcename) != 0) { fprintf(stderr, "Warning: File '%s' looks like log but does not start with '%s_'!\n", e->basename, sourcename); } if (!includelogs) { // TODO: at least check them and fail if wrong? 
fprintf(stderr, "Ignoring log file: '%s'!\n", e->basename); freeentries(e); *ignoredlines_p = true; return RET_NOTHING; } if (atom_defined(e->architecture_into) && limitations_missed(forcearchitectures, e->architecture_into)) { if (verbose > 1) fprintf(stderr, "Skipping '%s' as not for architecture ", e->basename); atomlist_fprint(stderr, at_architecture, forcearchitectures); fputs(".\n", stderr); freeentries(e); *ignoredlines_p = true; return RET_NOTHING; } } else if (forcearchitectures != NULL) { if (e->architecture_into == architecture_all && !skip_binaries) { if (verbose > 2) { fprintf(stderr, "Limiting '%s' to architectures ", e->basename); atomlist_fprint(stderr, at_architecture, forcearchitectures); fputs(" as requested.\n", stderr); } /* keep e->architecture_into to all, this wil * be restricted to forcearchitectures when added */ } else if (!atomlist_in(forcearchitectures, e->architecture_into)) { if (verbose > 1) fprintf(stderr, "Skipping '%s' as architecture '%s' is not in the requested set.\n", e->basename, atoms_architectures[ e->architecture_into]); freeentries(e); *ignoredlines_p = true; return RET_NOTHING; } } e->next = *entry; *entry = e; return RET_OK; } /* Parse the Files-header to see what kind of files we carry around */ static retvalue changes_parsefilelines(const char *filename, struct changes *changes, const struct strlist *filelines, const struct atomlist *packagetypes, const struct atomlist *forcearchitectures, bool includebyhand, bool includelogs, bool *ignoredlines_p, bool skip_binaries) { retvalue result, r; int i; assert (changes->files == NULL); result = RET_NOTHING; for (i = 0 ; i < filelines->count ; i++) { const char *fileline = filelines->values[i]; r = newentry(&changes->files, fileline, packagetypes, forcearchitectures, changes->source, includebyhand, includelogs, ignoredlines_p, skip_binaries); RET_UPDATE(result, r); if (r == RET_ERROR) return r; } if (result == RET_NOTHING) { fprintf(stderr, "%s: Not enough files in 
.changes!\n", filename); return RET_ERROR; } return result; } static retvalue changes_addhashes(const char *filename, struct changes *changes, enum checksumtype cs, struct strlist *filelines, bool ignoresomefiles) { int i; retvalue r; for (i = 0 ; i < filelines->count ; i++) { struct hash_data data, size; const char *fileline = filelines->values[i]; struct fileentry *e; const char *basefilename; r = hashline_parse(filename, fileline, cs, &basefilename, &data, &size); if (r == RET_NOTHING) continue; if (RET_WAS_ERROR(r)) return r; e = changes->files; while (e != NULL && strcmp(e->basename, basefilename) != 0) e = e->next; if (e == NULL) { if (ignoresomefiles) /* we might already have ignored files when * creating changes->files, so we cannot say * if this is an error. */ continue; fprintf(stderr, "In '%s': file '%s' listed in '%s' but not in 'Files'\n", filename, basefilename, changes_checksum_names[cs]); return RET_ERROR; } if (e->hashes.hashes[cs_length].len != size.len || memcmp(e->hashes.hashes[cs_length].start, size.start, size.len) != 0) { fprintf(stderr, "In '%s': file '%s' listed in '%s' with different size than in 'Files'\n", filename, basefilename, changes_checksum_names[cs]); return RET_ERROR; } e->hashes.hashes[cs] = data; } return RET_OK; } static retvalue changes_finishhashes(struct changes *changes) { struct fileentry *e; retvalue r; for (e = changes->files ; e != NULL ; e = e->next) { r = checksums_initialize(&e->checksums, e->hashes.hashes); if (RET_WAS_ERROR(r)) return r; } return RET_OK; } static retvalue check(const char *filename, struct changes *changes, const char *field) { retvalue r; r = chunk_checkfield(changes->control, field); if (r == RET_NOTHING) { if (IGNORING(missingfield, "In '%s': Missing '%s' field!\n", filename, field)) { return RET_OK; } else { return RET_ERROR; } } return r; } static retvalue changes_read(const char *filename, /*@out@*/struct changes **changes, const struct atomlist *packagetypes, const struct atomlist 
*forcearchitectures, bool includebyhand, bool includelogs) { retvalue r; struct changes *c; struct strlist filelines[cs_hashCOUNT]; enum checksumtype cs; bool broken, ignoredlines; int versioncmp; bool skip_binaries; #define E(err) { \ if (r == RET_NOTHING) { \ fprintf(stderr, "In '%s': " err "\n", filename); \ r = RET_ERROR; \ } \ if (RET_WAS_ERROR(r)) { \ changes_free(c); \ return r; \ } \ } #define R { \ if (RET_WAS_ERROR(r)) { \ changes_free(c); \ return r; \ } \ } c = zNEW(struct changes); if (FAILEDTOALLOC(c)) return RET_ERROR_OOM; r = signature_readsignedchunk(filename, filename, &c->control, &c->signatures, &broken); R; if (broken && !IGNORING(brokensignatures, "'%s' contains only broken signatures.\n" "This most likely means the file was damaged or edited improperly.\n", filename)) { r = RET_ERROR; R; } r = check(filename, c, "Format"); R; r = check(filename, c, "Date"); R; r = chunk_getnameandversion(c->control, "Source", &c->source, &c->sourceversion); E("Missing 'Source' field"); r = propersourcename(c->source); R; if (c->sourceversion != NULL) { r = properversion(c->sourceversion); R; } r = chunk_getwordlist(c->control, "Binary", &c->binaries); E("Missing 'Binary' field"); r = chunk_getwordlist(c->control, "Architecture", &c->architectures); E("Missing 'Architecture' field"); r = chunk_getvalue(c->control, "Version", &c->changesversion); E("Missing 'Version' field"); r = properversion(c->changesversion); E("Malforce Version number"); if (c->sourceversion == NULL) { c->sourceversion = strdup(c->changesversion); if (FAILEDTOALLOC(c->sourceversion)) { changes_free(c); return RET_ERROR_OOM; } c->isbinnmu = false; } else { r = dpkgversions_cmp(c->sourceversion, c->changesversion, &versioncmp); E("Error comparing versions. 
(That should have been caught earlier, why now?)"); c->isbinnmu = versioncmp != 0; } r = chunk_getwordlist(c->control, "Distribution", &c->distributions); E("Missing 'Distribution' field"); r = check(filename, c, "Maintainer"); R; r = chunk_getextralinelist(c->control, changes_checksum_names[cs_md5sum], &filelines[cs_md5sum]); E("Missing 'Files' field!"); ignoredlines = false; /* check if forcearchitectures allows non-source binaries, * (used to check if Architecture all are skipped) */ if (forcearchitectures == NULL) { skip_binaries = false; } else { skip_binaries = !atomlist_hasexcept(forcearchitectures, architecture_source); } r = changes_parsefilelines(filename, c, &filelines[cs_md5sum], packagetypes, forcearchitectures, includebyhand, includelogs, &ignoredlines, skip_binaries); if (RET_WAS_ERROR(r)) { strlist_done(&filelines[cs_md5sum]); changes_free(c); return r; } for (cs = cs_firstEXTENDED ; cs < cs_hashCOUNT ; cs++) { r = chunk_getextralinelist(c->control, changes_checksum_names[cs], &filelines[cs]); if (RET_IS_OK(r)) r = changes_addhashes(filename, c, cs, &filelines[cs], ignoredlines); else strlist_init(&filelines[cs]); if (RET_WAS_ERROR(r)) { while (cs-- > cs_md5sum) strlist_done(&filelines[cs]); changes_free(c); return r; } } r = changes_finishhashes(c); for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) strlist_done(&filelines[cs]); R; r = dirs_getdirectory(filename, &c->incomingdirectory); R; *changes = c; return RET_OK; #undef E #undef R } static retvalue changes_fixfields(const struct distribution *distribution, const char *filename, struct changes *changes, component_t forcecomponent, /*@null@*/const char *forcesection, /*@null@*/const char *forcepriority) { struct fileentry *e; retvalue r; bool needsourcedir = false; struct fileentry *needs_source_package = NULL; bool has_source_package = false; r = propersourcename(changes->source); if (RET_WAS_ERROR(r)) return r; e = changes->files; if (e == NULL) { fprintf(stderr, "No files given in '%s'!\n", 
filename); return RET_ERROR; } for (; e != NULL ; e = e->next) { const struct overridedata *oinfo = NULL; const char *force = NULL; if (e->type == fe_BYHAND || e->type == fe_LOG) { needsourcedir = true; continue; } /* section and priority are only needed for the dsc, * not for the other source files */ if (FE_SOURCE(e->type) && !FE_PACKAGE(e->type)) { needs_source_package = e; continue; } if (forcesection == NULL || forcepriority == NULL) { oinfo = override_search( FE_BINARY(e->type)?(e->type==fe_UDEB? distribution->overrides.udeb :distribution->overrides.deb) :distribution->overrides.dsc, e->name); } if (forcesection != NULL) force = forcesection; else force = override_get(oinfo, SECTION_FIELDNAME); if (force != NULL) { free(e->section); e->section = strdup(force); if (FAILEDTOALLOC(e->section)) return RET_ERROR_OOM; } if (strcmp(e->section, "unknown") == 0 && verbose >= 0) { fprintf(stderr, "Warning: '%s' contains strange section '%s'!\n", filename, e->section); } if (strcmp(e->section, "-") == 0) { fprintf(stderr, "No section specified for '%s' in '%s'!\n", e->basename, filename); return RET_ERROR; } if (forcepriority != NULL) force = forcepriority; else force = override_get(oinfo, PRIORITY_FIELDNAME); if (force != NULL) { free(e->priority); e->priority = strdup(force); if (FAILEDTOALLOC(e->priority)) return RET_ERROR_OOM; } if (strcmp(e->priority, "-") == 0) { fprintf(stderr, "No priority specified for '%s'!\n", filename); return RET_ERROR; } if (!atom_defined(forcecomponent)) { const char *fc; fc = override_get(oinfo, "$Component"); if (fc != NULL) { forcecomponent = component_find(fc); if (!atom_defined(forcecomponent)) { fprintf(stderr, "Unparseable component '%s' in $Component override of '%s'\n", fc, e->name); return RET_ERROR; } } } // I'm undecided here. If this is a udeb, one could also use // distribution->udebcomponents. Though this might result // in not really predictable guesses for the section. 
r = guess_component(distribution->codename, &distribution->components, changes->source, e->section, forcecomponent, &e->component); if (RET_WAS_ERROR(r)) return r; assert(atom_defined(e->component)); if (!atom_defined(changes->firstcomponent)) { changes->firstcomponent = e->component; } else if (changes->firstcomponent != e->component) { fprintf(stderr, "Warning: %s contains files guessed to be in different components ('%s' vs '%s)!\n", filename, atoms_components[e->component], atoms_components[changes->firstcomponent]); } if (FE_SOURCE(e->type)) { assert (FE_PACKAGE(e->type)); has_source_package = true; if (strcmp(changes->source, e->name) != 0) { r = propersourcename(e->name); if (RET_WAS_ERROR(r)) return r; } if (!atom_defined(changes->srccomponent)) { changes->srccomponent = e->component; } else if (changes->srccomponent != e->component) { fprintf(stderr, "%s contains source files guessed to be in different components ('%s' vs '%s)!\n", filename, atoms_components[e->component], atoms_components[changes->srccomponent]); return RET_ERROR; } } else if (FE_BINARY(e->type)) { r = properpackagename(e->name); if (RET_WAS_ERROR(r)) return r; // Let's just check here, perhaps if (e->type == fe_UDEB && !atomlist_in( &distribution->udebcomponents, e->component)) { fprintf(stderr, "Cannot put file '%s' into component '%s', as it is not listed in UDebComponents!\n", e->basename, atoms_components[e->component]); return RET_ERROR; } } else { assert (FE_SOURCE(e->type) || FE_BINARY(e->type)); fprintf(stderr, "Internal Error!\n"); return RET_ERROR; } } if (needs_source_package != NULL && !has_source_package) { fprintf(stderr, "'%s' looks like part of an source package, but no dsc file listed in the .changes file!\n", needs_source_package->basename); return RET_ERROR; } if (atom_defined(changes->srccomponent)) { changes->srcdirectory = calc_sourcedir(changes->srccomponent, changes->source); if (FAILEDTOALLOC(changes->srcdirectory)) return RET_ERROR_OOM; } else if 
(distribution->trackingoptions.includechanges || needsourcedir) { component_t component = forcecomponent; if (!atom_defined(forcecomponent)) { for (e = changes->files ; e != NULL ; e = e->next) { if (FE_PACKAGE(e->type)){ component = e->component; break; } } } if (!atom_defined(component)) { fprintf(stderr, "No component found to place .changes or byhand files in. Aborting.\n"); return RET_ERROR; } changes->srcdirectory = calc_sourcedir(component, changes->source); if (FAILEDTOALLOC(changes->srcdirectory)) return RET_ERROR_OOM; } return RET_OK; } static inline retvalue checkforarchitecture(const struct fileentry *e, architecture_t architecture) { if (!atom_defined(architecture)) return RET_NOTHING; while (e != NULL && e->architecture_into != architecture) e = e->next; if (e == NULL) { if (!IGNORING(unusedarch, "Architecture header lists architecture '%s', but no files for it!\n", atoms_architectures[architecture])) return RET_ERROR; } return RET_OK; } static bool can_add_all(const struct atomlist *forcearchitectures, const struct distribution *d) { const struct atomlist *da = &d->architectures; int i; if (forcearchitectures == NULL) { return atomlist_hasexcept(da, architecture_source); } for (i = 0 ; i < forcearchitectures->count ; i++) { architecture_t a = forcearchitectures->atoms[i]; if (a == architecture_source) continue; if (a == architecture_all) return atomlist_hasexcept(da, architecture_source); if (atomlist_in(da, a)) return true; } return false; } static retvalue changes_check(const struct distribution *distribution, const char *filename, struct changes *changes, const struct atomlist *forcearchitectures, const struct atomlist *packagetypes) { int i; struct fileentry *e; retvalue r = RET_OK; bool havedsc = false, haveorig = false, havetar = false, havediff = false, havealtsrc = false; /* First check for each given architecture, if it has files: */ if (forcearchitectures != NULL) { for (i = 0 ; i < forcearchitectures->count ; i++) { architecture_t a = 
forcearchitectures->atoms[i]; if (!strlist_in(&changes->architectures, atoms_architectures[a])) { // TODO: check if this is sensible if (!IGNORING(surprisingarch, "Architecture header does not list the" " architecture '%s' to be forced in!\n", atoms_architectures[a])) return RET_ERROR_MISSING; } r = checkforarchitecture(changes->files, a); if (RET_WAS_ERROR(r)) return r; } } else { bool limitedtosource = false; bool limitedtononsource = false; if (packagetypes != NULL) { limitedtosource = true; limitedtononsource = true; for (i = 0 ; i < packagetypes->count ; i++) { if (packagetypes->atoms[i] == pt_dsc) limitedtononsource = false; else limitedtosource = false; } } for (i = 0 ; i < changes->architectures.count ; i++) { const char *architecture = changes->architectures.values[i]; if (strcmp(architecture, "source") == 0) { if (limitedtononsource) continue; } else { if (limitedtosource) continue; } r = checkforarchitecture(changes->files, architecture_find(architecture)); if (RET_WAS_ERROR(r)) return r; } } /* Then check for each file, if its architecture is sensible * and listed. 
*/ for (e = changes->files ; e != NULL ; e = e->next) { if (e->type == fe_BYHAND || e->type == fe_LOG) continue; if (atom_defined(e->architecture_into)) { if (e->architecture_into == architecture_all) { /* "all" can be added if at least one binary * architecture */ if (!can_add_all(forcearchitectures, distribution)) e->architecture_into = atom_unknown; } else if (!atomlist_in(&distribution->architectures, e->architecture_into)) e->architecture_into = atom_unknown; } if (!atom_defined(e->architecture_into)) { fprintf(stderr, "Error: '%s' has the wrong architecture to add it to %s!\n", e->basename, distribution->codename); return RET_ERROR; } if (!strlist_in(&changes->architectures, atoms_architectures[e->architecture_into])) { if (!IGNORING(surprisingarch, "'%s' looks like architecture '%s', but this is not listed in the Architecture-Header!\n", e->basename, atoms_architectures[e->architecture_into])) return RET_ERROR; } if (e->type == fe_DSC) { char *calculatedname; if (havedsc) { fprintf(stderr, "I don't know what to do with multiple .dsc files in '%s'!\n", filename); return RET_ERROR; } havedsc = true; calculatedname = calc_source_basename(changes->source, changes->sourceversion); if (FAILEDTOALLOC(calculatedname)) return RET_ERROR_OOM; if (strcmp(calculatedname, e->basename) != 0) { fprintf(stderr, "dsc file name is '%s' instead of the expected '%s'!\n", e->basename, calculatedname); free(calculatedname); return RET_ERROR; } free(calculatedname); } else if (e->type == fe_DIFF) { if (havediff) { fprintf(stderr, "I don't know what to do with multiple .diff files in '%s'!\n", filename); return RET_ERROR; } havediff = true; } else if (e->type == fe_ORIG) { if (haveorig) { fprintf(stderr, "I don't know what to do with multiple .orig.tar.gz files in '%s'!\n", filename); return RET_ERROR; } haveorig = true; } else if (e->type == fe_TAR) { havetar = true; } else if (e->type == fe_ALTSRC) { havealtsrc = true; } } if (havetar && !haveorig && havediff) { fprintf(stderr, "I 
don't know what to do having a .tar.gz not being a .orig.tar.gz and a .diff.gz in '%s'!\n", filename); return RET_ERROR; } if (strlist_in(&changes->architectures, "source") && !havedsc && !limitations_missed(forcearchitectures, architecture_source) && !limitations_missed(packagetypes, pt_dsc)) { fprintf(stderr, "I don't know what to do with a source-upload not containing a .dsc in '%s'!\n", filename); return RET_ERROR; } if (havedsc && !havediff && !haveorig && !havetar && !havealtsrc) { fprintf(stderr, "I don't know what to do having a .dsc without a .diff.gz or .tar.gz in '%s'!\n", filename); return RET_ERROR; } return r; } static retvalue changes_checkfiles(const char *filename, struct changes *changes) { struct fileentry *e; retvalue r; r = RET_NOTHING; for (e = changes->files; e != NULL ; e = e->next) { //TODO: decide earlier which files to include if (e->type == fe_BYHAND) { /* byhand files might have the same name and not * contain the version, so store separately */ assert(changes->srcdirectory!=NULL); e->filekey = mprintf("%s/%s_%s_byhand/%s", changes->srcdirectory, changes->source, changes->changesversion, e->basename); } else if (FE_SOURCE(e->type) || e->type == fe_LOG) { assert(changes->srcdirectory!=NULL); e->filekey = calc_dirconcat(changes->srcdirectory, e->basename); } else { char *directory; // TODO: make this in-situ? /* as the directory depends on the sourcename, it can be * different for every file... 
*/ directory = calc_sourcedir(e->component, changes->source); if (FAILEDTOALLOC(directory)) return RET_ERROR_OOM; e->filekey = calc_dirconcat(directory, e->basename); free(directory); } if (FAILEDTOALLOC(e->filekey)) return RET_ERROR_OOM; /* do not copy yet, but only check if it could be included */ r = files_canadd(e->filekey, e->checksums); if (RET_WAS_ERROR(r)) return r; /* If is was already there, remember that */ if (r == RET_NOTHING) { e->wasalreadythere = true; } else { /* and if it needs inclusion check if there is a file */ char *fullfilename; assert(RET_IS_OK(r)); // TODO: add a --paranoid to also check md5sums before copying? fullfilename = calc_dirconcat( changes->incomingdirectory, e->basename); if (FAILEDTOALLOC(fullfilename)) return RET_ERROR_OOM; if (!isregularfile(fullfilename)) { fprintf(stderr, "Cannot find file '%s' needed by '%s'!\n", fullfilename, filename); free(fullfilename); return RET_ERROR_MISSING; } free(fullfilename); } } return RET_OK; } static retvalue changes_includefiles(struct changes *changes) { struct fileentry *e; retvalue r; r = RET_NOTHING; for (e = changes->files; e != NULL ; e = e->next) { assert (e->filekey != NULL); if (e->wasalreadythere && checksums_iscomplete(e->checksums)) continue; r = files_checkincludefile(changes->incomingdirectory, e->basename, e->filekey, &e->checksums); if (RET_WAS_ERROR(r)) return r; } return r; } /* delete the files included */ static retvalue changes_deleteleftoverfiles(struct changes *changes, int delete) { struct fileentry *e; retvalue result, r; if (delete < D_MOVE) return RET_OK; result = RET_OK; // TODO: we currently only see files included here, so D_DELETE // only affacts the .changes file. 
for (e = changes->files; e != NULL ; e = e->next) { char *fullorigfilename; if (delete < D_DELETE && e->filekey == NULL) continue; fullorigfilename = calc_dirconcat(changes->incomingdirectory, e->basename); if (unlink(fullorigfilename) != 0) { int err = errno; fprintf(stderr, "Error deleting '%s': %d=%s\n", fullorigfilename, err, strerror(err)); r = RET_ERRNO(err); RET_UPDATE(result, r); } free(fullorigfilename); } return result; } static retvalue changes_check_sourcefile(struct changes *changes, struct fileentry *dsc, const char *basefilename, const char *filekey, struct checksums **checksums_p) { retvalue r; r = files_expect(filekey, *checksums_p, false); if (RET_WAS_ERROR(r)) return r; // TODO: get additionals checksum out of database, as future // source file completion code might need them... if (RET_IS_OK(r)) return RET_OK; if (!IGNORABLE(missingfile)) { fprintf(stderr, "Unable to find %s needed by %s!\n" "Perhaps you forgot to give dpkg-buildpackage the -sa option,\n" " or you could try --ignore=missingfile to guess possible files to use.\n", filekey, dsc->basename); return RET_ERROR_MISSING; } fprintf(stderr, "Unable to find %s!\n" "Perhaps you forgot to give dpkg-buildpackage the -sa option.\n" "--ignore=missingfile was given, searching for file...\n", filekey); return files_checkincludefile(changes->incomingdirectory, basefilename, filekey, checksums_p); } static retvalue dsc_prepare(struct changes *changes, struct fileentry *dsc, struct distribution *distribution, const char *dscfilename){ retvalue r; const struct overridedata *oinfo; char *dscbasename; char *control; int i; bool broken; assert (dsc->section != NULL); assert (dsc->priority != NULL); assert (atom_defined(changes->srccomponent)); assert (dsc->basename != NULL); assert (dsc->checksums != NULL); assert (changes->source != NULL); assert (changes->sourceversion != NULL); /* First make sure this distribution has a source section at all, * for which it has to be listed in the 
"Architectures:"-field ;-) */ if (!atomlist_in(&distribution->architectures, architecture_source)) { fprintf(stderr, "Cannot put a source package into Distribution '%s' not having 'source' in its 'Architectures:'-field!\n", distribution->codename); /* nota bene: this cannot be forced or ignored, as no target has been created for this. */ return RET_ERROR; } /* Then take a closer look in the file: */ r = sources_readdsc(&dsc->pkg.dsc, dscfilename, dscfilename, &broken); if (RET_IS_OK(r) && broken && !IGNORING(brokensignatures, "'%s' contains only broken signatures.\n" "This most likely means the file was damaged or edited improperly\n", dscfilename)) r = RET_ERROR; if (RET_IS_OK(r)) r = propersourcename(dsc->pkg.dsc.name); if (RET_IS_OK(r)) r = properversion(dsc->pkg.dsc.version); if (RET_IS_OK(r)) r = properfilenames(&dsc->pkg.dsc.files.names); if (RET_WAS_ERROR(r)) return r; if (strcmp(changes->source, dsc->pkg.dsc.name) != 0) { /* This cannot be ignored, as too much depends on it yet */ fprintf(stderr, "'%s' says it is '%s', while .changes file said it is '%s'\n", dsc->basename, dsc->pkg.dsc.name, changes->source); return RET_ERROR; } if (strcmp(changes->sourceversion, dsc->pkg.dsc.version) != 0 && !IGNORING(wrongversion, "'%s' says it is version '%s', while .changes file said it is '%s'\n", dsc->basename, dsc->pkg.dsc.version, changes->sourceversion)) { return RET_ERROR; } oinfo = override_search(distribution->overrides.dsc, dsc->pkg.dsc.name); free(dsc->pkg.dsc.section); dsc->pkg.dsc.section = strdup(dsc->section); if (FAILEDTOALLOC(dsc->pkg.dsc.section)) return RET_ERROR_OOM; free(dsc->pkg.dsc.priority); dsc->pkg.dsc.priority = strdup(dsc->priority); if (FAILEDTOALLOC(dsc->pkg.dsc.priority)) return RET_ERROR_OOM; assert (dsc->pkg.dsc.name != NULL && dsc->pkg.dsc.version != NULL); /* Add the dsc file to the list of files in this source package: */ dscbasename = strdup(dsc->basename); if (FAILEDTOALLOC(dscbasename)) r = RET_ERROR_OOM; else r = 
checksumsarray_include(&dsc->pkg.dsc.files, dscbasename, dsc->checksums); if (RET_WAS_ERROR(r)) return r; /* Calculate the filekeys: */ r = calc_dirconcats(changes->srcdirectory, &dsc->pkg.dsc.files.names, &dsc->needed_filekeys); if (RET_WAS_ERROR(r)) return r; /* noone else might have looked yet, if we have them: */ assert (dsc->pkg.dsc.files.names.count == dsc->needed_filekeys.count); for (i = 1 ; i < dsc->pkg.dsc.files.names.count ; i ++) { if (!RET_WAS_ERROR(r)) { r = changes_check_sourcefile( changes, dsc, dsc->pkg.dsc.files.names.values[i], dsc->needed_filekeys.values[i], &dsc->pkg.dsc.files.checksums[i]); } } if (!RET_WAS_ERROR(r)) r = sources_complete(&dsc->pkg.dsc, changes->srcdirectory, oinfo, dsc->pkg.dsc.section, dsc->pkg.dsc.priority, &control); if (RET_IS_OK(r)) { free(dsc->pkg.dsc.control); dsc->pkg.dsc.control = control; } return r; } static retvalue changes_checkpkgs(struct distribution *distribution, struct changes *changes) { struct fileentry *e; retvalue r; r = RET_NOTHING; e = changes->files; while (e != NULL) { char *fullfilename; if (e->type != fe_DEB && e->type != fe_DSC && e->type != fe_UDEB) { e = e->next; continue; } fullfilename = files_calcfullfilename(e->filekey); if (FAILEDTOALLOC(fullfilename)) return RET_ERROR_OOM; if (e->type == fe_DEB) { r = deb_prepare(&e->pkg.deb, e->component, e->architecture_into, e->section, e->priority, pt_deb, distribution, fullfilename, e->filekey, e->checksums, &changes->binaries, changes->source, changes->sourceversion); } else if (e->type == fe_UDEB) { r = deb_prepare(&e->pkg.deb, e->component, e->architecture_into, e->section, e->priority, pt_udeb, distribution, fullfilename, e->filekey, e->checksums, &changes->binaries, changes->source, changes->sourceversion); } else if (e->type == fe_DSC) { if (!changes->isbinnmu || IGNORING(dscinbinnmu, "File '%s' looks like a source package, but this .changes looks like a binNMU\n" "(as '%s' (from Source:) and '%s' (From Version:) differ.)\n", e->filekey, 
changes->sourceversion, changes->changesversion)) { assert (atom_defined(changes->srccomponent)); assert (changes->srcdirectory!=NULL); r = dsc_prepare(changes, e, distribution, fullfilename); } else r = RET_ERROR; } free(fullfilename); if (RET_WAS_ERROR(r)) break; e = e->next; } return r; } static retvalue changes_includepkgs(struct distribution *distribution, struct changes *changes, /*@null@*/struct trackingdata *trackingdata, const struct atomlist *forcearchitectures, bool *missed_p) { struct fileentry *e; retvalue result, r; *missed_p = false; r = distribution_prepareforwriting(distribution); if (RET_WAS_ERROR(r)) return r; result = RET_NOTHING; e = changes->files; while (e != NULL) { if (e->type != fe_DEB && e->type != fe_DSC && e->type != fe_UDEB && e->type != fe_LOG && e->type != fe_BYHAND) { e = e->next; continue; } if (interrupted()) return RET_ERROR_INTERRUPTED; if (e->type == fe_DEB) { r = deb_addprepared(e->pkg.deb, /* architecture all needs this, the rest is * already filtered out */ (e->architecture_into == architecture_all)? forcearchitectures:NULL, pt_deb, distribution, trackingdata); if (r == RET_NOTHING) *missed_p = true; } else if (e->type == fe_UDEB) { r = deb_addprepared(e->pkg.deb, /* architecture all needs this, the rest is * already filtered out */ (e->architecture_into == architecture_all)? 
forcearchitectures:NULL, pt_udeb, distribution, trackingdata); if (r == RET_NOTHING) *missed_p = true; } else if (e->type == fe_DSC) { r = dsc_addprepared(&e->pkg.dsc, changes->srccomponent, &e->needed_filekeys, distribution, trackingdata); if (r == RET_NOTHING) *missed_p = true; } else if (e->type == fe_LOG && trackingdata != NULL) { r = trackedpackage_addfilekey(trackingdata->tracks, trackingdata->pkg, ft_LOG, e->filekey, false); e->filekey = NULL; } else if (e->type == fe_BYHAND && trackingdata != NULL) { r = trackedpackage_addfilekey(trackingdata->tracks, trackingdata->pkg, ft_XTRA_DATA, e->filekey, false); e->filekey = NULL; } RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; e = e->next; } logger_wait(); return result; } static void verifyarchitectures(const struct changes *changes, struct upload_conditions *conditions) { const struct fileentry *e; for (e = changes->files ; e != NULL ; e = e->next) { if (FE_SOURCE(e->type)) { if (!uploaders_verifyatom(conditions, architecture_source)) break; } else if (FE_BINARY(e->type)) { if (!uploaders_verifyatom(conditions, e->architecture_into)) break; } } } static void verifysection(const struct changes *changes, struct upload_conditions *conditions) { const struct fileentry *e; for (e = changes->files ; e != NULL ; e = e->next) { if (FE_SOURCE(e->type)) { if (!uploaders_verifystring(conditions, e->section)) break; } else if (FE_BINARY(e->type)) { if (!uploaders_verifystring(conditions, e->section)) break; } } } static void verifybinary(const struct changes *changes, struct upload_conditions *conditions) { const struct fileentry *e; for (e = changes->files ; e != NULL ; e = e->next) { if (FE_BINARY(e->type)) { if (!uploaders_verifystring(conditions, e->name)) break; } } } static void verifybyhands(const struct changes *changes, struct upload_conditions *conditions) { const struct fileentry *e; for (e = changes->files ; e != NULL ; e = e->next) { if (e->type == fe_BYHAND) { if (!uploaders_verifystring(conditions, 
e->name))
				break;
		}
	}
}

/* Check whether the conditions derived from the uploaders file permit
 * this upload into the given distribution.
 * uploaders_nextcondition() yields one condition kind at a time until it
 * reaches a verdict; uc_ACCEPTED/uc_REJECTED terminate the loop (note the
 * bare `do switch … while (true)` — every exit path is a return).
 * The verify* helpers feed the relevant strings/atoms of the .changes
 * back into the condition evaluator; their results are consumed by the
 * next uploaders_nextcondition() call, so the return values of the
 * single-string checks are deliberately discarded here. */
static bool permissionssuffice(struct changes *changes,
		const struct distribution *into,
		struct upload_conditions *conditions) {
	do switch (uploaders_nextcondition(conditions)) {
		case uc_ACCEPTED:
			return true;
		case uc_REJECTED:
			return false;
		case uc_CODENAME:
			(void)uploaders_verifystring(conditions,
					into->codename);
			break;
		case uc_SOURCENAME:
			assert (changes->source != NULL);
			(void)uploaders_verifystring(conditions,
					changes->source);
			break;
		case uc_SECTIONS:
			verifysection(changes, conditions);
			break;
		case uc_BINARIES:
			verifybinary(changes, conditions);
			break;
		case uc_BYHAND:
			verifybyhands(changes, conditions);
			break;
		case uc_ARCHITECTURES:
			verifyarchitectures(changes, conditions);
			break;
	} while (true);
}

/* insert the given .changes into the mirror in the distribution;
 * if forcecomponent, forcesection or forcepriority is NULL
 * get it from the files or try to guess it. */
retvalue changes_add(trackingdb const tracks,
		const struct atomlist *packagetypes,
		component_t forcecomponent,
		const struct atomlist *forcearchitectures,
		const char *forcesection, const char *forcepriority,
		struct distribution *distribution,
		const char *changesfilename, int delete) {
	retvalue result, r;
	struct changes *changes;
	struct trackingdata trackingdata;
	bool somethingwasmissed;

	/* remember the file currently processed for hook scripts
	 * (declared in hooks.h) */
	causingfile = changesfilename;

	r = changes_read(changesfilename, &changes,
			packagetypes, forcearchitectures,
			distribution->trackingoptions.includebyhand,
			distribution->trackingoptions.includelogs);
	if (RET_WAS_ERROR(r))
		return r;

	/* the Distribution: field of the .changes must name this
	 * distribution by suite, codename or an AlsoAcceptFor value,
	 * unless --ignore=wrongdistribution is given */
	if ((distribution->suite == NULL ||
		!strlist_in(&changes->distributions, distribution->suite)) &&
			!strlist_in(&changes->distributions,
				distribution->codename) &&
			!strlist_intersects(&changes->distributions,
				&distribution->alsoaccept)) {
		if (!IGNORING(wrongdistribution,
".changes put in a distribution not listed within it!\n")) {
			changes_free(changes);
			return RET_ERROR;
		}
	}

	/* make sure caller has called distribution_loaduploaders */
	assert (distribution->uploaders == NULL ||
distribution->uploaderslist != NULL); if (distribution->uploaderslist != NULL) { struct upload_conditions *conditions; r = uploaders_permissions(distribution->uploaderslist, changes->signatures, &conditions); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { changes_free(changes); return r; } if (!permissionssuffice(changes, distribution, conditions) && !IGNORING(uploaders, "No rule allowing this package in found in %s!\n", distribution->uploaders)) { free(conditions); changes_free(changes); return RET_ERROR; } free(conditions); } /*look for component, section and priority to be correct or guess them*/ r = changes_fixfields(distribution, changesfilename, changes, forcecomponent, forcesection, forcepriority); /* do some tests if values are sensible */ if (!RET_WAS_ERROR(r)) r = changes_check(distribution, changesfilename, changes, forcearchitectures, packagetypes); if (interrupted()) RET_UPDATE(r, RET_ERROR_INTERRUPTED); if (!RET_WAS_ERROR(r)) r = changes_checkfiles(changesfilename, changes); if (interrupted()) RET_UPDATE(r, RET_ERROR_INTERRUPTED); /* add files in the pool */ if (!RET_WAS_ERROR(r)) r = changes_includefiles(changes); if (!RET_WAS_ERROR(r)) r = changes_checkpkgs(distribution, changes); if (RET_WAS_ERROR(r)) { changes_free(changes); return r; } if (tracks != NULL) { r = trackingdata_summon(tracks, changes->source, changes->sourceversion, &trackingdata); if (RET_WAS_ERROR(r)) { changes_free(changes); return r; } if (distribution->trackingoptions.includechanges) { char *basefilename; assert (changes->srcdirectory != NULL); basefilename = calc_changes_basename(changes->source, changes->changesversion, &changes->architectures); changes->changesfilekey = calc_dirconcat(changes->srcdirectory, basefilename); free(basefilename); if (FAILEDTOALLOC(changes->changesfilekey)) { changes_free(changes); trackingdata_done(&trackingdata); return RET_ERROR_OOM; } if (interrupted()) r = RET_ERROR_INTERRUPTED; else r = files_preinclude(changesfilename, 
changes->changesfilekey, NULL); if (RET_WAS_ERROR(r)) { changes_free(changes); trackingdata_done(&trackingdata); return r; } } } if (interrupted()) { if (tracks != NULL) trackingdata_done(&trackingdata); changes_free(changes); return RET_ERROR_INTERRUPTED; } /* add the source and binary packages in the given distribution */ result = changes_includepkgs(distribution, changes, (tracks!=NULL)?&trackingdata:NULL, forcearchitectures, &somethingwasmissed); if (RET_WAS_ERROR(result)) { if (tracks != NULL) { trackingdata_done(&trackingdata); } changes_free(changes); return result; } if (tracks != NULL) { if (changes->changesfilekey != NULL) { char *changesfilekey = strdup(changes->changesfilekey); assert (changes->srcdirectory != NULL); if (FAILEDTOALLOC(changesfilekey)) { trackingdata_done(&trackingdata); changes_free(changes); return RET_ERROR_OOM; } r = trackedpackage_addfilekey(tracks, trackingdata.pkg, ft_CHANGES, changesfilekey, false); RET_ENDUPDATE(result, r); } r = trackingdata_finish(tracks, &trackingdata); RET_ENDUPDATE(result, r); if (RET_WAS_ERROR(result)) { changes_free(changes); return result; } } /* if something was included, call --changes notify scripts */ if (RET_IS_OK(result)) { assert (logger_isprepared(distribution->logger)); logger_logchanges(distribution->logger, distribution->codename, changes->source, changes->changesversion, changes->control, changesfilename, changes->changesfilekey); } /* wait for notify scripts (including those for the packages) * before deleting the .changes */ logger_wait(); if ((delete >= D_MOVE && changes->changesfilekey != NULL) || delete >= D_DELETE) { if (somethingwasmissed && delete < D_DELETE) { if (verbose >= 0) { fprintf(stderr, "Not deleting '%s' as no package was added or some package was missed.\n" "(Use --delete --delete to delete anyway in such cases)\n", changesfilename); } } else { if (verbose >= 5) { printf("Deleting '%s'.\n", changesfilename); } if (unlink(changesfilename) != 0) { int e = errno; 
fprintf(stderr, "Error %d deleting '%s': %s\n", e, changesfilename, strerror(e)); } } } result = changes_deleteleftoverfiles(changes, delete); (void)changes_free(changes); return result; } reprepro-4.13.1/hooks.h0000644000175100017510000000077212152651661011670 00000000000000#ifndef REPREPRO_HOOKS_H #define REPREPRO_HOOKS_H #ifndef REPREPRO_ATOMS_H #include "atoms.h" #endif /* the command currently processed (may not changed till all loggers are run) */ extern command_t causingcommand; /* file causing the current actions (may change so may need to be saved for queued actions)*/ extern /*@null@*/ const char *causingfile; /* for other hooks */ void sethookenvironment(/*@null@*/const char *, /*@null@*/const char *, /*@null@*/const char *, /*@null@*/const char *); #endif reprepro-4.13.1/downloadcache.c0000644000175100017510000002033712152651661013332 00000000000000/* This file is part of "reprepro" * Copyright (C) 2004,2005,2007,2009 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include "error.h" #include "strlist.h" #include "names.h" #include "dirs.h" #include "files.h" #include "freespace.h" #include "downloadcache.h" struct downloaditem { /*@dependent@*//*@null@*/struct downloaditem *parent; /*@null@*/struct downloaditem *left, *right; char *filekey; struct checksums *checksums; bool done; }; /* Initialize a new download session */ retvalue downloadcache_initialize(enum spacecheckmode mode, off_t reserveddb, off_t reservedother, struct downloadcache **download) { struct downloadcache *cache; retvalue r; cache = zNEW(struct downloadcache); if (FAILEDTOALLOC(cache)) return RET_ERROR_OOM; r = space_prepare(&cache->devices, mode, reserveddb, reservedother); if (RET_WAS_ERROR(r)) { free(cache); return r; } *download = cache; return RET_OK; } /* free all memory */ static void freeitem(/*@null@*//*@only@*/struct downloaditem *item) { if (item == NULL) return; freeitem(item->left); freeitem(item->right); free(item->filekey); checksums_free(item->checksums); free(item); } retvalue downloadcache_free(struct downloadcache *download) { if (download == NULL) return RET_NOTHING; freeitem(download->items); space_free(download->devices); free(download); return RET_OK; } static retvalue downloaditem_callback(enum queue_action action, void *privdata, void *privdata2, const char *uri, const char *gotfilename, const char *wantedfilename, /*@null@*/const struct checksums *checksums, const char *method) { struct downloaditem *d = privdata; struct downloadcache *cache = privdata2; struct checksums *read_checksums = NULL; retvalue r; bool improves; if (action != qa_got) // TODO: instead store in downloaditem? 
return RET_ERROR; /* if the file is somewhere else, copy it: */ if (strcmp(gotfilename, wantedfilename) != 0) { if (verbose > 1) fprintf(stderr, "Linking file '%s' to '%s'...\n", gotfilename, wantedfilename); r = checksums_linkorcopyfile(wantedfilename, gotfilename, &read_checksums); if (r == RET_NOTHING) { fprintf(stderr, "Cannot open '%s', obtained from '%s' method.\n", gotfilename, method); r = RET_ERROR_MISSING; } if (RET_WAS_ERROR(r)) { // TODO: instead store in downloaditem? return r; } if (read_checksums != NULL) checksums = read_checksums; } if (checksums == NULL || !checksums_iscomplete(checksums)) { assert(read_checksums == NULL); r = checksums_read(wantedfilename, &read_checksums); if (r == RET_NOTHING) { fprintf(stderr, "Cannot open '%s', though '%s' method claims to have put it there!\n", wantedfilename, method); r = RET_ERROR_MISSING; } if (RET_WAS_ERROR(r)) { // TODO: instead store in downloaditem? return r; } checksums = read_checksums; } assert (checksums != NULL); if (!checksums_check(d->checksums, checksums, &improves)) { fprintf(stderr, "Wrong checksum during receive of '%s':\n", uri); checksums_printdifferences(stderr, d->checksums, checksums); checksums_free(read_checksums); (void)unlink(wantedfilename); // TODO: instead store in downloaditem? 
return RET_ERROR_WRONG_MD5;
	}
	if (improves) {
		/* the received checksums add hash kinds not known before;
		 * merge them into the item */
		r = checksums_combine(&d->checksums, checksums, NULL);
		checksums_free(read_checksums);
		if (RET_WAS_ERROR(r))
			return r;
	} else
		checksums_free(read_checksums);
	if (global.showdownloadpercent > 0) {
		unsigned int percent;

		cache->size_done += checksums_getfilesize(d->checksums);
		percent = (100 * cache->size_done) / cache->size_todo;
		if (global.showdownloadpercent > 1
				|| percent > cache->last_percent) {
			unsigned long long all = cache->size_done;
			int kb, mb, gb, tb, b, groups = 0;

			cache->last_percent = percent;
			printf("Got %u%%: ", percent);
			/* split the byte count into 1024-based units;
			 * only the two most significant non-zero groups
			 * are printed */
			b = all & 1023;
			all = all >> 10;
			kb = all & 1023;
			all = all >> 10;
			mb = all & 1023;
			all = all >> 10;
			gb = all & 1023;
			all = all >> 10;
			tb = all;
			if (tb != 0) {
				printf("%dT ", tb);
				groups++;
			}
			if (groups < 2 && (groups > 0 || gb != 0)) {
				printf("%dG ", gb);
				groups++;
			}
			if (groups < 2 && (groups > 0 || mb != 0)) {
				printf("%dM ", mb);
				groups++;
			}
			if (groups < 2 && (groups > 0 || kb != 0)) {
				printf("%dK ", kb);
				groups++;
			}
			if (groups < 2 && (groups > 0 || b != 0))
				printf("%d ", b);
			puts("bytes");
		}
	}
	/* record the file's checksums in the files database */
	r = files_add_checksums(d->filekey, d->checksums);
	if (RET_WAS_ERROR(r))
		return r;
	d->done = true;
	return RET_OK;
}

/* Look up filekey in the binary search tree rooted at list->items
 * (ordered by strcmp of the filekey).
 * Returns the matching item or NULL if not present.
 * *p is set to the last node visited (the would-be parent of a new
 * node, NULL when the tree is empty) and *h to the link pointer where
 * a new node would have to be chained in. */
/*@null@*//*@dependent@*/
static struct downloaditem *searchforitem(struct downloadcache *list,
		const char *filekey,
		/*@out@*/struct downloaditem **p,
		/*@out@*/struct downloaditem ***h) {
	struct downloaditem *item;
	int c;

	*h = &list->items;
	*p = NULL;
	item = list->items;
	while (item != NULL) {
		*p = item;
		c = strcmp(filekey, item->filekey);
		if (c == 0)
			return item;
		else if (c < 0) {
			*h = &item->left;
			item = item->left;
		} else {
			*h = &item->right;
			item = item->right;
		}
	}
	return NULL;
}

/* queue a new file to be downloaded:
 * results in RET_ERROR_WRONG_MD5, if someone else already asked
 * for the same destination with different checksums.
 */
retvalue downloadcache_add(struct downloadcache *cache,
		struct aptmethod *method, const char *orig,
		const char *filekey, const struct checksums *checksums) {
	struct downloaditem *i;
	struct downloaditem *item, **h, *parent;
	char *fullfilename;
	retvalue r;

	assert (cache != NULL && method != NULL);
	/* RET_OK means the file is already in the pool with the wanted
	 * checksums and errors are passed through; only RET_NOTHING
	 * (file not yet there) falls through to queueing */
	r = files_expect(filekey, checksums, false);
	if (r != RET_NOTHING)
		return r;
	i = searchforitem(cache, filekey, &parent, &h);
	if (i != NULL) {
		/* already queued: the checksums must be compatible */
		bool improves;

		assert (i->filekey != NULL);
		if (!checksums_check(i->checksums, checksums, &improves)) {
			fprintf(stderr,
"ERROR: Same file is requested with conflicting checksums:\n");
			checksums_printdifferences(stderr,
					i->checksums, checksums);
			return RET_ERROR_WRONG_MD5;
		}
		if (improves) {
			r = checksums_combine(&i->checksums,
					checksums, NULL);
			if (RET_WAS_ERROR(r))
				return r;
		}
		return RET_NOTHING;
	}
	item = zNEW(struct downloaditem);
	if (FAILEDTOALLOC(item))
		return RET_ERROR_OOM;
	item->done = false;
	item->filekey = strdup(filekey);
	item->checksums = checksums_dup(checksums);
	if (FAILEDTOALLOC(item->filekey) || FAILEDTOALLOC(item->checksums)) {
		freeitem(item);
		return RET_ERROR_OOM;
	}
	fullfilename = files_calcfullfilename(filekey);
	if (FAILEDTOALLOC(fullfilename)) {
		freeitem(item);
		return RET_ERROR_OOM;
	}
	(void)dirs_make_parent(fullfilename);
	r = space_needed(cache->devices, fullfilename, checksums);
	if (RET_WAS_ERROR(r)) {
		free(fullfilename);
		freeitem(item);
		return r;
	}
	/* NOTE(review): fullfilename is not freed on the success path nor
	 * when aptmethod_enqueue fails; presumably aptmethod_enqueue takes
	 * ownership of it — confirm against aptmethod.c */
	r = aptmethod_enqueue(method, orig, fullfilename,
			downloaditem_callback, item, cache);
	if (RET_WAS_ERROR(r)) {
		freeitem(item);
		return r;
	}
	/* chain the new item into the tree at the slot found above */
	item->left = item->right = NULL;
	item->parent = parent;
	*h = item;
	cache->size_todo += checksums_getfilesize(item->checksums);
	return RET_OK;
}

/* same as above, only for more files...
*/
/* Queue every file of a checksums array for download.
 * origfiles->names holds the per-method source names, filekeys the
 * destination filekeys; both lists must be of equal length.
 * All entries are attempted; the combined result of the individual
 * downloadcache_add() calls is returned. */
retvalue downloadcache_addfiles(struct downloadcache *cache,
		struct aptmethod *method,
		const struct checksumsarray *origfiles,
		const struct strlist *filekeys) {
	retvalue result = RET_NOTHING;
	int idx;

	assert (origfiles != NULL && filekeys != NULL
			&& origfiles->names.count == filekeys->count);

	for (idx = 0 ; idx < filekeys->count ; idx++) {
		retvalue r = downloadcache_add(cache, method,
				origfiles->names.values[idx],
				filekeys->values[idx],
				origfiles->checksums[idx]);
		RET_UPDATE(result, r);
	}
	return result;
}
reprepro-4.13.1/atoms.c0000644000175100017510000002232412152651661011660 00000000000000/* This file is part of "reprepro"
 * Copyright (C) 2003,2004,2005,2006,2007,2008,2009 Bernhard R. Link
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */
/* NOTE(review): the <header> names of the following system includes were
 * lost during extraction and cannot be reconstructed from this chunk. */
#include
#include
#include
#include
#include
#include "error.h"
#include "mprintf.h"
#include "strlist.h"
#include "atoms.h"

/* atom tables: index 0 is a "!!NONE!!" placeholder so atom value 0
 * always means "uninitialized/unknown" */
const char **atoms_architectures;
const char **atoms_components;
const char * const packagetypes[4] = { "!!NONE!!", "dsc", "deb", "udeb" };
const char **atoms_packagetypes = (const char **)&packagetypes;
const char **atoms_commands;
static int command_count;
static const char * const types[4] = {
	"architecture", "component", "packagetype", "command"
};
const char **atomtypes = (const char **)types;

/* trivial implementation for now, perhaps make it more complicated later */
static struct strlist architectures, components;

/* Initialize the architecture/component atom tables with their fixed
 * built-in entries and reserve room for 'count' command atoms. */
retvalue atoms_init(int count) {
	retvalue r;
	strlist_init(&architectures);
	strlist_init(&components);
	/* add a 0th entry to all, so 0 means uninitialized */
	r = strlist_add_dup(&architectures, "!!NONE!!");
	if (RET_WAS_ERROR(r))
		return r;
	r = strlist_add_dup(&architectures, "source");
	if (RET_WAS_ERROR(r))
		return r;
	r = strlist_add_dup(&architectures, "all");
	if (RET_WAS_ERROR(r))
		return r;
	r = strlist_add_dup(&components, "!!NONE!!");
	if (RET_WAS_ERROR(r))
		return r;
	/* a fallback component to put things without a component in */
	r = strlist_add_dup(&components, "strange");
	if (RET_WAS_ERROR(r))
		return r;
	atoms_components = (const char**)components.values;
	atoms_architectures = (const char**)architectures.values;
	command_count = count;
	if (command_count > 0) {
		atoms_commands = nzNEW(command_count + 1, const char*);
		if (FAILEDTOALLOC(atoms_commands))
			return RET_ERROR_OOM;
	}
	return RET_OK;
}

/* Look up 'value', adding it as a new architecture atom if not yet known.
 * atoms_architectures is refreshed as strlist_add_dup may realloc. */
retvalue architecture_intern(const char *value, architecture_t *atom_p) {
	retvalue r;
	int i;

	i = strlist_ofs(&architectures, value);
	if (i >= 0) {
		*atom_p = (architecture_t)i;
		return RET_OK;
	}
	i = architectures.count;
	r = strlist_add_dup(&architectures, value);
	atoms_architectures = (const char**)architectures.values;
	if (RET_IS_OK(r)) {
		*atom_p = (architecture_t)i;
		return RET_OK;
	} else
		return r;
}

/* Same as architecture_intern, but for the component table. */
retvalue component_intern(const char *value, component_t *atom_p) {
	retvalue r;
	int i;

	i = strlist_ofs(&components, value);
	if (i >= 0) {
		*atom_p = (component_t)i;
		return RET_OK;
	}
	i = components.count;
	r = strlist_add_dup(&components, value);
	atoms_components = (const char**)components.values;
	if (RET_IS_OK(r)) {
		*atom_p = (component_t)i;
		return RET_OK;
	} else
		return r;
}

/* Look up an architecture atom without creating it. */
architecture_t architecture_find(const char *value) {
	int i = strlist_ofs(&architectures, value);

	if (i < 0)
		return atom_unknown;
	else
		return (architecture_t)i;
}

/* Length-bounded lookup (value need not be NUL-terminated at l).
 * Scans backwards, skipping index 0 (the "!!NONE!!" placeholder). */
architecture_t architecture_find_l(const char *value, size_t l) {
	architecture_t a;

	for (a = architectures.count - 1 ; a > 0 ; a--) {
		const char *name = atoms_architectures[a];
		size_t len = strlen(name);

		if (len == l && memcmp(name, value, len) == 0)
			return a;
	}
	return atom_unknown;
}

// TODO: this might be called a lot, perhaps optimize it...
component_t component_find_l(const char *value, size_t l) { component_t a; for (a = components.count - 1 ; a > 0 ; a--) { const char *name = atoms_components[a]; size_t len = strlen(name); if (len == l && memcmp(name, value, len) == 0) return a; } return atom_unknown; } component_t component_find(const char *value) { int i = strlist_ofs(&components, value); if (i < 0) return atom_unknown; else return (architecture_t)i; } packagetype_t packagetype_find(const char *value) { if (strcmp(value, "dsc") == 0) return pt_dsc; else if (strcmp(value, "deb") == 0) return pt_deb; else if (strcmp(value, "udeb") == 0) return pt_udeb; else return atom_unknown; } packagetype_t packagetype_find_l(const char *value, size_t len) { if (len == 3) { if (strncmp(value, "dsc", 3) == 0) return pt_dsc; else if (strncmp(value, "deb", 3) == 0) return pt_deb; } else if (len == 4 && strncmp(value, "udeb", 4) == 0) return pt_udeb; return atom_unknown; } static inline command_t command_find(const char *value) { command_t c; for (c = command_count ; c > 0 ; c--) { if (strcmp(atoms_commands[c], value) == 0) return c; } return atom_unknown; } atom_t atom_find(enum atom_type type, const char *value) { switch (type) { case at_packagetype: return packagetype_find(value); case at_architecture: return architecture_find(value); case at_component: return component_find(value); case at_command: return command_find(value); default: return atom_unknown; } } retvalue atom_intern(enum atom_type type, const char *value, atom_t *atom_p) { assert (type == at_architecture || type == at_component); switch (type) { case at_architecture: return architecture_intern(value, atom_p); case at_component: return component_intern(value, atom_p); default: return RET_ERROR; } } void atomlist_init(struct atomlist *list) { list->count = 0; list->size = 0; list->atoms = 0; } void atomlist_done(struct atomlist *list) { if (list->size > 0) { assert (list->atoms != 0); free(list->atoms); } /* reset atoms but not size, so reuse can be 
   catched */
	list->atoms = NULL;
}

/* add a atom uniquely (perhaps sorted), RET_NOTHING when already there */
retvalue atomlist_add_uniq(struct atomlist *list, atom_t atom) {
	int i;
	atom_t *n;

	assert (atom_defined(atom));
	for (i = 0 ; i < list->count ; i++) {
		if (list->atoms[i] == atom)
			return RET_NOTHING;
	}
	/* grow in steps of 8 entries */
	if (list->size <= list->count) {
		n = realloc(list->atoms, (sizeof(atom_t))*(list->count + 8));
		if (FAILEDTOALLOC(n))
			return RET_ERROR_OOM;
		list->size = list->count + 8;
		list->atoms = n;
	}
	list->atoms[list->count++] = atom;
	return RET_OK;
}

/* Append an atom unconditionally (duplicates allowed). */
retvalue atomlist_add(struct atomlist *list, atom_t atom) {
	atom_t *n;

	assert (atom_defined(atom));
	if (list->size <= list->count) {
		n = realloc(list->atoms, (sizeof(atom_t))*(list->count + 8));
		if (FAILEDTOALLOC(n))
			return RET_ERROR_OOM;
		list->size = list->count + 8;
		list->atoms = n;
	}
	list->atoms[list->count++] = atom;
	return RET_OK;
}

/* replace the contents of dest with those from orig, which get emptied */
void atomlist_move(struct atomlist *dest, struct atomlist *orig) {
	dest->atoms = orig->atoms;
	dest->count = orig->count;
	dest->size = orig->size;
	/* reset atoms but not size, so reuse can be catched */
	orig->atoms = NULL;
}

/* true if the list contains any atom other than 'atom' */
bool atomlist_hasexcept(const struct atomlist *list, atom_t atom) {
	int i;

	for (i = 0 ; i < list->count ; i++) {
		if (list->atoms[i] != atom)
			return true;
	}
	return false;
}

/* membership test */
bool atomlist_in(const struct atomlist *list, atom_t atom) {
	int i;

	for (i = 0 ; i < list->count ; i++) {
		if (list->atoms[i] == atom)
			return true;
	}
	return false;
}

/* index of atom in list, or -1 if absent */
int atomlist_ofs(const struct atomlist *list, atom_t atom) {
	int i;

	for (i = 0 ; i < list->count ; i++) {
		if (list->atoms[i] == atom)
			return i;
	}
	return -1;
}

/* true if every atom of subset is in list; otherwise false and the first
 * missing atom is stored in *missing (when missing != NULL) */
bool atomlist_subset(const struct atomlist *list, const struct atomlist *subset, atom_t *missing) {
	int i, j;

	for (j = 0 ; j < subset->count ; j++) {
		atom_t atom = subset->atoms[j];

		for (i = 0 ; i < list->count ; i++) {
			if (list->atoms[i] == atom)
				break;
		}
		if (i >= list->count) {
			if (missing != NULL)
				*missing = atom;
			return false;
		}
	}
	return true;
}

/* Print the list's atom names, space separated, to file. */
retvalue atomlist_fprint(FILE *file, enum atom_type type, const struct atomlist *list) {
	const char **atoms = NULL;
	int c;
	atom_t *p;
	retvalue result;

	assert(list != NULL);
	assert(file != NULL);
	switch (type) {
		case at_architecture:
			atoms = atoms_architectures;
			break;
		case at_component:
			atoms = atoms_components;
			break;
		case at_packagetype:
			atoms = atoms_packagetypes;
			break;
		case at_command:
			atoms = atoms_commands;
			break;
	}
	assert(atoms != NULL);
	c = list->count;
	p = list->atoms;
	result = RET_OK;
	while (c > 0) {
		if (fputs(atoms[*(p++)], file) == EOF)
			result = RET_ERROR;
		if (--c > 0 && fputc(' ', file) == EOF)
			result = RET_ERROR;
	}
	return result;
}

/* number of known components (including the 0th placeholder) */
component_t components_count(void) {
	return components.count;
}

/* Split 'string' at '|' (modifying it in place) and resolve each piece to
 * an atom of the given type; on an unknown name, *missing points into
 * string and RET_NOTHING is returned. */
retvalue atomlist_filllist(enum atom_type type, struct atomlist *list, char *string, const char **missing) {
	struct atomlist l;
	char *e;
	retvalue r;
	atom_t a;

	atomlist_init(&l);
	while (*string != '\0') {
		e = strchr(string, '|');
		if (e == NULL)
			e = strchr(string, '\0');
		else
			*(e++) = '\0';
		a = atom_find(type, string);
		if (!atom_defined(a)) {
			atomlist_done(&l);
			*missing = string;
			return RET_NOTHING;
		}
		r = atomlist_add(&l, a);
		if (RET_WAS_ERROR(r)) {
			atomlist_done(&l);
			return r;
		}
		string = e;
	}
	atomlist_move(list, &l);
	return RET_OK;
}
reprepro-4.13.1/donefile.c0000644000175100017510000001423712152651661012326 00000000000000/* This file is part of "reprepro"
 * Copyright (C) 2008 Bernhard R. Link
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */
/* NOTE(review): the <header> names of the following system includes were
 * lost during extraction and cannot be reconstructed from this chunk. */
#include
#include
#include
#include
#include
#include
#include
#include
#include "donefile.h"
#include "names.h"
#include "checksums.h"
#include "remoterepository.h"

/* This stores what an distribution that is updated from remote repositories
 * has already processed, so that things already processed do not have to be
 * downloaded or processed again. */

/* writer state: data goes to tempfilename and is renamed to finalfilename
 * on successful markdone_finish */
struct markdonefile {
	char *finalfilename;
	char *tempfilename;
	FILE *file;
};

/* path of the per-codename "lastseen" state file */
static inline char *donefilename(const char *codename) {
	return genlistsfilename("lastseen", 2, "", codename, NULL);
}

/* Open a new done-file writer for codename, writing to a ".new" temp file
 * so a crash never corrupts the previous state. */
retvalue markdone_create(const char *codename, struct markdonefile **done_p) {
	struct markdonefile *done;

	done = NEW(struct markdonefile);
	if (FAILEDTOALLOC(done))
		return RET_ERROR_OOM;
	done->finalfilename = donefilename(codename);
	if (FAILEDTOALLOC(done->finalfilename)) {
		free(done);
		return RET_ERROR_OOM;
	}
	done->tempfilename = calc_addsuffix(done->finalfilename, "new");
	if (FAILEDTOALLOC(done->tempfilename)) {
		free(done->finalfilename);
		free(done);
		return RET_ERROR_OOM;
	}
	done->file = fopen(done->tempfilename, "w+");
	if (done->file == NULL) {
		int e = errno;
		fprintf(stderr, "Error %d creating '%s': %s\n",
				e, done->tempfilename, strerror(e));
		free(done->finalfilename);
		free(done->tempfilename);
		free(done);
		return RET_ERROR;
	}
	fprintf(done->file, "Updates already processed for %s:\n", codename);
	*done_p = done;
	return RET_OK;
}

/* Close the writer: on clean close rename temp over final, otherwise
 * delete the temp file; always frees 'done'. */
void markdone_finish(struct markdonefile *done) {
	bool error = false;

	if (done == NULL)
		return;
	if (done->file == NULL)
		error = true;
	else {
		if (ferror(done->file) != 0) {
			fprintf(stderr, "An error occured writing to '%s'!\n",
					done->tempfilename);
			(void)fclose(done->file);
			error = true;
		} else if (fclose(done->file) != 0) {
			int e = errno;
			fprintf(stderr, "Error %d occured writing to '%s': %s!\n",
					e, done->tempfilename, strerror(e));
			error = true;
		}
		done->file = NULL;
	}
	if (error)
		(void)unlink(done->tempfilename);
	else {
		int i;

		i = rename(done->tempfilename, done->finalfilename);
		if (i != 0) {
			int e = errno;
			fprintf(stderr, "Error %d moving '%s' to '%s': %s!\n",
					e, done->tempfilename,
					done->finalfilename, strerror(e));
		}
	}
	free(done->finalfilename);
	free(done->tempfilename);
	free(done);
}

/* record that a target (identified by its identifier) follows */
void markdone_target(struct markdonefile *done, const char *identifier) {
	fprintf(done->file, "Target %s\n", identifier);
}

/* record an index file together with its combined checksum string */
void markdone_index(struct markdonefile *done, const char *file, const struct checksums *checksums) {
	retvalue r;
	size_t s;
	const char *data;

	r = checksums_getcombined(checksums, &data, &s);
	if (!RET_IS_OK(r))
		return;
	fprintf(done->file, "Index %s %s\n", file, data);
}

/* record that a delete/clean step was done */
void markdone_cleaner(struct markdonefile *done) {
	fprintf(done->file, "Delete\n");
}

/* the same for reading */
struct donefile {
	char *filename;
	char *linebuffer;
	size_t linebuffer_size;
	FILE *file;
};

/* Open the done-file for reading; RET_NOTHING if it does not exist or is
 * not a usable text file (in which case it is deleted). */
retvalue donefile_open(const char *codename, struct donefile **done_p) {
	struct donefile *done;
	ssize_t s;

	done = zNEW(struct donefile);
	if (FAILEDTOALLOC(done))
		return RET_ERROR_OOM;
	done->filename = donefilename(codename);
	if (FAILEDTOALLOC(done->filename)) {
		free(done);
		return RET_ERROR_OOM;
	}
	done->file = fopen(done->filename, "r");
	if (done->file == NULL) {
		donefile_close(done);
		return RET_NOTHING;
	}
	s = getline(&done->linebuffer, &done->linebuffer_size, done->file);
	if (s <= 0 || done->linebuffer[s-1] != '\n') {
		/* if it cannot be read or is empty or not a text file,
		 * delete it, and do as if it never existed... */
		unlink(done->filename);
		donefile_close(done);
		return RET_NOTHING;
	}
	done->linebuffer[s-1] = '\0';
	// TODO: check the first line?
	*done_p = done;
	return RET_OK;
}

/* Close the reader and free all its resources; NULL-safe. */
void donefile_close(struct donefile *done) {
	if (done == NULL)
		return;
	// TODO: check return, only print a warning, though,
	// no need to interrupt anything.
	if (done->file != NULL)
		fclose(done->file);
	free(done->linebuffer);
	free(done->filename);
	free(done);
}

/* Advance to the next "Target " line; *identifier_p points into the
 * reader's line buffer (valid until the next read). */
retvalue donefile_nexttarget(struct donefile *done, const char **identifier_p) {
	ssize_t s;

	while (strncmp(done->linebuffer, "Target ", 7) != 0) {
		s = getline(&done->linebuffer, &done->linebuffer_size, done->file);
		if (s <= 0 || done->linebuffer[s-1] != '\n')
			/* Malformed line, ignore the rest... */
			return RET_NOTHING;
		done->linebuffer[s-1] = '\0';
	}
	/* do not process a second time */
	done->linebuffer[0] = '\0';
	/* and return the identifier part */
	*identifier_p = done->linebuffer + 7;
	return RET_OK;
}

/* Read the next "Index <file> <checksums>" line; returns false at any
 * other line or on parse failure. *filename_p points into the buffer. */
bool donefile_nextindex(struct donefile *done, const char **filename_p, struct checksums **checksums_p) {
	char *p;
	ssize_t s;
	retvalue r;

	s = getline(&done->linebuffer, &done->linebuffer_size, done->file);
	if (s <= 0 || done->linebuffer[s-1] != '\n') {
		done->linebuffer[0] = '\0';
		return false;
	}
	done->linebuffer[s-1] = '\0';
	if (strncmp(done->linebuffer, "Index ", 6) != 0)
		return false;
	p = done->linebuffer + 6;
	*filename_p = p;
	p = strchr(p, ' ');
	if (p == NULL)
		return false;
	*(p++) = '\0';
	r = checksums_parse(checksums_p, p);
	return RET_IS_OK(r);
}

/* true if the next line is exactly "Delete" */
bool donefile_iscleaner(struct donefile *done) {
	ssize_t s;

	s = getline(&done->linebuffer, &done->linebuffer_size, done->file);
	if (s <= 0 || done->linebuffer[s-1] != '\n') {
		done->linebuffer[0] = '\0';
		return false;
	}
	done->linebuffer[s-1] = '\0';
	return strcmp(done->linebuffer, "Delete") == 0;
}
reprepro-4.13.1/checksums.c0000644000175100017510000011636112152651661012527 00000000000000/* This file is part of "reprepro"
 * Copyright (C) 2006,2007,2008,2009 Bernhard R. Link
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */
/* NOTE(review): the <header> names of the following system includes were
 * lost during extraction and cannot be reconstructed from this chunk. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#define CHECKSUMS_CONTEXT visible
#include "error.h"
#include "mprintf.h"
#include "checksums.h"
#include "filecntl.h"
#include "names.h"
#include "dirs.h"
#include "configparser.h"

const char * const changes_checksum_names[] = {
	"Files", "Checksums-Sha1", "Checksums-Sha256"
};
const char * const source_checksum_names[] = {
	"Files", "Checksums-Sha1", "Checksums-Sha256"
};
const char * const release_checksum_names[cs_hashCOUNT] = {
	"MD5Sum", "SHA1", "SHA256"
};

/* The internal representation of a checksum, as written to the databases,
 * is \(:[1-9a-z]:[^ ]\+ \)*[0-9a-fA-F]\+ [0-9]\+
 * first some hashes, whose type is determined by a single character
 * (also yet unknown hashes are supported and should be preserved, but are
 * not generated)
 * after that the md5sum and finaly the size in dezimal representation.
 *
 * Checksums are parsed and stored in a structure for fast access of their
 * known parts:
 */
#ifdef SPLINT
typedef size_t hashlen_t;
#else
typedef unsigned short hashlen_t;
#endif

/* offsets/lengths of each hash inside the flexible 'representation' string */
struct checksums {
	struct { unsigned short ofs;
		hashlen_t len;
	} parts[cs_COUNT];
	char representation[];
};
#define checksums_hashpart(c, t) ((c)->representation + (c)->parts[t].ofs)
#define checksums_totallength(c) ((c)->parts[cs_length].ofs + (c)->parts[cs_length].len)

static const char * const hash_name[cs_COUNT] = {
	"md5", "sha1", "sha256", "size"
};

void checksums_free(struct checksums *checksums) {
	free(checksums);
}

/* Build a checksums struct from NUL-terminated hash strings (ownership of
 * all strings in hashes[] is taken and they are always freed, even on
 * error). Validates the md5/size/extended hashes character-wise. */
retvalue checksums_init(/*@out@*/struct checksums **checksums_p, char *hashes[cs_COUNT]) {
	const char *p, *size;
	char *d;
	struct checksums *n;
	enum checksumtype type;
	size_t len, hashlens[cs_COUNT];

	/* everything assumes yet that this is available */
	if (hashes[cs_length] == NULL) {
		for (type = cs_md5sum ; type < cs_COUNT ; type++)
			free(hashes[type]);
		*checksums_p = NULL;
		return RET_OK;
	}

	size = hashes[cs_length];
	/* strip leading zeroes from the size */
	while (*size == '0' && size[1] >= '0' && size[1] <= '9')
		size++;
	if (hashes[cs_md5sum] == NULL)
		hashlens[cs_md5sum] = 1;
	else
		hashlens[cs_md5sum] = strlen(hashes[cs_md5sum]);
	hashlens[cs_length] = strlen(size);
	len = hashlens[cs_md5sum] + 1 + hashlens[cs_length];

	p = hashes[cs_md5sum];
	if (p != NULL) {
		while ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f')
				|| (*p >= 'A' && *p <= 'F'))
			p++;
		if (*p != '\0') {
			// TODO: find way to give more meaningfull error message
			fprintf(stderr, "Invalid md5 hash: '%s'\n",
					hashes[cs_md5sum]);
			for (type = cs_md5sum ; type < cs_COUNT ; type++)
				free(hashes[type]);
			return RET_ERROR;
		}
	}
	p = size;
	while ((*p >= '0' && *p <= '9'))
		p++;
	if (*p != '\0') {
		// TODO: find way to give more meaningfull error message
		fprintf(stderr, "Invalid size: '%s'\n", size);
		for (type = cs_md5sum ; type < cs_COUNT ; type++)
			free(hashes[type]);
		return RET_ERROR;
	}

	for (type = cs_firstEXTENDED ; type < cs_hashCOUNT ; type++) {
		if (hashes[type] ==
				NULL)
			continue;
		p = hashes[type];
		while ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f')
				|| (*p >= 'A' && *p <= 'F'))
			p++;
		if (*p != '\0') {
			// TODO: find way to give more meaningfull error message
			fprintf(stderr, "Invalid hash: '%s'\n", hashes[type]);
			for (type = cs_md5sum ; type < cs_COUNT ; type++)
				free(hashes[type]);
			return RET_ERROR;
		}
		hashlens[type] = (size_t)(p - hashes[type]);
		len += strlen(" :x:") + hashlens[type];
	}

	n = malloc(sizeof(struct checksums) + len + 1);
	if (FAILEDTOALLOC(n)) {
		for (type = cs_md5sum ; type < cs_COUNT ; type++)
			free(hashes[type]);
		return RET_ERROR_OOM;
	}
	setzero(struct checksums, n);
	d = n->representation;

	/* serialize the extended hashes first, each prefixed ":<id>:" */
	for (type = cs_firstEXTENDED ; type < cs_hashCOUNT ; type++) {
		if (hashes[type] == NULL)
			continue;
		*(d++) = ':';
		*(d++) = '1' + (char)(type - cs_sha1sum);
		*(d++) = ':';
		n->parts[type].ofs = d - n->representation;
		n->parts[type].len = (hashlen_t)hashlens[type];
		memcpy(d, hashes[type], hashlens[type]);
		d += hashlens[type];
		*(d++) = ' ';
	}
	/* a missing md5sum is stored as "-" with len 0 */
	if (hashes[cs_md5sum] == NULL) {
		n->parts[cs_md5sum].ofs = d - n->representation;
		n->parts[cs_md5sum].len = 0;
		*(d++) = '-';
	} else {
		n->parts[cs_md5sum].ofs = d - n->representation;
		n->parts[cs_md5sum].len = (hashlen_t)hashlens[cs_md5sum];
		memcpy(d, hashes[cs_md5sum], hashlens[cs_md5sum]);
		d += hashlens[cs_md5sum];
	}
	*(d++) = ' ';
	n->parts[cs_length].ofs = d - n->representation;
	n->parts[cs_length].len = (hashlen_t)hashlens[cs_length];
	/* also copies the terminating NUL */
	memcpy(d, size, hashlens[cs_length] + 1);
	d += hashlens[cs_length] + 1;
	assert ((size_t)(d-n->representation) == len + 1);
	for (type = cs_md5sum ; type < cs_COUNT ; type++)
		free(hashes[type]);
	*checksums_p = n;
	return RET_OK;
}

/* Like checksums_init, but from non-owned (start,len) slices; the slices
 * are copied, nothing is freed. The size slice is mandatory. */
retvalue checksums_initialize(struct checksums **checksums_p, const struct hash_data *hashes) {
	char *d;
	struct checksums *n;
	enum checksumtype type;
	size_t len;

	/* everything assumes that this is available */
	if (hashes[cs_length].start == NULL) {
		assert (0 == 1);
		*checksums_p = NULL;
		return RET_ERROR;
	}
	len = hashes[cs_md5sum].len + 1 + hashes[cs_length].len;
	if (hashes[cs_md5sum].start == NULL) {
		assert(hashes[cs_md5sum].len == 0);
		/* room for the "-" placeholder */
		len++;
	}
	for (type = cs_firstEXTENDED ; type < cs_hashCOUNT ; type++) {
		if (hashes[type].start == NULL)
			continue;
		len += strlen(" :x:") + hashes[type].len;
	}
	n = malloc(sizeof(struct checksums) + len + 1);
	if (FAILEDTOALLOC(n))
		return RET_ERROR_OOM;
	setzero(struct checksums, n);
	d = n->representation;
	for (type = cs_firstEXTENDED ; type < cs_hashCOUNT ; type++) {
		if (hashes[type].start == NULL)
			continue;
		*(d++) = ':';
		*(d++) = '1' + (char)(type - cs_firstEXTENDED);
		*(d++) = ':';
		n->parts[type].ofs = d - n->representation;
		n->parts[type].len = (hashlen_t)hashes[type].len;
		memcpy(d, hashes[type].start, hashes[type].len);
		d += hashes[type].len;
		*(d++) = ' ';
	}
	if (hashes[cs_md5sum].start == NULL) {
		n->parts[cs_md5sum].ofs = d - n->representation;
		n->parts[cs_md5sum].len = 0;
		*(d++) = '-';
	} else {
		n->parts[cs_md5sum].ofs = d - n->representation;
		n->parts[cs_md5sum].len = (hashlen_t)hashes[cs_md5sum].len;
		memcpy(d, hashes[cs_md5sum].start, hashes[cs_md5sum].len);
		d += hashes[cs_md5sum].len;
	}
	*(d++) = ' ';
	n->parts[cs_length].ofs = d - n->representation;
	n->parts[cs_length].len = (hashlen_t)hashes[cs_length].len;
	memcpy(d, hashes[cs_length].start, hashes[cs_length].len);
	d += hashes[cs_length].len;
	*(d++) = '\0';
	assert ((size_t)(d-n->representation) == len + 1);
	*checksums_p = n;
	return RET_OK;
}

/* Rebuild a checksums struct from a database-stored combined string. */
retvalue checksums_setall(/*@out@*/struct checksums **checksums_p, const char *combinedchecksum, UNUSED(size_t len)) {
	// This comes from our database, so it surely well formed
	// (as alreadyassumed above), so this should be possible to
	// do faster than that...
	return checksums_parse(checksums_p, combinedchecksum);
}

/* Parse the combined ":<id>:<hash> ... <md5|-> <size>" representation into
 * a checksums struct; unknown ':<id>:' hashes are preserved verbatim,
 * md5 hex digits are lowercased, leading zeroes of the size are dropped. */
retvalue checksums_parse(struct checksums **checksums_p, const char *combinedchecksum) {
	struct checksums *n;
	size_t len = strlen(combinedchecksum);
	const char *p = combinedchecksum;
	/*@dependent@*/char *d;
	char type;
	/*@dependent@*/const char *start;

	n = malloc(sizeof(struct checksums) + len + 1);
	if (FAILEDTOALLOC(n))
		return RET_ERROR_OOM;
	setzero(struct checksums, n);
	d = n->representation;
	/* leading ":<id>:<hash>" groups */
	while (*p == ':') {
		p++;
		if (p[0] == '\0' || p[1] != ':') {
			// TODO: how to get some context in this?
			fprintf(stderr,
"Malformed checksums representation: '%s'!\n", combinedchecksum);
			free(n);
			return RET_ERROR;
		}
		type = p[0];
		p += 2;
		*(d++) = ':';
		*(d++) = type;
		*(d++) = ':';
		if (type == '1') {
			start = d;
			n->parts[cs_sha1sum].ofs = d - n->representation;
			while (*p != ' ' && *p != '\0')
				*(d++) = *(p++);
			n->parts[cs_sha1sum].len = (hashlen_t)(d - start);
		} else if (type == '2') {
			start = d;
			n->parts[cs_sha256sum].ofs = d - n->representation;
			while (*p != ' ' && *p != '\0')
				*(d++) = *(p++);
			n->parts[cs_sha256sum].len = (hashlen_t)(d - start);
		} else {
			/* unknown hash type: copy through unparsed */
			while (*p != ' ' && *p != '\0')
				*(d++) = *(p++);
		}
		*(d++) = ' ';
		while (*p == ' ')
			p++;
	}
	/* md5 part ("-" when absent, recorded with len 0) */
	n->parts[cs_md5sum].ofs = d - n->representation;
	start = d;
	if (*p == '-' && p[1] == ' ') {
		p++;
		*(d++) = '-';
		start = d;
	} else while (*p != ' ' && *p != '\0') {
		if ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f')) {
			*(d++) = *(p++);
		} else if (*p >= 'A' && *p <= 'F') {
			/* normalize to lowercase hex */
			*(d++) = *(p++) + ('a' - 'A');
		} else {
			// TODO: how to get some context in this?
			fprintf(stderr,
"Malformed checksums representation (invalid md5sum): '%s'!\n",
					combinedchecksum);
			free(n);
			return RET_ERROR;
		}
	}
	n->parts[cs_md5sum].len = (hashlen_t)(d - start);
	*(d++) = ' ';
	while (*p == ' ')
		p++;
	/* size part, mandatory, leading zeroes skipped */
	n->parts[cs_length].ofs = d - n->representation;
	while (*p == '0' && (p[1] >= '0' && p[1] <= '9'))
		p++;
	start = d;
	while (*p != '\0') {
		if (*p >= '0' && *p <= '9') {
			*(d++) = *(p++);
		} else {
			// TODO: how to get some context in this?
			fprintf(stderr,
"Malformed checksums representation (invalid size): '%s'!\n",
					combinedchecksum);
			free(n);
			return RET_ERROR;
		}
	}
	n->parts[cs_length].len = (hashlen_t)(d - start);
	if (d == start) {
		// TODO: how to get some context in this?
		fprintf(stderr,
"Malformed checksums representation (no size): '%s'!\n",
				combinedchecksum);
		free(n);
		return RET_ERROR;
	}
	*d = '\0';
	assert ((size_t)(d - n->representation) <= len);
	*checksums_p = n;
	return RET_OK;
}

/* Deep copy; returns NULL on allocation failure. */
struct checksums *checksums_dup(const struct checksums *checksums) {
	struct checksums *n;
	size_t len;

	assert (checksums != NULL);
	len = checksums_totallength(checksums);
	assert (checksums->representation[len] == '\0');

	n = malloc(sizeof(struct checksums) + len + 1);
	if (FAILEDTOALLOC(n))
		return NULL;
	memcpy(n, checksums, sizeof(struct checksums) + len + 1);
	assert (n->representation[len] == '\0');
	return n;
}

/* Get a pointer/length for one stored part; false if that part is absent. */
bool checksums_getpart(const struct checksums *checksums, enum checksumtype type, const char **sum_p, size_t *size_p) {
	assert (type < cs_COUNT);

	if (checksums->parts[type].len == 0)
		return false;
	*size_p = checksums->parts[type].len;
	*sum_p = checksums_hashpart(checksums, type);
	return true;
}

/* Get a hash part together with the size part; false if hash absent. */
bool checksums_gethashpart(const struct checksums *checksums, enum checksumtype type, const char **hash_p, size_t *hashlen_p, const char **size_p, size_t *sizelen_p) {
	assert (type < cs_hashCOUNT);
	if (checksums->parts[type].len == 0)
		return false;
	*hashlen_p = checksums->parts[type].len;
	*hash_p = checksums_hashpart(checksums, type);
	*sizelen_p = checksums->parts[cs_length].len;
	*size_p = checksums_hashpart(checksums, cs_length);
	return true;
}

/* Expose the full combined representation (points into the struct). */
retvalue checksums_getcombined(const struct checksums *checksums, /*@out@*/const char **data_p, /*@out@*/size_t *datalen_p) {
	size_t len;

	assert (checksums != NULL);
	len = checksums->parts[cs_length].ofs + checksums->parts[cs_length].len;
	assert (checksums->representation[len] == '\0');

	*data_p = checksums->representation;
	*datalen_p = len;
	return RET_OK;
}

/* Decode the stored decimal size part into an off_t. */
off_t checksums_getfilesize(const struct checksums *checksums) {
	const char *p = checksums_hashpart(checksums, cs_length);
	off_t filesize;

	filesize = 0;
	while (*p <= '9' && *p >= '0') {
		filesize = filesize*10 + (size_t)(*p-'0');
		p++;
	}
	assert (*p == '\0');
	return filesize;
}

/* Check a "<hash> <size>" string against the stored hash of 'type';
 * an absent stored hash matches anything. */
bool checksums_matches(const struct checksums *checksums, enum checksumtype type, const char *sum) {
	size_t len = (size_t)checksums->parts[type].len;

	assert (type < cs_hashCOUNT);

	if (len == 0)
		return true;

	if (strncmp(sum, checksums_hashpart(checksums, type), len) != 0)
		return false;

	if (sum[len] != ' ')
		return false;

	/* assuming count is the last part: */
	if (strncmp(sum + len + 1,
				checksums_hashpart(checksums, cs_length),
				checksums->parts[cs_length].len + 1) != 0)
		return false;

	return true;
}

/* true if both sides have the hash and the values disagree */
static inline bool differ(const struct checksums *a, const struct checksums *b, enum checksumtype type) {
	if (a->parts[type].len == 0 || b->parts[type].len == 0)
		return false;
	if (a->parts[type].len != b->parts[type].len)
		return true;
	return memcmp(checksums_hashpart(a, type),
			checksums_hashpart(b, type),
			a->parts[type].len) != 0;
}

/* Compare against freshly computed checksums: false on any mismatch;
 * *improves is set when realchecksums has hashes we lack. */
bool checksums_check(const struct checksums *checksums, const struct checksums *realchecksums, bool *improves) {
	enum checksumtype type;
	bool additional = false;

	for (type = cs_md5sum ; type < cs_COUNT ; type++) {
		if (differ(checksums, realchecksums, type))
			return false;
		if (checksums->parts[type].len == 0 &&
				realchecksums->parts[type].len != 0)
			additional = true;
	}
	if (improves != NULL)
		*improves = additional;
	return true;
}

/* Print expected-vs-got for every differing hash, one line each. */
void checksums_printdifferences(FILE *f, const struct checksums *expected, const struct checksums *got) {
	enum checksumtype type;

	for (type = cs_md5sum ; type < cs_COUNT ; type++) {
		if (differ(expected, got, type)) {
			fprintf(f, "%s expected: %.*s, got: %.*s\n",
					hash_name[type],
					(int)expected->parts[type].len,
					checksums_hashpart(expected, type),
					(int)got->parts[type].len,
					checksums_hashpart(got, type));
		}
	}
}

/* Merge the hashes of 'by' into *checksums_p (a two-way merge over the
 * sorted ':<id>:' groups); frees the old value. improvedhashes[cs] is set
 * for each hash type newly gained from 'by'. */
retvalue checksums_combine(struct checksums **checksums_p, const struct checksums *by, bool *improvedhashes) /*@requires only *checksums_p @*/ /*@ensures only *checksums_p @*/ {
	struct checksums *old = *checksums_p, *n;
	size_t len = checksums_totallength(old) + checksums_totallength(by);
	const char *o, *b, *start;
	char /*@dependent@*/ *d;
	char typeid;

	n = malloc(sizeof(struct checksums)+ len + 1);
	if (FAILEDTOALLOC(n))
		return RET_ERROR_OOM;
	setzero(struct checksums, n);
	o = old->representation;
	b = by->representation;
	d = n->representation;

	while (*o == ':' || *b == ':') {
		/* prefer the old entry when both have the same (or lower) id */
		if (b[0] != ':' || (o[0] == ':' && o[1] <= b[1])) {
			*(d++) = *(o++);
			typeid = *o;
			*(d++) = *(o++);
			*(d++) = *(o++);
			if (typeid == '1') {
				start = d;
				n->parts[cs_sha1sum].ofs =
					d - n->representation;
				while (*o != ' ' && *o != '\0')
					*(d++) = *(o++);
				n->parts[cs_sha1sum].len =
					(hashlen_t)(d - start);
			} else if (typeid == '2') {
				start = d;
				n->parts[cs_sha256sum].ofs =
					d - n->representation;
				while (*o != ' ' && *o != '\0')
					*(d++) = *(o++);
				n->parts[cs_sha256sum].len =
					(hashlen_t)(d - start);
			} else
				while (*o != ' ' && *o != '\0')
					*(d++) = *(o++);
			assert (*o == ' ');
			if (*o == ' ')
				*(d++) = *(o++);
			/* skip the duplicate entry on the 'by' side */
			if (b[0] == ':' && typeid == b[1]) {
				while (*b != ' ' && *b != '\0')
					b++;
				assert (*b == ' ');
				if (*b == ' ')
					b++;
			}
		} else {
			/* take the entry only 'by' has */
			*(d++) = *(b++);
			typeid = *b;
			*(d++) = *(b++);
			*(d++) = *(b++);
			if (typeid == '1') {
				if (improvedhashes != NULL)
					improvedhashes[cs_sha1sum] = true;
				start = d;
				n->parts[cs_sha1sum].ofs =
					d - n->representation;
				while (*b != ' ' && *b != '\0')
					*(d++) = *(b++);
				n->parts[cs_sha1sum].len =
					(hashlen_t)(d - start);
			} else if (typeid == '2') {
				if (improvedhashes != NULL)
					improvedhashes[cs_sha256sum] = true;
				start = d;
				n->parts[cs_sha256sum].ofs =
					d - n->representation;
				while (*b != ' ' && *b != '\0')
					*(d++) = *(b++);
				n->parts[cs_sha256sum].len =
					(hashlen_t)(d - start);
			} else
				while (*b != ' ' && *b != '\0')
					*(d++) = *(b++);
			assert (*b == ' ');
			if (*b == ' ')
				*(d++) = *(b++);
		}
	}
	/* now take md5sum from original code, unless only the new one has it */
	n->parts[cs_md5sum].ofs = d - n->representation;
	start = d;
	if (*o == '-' && *b != '-')
		o = b;
	while (*o != ' ' && *o != '\0')
		*(d++) = *(o++);
	n->parts[cs_md5sum].len = (hashlen_t)(d - start);
	assert (*o == ' ');
	if (*o == ' ')
		*(d++) = *(o++);
	/* and now the size */
	n->parts[cs_length].ofs = d - n->representation;
	start = d;
	while (*o != '\0')
		*(d++) = *(o++);
	n->parts[cs_length].len = (hashlen_t)(d - start);
	assert ((size_t)(d - n->representation) <= len);
	*(d++) = '\0';
	/* shrink to the actually used size; keep n if realloc fails */
	*checksums_p = realloc(n, sizeof(struct checksums)
			+ (d-n->representation));
	if (*checksums_p == NULL)
		*checksums_p = n;
	checksums_free(old);
	return RET_OK;
}

/* Free all checksums of the array and the name list. */
void checksumsarray_done(struct checksumsarray *array) {
	if (array->names.count > 0) {
		int i;
		assert (array->checksums != NULL);
		for (i = 0 ; i < array->names.count ; i++) {
			checksums_free(array->checksums[i]);
		}
	} else
		assert (array->checksums == NULL);
	strlist_done(&array->names);
	free(array->checksums);
}

/* Parse one "<hash> <size> <basename>" line (as found in Files/Checksums-*
 * fields); all returned slices point into 'line'. */
retvalue hashline_parse(const char *filenametoshow, const char *line, enum checksumtype cs, const char **basename_p, struct hash_data *data_p, struct hash_data *size_p) {
	const char *p = line;
	const char *hash_start, *size_start, *filename;
	size_t hash_len, size_len;

	while (*p == ' ' || *p == '\t')
		p++;
	hash_start = p;
	while ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f'))
		p++;
	hash_len = p - hash_start;
	while (*p == ' ' || *p == '\t')
		p++;
	/* skip leading zeroes of the size */
	while (*p == '0' && p[1] >= '0' && p[1] <= '9')
		p++;
	size_start = p;
	while ((*p >= '0' && *p <= '9'))
		p++;
	size_len = p - size_start;
	while (*p == ' ' || *p == '\t')
		p++;
	filename = p;
	while (*p != '\0' && *p != ' ' && *p != '\t'
			&& *p != '\r' && *p != '\n')
		p++;
	if (unlikely(size_len == 0 || hash_len == 0
				|| filename == p || *p != '\0')) {
		fprintf(stderr,
				"Error parsing %s checksum line ' %s' within '%s'\n",
				hash_name[cs], line, filenametoshow);
		return RET_ERROR;
	}
	*basename_p = filename;
	data_p->start = hash_start;
	data_p->len = hash_len;
	size_p->start = size_start;
	size_p->len = size_len;
	return RET_OK;
}

/* Parse all hash lists (Files/Checksums-Sha1/Checksums-Sha256) of one
 * control block into a checksumsarray, matching lines up by filename. */
retvalue checksumsarray_parse(struct checksumsarray *out, const struct strlist l[cs_hashCOUNT], const char *filenametoshow) {
	retvalue r;
	int i;
	struct checksumsarray a;
	struct strlist filenames;
	size_t count;
	bool foundhashtype[cs_hashCOUNT];
	struct hashes *parsed;
	enum checksumtype cs;

	memset(foundhashtype, 0, sizeof(foundhashtype));

	/* avoid realloc by allocing the absolute maximum only
	 * if every checksum field contains different files */
	count = 0;
	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
		count += l[cs].count;
	}
	parsed = nzNEW(count, struct hashes);
	if (FAILEDTOALLOC(parsed))
		return RET_ERROR_OOM;
	strlist_init_n(count + 1, &filenames);

	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
		for (i = 0 ; i < l[cs].count ; i++) {
			const char *line = l[cs].values[i];
			const char *p = line,
			      *hash_start, *size_start, *filename;
			size_t hash_len, size_len;
			int fileofs;

			while (*p == ' ' || *p == '\t')
				p++;
			hash_start = p;
			while ((*p >= '0' && *p <= '9')
					|| (*p >= 'a' && *p <= 'f'))
				p++;
			hash_len = p - hash_start;
			while (*p == ' ' || *p == '\t')
				p++;
			while (*p == '0' && p[1] >= '0' && p[1] <= '9')
				p++;
			size_start = p;
			while ((*p >= '0' && *p <= '9'))
				p++;
			size_len = p - size_start;
			while (*p == ' ' || *p == '\t')
				p++;
			filename = p;
			while (*p != '\0' && *p != ' ' && *p != '\t'
					&& *p != '\r' && *p != '\n')
				p++;
			if (unlikely(size_len == 0 || hash_len == 0
						|| filename == p
						|| *p != '\0')) {
				fprintf(stderr,
						"Error parsing %s checksum line ' %s' within '%s'\n",
						hash_name[cs], line,
						filenametoshow);
				strlist_done(&filenames);
				free(parsed);
				return RET_ERROR;
			} else {
				struct hash_data *hashes;

				fileofs = strlist_ofs(&filenames, filename);
				if (fileofs == -1) {
					/* first time this filename is seen:
					 * record it and its size */
					fileofs = filenames.count;
					r = strlist_add_dup(&filenames,
							filename);
					if (RET_WAS_ERROR(r)) {
						strlist_done(&filenames);
						free(parsed);
						return r;
					}
					hashes = parsed[fileofs].hashes;
					hashes[cs_length].start = size_start;
					hashes[cs_length].len = size_len;
				} else {
					hashes = parsed[fileofs].hashes;
					/* all hash lists must agree on the
					 * file size */
					if (unlikely(hashes[cs_length].len
							!= size_len
						|| memcmp(hashes[cs_length].start,
							size_start,
							size_len) != 0)) {
						fprintf(stderr,
"WARNING: %s checksum line ' %s' in '%s' contradicts previous filesize!\n",
							hash_name[cs], line,
							filenametoshow);
						continue;
					}
				}
				hashes[cs].start = hash_start;
				hashes[cs].len = hash_len;
				foundhashtype[cs] = true;
			}
		}
	}
	assert (count >= (size_t)filenames.count);
	if (filenames.count == 0) {
		strlist_done(&filenames);
		strlist_init(&out->names);
		out->checksums = NULL;
		free(parsed);
		return RET_OK;
	}
#if 0
	// TODO: reenable this once apt-utils is fixed for a long enough time...
	for (i = 0 ; i < filenames.count ; i++) {
		for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
			if (!foundhashtype[cs])
				continue;
			if (parsed[i].hashes[cs].start == NULL) {
				fprintf(stderr,
"WARNING: Inconsistent hashes in %s: '%s' missing %s!\n",
					filenametoshow, filenames.values[i],
					hash_name[cs]);
				r = RET_ERROR;
				/* show one per file, but list all problematic files */
				break;
			}
		}
	}
#endif
	a.checksums = nzNEW(filenames.count+1, struct checksums *);
	if (FAILEDTOALLOC(a.checksums)) {
		strlist_done(&filenames);
		free(parsed);
		return RET_ERROR_OOM;
	}
	strlist_move(&a.names, &filenames);
	for (i = 0 ; i < a.names.count ; i++) {
		r = checksums_initialize(a.checksums + i, parsed[i].hashes);
		if (RET_WAS_ERROR(r)) {
			free(parsed);
			checksumsarray_done(&a);
			return r;
		}
	}
	checksumsarray_move(out, &a);
	free(parsed);
	return RET_OK;
}

/* Generate the Files/Checksums-Sha1/Checksums-Sha256 field bodies for the
 * array. NOTE(review): this function continues past the end of this chunk;
 * only its beginning is visible here. */
retvalue checksumsarray_genfilelist(const struct checksumsarray *a, char **md5_p, char **sha1_p, char **sha256_p) {
	size_t lens[cs_hashCOUNT];
	bool missing[cs_hashCOUNT];
	char *filelines[cs_hashCOUNT];
	int i;
	enum checksumtype cs;
	size_t filenamelen[a->names.count];

	memset(missing, 0, sizeof(missing));
	memset(lens, 0, sizeof(lens));

	/* precompute the needed buffer length per hash type */
	for (i=0 ; i < a->names.count ; i++) {
		const struct checksums *checksums = a->checksums[i];
		size_t len;

		filenamelen[i] = strlen(a->names.values[i]);
		len = 4 + filenamelen[i] + checksums->parts[cs_length].len;
		assert (checksums != NULL);
		if (checksums->parts[cs_md5sum].len == 0)
			lens[cs_md5sum] += len + 1;
		else
			lens[cs_md5sum] += len
				+ checksums->parts[cs_md5sum].len;
		for (cs = cs_md5sum+1 ; cs < cs_hashCOUNT ; cs++) {
			if (checksums->parts[cs].len == 0)
				missing[cs] = true;
			lens[cs] += len + checksums->parts[cs].len;
		}
	}
	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
		if (missing[cs])
			filelines[cs] = NULL;
		else {
			filelines[cs] = malloc(lens[cs] + 1);
			if (FAILEDTOALLOC(filelines[cs])) {
				while (cs-- > cs_md5sum)
					free(filelines[cs]);
				return RET_ERROR_OOM;
			}
		}
	}
	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
		char *p;

		if (missing[cs])
continue; p = filelines[cs]; *(p++) = '\n'; for (i=0 ; i < a->names.count ; i++) { const struct checksums *c = a->checksums[i]; *(p++) = ' '; if (c->parts[cs].len == 0) { *(p++) = '-'; } else { memcpy(p, checksums_hashpart(c, cs), c->parts[cs].len); p += c->parts[cs].len; } *(p++) = ' '; memcpy(p, checksums_hashpart(c, cs_length), c->parts[cs_length].len); p += c->parts[cs_length].len; *(p++) = ' '; memcpy(p, a->names.values[i], filenamelen[i]); p += filenamelen[i]; *(p++) = '\n'; } *(--p) = '\0'; assert ((size_t)(p - filelines[cs]) == lens[cs]); } *md5_p = filelines[cs_md5sum]; *sha1_p = filelines[cs_sha1sum]; *sha256_p = filelines[cs_sha256sum]; return RET_OK; } void checksumsarray_move(/*@out@*/struct checksumsarray *destination, struct checksumsarray *origin) { strlist_move(&destination->names, &origin->names); destination->checksums = origin->checksums; origin->checksums = NULL; } void checksumsarray_resetunsupported(const struct checksumsarray *a, bool *types) { int i; enum checksumtype cs; for (i = 0 ; i < a->names.count ; i++) { struct checksums *c = a->checksums[i]; for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) { if (c->parts[cs].len == 0) types[cs] = false; } } } retvalue checksumsarray_include(struct checksumsarray *a, /*@only@*/char *name, const struct checksums *checksums) { retvalue r; struct checksums **n; int count = a->names.count; n = nNEW(count + 1, struct checksums *); if (FAILEDTOALLOC(n)) { free(name); return RET_ERROR_OOM; } n[0] = checksums_dup(checksums); if (FAILEDTOALLOC(n[0])) { free(name); free(n); return RET_ERROR_OOM; } r = strlist_include(&a->names, name); if (!RET_IS_OK(r)) { checksums_free(n[0]); free(n); return r; } assert (a->names.count == count + 1); if (count > 0) { assert (a->checksums != NULL); memcpy(&n[1], a->checksums, count*sizeof(struct checksums*)); } free(a->checksums); a->checksums = n; return RET_OK; } /* check if the file has the given md5sum (only cheap tests like size), * RET_NOTHING means file does not exist, 
RET_ERROR_WRONG_MD5 means wrong size */ retvalue checksums_cheaptest(const char *fullfilename, const struct checksums *checksums, bool complain) { off_t expectedsize; int i; struct stat s; i = stat(fullfilename, &s); if (i < 0) { i = errno; if (i == EACCES || i == ENOENT) return RET_NOTHING; else { fprintf(stderr, "Error %d stating '%s': %s!\n", i, fullfilename, strerror(i)); return RET_ERRNO(i); } } expectedsize = checksums_getfilesize(checksums); if (s.st_size == expectedsize) return RET_OK; if (complain) fprintf(stderr, "WRONG SIZE of '%s': expected %lld found %lld\n", fullfilename, (long long)expectedsize, (long long)s.st_size); return RET_ERROR_WRONG_MD5; } retvalue checksums_test(const char *filename, const struct checksums *checksums, struct checksums **checksums_p) { retvalue r; struct checksums *filechecksums; bool improves; /* check if it is there and has the correct size */ r = checksums_cheaptest(filename, checksums, false); /* if it is, read its checksums */ if (RET_IS_OK(r)) r = checksums_read(filename, &filechecksums); if (!RET_IS_OK(r)) return r; if (!checksums_check(checksums, filechecksums, &improves)) { checksums_free(filechecksums); return RET_ERROR_WRONG_MD5; } if (improves && checksums_p != NULL) { if (*checksums_p == NULL) { *checksums_p = checksums_dup(checksums); if (FAILEDTOALLOC(*checksums_p)) { checksums_free(filechecksums); return RET_ERROR_OOM; } } r = checksums_combine(checksums_p, filechecksums, NULL); if (RET_WAS_ERROR(r)) { checksums_free(filechecksums); return r; } } checksums_free(filechecksums); return RET_OK; } /* copy, only checking file size, perhaps add some paranoia checks later */ static retvalue copy(const char *destination, const char *source, const struct checksums *checksums) { off_t filesize = 0, expected; static const size_t bufsize = 16384; char *buffer = malloc(bufsize); ssize_t sizeread, towrite, written; const char *start; int e, i; int infd, outfd; if (FAILEDTOALLOC(buffer)) return RET_ERROR_OOM; infd = 
open(source, O_RDONLY); if (infd < 0) { e = errno; fprintf(stderr, "Error %d opening '%s': %s\n", e, source, strerror(e)); free(buffer); return RET_ERRNO(e); } outfd = open(destination, O_NOCTTY|O_WRONLY|O_CREAT|O_EXCL, 0666); if (outfd < 0) { e = errno; fprintf(stderr, "Error %d creating '%s': %s\n", e, destination, strerror(e)); (void)close(infd); free(buffer); return RET_ERRNO(e); } filesize = 0; do { sizeread = read(infd, buffer, bufsize); if (sizeread < 0) { e = errno; fprintf(stderr, "Error %d while reading %s: %s\n", e, source, strerror(e)); free(buffer); (void)close(infd); (void)close(outfd); deletefile(destination); return RET_ERRNO(e);; } filesize += sizeread; towrite = sizeread; start = buffer; while (towrite > 0) { written = write(outfd, start, (size_t)towrite); if (written < 0) { e = errno; fprintf(stderr, "Error %d while writing to %s: %s\n", e, destination, strerror(e)); free(buffer); (void)close(infd); (void)close(outfd); deletefile(destination); return RET_ERRNO(e);; } towrite -= written; start += written; } } while (sizeread > 0); free(buffer); i = close(infd); if (i != 0) { e = errno; fprintf(stderr, "Error %d reading %s: %s\n", e, source, strerror(e)); (void)close(outfd); deletefile(destination); return RET_ERRNO(e);; } i = close(outfd); if (i != 0) { e = errno; fprintf(stderr, "Error %d writing to %s: %s\n", e, destination, strerror(e)); deletefile(destination); return RET_ERRNO(e);; } expected = checksums_getfilesize(checksums); if (filesize != expected) { fprintf(stderr, "Error copying %s to %s:\n" " File seems to be of size %llu, while %llu was expected!\n", source, destination, (unsigned long long)filesize, (unsigned long long)expected); deletefile(destination); return RET_ERROR_WRONG_MD5; } return RET_OK; } retvalue checksums_hardlink(const char *directory, const char *filekey, const char *sourcefilename, const struct checksums *checksums) { retvalue r; int i, e; char *fullfilename = calc_dirconcat(directory, filekey); if 
(FAILEDTOALLOC(fullfilename)) return RET_ERROR_OOM; i = link(sourcefilename, fullfilename); e = errno; if (i != 0 && e == EEXIST) { (void)unlink(fullfilename); errno = 0; i = link(sourcefilename, fullfilename); e = errno; } if (i != 0 && (e == EACCES || e == ENOENT || e == ENOTDIR)) { errno = 0; (void)dirs_make_parent(fullfilename); i = link(sourcefilename, fullfilename); e = errno; } if (i != 0) { if (e == EXDEV || e == EPERM || e == EMLINK) { r = copy(fullfilename, sourcefilename, checksums); if (RET_WAS_ERROR(r)) { free(fullfilename); return r; } } else { fprintf(stderr, "Error %d creating hardlink of '%s' as '%s': %s\n", e, sourcefilename, fullfilename, strerror(e)); free(fullfilename); return RET_ERRNO(e); } } free(fullfilename); return RET_OK; } void checksumscontext_init(struct checksumscontext *context) { MD5Init(&context->md5); SHA1Init(&context->sha1); SHA256Init(&context->sha256); } void checksumscontext_update(struct checksumscontext *context, const unsigned char *data, size_t len) { MD5Update(&context->md5, data, len); // TODO: sha1 and sha256 share quite some stuff, // the code can most likely be combined with quite some synergies.. 
SHA1Update(&context->sha1, data, len); SHA256Update(&context->sha256, data, len); } static const char tab[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; retvalue checksums_from_context(struct checksums **out, struct checksumscontext *context) { unsigned char md5buffer[MD5_DIGEST_SIZE], sha1buffer[SHA1_DIGEST_SIZE], sha256buffer[SHA256_DIGEST_SIZE]; char *d; unsigned int i; struct checksums *n; n = malloc(sizeof(struct checksums) + 2*MD5_DIGEST_SIZE + 2*SHA1_DIGEST_SIZE + 2*SHA256_DIGEST_SIZE + 30); if (FAILEDTOALLOC(n)) return RET_ERROR_OOM; setzero(struct checksums, n); d = n->representation; *(d++) = ':'; *(d++) = '1'; *(d++) = ':'; n->parts[cs_sha1sum].ofs = 3; n->parts[cs_sha1sum].len = 2*SHA1_DIGEST_SIZE; SHA1Final(&context->sha1, sha1buffer); for (i = 0 ; i < SHA1_DIGEST_SIZE ; i++) { *(d++) = tab[sha1buffer[i] >> 4]; *(d++) = tab[sha1buffer[i] & 0xF]; } *(d++) = ' '; *(d++) = ':'; *(d++) = '2'; *(d++) = ':'; n->parts[cs_sha256sum].ofs = d - n->representation; n->parts[cs_sha256sum].len = 2*SHA256_DIGEST_SIZE; SHA256Final(&context->sha256, sha256buffer); for (i = 0 ; i < SHA256_DIGEST_SIZE ; i++) { *(d++) = tab[sha256buffer[i] >> 4]; *(d++) = tab[sha256buffer[i] & 0xF]; } *(d++) = ' '; n->parts[cs_md5sum].ofs = d - n->representation; assert (d - n->representation == n->parts[cs_md5sum].ofs); n->parts[cs_md5sum].len = 2*MD5_DIGEST_SIZE; MD5Final(md5buffer, &context->md5); for (i=0 ; i < MD5_DIGEST_SIZE ; i++) { *(d++) = tab[md5buffer[i] >> 4]; *(d++) = tab[md5buffer[i] & 0xF]; } *(d++) = ' '; n->parts[cs_length].ofs = d - n->representation; assert (d - n->representation == n->parts[cs_length].ofs); n->parts[cs_length].len = (hashlen_t)snprintf(d, 2*MD5_DIGEST_SIZE + 2*SHA1_DIGEST_SIZE + 2*SHA256_DIGEST_SIZE + 30 - (d - n->representation), "%lld", (long long)context->sha1.count); assert (strlen(d) == n->parts[cs_length].len); *out = n; return RET_OK; } bool checksums_iscomplete(const struct checksums *checksums) { return 
checksums->parts[cs_md5sum].len != 0 && checksums->parts[cs_sha1sum].len != 0 && checksums->parts[cs_sha256sum].len != 0; } /* Collect missing checksums. * if the file is not there, return RET_NOTHING. * return RET_ERROR_WRONG_MD5 if already existing do not match */ retvalue checksums_complete(struct checksums **checksums_p, const char *fullfilename) { if (checksums_iscomplete(*checksums_p)) return RET_OK; return checksums_test(fullfilename, *checksums_p, checksums_p); } retvalue checksums_read(const char *fullfilename, /*@out@*/struct checksums **checksums_p) { struct checksumscontext context; static const size_t bufsize = 16384; unsigned char *buffer = malloc(bufsize); ssize_t sizeread; int e, i; int infd; if (FAILEDTOALLOC(buffer)) return RET_ERROR_OOM; checksumscontext_init(&context); infd = open(fullfilename, O_RDONLY); if (infd < 0) { e = errno; if ((e == EACCES || e == ENOENT) && !isregularfile(fullfilename)) { free(buffer); return RET_NOTHING; } fprintf(stderr, "Error %d opening '%s': %s\n", e, fullfilename, strerror(e)); free(buffer); return RET_ERRNO(e); } do { sizeread = read(infd, buffer, bufsize); if (sizeread < 0) { e = errno; fprintf(stderr, "Error %d while reading %s: %s\n", e, fullfilename, strerror(e)); free(buffer); (void)close(infd); return RET_ERRNO(e);; } checksumscontext_update(&context, buffer, (size_t)sizeread); } while (sizeread > 0); free(buffer); i = close(infd); if (i != 0) { e = errno; fprintf(stderr, "Error %d reading %s: %s\n", e, fullfilename, strerror(e)); return RET_ERRNO(e);; } return checksums_from_context(checksums_p, &context); } retvalue checksums_copyfile(const char *destination, const char *source, bool deletetarget, struct checksums **checksums_p) { struct checksumscontext context; static const size_t bufsize = 16384; unsigned char *buffer = malloc(bufsize); ssize_t sizeread, towrite, written; const unsigned char *start; int e, i; int infd, outfd; if (FAILEDTOALLOC(buffer)) return RET_ERROR_OOM; infd = open(source, 
O_RDONLY); if (infd < 0) { e = errno; fprintf(stderr, "Error %d opening '%s': %s\n", e, source, strerror(e)); free(buffer); return RET_ERRNO(e); } outfd = open(destination, O_NOCTTY|O_WRONLY|O_CREAT|O_EXCL, 0666); if (outfd < 0) { e = errno; if (e == EEXIST) { if (deletetarget) { i = unlink(destination); if (i != 0) { e = errno; fprintf(stderr, "Error %d deleting '%s': %s\n", e, destination, strerror(e)); (void)close(infd); free(buffer); return RET_ERRNO(e); } outfd = open(destination, O_NOCTTY|O_WRONLY|O_CREAT|O_EXCL, 0666); e = errno; } else { (void)close(infd); free(buffer); return RET_ERROR_EXIST; } } if (outfd < 0) { fprintf(stderr, "Error %d creating '%s': %s\n", e, destination, strerror(e)); (void)close(infd); free(buffer); return RET_ERRNO(e); } } checksumscontext_init(&context); do { sizeread = read(infd, buffer, bufsize); if (sizeread < 0) { e = errno; fprintf(stderr, "Error %d while reading %s: %s\n", e, source, strerror(e)); free(buffer); (void)close(infd); (void)close(outfd); deletefile(destination); return RET_ERRNO(e);; } checksumscontext_update(&context, buffer, (size_t)sizeread); towrite = sizeread; start = buffer; while (towrite > 0) { written = write(outfd, start, (size_t)towrite); if (written < 0) { e = errno; fprintf(stderr, "Error %d while writing to %s: %s\n", e, destination, strerror(e)); free(buffer); (void)close(infd); (void)close(outfd); deletefile(destination); return RET_ERRNO(e);; } towrite -= written; start += written; } } while (sizeread > 0); free(buffer); i = close(infd); if (i != 0) { e = errno; fprintf(stderr, "Error %d reading %s: %s\n", e, source, strerror(e)); (void)close(outfd); deletefile(destination); return RET_ERRNO(e);; } i = close(outfd); if (i != 0) { e = errno; fprintf(stderr, "Error %d writing to %s: %s\n", e, destination, strerror(e)); deletefile(destination); return RET_ERRNO(e);; } return checksums_from_context(checksums_p, &context); } retvalue checksums_linkorcopyfile(const char *destination, const char *source, 
struct checksums **checksums_p) { int i; retvalue r; // TODO: is this needed? perhaps move this duty to the caller... r = dirs_make_parent(destination); if (RET_WAS_ERROR(r)) return r; errno = 0; i = link(source, destination); if (i != 0) return checksums_copyfile(destination, source, true, checksums_p); *checksums_p = NULL; return RET_OK; } retvalue checksums_replace(const char *filename, const char *data, size_t len, struct checksums **checksums_p){ struct checksumscontext context; size_t todo; const char *towrite; char *tempfilename; struct checksums *checksums; int fd, ret; retvalue r; tempfilename = calc_addsuffix(filename, "new"); if (FAILEDTOALLOC(tempfilename)) return RET_ERROR_OOM; fd = open(tempfilename, O_WRONLY|O_CREAT|O_EXCL|O_NOCTTY, 0666); if (fd < 0) { int e = errno; fprintf(stderr, "ERROR creating '%s': %s\n", tempfilename, strerror(e)); free(tempfilename); return RET_ERRNO(e); } todo = len; towrite = data; while (todo > 0) { ssize_t written = write(fd, towrite, todo); if (written >= 0) { todo -= written; towrite += written; } else { int e = errno; close(fd); fprintf(stderr, "Error writing to '%s': %s\n", tempfilename, strerror(e)); unlink(tempfilename); free(tempfilename); return RET_ERRNO(e); } } ret = close(fd); if (ret < 0) { int e = errno; fprintf(stderr, "Error writing to '%s': %s\n", tempfilename, strerror(e)); unlink(tempfilename); free(tempfilename); return RET_ERRNO(e); } if (checksums_p != NULL) { checksumscontext_init(&context); checksumscontext_update(&context, (const unsigned char *)data, len); r = checksums_from_context(&checksums, &context); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { unlink(tempfilename); free(tempfilename); return r; } } else checksums = NULL; ret = rename(tempfilename, filename); if (ret < 0) { int e = errno; fprintf(stderr, "Error moving '%s' to '%s': %s\n", tempfilename, filename, strerror(e)); unlink(tempfilename); free(tempfilename); checksums_free(checksums); return RET_ERRNO(e); } free(tempfilename); 
if (checksums_p != NULL) *checksums_p = checksums; return RET_OK; } const struct constant hashes_constants[cs_hashCOUNT+1] = { {"md5", cs_md5sum}, {"sha1", cs_sha1sum}, {"sha256", cs_sha256sum}, {NULL, 0} }, *hashnames = hashes_constants; reprepro-4.13.1/configure0000755000175100017510000062627512152655327012322 00000000000000#! /bin/sh # Guess values for system-dependent variables and create Makefiles. # Generated by GNU Autoconf 2.69 for reprepro 4.13.1. # # Report bugs to . # # # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. # # # This configure script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. 
if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. 
in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # Use a proper internal environment variable to ensure we don't fall # into an infinite loop, continuously re-executing ourselves. if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then _as_can_reexec=no; export _as_can_reexec; # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 as_fn_exit 255 fi # We don't want this to propagate to other subprocesses. { _as_can_reexec=; unset _as_can_reexec;} if test "x$CONFIG_SHELL" = x; then as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. 
alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi " as_required="as_fn_return () { (exit \$1); } as_fn_success () { as_fn_return 0; } as_fn_failure () { as_fn_return 1; } as_fn_ret_success () { return 0; } as_fn_ret_failure () { return 1; } exitcode=0 as_fn_success || { exitcode=1; echo as_fn_success failed.; } as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : else exitcode=1; echo positional parameters were not saved. fi test x\$exitcode = x0 || exit 1 test -x / || exit 1" as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 test \$(( 1 + 1 )) = 2 || exit 1" if (eval "$as_required") 2>/dev/null; then : as_have_required=yes else as_have_required=no fi if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_found=false for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. as_found=: case $as_dir in #( /*) for as_base in sh bash ksh sh5; do # Try only shells that exist, to save several forks. 
as_shell=$as_dir/$as_base if { test -f "$as_shell" || test -f "$as_shell.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : CONFIG_SHELL=$as_shell as_have_required=yes if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : break 2 fi fi done;; esac as_found=false done $as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : CONFIG_SHELL=$SHELL as_have_required=yes fi; } IFS=$as_save_IFS if test "x$CONFIG_SHELL" != x; then : export CONFIG_SHELL # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 exit 255 fi if test x$as_have_required = xno; then : $as_echo "$0: This script requires a shell more modern than all" $as_echo "$0: the shells that I found on your system." if test x${ZSH_VERSION+set} = xset ; then $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" $as_echo "$0: be upgraded to zsh 4.3.4 or later." else $as_echo "$0: Please tell bug-autoconf@gnu.org and brlink@debian.org $0: about your system, including any error possibly output $0: before this message. Then install a modern shell, or $0: manually run the script under such a shell if you do $0: have one." 
fi exit 1 fi fi fi SHELL=${CONFIG_SHELL-/bin/sh} export SHELL # Unset more variables known to interfere with behavior of common tools. CLICOLOR_FORCE= GREP_OPTIONS= unset CLICOLOR_FORCE GREP_OPTIONS ## --------------------- ## ## M4sh Shell Functions. ## ## --------------------- ## # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. 
Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 
2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits as_lineno_1=$LINENO as_lineno_1a=$LINENO as_lineno_2=$LINENO as_lineno_2a=$LINENO eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } # If we had to re-execute with $CONFIG_SHELL, we're ensured to have # already done that, so ensure we don't try to do so again and fall # in an infinite loop. This has already happened in practice. _as_can_reexec=no; export _as_can_reexec # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... 
but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" test -n "$DJDIR" || exec 7<&0 &1 # Name of the host. # hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, # so uname gets run too. ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` # # Initializations. # ac_default_prefix=/usr/local ac_clean_files= ac_config_libobj_dir=. LIBOBJS= cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= # Identity of this package. PACKAGE_NAME='reprepro' PACKAGE_TARNAME='reprepro' PACKAGE_VERSION='4.13.1' PACKAGE_STRING='reprepro 4.13.1' PACKAGE_BUGREPORT='brlink@debian.org' PACKAGE_URL='' ac_unique_file="main.c" # Factoring default headers for most tests. 
ac_includes_default="\ #include #ifdef HAVE_SYS_TYPES_H # include #endif #ifdef HAVE_SYS_STAT_H # include #endif #ifdef STDC_HEADERS # include # include #else # ifdef HAVE_STDLIB_H # include # endif #endif #ifdef HAVE_STRING_H # if !defined STDC_HEADERS && defined HAVE_MEMORY_H # include # endif # include #endif #ifdef HAVE_STRINGS_H # include #endif #ifdef HAVE_INTTYPES_H # include #endif #ifdef HAVE_STDINT_H # include #endif #ifdef HAVE_UNISTD_H # include #endif" ac_subst_vars='am__EXEEXT_FALSE am__EXEEXT_TRUE LTLIBOBJS LIBOBJS ARCHIVECPP ARCHIVELIBS HAVE_LIBARCHIVE_FALSE HAVE_LIBARCHIVE_TRUE DBLIBS EGREP GREP CPP am__fastdepCC_FALSE am__fastdepCC_TRUE CCDEPMODE am__nodep AMDEPBACKSLASH AMDEP_FALSE AMDEP_TRUE am__quote am__include DEPDIR OBJEXT EXEEXT ac_ct_CC CPPFLAGS LDFLAGS CFLAGS CC MAINT MAINTAINER_MODE_FALSE MAINTAINER_MODE_TRUE am__untar am__tar AMTAR am__leading_dot SET_MAKE AWK mkdir_p MKDIR_P INSTALL_STRIP_PROGRAM STRIP install_sh MAKEINFO AUTOHEADER AUTOMAKE AUTOCONF ACLOCAL VERSION PACKAGE CYGPATH_W am__isrc INSTALL_DATA INSTALL_SCRIPT INSTALL_PROGRAM target_alias host_alias build_alias LIBS ECHO_T ECHO_N ECHO_C DEFS mandir localedir libdir psdir pdfdir dvidir htmldir infodir docdir oldincludedir includedir localstatedir sharedstatedir sysconfdir datadir datarootdir libexecdir sbindir bindir program_transform_name prefix exec_prefix PACKAGE_URL PACKAGE_BUGREPORT PACKAGE_STRING PACKAGE_VERSION PACKAGE_TARNAME PACKAGE_NAME PATH_SEPARATOR SHELL' ac_subst_files='' ac_user_opts=' enable_option_checking enable_maintainer_mode enable_dependency_tracking enable_largefile with_libgpgme with_libbz2 with_libarchive with_static_libarchive ' ac_precious_vars='build_alias host_alias target_alias CC CFLAGS LDFLAGS LIBS CPPFLAGS CPP' # Initialize some variables set by options. ac_init_help= ac_init_version=false ac_unrecognized_opts= ac_unrecognized_sep= # The variables have the same names as the options, with # dashes changed to underlines. 
cache_file=/dev/null exec_prefix=NONE no_create= no_recursion= prefix=NONE program_prefix=NONE program_suffix=NONE program_transform_name=s,x,x, silent= site= srcdir= verbose= x_includes=NONE x_libraries=NONE # Installation directory options. # These are left unexpanded so users can "make install exec_prefix=/foo" # and all the variables that are supposed to be based on exec_prefix # by default will actually change. # Use braces instead of parens because sh, perl, etc. also accept them. # (The list follows the same order as the GNU Coding Standards.) bindir='${exec_prefix}/bin' sbindir='${exec_prefix}/sbin' libexecdir='${exec_prefix}/libexec' datarootdir='${prefix}/share' datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' infodir='${datarootdir}/info' htmldir='${docdir}' dvidir='${docdir}' pdfdir='${docdir}' psdir='${docdir}' libdir='${exec_prefix}/lib' localedir='${datarootdir}/locale' mandir='${datarootdir}/man' ac_prev= ac_dashdash= for ac_option do # If the previous option needs an argument, assign it. if test -n "$ac_prev"; then eval $ac_prev=\$ac_option ac_prev= continue fi case $ac_option in *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; *=) ac_optarg= ;; *) ac_optarg=yes ;; esac # Accept the important Cygnus configure options, so we can diagnose typos. 
case $ac_dashdash$ac_option in --) ac_dashdash=yes ;; -bindir | --bindir | --bindi | --bind | --bin | --bi) ac_prev=bindir ;; -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) bindir=$ac_optarg ;; -build | --build | --buil | --bui | --bu) ac_prev=build_alias ;; -build=* | --build=* | --buil=* | --bui=* | --bu=*) build_alias=$ac_optarg ;; -cache-file | --cache-file | --cache-fil | --cache-fi \ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ac_prev=cache_file ;; -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) cache_file=$ac_optarg ;; --config-cache | -C) cache_file=config.cache ;; -datadir | --datadir | --datadi | --datad) ac_prev=datadir ;; -datadir=* | --datadir=* | --datadi=* | --datad=*) datadir=$ac_optarg ;; -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ | --dataroo | --dataro | --datar) ac_prev=datarootdir ;; -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) datarootdir=$ac_optarg ;; -disable-* | --disable-*) ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? 
"invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=no ;; -docdir | --docdir | --docdi | --doc | --do) ac_prev=docdir ;; -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) docdir=$ac_optarg ;; -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) ac_prev=dvidir ;; -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) dvidir=$ac_optarg ;; -enable-* | --enable-*) ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=\$ac_optarg ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ | --exec | --exe | --ex) ac_prev=exec_prefix ;; -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ | --exec=* | --exe=* | --ex=*) exec_prefix=$ac_optarg ;; -gas | --gas | --ga | --g) # Obsolete; use --with-gas. 
with_gas=yes ;; -help | --help | --hel | --he | -h) ac_init_help=long ;; -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ac_init_help=recursive ;; -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ac_init_help=short ;; -host | --host | --hos | --ho) ac_prev=host_alias ;; -host=* | --host=* | --hos=* | --ho=*) host_alias=$ac_optarg ;; -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) ac_prev=htmldir ;; -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ | --ht=*) htmldir=$ac_optarg ;; -includedir | --includedir | --includedi | --included | --include \ | --includ | --inclu | --incl | --inc) ac_prev=includedir ;; -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ | --includ=* | --inclu=* | --incl=* | --inc=*) includedir=$ac_optarg ;; -infodir | --infodir | --infodi | --infod | --info | --inf) ac_prev=infodir ;; -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) infodir=$ac_optarg ;; -libdir | --libdir | --libdi | --libd) ac_prev=libdir ;; -libdir=* | --libdir=* | --libdi=* | --libd=*) libdir=$ac_optarg ;; -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ | --libexe | --libex | --libe) ac_prev=libexecdir ;; -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ | --libexe=* | --libex=* | --libe=*) libexecdir=$ac_optarg ;; -localedir | --localedir | --localedi | --localed | --locale) ac_prev=localedir ;; -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) localedir=$ac_optarg ;; -localstatedir | --localstatedir | --localstatedi | --localstated \ | --localstate | --localstat | --localsta | --localst | --locals) ac_prev=localstatedir ;; -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) localstatedir=$ac_optarg ;; -mandir | --mandir | --mandi | --mand | --man | --ma | --m) ac_prev=mandir ;; -mandir=* | --mandir=* | --mandi=* | 
--mand=* | --man=* | --ma=* | --m=*) mandir=$ac_optarg ;; -nfp | --nfp | --nf) # Obsolete; use --without-fp. with_fp=no ;; -no-create | --no-create | --no-creat | --no-crea | --no-cre \ | --no-cr | --no-c | -n) no_create=yes ;; -no-recursion | --no-recursion | --no-recursio | --no-recursi \ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) no_recursion=yes ;; -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ | --oldin | --oldi | --old | --ol | --o) ac_prev=oldincludedir ;; -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) oldincludedir=$ac_optarg ;; -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ac_prev=prefix ;; -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) prefix=$ac_optarg ;; -program-prefix | --program-prefix | --program-prefi | --program-pref \ | --program-pre | --program-pr | --program-p) ac_prev=program_prefix ;; -program-prefix=* | --program-prefix=* | --program-prefi=* \ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) program_prefix=$ac_optarg ;; -program-suffix | --program-suffix | --program-suffi | --program-suff \ | --program-suf | --program-su | --program-s) ac_prev=program_suffix ;; -program-suffix=* | --program-suffix=* | --program-suffi=* \ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) program_suffix=$ac_optarg ;; -program-transform-name | --program-transform-name \ | --program-transform-nam | --program-transform-na \ | --program-transform-n | --program-transform- \ | --program-transform | --program-transfor \ | --program-transfo | --program-transf \ | --program-trans | --program-tran \ | --progr-tra | --program-tr | --program-t) ac_prev=program_transform_name ;; -program-transform-name=* | 
--program-transform-name=* \ | --program-transform-nam=* | --program-transform-na=* \ | --program-transform-n=* | --program-transform-=* \ | --program-transform=* | --program-transfor=* \ | --program-transfo=* | --program-transf=* \ | --program-trans=* | --program-tran=* \ | --progr-tra=* | --program-tr=* | --program-t=*) program_transform_name=$ac_optarg ;; -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) ac_prev=pdfdir ;; -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) pdfdir=$ac_optarg ;; -psdir | --psdir | --psdi | --psd | --ps) ac_prev=psdir ;; -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) psdir=$ac_optarg ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) silent=yes ;; -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ | --sbi=* | --sb=*) sbindir=$ac_optarg ;; -sharedstatedir | --sharedstatedir | --sharedstatedi \ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ | --sharedst | --shareds | --shared | --share | --shar \ | --sha | --sh) ac_prev=sharedstatedir ;; -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ | --sha=* | --sh=*) sharedstatedir=$ac_optarg ;; -site | --site | --sit) ac_prev=site ;; -site=* | --site=* | --sit=*) site=$ac_optarg ;; -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ac_prev=srcdir ;; -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) srcdir=$ac_optarg ;; -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ | --syscon | --sysco | --sysc | --sys | --sy) ac_prev=sysconfdir ;; -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) sysconfdir=$ac_optarg ;; -target | --target | --targe | 
--targ | --tar | --ta | --t) ac_prev=target_alias ;; -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) target_alias=$ac_optarg ;; -v | -verbose | --verbose | --verbos | --verbo | --verb) verbose=yes ;; -version | --version | --versio | --versi | --vers | -V) ac_init_version=: ;; -with-* | --with-*) ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=\$ac_optarg ;; -without-* | --without-*) ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=no ;; --x) # Obsolete; use --with-x. 
with_x=yes ;; -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ | --x-incl | --x-inc | --x-in | --x-i) ac_prev=x_includes ;; -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) x_includes=$ac_optarg ;; -x-libraries | --x-libraries | --x-librarie | --x-librari \ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ac_prev=x_libraries ;; -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; -*) as_fn_error $? "unrecognized option: \`$ac_option' Try \`$0 --help' for more information" ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. case $ac_envvar in #( '' | [0-9]* | *[!_$as_cr_alnum]* ) as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; esac eval $ac_envvar=\$ac_optarg export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" ;; esac done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` as_fn_error $? "missing argument to $ac_option" fi if test -n "$ac_unrecognized_opts"; then case $enable_option_checking in no) ;; fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; esac fi # Check all directory arguments for consistency. for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ libdir localedir mandir do eval ac_val=\$$ac_var # Remove trailing slashes. 
case $ac_val in */ ) ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` eval $ac_var=\$ac_val;; esac # Be sure to have absolute directory names. case $ac_val in [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" done # There might be people who depend on the old broken behavior: `$host' # used to hold the argument of --host etc. # FIXME: To remove some day. build=$build_alias host=$host_alias target=$target_alias # FIXME: To remove some day. if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi fi ac_tool_prefix= test -n "$host_alias" && ac_tool_prefix=$host_alias- test "$silent" = yes && exec 6>/dev/null ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || as_fn_error $? "working directory cannot be determined" test "X$ac_ls_di" = "X$ac_pwd_ls_di" || as_fn_error $? "pwd does not report name of working directory" # Find the source files, if location was not specified. if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then the parent directory. ac_confdir=`$as_dirname -- "$as_myself" || $as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_myself" : 'X\(//\)[^/]' \| \ X"$as_myself" : 'X\(//\)$' \| \ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_myself" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` srcdir=$ac_confdir if test ! -r "$srcdir/$ac_unique_file"; then srcdir=.. fi else ac_srcdir_defaulted=no fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." as_fn_error $? 
"cannot find sources ($ac_unique_file) in $srcdir" fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" pwd)` # When building in place, set srcdir=. if test "$ac_abs_confdir" = "$ac_pwd"; then srcdir=. fi # Remove unnecessary trailing slashes from srcdir. # Double slashes in file names in object file debugging info # mess up M-x gdb in Emacs. case $srcdir in */) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; esac for ac_var in $ac_precious_vars; do eval ac_env_${ac_var}_set=\${${ac_var}+set} eval ac_env_${ac_var}_value=\$${ac_var} eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} eval ac_cv_env_${ac_var}_value=\$${ac_var} done # # Report the --help message. # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF \`configure' configures reprepro 4.13.1 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. See below for descriptions of some of the useful variables. Defaults for the options are specified in brackets. Configuration: -h, --help display this help and exit --help=short display options specific to this package --help=recursive display the short help of all the included packages -V, --version display version information and exit -q, --quiet, --silent do not print \`checking ...' 
messages --cache-file=FILE cache test results in FILE [disabled] -C, --config-cache alias for \`--cache-file=config.cache' -n, --no-create do not create output files --srcdir=DIR find the sources in DIR [configure dir or \`..'] Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify an installation prefix other than \`$ac_default_prefix' using \`--prefix', for instance \`--prefix=\$HOME'. For better control, use the options below. Fine tuning of the installation directories: --bindir=DIR user executables [EPREFIX/bin] --sbindir=DIR system admin executables [EPREFIX/sbin] --libexecdir=DIR program executables [EPREFIX/libexec] --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] --datadir=DIR read-only architecture-independent data [DATAROOTDIR] --infodir=DIR info documentation [DATAROOTDIR/info] --localedir=DIR locale-dependent data [DATAROOTDIR/locale] --mandir=DIR man documentation [DATAROOTDIR/man] --docdir=DIR documentation root [DATAROOTDIR/doc/reprepro] --htmldir=DIR html documentation [DOCDIR] --dvidir=DIR dvi documentation [DOCDIR] --pdfdir=DIR pdf documentation [DOCDIR] --psdir=DIR ps documentation [DOCDIR] _ACEOF cat <<\_ACEOF Program names: --program-prefix=PREFIX prepend PREFIX to installed program names --program-suffix=SUFFIX append SUFFIX to installed program names --program-transform-name=PROGRAM run sed PROGRAM on 
installed program names _ACEOF fi if test -n "$ac_init_help"; then case $ac_init_help in short | recursive ) echo "Configuration of reprepro 4.13.1:";; esac cat <<\_ACEOF Optional Features: --disable-option-checking ignore unrecognized --enable/--with options --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] --enable-maintainer-mode enable make rules and dependencies not useful (and sometimes confusing) to the casual installer --disable-dependency-tracking speeds up one-time build --enable-dependency-tracking do not reject slow dependency extractors --disable-largefile omit support for large files Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) --with-libgpgme=path|yes|no Give path to prefix libgpgme was installed with --with-libbz2=path|yes|no Give path to prefix libbz2 was installed with --with-libarchive=path|yes|no Give path to prefix libarchive was installed with --with-static-libarchive=.a-file static libarchive library to be linked against Some influential environment variables: CC C compiler command CFLAGS C compiler flags LDFLAGS linker flags, e.g. -L if you have libraries in a nonstandard directory LIBS libraries to pass to the linker, e.g. -l CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if you have headers in a nonstandard directory CPP C preprocessor Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. Report bugs to . _ACEOF ac_status=$? fi if test "$ac_init_help" = "recursive"; then # If there are subdirs, report their specific --help. for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue test -d "$ac_dir" || { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || continue ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. 
ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix cd "$ac_dir" || { ac_status=$?; continue; } # Check for guested configure. if test -f "$ac_srcdir/configure.gnu"; then echo && $SHELL "$ac_srcdir/configure.gnu" --help=recursive elif test -f "$ac_srcdir/configure"; then echo && $SHELL "$ac_srcdir/configure" --help=recursive else $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi || ac_status=$? cd "$ac_pwd" || { ac_status=$?; break; } done fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF reprepro configure 4.13.1 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF exit fi ## ------------------------ ## ## Autoconf initialization. ## ## ------------------------ ## # ac_fn_c_try_compile LINENO # -------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. 
ac_fn_c_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_compile # ac_fn_c_try_cpp LINENO # ---------------------- # Try to preprocess conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_cpp () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } > conftest.i && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! 
-s conftest.err }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_cpp # ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists, giving a warning if it cannot be compiled using # the include files in INCLUDES and setting the cache variable VAR # accordingly. ac_fn_c_check_header_mongrel () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if eval \${$3+:} false; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 $as_echo_n "checking $2 usability... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_header_compiler=yes else ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 $as_echo_n "checking $2 presence... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <$2> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : ac_header_preproc=yes else ac_header_preproc=no fi rm -f conftest.err conftest.i conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? 
case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( yes:no: ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; no:yes:* ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ( $as_echo "## -------------------------------- ## ## Report this to brlink@debian.org ## ## -------------------------------- ##" ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... 
" >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=\$ac_header_compiler" fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_mongrel # ac_fn_c_try_run LINENO # ---------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. Assumes # that executables *can* be run. ac_fn_c_try_run () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then : ac_retval=0 else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=$ac_status fi rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_run # ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists and can be compiled using the include files in # INCLUDES, setting the cache variable VAR accordingly. 
ac_fn_c_check_header_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_compile # ac_fn_c_check_type LINENO TYPE VAR INCLUDES # ------------------------------------------- # Tests whether TYPE exists after having included INCLUDES, setting cache # variable VAR accordingly. ac_fn_c_check_type () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=no" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { if (sizeof ($2)) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { if (sizeof (($2))) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else eval "$3=yes" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_type # ac_fn_c_try_link LINENO # ----------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. 
ac_fn_c_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. We do this before executing the actions. rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_link # ac_fn_c_check_func LINENO FUNC VAR # ---------------------------------- # Tests whether FUNC exists, setting the cache variable VAR accordingly ac_fn_c_check_func () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Define $2 to an innocuous variant, in case declares $2. For example, HP-UX 11i declares gettimeofday. 
*/ #define $2 innocuous_$2 /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $2 (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $2 /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $2 (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$2 || defined __stub___$2 choke me #endif int main () { return $2 (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_func cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by reprepro $as_me 4.13.1, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ _ACEOF exec 5>>config.log { cat <<_ASUNAME ## --------- ## ## Platform. 
## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` /usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. $as_echo "PATH: $as_dir" done IFS=$as_save_IFS } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. 
ac_configure_args= ac_configure_args0= ac_configure_args1= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; 2) as_fn_append ac_configure_args1 " '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi as_fn_append ac_configure_args " '$ac_arg'" ;; esac done done { ac_configure_args0=; unset ac_configure_args0;} { ac_configure_args1=; unset ac_configure_args1;} # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. # WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? # Save into config.log some information that might help in debugging. { echo $as_echo "## ---------------- ## ## Cache variables. 
## ## ---------------- ##" echo # The following way of writing the cache mishandles newlines in values, ( for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( *${as_nl}ac_space=\ *) sed -n \ "s/'\''/'\''\\\\'\'''\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ;; #( *) sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) echo $as_echo "## ----------------- ## ## Output variables. ## ## ----------------- ##" echo for ac_var in $ac_subst_vars do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then $as_echo "## ------------------- ## ## File substitutions. ## ## ------------------- ##" echo for ac_var in $ac_subst_files do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then $as_echo "## ----------- ## ## confdefs.h. 
## ## ----------- ##" echo cat confdefs.h echo fi test "$ac_signal" != 0 && $as_echo "$as_me: caught signal $ac_signal" $as_echo "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h $as_echo "/* confdefs.h */" > confdefs.h # Predefined preprocessor variables. cat >>confdefs.h <<_ACEOF #define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_URL "$PACKAGE_URL" _ACEOF # Let the site file select an alternate cache file if it wants to. # Prefer an explicitly selected file to automatically selected ones. ac_site_file1=NONE ac_site_file2=NONE if test -n "$CONFIG_SITE"; then # We do not want a PATH search for config.site. case $CONFIG_SITE in #(( -*) ac_site_file1=./$CONFIG_SITE;; */*) ac_site_file1=$CONFIG_SITE;; *) ac_site_file1=./$CONFIG_SITE;; esac elif test "x$prefix" != xNONE; then ac_site_file1=$prefix/share/config.site ac_site_file2=$prefix/etc/config.site else ac_site_file1=$ac_default_prefix/share/config.site ac_site_file2=$ac_default_prefix/etc/config.site fi for ac_site_file in "$ac_site_file1" "$ac_site_file2" do test "x$ac_site_file" = xNONE && continue if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 $as_echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . 
"$ac_site_file" \ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "failed to load site script $ac_site_file See \`config.log' for more details" "$LINENO" 5; } fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special files # actually), so we avoid doing that. DJGPP emulates it as a regular file. if test /dev/null != "$cache_file" && test -f "$cache_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 $as_echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 $as_echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi # Check that the precious variables saved in the cache have kept the same # value. ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then # differences in whitespace do not lead to failure. 
ac_old_val_w=`echo x $ac_old_val` ac_new_val_w=`echo x $ac_new_val` if test "$ac_old_val_w" != "$ac_new_val_w"; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 $as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ac_cache_corrupted=: else { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 $as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} eval $ac_var=\$ac_old_val fi { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 $as_echo "$as_me: former value: \`$ac_old_val'" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 $as_echo "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. *) as_fn_append ac_configure_args " '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 $as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 fi ## -------------------- ## ## Main body of script. 
## ## -------------------- ## ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_aux_dir= for ac_dir in ac "$srcdir"/ac; do if test -f "$ac_dir/install-sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install-sh -c" break elif test -f "$ac_dir/install.sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install.sh -c" break elif test -f "$ac_dir/shtool"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/shtool install -c" break fi done if test -z "$ac_aux_dir"; then as_fn_error $? "cannot find install-sh, install.sh, or shtool in ac \"$srcdir\"/ac" "$LINENO" 5 fi # These three variables are undocumented and unsupported, # and are intended to be withdrawn in a future Autoconf release. # They can cause serious problems if a builder's source tree is in a directory # whose full name contains unusual characters. ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. am__api_version='1.11' # Find a good install program. We prefer a C program (faster), # so one script is as good as another. But avoid the broken or # incompatible versions: # SysV /etc/install, /usr/sbin/install # SunOS /usr/etc/install # IRIX /sbin/install # AIX /bin/install # AmigaOS /C/install, which installs bootblocks on floppy discs # AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag # AFS /usr/afsws/bin/install, which mishandles nonexistent args # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. # Reject install programs that cannot install multiple files. 
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 $as_echo_n "checking for a BSD-compatible install... " >&6; } if test -z "$INSTALL"; then if ${ac_cv_path_install+:} false; then : $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. # Account for people who put trailing slashes in PATH elements. case $as_dir/ in #(( ./ | .// | /[cC]/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ /usr/ucb/* ) ;; *) # OSF1 and SCO ODT 3.0 have their own names for install. # Don't use installbsd from OSF since it installs stuff as root # by default. for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then if test $ac_prog = install && grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # AIX install. It has an incompatible calling convention. : elif test $ac_prog = install && grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # program-specific install script used by HP pwplus--don't use. : else rm -rf conftest.one conftest.two conftest.dir echo one > conftest.one echo two > conftest.two mkdir conftest.dir if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && test -s conftest.one && test -s conftest.two && test -s conftest.dir/conftest.one && test -s conftest.dir/conftest.two then ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" break 3 fi fi fi done done ;; esac done IFS=$as_save_IFS rm -rf conftest.one conftest.two conftest.dir fi if test "${ac_cv_path_install+set}" = set; then INSTALL=$ac_cv_path_install else # As a last resort, use the slow shell script. 
Don't cache a # value for INSTALL within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. INSTALL=$ac_install_sh fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 $as_echo "$INSTALL" >&6; } # Use test -z because SunOS4 sh mishandles braces in ${var-val}. # It thinks the first close brace ends the variable substitution. test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5 $as_echo_n "checking whether build environment is sane... " >&6; } # Just in case sleep 1 echo timestamp > conftest.file # Reject unsafe characters in $srcdir or the absolute working directory # name. Accept space and tab only in the latter. am_lf=' ' case `pwd` in *[\\\"\#\$\&\'\`$am_lf]*) as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;; esac case $srcdir in *[\\\"\#\$\&\'\`$am_lf\ \ ]*) as_fn_error $? "unsafe srcdir value: \`$srcdir'" "$LINENO" 5;; esac # Do `set' in a subshell so we don't clobber the current shell's # arguments. Must try -L first in case configure is actually a # symlink; some systems play weird games with the mod time of symlinks # (eg FreeBSD returns the mod time of the symlink's containing # directory). if ( set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` if test "$*" = "X"; then # -L didn't work. set X `ls -t "$srcdir/configure" conftest.file` fi rm -f conftest.file if test "$*" != "X $srcdir/configure conftest.file" \ && test "$*" != "X conftest.file $srcdir/configure"; then # If neither matched, then we have a broken ls. This can happen # if, for instance, CONFIG_SHELL is bash and it inherits a # broken ls alias from the environment. This has actually # happened. Such a system could not be considered "sane". 
as_fn_error $? "ls -t appears to fail. Make sure there is not a broken alias in your environment" "$LINENO" 5 fi test "$2" = conftest.file ) then # Ok. : else as_fn_error $? "newly created file is older than distributed files! Check your system clock" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } test "$program_prefix" != NONE && program_transform_name="s&^&$program_prefix&;$program_transform_name" # Use a double $ so make ignores it. test "$program_suffix" != NONE && program_transform_name="s&\$&$program_suffix&;$program_transform_name" # Double any \ or $. # By default was `s,x,x', remove it if useless. ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"` # expand $ac_aux_dir to an absolute path am_aux_dir=`cd $ac_aux_dir && pwd` if test x"${MISSING+set}" != xset; then case $am_aux_dir in *\ * | *\ *) MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; *) MISSING="\${SHELL} $am_aux_dir/missing" ;; esac fi # Use eval to expand $SHELL if eval "$MISSING --run true"; then am_missing_run="$MISSING --run " else am_missing_run= { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`missing' script is too old or missing" >&5 $as_echo "$as_me: WARNING: \`missing' script is too old or missing" >&2;} fi if test x"${install_sh}" != xset; then case $am_aux_dir in *\ * | *\ *) install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; *) install_sh="\${SHELL} $am_aux_dir/install-sh" esac fi # Installed binaries are usually stripped using `strip' when the user # run `make install-strip'. However `strip' might not be the right # tool to use in cross-compilation environments, therefore Automake # will honor the `STRIP' environment variable to overrule this program. if test "$cross_compiling" != no; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. 
set dummy ${ac_tool_prefix}strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$STRIP"; then ac_cv_prog_STRIP="$STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_STRIP="${ac_tool_prefix}strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi STRIP=$ac_cv_prog_STRIP if test -n "$STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 $as_echo "$STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_STRIP"; then ac_ct_STRIP=$STRIP # Extract the first word of "strip", so it can be a program name with args. set dummy strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_STRIP"; then ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_STRIP="strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP if test -n "$ac_ct_STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 $as_echo "$ac_ct_STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_STRIP" = x; then STRIP=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac STRIP=$ac_ct_STRIP fi else STRIP="$ac_cv_prog_STRIP" fi fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5 $as_echo_n "checking for a thread-safe mkdir -p... " >&6; } if test -z "$MKDIR_P"; then if ${ac_cv_path_mkdir+:} false; then : $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in mkdir gmkdir; do for ac_exec_ext in '' $ac_executable_extensions; do as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext" || continue case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #( 'mkdir (GNU coreutils) '* | \ 'mkdir (coreutils) '* | \ 'mkdir (fileutils) '4.1*) ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext break 3;; esac done done done IFS=$as_save_IFS fi test -d ./--version && rmdir ./--version if test "${ac_cv_path_mkdir+set}" = set; then MKDIR_P="$ac_cv_path_mkdir -p" else # As a last resort, use the slow shell script. 
Don't cache a # value for MKDIR_P within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. MKDIR_P="$ac_install_sh -d" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 $as_echo "$MKDIR_P" >&6; } mkdir_p="$MKDIR_P" case $mkdir_p in [\\/$]* | ?:[\\/]*) ;; */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;; esac for ac_prog in gawk mawk nawk awk do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_AWK+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$AWK"; then ac_cv_prog_AWK="$AWK" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_AWK="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AWK=$ac_cv_prog_AWK if test -n "$AWK"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 $as_echo "$AWK" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$AWK" && break done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 $as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } set x ${MAKE-make} ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : $as_echo_n "(cached) " >&6 else cat >conftest.make <<\_ACEOF SHELL = /bin/sh all: @echo '@@@%%%=$(MAKE)=@@@%%%' _ACEOF # GNU make sometimes prints "make[1]: Entering ...", which would confuse us. 
case `${MAKE-make} -f conftest.make 2>/dev/null` in *@@@%%%=?*=@@@%%%*) eval ac_cv_prog_make_${ac_make}_set=yes;; *) eval ac_cv_prog_make_${ac_make}_set=no;; esac rm -f conftest.make fi if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } SET_MAKE= else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } SET_MAKE="MAKE=${MAKE-make}" fi rm -rf .tst 2>/dev/null mkdir .tst 2>/dev/null if test -d .tst; then am__leading_dot=. else am__leading_dot=_ fi rmdir .tst 2>/dev/null if test "`cd $srcdir && pwd`" != "`pwd`"; then # Use -I$(srcdir) only when $(srcdir) != ., so that make's output # is not polluted with repeated "-I." am__isrc=' -I$(srcdir)' # test to see if srcdir already configured if test -f $srcdir/config.status; then as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5 fi fi # test whether we have cygpath if test -z "$CYGPATH_W"; then if (cygpath --version) >/dev/null 2>/dev/null; then CYGPATH_W='cygpath -w' else CYGPATH_W=echo fi fi # Define the identity of the package. PACKAGE='reprepro' VERSION='4.13.1' cat >>confdefs.h <<_ACEOF #define PACKAGE "$PACKAGE" _ACEOF cat >>confdefs.h <<_ACEOF #define VERSION "$VERSION" _ACEOF # Some tools Automake needs. ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} # We need awk for the "check" target. The system "awk" is bad on # some platforms. # Always define AMTAR for backward compatibility. Yes, it's still used # in the wild :-( We should find a proper way to deprecate it ... 
AMTAR='$${TAR-tar}' am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -' ac_config_headers="$ac_config_headers config.h" if test "${CFLAGS+set}" != set ; then CFLAGS="-Wall -O2 -g -Wmissing-prototypes -Wstrict-prototypes -Wshadow" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable maintainer-specific portions of Makefiles" >&5 $as_echo_n "checking whether to enable maintainer-specific portions of Makefiles... " >&6; } # Check whether --enable-maintainer-mode was given. if test "${enable_maintainer_mode+set}" = set; then : enableval=$enable_maintainer_mode; USE_MAINTAINER_MODE=$enableval else USE_MAINTAINER_MODE=no fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_MAINTAINER_MODE" >&5 $as_echo "$USE_MAINTAINER_MODE" >&6; } if test $USE_MAINTAINER_MODE = yes; then MAINTAINER_MODE_TRUE= MAINTAINER_MODE_FALSE='#' else MAINTAINER_MODE_TRUE='#' MAINTAINER_MODE_FALSE= fi MAINT=$MAINTAINER_MODE_TRUE DEPDIR="${am__leading_dot}deps" ac_config_commands="$ac_config_commands depfiles" am_make=${MAKE-make} cat > confinc << 'END' am__doit: @echo this is the am__doit target .PHONY: am__doit END # If we don't find an include directive, just comment out the code. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for style of include used by $am_make" >&5 $as_echo_n "checking for style of include used by $am_make... " >&6; } am__include="#" am__quote= _am_result=none # First try GNU make style include. echo "include confinc" > confmf # Ignore all kinds of additional output from `make'. case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=include am__quote= _am_result=GNU ;; esac # Now try BSD make style include. 
if test "$am__include" = "#"; then echo '.include "confinc"' > confmf case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=.include am__quote="\"" _am_result=BSD ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $_am_result" >&5 $as_echo "$_am_result" >&6; } rm -f confinc confmf # Check whether --enable-dependency-tracking was given. if test "${enable_dependency_tracking+set}" = set; then : enableval=$enable_dependency_tracking; fi if test "x$enable_dependency_tracking" != xno; then am_depcomp="$ac_aux_dir/depcomp" AMDEPBACKSLASH='\' am__nodep='_no' fi if test "x$enable_dependency_tracking" != xno; then AMDEP_TRUE= AMDEP_FALSE='#' else AMDEP_TRUE='#' AMDEP_FALSE= fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. 
set dummy cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $# != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 $as_echo_n "checking whether the C compiler works... " >&6; } ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" ac_rmfiles= for ac_file in $ac_files do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; * ) ac_rmfiles="$ac_rmfiles $ac_file";; esac done rm -f $ac_rmfiles if { { ac_try="$ac_link_default" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link_default") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, # so that the user can short-circuit this test for compilers unknown to # Autoconf. for ac_file in $ac_files '' do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; [ab].out ) # We found the default executable, but exeext='' is most # certainly right. 
break;; *.* ) if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi # We set ac_cv_exeext here because the later test for it is not # safe: cross compilers may not add the suffix if given an `-o' # argument, so we may need to know it at that point already. # Even if this section looks crufty: it has the advantage of # actually working. break;; * ) break;; esac done test "$ac_cv_exeext" = no && ac_cv_exeext= else ac_file='' fi if test -z "$ac_file"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "C compiler cannot create executables See \`config.log' for more details" "$LINENO" 5; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 $as_echo_n "checking for C compiler default output file name... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 $as_echo "$ac_file" >&6; } ac_exeext=$ac_cv_exeext rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 $as_echo_n "checking for suffix of executables... " >&6; } if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. 
For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with # `rm'. for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` break;; * ) break;; esac done else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of executables: cannot compile and link See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest conftest$ac_cv_exeext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 $as_echo "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { FILE *f = fopen ("conftest.out", "w"); return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF ac_clean_files="$ac_clean_files conftest.out" # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 $as_echo_n "checking whether we are cross compiling... " >&6; } if test "$cross_compiling" != yes; then { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } if { ac_try='./conftest$ac_cv_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot run C compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details" "$LINENO" 5; } fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 $as_echo "$cross_compiling" >&6; } rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 $as_echo_n "checking for suffix of object files... " >&6; } if ${ac_cv_objext+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.o conftest.obj if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; }; then : for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of object files: cannot compile See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 $as_echo "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if ${ac_cv_c_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if ${ac_cv_prog_cc_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes else CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if ${ac_cv_prog_cc_c89+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include struct stat; /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. 
The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. */ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c89=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then : fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu depcc="$CC" am_compiler_list= { $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 $as_echo_n "checking dependency style of $depcc... 
" >&6; } if ${am_cv_CC_dependencies_compiler_type+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named `D' -- because `-MD' means `put the output # in D'. rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_CC_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` fi am__universal=false case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with # Solaris 8's {/usr,}/bin/sh. touch sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with `-c' and `-o' for the sake of the "dashmstdout" # mode. 
It turns out that the SunPro C++ compiler does not properly # handle `-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # after this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok `-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_CC_dependencies_compiler_type=$depmode break fi fi done cd .. 
rm -rf conftest.dir else am_cv_CC_dependencies_compiler_type=none fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 $as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type if test "x$enable_dependency_tracking" != xno \ && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then am__fastdepCC_TRUE= am__fastdepCC_FALSE='#' else am__fastdepCC_TRUE='#' am__fastdepCC_FALSE= fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 $as_echo_n "checking how to run the C preprocessor... " >&6; } # On Suns, sometimes $CPP names a directory. if test -n "$CPP" && test -d "$CPP"; then CPP= fi if test -z "$CPP"; then if ${ac_cv_prog_CPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CPP needs to be expanded for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" do ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CPP=$CPP fi CPP=$ac_cv_prog_CPP else ac_cv_prog_CPP=$CPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 $as_echo "$CPP" >&6; } ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? 
"C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 $as_echo_n "checking for grep that handles long lines and -e... " >&6; } if ${ac_cv_path_GREP+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$GREP"; then ac_path_GREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in grep ggrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_GREP" || continue # Check for GNU ac_path_GREP and select it if it is found. 
# Check for GNU $ac_path_GREP case `"$ac_path_GREP" --version 2>&1` in *GNU*) ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'GREP' >> "conftest.nl" "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_GREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_GREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_GREP"; then as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_GREP=$GREP fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 $as_echo "$ac_cv_path_GREP" >&6; } GREP="$ac_cv_path_GREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 $as_echo_n "checking for egrep... " >&6; } if ${ac_cv_path_EGREP+:} false; then : $as_echo_n "(cached) " >&6 else if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 then ac_cv_path_EGREP="$GREP -E" else if test -z "$EGREP"; then ac_path_EGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in egrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_EGREP" || continue # Check for GNU ac_path_EGREP and select it if it is found. 
# Check for GNU $ac_path_EGREP case `"$ac_path_EGREP" --version 2>&1` in *GNU*) ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'EGREP' >> "conftest.nl" "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_EGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_EGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_EGREP"; then as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_EGREP=$EGREP fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 $as_echo "$ac_cv_path_EGREP" >&6; } EGREP="$ac_cv_path_EGREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include #include int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdc=yes else ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : else ac_cv_header_stdc=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then $as_echo "#define STDC_HEADERS 1" >>confdefs.h fi # On IRIX 5.3, sys/types and inttypes.h are conflicting. 
for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ inttypes.h stdint.h unistd.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default " if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done ac_fn_c_check_header_mongrel "$LINENO" "minix/config.h" "ac_cv_header_minix_config_h" "$ac_includes_default" if test "x$ac_cv_header_minix_config_h" = xyes; then : MINIX=yes else MINIX= fi if test "$MINIX" = yes; then $as_echo "#define _POSIX_SOURCE 1" >>confdefs.h $as_echo "#define _POSIX_1_SOURCE 2" >>confdefs.h $as_echo "#define _MINIX 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether it is safe to define __EXTENSIONS__" >&5 $as_echo_n "checking whether it is safe to define __EXTENSIONS__... " >&6; } if ${ac_cv_safe_to_define___extensions__+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ # define __EXTENSIONS__ 1 $ac_includes_default int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_safe_to_define___extensions__=yes else ac_cv_safe_to_define___extensions__=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_safe_to_define___extensions__" >&5 $as_echo "$ac_cv_safe_to_define___extensions__" >&6; } test $ac_cv_safe_to_define___extensions__ = yes && $as_echo "#define __EXTENSIONS__ 1" >>confdefs.h $as_echo "#define _ALL_SOURCE 1" >>confdefs.h $as_echo "#define _GNU_SOURCE 1" >>confdefs.h $as_echo "#define _POSIX_PTHREAD_SEMANTICS 1" >>confdefs.h $as_echo "#define _TANDEM_SOURCE 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C99" >&5 $as_echo_n "checking for $CC option to accept ISO C99... 
" >&6; } if ${ac_cv_prog_cc_c99+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c99=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #include #include #include // Check varargs macros. These examples are taken from C99 6.10.3.5. #define debug(...) fprintf (stderr, __VA_ARGS__) #define showlist(...) puts (#__VA_ARGS__) #define report(test,...) ((test) ? puts (#test) : printf (__VA_ARGS__)) static void test_varargs_macros (void) { int x = 1234; int y = 5678; debug ("Flag"); debug ("X = %d\n", x); showlist (The first, second, and third items.); report (x>y, "x is %d but y is %d", x, y); } // Check long long types. #define BIG64 18446744073709551615ull #define BIG32 4294967295ul #define BIG_OK (BIG64 / BIG32 == 4294967297ull && BIG64 % BIG32 == 0) #if !BIG_OK your preprocessor is broken; #endif #if BIG_OK #else your preprocessor is broken; #endif static long long int bignum = -9223372036854775807LL; static unsigned long long int ubignum = BIG64; struct incomplete_array { int datasize; double data[]; }; struct named_init { int number; const wchar_t *name; double average; }; typedef const char *ccp; static inline int test_restrict (ccp restrict text) { // See if C++-style comments work. // Iterate through items via the restricted pointer. // Also check for declarations in for loops. for (unsigned int i = 0; *(text+i) != '\0'; ++i) continue; return 0; } // Check varargs and va_copy. static void test_varargs (const char *format, ...) { va_list args; va_start (args, format); va_list args_copy; va_copy (args_copy, args); const char *str; int number; float fnumber; while (*format) { switch (*format++) { case 's': // string str = va_arg (args_copy, const char *); break; case 'd': // int number = va_arg (args_copy, int); break; case 'f': // float fnumber = va_arg (args_copy, double); break; default: break; } } va_end (args_copy); va_end (args); } int main () { // Check bool. 
_Bool success = false; // Check restrict. if (test_restrict ("String literal") == 0) success = true; char *restrict newvar = "Another string"; // Check varargs. test_varargs ("s, d' f .", "string", 65, 34.234); test_varargs_macros (); // Check flexible array members. struct incomplete_array *ia = malloc (sizeof (struct incomplete_array) + (sizeof (double) * 10)); ia->datasize = 10; for (int i = 0; i < ia->datasize; ++i) ia->data[i] = i * 1.234; // Check named initializers. struct named_init ni = { .number = 34, .name = L"Test wide string", .average = 543.34343, }; ni.number = 58; int dynamic_array[ni.number]; dynamic_array[ni.number - 1] = 543; // work around unused variable warnings return (!success || bignum == 0LL || ubignum == 0uLL || newvar[0] == 'x' || dynamic_array[ni.number - 1] != 543); ; return 0; } _ACEOF for ac_arg in '' -std=gnu99 -std=c99 -c99 -AC99 -D_STDC_C99= -qlanglvl=extc99 do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c99=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c99" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c99" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c99" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c99" >&5 $as_echo "$ac_cv_prog_cc_c99" >&6; } ;; esac if test "x$ac_cv_prog_cc_c99" != xno; then : fi # Check whether --enable-largefile was given. if test "${enable_largefile+set}" = set; then : enableval=$enable_largefile; fi if test "$enable_largefile" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for special C compiler options needed for large files" >&5 $as_echo_n "checking for special C compiler options needed for large files... 
" >&6; } if ${ac_cv_sys_largefile_CC+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_sys_largefile_CC=no if test "$GCC" != yes; then ac_save_CC=$CC while :; do # IRIX 6.2 and later do not support large files by default, # so use the C compiler's -n32 option if that helps. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : break fi rm -f core conftest.err conftest.$ac_objext CC="$CC -n32" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_largefile_CC=' -n32'; break fi rm -f core conftest.err conftest.$ac_objext break done CC=$ac_save_CC rm -f conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_CC" >&5 $as_echo "$ac_cv_sys_largefile_CC" >&6; } if test "$ac_cv_sys_largefile_CC" != no; then CC=$CC$ac_cv_sys_largefile_CC fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _FILE_OFFSET_BITS value needed for large files" >&5 $as_echo_n "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; } if ${ac_cv_sys_file_offset_bits+:} false; then : $as_echo_n "(cached) " >&6 else while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 
1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_file_offset_bits=no; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #define _FILE_OFFSET_BITS 64 #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_file_offset_bits=64; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_sys_file_offset_bits=unknown break done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_file_offset_bits" >&5 $as_echo "$ac_cv_sys_file_offset_bits" >&6; } case $ac_cv_sys_file_offset_bits in #( no | unknown) ;; *) cat >>confdefs.h <<_ACEOF #define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits _ACEOF ;; esac rm -rf conftest* if test $ac_cv_sys_file_offset_bits = unknown; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _LARGE_FILES value needed for large files" >&5 $as_echo_n "checking for _LARGE_FILES value needed for large files... " >&6; } if ${ac_cv_sys_large_files+:} false; then : $as_echo_n "(cached) " >&6 else while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 
1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_large_files=no; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #define _LARGE_FILES 1 #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_large_files=1; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_sys_large_files=unknown break done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_large_files" >&5 $as_echo "$ac_cv_sys_large_files" >&6; } case $ac_cv_sys_large_files in #( no | unknown) ;; *) cat >>confdefs.h <<_ACEOF #define _LARGE_FILES $ac_cv_sys_large_files _ACEOF ;; esac rm -rf conftest* fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5 $as_echo_n "checking whether byte ordering is bigendian... " >&6; } if ${ac_cv_c_bigendian+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_c_bigendian=unknown # See if we're dealing with a universal compiler. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifndef __APPLE_CC__ not a universal capable compiler #endif typedef int dummy; _ACEOF if ac_fn_c_try_compile "$LINENO"; then : # Check for potential -arch flags. It is not universal unless # there are at least two -arch flags with different values. 
ac_arch= ac_prev= for ac_word in $CC $CFLAGS $CPPFLAGS $LDFLAGS; do if test -n "$ac_prev"; then case $ac_word in i?86 | x86_64 | ppc | ppc64) if test -z "$ac_arch" || test "$ac_arch" = "$ac_word"; then ac_arch=$ac_word else ac_cv_c_bigendian=universal break fi ;; esac ac_prev= elif test "x$ac_word" = "x-arch"; then ac_prev=arch fi done fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_c_bigendian = unknown; then # See if sys/param.h defines the BYTE_ORDER macro. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { #if ! (defined BYTE_ORDER && defined BIG_ENDIAN \ && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \ && LITTLE_ENDIAN) bogus endian macros #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : # It does; now see whether it defined to BIG_ENDIAN or not. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { #if BYTE_ORDER != BIG_ENDIAN not big endian #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_bigendian=yes else ac_cv_c_bigendian=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi if test $ac_cv_c_bigendian = unknown; then # See if defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris). cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { #if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN) bogus endian macros #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : # It does; now see whether it defined to _BIG_ENDIAN or not. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { #ifndef _BIG_ENDIAN not big endian #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_bigendian=yes else ac_cv_c_bigendian=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi if test $ac_cv_c_bigendian = unknown; then # Compile a test program. if test "$cross_compiling" = yes; then : # Try to guess by grepping values from an object file. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ short int ascii_mm[] = { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 }; short int ascii_ii[] = { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 }; int use_ascii (int i) { return ascii_mm[i] + ascii_ii[i]; } short int ebcdic_ii[] = { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 }; short int ebcdic_mm[] = { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 }; int use_ebcdic (int i) { return ebcdic_mm[i] + ebcdic_ii[i]; } extern int foo; int main () { return use_ascii (foo) == use_ebcdic (foo); ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then ac_cv_c_bigendian=yes fi if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then if test "$ac_cv_c_bigendian" = unknown; then ac_cv_c_bigendian=no else # finding both strings is unlikely to happen, but who knows? ac_cv_c_bigendian=unknown fi fi fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int main () { /* Are we little or big endian? From Harbison&Steele. 
*/ union { long int l; char c[sizeof (long int)]; } u; u.l = 1; return u.c[sizeof (long int) - 1] == 1; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_c_bigendian=no else ac_cv_c_bigendian=yes fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5 $as_echo "$ac_cv_c_bigendian" >&6; } case $ac_cv_c_bigendian in #( yes) $as_echo "#define WORDS_BIGENDIAN 1" >>confdefs.h ;; #( no) ;; #( universal) $as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h ;; #( *) as_fn_error $? "unknown endianness presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdbool.h that conforms to C99" >&5 $as_echo_n "checking for stdbool.h that conforms to C99... " >&6; } if ${ac_cv_header_stdbool_h+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #ifndef bool "error: bool is not defined" #endif #ifndef false "error: false is not defined" #endif #if false "error: false is not 0" #endif #ifndef true "error: true is not defined" #endif #if true != 1 "error: true is not 1" #endif #ifndef __bool_true_false_are_defined "error: __bool_true_false_are_defined is not defined" #endif struct s { _Bool s: 1; _Bool t; } s; char a[true == 1 ? 1 : -1]; char b[false == 0 ? 1 : -1]; char c[__bool_true_false_are_defined == 1 ? 1 : -1]; char d[(bool) 0.5 == true ? 1 : -1]; /* See body of main program for 'e'. */ char f[(_Bool) 0.0 == false ? 1 : -1]; char g[true]; char h[sizeof (_Bool)]; char i[sizeof s.t]; enum { j = false, k = true, l = false * true, m = true * 256 }; /* The following fails for HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */ _Bool n[m]; char o[sizeof n == m * sizeof n[0] ? 1 : -1]; char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1]; /* Catch a bug in an HP-UX C compiler. 
See http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html */ _Bool q = true; _Bool *pq = &q; int main () { bool e = &s; *pq |= q; *pq |= ! q; /* Refer to every declared value, to avoid compiler optimizations. */ return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l + !m + !n + !o + !p + !q + !pq); ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdbool_h=yes else ac_cv_header_stdbool_h=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdbool_h" >&5 $as_echo "$ac_cv_header_stdbool_h" >&6; } ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default" if test "x$ac_cv_type__Bool" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE__BOOL 1 _ACEOF fi if test $ac_cv_header_stdbool_h = yes; then $as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h fi for ac_func in closefrom strndup dprintf tdestroy do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done found_mktemp=no for ac_func in mkostemp mkstemp do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF found_mktemp=yes ; break fi done if test "$found_mktemp" = "no" ; then as_fn_error $? "Missing mkstemp or mkostemp" "$LINENO" 5 fi ac_fn_c_check_func "$LINENO" "vasprintf" "ac_cv_func_vasprintf" if test "x$ac_cv_func_vasprintf" = xyes; then : else as_fn_error $? "Could not find vasprintf implementation!" 
"$LINENO" 5 fi DBLIBS="" # the only way to find out which is compileable is to look into db.h: ac_fn_c_check_header_mongrel "$LINENO" "db.h" "ac_cv_header_db_h" "$ac_includes_default" if test "x$ac_cv_header_db_h" = xyes; then : else as_fn_error $? "\"no db.h found\"" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for db_create in -ldb" >&5 $as_echo_n "checking for db_create in -ldb... " >&6; } if ${ac_cv_lib_db_db_create+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldb $DBLIBS $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char db_create (); int main () { return db_create (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_db_db_create=yes else ac_cv_lib_db_db_create=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_db_db_create" >&5 $as_echo "$ac_cv_lib_db_db_create" >&6; } if test "x$ac_cv_lib_db_db_create" = xyes; then : DBLIBS="-ldb $DBLIBS" else as_fn_error $? "\"no libdb found\"" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gzopen in -lz" >&5 $as_echo_n "checking for gzopen in -lz... " >&6; } if ${ac_cv_lib_z_gzopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lz $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char gzopen (); int main () { return gzopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_z_gzopen=yes else ac_cv_lib_z_gzopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_z_gzopen" >&5 $as_echo "$ac_cv_lib_z_gzopen" >&6; } if test "x$ac_cv_lib_z_gzopen" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBZ 1 _ACEOF LIBS="-lz $LIBS" else as_fn_error $? "\"no zlib found\"" "$LINENO" 5 fi # Check whether --with-libgpgme was given. if test "${with_libgpgme+set}" = set; then : withval=$with_libgpgme; case "$withval" in no) ;; yes) ac_fn_c_check_header_mongrel "$LINENO" "gpgme.h" "ac_cv_header_gpgme_h" "$ac_includes_default" if test "x$ac_cv_header_gpgme_h" = xyes; then : else as_fn_error $? "\"no gpgme.h found\"" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gpg_strsource in -lgpg-error" >&5 $as_echo_n "checking for gpg_strsource in -lgpg-error... " >&6; } if ${ac_cv_lib_gpg_error_gpg_strsource+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lgpg-error $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char gpg_strsource (); int main () { return gpg_strsource (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_gpg_error_gpg_strsource=yes else ac_cv_lib_gpg_error_gpg_strsource=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_gpg_error_gpg_strsource" >&5 $as_echo "$ac_cv_lib_gpg_error_gpg_strsource" >&6; } if test "x$ac_cv_lib_gpg_error_gpg_strsource" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBGPG_ERROR 1 _ACEOF LIBS="-lgpg-error $LIBS" else as_fn_error $? "\"no libgpg-error found\"" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gpgme_get_protocol_name in -lgpgme" >&5 $as_echo_n "checking for gpgme_get_protocol_name in -lgpgme... " >&6; } if ${ac_cv_lib_gpgme_gpgme_get_protocol_name+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lgpgme $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char gpgme_get_protocol_name (); int main () { return gpgme_get_protocol_name (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_gpgme_gpgme_get_protocol_name=yes else ac_cv_lib_gpgme_gpgme_get_protocol_name=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_gpgme_gpgme_get_protocol_name" >&5 $as_echo "$ac_cv_lib_gpgme_gpgme_get_protocol_name" >&6; } if test "x$ac_cv_lib_gpgme_gpgme_get_protocol_name" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBGPGME 1 _ACEOF LIBS="-lgpgme $LIBS" else as_fn_error $? 
"\"no libgpgme found (need at least 0.4.1)\"" "$LINENO" 5 fi ;; *) CPPFLAGS="$CPPFLAGS -I$withval/include" LIBS="$LIBS -L$withval/lib" ac_fn_c_check_header_mongrel "$LINENO" "gpgme.h" "ac_cv_header_gpgme_h" "$ac_includes_default" if test "x$ac_cv_header_gpgme_h" = xyes; then : else as_fn_error $? "\"no gpgme.h found\"" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gpg_strsource in -lgpg-error" >&5 $as_echo_n "checking for gpg_strsource in -lgpg-error... " >&6; } if ${ac_cv_lib_gpg_error_gpg_strsource+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lgpg-error $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char gpg_strsource (); int main () { return gpg_strsource (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_gpg_error_gpg_strsource=yes else ac_cv_lib_gpg_error_gpg_strsource=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_gpg_error_gpg_strsource" >&5 $as_echo "$ac_cv_lib_gpg_error_gpg_strsource" >&6; } if test "x$ac_cv_lib_gpg_error_gpg_strsource" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBGPG_ERROR 1 _ACEOF LIBS="-lgpg-error $LIBS" else as_fn_error $? "\"no libgpg-error found\"" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gpgme_get_protocol_name in -lgpgme" >&5 $as_echo_n "checking for gpgme_get_protocol_name in -lgpgme... " >&6; } if ${ac_cv_lib_gpgme_gpgme_get_protocol_name+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lgpgme $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char gpgme_get_protocol_name (); int main () { return gpgme_get_protocol_name (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_gpgme_gpgme_get_protocol_name=yes else ac_cv_lib_gpgme_gpgme_get_protocol_name=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_gpgme_gpgme_get_protocol_name" >&5 $as_echo "$ac_cv_lib_gpgme_gpgme_get_protocol_name" >&6; } if test "x$ac_cv_lib_gpgme_gpgme_get_protocol_name" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBGPGME 1 _ACEOF LIBS="-lgpgme $LIBS" else as_fn_error $? "\"no libgpgme found (need at least 0.4.1)\"" "$LINENO" 5 fi ;; esac else ac_fn_c_check_header_mongrel "$LINENO" "gpgme.h" "ac_cv_header_gpgme_h" "$ac_includes_default" if test "x$ac_cv_header_gpgme_h" = xyes; then : else as_fn_error $? "\"no gpgme.h found (to disable run with --without-libgpgme)\"" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gpg_strsource in -lgpg-error" >&5 $as_echo_n "checking for gpg_strsource in -lgpg-error... " >&6; } if ${ac_cv_lib_gpg_error_gpg_strsource+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lgpg-error $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char gpg_strsource (); int main () { return gpg_strsource (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_gpg_error_gpg_strsource=yes else ac_cv_lib_gpg_error_gpg_strsource=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_gpg_error_gpg_strsource" >&5 $as_echo "$ac_cv_lib_gpg_error_gpg_strsource" >&6; } if test "x$ac_cv_lib_gpg_error_gpg_strsource" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBGPG_ERROR 1 _ACEOF LIBS="-lgpg-error $LIBS" else as_fn_error $? "\"no libgpg-error found (to disable run with --without-libgpgme)\"" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gpgme_get_protocol_name in -lgpgme" >&5 $as_echo_n "checking for gpgme_get_protocol_name in -lgpgme... " >&6; } if ${ac_cv_lib_gpgme_gpgme_get_protocol_name+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lgpgme $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char gpgme_get_protocol_name (); int main () { return gpgme_get_protocol_name (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_gpgme_gpgme_get_protocol_name=yes else ac_cv_lib_gpgme_gpgme_get_protocol_name=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_gpgme_gpgme_get_protocol_name" >&5 $as_echo "$ac_cv_lib_gpgme_gpgme_get_protocol_name" >&6; } if test "x$ac_cv_lib_gpgme_gpgme_get_protocol_name" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBGPGME 1 _ACEOF LIBS="-lgpgme $LIBS" else as_fn_error $? "\"did not find libgpgme version 0.4.1 or later (to disable run with --without-libgpgme)\"" "$LINENO" 5 fi fi # Check whether --with-libbz2 was given. if test "${with_libbz2+set}" = set; then : withval=$with_libbz2; case "$withval" in no) ;; yes) { $as_echo "$as_me:${as_lineno-$LINENO}: checking for BZ2_bzCompressInit in -lbz2" >&5 $as_echo_n "checking for BZ2_bzCompressInit in -lbz2... " >&6; } if ${ac_cv_lib_bz2_BZ2_bzCompressInit+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lbz2 $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #ifdef __cplusplus extern "C" #endif char BZ2_bzCompressInit (); int main () { return BZ2_bzCompressInit (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_bz2_BZ2_bzCompressInit=yes else ac_cv_lib_bz2_BZ2_bzCompressInit=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_bz2_BZ2_bzCompressInit" >&5 $as_echo "$ac_cv_lib_bz2_BZ2_bzCompressInit" >&6; } if test "x$ac_cv_lib_bz2_BZ2_bzCompressInit" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBBZ2 1 _ACEOF LIBS="-lbz2 $LIBS" else as_fn_error $? "\"no libbz2 found, despite being told to use it\"" "$LINENO" 5 fi ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: checking for BZ2_bzCompressInit in -lbz2" >&5 $as_echo_n "checking for BZ2_bzCompressInit in -lbz2... " >&6; } if ${ac_cv_lib_bz2_BZ2_bzCompressInit+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lbz2 -L$withval/lib $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char BZ2_bzCompressInit (); int main () { return BZ2_bzCompressInit (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_bz2_BZ2_bzCompressInit=yes else ac_cv_lib_bz2_BZ2_bzCompressInit=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_bz2_BZ2_bzCompressInit" >&5 $as_echo "$ac_cv_lib_bz2_BZ2_bzCompressInit" >&6; } if test "x$ac_cv_lib_bz2_BZ2_bzCompressInit" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBBZ2 1 _ACEOF LIBS="$LIBS -L$withval/lib -lbz2" CPPFLAGS="$CPPFLAGS -I$withval/include" else as_fn_error $? 
"\"no libbz2 found, despite being told to use it\"" "$LINENO" 5 fi ;; esac else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for BZ2_bzCompressInit in -lbz2" >&5 $as_echo_n "checking for BZ2_bzCompressInit in -lbz2... " >&6; } if ${ac_cv_lib_bz2_BZ2_bzCompressInit+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lbz2 $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char BZ2_bzCompressInit (); int main () { return BZ2_bzCompressInit (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_bz2_BZ2_bzCompressInit=yes else ac_cv_lib_bz2_BZ2_bzCompressInit=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_bz2_BZ2_bzCompressInit" >&5 $as_echo "$ac_cv_lib_bz2_BZ2_bzCompressInit" >&6; } if test "x$ac_cv_lib_bz2_BZ2_bzCompressInit" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBBZ2 1 _ACEOF LIBS="-lbz2 $LIBS" else { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \"no libbz2 found, compiling without\"" >&5 $as_echo "$as_me: WARNING: \"no libbz2 found, compiling without\"" >&2;} fi fi ARCHIVELIBS="" ARCHIVECPP="" # Check whether --with-libarchive was given. if test "${with_libarchive+set}" = set; then : withval=$with_libarchive; case "$withval" in no) ;; yes) { $as_echo "$as_me:${as_lineno-$LINENO}: checking for archive_read_new in -larchive" >&5 $as_echo_n "checking for archive_read_new in -larchive... " >&6; } if ${ac_cv_lib_archive_archive_read_new+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-larchive $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char archive_read_new (); int main () { return archive_read_new (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_archive_archive_read_new=yes else ac_cv_lib_archive_archive_read_new=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_archive_archive_read_new" >&5 $as_echo "$ac_cv_lib_archive_archive_read_new" >&6; } if test "x$ac_cv_lib_archive_archive_read_new" = xyes; then : ac_fn_c_check_header_mongrel "$LINENO" "archive.h" "ac_cv_header_archive_h" "$ac_includes_default" if test "x$ac_cv_header_archive_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBARCHIVE 1 _ACEOF ARCHIVELIBS="-larchive" else as_fn_error $? "Could not find archive.h" "$LINENO" 5 fi else as_fn_error $? "Could not find libarchive" "$LINENO" 5 fi ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: checking for archive_read_new in -larchive" >&5 $as_echo_n "checking for archive_read_new in -larchive... " >&6; } if ${ac_cv_lib_archive_archive_read_new+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-larchive -L$withval/lib $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char archive_read_new (); int main () { return archive_read_new (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_archive_archive_read_new=yes else ac_cv_lib_archive_archive_read_new=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_archive_archive_read_new" >&5 $as_echo "$ac_cv_lib_archive_archive_read_new" >&6; } if test "x$ac_cv_lib_archive_archive_read_new" = xyes; then : mysave_CPPFLAGS="$CPPFLAGS" CPPFLAGS="-I$withval/include $CPPFLAGS" ac_fn_c_check_header_mongrel "$LINENO" "archive.h" "ac_cv_header_archive_h" "$ac_includes_default" if test "x$ac_cv_header_archive_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBARCHIVE 1 _ACEOF ARCHIVELIBS="-L$withval/lib -larchive" ARCHIVECPP="-I$withval/include" else as_fn_error $? "Could not find archive.h" "$LINENO" 5 fi CPPFLAGS="$mysave_CPPFLAGS" else as_fn_error $? "Could not find libarchive" "$LINENO" 5 fi ;; esac else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for archive_read_new in -larchive" >&5 $as_echo_n "checking for archive_read_new in -larchive... " >&6; } if ${ac_cv_lib_archive_archive_read_new+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-larchive $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char archive_read_new (); int main () { return archive_read_new (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_archive_archive_read_new=yes else ac_cv_lib_archive_archive_read_new=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_archive_archive_read_new" >&5 $as_echo "$ac_cv_lib_archive_archive_read_new" >&6; } if test "x$ac_cv_lib_archive_archive_read_new" = xyes; then : ac_fn_c_check_header_mongrel "$LINENO" "archive.h" "ac_cv_header_archive_h" "$ac_includes_default" if test "x$ac_cv_header_archive_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBARCHIVE 1 _ACEOF ARCHIVELIBS="-larchive" fi fi fi # Check whether --with-static-libarchive was given. if test "${with_static_libarchive+set}" = set; then : withval=$with_static_libarchive; case "$withval" in no|yes) as_fn_error $? "--with-static-libarchive needs an .a file as parameter" "$LINENO" 5 ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: checking for archive_read_new in -lc" >&5 $as_echo_n "checking for archive_read_new in -lc... " >&6; } if ${ac_cv_lib_c_archive_read_new+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lc $withval $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char archive_read_new (); int main () { return archive_read_new (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_c_archive_read_new=yes else ac_cv_lib_c_archive_read_new=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_c_archive_read_new" >&5 $as_echo "$ac_cv_lib_c_archive_read_new" >&6; } if test "x$ac_cv_lib_c_archive_read_new" = xyes; then : mysave_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$ARCHIVECPP $CPPFLAGS" ac_fn_c_check_header_mongrel "$LINENO" "archive.h" "ac_cv_header_archive_h" "$ac_includes_default" if test "x$ac_cv_header_archive_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBARCHIVE 1 _ACEOF ARCHIVELIBS="$withval" else as_fn_error $? "Could not find archive.h" "$LINENO" 5 fi CPPFLAGS="$mysave_CPPFLAGS" else as_fn_error $? "Error linking against $withval" "$LINENO" 5 fi esac fi if test -n "$ARCHIVELIBS"; then HAVE_LIBARCHIVE_TRUE= HAVE_LIBARCHIVE_FALSE='#' else HAVE_LIBARCHIVE_TRUE='#' HAVE_LIBARCHIVE_FALSE= fi ac_config_files="$ac_config_files Makefile docs/Makefile tests/Makefile" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. # # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. _ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. 
# So, we kill variables containing newlines. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. ( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes: double-quote # substitution turns \\\\ into \\, and sed turns \\ into \. sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) | sed ' /^ac_cv_env_/b end t clear :clear s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then if test "x$cache_file" != "x/dev/null"; then { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 $as_echo "$as_me: updating cache $cache_file" >&6;} if test ! 
-f "$cache_file" || test -h "$cache_file"; then cat confcache >"$cache_file" else case $cache_file in #( */* | ?:*) mv -f confcache "$cache_file"$$ && mv -f "$cache_file"$$ "$cache_file" ;; #( *) mv -f confcache "$cache_file" ;; esac fi fi else { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 $as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' DEFS=-DHAVE_CONFIG_H ac_libobjs= ac_ltlibobjs= U= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`$as_echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs if test -n "$EXEEXT"; then am__EXEEXT_TRUE= am__EXEEXT_FALSE='#' else am__EXEEXT_TRUE='#' am__EXEEXT_FALSE= fi if test -z "${MAINTAINER_MODE_TRUE}" && test -z "${MAINTAINER_MODE_FALSE}"; then as_fn_error $? "conditional \"MAINTAINER_MODE\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then as_fn_error $? "conditional \"AMDEP\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then as_fn_error $? "conditional \"am__fastdepCC\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${HAVE_LIBARCHIVE_TRUE}" && test -z "${HAVE_LIBARCHIVE_FALSE}"; then as_fn_error $? 
"conditional \"HAVE_LIBARCHIVE\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi : "${CONFIG_STATUS=./config.status}" ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 $as_echo "$as_me: creating $CONFIG_STATUS" >&6;} as_write_fail=0 cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. 
if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. 
in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... 
# ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. 
ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 ## ----------------------------------- ## ## Main body of $CONFIG_STATUS script. 
## ## ----------------------------------- ## _ASEOF test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Save the log message, to keep $0 and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by reprepro $as_me 4.13.1, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF case $ac_config_files in *" "*) set x $ac_config_files; shift; ac_config_files=$*;; esac case $ac_config_headers in *" "*) set x $ac_config_headers; shift; ac_config_headers=$*;; esac cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # Files that config.status was made for. config_files="$ac_config_files" config_headers="$ac_config_headers" config_commands="$ac_config_commands" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ac_cs_usage="\ \`$as_me' instantiates files and other configuration actions from templates according to the current configuration. Unless the files and actions are specified as TAGs, all are instantiated by default. Usage: $0 [OPTION]... [TAG]... -h, --help print this help, then exit -V, --version print version number and configuration settings, then exit --config print configuration, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE --header=FILE[:TEMPLATE] instantiate the configuration header FILE Configuration files: $config_files Configuration headers: $config_headers Configuration commands: $config_commands Report bugs to ." 
_ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ reprepro config.status 4.13.1 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" Copyright (C) 2012 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." ac_pwd='$ac_pwd' srcdir='$srcdir' INSTALL='$INSTALL' MKDIR_P='$MKDIR_P' AWK='$AWK' test -n "\$AWK" || AWK=awk _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # The default lists apply if the user does not specify any file. ac_need_defaults=: while test $# != 0 do case $1 in --*=?*) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ac_shift=: ;; --*=) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg= ac_shift=: ;; *) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; esac case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) $as_echo "$ac_cs_version"; exit ;; --config | --confi | --conf | --con | --co | --c ) $as_echo "$ac_cs_config"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; '') as_fn_error $? "missing file argument" ;; esac as_fn_append CONFIG_FILES " '$ac_optarg'" ac_need_defaults=false;; --header | --heade | --head | --hea ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac as_fn_append CONFIG_HEADERS " '$ac_optarg'" ac_need_defaults=false;; --he | --h) # Conflict between --help and --header as_fn_error $? 
"ambiguous option: \`$1' Try \`$0 --help' for more information.";; --help | --hel | -h ) $as_echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) as_fn_error $? "unrecognized option: \`$1' Try \`$0 --help' for more information." ;; *) as_fn_append ac_config_targets " $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 if \$ac_cs_recheck; then set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion shift \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 CONFIG_SHELL='$SHELL' export CONFIG_SHELL exec "\$@" fi _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. ## _ASBOX $as_echo "$ac_log" } >&5 _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # # INIT-COMMANDS # AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Handling of arguments. for ac_config_target in $ac_config_targets do case $ac_config_target in "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;; "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; "docs/Makefile") CONFIG_FILES="$CONFIG_FILES docs/Makefile" ;; "tests/Makefile") CONFIG_FILES="$CONFIG_FILES tests/Makefile" ;; *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. 
if $ac_need_defaults; then test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands fi # Have a temporary directory for convenience. Make it in the build tree # simply because there is no reason against having it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. # Hook for its removal unless debugging. # Note that there is a small window in which the directory will not be cleaned: # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= ac_tmp= trap 'exit_status=$? : "${ac_tmp:=$tmp}" { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status ' 0 trap 'as_fn_exit 1' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 ac_tmp=$tmp # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. # This happens for instance with `./config.status config.h'. if test -n "$CONFIG_FILES"; then ac_cr=`echo X | tr X '\015'` # On cygwin, bash can eat \r inside `` if the user requested igncr. # But we know of no other shell where ac_cr would be empty at this # point, so we can use a bashism as a fallback. if test "x$ac_cr" = x; then eval ac_cr=\$\'\\r\' fi ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ac_cs_awk_cr='\\r' else ac_cs_awk_cr=$ac_cr fi echo 'BEGIN {' >"$ac_tmp/subs1.awk" && _ACEOF { echo "cat >conf$$subs.awk <<_ACEOF" && echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && echo "_ACEOF" } >conf$$subs.sh || as_fn_error $? 
"could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` ac_delim='%!_!# ' for ac_last_try in false false false false false :; do . ./conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` if test $ac_delim_n = $ac_delim_num; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done rm -f conf$$subs.sh cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && _ACEOF sed -n ' h s/^/S["/; s/!.*/"]=/ p g s/^[^!]*!// :repl t repl s/'"$ac_delim"'$// t delim :nl h s/\(.\{148\}\)..*/\1/ t more1 s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ p n b repl :more1 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t nl :delim h s/\(.\{148\}\)..*/\1/ t more2 s/["\\]/\\&/g; s/^/"/; s/$/"/ p b :more2 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t delim ' >$CONFIG_STATUS || ac_write_fail=1 rm -f conf$$subs.awk cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACAWK cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && for (key in S) S_is_set[key] = 1 FS = "" } { line = $ 0 nfields = split(line, field, "@") substed = 0 len = length(field[1]) for (i = 2; i < nfields; i++) { key = field[i] keylen = length(key) if (S_is_set[key]) { value = S[key] line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) len += length(value) + length(field[++i]) substed = 1 } else len += 1 + keylen } print line } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ || as_fn_error $? 
"could not setup config files machinery" "$LINENO" 5 _ACEOF # VPATH may cause trouble with some makes, so we remove sole $(srcdir), # ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ h s/// s/^/:/ s/[ ]*$/:/ s/:\$(srcdir):/:/g s/:\${srcdir}:/:/g s/:@srcdir@:/:/g s/^:*// s/:*$// x s/\(=[ ]*\).*/\1/ G s/\n// s/^[^=]*=[ ]*$// }' fi cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 fi # test -n "$CONFIG_FILES" # Set up the scripts for CONFIG_HEADERS section. # No need to generate them if there are no CONFIG_HEADERS. # This happens for instance with `./config.status Makefile'. if test -n "$CONFIG_HEADERS"; then cat >"$ac_tmp/defines.awk" <<\_ACAWK || BEGIN { _ACEOF # Transform confdefs.h into an awk script `defines.awk', embedded as # here-document in config.status, that substitutes the proper values into # config.h.in to produce config.h. # Create a delimiter string that does not exist in confdefs.h, to ease # handling of long lines. ac_delim='%!_!# ' for ac_last_try in false false :; do ac_tt=`sed -n "/$ac_delim/p" confdefs.h` if test -z "$ac_tt"; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done # For the awk script, D is an array of macro values keyed by name, # likewise P contains macro parameters if any. Preserve backslash # newline sequences. 
ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* sed -n ' s/.\{148\}/&'"$ac_delim"'/g t rset :rset s/^[ ]*#[ ]*define[ ][ ]*/ / t def d :def s/\\$// t bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3"/p s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p d :bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3\\\\\\n"\\/p t cont s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p t cont d :cont n s/.\{148\}/&'"$ac_delim"'/g t clear :clear s/\\$// t bsnlc s/["\\]/\\&/g; s/^/"/; s/$/"/p d :bsnlc s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p b cont ' >$CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 for (key in D) D_is_set[key] = 1 FS = "" } /^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { line = \$ 0 split(line, arg, " ") if (arg[1] == "#") { defundef = arg[2] mac1 = arg[3] } else { defundef = substr(arg[1], 2) mac1 = arg[2] } split(mac1, mac2, "(") #) macro = mac2[1] prefix = substr(line, 1, index(line, defundef) - 1) if (D_is_set[macro]) { # Preserve the white space surrounding the "#". print prefix "define", macro P[macro] D[macro] next } else { # Replace #undef with comments. This is necessary, for example, # in the case of _POSIX_SOURCE, which is predefined and required # on some systems where configure will not decide to define it. if (defundef == "undef") { print "/*", prefix defundef, macro, "*/" next } } } { print } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 fi # test -n "$CONFIG_HEADERS" eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" shift for ac_tag do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) as_fn_error $? 
"invalid tag \`$ac_tag'" "$LINENO" 5;; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$ac_tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac as_fn_append ac_file_inputs " '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 $as_echo "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. case $configure_input in #( *\&* | *\|* | *\\* ) ac_sed_conf_input=`$as_echo "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac case $ac_tag in *:-:* | *:-) cat >"$ac_tmp/stdin" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 
2>/dev/null || $as_echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir="$ac_dir"; as_fn_mkdir_p ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix case $ac_mode in :F) # # CONFIG_FILE # case $INSTALL in [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; esac ac_MKDIR_P=$MKDIR_P case $MKDIR_P in [\\/$]* | ?:[\\/]* ) ;; */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; esac _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. 
ac_datarootdir_hack=; ac_datarootdir_seen= ac_sed_dataroot=' /datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 $as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_datarootdir_hack=' s&@datadir@&$datadir&g s&@docdir@&$docdir&g s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF # Neutralize VPATH when `$srcdir' = `.'. # Shell code in configure.ac might set extrasub. # FIXME: do we really want to maintain this feature? cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_sed_extra="$ac_vpsub $extrasub _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s|@configure_input@|$ac_sed_conf_input|;t t s&@top_builddir@&$ac_top_builddir_sub&;t t s&@top_build_prefix@&$ac_top_build_prefix&;t t s&@srcdir@&$ac_srcdir&;t t s&@abs_srcdir@&$ac_abs_srcdir&;t t s&@top_srcdir@&$ac_top_srcdir&;t t s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t s&@INSTALL@&$ac_INSTALL&;t t s&@MKDIR_P@&$ac_MKDIR_P&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ >$ac_tmp/out || as_fn_error $? 
"could not create $ac_file" "$LINENO" 5 test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ "$ac_tmp/out"`; test -z "$ac_out"; } && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&5 $as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&2;} rm -f "$ac_tmp/stdin" case $ac_file in -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; esac \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; :H) # # CONFIG_HEADER # if test x"$ac_file" != x-; then { $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" } >"$ac_tmp/config.h" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 $as_echo "$as_me: $ac_file is unchanged" >&6;} else rm -f "$ac_file" mv "$ac_tmp/config.h" "$ac_file" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 fi else $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ || as_fn_error $? "could not create -" "$LINENO" 5 fi # Compute "$ac_file"'s index in $config_headers. _am_arg="$ac_file" _am_stamp_count=1 for _am_header in $config_headers :; do case $_am_header in $_am_arg | $_am_arg:* ) break ;; * ) _am_stamp_count=`expr $_am_stamp_count + 1` ;; esac done echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || $as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$_am_arg" : 'X\(//\)[^/]' \| \ X"$_am_arg" : 'X\(//\)$' \| \ X"$_am_arg" : 'X\(/\)' \| . 
2>/dev/null || $as_echo X"$_am_arg" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'`/stamp-h$_am_stamp_count ;; :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 $as_echo "$as_me: executing $ac_file commands" >&6;} ;; esac case $ac_file$ac_mode in "depfiles":C) test x"$AMDEP_TRUE" != x"" || { # Autoconf 2.62 quotes --file arguments for eval, but not when files # are listed without --file. Let's play safe and only enable the eval # if we detect the quoting. case $CONFIG_FILES in *\'*) eval set x "$CONFIG_FILES" ;; *) set x $CONFIG_FILES ;; esac shift for mf do # Strip MF so we end up with the name of the file. mf=`echo "$mf" | sed -e 's/:.*$//'` # Check whether this is an Automake generated Makefile or not. # We used to match only the files named `Makefile.in', but # some people rename them; so instead we look at the file content. # Grep'ing the first line is not enough: some people post-process # each Makefile.in and add a new line on top of each file to say so. # Grep'ing the whole file is not good either: AIX grep has a line # limit of 2048, but all sed's we know have understand at least 4000. if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then dirpart=`$as_dirname -- "$mf" || $as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$mf" : 'X\(//\)[^/]' \| \ X"$mf" : 'X\(//\)$' \| \ X"$mf" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$mf" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` else continue fi # Extract the definition of DEPDIR, am__include, and am__quote # from the Makefile without running `make'. 
DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` test -z "$DEPDIR" && continue am__include=`sed -n 's/^am__include = //p' < "$mf"` test -z "am__include" && continue am__quote=`sed -n 's/^am__quote = //p' < "$mf"` # When using ansi2knr, U may be empty or an underscore; expand it U=`sed -n 's/^U = //p' < "$mf"` # Find all dependency output files, they are included files with # $(DEPDIR) in their names. We invoke sed twice because it is the # simplest approach to changing $(DEPDIR) to its actual value in the # expansion. for file in `sed -n " s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do # Make sure the directory exists. test -f "$dirpart/$file" && continue fdir=`$as_dirname -- "$file" || $as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$file" : 'X\(//\)[^/]' \| \ X"$file" : 'X\(//\)$' \| \ X"$file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir=$dirpart/$fdir; as_fn_mkdir_p # echo "creating $dirpart/$file" echo '# dummy' > "$dirpart/$file" done done } ;; esac done # for ac_tag as_fn_exit 0 _ACEOF ac_clean_files=$ac_clean_files_save test $ac_write_fail = 0 || as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 # configure is writing to config.log, and then calls config.status. # config.status does its own redirection, appending to config.log. # Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. 
if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. $ac_cs_success || as_fn_exit 1 fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi reprepro-4.13.1/ar.h0000644000175100017510000000145012152651661011141 00000000000000#ifndef DEBCOMP_AR_H #define DEBCOMP_AR_H struct ar_archive; retvalue ar_open(/*@out@*/struct ar_archive **, const char *); void ar_close(/*@only@*/struct ar_archive *); /* RET_OK = next is there, RET_NOTHING = eof, < 0 = error */ retvalue ar_nextmember(struct ar_archive *, /*@out@*/char ** /*filename*/); /* set compression for the next member */ void ar_archivemember_setcompression(struct ar_archive *, enum compression); /* the following can be used for libarchive to read an file in the ar * after ar_nextmember returned sucessfully. * All references get invalid after the ar_nextmember is called again. */ int ar_archivemember_close(struct archive *, void *); int ar_archivemember_open(struct archive *, void *); ssize_t ar_archivemember_read(struct archive *, void *, const void **); #endif reprepro-4.13.1/filterlist.c0000644000175100017510000003412212152651661012715 00000000000000/* This file is part of "reprepro" * Copyright (C) 2005 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include "error.h" #include "mprintf.h" #include "strlist.h" #include "names.h" #include "configparser.h" #include "filterlist.h" struct filterlistfile { size_t reference_count; char *filename; size_t filename_len; /*@owned@*//*@null@*/ struct filterlistitem *root; /*@dependent@*//*@null@*/ const struct filterlistitem *last; /*@owned@*//*@null@*/ struct filterlistfile *next; } *listfiles = NULL; struct filterlistitem { /*@owned@*//*@null@*/ struct filterlistitem *next; char *packagename; char *version; enum filterlisttype what; }; static void filterlistitems_free(/*@null@*//*@only@*/struct filterlistitem *list) { while (list != NULL) { struct filterlistitem *next = list->next; free(list->version); free(list->packagename); free(list); list = next; } } static void filterlistfile_unlock(struct filterlistfile *list) { assert (list != NULL); if (list->reference_count <= 1) { struct filterlistfile **p = &listfiles; assert (list->reference_count == 1); if (list->reference_count == 0) return; while (*p != NULL && *p != list) p = &(*p)->next; assert (p != NULL); if (*p == list) { *p = list->next; filterlistitems_free(list->root); free(list->filename); free(list); } } else list->reference_count--; } static inline retvalue filterlistfile_parse(struct filterlistfile *n, const char *filename, FILE *f) { char *lineend, *namestart, *nameend, *what, *version; int cmp; enum filterlisttype type; struct filterlistitem *h; char line[1001]; int lineno = 0; struct 
filterlistitem **last = &n->root; while (fgets(line, 1000, f) != NULL) { lineno++; lineend = strchr(line, '\n'); if (lineend == NULL) { fprintf(stderr, "Overlong or unterminated line in '%s'!\n", filename); return RET_ERROR; } while (lineend >= line && xisspace(*lineend)) *(lineend--) = '\0'; /* Ignore line only containing whitespace */ if (line[0] == '\0') continue; namestart = line; while (*namestart != '\0' && xisspace(*namestart)) namestart++; nameend=namestart; while (*nameend != '\0' && !xisspace(*nameend)) nameend++; what = nameend; while (*what != '\0' && xisspace(*what)) *(what++)='\0'; if (*what == '\0') { fprintf(stderr, "Malformed line in '%s': %d!\n", filename, lineno); return RET_ERROR; } version = NULL; if (strcmp(what, "install") == 0) { type = flt_install; } else if (strcmp(what, "deinstall") == 0) { type = flt_deinstall; } else if (strcmp(what, "purge") == 0) { type = flt_purge; } else if (strcmp(what, "hold") == 0) { type = flt_hold; } else if (strcmp(what, "supersede") == 0) { type = flt_supersede; } else if (strcmp(what, "upgradeonly") == 0) { type = flt_upgradeonly; } else if (strcmp(what, "warning") == 0) { type = flt_warning; } else if (strcmp(what, "error") == 0) { type = flt_error; } else if (what[0] == '=') { what++; while (*what != '\0' && xisspace(*what)) what++; version = what; if (*version == '\0') { fprintf(stderr, "Malformed line %d in '%s': missing version after '='!\n", lineno, filename); return RET_ERROR; } while (*what != '\0' && !xisspace(*what)) what++; while (*what != '\0' && xisspace(*what)) *(what++) = '\0'; if (*what != '\0') { fprintf(stderr, "Malformed line %d in '%s': space in version!\n", lineno, filename); return RET_ERROR; } type = flt_install; } else { fprintf(stderr, "Unknown status in '%s':%d: '%s'!\n", filename, lineno, what); return RET_ERROR; } if (*last == NULL || strcmp(namestart, (*last)->packagename) < 0) last = &n->root; cmp = -1; while (*last != NULL && (cmp=strcmp(namestart, (*last)->packagename)) > 0) 
last = &((*last)->next); if (cmp == 0) { fprintf(stderr, "Two lines describing '%s' in '%s'!\n", namestart, filename); return RET_ERROR; } h = zNEW(struct filterlistitem); if (FAILEDTOALLOC(h)) { return RET_ERROR_OOM; } h->next = *last; *last = h; h->what = type; h->packagename = strdup(namestart); if (FAILEDTOALLOC(h->packagename)) { return RET_ERROR_OOM; } if (version == NULL) h->version = NULL; else { h->version = strdup(version); if (FAILEDTOALLOC(h->version)) return RET_ERROR_OOM; } } n->last = *last; return RET_OK; } static inline retvalue filterlistfile_read(struct filterlistfile *n, const char *filename) { FILE *f; retvalue r; f = fopen(filename, "r"); if (f == NULL) { fprintf(stderr, "Cannot open %s for reading: %s!\n", filename, strerror(errno)); return RET_ERROR; } r = filterlistfile_parse(n, filename, f); // Can this return an yet unseen error? was read-only.. (void)fclose(f); return r; } static inline retvalue filterlistfile_getl(const char *filename, size_t len, struct filterlistfile **list) { struct filterlistfile *p; retvalue r; for (p = listfiles ; p != NULL ; p = p->next) { if (p->filename_len == len && strncmp(p->filename, filename, len) == 0) { p->reference_count++; *list = p; return RET_OK; } } p = zNEW(struct filterlistfile); if (FAILEDTOALLOC(p)) return RET_ERROR_OOM; p->reference_count = 1; p->filename = strndup(filename, len); p->filename_len = len; if (FAILEDTOALLOC(p->filename)) { free(p); return RET_ERROR_OOM; } char *fullfilename = configfile_expandname(p->filename, NULL); if (FAILEDTOALLOC(fullfilename)) r = RET_ERROR_OOM; else { r = filterlistfile_read(p, fullfilename); free(fullfilename); } if (RET_IS_OK(r)) { p->next = listfiles; listfiles = p; *list = p; } else { filterlistitems_free(p->root); free(p->filename); free(p); } return r; } static inline retvalue filterlistfile_get(/*@only@*/char *filename, /*@out@*/struct filterlistfile **list) { struct filterlistfile *p; retvalue r; size_t len = strlen(filename); for (p = listfiles ; p 
!= NULL ; p = p->next) { if (p->filename_len == len && strncmp(p->filename, filename, len) == 0) { p->reference_count++; *list = p; free(filename); return RET_OK; } } p = zNEW(struct filterlistfile); if (FAILEDTOALLOC(p)) { free(filename); return RET_ERROR_OOM; } p->reference_count = 1; p->filename = filename; p->filename_len = len; if (FAILEDTOALLOC(p->filename)) { free(p); return RET_ERROR_OOM; } char *fullfilename = configfile_expandname(p->filename, NULL); if (FAILEDTOALLOC(fullfilename)) r = RET_ERROR_OOM; else { r = filterlistfile_read(p, fullfilename); free(fullfilename); } if (RET_IS_OK(r)) { p->next = listfiles; listfiles = p; *list = p; } else { filterlistitems_free(p->root); free(p->filename); free(p); } return r; } void filterlist_release(struct filterlist *list) { size_t i; assert(list != NULL); if (list->files != NULL) { for (i = 0 ; i < list->count ; i++) filterlistfile_unlock(list->files[i]); free(list->files); list->files = NULL; } else { assert (list->count == 0); } } static const struct constant filterlisttype_listtypes[] = { {"install", (int)flt_install}, {"hold", (int)flt_hold}, {"supersede", (int)flt_supersede}, {"deinstall", (int)flt_deinstall}, {"purge", (int)flt_purge}, {"upgradeonly", (int)flt_upgradeonly}, {"warning", (int)flt_warning}, {"error", (int)flt_error}, {NULL, 0} }; retvalue filterlist_load(struct filterlist *list, struct configiterator *iter) { enum filterlisttype defaulttype; size_t count; struct filterlistfile **files; retvalue r; char *filename; r = config_getenum(iter, filterlisttype, listtypes, &defaulttype); if (r == RET_NOTHING || r == RET_ERROR_UNKNOWNFIELD) { fprintf(stderr, "Error parsing %s, line %u, column %u: Expected default action as first argument to FilterList: (one of install, purge, hold, ...)\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter)); return RET_ERROR; } if (RET_WAS_ERROR(r)) return r; count = 0; files = NULL; while ((r = config_getword(iter, &filename)) != RET_NOTHING) { 
/* (inside filterlist_load's word loop: grow the files array by one
 * entry and load the next listed filter file) */
		struct filterlistfile **n;

		n = realloc(files, (count+1)* sizeof(struct filterlistfile *));
		if (FAILEDTOALLOC(n)) {
			free(filename);
			r = RET_ERROR_OOM;
		} else {
			n[count] = NULL;
			files = n;
			// TODO: make filename only
			r = filterlistfile_get(filename, &files[count]);
			if (RET_IS_OK(r))
				count++;
		}
		if (RET_WAS_ERROR(r)) {
			/* on error: unlock everything loaded so far */
			while (count > 0) {
				count--;
				filterlistfile_unlock(files[count]);
			}
			free(files);
			return r;
		}
	}
	list->count = count;
	list->files = files;
	list->defaulttype = defaulttype;
	list->set = true;
	return RET_OK;
}

/* Look name up in the sorted linked list, starting near the remembered
 * position list->last so that in-order queries stay cheap.
 * Returns true when found, leaving list->last pointing at the hit. */
static inline bool find(const char *name, /*@null@*/struct filterlistfile *list) {
	int cmp;
	/*@dependent@*/const struct filterlistitem *last = list->last;

	assert (last != NULL);

	if (last->next != NULL) {
		cmp = strcmp(name, last->next->packagename);
		if (cmp == 0) {
			list->last = last->next;
			return true;
		}
	}
	if (last->next == NULL || cmp < 0) {
		/* name sorts before last->next: check last itself,
		 * otherwise restart from the head of the list */
		cmp = strcmp(name, last->packagename);
		if (cmp == 0) {
			return true;
		} else if (cmp > 0)
			/* strictly between last and last->next: absent */
			return false;
		last = list->root;
		cmp = strcmp(name, last->packagename);
		if (cmp == 0) {
			list->last = list->root;
			return true;
		} else if (cmp < 0)
			return false;
	}
	/* now we are after last */
	while (last->next != NULL) {
		cmp = strcmp(name, last->next->packagename);
		if (cmp == 0) {
			list->last = last->next;
			return true;
		}
		if (cmp < 0) {
			list->last = last;
			return false;
		}
		last = last->next;
	}
	list->last = last;
	return false;
}

/* Return the configured outcome for package name (and version) by
 * consulting each loaded filter file in order; falls back to the
 * list's default type when no file mentions the package. */
enum filterlisttype filterlist_find(const char *name, const char *version, const struct filterlist *list) {
	size_t i;

	for (i = 0 ; i < list->count ; i++) {
		if (list->files[i]->root == NULL)
			continue;
		if (!find(name, list->files[i]))
			continue;
		/* an entry without a version matches any version */
		if (list->files[i]->last->version == NULL)
			return list->files[i]->last->what;
		if (strcmp(list->files[i]->last->version, version) == 0)
			return list->files[i]->last->what;
	}
	return list->defaulttype;
}

/* command-line filter for binary packages */
struct filterlist cmdline_bin_filter = {
	.count = 0, .files = NULL,
	/* as long as nothing added, this does not change anything.
* Once something is added, that will be auto_hold */
	.defaulttype = flt_unchanged,
	.set = false,
};

/* command-line filter for source packages */
struct filterlist cmdline_src_filter = {
	.count = 0, .files = NULL,
	/* as long as nothing added, this does not change anything.
	 * Once something is added, that will be auto_hold */
	.defaulttype = flt_unchanged,
	.set = false,
};

/* Make sure a command-line filter list has its in-memory filter file
 * allocated; slot 0 is reserved for packages added one by one. */
static retvalue filterlist_cmdline_init(struct filterlist *l) {
	if (l->count == 0) {
		l->files = nzNEW(2, struct filterlistfile *);
		if (FAILEDTOALLOC(l->files))
			return RET_ERROR_OOM;
		l->files[0] = zNEW(struct filterlistfile);
		if (FAILEDTOALLOC(l->files[0]))
			return RET_ERROR_OOM;
		l->files[0]->reference_count = 1;
		l->count = 1;
	}
	return RET_OK;
}

/* Add a filter list file given on the command line ("-" reads stdin)
 * to the source (src) or binary filter; switches the filter's default
 * outcome to auto_hold. */
retvalue filterlist_cmdline_add_file(bool src, const char *filename) {
	retvalue r;
	struct filterlist *l = src ? &cmdline_src_filter : &cmdline_bin_filter;
	char *name;

	r = filterlist_cmdline_init(l);
	if (RET_WAS_ERROR(r))
		return r;
	l->set = true;
	l->defaulttype = flt_auto_hold;
	if (strcmp(filename, "-") == 0)
		filename = "/dev/stdin";
	/* filterlistfile_get takes ownership of name */
	name = strdup(filename);
	if (FAILEDTOALLOC(name))
		return RET_ERROR_OOM;
	if (l->count > 1) {
		struct filterlistfile **n;

		n = realloc(l->files,
			(l->count + 1) * sizeof(struct filterlistfile *));
		if (FAILEDTOALLOC(n)) {
			free(name);
			return RET_ERROR_OOM;
		}
		n[l->count++] = NULL;
		l->files = n;
	} else {
		/* already allocated in _init */
		assert (l->count == 1);
		l->count++;
	}
	return filterlistfile_get(name, &l->files[l->count - 1]);
}

/* Add a single package ("name", "name=version" or "name:outcome")
 * given on the command line to the corresponding filter. */
retvalue filterlist_cmdline_add_pkg(bool src, const char *package) {
	retvalue r;
	enum filterlisttype what;
	struct filterlist *l = src ?
&cmdline_src_filter : &cmdline_bin_filter; struct filterlistfile *f; struct filterlistitem **p, *h; char *name, *version; const char *c; int cmp; r = filterlist_cmdline_init(l); if (RET_WAS_ERROR(r)) return r; l->set = true; l->defaulttype = flt_auto_hold; c = strchr(package, '='); if (c != NULL) { what = flt_install; name = strndup(package, c - package); if (FAILEDTOALLOC(name)) return RET_ERROR_OOM; version = strdup(c + 1); if (FAILEDTOALLOC(version)) { free(name); return RET_ERROR_OOM; } } else { version = NULL; c = strchr(package, ':'); if (c == NULL) { what = flt_install; name = strndup(package, c - package); if (FAILEDTOALLOC(name)) return RET_ERROR_OOM; } else { const struct constant *t = filterlisttype_listtypes; while (t->name != NULL) { if (strcmp(c + 1, t->name) == 0) { what = t->value; break; } t++; } if (t->name == NULL) { fprintf(stderr, "Error: unknown filter-outcome '%s' (expected 'install' or ...)\n", c + 1); return RET_ERROR; } } name = strndup(package, c - package); if (FAILEDTOALLOC(name)) return RET_ERROR_OOM; } f = l->files[0]; assert (f != NULL); p = &f->root; cmp = -1; while (*p != NULL && (cmp = strcmp(name, (*p)->packagename)) > 0) p = &((*p)->next); if (cmp == 0) { fprintf(stderr, "Package in command line filter two times: '%s'\n", name); free(name); free(version); return RET_ERROR; } h = zNEW(struct filterlistitem); if (FAILEDTOALLOC(h)) { free(name); free(version); return RET_ERROR_OOM; } h->next = *p; *p = h; h->what = what; h->packagename = name; h->version = version; f->last = h; return RET_OK; } reprepro-4.13.1/sourcecheck.h0000644000175100017510000000032212152651661013032 00000000000000#ifndef REPREPRO_SOURCECHECK_H #define REPREPRO_SOURCECHECK_H retvalue unusedsources(struct distribution *); retvalue sourcemissing(struct distribution *); retvalue reportcruft(struct distribution *); #endif reprepro-4.13.1/globmatch.c0000644000175100017510000001125612152651661012477 00000000000000/* This file is part of "reprepro" * Copyright (C) 
2009 Bernhard R. Link
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
 */
/* NOTE(review): the system header names below were lost in extraction
 * (empty #include directives) — restore from upstream globmatch.c. */
#include #include #include #include #include
#ifdef TEST_GLOBMATCH
#include #include
#endif
#include "error.h"
#include "globmatch.h"

#ifdef NOPARANOIA
#define Assert(a) /* */
#else
#define Assert(a) assert(a)
#endif

/* check if a string matches a pattern, the pattern may contain * and ?.
 * This algorithm should be in O( strlen(pattern) * strlen(string) ) */
bool globmatch(const char *string, const char *pattern) {
	int i, l = strlen(pattern);
	int smallest_possible = 0, largest_possible = 0;
	/* possible[i] == true means: the part of string consumed so far
	 * can end at pattern position i (NFA-style wildcard matching) */
	bool possible[ l + 1 ];
	const char *p;

	if (strlen(pattern) > (size_t)INT_MAX)
		return false;
	memset(possible, 0, sizeof(possible));
	/* the first character must match the first pattern
	 * character or the first one after the first star */
	possible[smallest_possible] = true;
	while (pattern[largest_possible] == '*')
		largest_possible++;
	Assert (largest_possible <= l);
	possible[largest_possible] = true;
	for (p = string ; *p != '\0' ; p++) {
		Assert (largest_possible >= smallest_possible);
		for (i = largest_possible ; i >= smallest_possible ; i--) {
			if (!possible[i])
				continue;
			/* no character matches the end of the pattern: */
			if (pattern[i] == '\0') {
				Assert (i == l);
				possible[i] = false;
				/* shrink the live window from the top */
				do {
					if (largest_possible <= smallest_possible)
						return false;
					largest_possible--;
				} while (!possible[largest_possible]);
				i = largest_possible + 1;
				continue;
			}
/* (interior of globmatch()'s per-input-character loop:
 * advance each currently-possible pattern position i) */
			Assert (i < l);
			if (pattern[i] == '*') {
				int j = i + 1;
				while (pattern[j] == '*')
					j++;
				/* all the '*' match one character: */
				Assert (j <= l);
				possible[j] = true;
				if (j > largest_possible)
					largest_possible = j;
				/* or more than one */
				continue;
			}
			if (pattern[i] == '[') {
				/* character class: scan to the closing ']',
				 * honouring leading negation and a-b ranges */
				int j = i+1;
				bool matches = false, negate = false;

				if (pattern[j] == '!' || pattern[j] == '^') {
					j++;
					negate = true;
				}
				if (pattern[j] == '\0')
					return false;
				do {
					if (pattern[j+1] == '-'
							&& pattern[j+2] != ']'
							&& pattern[j+2] != '\0') {
						if (*p >= pattern[j]
								&& *p <= pattern[j+2])
							matches = true;
						j += 3;
					} else {
						if (*p == pattern[j])
							matches = true;
						j++;
					}
					if (pattern[j] == '\0') {
						/* stray [ matches nothing */
						return false;
					}
				} while (pattern[j] != ']');
				j++;
				Assert (j <= l);
				if (negate)
					matches = !matches;
				if (matches) {
					possible[j] = true;
					/* if the next character is a star, that
					 * might also match 0 characters */
					while (pattern[j] == '*')
						j++;
					Assert (j <= l);
					possible[j] = true;
					if (j > largest_possible)
						largest_possible = j;
				}
			} else if (pattern[i] == '?'
|| pattern[i] == *p) {
				/* '?' or literal match: advance to i+1 (and
				 * past any directly following stars, which may
				 * also match zero characters) */
				int j = i + 1;
				possible[j] = true;
				/* if the next character is a star, that
				 * might also match 0 characters */
				while (pattern[j] == '*')
					j++;
				Assert (j <= l);
				possible[j] = true;
				if (j > largest_possible)
					largest_possible = j;
			}
			possible[i] = false;
			/* keep [smallest_possible, largest_possible] a tight
			 * window around the still-possible positions */
			if (i == smallest_possible) {
				smallest_possible++;
				while (!possible[smallest_possible]) {
					if (smallest_possible >= largest_possible)
						return false;
					smallest_possible++;
				}
				Assert (smallest_possible <= l);
			}
			if (i == largest_possible) {
				do {
					if (largest_possible <= smallest_possible)
						return false;
					largest_possible--;
				} while (!possible[largest_possible]);
				Assert (largest_possible >= 0);
			}
		}
	}
	/* end of string matches end of pattern,
	 * if largest got < smallest, then this is also false */
	return possible[l];
}

#ifdef TEST_GLOBMATCH
/* test driver: arguments are <pattern> <string>, prints true/false */
int main(int argc, const char *argv[]) {
	if (argc != 3) {
		fputs("Wrong number of arguments!\n", stderr);
		exit(EXIT_FAILURE);
	}
	if (globmatch(argv[2], argv[1])) {
		puts("true");
		return 0;
	} else {
		puts("false");
		return 0;
	}
}
#endif
reprepro-4.13.1/debfile.c0000644000175100017510000001372212152651661012131 00000000000000/* This file is part of "reprepro"
 * Copyright (C) 2006 Bernhard R. Link
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "uncompression.h" #include "ar.h" #include "chunks.h" #include "debfile.h" #ifndef HAVE_LIBARCHIVE #error Why did this file got compiled instead of extractcontrol.c? #endif static retvalue read_control_file(char **control, const char *debfile, struct archive *tar, struct archive_entry *entry) { int64_t size; char *buffer, *n; const char *afterchanges; size_t len, controllen; ssize_t got; size = archive_entry_size(entry); if (size <= 0) { fprintf(stderr, "Error: Empty control file within %s!\n", debfile); return RET_ERROR; } if (size > 10*1024*1024) { fprintf(stderr, "Error: Ridiculously long control file within %s!\n", debfile); return RET_ERROR; } buffer = malloc(size + 2); if (FAILEDTOALLOC(buffer)) return RET_ERROR_OOM; len = 0; while ((got = archive_read_data(tar, buffer+len, ((size_t)size+1)-len)) > 0 && !interrupted()) { len += got; if (len > (size_t)size) { fprintf(stderr, "Internal Error: libarchive miscalculated length of the control file inside '%s',\n" " perhaps the file is corrupt, perhaps libarchive!\n", debfile); free(buffer); return RET_ERROR; } } if (interrupted()) { free(buffer); return RET_ERROR_INTERRUPTED; } if (got < 0) { free(buffer); fprintf(stderr, "Error reading control file from %s\n", debfile); return RET_ERROR; } if (len < (size_t)size) fprintf(stderr, "Warning: libarchive miscalculated length of the control file inside '%s'.\n" "Maybe the file is corrupt, perhaps libarchive!\n", debfile); buffer[len] = '\0'; controllen = chunk_extract(buffer, buffer, len, true, &afterchanges); if (controllen == 0) { fprintf(stderr, "Could only find spaces within control file of '%s'!\n", debfile); free(buffer); 
return RET_ERROR; } if ((size_t)(afterchanges - buffer) < len) { if (*afterchanges == '\0') fprintf(stderr, "Unexpected \\0 character within control file of '%s'!\n", debfile); else fprintf(stderr, "Unexpected data after ending empty line in control file of '%s'!\n", debfile); free(buffer); return RET_ERROR; } assert (buffer[controllen] == '\0'); n = realloc(buffer, controllen+1); if (FAILEDTOALLOC(n)) { free(buffer); return RET_ERROR_OOM; } *control = n; return RET_OK; } static retvalue read_control_tar(char **control, const char *debfile, struct ar_archive *ar, struct archive *tar) { struct archive_entry *entry; int a; retvalue r; archive_read_support_format_tar(tar); archive_read_support_format_gnutar(tar); a = archive_read_open(tar, ar, ar_archivemember_open, ar_archivemember_read, ar_archivemember_close); if (a != ARCHIVE_OK) { fprintf(stderr, "open control.tar.gz within '%s' failed: %d:%d:%s\n", debfile, a, archive_errno(tar), archive_error_string(tar)); return RET_ERROR; } while ((a=archive_read_next_header(tar, &entry)) == ARCHIVE_OK) { if (strcmp(archive_entry_pathname(entry), "./control") != 0 && strcmp(archive_entry_pathname(entry), "control") != 0) { a = archive_read_data_skip(tar); if (a != ARCHIVE_OK) { int e = archive_errno(tar); printf( "Error skipping %s within data.tar.gz from %s: %d=%s\n", archive_entry_pathname(entry), debfile, e, archive_error_string(tar)); return (e!=0)?(RET_ERRNO(e)):RET_ERROR; } if (interrupted()) return RET_ERROR_INTERRUPTED; } else { r = read_control_file(control, debfile, tar, entry); if (r != RET_NOTHING) return r; } } if (a != ARCHIVE_EOF) { int e = archive_errno(tar); printf("Error reading control.tar.gz from %s: %d=%s\n", debfile, e, archive_error_string(tar)); return (e!=0)?(RET_ERRNO(e)):RET_ERROR; } fprintf(stderr, "Could not find a control file within control.tar.gz within '%s'!\n", debfile); return RET_ERROR_MISSING; } retvalue extractcontrol(char **control, const char *debfile) { struct ar_archive *ar; retvalue 
r; bool hadcandidate = false; r = ar_open(&ar, debfile); if (RET_WAS_ERROR(r)) return r; assert (r != RET_NOTHING); do { char *filename; enum compression c; r = ar_nextmember(ar, &filename); if (RET_IS_OK(r)) { if (strncmp(filename, "control.tar", 11) != 0) { free(filename); continue; } hadcandidate = true; for (c = 0 ; c < c_COUNT ; c++) { if (strcmp(filename + 11, uncompression_suffix[c]) == 0) break; } if (c >= c_COUNT) { free(filename); continue; } ar_archivemember_setcompression(ar, c); if (uncompression_supported(c)) { struct archive *tar; tar = archive_read_new(); r = read_control_tar(control, debfile, ar, tar); // TODO run archive_read_close to get error messages? archive_read_finish(tar); if (r != RET_NOTHING) { ar_close(ar); free(filename); return r; } } free(filename); } } while (RET_IS_OK(r)); ar_close(ar); if (hadcandidate) fprintf(stderr, "Could not find a suitable control.tar file within '%s'!\n", debfile); else fprintf(stderr, "Could not find a control.tar file within '%s'!\n", debfile); return RET_ERROR_MISSING; } reprepro-4.13.1/main.c0000644000175100017510000042460212152655314011465 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003,2004,2005,2006,2007,2008,2009,2011,2012 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "error.h" #define DEFINE_IGNORE_VARIABLES #include "ignore.h" #include "mprintf.h" #include "strlist.h" #include "atoms.h" #include "dirs.h" #include "names.h" #include "filecntl.h" #include "files.h" #include "filelist.h" #include "target.h" #include "reference.h" #include "binaries.h" #include "sources.h" #include "release.h" #include "aptmethod.h" #include "updates.h" #include "pull.h" #include "upgradelist.h" #include "signature.h" #include "debfile.h" #include "checkindeb.h" #include "checkindsc.h" #include "checkin.h" #include "downloadcache.h" #include "termdecide.h" #include "tracking.h" #include "optionsfile.h" #include "dpkgversions.h" #include "incoming.h" #include "override.h" #include "log.h" #include "copypackages.h" #include "uncompression.h" #include "sourceextraction.h" #include "pool.h" #include "printlistformat.h" #include "globmatch.h" #include "needbuild.h" #include "archallflood.h" #include "sourcecheck.h" #include "uploaderslist.h" #include "sizes.h" #include "filterlist.h" #include "descriptions.h" #include "outhook.h" #ifndef STD_BASE_DIR #define STD_BASE_DIR "." 
#endif #ifndef STD_METHOD_DIR #define STD_METHOD_DIR "/usr/lib/apt/methods" #endif #ifndef LLONG_MAX #define LLONG_MAX __LONG_LONG_MAX__ #endif /* global options available to the rest */ struct global_config global; /* global options */ static char /*@only@*/ /*@notnull@*/ // *g* *x_basedir = NULL, *x_outdir = NULL, *x_distdir = NULL, *x_dbdir = NULL, *x_listdir = NULL, *x_confdir = NULL, *x_logdir = NULL, *x_morguedir = NULL, *x_methoddir = NULL; static char /*@only@*/ /*@null@*/ *x_section = NULL, *x_priority = NULL, *x_component = NULL, *x_architecture = NULL, *x_packagetype = NULL; static char /*@only@*/ /*@null@*/ *listformat = NULL; static char /*@only@*/ /*@null@*/ *endhook = NULL; static char /*@only@*/ /*@null@*/ *outhook = NULL; static char /*@only@*/ *gunzip = NULL, *bunzip2 = NULL, *unlzma = NULL, *unxz = NULL, *lunzip = NULL, *gnupghome = NULL; static int listmax = -1; static int listskip = 0; static int delete = D_COPY; static bool nothingiserror = false; static bool nolistsdownload = false; static bool keepunreferenced = false; static bool keepunusednew = false; static bool askforpassphrase = false; static bool guessgpgtty = true; static bool skipold = true; static size_t waitforlock = 0; static enum exportwhen export = EXPORT_CHANGED; int verbose = 0; static bool fast = false; static bool verbosedatabase = false; static enum spacecheckmode spacecheckmode = scm_FULL; /* default: 100 MB for database to grow */ static off_t reserveddbspace = 1024*1024*100 /* 1MB safety margin for other fileystems */; static off_t reservedotherspace = 1024*1024; /* define for each config value an owner, and only higher owners are allowed * to change something owned by lower owners. 
*/ enum config_option_owner config_state, #define O(x) owner_ ## x = CONFIG_OWNER_DEFAULT O(fast), O(x_morguedir), O(x_outdir), O(x_basedir), O(x_distdir), O(x_dbdir), O(x_listdir), O(x_confdir), O(x_logdir), O(x_methoddir), O(x_section), O(x_priority), O(x_component), O(x_architecture), O(x_packagetype), O(nothingiserror), O(nolistsdownload), O(keepunusednew), O(keepunreferenced), O(keeptemporaries), O(keepdirectories), O(askforpassphrase), O(skipold), O(export), O(waitforlock), O(spacecheckmode), O(reserveddbspace), O(reservedotherspace), O(guessgpgtty), O(verbosedatabase), O(gunzip), O(bunzip2), O(unlzma), O(unxz), O(lunzip), O(gnupghome), O(listformat), O(listmax), O(listskip), O(onlysmalldeletes), O(endhook), O(outhook); #undef O #define CONFIGSET(variable, value) if (owner_ ## variable <= config_state) { \ owner_ ## variable = config_state; \ variable = value; } #define CONFIGGSET(variable, value) if (owner_ ## variable <= config_state) { \ owner_ ## variable = config_state; \ global.variable = value; } #define CONFIGDUP(variable, value) if (owner_ ## variable <= config_state) { \ owner_ ## variable = config_state; \ free(variable); \ variable = strdup(value); \ if (FAILEDTOALLOC(variable)) { \ (void)fputs("Out of Memory!", \ stderr); \ exit(EXIT_FAILURE); \ } } #define y(type, name) type name #define n(type, name) UNUSED(type dummy_ ## name) #define ACTION_N(act, sp, args, name) static retvalue action_n_ ## act ## _ ## sp ## _ ## name ( \ UNUSED(struct distribution *dummy2), \ sp(const char *, section), \ sp(const char *, priority), \ act(const struct atomlist *, architectures), \ act(const struct atomlist *, components), \ act(const struct atomlist *, packagetypes), \ int argc, args(const char *, argv[])) #define ACTION_C(act, sp, name) static retvalue action_c_ ## act ## _ ## sp ## _ ## name ( \ struct distribution *alldistributions, \ sp(const char *, section), \ sp(const char *, priority), \ act(const struct atomlist *, architectures), \ act(const struct 
atomlist *, components), \ act(const struct atomlist *, packagetypes), \ int argc, const char *argv[]) #define ACTION_B(act, sp, u, name) static retvalue action_b_ ## act ## _ ## sp ## _ ## name ( \ u(struct distribution *, alldistributions), \ sp(const char *, section), \ sp(const char *, priority), \ act(const struct atomlist *, architectures), \ act(const struct atomlist *, components), \ act(const struct atomlist *, packagetypes), \ int argc, const char *argv[]) #define ACTION_L(act, sp, u, args, name) static retvalue action_l_ ## act ## _ ## sp ## _ ## name ( \ struct distribution *alldistributions, \ sp(const char *, section), \ sp(const char *, priority), \ act(const struct atomlist *, architectures), \ act(const struct atomlist *, components), \ act(const struct atomlist *, packagetypes), \ int argc, args(const char *, argv[])) #define ACTION_R(act, sp, d, a, name) static retvalue action_r_ ## act ## _ ## sp ## _ ## name ( \ d(struct distribution *, alldistributions), \ sp(const char *, section), \ sp(const char *, priority), \ act(const struct atomlist *, architectures), \ act(const struct atomlist *, components), \ act(const struct atomlist *, packagetypes), \ a(int, argc), a(const char *, argv[])) #define ACTION_T(act, sp, name) static retvalue action_t_ ## act ## _ ## sp ## _ ## name ( \ UNUSED(struct distribution *ddummy), \ sp(const char *, section), \ sp(const char *, priority), \ act(const struct atomlist *, architectures), \ act(const struct atomlist *, components), \ act(const struct atomlist *, packagetypes), \ UNUSED(int argc), UNUSED(const char *dummy4[])) #define ACTION_F(act, sp, d, a, name) static retvalue action_f_ ## act ## _ ## sp ## _ ## name ( \ d(struct distribution *, alldistributions), \ sp(const char *, section), \ sp(const char *, priority), \ act(const struct atomlist *, architectures), \ act(const struct atomlist *, components), \ act(const struct atomlist *, packagetypes), \ a(int, argc), a(const char *, argv[])) #define 
ACTION_RF(act, sp, ud, u, name) static retvalue action_rf_ ## act ## _ ## sp ## _ ## name ( \ ud(struct distribution *, alldistributions), \ sp(const char *, section), \ sp(const char *, priority), \ act(const struct atomlist *, architectures), \ act(const struct atomlist *, components), \ act(const struct atomlist *, packagetypes), \ u(int, argc), u(const char *, argv[])) #define ACTION_D(act, sp, u, name) static retvalue action_d_ ## act ## _ ## sp ## _ ## name ( \ struct distribution *alldistributions, \ sp(const char *, section), \ sp(const char *, priority), \ act(const struct atomlist *, architectures), \ act(const struct atomlist *, components), \ act(const struct atomlist *, packagetypes), \ u(int, argc), u(const char *, argv[])) ACTION_N(n, n, y, printargs) { int i; fprintf(stderr, "argc: %d\n", argc); for (i=0 ; i < argc ; i++) { fprintf(stderr, "%s\n", argv[i]); } return RET_OK; } ACTION_N(n, n, n, dumpuncompressors) { enum compression c; assert (argc == 1); for (c = 0 ; c < c_COUNT ; c++) { if (c == c_none) continue; printf("%s: ", uncompression_suffix[c]); if (uncompression_builtin(c)) { if (extern_uncompressors[c] != NULL) printf("built-in + '%s'\n", extern_uncompressors[c]); else printf("built-in\n"); } else if (extern_uncompressors[c] != NULL) printf("'%s'\n", extern_uncompressors[c]); else switch (c) { case c_bzip2: printf( "not supported (install bzip2 or use --bunzip2 to tell where bunzip2 is).\n"); break; case c_lzma: printf( "not supported (install lzma or use --unlzma to tell where unlzma is).\n"); break; case c_xz: printf( "not supported (install xz-utils or use --unxz to tell where unxz is).\n"); break; case c_lunzip: printf( "not supported (install lzip or use --lunzip to tell where lunzip is).\n"); break; default: printf("not supported\n"); } } return RET_OK; } ACTION_N(n, n, y, uncompress) { enum compression c; assert (argc == 4); c = c_none + 1; while (c < c_COUNT && strcmp(argv[1], uncompression_suffix[c]) != 0) c++; if (c >= c_COUNT) { 
fprintf(stderr, "Unknown compression format '%s'\n", argv[1]); return RET_ERROR; } if (!uncompression_supported(c)) { fprintf(stderr, "Cannot uncompress format '%s'\nCheck __dumpuncompressors for more details.\n", argv[1]); return RET_ERROR; } return uncompress_file(argv[2], argv[3], c); } ACTION_N(n, n, y, extractcontrol) { retvalue result; char *control; assert (argc == 2); result = extractcontrol(&control, argv[1]); if (RET_IS_OK(result)) { puts(control); free(control); } return result; } ACTION_N(n, n, y, extractfilelist) { retvalue result; char *filelist; size_t fls, len; size_t lengths[256]; const unsigned char *dirs[256]; int depth = 0, i, j; assert (argc == 2); result = getfilelist(&filelist, &fls, argv[1]); if (RET_IS_OK(result)) { const unsigned char *p = (unsigned char*)filelist; while (*p != '\0') { unsigned char c = *(p++); if (c > 2) { if (depth >= c) depth -= c; else depth = 0; } else if (c == 2) { len = 0; while (*p == 255) { len +=255; p++; } len += *(p++); lengths[depth] = len; dirs[depth++] = p; p += len; } else { len = 0; while (*p == 255) { len +=255; p++; } len += *(p++); (void)putchar('/'); for (i = 0 ; i < depth ; i++) { const unsigned char *n = dirs[i]; j = lengths[i]; while (j-- > 0) (void)putchar(*(n++)); (void)putchar('/'); } while (len-- > 0) (void)putchar(*(p++)); (void)putchar('\n'); } } free(filelist); } return result; } ACTION_N(n, n, y, extractsourcesection) { struct dsc_headers dsc; struct sourceextraction *extraction; char *section = NULL, *priority = NULL, *directory, *filename; retvalue result, r; bool broken; int i; assert (argc == 2); r = sources_readdsc(&dsc, argv[1], argv[1], &broken); if (!RET_IS_OK(r)) return r; if (broken && !IGNORING(brokensignatures, "'%s' contains only broken signatures.\n" "This most likely means the file was damaged or edited improperly\n", argv[1])) return RET_ERROR; r = dirs_getdirectory(argv[1], &directory); if (RET_WAS_ERROR(r)) { sources_done(&dsc); return r; } assert (RET_IS_OK(r)); extraction 
= sourceextraction_init(§ion, &priority); if (FAILEDTOALLOC(extraction)) { sources_done(&dsc); return RET_ERROR_OOM; } for (i = 0 ; i < dsc.files.names.count ; i ++) sourceextraction_setpart(extraction, i, dsc.files.names.values[i]); result = RET_OK; while (sourceextraction_needs(extraction, &i)) { filename = calc_dirconcat(directory, dsc.files.names.values[i]); if (FAILEDTOALLOC(filename)) { result = RET_ERROR_OOM; break; } r = sourceextraction_analyse(extraction, filename); free(filename); if (RET_WAS_ERROR(r)) { result = r; break; } } free(directory); if (RET_WAS_ERROR(result)) { sourceextraction_abort(extraction); } else { r = sourceextraction_finish(extraction); RET_UPDATE(result, r); } if (RET_IS_OK(result)) { if (section != NULL) printf("Section: %s\n", section); if (priority != NULL) printf("Priority: %s\n", priority); } sources_done(&dsc); free(section); free(priority); return result; } ACTION_F(n, n, n, y, fakeemptyfilelist) { assert (argc == 2); return fakefilelist(argv[1]); } ACTION_F(n, n, n, y, generatefilelists) { assert (argc == 2 || argc == 3); if (argc == 2) return files_regenerate_filelist(false); if (strcmp(argv[1], "reread") == 0) return files_regenerate_filelist(true); fprintf(stderr, "Error: Unrecognized second argument '%s'\n" "Syntax: reprepro generatefilelists [reread]\n", argv[1]); return RET_ERROR; } ACTION_T(n, n, translatefilelists) { return database_translate_filelists(); } ACTION_N(n, n, n, translatelegacychecksums) { assert (argc == 1); return database_translate_legacy_checksums( verbosedatabase || verbose > 10); } ACTION_F(n, n, n, n, addmd5sums) { char buffer[2000], *c, *m; retvalue result, r; result = RET_NOTHING; while (fgets(buffer, 1999, stdin) != NULL) { struct checksums *checksums; c = strchr(buffer, '\n'); if (c == NULL) { fprintf(stderr, "Line too long\n"); return RET_ERROR; } *c = '\0'; m = strchr(buffer, ' '); if (m == NULL) { fprintf(stderr, "Malformed line\n"); return RET_ERROR; } *m = '\0'; m++; if (*m == '\0') { 
fprintf(stderr, "Malformed line\n"); return RET_ERROR; } r = checksums_setall(&checksums, m, strlen(m)); if (RET_WAS_ERROR(r)) return r; r = files_add_checksums(buffer, checksums); RET_UPDATE(result, r); checksums_free(checksums); } return result; } ACTION_R(n, n, n, y, removereferences) { assert (argc == 2); return references_remove(argv[1]); } ACTION_R(n, n, n, n, dumpreferences) { return references_dump(); } static retvalue checkifreferenced(UNUSED(void *data), const char *filekey) { retvalue r; r = references_isused(filekey); if (r == RET_NOTHING) { printf("%s\n", filekey); return RET_OK; } else if (RET_IS_OK(r)) { return RET_NOTHING; } else return r; } ACTION_RF(n, n, n, n, dumpunreferenced) { retvalue result; result = files_foreach(checkifreferenced, NULL); return result; } static retvalue deleteifunreferenced(UNUSED(void *data), const char *filekey) { retvalue r; r = references_isused(filekey); if (r == RET_NOTHING) { r = pool_delete(filekey); return r; } else if (RET_IS_OK(r)) { return RET_NOTHING; } else return r; } ACTION_RF(n, n, n, n, deleteunreferenced) { retvalue result; if (keepunreferenced) { if (owner_keepunreferenced == CONFIG_OWNER_CMDLINE) fprintf(stderr, "Calling deleteunreferenced with --keepunreferencedfiles does not really make sense, does it?\n"); else fprintf(stderr, "Error: deleteunreferenced called with option\n" "'keepunreferencedfiles' activated. 
Please run\n" "'reprepro --nokeepunreferencedfiles deleteunreferenced',\n" "if you are sure you want to delete those files.\n"); return RET_ERROR; } result = files_foreach(deleteifunreferenced, NULL); return result; } ACTION_RF(n, n, n, y, deleteifunreferenced) { char buffer[5000], *nl; int i; retvalue r, ret; ret = RET_NOTHING; if (argc > 1) { for (i = 1 ; i < argc ; i++) { r = deleteifunreferenced(NULL, argv[i]); RET_UPDATE(ret, r); if (r == RET_NOTHING && verbose >= 0) fprintf(stderr, "Not removing '%s'\n", argv[i]); } } else while (fgets(buffer, 4999, stdin) != NULL) { nl = strchr(buffer, '\n'); if (nl == NULL) { return RET_ERROR; } *nl = '\0'; r = deleteifunreferenced(NULL, buffer); RET_UPDATE(ret, r); if (r == RET_NOTHING && verbose >= 0) fprintf(stderr, "Not removing '%s'\n", buffer); } return ret; } ACTION_R(n, n, n, y, addreference) { assert (argc == 2 || argc == 3); return references_increment(argv[1], argv[2]); } static retvalue remove_from_target(struct distribution *distribution, struct trackingdata *trackingdata, struct target *target, int count, const char * const *names, int *todo, bool *gotremoved) { retvalue result, r; int i; result = RET_NOTHING; for (i = 0 ; i < count ; i++){ r = target_removepackage(target, distribution->logger, names[i], trackingdata); RET_UPDATE(distribution->status, r); if (RET_IS_OK(r)) { if (!gotremoved[i]) (*todo)--; gotremoved[i] = true; } RET_UPDATE(result, r); } return result; } ACTION_D(y, n, y, remove) { retvalue result, r; struct distribution *distribution; struct target *t; bool *gotremoved; int todo; trackingdb tracks; struct trackingdata trackingdata; r = distribution_get(alldistributions, argv[1], true, &distribution); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; if (distribution->readonly) { fprintf(stderr, "Cannot remove packages from read-only distribution '%s'\n", distribution->codename); return RET_ERROR; } r = distribution_prepareforwriting(distribution); if (RET_WAS_ERROR(r)) return r; if 
(distribution->tracking != dt_NONE) { r = tracking_initialize(&tracks, distribution, false); if (RET_WAS_ERROR(r)) { return r; } r = trackingdata_new(tracks, &trackingdata); if (RET_WAS_ERROR(r)) { (void)tracking_done(tracks); return r; } } todo = argc-2; gotremoved = nzNEW(argc - 2, bool); result = RET_NOTHING; if (FAILEDTOALLOC(gotremoved)) result = RET_ERROR_OOM; else for (t = distribution->targets ; t != NULL ; t = t->next) { if (!target_matches(t, components, architectures, packagetypes)) continue; r = target_initpackagesdb(t, READWRITE); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; r = remove_from_target(distribution, (distribution->tracking != dt_NONE) ? &trackingdata : NULL, t, argc-2, argv+2, &todo, gotremoved); RET_UPDATE(result, r); r = target_closepackagesdb(t); RET_UPDATE(distribution->status, r); RET_UPDATE(result, r); if (RET_WAS_ERROR(result)) break; } if (distribution->tracking != dt_NONE) { if (RET_WAS_ERROR(result)) trackingdata_done(&trackingdata); else trackingdata_finish(tracks, &trackingdata); r = tracking_done(tracks); RET_ENDUPDATE(result, r); } if (verbose >= 0 && !RET_WAS_ERROR(result) && todo > 0) { int i = argc - 2; (void)fputs("Not removed as not found: ", stderr); while (i > 0) { i--; assert(gotremoved != NULL); if (!gotremoved[i]) { (void)fputs(argv[2 + i], stderr); todo--; if (todo > 0) (void)fputs(", ", stderr); } } (void)fputc('\n', stderr); } free(gotremoved); return result; } struct removesrcdata { const char *sourcename; const char /*@null@*/ *sourceversion; bool found; }; static retvalue package_source_fits(UNUSED(struct distribution *di), struct target *target, const char *packagename, const char *control, void *data) { struct removesrcdata *d = data; char *sourcename, *sourceversion; retvalue r; r = target->getsourceandversion(control, packagename, &sourcename, &sourceversion); if (!RET_IS_OK(r)) return r; for (; d->sourcename != NULL ; d++) { if (strcmp(sourcename, d->sourcename) != 0) continue; if (d->sourceversion 
== NULL) break; if (strcmp(sourceversion, d->sourceversion) == 0) break; } free(sourcename); free(sourceversion); if (d->sourcename == NULL) return RET_NOTHING; else { d->found = true; return RET_OK; } } static retvalue remove_packages(struct distribution *distribution, struct removesrcdata *toremove) { trackingdb tracks; retvalue result, r; r = distribution_prepareforwriting(distribution); if (RET_WAS_ERROR(r)) return r; if (distribution->tracking != dt_NONE) { r = tracking_initialize(&tracks, distribution, false); if (RET_WAS_ERROR(r)) { return r; } if (r == RET_NOTHING) tracks = NULL; } else tracks = NULL; result = RET_NOTHING; if (tracks != NULL) { result = RET_NOTHING; for (; toremove->sourcename != NULL ; toremove++) { r = tracking_removepackages(tracks, distribution, toremove->sourcename, toremove->sourceversion); RET_UPDATE(result, r); if (r == RET_NOTHING) { if (verbose >= -2) { if (toremove->sourceversion == NULL) fprintf(stderr, "Nothing about source package '%s' found in the tracking data of '%s'!\n" "This either means nothing from this source in this version is there,\n" "or the tracking information might be out of date.\n", toremove->sourcename, distribution->codename); else fprintf(stderr, "Nothing about '%s' version '%s' found in the tracking data of '%s'!\n" "This either means nothing from this source in this version is there,\n" "or the tracking information might be out of date.\n", toremove->sourcename, toremove->sourceversion, distribution->codename); } } } r = tracking_done(tracks); RET_ENDUPDATE(result, r); return result; } return distribution_remove_packages(distribution, // TODO: why not arch comp pt here? 
atom_unknown, atom_unknown, atom_unknown, package_source_fits, NULL, toremove); } ACTION_D(n, n, y, removesrc) { retvalue r; struct distribution *distribution; struct removesrcdata data[2]; r = distribution_get(alldistributions, argv[1], true, &distribution); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; if (distribution->readonly) { fprintf(stderr, "Error: Cannot remove packages from read-only distribution '%s'\n", distribution->codename); return RET_ERROR; } data[0].found = false; data[0].sourcename = argv[2]; if (argc <= 3) data[0].sourceversion = NULL; else data[0].sourceversion = argv[3]; if (index(data[0].sourcename, '=') != NULL && verbose >= 0) { fputs( "Warning: removesrc treats '=' as normal character.\n" "Did you want to use removesrcs?\n", stderr); } data[1].sourcename = NULL; data[1].sourceversion = NULL; return remove_packages(distribution, data); } ACTION_D(n, n, y, removesrcs) { retvalue r; struct distribution *distribution; struct removesrcdata data[argc-1]; int i; r = distribution_get(alldistributions, argv[1], true, &distribution); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; if (distribution->readonly) { fprintf(stderr, "Error: Cannot remove packages from read-only distribution '%s'\n", distribution->codename); return RET_ERROR; } for (i = 0 ; i < argc-2 ; i++) { data[i].found = false; data[i].sourcename = argv[2 + i]; data[i].sourceversion = index(data[i].sourcename, '='); if (data[i].sourceversion != NULL) { if (index(data[i].sourceversion+1, '=') != NULL) { fprintf(stderr, "Cannot parse '%s': more than one '='\n", data[i].sourcename); data[i].sourcename = NULL; r = RET_ERROR; } else if (data[i].sourceversion[1] == '\0') { fprintf(stderr, "Cannot parse '%s': no version after '='\n", data[i].sourcename); data[i].sourcename = NULL; r = RET_ERROR; } else if (data[i].sourceversion == data[i].sourcename) { fprintf(stderr, "Cannot parse '%s': no source name found before the '='\n", data[i].sourcename); data[i].sourcename = 
NULL; r = RET_ERROR; } else { data[i].sourcename = strndup(data[i].sourcename, data[i].sourceversion - data[i].sourcename); if (FAILEDTOALLOC(data[i].sourcename)) r = RET_ERROR_OOM; else r = RET_OK; } if (RET_WAS_ERROR(r)) { for (i-- ; i >= 0 ; i--) { if (data[i].sourceversion != NULL) free((char*)data[i].sourcename); } return r; } data[i].sourceversion++; } } data[i].sourcename = NULL; data[i].sourceversion= NULL; r = remove_packages(distribution, data); for (i = 0 ; i < argc-2 ; i++) { if (verbose >= 0 && !data[i].found) { if (data[i].sourceversion != NULL) fprintf(stderr, "No package from source '%s', version '%s' found.\n", data[i].sourcename, data[i].sourceversion); else fprintf(stderr, "No package from source '%s' (any version) found.\n", data[i].sourcename); } if (data[i].sourceversion != NULL) free((char*)data[i].sourcename); } return r; } static retvalue package_matches_condition(UNUSED(struct distribution *di), struct target *target, UNUSED(const char *pa), const char *control, void *data) { term *condition = data; return term_decidechunktarget(condition, control, target); } ACTION_D(y, n, y, removefilter) { retvalue result, r; struct distribution *distribution; trackingdb tracks; struct trackingdata trackingdata; term *condition; assert (argc == 3); r = distribution_get(alldistributions, argv[1], true, &distribution); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; if (distribution->readonly) { fprintf(stderr, "Error: Cannot remove packages from read-only distribution '%s'\n", distribution->codename); return RET_ERROR; } result = term_compilefortargetdecision(&condition, argv[2]); if (RET_WAS_ERROR(result)) return result; r = distribution_prepareforwriting(distribution); if (RET_WAS_ERROR(r)) { term_free(condition); return r; } if (distribution->tracking != dt_NONE) { r = tracking_initialize(&tracks, distribution, false); if (RET_WAS_ERROR(r)) { term_free(condition); return r; } if (r == RET_NOTHING) tracks = NULL; else { r = 
trackingdata_new(tracks, &trackingdata); if (RET_WAS_ERROR(r)) { (void)tracking_done(tracks); term_free(condition); return r; } } } else tracks = NULL; result = distribution_remove_packages(distribution, components, architectures, packagetypes, package_matches_condition, (tracks != NULL)?&trackingdata:NULL, condition); if (tracks != NULL) { trackingdata_finish(tracks, &trackingdata); r = tracking_done(tracks); RET_ENDUPDATE(result, r); } term_free(condition); return result; } static retvalue package_matches_glob(UNUSED(struct distribution *di), UNUSED(struct target *ta), const char *packagename, UNUSED(const char *control), void *data) { if (globmatch(packagename, data)) return RET_OK; else return RET_NOTHING; } ACTION_D(y, n, y, removematched) { retvalue result, r; struct distribution *distribution; trackingdb tracks; struct trackingdata trackingdata; assert (argc == 3); r = distribution_get(alldistributions, argv[1], true, &distribution); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; if (distribution->readonly) { fprintf(stderr, "Error: Cannot remove packages from read-only distribution '%s'\n", distribution->codename); return RET_ERROR; } r = distribution_prepareforwriting(distribution); if (RET_WAS_ERROR(r)) return r; if (distribution->tracking != dt_NONE) { r = tracking_initialize(&tracks, distribution, false); if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) tracks = NULL; else { r = trackingdata_new(tracks, &trackingdata); if (RET_WAS_ERROR(r)) { (void)tracking_done(tracks); return r; } } } else tracks = NULL; result = distribution_remove_packages(distribution, components, architectures, packagetypes, package_matches_glob, (tracks != NULL)?&trackingdata:NULL, (void*)argv[2]); if (tracks != NULL) { trackingdata_finish(tracks, &trackingdata); r = tracking_done(tracks); RET_ENDUPDATE(result, r); } return result; } ACTION_B(y, n, y, buildneeded) { retvalue r; struct distribution *distribution; const char *glob; architecture_t arch; bool 
anyarchitecture; if (architectures != NULL) { fprintf(stderr, "Error: build-needing cannot be used with --architecture!\n"); return RET_ERROR; } if (packagetypes != NULL) { fprintf(stderr, "Error: build-needing cannot be used with --packagetype!\n"); return RET_ERROR; } if (argc == 4) glob = argv[3]; else glob = NULL; if (strcmp(argv[2], "any") == 0) { anyarchitecture = true; } else { anyarchitecture = false; arch = architecture_find(argv[2]); if (!atom_defined(arch)) { fprintf(stderr, "Error: Architecture '%s' is not known!\n", argv[2]); return RET_ERROR; } if (arch == architecture_source) { fprintf(stderr, "Error: Architecture '%s' makes no sense for build-needing!\n", argv[2]); return RET_ERROR; } } r = distribution_get(alldistributions, argv[1], false, &distribution); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; if (!atomlist_in(&distribution->architectures, architecture_source)) { fprintf(stderr, "Error: Architecture '%s' does not contain sources. build-needing cannot be used!\n", distribution->codename); return RET_ERROR; } if (anyarchitecture) { retvalue result; int i; result = find_needs_build(distribution, architecture_all, components, glob, true); for (i = 0 ; i < distribution->architectures.count ; i++) { architecture_t a = distribution->architectures.atoms[i]; if (a == architecture_source || a == architecture_all) continue; r = find_needs_build(distribution, a, components, glob, true); RET_UPDATE(result, r); } return result; } else { if (!atomlist_in(&distribution->architectures, arch) && arch != architecture_all) { fprintf(stderr, "Error: Architecture '%s' not found in distribution '%s'!\n", argv[2], distribution->codename); return RET_ERROR; } return find_needs_build(distribution, arch, components, glob, false); } } static retvalue list_in_target(struct target *target, const char *packagename) { retvalue r, result; char *control; if (listmax == 0) return RET_NOTHING; r = target_initpackagesdb(target, READONLY); if (!RET_IS_OK(r)) return 
r; result = table_getrecord(target->packages, packagename, &control); if (RET_IS_OK(result)) { if (listskip <= 0) { r = listformat_print(listformat, target, packagename, control); RET_UPDATE(result, r); if (listmax > 0) listmax--; } else listskip--; free(control); } r = target_closepackagesdb(target); RET_ENDUPDATE(result, r); return result; } static retvalue list_package(UNUSED(struct distribution *dummy2), struct target *target, const char *package, const char *control, UNUSED(void *dummy3)) { if (listmax == 0) return RET_NOTHING; if (listskip <= 0) { if (listmax > 0) listmax--; return listformat_print(listformat, target, package, control); } else { listskip--; return RET_NOTHING; } } ACTION_B(y, n, y, list) { retvalue result = RET_NOTHING, r; struct distribution *distribution; struct target *t; assert (argc >= 2); r = distribution_get(alldistributions, argv[1], false, &distribution); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; if (argc == 2) return distribution_foreach_package(distribution, components, architectures, packagetypes, list_package, NULL, NULL); else for (t = distribution->targets ; t != NULL ; t = t->next) { if (!target_matches(t, components, architectures, packagetypes)) continue; r = list_in_target(t, argv[2]); if (RET_WAS_ERROR(r)) return r; RET_UPDATE(result, r); } return result; } struct lsversion { /*@null@*/struct lsversion *next; char *version; struct atomlist architectures; }; struct lspart { struct lspart *next; const char *codename; const char *component; struct lsversion *versions; }; static retvalue newlsversion(struct lsversion **versions_p, /*@only@*/char *version, architecture_t architecture) { struct lsversion *v, **v_p; for (v_p = versions_p ; (v = *v_p) != NULL ; v_p = &v->next) { if (strcmp(v->version, version) != 0) continue; free(version); return atomlist_add_uniq(&v->architectures, architecture); } v = zNEW(struct lsversion); if (FAILEDTOALLOC(v)) return RET_ERROR_OOM; *v_p = v; v->version = version; return 
atomlist_add(&v->architectures, architecture); } static retvalue ls_in_target(struct target *target, const char *packagename, struct lsversion **versions_p) { retvalue r, result; char *control, *version; r = target_initpackagesdb(target, READONLY); if (!RET_IS_OK(r)) return r; result = table_getrecord(target->packages, packagename, &control); if (RET_IS_OK(result)) { r = target->getversion(control, &version); if (RET_IS_OK(r)) r = newlsversion(versions_p, version, target->architecture); free(control); RET_UPDATE(result, r); } r = target_closepackagesdb(target); RET_ENDUPDATE(result, r); return result; } static inline retvalue printlsparts(const char *pkgname, struct lspart *parts) { int versionlen, codenamelen, componentlen; struct lspart *p; retvalue result = RET_NOTHING; versionlen = 0; codenamelen = 0; componentlen = 0; for (p = parts ; p->codename != NULL ; p = p->next) { struct lsversion *v; int l; l = strlen(p->codename); if (l > codenamelen) codenamelen = l; if (p->component != NULL) { l = strlen(p->component); if (l > componentlen) componentlen = l; } for (v = p->versions ; v != NULL ; v = v->next) { l = strlen(v->version); if (l > versionlen) versionlen = l; } } while (parts->codename != NULL) { p = parts; parts = parts->next; while (p->versions != NULL) { architecture_t a; int i; struct lsversion *v; v = p->versions; p->versions = v->next; result = RET_OK; printf("%s | %*s | %*s | ", pkgname, versionlen, v->version, codenamelen, p->codename); if (componentlen > 0 && p->component != NULL) printf("%*s | ", componentlen, p->component); for (i = 0 ; i + 1 < v->architectures.count ; i++) { a = v->architectures.atoms[i]; printf("%s, ", atoms_architectures[a]); } a = v->architectures.atoms[i]; puts(atoms_architectures[a]); free(v->version); atomlist_done(&v->architectures); free(v); } free(p); } free(parts); return result; } ACTION_B(y, n, y, ls) { retvalue r; struct distribution *d; struct target *t; struct lspart *first, *last; assert (argc == 2); first = 
zNEW(struct lspart); last = first; for (d = alldistributions ; d != NULL ; d = d->next) { for (t = d->targets ; t != NULL ; t = t->next) { if (!target_matches(t, components, architectures, packagetypes)) continue; r = ls_in_target(t, argv[1], &last->versions); if (RET_WAS_ERROR(r)) return r; } if (last->versions != NULL) { last->codename = d->codename; last->next = zNEW(struct lspart); last = last->next; } } return printlsparts(argv[1], first); } ACTION_B(y, n, y, lsbycomponent) { retvalue r; struct distribution *d; struct target *t; struct lspart *first, *last; int i; assert (argc == 2); first = zNEW(struct lspart); last = first; for (d = alldistributions ; d != NULL ; d = d->next) { for (i = 0 ; i < d->components.count ; i ++) { component_t component = d->components.atoms[i]; if (limitations_missed(components, component)) continue; for (t = d->targets ; t != NULL ; t = t->next) { if (t->component != component) continue; if (limitations_missed(architectures, t->architecture)) continue; if (limitations_missed(packagetypes, t->packagetype)) continue; r = ls_in_target(t, argv[1], &last->versions); if (RET_WAS_ERROR(r)) return r; } if (last->versions != NULL) { last->codename = d->codename; last->component = atoms_components[component]; last->next = zNEW(struct lspart); last = last->next; } } } return printlsparts(argv[1], first); } static retvalue listfilterprint(UNUSED(struct distribution *di), struct target *target, const char *packagename, const char *control, void *data) { term *condition = data; retvalue r; if (listmax == 0) return RET_NOTHING; r = term_decidechunktarget(condition, control, target); if (RET_IS_OK(r)) { if (listskip <= 0) { if (listmax > 0) listmax--; r = listformat_print(listformat, target, packagename, control); } else { listskip--; r = RET_NOTHING; } } return r; } ACTION_B(y, n, y, listfilter) { retvalue r, result; struct distribution *distribution; term *condition; assert (argc == 3); r = distribution_get(alldistributions, argv[1], false, 
&distribution); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { return r; } result = term_compilefortargetdecision(&condition, argv[2]); if (RET_WAS_ERROR(result)) { return result; } result = distribution_foreach_package(distribution, components, architectures, packagetypes, listfilterprint, NULL, condition); term_free(condition); return result; } static retvalue listmatchprint(UNUSED(struct distribution *di), struct target *target, const char *packagename, const char *control, void *data) { const char *glob = data; if (listmax == 0) return RET_NOTHING; if (globmatch(packagename, glob)) { if (listskip <= 0) { if (listmax > 0) listmax--; return listformat_print(listformat, target, packagename, control); } else { listskip--; return RET_NOTHING; } } else return RET_NOTHING; } ACTION_B(y, n, y, listmatched) { retvalue r, result; struct distribution *distribution; assert (argc == 3); r = distribution_get(alldistributions, argv[1], false, &distribution); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { return r; } result = distribution_foreach_package(distribution, components, architectures, packagetypes, listmatchprint, NULL, (void*)argv[2]); return result; } ACTION_F(n, n, n, y, detect) { char buffer[5000], *nl; int i; retvalue r, ret; ret = RET_NOTHING; if (argc > 1) { for (i = 1 ; i < argc ; i++) { r = files_detect(argv[i]); RET_UPDATE(ret, r); } } else while (fgets(buffer, 4999, stdin) != NULL) { nl = strchr(buffer, '\n'); if (nl == NULL) { return RET_ERROR; } *nl = '\0'; r = files_detect(buffer); RET_UPDATE(ret, r); } return ret; } ACTION_F(n, n, n, y, forget) { char buffer[5000], *nl; int i; retvalue r, ret; ret = RET_NOTHING; if (argc > 1) { for (i = 1 ; i < argc ; i++) { r = files_remove(argv[i]); RET_UPDATE(ret, r); } } else while (fgets(buffer, 4999, stdin) != NULL) { nl = strchr(buffer, '\n'); if (nl == NULL) { return RET_ERROR; } *nl = '\0'; r = files_remove(buffer); RET_UPDATE(ret, r); } return ret; } ACTION_F(n, n, n, n, listmd5sums) { return 
files_printmd5sums(); } ACTION_F(n, n, n, n, listchecksums) { return files_printchecksums(); } ACTION_B(n, n, n, dumpcontents) { retvalue result, r; struct table *packages; const char *package, *chunk; struct cursor *cursor; assert (argc == 2); result = database_openpackages(argv[1], true, &packages); if (RET_WAS_ERROR(result)) return result; r = table_newglobalcursor(packages, &cursor); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { (void)table_close(packages); return r; } result = RET_NOTHING; while (cursor_nexttemp(packages, cursor, &package, &chunk)) { printf("'%s' -> '%s'\n", package, chunk); result = RET_OK; } r = cursor_close(packages, cursor); RET_ENDUPDATE(result, r); r = table_close(packages); RET_ENDUPDATE(result, r); return result; } ACTION_F(n, n, y, y, export) { retvalue result, r; struct distribution *d; if (export == EXPORT_NEVER || export == EXPORT_SILENT_NEVER) { fprintf(stderr, "Error: reprepro export incompatible with --export=never\n"); return RET_ERROR; } result = distribution_match(alldistributions, argc-1, argv+1, true, READWRITE); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; result = RET_NOTHING; for (d = alldistributions ; d != NULL ; d = d->next) { if (!d->selected) continue; if (verbose > 0) { printf("Exporting %s...\n", d->codename); } r = distribution_fullexport(d); if (RET_IS_OK(r)) /* avoid being exported again */ d->lookedat = false; RET_UPDATE(result, r); if (RET_WAS_ERROR(r) && export != EXPORT_FORCE) { return r; } } return result; } /***********************update********************************/ ACTION_D(y, n, y, update) { retvalue result; struct update_pattern *patterns; struct update_distribution *u_distributions; result = dirs_make_recursive(global.listdir); if (RET_WAS_ERROR(result)) { return result; } result = distribution_match(alldistributions, argc-1, argv+1, true, READWRITE); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; result = updates_getpatterns(&patterns); 
if (RET_WAS_ERROR(result)) return result; assert (RET_IS_OK(result)); result = updates_calcindices(patterns, alldistributions, components, architectures, packagetypes, &u_distributions); if (!RET_IS_OK(result)) { if (result == RET_NOTHING) { if (argc == 1) fputs( "Nothing to do, because no distribution has an Update: field.\n", stderr); else fputs( "Nothing to do, because none of the selected distributions has an Update: field.\n", stderr); } updates_freepatterns(patterns); return result; } assert (RET_IS_OK(result)); if (!RET_WAS_ERROR(result)) result = updates_update(u_distributions, nolistsdownload, skipold, spacecheckmode, reserveddbspace, reservedotherspace); updates_freeupdatedistributions(u_distributions); updates_freepatterns(patterns); return result; } ACTION_D(y, n, y, predelete) { retvalue result; struct update_pattern *patterns; struct update_distribution *u_distributions; result = dirs_make_recursive(global.listdir); if (RET_WAS_ERROR(result)) { return result; } result = distribution_match(alldistributions, argc-1, argv+1, true, READWRITE); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; result = updates_getpatterns(&patterns); if (RET_WAS_ERROR(result)) { return result; } assert (RET_IS_OK(result)); result = updates_calcindices(patterns, alldistributions, components, architectures, packagetypes, &u_distributions); if (!RET_IS_OK(result)) { if (result == RET_NOTHING) { if (argc == 1) fputs( "Nothing to do, because no distribution has an Update: field.\n", stderr); else fputs( "Nothing to do, because none of the selected distributions has an Update: field.\n", stderr); } updates_freepatterns(patterns); return result; } assert (RET_IS_OK(result)); if (!RET_WAS_ERROR(result)) result = updates_predelete(u_distributions, nolistsdownload, skipold); updates_freeupdatedistributions(u_distributions); updates_freepatterns(patterns); return result; } ACTION_B(y, n, y, checkupdate) { retvalue result; struct update_pattern *patterns; 
struct update_distribution *u_distributions; result = dirs_make_recursive(global.listdir); if (RET_WAS_ERROR(result)) { return result; } result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; result = updates_getpatterns(&patterns); if (RET_WAS_ERROR(result)) { return result; } result = updates_calcindices(patterns, alldistributions, components, architectures, packagetypes, &u_distributions); if (!RET_IS_OK(result)) { if (result == RET_NOTHING) { if (argc == 1) fputs( "Nothing to do, because no distribution has an Updates: field.\n", stderr); else fputs( "Nothing to do, because none of the selected distributions has an Update: field.\n", stderr); } updates_freepatterns(patterns); return result; } result = updates_checkupdate(u_distributions, nolistsdownload, skipold); updates_freeupdatedistributions(u_distributions); updates_freepatterns(patterns); return result; } ACTION_B(y, n, y, dumpupdate) { retvalue result; struct update_pattern *patterns; struct update_distribution *u_distributions; result = dirs_make_recursive(global.listdir); if (RET_WAS_ERROR(result)) { return result; } result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; result = updates_getpatterns(&patterns); if (RET_WAS_ERROR(result)) { return result; } result = updates_calcindices(patterns, alldistributions, components, architectures, packagetypes, &u_distributions); if (!RET_IS_OK(result)) { if (result == RET_NOTHING) { if (argc == 1) fputs( "Nothing to do, because no distribution has an Updates: field.\n", stderr); else fputs( "Nothing to do, because none of the selected distributions has an Update: field.\n", stderr); } updates_freepatterns(patterns); return result; } result = updates_dumpupdate(u_distributions, nolistsdownload, skipold); updates_freeupdatedistributions(u_distributions); 
updates_freepatterns(patterns); return result; } ACTION_L(n, n, n, n, cleanlists) { retvalue result; struct update_pattern *patterns; assert (argc == 1); if (!isdirectory(global.listdir)) return RET_NOTHING; result = updates_getpatterns(&patterns); if (RET_WAS_ERROR(result)) return result; result = updates_cleanlists(alldistributions, patterns); updates_freepatterns(patterns); return result; } /***********************migrate*******************************/ ACTION_D(y, n, y, pull) { retvalue result; struct pull_rule *rules; struct pull_distribution *p; result = distribution_match(alldistributions, argc-1, argv+1, true, READWRITE); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; result = pull_getrules(&rules); if (RET_WAS_ERROR(result)) { return result; } assert (RET_IS_OK(result)); result = pull_prepare(alldistributions, rules, fast, components, architectures, packagetypes, &p); if (RET_WAS_ERROR(result)) { pull_freerules(rules); return result; } result = pull_update(p); pull_freerules(rules); pull_freedistributions(p); return result; } ACTION_B(y, n, y, checkpull) { retvalue result; struct pull_rule *rules; struct pull_distribution *p; result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; result = pull_getrules(&rules); if (RET_WAS_ERROR(result)) { return result; } assert (RET_IS_OK(result)); result = pull_prepare(alldistributions, rules, fast, components, architectures, packagetypes, &p); if (RET_WAS_ERROR(result)) { pull_freerules(rules); return result; } result = pull_checkupdate(p); pull_freerules(rules); pull_freedistributions(p); return result; } ACTION_B(y, n, y, dumppull) { retvalue result; struct pull_rule *rules; struct pull_distribution *p; result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; result = pull_getrules(&rules); if 
(RET_WAS_ERROR(result)) { return result; } assert (RET_IS_OK(result)); result = pull_prepare(alldistributions, rules, fast, components, architectures, packagetypes, &p); if (RET_WAS_ERROR(result)) { pull_freerules(rules); return result; } result = pull_dumpupdate(p); pull_freerules(rules); pull_freedistributions(p); return result; } ACTION_D(y, n, y, copy) { struct distribution *destination, *source; retvalue result; result = distribution_get(alldistributions, argv[1], true, &destination); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; result = distribution_get(alldistributions, argv[2], false, &source); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; if (destination->readonly) { fprintf(stderr, "Cannot copy packages to read-only distribution '%s'.\n", destination->codename); return RET_ERROR; } result = distribution_prepareforwriting(destination); if (RET_WAS_ERROR(result)) return result; return copy_by_name(destination, source, argc-3, argv+3, components, architectures, packagetypes); } ACTION_D(y, n, y, copysrc) { struct distribution *destination, *source; retvalue result; result = distribution_get(alldistributions, argv[1], true, &destination); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; result = distribution_get(alldistributions, argv[2], false, &source); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; if (destination->readonly) { fprintf(stderr, "Cannot copy packages to read-only distribution '%s'.\n", destination->codename); return RET_ERROR; } result = distribution_prepareforwriting(destination); if (RET_WAS_ERROR(result)) return result; return copy_by_source(destination, source, argc-3, argv+3, components, architectures, packagetypes); return result; } ACTION_D(y, n, y, copyfilter) { struct distribution *destination, *source; retvalue result; assert (argc == 4); result = distribution_get(alldistributions, argv[1], true, &destination); assert (result 
!= RET_NOTHING); if (RET_WAS_ERROR(result)) return result; result = distribution_get(alldistributions, argv[2], false, &source); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; if (destination->readonly) { fprintf(stderr, "Cannot copy packages to read-only distribution '%s'.\n", destination->codename); return RET_ERROR; } result = distribution_prepareforwriting(destination); if (RET_WAS_ERROR(result)) return result; return copy_by_formula(destination, source, argv[3], components, architectures, packagetypes); } ACTION_D(y, n, y, copymatched) { struct distribution *destination, *source; retvalue result; assert (argc == 4); result = distribution_get(alldistributions, argv[1], true, &destination); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; result = distribution_get(alldistributions, argv[2], false, &source); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; if (destination->readonly) { fprintf(stderr, "Cannot copy packages to read-only distribution '%s'.\n", destination->codename); return RET_ERROR; } result = distribution_prepareforwriting(destination); if (RET_WAS_ERROR(result)) return result; return copy_by_glob(destination, source, argv[3], components, architectures, packagetypes); } ACTION_D(y, n, y, restore) { struct distribution *destination; retvalue result; result = distribution_get(alldistributions, argv[1], true, &destination); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; if (destination->readonly) { fprintf(stderr, "Cannot copy packages to read-only distribution '%s'.\n", destination->codename); return RET_ERROR; } result = distribution_prepareforwriting(destination); if (RET_WAS_ERROR(result)) return result; return restore_by_name(destination, components, architectures, packagetypes, argv[2], argc-3, argv+3); } ACTION_D(y, n, y, restoresrc) { struct distribution *destination; retvalue result; result = distribution_get(alldistributions, argv[1], 
true, &destination); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; if (destination->readonly) { fprintf(stderr, "Cannot copy packages to read-only distribution '%s'.\n", destination->codename); return RET_ERROR; } result = distribution_prepareforwriting(destination); if (RET_WAS_ERROR(result)) return result; return restore_by_source(destination, components, architectures, packagetypes, argv[2], argc-3, argv+3); } ACTION_D(y, n, y, restorematched) { struct distribution *destination; retvalue result; assert (argc == 4); result = distribution_get(alldistributions, argv[1], true, &destination); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; if (destination->readonly) { fprintf(stderr, "Cannot copy packages to read-only distribution '%s'.\n", destination->codename); return RET_ERROR; } result = distribution_prepareforwriting(destination); if (RET_WAS_ERROR(result)) return result; return restore_by_glob(destination, components, architectures, packagetypes, argv[2], argv[3]); } ACTION_D(y, n, y, restorefilter) { struct distribution *destination; retvalue result; assert (argc == 4); result = distribution_get(alldistributions, argv[1], true, &destination); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; if (destination->readonly) { fprintf(stderr, "Cannot copy packages to read-only distribution '%s'.\n", destination->codename); return RET_ERROR; } result = distribution_prepareforwriting(destination); if (RET_WAS_ERROR(result)) return result; return restore_by_formula(destination, components, architectures, packagetypes, argv[2], argv[3]); } ACTION_D(y, n, y, addpackage) { struct distribution *destination; retvalue result; architecture_t architecture = atom_unknown; component_t component = atom_unknown; packagetype_t packagetype = atom_unknown; if (packagetypes != NULL) { if (packagetypes->count > 1) { fprintf(stderr, "_addpackage can only cope with one packagetype at a time!\n"); return 
RET_ERROR; } packagetype = packagetypes->atoms[0]; } if (architectures != NULL) { if (architectures->count > 1) { fprintf(stderr, "_addpackage can only cope with one architecture at a time!\n"); return RET_ERROR; } architecture = architectures->atoms[0]; } if (components != NULL) { if (components->count > 1) { fprintf(stderr, "_addpackage can only cope with one component at a time!\n"); return RET_ERROR; } component = components->atoms[0]; } if (!atom_defined(packagetype) && atom_defined(architecture) && architecture == architecture_source) packagetype = pt_dsc; if (atom_defined(packagetype) && !atom_defined(architecture) && packagetype == pt_dsc) architecture = architecture_source; // TODO: some more guesses based on components and udebcomponents if (!atom_defined(architecture) || !atom_defined(component) || !atom_defined(packagetype)) { fprintf(stderr, "_addpackage needs -C and -A and -T set!\n"); return RET_ERROR; } result = distribution_get(alldistributions, argv[1], true, &destination); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; if (destination->readonly) { fprintf(stderr, "Cannot add packages to read-only distribution '%s'.\n", destination->codename); return RET_ERROR; } result = distribution_prepareforwriting(destination); if (RET_WAS_ERROR(result)) return result; return copy_from_file(destination, component, architecture, packagetype, argv[2], argc-3, argv+3); } /***********************rereferencing*************************/ ACTION_R(n, n, y, y, rereference) { retvalue result, r; struct distribution *d; struct target *t; result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) { return result; } result = RET_NOTHING; for (d = alldistributions ; d != NULL ; d = d->next) { if (!d->selected) continue; if (verbose > 0) { printf("Referencing %s...\n", d->codename); } for (t = d->targets ; t != NULL ; t = t->next) { r = target_rereference(t); 
RET_UPDATE(result, r); } r = tracking_rereference(d); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; } return result; } /***************************retrack****************************/ ACTION_D(n, n, y, retrack) { retvalue result, r; struct distribution *d; result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) { return result; } result = RET_NOTHING; for (d = alldistributions ; d != NULL ; d = d->next) { if (!d->selected) continue; if (d->tracking == dt_NONE) { if (argc > 1) { fprintf(stderr, "Cannot retrack %s: Tracking not activated for this distribution!\n", d->codename); RET_UPDATE(result, RET_ERROR); } continue; } r = tracking_retrack(d, true); RET_ENDUPDATE(result, r); if (RET_WAS_ERROR(result)) break; } return result; } ACTION_D(n, n, y, removetrack) { retvalue result, r; struct distribution *distribution; trackingdb tracks; assert (argc == 4); result = distribution_get(alldistributions, argv[1], false, &distribution); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; r = tracking_initialize(&tracks, distribution, false); if (RET_WAS_ERROR(r)) { return r; } result = tracking_remove(tracks, argv[2], argv[3]); r = tracking_done(tracks); RET_ENDUPDATE(result, r); return result; } ACTION_D(n, n, y, removealltracks) { retvalue result, r; struct distribution *d; const char *codename; int i; if (delete <= 0) for (i = 1 ; i < argc ; i ++) { codename = argv[i]; d = alldistributions; while (d != NULL && strcmp(codename, d->codename) != 0) d = d->next; if (d != NULL && d->tracking != dt_NONE) { fprintf(stderr, "Error: Requested removing of all tracks of distribution '%s',\n" "which still has tracking enabled. 
Use --delete to delete anyway.\n", codename); return RET_ERROR; } } result = RET_NOTHING; for (i = 1 ; i < argc ; i ++) { codename = argv[i]; if (verbose >= 0) { printf("Deleting all tracks for %s...\n", codename); } r = tracking_drop(codename); RET_UPDATE(result, r); if (RET_WAS_ERROR(result)) break; if (r == RET_NOTHING) { d = alldistributions; while (d != NULL && strcmp(codename, d->codename) != 0) d = d->next; if (d == NULL) { fprintf(stderr, "Warning: There was no tracking information to delete for '%s',\n" "which is also not found in conf/distributions. Either this was already\n" "deleted earlier, or you might have mistyped.\n", codename); } } } return result; } ACTION_D(n, n, y, tidytracks) { retvalue result, r; struct distribution *d; result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) { return result; } result = RET_NOTHING; for (d = alldistributions ; d != NULL ; d = d->next) { trackingdb tracks; if (!d->selected) continue; if (d->tracking == dt_NONE) { r = tracking_drop(d->codename); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; continue; } if (verbose >= 0) { printf("Looking for old tracks in %s...\n", d->codename); } r = tracking_initialize(&tracks, d, false); if (RET_WAS_ERROR(r)) { RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; continue; } r = tracking_tidyall(tracks); RET_UPDATE(result, r); r = tracking_done(tracks); RET_ENDUPDATE(result, r); if (RET_WAS_ERROR(result)) break; } return result; } ACTION_B(n, n, y, dumptracks) { retvalue result, r; struct distribution *d; result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) { return result; } result = RET_NOTHING; for (d = alldistributions ; d != NULL ; d = d->next) { trackingdb tracks; if (!d->selected) continue; r = tracking_initialize(&tracks, d, true); if (RET_WAS_ERROR(r)) { RET_UPDATE(result, r); if 
(RET_WAS_ERROR(r)) break; continue; } if (r == RET_NOTHING) continue; r = tracking_printall(tracks); RET_UPDATE(result, r); r = tracking_done(tracks); RET_ENDUPDATE(result, r); if (RET_WAS_ERROR(result)) break; } return result; } /***********************checking*************************/ ACTION_RF(y, n, y, y, check) { retvalue result, r; struct distribution *d; result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) { return result; } result = RET_NOTHING; for (d = alldistributions ; d != NULL ; d = d->next) { if (!d->selected) continue; if (verbose > 0) { printf("Checking %s...\n", d->codename); } r = distribution_foreach_package(d, components, architectures, packagetypes, package_check, NULL, NULL); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; } return result; } ACTION_F(n, n, n, y, checkpool) { if (argc == 2 && strcmp(argv[1], "fast") != 0) { fprintf(stderr, "Error: Unrecognized second argument '%s'\n" "Syntax: reprepro checkpool [fast]\n", argv[1]); return RET_ERROR; } return files_checkpool(argc == 2); } /* Update checksums of existing files */ ACTION_F(n, n, n, n, collectnewchecksums) { return files_collectnewchecksums(); } /*****************reapplying override info***************/ ACTION_F(y, n, y, y, reoverride) { retvalue result, r; struct distribution *d; result = distribution_match(alldistributions, argc-1, argv+1, true, READWRITE); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) { return result; } result = RET_NOTHING; for (d = alldistributions ; d != NULL ; d = d->next) { if (!d->selected) continue; if (verbose > 0) { fprintf(stderr, "Reapplying override to %s...\n", d->codename); } r = distribution_loadalloverrides(d); if (RET_IS_OK(r)) { struct target *t; for (t = d->targets ; t != NULL ; t = t->next) { if (!target_matches(t, components, architectures, packagetypes)) continue; r = target_reoverride(t, d); RET_UPDATE(result, r); // TODO: how to 
seperate this in those affecting d // and those that do not? RET_UPDATE(d->status, r); } distribution_unloadoverrides(d); } else if (r == RET_NOTHING) { fprintf(stderr, "No override files, thus nothing to do for %s.\n", d->codename); } else { RET_UPDATE(result, r); } if (RET_WAS_ERROR(result)) break; } return result; } /*****************retrieving Description data from .deb files***************/ static retvalue repair_descriptions(struct target *target, bool force) { struct target_cursor iterator; retvalue result, r; const char *package, *controlchunk; assert(target->packages == NULL); assert(target->packagetype == pt_deb); if (verbose > 2) { printf( "Redoing checksum information for packages in '%s'...\n", target->identifier); } r = target_openiterator(target, READWRITE, &iterator); if (!RET_IS_OK(r)) return r; result = RET_NOTHING; while (target_nextpackage(&iterator, &package, &controlchunk)) { char *newcontrolchunk = NULL; if (interrupted()) { result = RET_ERROR_INTERRUPTED; break; } r = description_complete(package, controlchunk, force, &newcontrolchunk); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; if (RET_IS_OK(r)) { if (verbose >= 0) { printf( "Fixing description for '%s'...\n", package); } r = cursor_replace(target->packages, iterator.cursor, newcontrolchunk, strlen(newcontrolchunk)); free(newcontrolchunk); if (RET_WAS_ERROR(r)) { result = r; break; } target->wasmodified = true; } } r = target_closeiterator(&iterator); RET_ENDUPDATE(result, r); return result; } ACTION_F(y, n, y, y, repairdescriptions) { retvalue result, r; struct distribution *d; bool force = strcmp(argv[0], "forcerepairdescriptions") == 0; result = distribution_match(alldistributions, argc-1, argv+1, true, READWRITE); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) { return result; } result = RET_NOTHING; for (d = alldistributions ; d != NULL ; d = d->next) { struct target *t; if (!d->selected) continue; if (verbose > 0) { printf( "Looking for 'Description's to repair in 
%s...\n", d->codename); } for (t = d->targets ; t != NULL ; t = t->next) { if (interrupted()) { result = RET_ERROR_INTERRUPTED; break; } if (!target_matches(t, components, architectures, packagetypes)) continue; if (t->packagetype != pt_deb) continue; r = repair_descriptions(t, force); RET_UPDATE(result, r); RET_UPDATE(d->status, r); if (RET_WAS_ERROR(r)) break; } } return result; } /*****************adding checkums of files again*****************/ ACTION_F(y, n, y, y, redochecksums) { retvalue result, r; struct distribution *d; result = distribution_match(alldistributions, argc-1, argv+1, true, READWRITE); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) { return result; } result = RET_NOTHING; for (d = alldistributions ; d != NULL ; d = d->next) { struct target *t; if (!d->selected) continue; if (verbose > 0) { fprintf(stderr, "Readding checksum information to packages in %s...\n", d->codename); } for (t = d->targets ; t != NULL ; t = t->next) { if (!target_matches(t, components, architectures, packagetypes)) continue; r = target_redochecksums(t, d); RET_UPDATE(result, r); RET_UPDATE(d->status, r); if (RET_WAS_ERROR(r)) break; } if (RET_WAS_ERROR(result)) break; } return result; } /*******************sizes of distributions***************/ ACTION_RF(n, n, y, y, sizes) { retvalue result; result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) { return result; } return sizes_distributions(alldistributions, argc > 1); } /***********************include******************************************/ ACTION_D(y, y, y, includedeb) { retvalue result, r; struct distribution *distribution; bool isudeb; trackingdb tracks; int i = 0; component_t component = atom_unknown; if (components != NULL) { if (components->count > 1) { fprintf(stderr, "Error: Only one component is allowed with %s!\n", argv[0]); return RET_ERROR; } assert(components->count > 0); component = components->atoms[0]; } if 
(architectures != NULL) if (!atomlist_hasexcept(architectures, architecture_source)) { fprintf(stderr, "Error: -A source is not possible with includedeb!\n"); return RET_ERROR; } if (strcmp(argv[0], "includeudeb") == 0) { isudeb = true; if (limitations_missed(packagetypes, pt_udeb)) { fprintf(stderr, "Calling includeudeb with a -T not containing udeb makes no sense!\n"); return RET_ERROR; } } else if (strcmp(argv[0], "includedeb") == 0) { isudeb = false; if (limitations_missed(packagetypes, pt_deb)) { fprintf(stderr, "Calling includedeb with a -T not containing deb makes no sense!\n"); return RET_ERROR; } } else { fprintf(stderr, "Internal error while parding command!\n"); return RET_ERROR; } for (i = 2 ; i < argc ; i++) { const char *filename = argv[i]; if (isudeb) { if (!endswith(filename, ".udeb") && !IGNORING(extension, "includeudeb called with file '%s' not ending with '.udeb'\n", filename)) return RET_ERROR; } else { if (!endswith(filename, ".deb") && !IGNORING(extension, "includedeb called with file '%s' not ending with '.deb'\n", filename)) return RET_ERROR; } } result = distribution_get(alldistributions, argv[1], true, &distribution); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) { return result; } if (distribution->readonly) { fprintf(stderr, "Cannot add packages to read-only distribution '%s'.\n", distribution->codename); return RET_ERROR; } if (isudeb) result = override_read(distribution->udeb_override, &distribution->overrides.udeb, false); else result = override_read(distribution->deb_override, &distribution->overrides.deb, false); if (RET_WAS_ERROR(result)) { return result; } // TODO: same for component? (depending on type?) 
if (architectures != NULL) { architecture_t missing = atom_unknown; if (!atomlist_subset(&distribution->architectures, architectures, &missing)){ fprintf(stderr, "Cannot force into the architecture '%s' not available in '%s'!\n", atoms_architectures[missing], distribution->codename); return RET_ERROR; } } r = distribution_prepareforwriting(distribution); if (RET_WAS_ERROR(r)) { return RET_ERROR; } if (distribution->tracking != dt_NONE) { result = tracking_initialize(&tracks, distribution, false); if (RET_WAS_ERROR(result)) { return result; } } else { tracks = NULL; } result = RET_NOTHING; for (i = 2 ; i < argc ; i++) { const char *filename = argv[i]; r = deb_add(component, architectures, section, priority, isudeb?pt_udeb:pt_deb, distribution, filename, delete, tracks); RET_UPDATE(result, r); } distribution_unloadoverrides(distribution); r = tracking_done(tracks); RET_ENDUPDATE(result, r); return result; } ACTION_D(y, y, y, includedsc) { retvalue result, r; struct distribution *distribution; trackingdb tracks; component_t component = atom_unknown; if (components != NULL) { if (components->count > 1) { fprintf(stderr, "Error: Only one component is allowed with %s!\n", argv[0]); return RET_ERROR; } assert(components->count > 0); component = components->atoms[0]; } assert (argc == 3); if (limitations_missed(architectures, architecture_source)) { fprintf(stderr, "Cannot put a source package anywhere else than in architecture 'source'!\n"); return RET_ERROR; } if (limitations_missed(packagetypes, pt_dsc)) { fprintf(stderr, "Cannot put a source package anywhere else than in type 'dsc'!\n"); return RET_ERROR; } if (!endswith(argv[2], ".dsc") && !IGNORING(extension, "includedsc called with a file not ending with '.dsc'\n")) return RET_ERROR; result = distribution_get(alldistributions, argv[1], true, &distribution); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; if (distribution->readonly) { fprintf(stderr, "Cannot add packages to read-only 
distribution '%s'.\n", distribution->codename); return RET_ERROR; } result = override_read(distribution->dsc_override, &distribution->overrides.dsc, true); if (RET_WAS_ERROR(result)) { return result; } result = distribution_prepareforwriting(distribution); if (RET_WAS_ERROR(result)) { return result; } if (distribution->tracking != dt_NONE) { result = tracking_initialize(&tracks, distribution, false); if (RET_WAS_ERROR(result)) { return result; } } else { tracks = NULL; } result = dsc_add(component, section, priority, distribution, argv[2], delete, tracks); logger_wait(); distribution_unloadoverrides(distribution); r = tracking_done(tracks); RET_ENDUPDATE(result, r); return result; } ACTION_D(y, y, y, include) { retvalue result, r; struct distribution *distribution; trackingdb tracks; component_t component = atom_unknown; if (components != NULL) { if (components->count > 1) { fprintf(stderr, "Error: Only one component is allowed with %s!\n", argv[0]); return RET_ERROR; } assert(components->count > 0); component = components->atoms[0]; } assert (argc == 3); if (!endswith(argv[2], ".changes") && !IGNORING(extension, "include called with a file not ending with '.changes'\n" "(Did you mean includedeb or includedsc?)\n")) return RET_ERROR; result = distribution_get(alldistributions, argv[1], true, &distribution); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; if (distribution->readonly) { fprintf(stderr, "Cannot add packages to read-only distribution '%s'.\n", distribution->codename); return RET_ERROR; } result = distribution_loadalloverrides(distribution); if (RET_WAS_ERROR(result)) { return result; } if (distribution->tracking != dt_NONE) { result = tracking_initialize(&tracks, distribution, false); if (RET_WAS_ERROR(result)) { return result; } } else { tracks = NULL; } result = distribution_loaduploaders(distribution); if (RET_WAS_ERROR(result)) { r = tracking_done(tracks); RET_ENDUPDATE(result, r); return result; } result = 
changes_add(tracks, packagetypes, component, architectures, section, priority, distribution, argv[2], delete); if (RET_WAS_ERROR(result)) RET_UPDATE(distribution->status, result); distribution_unloadoverrides(distribution); distribution_unloaduploaders(distribution); r = tracking_done(tracks); RET_ENDUPDATE(result, r); return result; } /***********************createsymlinks***********************************/ static bool mayaliasas(const struct distribution *alldistributions, const char *part, const char *cnpart) { const struct distribution *d; /* here it is only checked whether there is something that could * cause this link to exist. No tests whether this really will * cause it to be created (or already existing). */ for (d = alldistributions ; d != NULL ; d = d->next) { if (d->suite == NULL) continue; if (strcmp(d->suite, part) == 0 && strcmp(d->codename, cnpart) == 0) return true; if (strcmp(d->codename, part) == 0 && strcmp(d->suite, cnpart) == 0) return true; } return false; } ACTION_C(n, n, createsymlinks) { retvalue result, r; struct distribution *d, *d2; bool warned_slash = false; r = dirs_make_recursive(global.distdir); if (RET_WAS_ERROR(r)) return r; result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) { return result; } result = RET_NOTHING; for (d = alldistributions ; d != NULL ; d = d->next) { char *linkname, *buffer; size_t bufsize; int ret; const char *separator_in_suite; if (!d->selected) continue; if (d->suite == NULL || strcmp(d->suite, d->codename) == 0) continue; r = RET_NOTHING; for (d2 = alldistributions ; d2 != NULL ; d2 = d2->next) { if (!d2->selected) continue; if (d!=d2 && d2->suite!=NULL && strcmp(d->suite, d2->suite)==0) { fprintf(stderr, "Not linking %s->%s due to conflict with %s->%s\n", d->suite, d->codename, d2->suite, d2->codename); r = RET_ERROR; } else if (strcmp(d->suite, d2->codename)==0) { fprintf(stderr, "Not linking %s->%s due to conflict 
with %s\n", d->suite, d->codename, d2->codename); r = RET_ERROR; } } if (RET_WAS_ERROR(r)) { RET_UPDATE(result, r); continue; } separator_in_suite = strchr(d->suite, '/'); if (separator_in_suite != NULL) { /* things with / in it are tricky: * relative symbolic links are hard, * perhaps something else already moved * the earlier ones, ... */ const char *separator_in_codename; size_t ofs_in_suite = separator_in_suite - d->suite; char *part = strndup(d->suite, ofs_in_suite); if (FAILEDTOALLOC(part)) return RET_ERROR_OOM; /* check if this is some case we do not want to warn about: */ separator_in_codename = strchr(d->codename, '/'); if (separator_in_codename != NULL && strcmp(separator_in_codename, separator_in_suite) == 0) { /* all but the first is common: */ size_t cnofs = separator_in_codename - d->codename; char *cnpart = strndup(d->codename, cnofs); if (FAILEDTOALLOC(cnpart)) { free(part); return RET_ERROR_OOM; } if (mayaliasas(alldistributions, part, cnpart)) { if (verbose > 1) fprintf(stderr, "Not creating '%s' -> '%s' because of the '/' in it.\n" "Hopefully something else will link '%s' -> '%s' then this is not needed.\n", d->suite, d->codename, part, cnpart); free(part); free(cnpart); continue; } free(cnpart); } free(part); if (verbose >= 0 && !warned_slash) { fprintf(stderr, "Creating symlinks with '/' in them is not yet supported:\n"); warned_slash = true; } if (verbose >= 0) fprintf(stderr, "Not creating '%s' -> '%s' because of '/'.\n", d->suite, d->codename); continue; } linkname = calc_dirconcat(global.distdir, d->suite); bufsize = strlen(d->codename)+10; buffer = calloc(1, bufsize); if (FAILEDTOALLOC(linkname) || FAILEDTOALLOC(buffer)) { free(linkname); free(buffer); (void)fputs("Out of Memory!\n", stderr); return RET_ERROR_OOM; } ret = readlink(linkname, buffer, bufsize - 4); if (ret < 0 && errno == ENOENT) { ret = symlink(d->codename, linkname); if (ret != 0) { int e = errno; r = RET_ERRNO(e); fprintf(stderr, "Error %d creating symlink %s->%s: %s\n", 
e, linkname, d->codename, strerror(e)); RET_UPDATE(result, r); } else { if (verbose > 0) { printf("Created %s->%s\n", linkname, d->codename); } RET_UPDATE(result, RET_OK); } } else if (ret >= 0) { buffer[ret] = '\0'; if (ret >= ((int)bufsize) - 4) { buffer[bufsize-4]='.'; buffer[bufsize-3]='.'; buffer[bufsize-2]='.'; buffer[bufsize-1]='\0'; } if (strcmp(buffer, d->codename) == 0) { if (verbose > 2) { printf("Already ok: %s->%s\n", linkname, d->codename); } RET_UPDATE(result, RET_OK); } else { if (delete <= 0) { fprintf(stderr, "Cannot create %s as already pointing to %s instead of %s,\n" " use --delete to delete the old link before creating an new one.\n", linkname, buffer, d->codename); RET_UPDATE(result, RET_ERROR); } else { unlink(linkname); ret = symlink(d->codename, linkname); if (ret != 0) { int e = errno; r = RET_ERRNO(e); fprintf(stderr, "Error %d creating symlink %s->%s: %s\n", e, linkname, d->codename, strerror(e)); RET_UPDATE(result, r); } else { if (verbose > 0) { printf( "Replaced %s->%s\n", linkname, d->codename); } RET_UPDATE(result, RET_OK); } } } } else { int e = errno; r = RET_ERRNO(e); fprintf(stderr, "Error %d checking %s, perhaps not a symlink?: %s\n", e, linkname, strerror(e)); RET_UPDATE(result, r); } free(linkname); free(buffer); RET_UPDATE(result, r); } return result; } /***********************checkuploaders***********************************/ /* Read a fake package description from stdin */ static inline retvalue read_package_description(char **sourcename, struct strlist *sections, struct strlist *binaries, struct strlist *byhands, struct atomlist *architectures, struct signatures **signatures, char **buffer_p, size_t *bufferlen_p) { retvalue r; ssize_t got; char *buffer, *v, *p; struct strlist *l; struct signatures *s; struct signature *sig; architecture_t architecture; if (isatty(0)) { puts( "Please input the simulated package data to test.\n" "Format: (source|section|binary|byhand|architecture|signature) \n" "some keys may be given 
multiple times"); } while ((got = getline(buffer_p, bufferlen_p, stdin)) >= 0) { buffer = *buffer_p; if (got == 0 || buffer[got - 1] != '\n') { fputs("stdin is not text\n", stderr); return RET_ERROR; } buffer[--got] = '\0'; if (strncmp(buffer, "source ", 7) == 0) { if (*sourcename != NULL) { fprintf(stderr, "Source name only allowed once!\n"); return RET_ERROR; } *sourcename = strdup(buffer + 7); if (FAILEDTOALLOC(*sourcename)) return RET_ERROR_OOM; continue; } else if (strncmp(buffer, "signature ", 10) == 0) { v = buffer + 10; if (*signatures == NULL) { s = calloc(1, sizeof(struct signatures) +sizeof(struct signature)); if (FAILEDTOALLOC(s)) return RET_ERROR_OOM; } else { s = realloc(*signatures, sizeof(struct signatures) + (s->count+1) * sizeof(struct signature)); if (FAILEDTOALLOC(s)) return RET_ERROR_OOM; } *signatures = s; sig = s->signatures + s->count; s->count++; s->validcount++; sig->expired_key = false; sig->expired_signature = false; sig->revoced_key = false; sig->state = sist_valid; switch (*v) { case 'b': sig->state = sist_bad; s->validcount--; v++; break; case 'e': sig->state = sist_mostly; sig->expired_signature = true; s->validcount--; v++; break; case 'i': sig->state = sist_invalid; s->validcount--; v++; break; } p = v; while ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f')) p++; sig->keyid = strndup(v, p-v); sig->primary_keyid = NULL; if (FAILEDTOALLOC(sig->keyid)) return RET_ERROR_OOM; if (*p == ':') { p++; v = p; while ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f')) p++; if (*p != '\0') { fprintf(stderr, "Invalid character in key id: '%c'!\n", *p); return RET_ERROR; } sig->primary_keyid = strdup(v); } else if (*p != '\0') { fprintf(stderr, "Invalid character in key id: '%c'!\n", *p); return RET_ERROR; } else sig->primary_keyid = strdup(sig->keyid); if (FAILEDTOALLOC(sig->primary_keyid)) return RET_ERROR_OOM; continue; } else if (strncmp(buffer, "section ", 8) == 0) { v = buffer + 8; l = sections; } else if (strncmp(buffer, "binary ", 
7) == 0) { v = buffer + 7; l = binaries; } else if (strncmp(buffer, "byhand ", 7) == 0) { v = buffer + 7; l = byhands; } else if (strncmp(buffer, "architecture ", 13) == 0) { v = buffer + 13; r = architecture_intern(v, &architecture); if (RET_WAS_ERROR(r)) return r; r = atomlist_add(architectures, architecture); if (RET_WAS_ERROR(r)) return r; continue; } else if (strcmp(buffer, "finished") == 0) { break; } else { fprintf(stderr, "Unparseable line '%s'\n", buffer); return RET_ERROR; } r = strlist_add_dup(l, v); if (RET_WAS_ERROR(r)) return r; } if (ferror(stdin)) { int e = errno; fprintf(stderr, "Error %d reading data from stdin: %s\n", e, strerror(e)); return RET_ERRNO(e); } if (*sourcename == NULL) { fprintf(stderr, "No source name specified!\n"); return RET_ERROR; } return RET_OK; } static inline void verifystrlist(struct upload_conditions *conditions, const struct strlist *list) { int i; for (i = 0 ; i < list->count ; i++) { if (!uploaders_verifystring(conditions, list->values[i])) break; } } static inline void verifyatomlist(struct upload_conditions *conditions, const struct atomlist *list) { int i; for (i = 0 ; i < list->count ; i++) { if (!uploaders_verifyatom(conditions, list->atoms[i])) break; } } ACTION_C(n, n, checkuploaders) { retvalue result, r; struct distribution *d; char *sourcename = NULL; struct strlist sections, binaries, byhands; struct atomlist architectures; struct signatures *signatures = NULL; struct upload_conditions *conditions; bool accepted, rejected; char *buffer = NULL; size_t bufferlen = 0; int i; r = distribution_match(alldistributions, argc-1, argv+1, false, READONLY); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { return r; } for (d = alldistributions ; d != NULL ; d = d->next) { if (!d->selected) continue; r = distribution_loaduploaders(d); if (RET_WAS_ERROR(r)) return r; } strlist_init(§ions); strlist_init(&binaries); strlist_init(&byhands); atomlist_init(&architectures); r = read_package_description(&sourcename, §ions, 
&binaries, &byhands, &architectures, &signatures, &buffer, &bufferlen); free(buffer); if (RET_WAS_ERROR(r)) { free(sourcename); strlist_done(§ions); strlist_done(&byhands); atomlist_done(&architectures); signatures_free(signatures); return r; } result = RET_NOTHING; accepted = false; for (i = 1 ; !accepted && i < argc ; i++) { r = distribution_get(alldistributions, argv[i], false, &d); if (RET_WAS_ERROR(r)) { result = r; break; } r = distribution_loaduploaders(d); if (RET_WAS_ERROR(r)) { result = r; break; } if (d->uploaderslist == NULL) { printf( "'%s' would have been accepted by '%s' (as it has no uploader restrictions)\n", sourcename, d->codename); accepted = true; break; } r = uploaders_permissions(d->uploaderslist, signatures, &conditions); if (RET_WAS_ERROR(r)) { result = r; break; } rejected = false; do switch (uploaders_nextcondition(conditions)) { case uc_ACCEPTED: accepted = true; break; case uc_REJECTED: rejected = true; break; case uc_CODENAME: uploaders_verifystring(conditions, d->codename); break; case uc_SOURCENAME: uploaders_verifystring(conditions, sourcename); break; case uc_SECTIONS: verifystrlist(conditions, §ions); break; case uc_ARCHITECTURES: verifyatomlist(conditions, &architectures); break; case uc_BYHAND: verifystrlist(conditions, &byhands); break; case uc_BINARIES: verifystrlist(conditions, &byhands); break; } while (!accepted && !rejected); free(conditions); if (accepted) { printf("'%s' would have been accepted by '%s'\n", sourcename, d->codename); break; } } if (!accepted) printf( "'%s' would NOT have been accepted by any of the distributions selected.\n", sourcename); free(sourcename); strlist_done(§ions); strlist_done(&byhands); atomlist_done(&architectures); signatures_free(signatures); if (RET_WAS_ERROR(result)) return result; else if (accepted) return RET_OK; else return RET_NOTHING; } /***********************clearvanished***********************************/ ACTION_D(n, n, n, clearvanished) { retvalue result, r; struct distribution 
*d; struct strlist identifiers, codenames; bool *inuse; int i; result = database_listpackages(&identifiers); if (!RET_IS_OK(result)) { return result; } inuse = nzNEW(identifiers.count, bool); if (FAILEDTOALLOC(inuse)) { strlist_done(&identifiers); return RET_ERROR_OOM; } for (d = alldistributions; d != NULL ; d = d->next) { struct target *t; for (t = d->targets; t != NULL ; t = t->next) { int ofs = strlist_ofs(&identifiers, t->identifier); if (ofs >= 0) { inuse[ofs] = true; if (verbose > 6) printf( "Marking '%s' as used.\n", t->identifier); } else if (verbose > 3 && database_allcreated()){ fprintf(stderr, "Strange, '%s' does not appear in packages.db yet.\n", t->identifier); } } } for (i = 0 ; i < identifiers.count ; i ++) { const char *identifier = identifiers.values[i]; const char *p, *q; if (inuse[i]) continue; if (interrupted()) return RET_ERROR_INTERRUPTED; if (delete <= 0) { struct table *packages; r = database_openpackages(identifier, true, &packages); if (RET_IS_OK(r)) { if (!table_isempty(packages)) { fprintf(stderr, "There are still packages in '%s', not removing (give --delete to do so)!\n", identifier); (void)table_close(packages); continue; } r = table_close(packages); } } if (interrupted()) return RET_ERROR_INTERRUPTED; // TODO: if delete, check what is removed, so that tracking // information can be updated. 
printf( "Deleting vanished identifier '%s'.\n", identifier); /* intern component and architectures, so parsing * has no problems (actually only need component now) */ p = identifier; if (strncmp(p, "u|", 2) == 0) p += 2; p = strchr(p, '|'); if (p != NULL) { p++; q = strchr(p, '|'); if (q != NULL) { atom_t dummy; char *component = strndup(p, q-p); q++; char *architecture = strdup(q); if (FAILEDTOALLOC(component) || FAILEDTOALLOC(architecture)) { free(component); free(architecture); return RET_ERROR_OOM; } r = architecture_intern(architecture, &dummy); free(architecture); if (RET_WAS_ERROR(r)) { free(component); return r; } r = component_intern(component, &dummy); free(component); if (RET_WAS_ERROR(r)) return r; } } /* derference anything left */ references_remove(identifier); /* remove the database */ database_droppackages(identifier); } free(inuse); strlist_done(&identifiers); if (interrupted()) return RET_ERROR_INTERRUPTED; r = tracking_listdistributions(&codenames); RET_UPDATE(result, r); if (RET_IS_OK(r)) { for (d = alldistributions; d != NULL ; d = d->next) { strlist_remove(&codenames, d->codename); } for (i = 0 ; i < codenames.count ; i ++) { printf( "Deleting tracking data for vanished distribution '%s'.\n", codenames.values[i]); r = tracking_drop(codenames.values[i]); RET_UPDATE(result, r); } strlist_done(&codenames); } return result; } ACTION_B(n, n, y, listdbidentifiers) { retvalue result; struct strlist identifiers; const struct distribution *d; int i; result = database_listpackages(&identifiers); if (!RET_IS_OK(result)) { return result; } result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; result = RET_NOTHING; for (i = 0 ; i < identifiers.count ; i++) { const char *p, *q, *identifier = identifiers.values[i]; if (argc <= 1) { puts(identifier); result = RET_OK; continue; } p = identifier; if (strncmp(p, "u|", 2) == 0) p += 2; q = strchr(p, '|'); if (q == 
NULL) q = strchr(p, '\0'); for (d = alldistributions ; d != NULL ; d = d->next) { if (!d->selected) continue; if (strncmp(p, d->codename, q - p) == 0 && d->codename[q-p] == '\0') { puts(identifier); result = RET_OK; break; } } } strlist_done(&identifiers); return result; } ACTION_C(n, n, listconfidentifiers) { struct target *t; const struct distribution *d; retvalue result; result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; result = RET_NOTHING; for (d = alldistributions ; d != NULL ; d = d->next) { if (!d->selected) continue; for (t = d->targets; t != NULL ; t = t->next) { puts(t->identifier); result = RET_OK; } } return result; } ACTION_N(n, n, y, versioncompare) { retvalue r; int i; assert (argc == 3); r = properversion(argv[1]); if (RET_WAS_ERROR(r)) fprintf(stderr, "'%s' is not a proper version!\n", argv[1]); r = properversion(argv[2]); if (RET_WAS_ERROR(r)) fprintf(stderr, "'%s' is not a proper version!\n", argv[2]); r = dpkgversions_cmp(argv[1], argv[2], &i); if (RET_IS_OK(r)) { if (i < 0) { printf("'%s' is smaller than '%s'.\n", argv[1], argv[2]); } else if (i > 0) { printf("'%s' is larger than '%s'.\n", argv[1], argv[2]); } else printf("'%s' is the same as '%s'.\n", argv[1], argv[2]); } return r; } /***********************processincoming********************************/ ACTION_D(n, n, y, processincoming) { struct distribution *d; for (d = alldistributions ; d != NULL ; d = d->next) d->selected = true; return process_incoming(alldistributions, argv[1], (argc==3) ? 
argv[2] : NULL); } /***********************gensnapshot********************************/ ACTION_R(n, n, y, y, gensnapshot) { retvalue result; struct distribution *distribution; assert (argc == 3); result = distribution_get(alldistributions, argv[1], false, &distribution); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; return distribution_snapshot(distribution, argv[2]); } /***********************rerunnotifiers********************************/ static retvalue rerunnotifiersintarget(struct distribution *d, struct target *target, UNUSED(void *dummy)) { if (!logger_rerun_needs_target(d->logger, target)) return RET_NOTHING; return RET_OK; } ACTION_B(y, n, y, rerunnotifiers) { retvalue result, r; struct distribution *d; result = distribution_match(alldistributions, argc-1, argv+1, false, READONLY); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; result = RET_NOTHING; for (d = alldistributions ; d != NULL ; d = d->next) { if (!d->selected) continue; if (d->logger == NULL) continue; if (verbose > 0) { printf("Processing %s...\n", d->codename); } r = logger_prepare(d->logger); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; r = distribution_foreach_package(d, components, architectures, packagetypes, package_rerunnotifiers, rerunnotifiersintarget, NULL); logger_wait(); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; } return result; } /*********************** flood ****************************/ ACTION_D(y, n, y, flood) { retvalue result, r; struct distribution *distribution; trackingdb tracks; component_t architecture = atom_unknown; result = distribution_get(alldistributions, argv[1], true, &distribution); assert (result != RET_NOTHING); if (RET_WAS_ERROR(result)) return result; if (distribution->readonly) { fprintf(stderr, "Cannot add packages to read-only distribution '%s'.\n", distribution->codename); return RET_ERROR; } if (argc == 3) { architecture = architecture_find(argv[2]); if (!atom_defined(architecture)) 
{ fprintf(stderr, "Error: Unknown architecture '%s'!\n", argv[2]); return RET_ERROR; } if (architecture == architecture_source) { fprintf(stderr, "Error: Architecture 'source' does not make sense with 'flood'!\n"); return RET_ERROR; } if (!atomlist_in(&distribution->architectures, architecture)) { fprintf(stderr, "Error: Architecture '%s' not part of '%s'!\n", argv[2], distribution->codename); return RET_ERROR; } } result = distribution_prepareforwriting(distribution); if (RET_WAS_ERROR(result)) { return result; } if (distribution->tracking != dt_NONE) { result = tracking_initialize(&tracks, distribution, false); if (RET_WAS_ERROR(result)) { return result; } } else tracks = NULL; result = flood(distribution, components, architectures, packagetypes, architecture, tracks); if (RET_WAS_ERROR(result)) RET_UPDATE(distribution->status, result); if (tracks != NULL) { r = tracking_done(tracks); RET_ENDUPDATE(result, r); } return result; } /*********************** unusedsources ****************************/ ACTION_B(n, n, y, unusedsources) { retvalue r; r = distribution_match(alldistributions, argc-1, argv+1, false, READONLY); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; return unusedsources(alldistributions); } /*********************** missingsource ****************************/ ACTION_B(n, n, y, sourcemissing) { retvalue r; r = distribution_match(alldistributions, argc-1, argv+1, false, READONLY); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; return sourcemissing(alldistributions); } /*********************** reportcruft ****************************/ ACTION_B(n, n, y, reportcruft) { retvalue r; r = distribution_match(alldistributions, argc-1, argv+1, false, READONLY); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; return reportcruft(alldistributions); } /*********************/ /* argument handling */ /*********************/ // TODO: this has become an utter mess and needs some serious cleaning... 
#define NEED_REFERENCES 1 /* FILESDB now includes REFERENCED... */ #define NEED_FILESDB 2 #define NEED_DEREF 4 #define NEED_DATABASE 8 #define NEED_CONFIG 16 #define NEED_NO_PACKAGES 32 #define IS_RO 64 #define MAY_UNUSED 128 #define NEED_ACT 256 #define NEED_SP 512 #define NEED_DELNEW 1024 #define NEED_RESTRICT 2048 #define A_N(w) action_n_n_n_ ## w, 0 #define A_C(w) action_c_n_n_ ## w, NEED_CONFIG #define A_ROB(w) action_b_n_n_ ## w, NEED_DATABASE|IS_RO #define A_ROBact(w) action_b_y_n_ ## w, NEED_ACT|NEED_DATABASE|IS_RO #define A_L(w) action_l_n_n_ ## w, NEED_DATABASE #define A_B(w) action_b_n_n_ ## w, NEED_DATABASE #define A_Bact(w) action_b_y_n_ ## w, NEED_ACT|NEED_DATABASE #define A_F(w) action_f_n_n_ ## w, NEED_DATABASE|NEED_FILESDB #define A_Fact(w) action_f_y_n_ ## w, NEED_ACT|NEED_DATABASE|NEED_FILESDB #define A_R(w) action_r_n_n_ ## w, NEED_DATABASE|NEED_REFERENCES #define A__F(w) action_f_n_n_ ## w, NEED_DATABASE|NEED_FILESDB|NEED_NO_PACKAGES #define A__R(w) action_r_n_n_ ## w, NEED_DATABASE|NEED_REFERENCES|NEED_NO_PACKAGES #define A__T(w) action_t_n_n_ ## w, NEED_DATABASE|NEED_NO_PACKAGES|MAY_UNUSED #define A_RF(w) action_rf_n_n_ ## w, NEED_DATABASE|NEED_FILESDB|NEED_REFERENCES #define A_RFact(w) action_rf_y_n_ ## w, NEED_ACT|NEED_DATABASE|NEED_FILESDB|NEED_REFERENCES /* to dereference files, one needs files and references database: */ #define A_D(w) action_d_n_n_ ## w, NEED_DATABASE|NEED_FILESDB|NEED_REFERENCES|NEED_DEREF #define A_Dact(w) action_d_y_n_ ## w, NEED_ACT|NEED_DATABASE|NEED_FILESDB|NEED_REFERENCES|NEED_DEREF #define A_Dactsp(w) action_d_y_y_ ## w, NEED_ACT|NEED_SP|NEED_DATABASE|NEED_FILESDB|NEED_REFERENCES|NEED_DEREF static const struct action { const char *name; retvalue (*start)( /*@null@*/struct distribution *, /*@null@*/const char *priority, /*@null@*/const char *section, /*@null@*/const struct atomlist *, /*@null@*/const struct atomlist *, /*@null@*/const struct atomlist *, int argc, const char *argv[]); int needs; int minargs, 
maxargs; const char *wrongargmessage; } all_actions[] = { {"__d", A_N(printargs), -1, -1, NULL}, {"__dumpuncompressors", A_N(dumpuncompressors), 0, 0, "__dumpuncompressors"}, {"__uncompress", A_N(uncompress), 3, 3, "__uncompress .gz|.bz2|.lzma|.xz|.lz "}, {"__extractsourcesection", A_N(extractsourcesection), 1, 1, "__extractsourcesection <.dsc-file>"}, {"__extractcontrol", A_N(extractcontrol), 1, 1, "__extractcontrol <.deb-file>"}, {"__extractfilelist", A_N(extractfilelist), 1, 1, "__extractfilelist <.deb-file>"}, {"__checkuploaders", A_C(checkuploaders), 1, -1, "__checkuploaders "}, {"_versioncompare", A_N(versioncompare), 2, 2, "_versioncompare "}, {"_detect", A__F(detect), -1, -1, NULL}, {"_forget", A__F(forget), -1, -1, NULL}, {"_listmd5sums", A__F(listmd5sums), 0, 0, "_listmd5sums"}, {"_listchecksums", A__F(listchecksums), 0, 0, "_listchecksums"}, {"_addchecksums", A__F(addmd5sums), 0, 0, "_addchecksums < data"}, {"_addmd5sums", A__F(addmd5sums), 0, 0, "_addmd5sums < data"}, {"_dumpcontents", A_ROB(dumpcontents)|MAY_UNUSED, 1, 1, "_dumpcontents "}, {"_removereferences", A__R(removereferences), 1, 1, "_removereferences "}, {"_addreference", A__R(addreference), 2, 2, "_addreference "}, {"_fakeemptyfilelist", A__F(fakeemptyfilelist), 1, 1, "_fakeemptyfilelist "}, {"_addpackage", A_Dact(addpackage), 3, -1, "-C -A -T _addpackage "}, {"remove", A_Dact(remove), 2, -1, "[-C ] [-A ] [-T ] remove "}, {"removesrc", A_D(removesrc), 2, 3, "removesrc []"}, {"removesrcs", A_D(removesrcs), 2, -1, "removesrcs ([=])+"}, {"ls", A_ROBact(ls), 1, 1, "[-C ] [-A ] [-T ] ls "}, {"lsbycomponent", A_ROBact(lsbycomponent), 1, 1, "[-C ] [-A ] [-T ] lsbycomponent "}, {"list", A_ROBact(list), 1, 2, "[-C ] [-A ] [-T ] list []"}, {"listfilter", A_ROBact(listfilter), 2, 2, "[-C ] [-A ] [-T ] listfilter "}, {"removefilter", A_Dact(removefilter), 2, 2, "[-C ] [-A ] [-T ] removefilter "}, {"listmatched", A_ROBact(listmatched), 2, 2, "[-C ] [-A ] [-T ] listmatched "}, {"removematched", 
A_Dact(removematched), 2, 2, "[-C ] [-A ] [-T ] removematched "}, {"createsymlinks", A_C(createsymlinks), 0, -1, "createsymlinks []"}, {"export", A_F(export), 0, -1, "export []"}, {"check", A_RFact(check), 0, -1, "check []"}, {"sizes", A_RF(sizes), 0, -1, "check []"}, {"reoverride", A_Fact(reoverride), 0, -1, "[-T ...] [-C ...] [-A ...] reoverride []"}, {"repairdescriptions", A_Fact(repairdescriptions), 0, -1, "[-C ...] [-A ...] [force]repairdescriptions []"}, {"forcerepairdescriptions", A_Fact(repairdescriptions), 0, -1, "[-C ...] [-A ...] [force]repairdescriptions []"}, {"redochecksums", A_Fact(redochecksums), 0, -1, "[-T ...] [-C ...] [-A ...] redo []"}, {"collectnewchecksums", A_F(collectnewchecksums), 0, 0, "collectnewchecksums"}, {"checkpool", A_F(checkpool), 0, 1, "checkpool [fast]"}, {"rereference", A_R(rereference), 0, -1, "rereference []"}, {"dumpreferences", A_R(dumpreferences)|MAY_UNUSED, 0, 0, "dumpreferences", }, {"dumpunreferenced", A_RF(dumpunreferenced), 0, 0, "dumpunreferenced", }, {"deleteifunreferenced", A_RF(deleteifunreferenced), 0, -1, "deleteifunreferenced"}, {"deleteunreferenced", A_RF(deleteunreferenced), 0, 0, "deleteunreferenced", }, {"retrack", A_D(retrack), 0, -1, "retrack []"}, {"dumptracks", A_ROB(dumptracks)|MAY_UNUSED, 0, -1, "dumptracks []"}, {"removealltracks", A_D(removealltracks)|MAY_UNUSED, 1, -1, "removealltracks "}, {"tidytracks", A_D(tidytracks), 0, -1, "tidytracks []"}, {"removetrack", A_D(removetrack), 3, 3, "removetrack "}, {"update", A_Dact(update)|NEED_RESTRICT, 0, -1, "update []"}, {"checkupdate", A_Bact(checkupdate)|NEED_RESTRICT, 0, -1, "checkupdate []"}, {"dumpupdate", A_Bact(dumpupdate)|NEED_RESTRICT, 0, -1, "dumpupdate []"}, {"predelete", A_Dact(predelete), 0, -1, "predelete []"}, {"pull", A_Dact(pull)|NEED_RESTRICT, 0, -1, "pull []"}, {"copy", A_Dact(copy), 3, -1, "[-C ] [-A ] [-T ] copy "}, {"copysrc", A_Dact(copysrc), 3, -1, "[-C ] [-A ] [-T ] copysrc []"}, {"copymatched", A_Dact(copymatched), 3, 3, "[-C ] [-A 
] [-T ] copymatched "}, {"copyfilter", A_Dact(copyfilter), 3, 3, "[-C ] [-A ] [-T ] copyfilter "}, {"restore", A_Dact(restore), 3, -1, "[-C ] [-A ] [-T ] restore "}, {"restoresrc", A_Dact(restoresrc), 3, -1, "[-C ] [-A ] [-T ] restoresrc []"}, {"restorematched", A_Dact(restorematched), 3, 3, "[-C ] [-A ] [-T ] restorematched "}, {"restorefilter", A_Dact(restorefilter), 3, 3, "[-C ] [-A ] [-T ] restorefilter "}, {"dumppull", A_Bact(dumppull)|NEED_RESTRICT, 0, -1, "dumppull []"}, {"checkpull", A_Bact(checkpull)|NEED_RESTRICT, 0, -1, "checkpull []"}, {"includedeb", A_Dactsp(includedeb)|NEED_DELNEW, 2, -1, "[--delete] includedeb <.deb-file>"}, {"includeudeb", A_Dactsp(includedeb)|NEED_DELNEW, 2, -1, "[--delete] includeudeb <.udeb-file>"}, {"includedsc", A_Dactsp(includedsc)|NEED_DELNEW, 2, 2, "[--delete] includedsc "}, {"include", A_Dactsp(include)|NEED_DELNEW, 2, 2, "[--delete] include <.changes-file>"}, {"generatefilelists", A_F(generatefilelists), 0, 1, "generatefilelists [reread]"}, {"translatefilelists", A__T(translatefilelists), 0, 0, "translatefilelists"}, {"translatelegacychecksums", A_N(translatelegacychecksums), 0, 0, "translatelegacychecksums"}, {"_listconfidentifiers", A_C(listconfidentifiers), 0, -1, "_listconfidentifiers"}, {"_listdbidentifiers", A_ROB(listdbidentifiers)|MAY_UNUSED, 0, -1, "_listdbidentifiers"}, {"clearvanished", A_D(clearvanished)|MAY_UNUSED, 0, 0, "[--delete] clearvanished"}, {"processincoming", A_D(processincoming)|NEED_DELNEW, 1, 2, "processincoming [<.changes file>]"}, {"gensnapshot", A_R(gensnapshot), 2, 2, "gensnapshot "}, {"rerunnotifiers", A_Bact(rerunnotifiers), 0, -1, "rerunnotifiers []"}, {"cleanlists", A_L(cleanlists), 0, 0, "cleanlists"}, {"build-needing", A_ROBact(buildneeded), 2, 3, "[-C ] build-needing []"}, {"flood", A_Dact(flood)|MAY_UNUSED, 1, 2, "[-C ] [-A ] [-T ] flood []"}, {"unusedsources", A_B(unusedsources), 0, -1, "unusedsources []"}, {"sourcemissing", A_B(sourcemissing), 0, -1, "sourcemissing []"}, 
{"reportcruft", A_B(reportcruft), 0, -1, "reportcruft []"}, {NULL, NULL , 0, 0, 0, NULL} }; #undef A_N #undef A_B #undef A_ROB #undef A_C #undef A_F #undef A_R #undef A_RF #undef A_F #undef A__T static retvalue callaction(command_t command, const struct action *action, int argc, const char *argv[]) { retvalue result, r; struct distribution *alldistributions = NULL; bool deletederef, deletenew; int needs; struct atomlist as, *architectures = NULL; struct atomlist cs, *components = NULL; struct atomlist ps, *packagetypes = NULL; assert(action != NULL); causingcommand = command; if (action->minargs >= 0 && argc < 1 + action->minargs) { fprintf(stderr, "Error: Too few arguments for command '%s'!\nSyntax: reprepro %s\n", argv[0], action->wrongargmessage); return RET_ERROR; } if (action->maxargs >= 0 && argc > 1 + action->maxargs) { fprintf(stderr, "Error: Too many arguments for command '%s'!\nSyntax: reprepro %s\n", argv[0], action->wrongargmessage); return RET_ERROR; } needs = action->needs; if (!ISSET(needs, NEED_ACT) && (x_architecture != NULL)) { if (!IGNORING(unusedoption, "Action '%s' cannot be restricted to an architecture!\n" "neither --archiecture nor -A make sense here.\n", action->name)) return RET_ERROR; } if (!ISSET(needs, NEED_ACT) && (x_component != NULL)) { if (!IGNORING(unusedoption, "Action '%s' cannot be restricted to a component!\n" "neither --component nor -C make sense here.\n", action->name)) return RET_ERROR; } if (!ISSET(needs, NEED_ACT) && (x_packagetype != NULL)) { if (!IGNORING(unusedoption, "Action '%s' cannot be restricted to a packagetype!\n" "neither --packagetype nor -T make sense here.\n", action->name)) return RET_ERROR; } if (!ISSET(needs, NEED_SP) && (x_section != NULL)) { if (!IGNORING(unusedoption, "Action '%s' cannot take a section option!\n" "neither --section nor -S make sense here.\n", action->name)) return RET_ERROR; } if (!ISSET(needs, NEED_SP) && (x_priority != NULL)) { if (!IGNORING(unusedoption, "Action '%s' cannot take a 
priority option!\n" "neither --priority nor -P make sense here.\n", action->name)) return RET_ERROR; } if (!ISSET(needs, NEED_RESTRICT) && (cmdline_bin_filter.set || cmdline_src_filter.set)) { if (!IGNORING(unusedoption, "Action '%s' cannot take a --restrict-* option!\n", action->name)) return RET_ERROR; } if (ISSET(needs, NEED_DATABASE)) needs |= NEED_CONFIG; if (ISSET(needs, NEED_CONFIG)) { r = distribution_readall(&alldistributions); if (RET_WAS_ERROR(r)) return r; } if (!ISSET(needs, NEED_DATABASE)) { assert ((needs & ~NEED_CONFIG) == 0); result = action->start(alldistributions, x_section, x_priority, atom_unknown, atom_unknown, atom_unknown, argc, argv); logger_wait(); if (!RET_WAS_ERROR(result)) { r = distribution_exportlist(export, alldistributions); RET_ENDUPDATE(result, r); } r = distribution_freelist(alldistributions); RET_ENDUPDATE(result, r); return result; } if (ISSET(needs, NEED_ACT)) { const char *unknownitem; if (x_architecture != NULL) { r = atomlist_filllist(at_architecture, &as, x_architecture, &unknownitem); if (r == RET_NOTHING) { fprintf(stderr, "Error: Architecture '%s' as given to --architecture is not know.\n" "(it does not appear as architecture in %s/distributions (did you mistype?))\n", unknownitem, global.confdir); r = RET_ERROR; } if (RET_WAS_ERROR(r)) { (void)distribution_freelist(alldistributions); return r; } architectures = &as; } else { atomlist_init(&as); } if (x_component != NULL) { r = atomlist_filllist(at_component, &cs, x_component, &unknownitem); if (r == RET_NOTHING) { fprintf(stderr, "Error: Component '%s' as given to --component is not know.\n" "(it does not appear as component in %s/distributions (did you mistype?))\n", unknownitem, global.confdir); r = RET_ERROR; } if (RET_WAS_ERROR(r)) { (void)distribution_freelist(alldistributions); return r; } components = &cs; } else { atomlist_init(&cs); } if (x_packagetype != NULL) { r = atomlist_filllist(at_packagetype, &ps, x_packagetype, &unknownitem); if (r == RET_NOTHING) { 
fprintf(stderr, "Error: Packagetype '%s' as given to --packagetype is not know.\n" "(only dsc, deb, udeb and combinations of those are allowed)\n", unknownitem); r = RET_ERROR; } if (RET_WAS_ERROR(r)) { (void)distribution_freelist(alldistributions); return r; } packagetypes = &ps; } else { atomlist_init(&ps); } if (ps.count == 1 && ps.atoms[0] == pt_dsc && limitations_missed(architectures, architecture_source)) { fprintf(stderr, "Error: -T dsc is not possible with -A not including source!\n"); return RET_ERROR; } if (as.count == 1 && as.atoms[0] == architecture_source && limitations_missed(packagetypes, pt_dsc)) { fprintf(stderr, "Error: -A source is not possible with -T not including dsc!\n"); return RET_ERROR; } } deletederef = ISSET(needs, NEED_DEREF) && !keepunreferenced; deletenew = ISSET(needs, NEED_DELNEW) && !keepunusednew; result = database_create(alldistributions, fast, ISSET(needs, NEED_NO_PACKAGES), ISSET(needs, MAY_UNUSED), ISSET(needs, IS_RO), waitforlock, verbosedatabase || (verbose >= 30)); if (!RET_IS_OK(result)) { (void)distribution_freelist(alldistributions); return result; } /* adding files may check references to see if they were added */ if (ISSET(needs, NEED_FILESDB)) needs |= NEED_REFERENCES; if (ISSET(needs, NEED_REFERENCES)) result = database_openreferences(); assert (result != RET_NOTHING); if (RET_IS_OK(result)) { if (ISSET(needs, NEED_FILESDB)) result = database_openfiles(); if (RET_IS_OK(result)) { if (outhook != NULL) { r = outhook_start(); RET_UPDATE(result, r); } } assert (result != RET_NOTHING); if (RET_IS_OK(result)) { if (deletederef) { assert (ISSET(needs, NEED_REFERENCES)); } if (!interrupted()) { result = action->start(alldistributions, x_section, x_priority, architectures, components, packagetypes, argc, argv); /* wait for package specific loggers */ logger_wait(); /* remove files added but not used */ pool_tidyadded(deletenew); /* tell an outhook about added files */ if (outhook != NULL) pool_sendnewfiles(); /* export 
changed/lookedat distributions */ if (!RET_WAS_ERROR(result)) { r = distribution_exportlist(export, alldistributions); RET_ENDUPDATE(result, r); } /* delete files losing references, or * tell how many lost their references */ // TODO: instead check if any distribution that // was not exported lost files // (and in a far future do not remove references // before the index is written) if (deletederef && RET_WAS_ERROR(result)) { deletederef = false; if (pool_havedereferenced) { fprintf(stderr, "Not deleting possibly left over files due to previous errors.\n" "(To keep the files in the still existing index files from vanishing)\n" "Use dumpunreferenced/deleteunreferenced to show/delete files without references.\n"); } } r = pool_removeunreferenced(deletederef); RET_ENDUPDATE(result, r); if (outhook != NULL) { if (interrupted()) r = RET_ERROR_INTERRUPTED; else r = outhook_call(outhook); RET_ENDUPDATE(result, r); } } } } if (!interrupted()) { logger_wait(); } if (ISSET(needs, NEED_ACT)) { atomlist_done(&as); atomlist_done(&cs); atomlist_done(&ps); } logger_warn_waiting(); r = database_close(); RET_ENDUPDATE(result, r); r = distribution_freelist(alldistributions); RET_ENDUPDATE(result, r); return result; } enum { LO_DELETE=1, LO_KEEPUNREFERENCED, LO_KEEPUNUSEDNEW, LO_KEEPUNNEEDEDLISTS, LO_NOTHINGISERROR, LO_NOLISTDOWNLOAD, LO_ASKPASSPHRASE, LO_ONLYSMALLDELETES, LO_KEEPDIRECTORIES, LO_KEEPTEMPORARIES, LO_FAST, LO_SKIPOLD, LO_GUESSGPGTTY, LO_NODELETE, LO_NOKEEPUNREFERENCED, LO_NOKEEPUNUSEDNEW, LO_NOKEEPUNNEEDEDLISTS, LO_NONOTHINGISERROR, LO_LISTDOWNLOAD, LO_NOASKPASSPHRASE, LO_NOONLYSMALLDELETES, LO_NOKEEPDIRECTORIES, LO_NOKEEPTEMPORARIES, LO_NOFAST, LO_NOSKIPOLD, LO_NOGUESSGPGTTY, LO_VERBOSEDB, LO_NOVERBOSEDB, LO_EXPORT, LO_OUTDIR, LO_DISTDIR, LO_DBDIR, LO_LOGDIR, LO_LISTDIR, LO_OVERRIDEDIR, LO_CONFDIR, LO_METHODDIR, LO_VERSION, LO_WAITFORLOCK, LO_SPACECHECK, LO_SAFETYMARGIN, LO_DBSAFETYMARGIN, LO_GUNZIP, LO_BUNZIP2, LO_UNLZMA, LO_UNXZ, LO_LZIP, LO_GNUPGHOME, 
LO_LISTFORMAT, LO_LISTSKIP, LO_LISTMAX, LO_MORGUEDIR, LO_SHOWPERCENT, LO_RESTRICT_BIN, LO_RESTRICT_SRC, LO_RESTRICT_FILE_BIN, LO_RESTRICT_FILE_SRC, LO_ENDHOOK, LO_OUTHOOK, LO_UNIGNORE}; static int longoption = 0; const char *programname; static void setexport(const char *argument) { if (strcasecmp(argument, "silent-never") == 0) { CONFIGSET(export, EXPORT_SILENT_NEVER); return; } if (strcasecmp(argument, "never") == 0) { CONFIGSET(export, EXPORT_NEVER); return; } if (strcasecmp(argument, "changed") == 0) { CONFIGSET(export, EXPORT_CHANGED); return; } if (strcasecmp(argument, "normal") == 0) { CONFIGSET(export, EXPORT_NORMAL); return; } if (strcasecmp(argument, "lookedat") == 0) { CONFIGSET(export, EXPORT_NORMAL); return; } if (strcasecmp(argument, "force") == 0) { CONFIGSET(export, EXPORT_FORCE); return; } fprintf(stderr, "Error: --export needs an argument of 'never', 'normal' or 'force', but got '%s'\n", argument); exit(EXIT_FAILURE); } static unsigned long long parse_number(const char *name, const char *argument, long long max) { long long l; char *p; l = strtoll(argument, &p, 10); if (p==NULL || *p != '\0' || l < 0) { fprintf(stderr, "Invalid argument to %s: '%s'\n", name, argument); exit(EXIT_FAILURE); } if (l == LLONG_MAX || l > max) { fprintf(stderr, "Too large argument for to %s: '%s'\n", name, argument); exit(EXIT_FAILURE); } return l; } static void handle_option(int c, const char *argument) { retvalue r; int i; switch (c) { case 'h': printf( "reprepro - Produce and Manage a Debian package repository\n\n" "options:\n" " -h, --help: Show this help\n" " -i --ignore : Ignore errors of type .\n" " --keepunreferencedfiles: Do not delete files no longer needed.\n" " --delete: Delete included files if reasonable.\n" " -b, --basedir : Base directory\n" " --outdir : Set pool and dists base directory\n" " --distdir : Override dists directory.\n" " --dbdir : Directory to place the database in.\n" " --listdir : Directory to place downloaded lists in.\n" " --confdir : 
Directory to search configuration in.\n" " --logdir : Directory to put requeted log files in.\n" " --methodir : Use instead of /usr/lib/apt/methods/\n" " -S, --section
: Force include* to set section.\n" " -P, --priority : Force include* to set priority.\n" " -C, --component : Add,list or delete only in component.\n" " -A, --architecture : Add,list or delete only to architecture.\n" " -T, --type : Add,list or delete only type (dsc,deb,udeb).\n" "\n" "actions (selection, for more see manpage):\n" " dumpreferences: Print all saved references\n" " dumpunreferenced: Print registered files without reference\n" " deleteunreferenced: Delete and forget all unreferenced files\n" " checkpool: Check if all files in the pool are still in proper shape.\n" " check []\n" " Check for all needed files to be registered properly.\n" " export []\n" " Force (re)generation of Packages.gz/Packages/Sources.gz/Release\n" " update []\n" " Update the given distributions from the configured sources.\n" " remove \n" " Remove the given package from the specified distribution.\n" " include <.changes-file>\n" " Include the given upload.\n" " includedeb <.deb-file>\n" " Include the given binary package.\n" " includeudeb <.udeb-file>\n" " Include the given installer binary package.\n" " includedsc <.dsc-file>\n" " Include the given source package.\n" " list \n" " List all packages by the given name occurring in the given distribution.\n" " listfilter \n" " List all packages in the given distribution matching the condition.\n" " clearvanished\n" " Remove everything no longer referenced in the distributions config file.\n" "\n"); exit(EXIT_SUCCESS); case '\0': switch (longoption) { case LO_UNIGNORE: r = set_ignore(argument, false, config_state); if (RET_WAS_ERROR(r)) { exit(EXIT_FAILURE); } break; case LO_SHOWPERCENT: global.showdownloadpercent++; break; case LO_DELETE: delete++; break; case LO_NODELETE: delete--; break; case LO_KEEPUNREFERENCED: CONFIGSET(keepunreferenced, true); break; case LO_NOKEEPUNREFERENCED: CONFIGSET(keepunreferenced, false); break; case LO_KEEPUNUSEDNEW: CONFIGSET(keepunusednew, true); break; case LO_NOKEEPUNUSEDNEW: 
CONFIGSET(keepunusednew, false); break; case LO_KEEPUNNEEDEDLISTS: /* this is the only option now and ignored * for compatibility reasond */ break; case LO_NOKEEPUNNEEDEDLISTS: fprintf(stderr, "Warning: --nokeepuneededlists no longer exists.\n" "Use cleanlists to clean manually.\n"); break; case LO_KEEPTEMPORARIES: CONFIGGSET(keeptemporaries, true); break; case LO_NOKEEPTEMPORARIES: CONFIGGSET(keeptemporaries, false); break; case LO_ONLYSMALLDELETES: CONFIGGSET(onlysmalldeletes, true); break; case LO_NOONLYSMALLDELETES: CONFIGGSET(onlysmalldeletes, false); break; case LO_KEEPDIRECTORIES: CONFIGGSET(keepdirectories, true); break; case LO_NOKEEPDIRECTORIES: CONFIGGSET(keepdirectories, false); break; case LO_NOTHINGISERROR: CONFIGSET(nothingiserror, true); break; case LO_NONOTHINGISERROR: CONFIGSET(nothingiserror, false); break; case LO_NOLISTDOWNLOAD: CONFIGSET(nolistsdownload, true); break; case LO_LISTDOWNLOAD: CONFIGSET(nolistsdownload, false); break; case LO_ASKPASSPHRASE: CONFIGSET(askforpassphrase, true); break; case LO_NOASKPASSPHRASE: CONFIGSET(askforpassphrase, false); break; case LO_GUESSGPGTTY: CONFIGSET(guessgpgtty, true); break; case LO_NOGUESSGPGTTY: CONFIGSET(guessgpgtty, false); break; case LO_SKIPOLD: CONFIGSET(skipold, true); break; case LO_NOSKIPOLD: CONFIGSET(skipold, false); break; case LO_FAST: CONFIGSET(fast, true); break; case LO_NOFAST: CONFIGSET(fast, false); break; case LO_VERBOSEDB: CONFIGSET(verbosedatabase, true); break; case LO_NOVERBOSEDB: CONFIGSET(verbosedatabase, false); break; case LO_EXPORT: setexport(argument); break; case LO_OUTDIR: CONFIGDUP(x_outdir, argument); break; case LO_DISTDIR: CONFIGDUP(x_distdir, argument); break; case LO_DBDIR: CONFIGDUP(x_dbdir, argument); break; case LO_LISTDIR: CONFIGDUP(x_listdir, argument); break; case LO_CONFDIR: CONFIGDUP(x_confdir, argument); break; case LO_LOGDIR: CONFIGDUP(x_logdir, argument); break; case LO_METHODDIR: CONFIGDUP(x_methoddir, argument); break; case LO_MORGUEDIR: 
CONFIGDUP(x_morguedir, argument); break; case LO_VERSION: fprintf(stderr, "%s: This is " PACKAGE " version " VERSION "\n", programname); exit(EXIT_SUCCESS); case LO_WAITFORLOCK: CONFIGSET(waitforlock, parse_number( "--waitforlock", argument, LONG_MAX)); break; case LO_SPACECHECK: if (strcasecmp(argument, "none") == 0) { CONFIGSET(spacecheckmode, scm_NONE); } else if (strcasecmp(argument, "full") == 0) { CONFIGSET(spacecheckmode, scm_FULL); } else { fprintf(stderr, "Unknown --spacecheck argument: '%s'!\n", argument); exit(EXIT_FAILURE); } break; case LO_SAFETYMARGIN: CONFIGSET(reservedotherspace, parse_number( "--safetymargin", argument, LONG_MAX)); break; case LO_DBSAFETYMARGIN: CONFIGSET(reserveddbspace, parse_number( "--dbsafetymargin", argument, LONG_MAX)); break; case LO_GUNZIP: CONFIGDUP(gunzip, argument); break; case LO_BUNZIP2: CONFIGDUP(bunzip2, argument); break; case LO_UNLZMA: CONFIGDUP(unlzma, argument); break; case LO_UNXZ: CONFIGDUP(unxz, argument); break; case LO_LZIP: CONFIGDUP(lunzip, argument); break; case LO_GNUPGHOME: CONFIGDUP(gnupghome, argument); break; case LO_ENDHOOK: CONFIGDUP(endhook, argument); break; case LO_OUTHOOK: CONFIGDUP(outhook, argument); break; case LO_LISTMAX: i = parse_number("--list-max", argument, INT_MAX); if (i == 0) i = -1; CONFIGSET(listmax, i); break; case LO_LISTSKIP: i = parse_number("--list-skip", argument, INT_MAX); CONFIGSET(listskip, i); break; case LO_LISTFORMAT: if (strcmp(argument, "NONE") == 0) { CONFIGSET(listformat, NULL); } else CONFIGDUP(listformat, argument); break; case LO_RESTRICT_BIN: r = filterlist_cmdline_add_pkg(false, argument); if (RET_WAS_ERROR(r)) exit(EXIT_FAILURE); break; case LO_RESTRICT_SRC: r = filterlist_cmdline_add_pkg(true, argument); if (RET_WAS_ERROR(r)) exit(EXIT_FAILURE); break; case LO_RESTRICT_FILE_BIN: r = filterlist_cmdline_add_file(false, argument); if (RET_WAS_ERROR(r)) exit(EXIT_FAILURE); break; case LO_RESTRICT_FILE_SRC: r = filterlist_cmdline_add_file(true, argument); if 
(RET_WAS_ERROR(r)) exit(EXIT_FAILURE); break; default: fputs( "Error parsing arguments!\n", stderr); exit(EXIT_FAILURE); } longoption = 0; break; case 's': verbose--; break; case 'v': verbose++; break; case 'V': verbose+=5; break; case 'f': fprintf(stderr, "Ignoring no longer existing option -f/--force!\n"); break; case 'b': CONFIGDUP(x_basedir, argument); break; case 'i': r = set_ignore(argument, true, config_state); if (RET_WAS_ERROR(r)) { exit(EXIT_FAILURE); } break; case 'C': if (x_component != NULL && strcmp(x_component, argument) != 0) { fprintf(stderr, "Multiple '-%c' are not supported!\n", 'C'); exit(EXIT_FAILURE); } CONFIGDUP(x_component, argument); break; case 'A': if (x_architecture != NULL && strcmp(x_architecture, argument) != 0) { fprintf(stderr, "Multiple '-%c' are not supported!\n", 'A'); exit(EXIT_FAILURE); } CONFIGDUP(x_architecture, argument); break; case 'T': if (x_packagetype != NULL && strcmp(x_packagetype, argument) != 0) { fprintf(stderr, "Multiple '-%c' are not supported!\n", 'T'); exit(EXIT_FAILURE); } CONFIGDUP(x_packagetype, argument); break; case 'S': if (x_section != NULL && strcmp(x_section, argument) != 0) { fprintf(stderr, "Multiple '-%c' are not supported!\n", 'S'); exit(EXIT_FAILURE); } CONFIGDUP(x_section, argument); break; case 'P': if (x_priority != NULL && strcmp(x_priority, argument) != 0) { fprintf(stderr, "Multiple '-%c' are not supported!\n", 'P'); exit(EXIT_FAILURE); } CONFIGDUP(x_priority, argument); break; case '?': /* getopt_long should have already given an error msg */ exit(EXIT_FAILURE); default: fprintf(stderr, "Not supported option '-%c'\n", c); exit(EXIT_FAILURE); } } static volatile bool was_interrupted = false; static bool interruption_printed = false; bool interrupted(void) { if (was_interrupted) { if (!interruption_printed) { interruption_printed = true; fprintf(stderr, "\n\nInterruption in progress, interrupt again to force-stop it (and risking database corruption!)\n\n"); } return true; } else return false; 
} static void interrupt_signaled(int) /*__attribute__((signal))*/; static void interrupt_signaled(UNUSED(int s)) { was_interrupted = true; } static void myexit(int) __attribute__((__noreturn__)); static void myexit(int status) { free(x_dbdir); free(x_distdir); free(x_listdir); free(x_logdir); free(x_confdir); free(x_basedir); free(x_outdir); free(x_methoddir); free(x_component); free(x_architecture); free(x_packagetype); free(x_section); free(x_priority); free(x_morguedir); free(gnupghome); free(endhook); free(outhook); pool_free(); exit(status); } static void disallow_plus_prefix(const char *dir, const char *name, const char *allowed) { if (dir[0] != '+') return; if (dir[1] == '\0' || dir[2] != '/') { fprintf(stderr, "Error: %s starts with +, but does not continue with '+b/'.\n", name); myexit(EXIT_FAILURE); } if (strchr(allowed, dir[1]) != NULL) return; fprintf(stderr, "Error: %s is not allowed to start with '+%c/'.\n" "(if your directory is named like that, set it to './+%c/')\n", name, dir[1], dir[1]); myexit(EXIT_FAILURE); } static char *expand_plus_prefix(/*@only@*/char *dir, const char *name, const char *allowed, bool freedir) { const char *fromdir; char *newdir; disallow_plus_prefix(dir, name, allowed); if (dir[0] == '/' || (dir[0] == '.' 
&& dir[1] == '/')) return dir; if (dir[0] != '+') { fprintf(stderr, "Warning: %s '%s' does not start with '/', './', or '+'.\n" "This currently means it is relative to the current working directory,\n" "but that might change in the future or cause an error instead!\n", name, dir); return dir; } if (dir[1] == 'b') { fromdir = x_basedir; } else if (dir[1] == 'o') { fromdir = x_outdir; } else if (dir[1] == 'c') { fromdir = x_confdir; } else { abort(); return dir; } if (dir[3] == '\0') newdir = strdup(fromdir); else newdir = calc_dirconcat(fromdir, dir + 3); if (FAILEDTOALLOC(newdir)) { (void)fputs("Out of Memory!\n", stderr); exit(EXIT_FAILURE); } if (freedir) free(dir); return newdir; } static inline int callendhook(int status, char *argv[]) { char exitcode[4]; /* Try to close all open fd but 0,1,2 */ closefrom(3); if (snprintf(exitcode, 4, "%u", ((unsigned int)status)&255U) > 3) memcpy(exitcode, "255", 4); sethookenvironment(causingfile, NULL, NULL, exitcode); argv[0] = endhook, (void)execv(endhook, argv); fprintf(stderr, "Error executing '%s': %s\n", endhook, strerror(errno)); return EXIT_RET(RET_ERROR); } int main(int argc, char *argv[]) { static struct option longopts[] = { {"delete", no_argument, &longoption, LO_DELETE}, {"nodelete", no_argument, &longoption, LO_NODELETE}, {"basedir", required_argument, NULL, 'b'}, {"ignore", required_argument, NULL, 'i'}, {"unignore", required_argument, &longoption, LO_UNIGNORE}, {"noignore", required_argument, &longoption, LO_UNIGNORE}, {"methoddir", required_argument, &longoption, LO_METHODDIR}, {"outdir", required_argument, &longoption, LO_OUTDIR}, {"distdir", required_argument, &longoption, LO_DISTDIR}, {"dbdir", required_argument, &longoption, LO_DBDIR}, {"listdir", required_argument, &longoption, LO_LISTDIR}, {"confdir", required_argument, &longoption, LO_CONFDIR}, {"logdir", required_argument, &longoption, LO_LOGDIR}, {"section", required_argument, NULL, 'S'}, {"priority", required_argument, NULL, 'P'}, {"component", 
required_argument, NULL, 'C'}, {"architecture", required_argument, NULL, 'A'}, {"type", required_argument, NULL, 'T'}, {"help", no_argument, NULL, 'h'}, {"verbose", no_argument, NULL, 'v'}, {"silent", no_argument, NULL, 's'}, {"version", no_argument, &longoption, LO_VERSION}, {"nothingiserror", no_argument, &longoption, LO_NOTHINGISERROR}, {"nolistsdownload", no_argument, &longoption, LO_NOLISTDOWNLOAD}, {"keepunreferencedfiles", no_argument, &longoption, LO_KEEPUNREFERENCED}, {"keepunusednewfiles", no_argument, &longoption, LO_KEEPUNUSEDNEW}, {"keepunneededlists", no_argument, &longoption, LO_KEEPUNNEEDEDLISTS}, {"onlysmalldeletes", no_argument, &longoption, LO_ONLYSMALLDELETES}, {"keepdirectories", no_argument, &longoption, LO_KEEPDIRECTORIES}, {"keeptemporaries", no_argument, &longoption, LO_KEEPTEMPORARIES}, {"ask-passphrase", no_argument, &longoption, LO_ASKPASSPHRASE}, {"nonothingiserror", no_argument, &longoption, LO_NONOTHINGISERROR}, {"nonolistsdownload", no_argument, &longoption, LO_LISTDOWNLOAD}, {"listsdownload", no_argument, &longoption, LO_LISTDOWNLOAD}, {"nokeepunreferencedfiles", no_argument, &longoption, LO_NOKEEPUNREFERENCED}, {"nokeepunusednewfiles", no_argument, &longoption, LO_NOKEEPUNUSEDNEW}, {"nokeepunneededlists", no_argument, &longoption, LO_NOKEEPUNNEEDEDLISTS}, {"noonlysmalldeletes", no_argument, &longoption, LO_NOONLYSMALLDELETES}, {"nokeepdirectories", no_argument, &longoption, LO_NOKEEPDIRECTORIES}, {"nokeeptemporaries", no_argument, &longoption, LO_NOKEEPTEMPORARIES}, {"noask-passphrase", no_argument, &longoption, LO_NOASKPASSPHRASE}, {"guessgpgtty", no_argument, &longoption, LO_GUESSGPGTTY}, {"noguessgpgtty", no_argument, &longoption, LO_NOGUESSGPGTTY}, {"nonoguessgpgtty", no_argument, &longoption, LO_GUESSGPGTTY}, {"fast", no_argument, &longoption, LO_FAST}, {"nofast", no_argument, &longoption, LO_NOFAST}, {"verbosedb", no_argument, &longoption, LO_VERBOSEDB}, {"noverbosedb", no_argument, &longoption, LO_NOVERBOSEDB}, 
{"verbosedatabase", no_argument, &longoption, LO_VERBOSEDB}, {"noverbosedatabase", no_argument, &longoption, LO_NOVERBOSEDB}, {"skipold", no_argument, &longoption, LO_SKIPOLD}, {"noskipold", no_argument, &longoption, LO_NOSKIPOLD}, {"nonoskipold", no_argument, &longoption, LO_SKIPOLD}, {"force", no_argument, NULL, 'f'}, {"export", required_argument, &longoption, LO_EXPORT}, {"waitforlock", required_argument, &longoption, LO_WAITFORLOCK}, {"checkspace", required_argument, &longoption, LO_SPACECHECK}, {"spacecheck", required_argument, &longoption, LO_SPACECHECK}, {"safetymargin", required_argument, &longoption, LO_SAFETYMARGIN}, {"dbsafetymargin", required_argument, &longoption, LO_DBSAFETYMARGIN}, {"gunzip", required_argument, &longoption, LO_GUNZIP}, {"bunzip2", required_argument, &longoption, LO_BUNZIP2}, {"unlzma", required_argument, &longoption, LO_UNLZMA}, {"unxz", required_argument, &longoption, LO_UNXZ}, {"lunzip", required_argument, &longoption, LO_LZIP}, {"gnupghome", required_argument, &longoption, LO_GNUPGHOME}, {"list-format", required_argument, &longoption, LO_LISTFORMAT}, {"list-skip", required_argument, &longoption, LO_LISTSKIP}, {"list-max", required_argument, &longoption, LO_LISTMAX}, {"morguedir", required_argument, &longoption, LO_MORGUEDIR}, {"show-percent", no_argument, &longoption, LO_SHOWPERCENT}, {"restrict", required_argument, &longoption, LO_RESTRICT_SRC}, {"restrict-source", required_argument, &longoption, LO_RESTRICT_SRC}, {"restrict-src", required_argument, &longoption, LO_RESTRICT_SRC}, {"restrict-binary", required_argument, &longoption, LO_RESTRICT_BIN}, {"restrict-file", required_argument, &longoption, LO_RESTRICT_FILE_SRC}, {"restrict-file-source", required_argument, &longoption, LO_RESTRICT_FILE_SRC}, {"restrict-file-src", required_argument, &longoption, LO_RESTRICT_FILE_SRC}, {"restrict-file-binary", required_argument, &longoption, LO_RESTRICT_FILE_BIN}, {"endhook", required_argument, &longoption, LO_ENDHOOK}, {"outhook", 
required_argument, &longoption, LO_OUTHOOK}, {NULL, 0, NULL, 0} }; const struct action *a; retvalue r; int c; struct sigaction sa; char *tempconfdir; sigemptyset(&sa.sa_mask); #if defined(SA_ONESHOT) sa.sa_flags = SA_ONESHOT; #elif defined(SA_RESETHAND) sa.sa_flags = SA_RESETHAND; #elif !defined(SPLINT) # error "missing argument to sigaction!" #endif sa.sa_handler = interrupt_signaled; (void)sigaction(SIGTERM, &sa, NULL); (void)sigaction(SIGABRT, &sa, NULL); (void)sigaction(SIGINT, &sa, NULL); (void)sigaction(SIGQUIT, &sa, NULL); (void)signal(SIGPIPE, SIG_IGN); programname = argv[0]; config_state = CONFIG_OWNER_DEFAULT; CONFIGDUP(x_basedir, STD_BASE_DIR); CONFIGDUP(x_confdir, "+b/conf"); CONFIGDUP(x_methoddir, STD_METHOD_DIR); CONFIGDUP(x_outdir, "+b/"); CONFIGDUP(x_distdir, "+o/dists"); CONFIGDUP(x_dbdir, "+b/db"); CONFIGDUP(x_logdir, "+b/logs"); CONFIGDUP(x_listdir, "+b/lists"); config_state = CONFIG_OWNER_CMDLINE; if (interrupted()) exit(EXIT_RET(RET_ERROR_INTERRUPTED)); while ((c = getopt_long(argc, argv, "+fVvshb:P:i:A:C:S:T:", longopts, NULL)) != -1) { handle_option(c, optarg); } if (optind >= argc) { fputs( "No action given. (see --help for available options and actions)\n", stderr); exit(EXIT_FAILURE); } if (interrupted()) exit(EXIT_RET(RET_ERROR_INTERRUPTED)); /* only for this CONFIG_OWNER_ENVIRONMENT is a bit stupid, * but perhaps it gets more... 
*/ config_state = CONFIG_OWNER_ENVIRONMENT; if (getenv("REPREPRO_BASE_DIR") != NULL) { CONFIGDUP(x_basedir, getenv("REPREPRO_BASE_DIR")); } if (getenv("REPREPRO_CONFIG_DIR") != NULL) { CONFIGDUP(x_confdir, getenv("REPREPRO_CONFIG_DIR")); } disallow_plus_prefix(x_basedir, "basedir", ""); tempconfdir = expand_plus_prefix(x_confdir, "confdir", "b", false); config_state = CONFIG_OWNER_FILE; optionsfile_parse(tempconfdir, longopts, handle_option); if (tempconfdir != x_confdir) free(tempconfdir); disallow_plus_prefix(x_basedir, "basedir", ""); disallow_plus_prefix(x_methoddir, "methoddir", ""); x_confdir = expand_plus_prefix(x_confdir, "confdir", "b", true); x_outdir = expand_plus_prefix(x_outdir, "outdir", "bc", true); x_logdir = expand_plus_prefix(x_logdir, "logdir", "boc", true); x_dbdir = expand_plus_prefix(x_dbdir, "dbdir", "boc", true); x_distdir = expand_plus_prefix(x_distdir, "distdir", "boc", true); x_listdir = expand_plus_prefix(x_listdir, "listdir", "boc", true); if (x_morguedir != NULL) x_morguedir = expand_plus_prefix(x_morguedir, "morguedir", "boc", true); if (endhook != NULL) { if (endhook[0] == '+' || endhook[0] == '/' || (endhook[0] == '.' && endhook[1] == '/')) { endhook = expand_plus_prefix(endhook, "endhook", "boc", true); } else { char *h; h = calc_dirconcat(x_confdir, endhook); free(endhook); endhook = h; if (endhook == NULL) exit(EXIT_RET(RET_ERROR_OOM)); } } if (outhook != NULL) { if (outhook[0] == '+' || outhook[0] == '/' || (outhook[0] == '.' 
&& outhook[1] == '/')) { outhook = expand_plus_prefix(outhook, "outhook", "boc", true); } else { char *h; h = calc_dirconcat(x_confdir, outhook); free(outhook); outhook = h; if (outhook == NULL) exit(EXIT_RET(RET_ERROR_OOM)); } } if (guessgpgtty && (getenv("GPG_TTY")==NULL) && isatty(0)) { static char terminalname[1024]; ssize_t len; len = readlink("/proc/self/fd/0", terminalname, 1023); if (len > 0 && len < 1024) { terminalname[len] = '\0'; setenv("GPG_TTY", terminalname, 0); } else if (verbose > 10) { fprintf(stderr, "Could not readlink /proc/self/fd/0 (error was %s), not setting GPG_TTY.\n", strerror(errno)); } } if (delete < D_COPY) delete = D_COPY; if (interrupted()) exit(EXIT_RET(RET_ERROR_INTERRUPTED)); global.basedir = x_basedir; global.dbdir = x_dbdir; global.outdir = x_outdir; global.confdir = x_confdir; global.distdir = x_distdir; global.logdir = x_logdir; global.methoddir = x_methoddir; global.listdir = x_listdir; global.morguedir = x_morguedir; if (gunzip != NULL && gunzip[0] == '+') gunzip = expand_plus_prefix(gunzip, "gunzip", "boc", true); if (bunzip2 != NULL && bunzip2[0] == '+') bunzip2 = expand_plus_prefix(bunzip2, "bunzip2", "boc", true); if (unlzma != NULL && unlzma[0] == '+') unlzma = expand_plus_prefix(unlzma, "unlzma", "boc", true); if (unxz != NULL && unxz[0] == '+') unxz = expand_plus_prefix(unxz, "unxz", "boc", true); if (lunzip != NULL && lunzip[0] == '+') lunzip = expand_plus_prefix(lunzip, "lunzip", "boc", true); uncompressions_check(gunzip, bunzip2, unlzma, unxz, lunzip); free(gunzip); free(bunzip2); free(unlzma); free(unxz); free(lunzip); a = all_actions; while (a->name != NULL) { a++; } r = atoms_init(a - all_actions); if (r == RET_ERROR_OOM) (void)fputs("Out of Memory!\n", stderr); if (RET_WAS_ERROR(r)) exit(EXIT_RET(r)); for (a = all_actions; a->name != NULL ; a++) { atoms_commands[1 + (a - all_actions)] = a->name; } if (gnupghome != NULL) { gnupghome = expand_plus_prefix(gnupghome, "gnupghome", "boc", true); if 
(setenv("GNUPGHOME", gnupghome, 1) != 0) { int e = errno; fprintf(stderr, "Error %d setting GNUPGHOME to '%s': %s\n", e, gnupghome, strerror(e)); myexit(EXIT_FAILURE); } } a = all_actions; while (a->name != NULL) { if (strcasecmp(a->name, argv[optind]) == 0) { signature_init(askforpassphrase); r = callaction(1 + (a - all_actions), a, argc-optind, (const char**)argv+optind); /* yeah, freeing all this stuff before exiting is * stupid, but it makes valgrind logs easier * readable */ signatures_done(); free_known_keys(); if (RET_WAS_ERROR(r)) { if (r == RET_ERROR_OOM) (void)fputs("Out of Memory!\n", stderr); else if (verbose >= 0) (void)fputs( "There have been errors!\n", stderr); } if (endhook != NULL) { assert (optind > 0); /* only returns upon error: */ r = callendhook(EXIT_RET(r), argv + optind - 1); } myexit(EXIT_RET(r)); } else a++; } fprintf(stderr, "Unknown action '%s'. (see --help for available options and actions)\n", argv[optind]); signatures_done(); myexit(EXIT_FAILURE); } reprepro-4.13.1/upgradelist.h0000644000175100017510000000430212152651661013061 00000000000000#ifndef REPREPRO_UPGRADELIST_H #define REPREPRO_UPGRADELIST_H /* Things for making decisions what to upgrade and what not */ typedef enum { UD_ERROR, UD_LOUDNO, UD_NO, UD_UPGRADE, UD_HOLD, UD_SUPERSEDE } upgrade_decision; typedef upgrade_decision upgrade_decide_function(void *privdata, const struct target *, const char *package, const char *source, const char *old_version, const char *new_version, const char *new_src_version, const char *newcontrolchunk); /* The main part: */ struct target; struct logger; struct upgradelist; retvalue upgradelist_initialize(struct upgradelist **, /*@dependent@*/struct target *); void upgradelist_free(/*@only@*/struct upgradelist *); typedef void dumpaction(const char */*packagename*/, /*@null@*/const char */*oldversion*/, /*@null@*/const char */*newversion*/, /*@null@*/const char */*bestcandidate*/, /*@null@*/const struct strlist */*newfilekeys*/, /*@null@*/const 
char */*newcontrol*/, void *); void upgradelist_dump(struct upgradelist *, dumpaction *); /* Take all items in 'filename' into account, and remember them coming from 'method' */ retvalue upgradelist_update(struct upgradelist *, /*@dependent@*/void *, const char * /*filename*/, upgrade_decide_function *, void *, bool /*ignorewrongarchitecture*/); /* Take all items in source into account */ retvalue upgradelist_pull(struct upgradelist *, struct target *, upgrade_decide_function *, void *, void *); /* mark all packages as deleted, so they will vanis unless readded or reholded */ retvalue upgradelist_deleteall(struct upgradelist *); typedef retvalue enqueueaction(void *, const struct checksumsarray *, const struct strlist *, void *); /* request all wanted files refering the methods given before */ retvalue upgradelist_enqueue(struct upgradelist *, enqueueaction *, void *); bool upgradelist_isbigdelete(const struct upgradelist *); bool upgradelist_woulddelete(const struct upgradelist *); retvalue upgradelist_install(struct upgradelist *, /*@null@*/struct logger *, bool /*ignoredelete*/, void (*)(void *, const char **, const char **)); /* remove all packages that would either be removed or upgraded by an upgrade */ retvalue upgradelist_predelete(struct upgradelist *, /*@null@*/struct logger *); #endif reprepro-4.13.1/Makefile.in0000644000175100017510000011567512152655330012447 00000000000000# Makefile.in generated by automake 1.11.6 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software # Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__make_dryrun = \ { \ am__dry=no; \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ echo 'am--echo: ; @echo "AM" OK' | $(MAKE) -f - 2>/dev/null \ | grep '^AM OK$$' >/dev/null || am__dry=yes;; \ *) \ for am__flg in $$MAKEFLAGS; do \ case $$am__flg in \ *=*|--*) ;; \ *n*) am__dry=yes; break;; \ esac; \ done;; \ esac; \ test $$am__dry = yes; \ } pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : bin_PROGRAMS = reprepro$(EXEEXT) changestool$(EXEEXT) \ rredtool$(EXEEXT) subdir = . 
DIST_COMMON = README $(am__configure_deps) $(noinst_HEADERS) \ $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ $(srcdir)/config.h.in $(top_srcdir)/configure AUTHORS COPYING \ ChangeLog INSTALL NEWS TODO ac/depcomp ac/install-sh \ ac/missing ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ configure.lineno config.status.lineno mkinstalldirs = $(install_sh) -d CONFIG_HEADER = config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__installdirs = "$(DESTDIR)$(bindir)" PROGRAMS = $(bin_PROGRAMS) am__changestool_SOURCES_DIST = uncompression.c sourceextraction.c \ readtextfile.c filecntl.c tool.c chunkedit.c strlist.c \ checksums.c sha1.c sha256.c md5.c mprintf.c chunks.c \ signature.c dirs.c names.c extractcontrol.c ar.c debfile.c @HAVE_LIBARCHIVE_FALSE@am__objects_1 = extractcontrol.$(OBJEXT) @HAVE_LIBARCHIVE_TRUE@am__objects_1 = ar.$(OBJEXT) debfile.$(OBJEXT) am_changestool_OBJECTS = uncompression.$(OBJEXT) \ sourceextraction.$(OBJEXT) readtextfile.$(OBJEXT) \ filecntl.$(OBJEXT) tool.$(OBJEXT) chunkedit.$(OBJEXT) \ strlist.$(OBJEXT) checksums.$(OBJEXT) sha1.$(OBJEXT) \ sha256.$(OBJEXT) md5.$(OBJEXT) mprintf.$(OBJEXT) \ chunks.$(OBJEXT) signature.$(OBJEXT) dirs.$(OBJEXT) \ names.$(OBJEXT) $(am__objects_1) changestool_OBJECTS = $(am_changestool_OBJECTS) am__DEPENDENCIES_1 = changestool_DEPENDENCIES = $(am__DEPENDENCIES_1) am__reprepro_SOURCES_DIST = outhook.c descriptions.c sizes.c \ sourcecheck.c byhandhook.c archallflood.c needbuild.c \ globmatch.c printlistformat.c diffindex.c rredpatch.c pool.c \ atoms.c uncompression.c remoterepository.c indexfile.c \ copypackages.c sourceextraction.c checksums.c readtextfile.c \ filecntl.c sha1.c sha256.c configparser.c database.c \ freespace.c hooks.c log.c changes.c incoming.c uploaderslist.c \ 
guesscomponent.c files.c md5.c dirs.c chunks.c reference.c \ binaries.c sources.c checks.c names.c dpkgversions.c release.c \ mprintf.c updates.c strlist.c signature_check.c signedfile.c \ signature.c distribution.c checkindeb.c checkindsc.c checkin.c \ upgradelist.c target.c aptmethod.c downloadcache.c main.c \ override.c terms.c termdecide.c ignore.c filterlist.c \ exports.c tracking.c optionsfile.c donefile.c pull.c \ contents.c filelist.c extractcontrol.c ar.c debfile.c \ debfilecontents.c @HAVE_LIBARCHIVE_TRUE@am__objects_2 = debfilecontents.$(OBJEXT) am_reprepro_OBJECTS = outhook.$(OBJEXT) descriptions.$(OBJEXT) \ sizes.$(OBJEXT) sourcecheck.$(OBJEXT) byhandhook.$(OBJEXT) \ archallflood.$(OBJEXT) needbuild.$(OBJEXT) globmatch.$(OBJEXT) \ printlistformat.$(OBJEXT) diffindex.$(OBJEXT) \ rredpatch.$(OBJEXT) pool.$(OBJEXT) atoms.$(OBJEXT) \ uncompression.$(OBJEXT) remoterepository.$(OBJEXT) \ indexfile.$(OBJEXT) copypackages.$(OBJEXT) \ sourceextraction.$(OBJEXT) checksums.$(OBJEXT) \ readtextfile.$(OBJEXT) filecntl.$(OBJEXT) sha1.$(OBJEXT) \ sha256.$(OBJEXT) configparser.$(OBJEXT) database.$(OBJEXT) \ freespace.$(OBJEXT) hooks.$(OBJEXT) log.$(OBJEXT) \ changes.$(OBJEXT) incoming.$(OBJEXT) uploaderslist.$(OBJEXT) \ guesscomponent.$(OBJEXT) files.$(OBJEXT) md5.$(OBJEXT) \ dirs.$(OBJEXT) chunks.$(OBJEXT) reference.$(OBJEXT) \ binaries.$(OBJEXT) sources.$(OBJEXT) checks.$(OBJEXT) \ names.$(OBJEXT) dpkgversions.$(OBJEXT) release.$(OBJEXT) \ mprintf.$(OBJEXT) updates.$(OBJEXT) strlist.$(OBJEXT) \ signature_check.$(OBJEXT) signedfile.$(OBJEXT) \ signature.$(OBJEXT) distribution.$(OBJEXT) \ checkindeb.$(OBJEXT) checkindsc.$(OBJEXT) checkin.$(OBJEXT) \ upgradelist.$(OBJEXT) target.$(OBJEXT) aptmethod.$(OBJEXT) \ downloadcache.$(OBJEXT) main.$(OBJEXT) override.$(OBJEXT) \ terms.$(OBJEXT) termdecide.$(OBJEXT) ignore.$(OBJEXT) \ filterlist.$(OBJEXT) exports.$(OBJEXT) tracking.$(OBJEXT) \ optionsfile.$(OBJEXT) donefile.$(OBJEXT) pull.$(OBJEXT) \ contents.$(OBJEXT) 
filelist.$(OBJEXT) $(am__objects_1) \ $(am__objects_2) reprepro_OBJECTS = $(am_reprepro_OBJECTS) reprepro_DEPENDENCIES = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am_rredtool_OBJECTS = rredtool.$(OBJEXT) rredpatch.$(OBJEXT) \ mprintf.$(OBJEXT) filecntl.$(OBJEXT) sha1.$(OBJEXT) rredtool_OBJECTS = $(am_rredtool_OBJECTS) rredtool_LDADD = $(LDADD) DEFAULT_INCLUDES = -I.@am__isrc@ depcomp = $(SHELL) $(top_srcdir)/ac/depcomp am__depfiles_maybe = depfiles am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) CCLD = $(CC) LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ SOURCES = $(changestool_SOURCES) $(reprepro_SOURCES) \ $(rredtool_SOURCES) DIST_SOURCES = $(am__changestool_SOURCES_DIST) \ $(am__reprepro_SOURCES_DIST) $(rredtool_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ html-recursive info-recursive install-data-recursive \ install-dvi-recursive install-exec-recursive \ install-html-recursive install-info-recursive \ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac HEADERS = $(noinst_HEADERS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ distdir dist dist-all distcheck ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) distdir = $(PACKAGE)-$(VERSION) top_distdir = $(distdir) am__remove_distdir = \ if test -d "$(distdir)"; then \ find "$(distdir)" -type d ! 
-perm -200 -exec chmod u+w {} ';' \ && rm -rf "$(distdir)" \ || { sleep 5 && rm -rf "$(distdir)"; }; \ else :; fi am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" DIST_ARCHIVES = $(distdir).tar.gz GZIP_ENV = --best distuninstallcheck_listfiles = find . -type f -print am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \ | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$' distcleancheck_listfiles = find . 
-type f -print ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ ARCHIVECPP = @ARCHIVECPP@ ARCHIVELIBS = @ARCHIVELIBS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CYGPATH_W = @CYGPATH_W@ DBLIBS = @DBLIBS@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build_alias = @build_alias@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host_alias = @host_alias@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sharedstatedir = 
@sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ SUBDIRS = docs tests EXTRA_DIST = autogen.sh @HAVE_LIBARCHIVE_FALSE@ARCHIVE_USED = extractcontrol.c @HAVE_LIBARCHIVE_TRUE@ARCHIVE_USED = ar.c debfile.c @HAVE_LIBARCHIVE_FALSE@ARCHIVE_CONTENTS = @HAVE_LIBARCHIVE_TRUE@ARCHIVE_CONTENTS = debfilecontents.c @HAVE_LIBARCHIVE_FALSE@ARCHIVE_UNUSED = ar.c debfile.c debfilecontents.c @HAVE_LIBARCHIVE_TRUE@ARCHIVE_UNUSED = extractcontrol.c AM_CPPFLAGS = $(ARCHIVECPP) $(DBCPPFLAGS) reprepro_LDADD = $(ARCHIVELIBS) $(DBLIBS) changestool_LDADD = $(ARCHIVELIBS) reprepro_SOURCES = outhook.c descriptions.c sizes.c sourcecheck.c byhandhook.c archallflood.c needbuild.c globmatch.c printlistformat.c diffindex.c rredpatch.c pool.c atoms.c uncompression.c remoterepository.c indexfile.c copypackages.c sourceextraction.c checksums.c readtextfile.c filecntl.c sha1.c sha256.c configparser.c database.c freespace.c hooks.c log.c changes.c incoming.c uploaderslist.c guesscomponent.c files.c md5.c dirs.c chunks.c reference.c binaries.c sources.c checks.c names.c dpkgversions.c release.c mprintf.c updates.c strlist.c signature_check.c signedfile.c signature.c distribution.c checkindeb.c checkindsc.c checkin.c upgradelist.c target.c aptmethod.c downloadcache.c main.c override.c terms.c termdecide.c ignore.c filterlist.c exports.c tracking.c optionsfile.c donefile.c pull.c contents.c filelist.c $(ARCHIVE_USED) $(ARCHIVE_CONTENTS) EXTRA_reprepro_SOURCE = $(ARCHIVE_UNUSED) changestool_SOURCES = uncompression.c sourceextraction.c readtextfile.c filecntl.c tool.c chunkedit.c strlist.c checksums.c sha1.c sha256.c md5.c mprintf.c chunks.c signature.c dirs.c names.c $(ARCHIVE_USED) rredtool_SOURCES = rredtool.c rredpatch.c mprintf.c filecntl.c sha1.c noinst_HEADERS = outhook.h descriptions.h sizes.h sourcecheck.h byhandhook.h archallflood.h needbuild.h globmatch.h 
printlistformat.h pool.h atoms.h uncompression.h remoterepository.h copypackages.h sourceextraction.h checksums.h readtextfile.h filecntl.h sha1.h sha256.h configparser.h database_p.h database.h freespace.h hooks.h log.h changes.h incoming.h guesscomponent.h md5.h dirs.h files.h chunks.h reference.h binaries.h sources.h checks.h names.h release.h error.h mprintf.h updates.h strlist.h signature.h signature_p.h distribution.h debfile.h checkindeb.h checkindsc.h upgradelist.h target.h aptmethod.h downloadcache.h override.h terms.h termdecide.h ignore.h filterlist.h dpkgversions.h checkin.h exports.h globals.h tracking.h trackingt.h optionsfile.h donefile.h pull.h ar.h filelist.h contents.h chunkedit.h uploaderslist.h indexfile.h rredpatch.h diffindex.h MAINTAINERCLEANFILES = $(srcdir)/Makefile.in $(srcdir)/configure $(srcdir)/stamp-h.in $(srcdir)/aclocal.m4 $(srcdir)/config.h.in SPLINT = splint SPLITFLAGSFORVIM = -linelen 10000 -locindentspaces 0 SPLINTFLAGS = +posixlib -booltype bool -numabstractcast -fixedformalarray -enumint +enumindex +charint $(SPLITFLAGSFORVIM) $(EXTRASPLINTFLAGS) all: config.h $(MAKE) $(AM_MAKEFLAGS) all-recursive .SUFFIXES: .SUFFIXES: .c .o .obj am--refresh: Makefile @: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ echo ' cd $(srcdir) && $(AUTOMAKE) --gnu'; \ $(am__cd) $(srcdir) && $(AUTOMAKE) --gnu \ && exit 0; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ echo ' $(SHELL) ./config.status'; \ $(SHELL) ./config.status;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) $(SHELL) ./config.status --recheck $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) $(am__cd) $(srcdir) && $(AUTOCONF) $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) $(am__aclocal_m4_deps): config.h: stamp-h1 @if test ! -f $@; then rm -f stamp-h1; else :; fi @if test ! -f $@; then $(MAKE) $(AM_MAKEFLAGS) stamp-h1; else :; fi stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status @rm -f stamp-h1 cd $(top_builddir) && $(SHELL) ./config.status config.h $(srcdir)/config.h.in: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) ($(am__cd) $(top_srcdir) && $(AUTOHEADER)) rm -f stamp-h1 touch $@ distclean-hdr: -rm -f config.h stamp-h1 install-binPROGRAMS: $(bin_PROGRAMS) @$(NORMAL_INSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \ $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \ fi; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p; \ then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) 
$(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ } \ ; done uninstall-binPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(bindir)" && rm -f $$files clean-binPROGRAMS: -test -z "$(bin_PROGRAMS)" || rm -f $(bin_PROGRAMS) changestool$(EXEEXT): $(changestool_OBJECTS) $(changestool_DEPENDENCIES) $(EXTRA_changestool_DEPENDENCIES) @rm -f changestool$(EXEEXT) $(LINK) $(changestool_OBJECTS) $(changestool_LDADD) $(LIBS) reprepro$(EXEEXT): $(reprepro_OBJECTS) $(reprepro_DEPENDENCIES) $(EXTRA_reprepro_DEPENDENCIES) @rm -f reprepro$(EXEEXT) $(LINK) $(reprepro_OBJECTS) $(reprepro_LDADD) $(LIBS) rredtool$(EXEEXT): $(rredtool_OBJECTS) $(rredtool_DEPENDENCIES) $(EXTRA_rredtool_DEPENDENCIES) @rm -f rredtool$(EXEEXT) $(LINK) $(rredtool_OBJECTS) $(rredtool_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/aptmethod.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ar.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/archallflood.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/atoms.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/binaries.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/byhandhook.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/changes.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/checkin.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/checkindeb.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/checkindsc.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/checks.Po@am__quote@ 
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/checksums.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/chunkedit.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/chunks.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/configparser.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/contents.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/copypackages.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/database.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/debfile.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/debfilecontents.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/descriptions.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/diffindex.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dirs.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/distribution.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/donefile.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/downloadcache.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dpkgversions.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/exports.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/extractcontrol.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/filecntl.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/filelist.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/files.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/filterlist.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/freespace.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/globmatch.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/guesscomponent.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hooks.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ignore.Po@am__quote@ 
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/incoming.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/indexfile.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/log.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/main.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/md5.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mprintf.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/names.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/needbuild.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/optionsfile.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/outhook.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/override.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pool.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/printlistformat.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pull.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/readtextfile.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/reference.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/release.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/remoterepository.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rredpatch.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rredtool.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sha1.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sha256.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/signature.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/signature_check.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/signedfile.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sizes.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sourcecheck.Po@am__quote@ @AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/sourceextraction.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sources.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/strlist.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/target.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/termdecide.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/terms.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tool.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tracking.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/uncompression.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/updates.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/upgradelist.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/uploaderslist.Po@am__quote@ .c.o: @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(COMPILE) -c $< .c.obj: @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` # This directory's subdirectories are mostly independent; you can cd # into them and run `make' without going through this Makefile. 
# To change the values of `make' variables: instead of editing Makefiles, # (1) if the variable is set in `config.status', edit `config.status' # (which will cause the Makefiles to be regenerated when you run `make'); # (2) otherwise, pass the desired values on the `make' command line. $(RECURSIVE_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ list='$(SUBDIRS)'; for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" $(RECURSIVE_CLEAN_TARGETS): @fail= failcom='exit 1'; \ for f in x $$MAKEFLAGS; do \ case $$f in \ *=* | --[!k]*);; \ *k*) failcom='fail=yes';; \ esac; \ done; \ dot_seen=no; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ rev=''; for subdir in $$list; do \ if test "$$subdir" = "."; then :; else \ rev="$$subdir $$rev"; \ fi; \ done; \ rev="$$rev ."; \ target=`echo $@ | sed s/-recursive//`; \ for subdir in $$rev; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done && test -z "$$fail" tags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ done ctags-recursive: list='$(SUBDIRS)'; for subdir in $$list; do \ test "$$subdir" = . 
|| ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ done ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) 
\ && gtags -i $(GTAGS_ARGS) "$$here" distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) $(am__remove_distdir) test -d "$(distdir)" || mkdir "$(distdir)" @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done -test -n "$(am__skip_mode_fix)" \ || find "$(distdir)" -type d ! -perm -755 \ -exec chmod u+rwx,go+rx {} \; -o \ ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ ! -type d ! 
-perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ || chmod -R a+r "$(distdir)" dist-gzip: distdir tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz $(am__remove_distdir) dist-bzip2: distdir tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2 $(am__remove_distdir) dist-lzip: distdir tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz $(am__remove_distdir) dist-lzma: distdir tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma $(am__remove_distdir) dist-xz: distdir tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz $(am__remove_distdir) dist-tarZ: distdir tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z $(am__remove_distdir) dist-shar: distdir shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz $(am__remove_distdir) dist-zip: distdir -rm -f $(distdir).zip zip -rq $(distdir).zip $(distdir) $(am__remove_distdir) dist dist-all: distdir tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz $(am__remove_distdir) # This target untars the dist file and tries a VPATH configuration. Then # it guarantees that the distribution is self-contained by making another # tarfile. 
distcheck: dist case '$(DIST_ARCHIVES)' in \ *.tar.gz*) \ GZIP=$(GZIP_ENV) gzip -dc $(distdir).tar.gz | $(am__untar) ;;\ *.tar.bz2*) \ bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\ *.tar.lzma*) \ lzma -dc $(distdir).tar.lzma | $(am__untar) ;;\ *.tar.lz*) \ lzip -dc $(distdir).tar.lz | $(am__untar) ;;\ *.tar.xz*) \ xz -dc $(distdir).tar.xz | $(am__untar) ;;\ *.tar.Z*) \ uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ *.shar.gz*) \ GZIP=$(GZIP_ENV) gzip -dc $(distdir).shar.gz | unshar ;;\ *.zip*) \ unzip $(distdir).zip ;;\ esac chmod -R a-w $(distdir); chmod u+w $(distdir) mkdir $(distdir)/_build mkdir $(distdir)/_inst chmod a-w $(distdir) test -d $(distdir)/_build || exit 0; \ dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ && am__cwd=`pwd` \ && $(am__cd) $(distdir)/_build \ && ../configure --srcdir=.. --prefix="$$dc_install_base" \ $(AM_DISTCHECK_CONFIGURE_FLAGS) \ $(DISTCHECK_CONFIGURE_FLAGS) \ && $(MAKE) $(AM_MAKEFLAGS) \ && $(MAKE) $(AM_MAKEFLAGS) dvi \ && $(MAKE) $(AM_MAKEFLAGS) check \ && $(MAKE) $(AM_MAKEFLAGS) install \ && $(MAKE) $(AM_MAKEFLAGS) installcheck \ && $(MAKE) $(AM_MAKEFLAGS) uninstall \ && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ distuninstallcheck \ && chmod -R a-w "$$dc_install_base" \ && ({ \ (cd ../.. 
&& umask 077 && mkdir "$$dc_destdir") \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ } || { rm -rf "$$dc_destdir"; exit 1; }) \ && rm -rf "$$dc_destdir" \ && $(MAKE) $(AM_MAKEFLAGS) dist \ && rm -rf $(DIST_ARCHIVES) \ && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \ && cd "$$am__cwd" \ || exit 1 $(am__remove_distdir) @(echo "$(distdir) archives ready for distribution: "; \ list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' distuninstallcheck: @test -n '$(distuninstallcheck_dir)' || { \ echo 'ERROR: trying to run $@ with an empty' \ '$$(distuninstallcheck_dir)' >&2; \ exit 1; \ }; \ $(am__cd) '$(distuninstallcheck_dir)' || { \ echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \ exit 1; \ }; \ test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \ || { echo "ERROR: files left after uninstall:" ; \ if test -n "$(DESTDIR)"; then \ echo " (check DESTDIR support)"; \ fi ; \ $(distuninstallcheck_listfiles) ; \ exit 1; } >&2 distcleancheck: distclean @if test '$(srcdir)' = . 
; then \ echo "ERROR: distcleancheck can only run from a VPATH build" ; \ exit 1 ; \ fi @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ || { echo "ERROR: files left in build directory after distclean:" ; \ $(distcleancheck_listfiles) ; \ exit 1; } >&2 check-am: all-am check: check-recursive all-am: Makefile $(PROGRAMS) $(HEADERS) config.h installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(bindir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) clean: clean-recursive clean-am: clean-binPROGRAMS clean-generic clean-local mostlyclean-am distclean: distclean-recursive -rm -f $(am__CONFIG_DISTCLEAN_FILES) -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-hdr distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-binPROGRAMS install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f $(am__CONFIG_DISTCLEAN_FILES) -rm -rf $(top_srcdir)/autom4te.cache -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic \ maintainer-clean-local mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-binPROGRAMS .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) all \ ctags-recursive install-am install-strip tags-recursive .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am am--refresh check check-am clean clean-binPROGRAMS \ clean-generic clean-local ctags ctags-recursive dist dist-all \ dist-bzip2 dist-gzip dist-lzip dist-lzma dist-shar dist-tarZ \ dist-xz dist-zip distcheck distclean distclean-compile \ distclean-generic distclean-hdr distclean-tags distcleancheck \ distdir distuninstallcheck dvi dvi-am html html-am info \ info-am install install-am install-binPROGRAMS install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ 
install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic maintainer-clean-local mostlyclean \ mostlyclean-compile mostlyclean-generic pdf pdf-am ps ps-am \ tags tags-recursive uninstall uninstall-am \ uninstall-binPROGRAMS clean-local: -rm -rf autom4te.cache $(srcdir)/autom4te.cache maintainer-clean-local: -rm -rf $(srcdir)/ac # Some things for my private laziness strictbooleancheck: /home/brl/gcc/b/gcc/cc1 -DHAVE_CONFIG_H -I/home/brl/gcc/b/gcc/include -I/usr/include -I. -Wall -DAVOID_CHECKPROBLEMS=1 -g -W -O2 *.c splint: $(SPLINT) -DSPLINT=1 $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) -D_GNU_SOURCE=1 $(SPLINTFLAGS) $(foreach file,$(reprepro_SOURCES),$(srcdir)/$(file)) # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: reprepro-4.13.1/log.h0000644000175100017510000000255612152651661011330 00000000000000#ifndef REPREPRO_LOG_H #define REPREPRO_LOG_H #ifndef REPREPRO_STRLIST_H #include "strlist.h" #endif #ifndef REPREPRO_HOOKS_H #include "hooks.h" #endif struct target; struct logger; retvalue logger_init(struct configiterator *, /*@out@*/struct logger **); void logger_free(/*@only@*/struct logger *); retvalue logger_prepare(struct logger *logger); bool logger_isprepared(/*@null@*/const struct logger *logger); void logger_logchanges(struct logger *, const char * /*codename*/, const char * /*name*/, const char * /*version*/, const char * /*data*/, const char * /*safefilename*/, /*@null@*/const char * /*changesfilekey*/); void logger_log(struct logger *, struct target *, const char * /*name*/, /*@null@*/const char * /*version*/, /*@null@*/const char */*oldversion*/, /*@null@*/const char * /*control*/, /*@null@*/const char * /*oldcontrol*/, /*@null@*/const struct strlist * /*filekeys*/, /*@null@*/const struct strlist * /*oldfilekeys*/, /*@null@*/const char * /*causingrule*/, /*@null@*/const char * 
/*suitefrom*/);

/* presumably: does this logger have a rerun hook relevant to the target?
 * — confirm against log.c, the implementation is not visible here */
bool logger_rerun_needs_target(const struct logger *, const struct target *);
retvalue logger_reruninfo(struct logger *, struct target *, const char * /*name*/, const char * /*version*/, const char * /*control*/, /*@null@*/const struct strlist * /*filekeys*/);

/* wait for all jobs to finish */
void logger_wait(void);
void logger_warn_waiting(void);

#endif

/* --- extraction residue below: tar member header for chunks.c --- */
reprepro-4.13.1/chunks.c0000644000175100017510000004425412152651661012036 00000000000000/* This file is part of "reprepro"
 * Copyright (C) 2003,2004,2005,2007 Bernhard R. Link
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */

/* NOTE(review): the <...> names of the following system headers were lost
 * in extraction (angle-bracketed text eaten); restore them from the
 * upstream chunks.c before building — do not guess here. */
#include
#include
#include
#include
#include
#include
#include
#include "error.h"
#include "chunks.h"
#include "names.h"

/* point to a specified field in a chunk:
 * scan each line of the control chunk for a line beginning
 * (case-insensitively) with "name:"; return a pointer to the text just
 * past the ':', or NULL when the field is absent or chunk is NULL.
 * Continuation lines start with space/tab, so they can never match. */
static const char *chunk_getfield(const char *name, const char *chunk) {
	size_t l;

	if (chunk == NULL)
		return NULL;
	l = strlen(name);
	while (*chunk != '\0') {
		/* a match must be anchored at the start of a line */
		if (strncasecmp(name, chunk, l) == 0 && chunk[l] == ':') {
			chunk += l+1;
			return chunk;
		}
		/* otherwise advance to the beginning of the next line */
		while (*chunk != '\n' && *chunk != '\0')
			chunk++;
		if (*chunk == '\0')
			return NULL;
		chunk++;
	}
	return NULL;
}

/* get the content of the given field, including all following lines, in a
 * format that may be put into chunk_replacefields
 * (NOTE: this whole function is commented out upstream — the comment
 * opened here stays open and is closed after the function body)
static retvalue chunk_getcontent(const char *chunk, const char *name, char **value) {
	const char *field;
	char *val;
	const char *b, *e;

	assert(value != NULL);
	field =
chunk_getfield(name, chunk);
	if (field == NULL)
		return RET_NOTHING;
	b = field;
	* jump over spaces at the beginning *
	if (xisspace(*b))
		b++;
	* search for the end *
	e = b;
	do {
		while (*e != '\n' && *e != '\0')
			e++;
		if (*e != '\0')
			e++;
	} while (*e != ' ' && *e != '\t' && *e != '\0');
	if (e > b && *e == '\0')
		e--;
	* remove trailing newline *
	if (e > b && *e == '\n')
		e--;
	if (e > b)
		val = strndup(b, e - b + 1);
	else
		val = strdup("");
	if (FAILEDTOALLOC(val))
		return RET_ERROR_OOM;
	*value = val;
	return RET_OK;
}
*/

/* look for name in chunk. returns RET_NOTHING if not found.
 * On success allocates *value holding the first line of the field with at
 * most one leading space skipped and trailing whitespace trimmed
 * (caller frees). NOTE(review): only a single leading space is skipped
 * and the exact character set depends on xisspace() — confirm whether
 * it includes '\n' before relying on empty-value behavior. */
retvalue chunk_getvalue(const char *chunk, const char *name, char **value) {
	const char *field;
	char *val;
	const char *b, *e;

	assert(value != NULL);
	field = chunk_getfield(name, chunk);
	if (field == NULL)
		return RET_NOTHING;
	b = field;
	/* jump over spaces at the beginning */
	if (xisspace(*b))
		b++;
	/* search for the end */
	e = b;
	while (*e != '\n' && *e != '\0')
		e++;
	/* remove trailing spaces */
	while (e > b && xisspace(*e))
		e--;
	if (!xisspace(*e))
		val = strndup(b, e - b + 1);
	else
		val = strdup("");
	if (FAILEDTOALLOC(val))
		return RET_ERROR_OOM;
	*value = val;
	return RET_OK;
}

/* Collect the continuation lines of a multi-line field into strlist,
 * one entry per line, each stripped of leading blanks and trailing
 * whitespace. The first (same-line) part of the field is ignored.
 * On any error the partially-built list is freed before returning. */
retvalue chunk_getextralinelist(const char *chunk, const char *name, struct strlist *strlist) {
	retvalue r;
	const char *f, *b, *e;
	char *v;

	f = chunk_getfield(name, chunk);
	if (f == NULL)
		return RET_NOTHING;
	strlist_init(strlist);
	/* walk over the first line */
	while (*f != '\0' && *f != '\n')
		f++;
	/* nothing there is an empty list */
	if (*f == '\0')
		return RET_OK;
	f++;
	/* while lines begin with ' ' or '\t', add them */
	while (*f == ' ' || *f == '\t') {
		while (*f != '\0' && xisblank(*f))
			f++;
		b = f;
		while (*f != '\0' && *f != '\n')
			f++;
		e = f;
		/* trim trailing whitespace of this line */
		while (e > b && *e != '\0' && xisspace(*e))
			e--;
		if (!xisspace(*e))
			v = strndup(b, e - b + 1);
		else
			v = strdup("");
		if (FAILEDTOALLOC(v)) {
			strlist_done(strlist);
			return RET_ERROR_OOM;
		}
		r = strlist_add(strlist, v);
		if (!RET_IS_OK(r)) {
			strlist_done(strlist);
			return r;
		}
		if (*f == '\0')
			return RET_OK;
		f++;
	}
	return RET_OK;
}

/* Return the whole raw data of a field (first line plus all
 * continuation lines) as one newly allocated string in *value,
 * with leading spaces of the first line skipped. The scan stops at the
 * first line that does not continue the field (i.e. does not start with
 * space/tab after a newline, '\r' being tolerated). Caller frees. */
retvalue chunk_getwholedata(const char *chunk, const char *name, char **value) {
	const char *f, *p, *e;
	bool afternewline = false;
	char *v;

	f = chunk_getfield(name, chunk);
	if (f == NULL)
		return RET_NOTHING;
	while (*f == ' ')
		f++;
	for (e = p = f ; *p != '\0' ; p++) {
		if (afternewline) {
			/* a line starting with blank continues the field */
			if (*p == ' ' || *p == '\t')
				afternewline = false;
			else if (*p != '\r')
				break;
		} else {
			if (*p == '\n') {
				/* remember end in case the field stops here */
				e = p;
				afternewline = true;
			}
		}
	}
	if (!afternewline && *p == '\0')
		e = p;
	v = strndup(f, e - f);
	if (FAILEDTOALLOC(v))
		return RET_ERROR_OOM;
	*value = v;
	return RET_OK;
}

/* Split the field's value (including continuation lines) into
 * whitespace-separated words and append each to strlist.
 * A newline followed by a non-blank character ends the field.
 * On any error the partially-built list is freed before returning. */
retvalue chunk_getwordlist(const char *chunk, const char *name, struct strlist *strlist) {
	retvalue r;
	const char *f, *b;
	char *v;

	f = chunk_getfield(name, chunk);
	if (f == NULL)
		return RET_NOTHING;
	strlist_init(strlist);
	while (*f != '\0') {
		/* walk over spaces */
		while (*f != '\0' && xisspace(*f)) {
			if (*f == '\n') {
				f++;
				/* next line only continues if indented */
				if (*f != ' ' && *f != '\t')
					return RET_OK;
			} else
				f++;
		}
		if (*f == '\0')
			return RET_OK;
		b = f;
		/* search for end of word */
		while (*f != '\0' && !xisspace(*f))
			f++;
		v = strndup(b, f - b);
		if (FAILEDTOALLOC(v)) {
			strlist_done(strlist);
			return RET_ERROR_OOM;
		}
		r = strlist_add(strlist, v);
		if (!RET_IS_OK(r)) {
			strlist_done(strlist);
			return r;
		}
	}
	return RET_OK;
}

/* Same as chunk_getwordlist, but duplicate words are added only once
 * (via strlist_adduniq instead of strlist_add). */
retvalue chunk_getuniqwordlist(const char *chunk, const char *name, struct strlist *strlist) {
	retvalue r;
	const char *f, *b;
	char *v;

	f = chunk_getfield(name, chunk);
	if (f == NULL)
		return RET_NOTHING;
	strlist_init(strlist);
	while (*f != '\0') {
		/* walk over spaces */
		while (*f != '\0' && xisspace(*f)) {
			if (*f == '\n') {
				f++;
				/* next line only continues if indented */
				if (*f != ' ' && *f != '\t')
					return RET_OK;
			} else
				f++;
		}
		if (*f == '\0')
			return RET_OK;
		b = f;
		/* search for end of word */
		while (*f != '\0' && !xisspace(*f))
			f++;
		v = strndup(b, f - b);
		if (FAILEDTOALLOC(v)) {
			strlist_done(strlist);
			return RET_ERROR_OOM;
		}
		r = strlist_adduniq(strlist, v);
		if (!RET_IS_OK(r)) {
			strlist_done(strlist);
			return r;
		}
	}
	return RET_OK;
}

retvalue
chunk_gettruth(const char *chunk, const char *name) { const char *field; field = chunk_getfield(name, chunk); if (field == NULL) return RET_NOTHING; while (*field == ' ' || *field == '\t') field++; if ((field[0] == 'f' || field[0] == 'F') && (field[1] == 'a' || field[1] == 'A') && (field[2] == 'l' || field[2] == 'L') && (field[3] == 's' || field[3] == 'S') && (field[4] == 'e' || field[4] == 'E')) { return RET_NOTHING; } if ((field[0] == 'n' || field[0] == 'N') && (field[1] == 'o' || field[1] == 'O')) { return RET_NOTHING; } // TODO: strict check? return RET_OK; } /* return RET_OK, if field is found, RET_NOTHING, if not */ retvalue chunk_checkfield(const char *chunk, const char *name){ const char *field; field = chunk_getfield(name, chunk); if (field == NULL) return RET_NOTHING; return RET_OK; } /* Parse a package/source-field: ' *value( ?\(version\))? *' */ retvalue chunk_getname(const char *chunk, const char *name, char **pkgname, bool allowversion) { const char *field, *name_end, *p; field = chunk_getfield(name, chunk); if (field == NULL) return RET_NOTHING; while (*field != '\0' && *field != '\n' && xisspace(*field)) field++; name_end = field; /* this has now checked somewhere else for correctness and * is only a pure separation process: * (as package(version) is possible, '(' must be checked) */ while (*name_end != '\0' && *name_end != '\n' && *name_end != '(' && !xisspace(*name_end)) name_end++; p = name_end; while (*p != '\0' && *p != '\n' && xisspace(*p)) p++; if (name_end == field || (*p != '\0' && *p != '\n' && (!allowversion || *p != '('))) { if (*field == '\n' || *field == '\0') { fprintf(stderr, "Error: Field '%s' is empty!\n", name); } else { fprintf(stderr, "Error: Field '%s' contains unexpected character '%c'!\n", name, *p); } return RET_ERROR; } if (*p == '(') { while (*p != '\0' && *p != '\n' && *p != ')') // TODO: perhaps check for wellformed version p++; if (*p != ')') { fprintf(stderr, "Error: Field '%s' misses closing parenthesis!\n", name); 
return RET_ERROR; } p++; } while (*p != '\0' && *p != '\n' && xisspace(*p)) p++; if (*p != '\0' && *p != '\n') { fprintf(stderr, "Error: Field '%s' contains trailing junk starting with '%c'!\n", name, *p); return RET_ERROR; } *pkgname = strndup(field, name_end - field); if (FAILEDTOALLOC(*pkgname)) return RET_ERROR_OOM; return RET_OK; } /* Parse a package/source-field: ' *value( ?\(version\))? *' */ retvalue chunk_getnameandversion(const char *chunk, const char *name, char **pkgname, char **version) { const char *field, *name_end, *p; char *v; field = chunk_getfield(name, chunk); if (field == NULL) return RET_NOTHING; while (*field != '\0' && *field != '\n' && xisspace(*field)) field++; name_end = field; /* this has now checked somewhere else for correctness and * is only a pure separation process: * (as package(version) is possible, '(' must be checked) */ while (*name_end != '\0' && *name_end != '\n' && *name_end != '(' && !xisspace(*name_end)) name_end++; p = name_end; while (*p != '\0' && *p != '\n' && xisspace(*p)) p++; if (name_end == field || (*p != '\0' && *p != '\n' && *p != '(')) { if (*field == '\n' || *field == '\0') { fprintf(stderr, "Error: Field '%s' is empty!\n", name); } else { fprintf(stderr, "Error: Field '%s' contains unexpected character '%c'!\n", name, *p); } return RET_ERROR; } if (*p == '(') { const char *version_begin; p++; while (*p != '\0' && *p != '\n' && xisspace(*p)) p++; version_begin = p; while (*p != '\0' && *p != '\n' && *p != ')' && !xisspace(*p)) // TODO: perhaps check for wellformed version p++; v = strndup(version_begin, p - version_begin); if (FAILEDTOALLOC(v)) return RET_ERROR_OOM; while (*p != '\0' && *p != '\n' && *p != ')' && xisspace(*p)) p++; if (*p != ')') { free(v); if (*p == '\0' || *p == '\n') fprintf(stderr, "Error: Field '%s' misses closing parenthesis!\n", name); else fprintf(stderr, "Error: Field '%s' has multipe words after '('!\n", name); return RET_ERROR; } p++; } else { v = NULL; } while (*p != '\0' && *p != 
'\n' && xisspace(*p)) p++; if (*p != '\0' && *p != '\n') { free(v); fprintf(stderr, "Error: Field '%s' contains trailing junk starting with '%c'!\n", name, *p); return RET_ERROR; } *pkgname = strndup(field, name_end - field); if (FAILEDTOALLOC(*pkgname)) { free(v); return RET_ERROR_OOM; } *version = v; return RET_OK; } /* Add this the to before field, * replacing older fields of this name, if they are already there. */ char *chunk_replacefields(const char *chunk, const struct fieldtoadd *toadd, const char *beforethis, bool maybemissing) { const char *c, *ce; char *newchunk, *n; size_t size, len_beforethis; const struct fieldtoadd *f; retvalue result; bool fieldsadded = false; assert (chunk != NULL && beforethis != NULL); if (toadd == NULL) return NULL; c = chunk; /* calculate the maximal size we might end up with */ size = 2 + strlen(c); f = toadd; while (f != NULL) { if (f->data != NULL) size += 3 + f->len_field + f->len_data; f = f->next; } newchunk = n = malloc(size); if (FAILEDTOALLOC(n)) return NULL; len_beforethis = strlen(beforethis); result = RET_NOTHING; do { /* are we at the place to add the fields yet? */ if (!fieldsadded && strncasecmp(c, beforethis, len_beforethis) == 0 && c[len_beforethis] == ':') { /* add them now: */ f = toadd; while (f != NULL) { if (f->data != NULL) { memcpy(n, f->field, f->len_field); n += f->len_field; *n = ':'; n++; *n = ' '; n++; memcpy(n, f->data, f->len_data); n += f->len_data; *n = '\n'; n++; } f = f->next; } result = RET_OK; fieldsadded = true; } /* is this one of the fields we added/will add? 
*/ f = toadd; while (f != NULL) { if (strncasecmp(c, f->field, f->len_field) == 0 && c[f->len_field] == ':') break; f = f->next; } /* search the end of the field */ ce = c; do { while (*ce != '\n' && *ce != '\0') ce++; if (*ce == '\0') break; ce++; } while (*ce == ' ' || *ce == '\t'); /* copy it, if it is not to be ignored */ if (f == NULL && ce-c > 0) { memcpy(n, c, ce -c); n += ce-c; } /* and proceed with the next */ c = ce; } while (*c != '\0' && *c != '\n'); if (n > newchunk && *(n-1) != '\n') *(n++) = '\n'; if (maybemissing && !fieldsadded) { /* add them now, if they are allowed to come later */ f = toadd; while (f != NULL) { if (f->data != NULL) { memcpy(n, f->field, f->len_field); n += f->len_field; *n = ':'; n++; *n = ' '; n++; memcpy(n, f->data, f->len_data); n += f->len_data; *n = '\n'; n++; } f = f->next; } result = RET_OK; fieldsadded = true; } *n = '\0'; assert (n-newchunk < 0 || (size_t)(n-newchunk) <= size-1); if (result == RET_NOTHING) { fprintf(stderr, "Could not find field '%s' in chunk '%s'!!!\n", beforethis, chunk); assert(false); } return newchunk; } struct fieldtoadd *aodfield_new(const char *field, const char *data, struct fieldtoadd *next) { struct fieldtoadd *n; assert(field != NULL); n = NEW(struct fieldtoadd); if (FAILEDTOALLOC(n)) { addfield_free(next); return NULL; } n->field = field; n->len_field = strlen(field); n->data = data; if (data != NULL) n->len_data = strlen(data); else n->len_data = 0; n->next = next; return n; } struct fieldtoadd *addfield_new(const char *field, const char *data, struct fieldtoadd *next) { struct fieldtoadd *n; assert(field != NULL && data != NULL); n = NEW(struct fieldtoadd); if (FAILEDTOALLOC(n)) { addfield_free(next); return NULL; } n->field = field; n->len_field = strlen(field); n->data = data; n->len_data = strlen(data); n->next = next; return n; } struct fieldtoadd *deletefield_new(const char *field, struct fieldtoadd *next) { struct fieldtoadd *n; assert(field != NULL); n = NEW(struct fieldtoadd); if 
(FAILEDTOALLOC(n)) { addfield_free(next); return NULL; } n->field = field; n->len_field = strlen(field); n->data = NULL; n->len_data = 0; n->next = next; return n; } struct fieldtoadd *addfield_newn(const char *field, const char *data, size_t len, struct fieldtoadd *next) { struct fieldtoadd *n; n = NEW(struct fieldtoadd); if (FAILEDTOALLOC(n)) { addfield_free(next); return NULL; } n->field = field; n->len_field = strlen(field); n->data = data; n->len_data = len; n->next = next; return n; } void addfield_free(struct fieldtoadd *f) { struct fieldtoadd *g; while (f != NULL) { g = f->next; free(f); f = g; } } char *chunk_replacefield(const char *chunk, const char *fieldname, const char *data, bool maybemissing) { struct fieldtoadd toadd; toadd.field = fieldname; toadd.len_field = strlen(fieldname); toadd.data = data; toadd.len_data = strlen(data); toadd.next = NULL; return chunk_replacefields(chunk, &toadd, fieldname, maybemissing); } /* Add field as first field with value data, and remove * all other fields of that name (and of name alsoremove if that is != NULL), */ char *chunk_normalize(const char *chunk, const char *firstfieldname, const char *data) { const char *c, *ce; char *newchunk, *n; size_t size; size_t data_len, field_len; assert (chunk != NULL && firstfieldname != NULL && data != NULL); data_len = strlen(data); field_len = strlen(firstfieldname); c = chunk; /* calculate the maximal size we might end up with */ size = 2 + strlen(c) + 3 + data_len + field_len; newchunk = n = malloc(size); if (FAILEDTOALLOC(n)) return NULL; memcpy(n, firstfieldname, field_len); n += field_len; *(n++) = ':'; *(n++) = ' '; memcpy(n, data, data_len); n += data_len; *(n++) = '\n'; do { bool toremove; if (strncasecmp(c, firstfieldname, field_len) == 0 && c[field_len] == ':') toremove = true; else toremove = false; /* search the end of the field */ ce = c; do { while (*ce != '\n' && *ce != '\0') ce++; if (*ce == '\0') break; ce++; } while (*ce == ' ' || *ce == '\t'); /* copy it, 
if it is not to be ignored */ if (!toremove && ce-c > 0) { memcpy(n, c, ce-c); n += ce-c; } /* and proceed with the next */ c = ce; } while (*c != '\0' && *c != '\n'); if (n > newchunk && *(n-1) != '\n') *(n++) = '\n'; *n = '\0'; return newchunk; } const char *chunk_getstart(const char *start, size_t len, bool commentsallowed) { const char *s, *l; s = start; l = start + len; while (s < l && (*s == ' ' || *s == '\t' || *s == '\r' || *s =='\n')) s++; /* ignore leading comments (even full paragraphs of them) */ while (commentsallowed && s < l && *s == '#') { while (s < l && *s != '\n') s++; while (s < l && (*s == ' ' || *s == '\t' || *s == '\r' || *s =='\n')) s++; } return s; } const char *chunk_over(const char *e) { while (*e != '\0') { if (*(e++) == '\n') { while (*e =='\r') e++; if (*e == '\n') return e+1; } } return e; } /* this is a bit wastefull, as with normaly perfect formated input, it just * writes everything to itself in a inefficent way. But when there are \r * in it or spaces before it or stuff like that, it will be in perfect * form afterwards. */ /* Write the first chunk found in the first len bytes after start * to buffer and set next to the next data found after it. * buffer can be a different buffer may be the buffer start is in * (as long as start is bigger than buffer). 
* buffer must be big enough to store up to len+1 bytes */ size_t chunk_extract(char *buffer, const char *start, size_t len, bool commentsallowed, const char **next) { const char *e, *n, *l; char *p; p = buffer; l = start + len; e = chunk_getstart(start, len, commentsallowed); n = NULL; while (e < l && *e != '\0') { if (*e == '\r') { e++; } else if (*e == '\n') { *(p++) = *(e++); n = e; while (n < l && *n =='\r') n++; if (n < l && *n == '\n') break; e = n; n = NULL; } else { *(p++) = *(e++); } } if (n == NULL) { n = e; assert (n == l || *n == '\0'); assert ((p - buffer) <= (n - start)); *p = '\0'; } else { assert (n < l && *n == '\n'); n++; assert (p - buffer < n - start); *p = '\0'; while (n < l && (*n == '\n' || *n =='\r')) n++; } *next = n; return p - buffer; } reprepro-4.13.1/AUTHORS0000644000175100017510000000004512152651661011435 00000000000000Bernhard R. Link reprepro-4.13.1/binaries.h0000644000175100017510000000413212152651661012333 00000000000000#ifndef REPREPRO_BINARIES_H #define REPREPRO_BINARIES_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" #endif #ifndef REPREPRO_TARGET_H #include "target.h" #endif #ifndef REPREPRO_CHECKSUMS_H #include "checksums.h" #endif /* Functions for the target.h-stuff: */ get_version binaries_getversion; get_installdata binaries_getinstalldata; get_architecture binaries_getarchitecture; get_filekeys binaries_getfilekeys; get_checksums binaries_getchecksums; do_reoverride binaries_doreoverride; do_reoverride ubinaries_doreoverride; do_retrack binaries_retrack; get_sourceandversion binaries_getsourceandversion; complete_checksums binaries_complete_checksums; /* Functions for checkindeb.c and incoming.c: */ struct deb_headers { char *name, *version; char *source; architecture_t architecture; char *control; /* only extracted when requested: */ /*@null@*/char *sourceversion; /* optional fields: */ /*@null@*/char *section; /*@null@*/char *priority; }; /* read contents of filename into deb_headers. 
* - does not follow retvalue conventions, some fields may be set even when * error returned * - no checks for sanity of values, left to the caller */ retvalue binaries_readdeb(struct deb_headers *, const char *filename, bool /*needssourceversion*/); void binaries_debdone(struct deb_headers *); retvalue binaries_calcfilekeys(component_t, const struct deb_headers *, packagetype_t, /*@out@*/struct strlist *); struct overridedata; retvalue binaries_complete(const struct deb_headers *, const char * /*filekey*/, const struct checksums *, const struct overridedata *, const char * /*section*/, const char * /*priority*/, char **/*newcontrol_p*/); retvalue binaries_adddeb(const struct deb_headers *, const struct atomlist */*forcedarchitectures*/, packagetype_t, struct distribution *, /*@null@*/struct trackingdata *, component_t, const struct strlist */*filekeys*/, const char */*control*/); retvalue binaries_checkadddeb(const struct deb_headers *, architecture_t /*forcearchitecture*/, packagetype_t, struct distribution *, bool tracking, component_t, bool /*permitnewerold*/); #endif reprepro-4.13.1/checkindeb.h0000644000175100017510000000306612152651661012623 00000000000000#ifndef REPREPRO_CHECKINDEB_H #define REPREPRO_CHECKINDEB_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" #endif #ifndef REPREPRO_DISTRIBUTION_H #include "distribution.h" #endif #ifndef REPREPRO_DATABASE_H #include "database.h" #endif /* insert the given .deb into the mirror in in the * putting things with architecture of "all" into (and also * causing error, if it is not one of them otherwise) * if overwrite is not NULL, it will be search for fields to reset for this * package. 
(forcesection and forcepriority have higher priority than the * information there), */ retvalue deb_add(component_t, const struct atomlist * /*forcearchitectures*/, /*@null@*/const char * /*forcesection*/, /*@null@*/const char * /*forcepriority*/, packagetype_t, struct distribution *, const char * /*debfilename*/, int /*delete*/, /*@null@*/trackingdb); /* in two steps */ struct debpackage; retvalue deb_addprepared(const struct debpackage *, const struct atomlist * /*forcearchitectures*/, packagetype_t, struct distribution *, struct trackingdata *); retvalue deb_prepare(/*@out@*/struct debpackage **, component_t, architecture_t /*forcearchitectures*/, const char * /*forcesection*/, const char * /*forcepriority*/, packagetype_t, struct distribution *, const char * /*debfilename*/, const char * const /*filekey*/, const struct checksums *, const struct strlist * /*allowed_binaries*/, const char * /*expectedsourcename*/, const char * /*expectedsourceversion*/); void deb_free(/*@only@*/struct debpackage *); #endif reprepro-4.13.1/strlist.c0000644000175100017510000001425512152651661012245 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003,2005,2007 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include "error.h" #include "strlist.h" bool strlist_in(const struct strlist *strlist, const char *element) { int c; char **t; assert(strlist != NULL); c = strlist->count; t = strlist->values; while (c-- != 0) { if (strcmp(*(t++), element) == 0) return true; } return false; } int strlist_ofs(const struct strlist *strlist, const char *element) { int c; char **t; assert(strlist != NULL); c = strlist->count; t = strlist->values; while (c-- != 0) { if (strcmp(*(t++), element) == 0) return (t-strlist->values)-1; } return -1; } bool strlist_subset(const struct strlist *strlist, const struct strlist *subset, const char **missing) { int c; char **t; assert(subset != NULL); c = subset->count; t = subset->values; while (c-- != 0) { if (!strlist_in(strlist, *(t++))) { if (missing != NULL) *missing = *(t-1); return false; } } return true; } retvalue strlist_init_n(int startsize, struct strlist *strlist) { assert(strlist != NULL && startsize >= 0); if (startsize == 0) startsize = 1; strlist->count = 0; strlist->size = startsize; if (startsize > 0) { strlist->values = malloc(startsize*sizeof(char *)); if (FAILEDTOALLOC(strlist->values)) return RET_ERROR_OOM; } else { strlist->values = NULL; } return RET_OK; } retvalue strlist_init_singleton(char *value, struct strlist *strlist) { assert(strlist != NULL); strlist->count = 1; strlist->size = 1; strlist->values = NEW(char *); if (FAILEDTOALLOC(strlist->values)) { free(value); return RET_ERROR_OOM; } strlist->values[0] = value; return RET_OK; } void strlist_init(struct strlist *strlist) { assert(strlist != NULL); strlist->count = 0; strlist->size = 0; strlist->values = NULL; } void strlist_done(struct strlist *strlist) { int c; char **t; assert(strlist != NULL); c = 
strlist->count; t = strlist->values; while (c-- != 0) { free(*t); t++; } free(strlist->values); strlist->values = NULL; } retvalue strlist_add(struct strlist *strlist, char *element) { char **v; assert(strlist != NULL && element != NULL); if (strlist->count >= strlist->size) { strlist->size += 8; v = realloc(strlist->values, strlist->size*sizeof(char *)); if (FAILEDTOALLOC(v)) { free(element); return RET_ERROR_OOM; } strlist->values = v; } strlist->values[strlist->count++] = element; return RET_OK; } retvalue strlist_add_dup(struct strlist *strlist, const char *todup) { char *element = strdup(todup); if (FAILEDTOALLOC(element)) return RET_ERROR_OOM; return strlist_add(strlist, element); } retvalue strlist_include(struct strlist *strlist, char *element) { char **v; assert(strlist != NULL && element != NULL); if (strlist->count >= strlist->size) { strlist->size += 1; v = realloc(strlist->values, strlist->size*sizeof(char *)); if (FAILEDTOALLOC(v)) { free(element); return RET_ERROR_OOM; } strlist->values = v; } arrayinsert(char *, strlist->values, 0, strlist->count); strlist->count++; strlist->values[0] = element; return RET_OK; } retvalue strlist_fprint(FILE *file, const struct strlist *strlist) { int c; char **p; retvalue result; assert(strlist != NULL); assert(file != NULL); c = strlist->count; p = strlist->values; result = RET_OK; while (c > 0) { if (fputs(*(p++), file) == EOF) result = RET_ERROR; if (--c > 0 && fputc(' ', file) == EOF) result = RET_ERROR; } return result; } /* replace the contents of dest with those from orig, which get emptied */ void strlist_move(struct strlist *dest, struct strlist *orig) { assert(dest != NULL && orig != NULL); if (dest == orig) return; dest->size = orig->size; dest->count = orig->count; dest->values = orig->values; orig->size = orig->count = 0; orig->values = NULL; } retvalue strlist_adduniq(struct strlist *strlist, char *element) { // TODO: is there something better feasible? 
if (strlist_in(strlist, element)) { free(element); return RET_OK; } else return strlist_add(strlist, element); } bool strlist_intersects(const struct strlist *a, const struct strlist *b) { int i; for (i = 0 ; i < a->count ; i++) if (strlist_in(b, a->values[i])) return true; return false; } char *strlist_concat(const struct strlist *list, const char *prefix, const char *infix, const char *suffix) { size_t l, prefix_len, infix_len, suffix_len, line_len; char *c, *n; int i; prefix_len = strlen(prefix); infix_len = strlen(infix); suffix_len = strlen(suffix); l = prefix_len + suffix_len; for (i = 0 ; i < list->count ; i++) l += strlen(list->values[i]); if (list->count > 0) l += (list->count-1)*infix_len; c = malloc(l + 1); if (FAILEDTOALLOC(c)) return c; memcpy(c, prefix, prefix_len); n = c + prefix_len; for (i = 0 ; i < list->count ; i++) { line_len = strlen(list->values[i]); memcpy(n, list->values[i], line_len); n += line_len; if (i+1 < list->count) { memcpy(n, infix, infix_len); n += infix_len; } else { memcpy(n, suffix, suffix_len); n += suffix_len; } } assert ((size_t)(n-c) == l); *n = '\0'; return c; } void strlist_remove(struct strlist *strlist, const char *element) { int i, j; assert(strlist != NULL); assert(element != NULL); j = 0; for (i = 0 ; i < strlist->count ; i++) { if (strcmp(strlist->values[i], element) != 0) { if (i != j) strlist->values[j] = strlist->values[i]; j++; } else free(strlist->values[i]); } strlist->count = j; } reprepro-4.13.1/distribution.c0000644000175100017510000010020412152655314013245 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003,2004,2005,2006,2007,2008,2009,2010 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "mprintf.h" #include "atoms.h" #include "sources.h" #include "dirs.h" #include "names.h" #include "release.h" #include "tracking.h" #include "override.h" #include "log.h" #include "ignore.h" #include "uploaderslist.h" #include "configparser.h" #include "byhandhook.h" #include "distribution.h" static retvalue distribution_free(struct distribution *distribution) { retvalue result, r; bool needsretrack = false; if (distribution != NULL) { free(distribution->suite); free(distribution->fakecomponentprefix); free(distribution->version); free(distribution->origin); free(distribution->notautomatic); free(distribution->butautomaticupgrades); free(distribution->label); free(distribution->description); free(distribution->deb_override); free(distribution->udeb_override); free(distribution->dsc_override); free(distribution->uploaders); atomlist_done(&distribution->udebcomponents); atomlist_done(&distribution->architectures); atomlist_done(&distribution->components); strlist_done(&distribution->signwith); strlist_done(&distribution->updates); strlist_done(&distribution->pulls); strlist_done(&distribution->alsoaccept); exportmode_done(&distribution->dsc); exportmode_done(&distribution->deb); exportmode_done(&distribution->udeb); atomlist_done(&distribution->contents_architectures); atomlist_done(&distribution->contents_components); atomlist_done(&distribution->contents_ucomponents); 
override_free(distribution->overrides.deb); override_free(distribution->overrides.udeb); override_free(distribution->overrides.dsc); logger_free(distribution->logger); if (distribution->uploaderslist != NULL) { uploaders_unlock(distribution->uploaderslist); } byhandhooks_free(distribution->byhandhooks); result = RET_OK; while (distribution->targets != NULL) { struct target *next = distribution->targets->next; if (distribution->targets->staletracking) needsretrack = true; r = target_free(distribution->targets); RET_UPDATE(result, r); distribution->targets = next; } if (distribution->tracking != dt_NONE && needsretrack) { fprintf(stderr, "WARNING: Tracking data of '%s' might have become out of date.\n" "Consider running retrack to avoid getting funny effects.\n", distribution->codename); } free(distribution->codename); free(distribution); return result; } else return RET_OK; } /* allow premature free'ing of overrides to save some memory */ void distribution_unloadoverrides(struct distribution *distribution) { override_free(distribution->overrides.deb); override_free(distribution->overrides.udeb); override_free(distribution->overrides.dsc); distribution->overrides.deb = NULL; distribution->overrides.udeb = NULL; distribution->overrides.dsc = NULL; } /* create all contained targets... 
*/ static retvalue createtargets(struct distribution *distribution) { retvalue r; int i, j; struct target *t; struct target *last = NULL; bool has_source = false; for (i = 0 ; i < distribution->components.count ; i++) { component_t c = distribution->components.atoms[i]; for (j = 0 ; j < distribution->architectures.count ; j++) { architecture_t a = distribution->architectures.atoms[j]; if (a == architecture_source) { has_source = true; continue; } if (a == architecture_all) { fprintf(stderr, "Error: Distribution %s contains an architecture called 'all'.\n", distribution->codename); return RET_ERROR; } if (strcmp(atoms_architectures[a], "any") == 0) { fprintf(stderr, "Error: Distribution %s contains an architecture called 'any'.\n", distribution->codename); return RET_ERROR; } r = target_initialize_binary( distribution, c, a, &distribution->deb, distribution->readonly, distribution->fakecomponentprefix, &t); if (RET_IS_OK(r)) { if (last != NULL) { last->next = t; } else { distribution->targets = t; } last = t; } if (RET_WAS_ERROR(r)) return r; if (atomlist_in(&distribution->udebcomponents, c)) { r = target_initialize_ubinary( distribution, c, a, &distribution->udeb, distribution->readonly, distribution->fakecomponentprefix, &t); if (RET_IS_OK(r)) { if (last != NULL) { last->next = t; } else { distribution->targets = t; } last = t; } if (RET_WAS_ERROR(r)) return r; } } /* check if this distribution contains source * (yes, yes, source is not really an architecture, but * the .changes files started with this...) 
*/ if (has_source) { r = target_initialize_source(distribution, c, &distribution->dsc, distribution->readonly, distribution->fakecomponentprefix, &t); if (last != NULL) { last->next = t; } else { distribution->targets = t; } last = t; if (RET_WAS_ERROR(r)) return r; } } return RET_OK; } struct read_distribution_data { struct distribution *distributions; }; CFstartparse(distribution) { CFstartparseVAR(distribution, result_p); struct distribution *n; retvalue r; n = zNEW(struct distribution); if (FAILEDTOALLOC(n)) return RET_ERROR_OOM; /* set some default value: */ r = exportmode_init(&n->udeb, true, NULL, "Packages"); if (RET_WAS_ERROR(r)) { (void)distribution_free(n); return r; } r = exportmode_init(&n->deb, true, "Release", "Packages"); if (RET_WAS_ERROR(r)) { (void)distribution_free(n); return r; } r = exportmode_init(&n->dsc, false, "Release", "Sources"); if (RET_WAS_ERROR(r)) { (void)distribution_free(n); return r; } *result_p = n; return RET_OK; } static bool notpropersuperset(const struct atomlist *allowed, const char *allowedname, const struct atomlist *check, const char *checkname, const char **atoms, const struct distribution *d) { atom_t missing; if (!atomlist_subset(allowed, check, &missing)) { fprintf(stderr, "In distribution description of '%s' (line %u to %u in %s):\n" "%s contains '%s' not found in %s!\n", d->codename, d->firstline, d->lastline, d->filename, checkname, atoms[missing], allowedname); return true; } return false; } static inline retvalue checkcomponentsequalduetofake(const struct distribution *d) { size_t l; int i, j; if (d->fakecomponentprefix == NULL) return RET_OK; l = strlen(d->fakecomponentprefix); for (i = 0 ; i < d->components.count ; i++) { const char *c1 = atoms_components[d->components.atoms[i]]; if (strncmp(c1, d->fakecomponentprefix, l) != 0) continue; if (d->fakecomponentprefix[l] != '/') continue; for (j = 0 ; i < d->components.count ; j++) { const char *c2; if (j == i) continue; c2 = 
atoms_components[d->components.atoms[j]]; if (strcmp(c1 + l + 1, c2) == 0) { fprintf(stderr, "ERROR: distribution '%s' has components '%s' and '%s',\n" "which would be output to the same place due to FakeComponentPrefix '%s'.\n", d->codename, c1, c2, d->fakecomponentprefix); return RET_ERROR; } } } return RET_OK; } CFfinishparse(distribution) { CFfinishparseVARS(distribution, n, last_p, mydata); struct distribution *d; retvalue r; if (!complete) { distribution_free(n); return RET_NOTHING; } n->filename = config_filename(iter); n->firstline = config_firstline(iter); n->lastline = config_line(iter) - 1; /* Do some consitency checks */ for (d = mydata->distributions; d != NULL; d = d->next) { if (strcmp(d->codename, n->codename) == 0) { fprintf(stderr, "Multiple distributions with the common codename: '%s'!\n" "First was in %s line %u to %u,\n" "now another in lines %u to %u of %s.\n", n->codename, d->filename, d->firstline, d->lastline, n->firstline, n->lastline, n->filename); distribution_free(n); return RET_ERROR; } } if (notpropersuperset(&n->architectures, "Architectures", &n->contents_architectures, "ContentsArchitectures", atoms_architectures, n) || notpropersuperset(&n->components, "Components", &n->contents_components, "ContentsComponents", atoms_components, n) || notpropersuperset(&n->udebcomponents, "UDebComponents", &n->contents_ucomponents, "ContentsUComponents", atoms_components, n) || // TODO: instead of checking here make sure it can have more // in the rest of the code...: notpropersuperset(&n->components, "Components", &n->udebcomponents, "UDebComponents", atoms_components, n)) { (void)distribution_free(n); return RET_ERROR; } /* overwrite creation of contents files based on given lists: */ if (n->contents_components_set) { if (n->contents_components.count > 0) { n->contents.flags.enabled = true; n->contents.flags.nodebs = false; } else { n->contents.flags.nodebs = true; } } if (n->contents_ucomponents_set) { if (n->contents_ucomponents.count > 0) { 
n->contents.flags.enabled = true; n->contents.flags.udebs = true; } else { n->contents.flags.udebs = false; } } if (n->contents_architectures_set) { if (n->contents_architectures.count > 0) n->contents.flags.enabled = true; else n->contents.flags.enabled = false; } r = checkcomponentsequalduetofake(n); if (RET_WAS_ERROR(r)) { (void)distribution_free(n); return r; } /* prepare substructures */ r = createtargets(n); if (RET_WAS_ERROR(r)) { (void)distribution_free(n); return r; } n->status = RET_NOTHING; n->lookedat = false; n->selected = false; /* put in linked list */ if (*last_p == NULL) mydata->distributions = n; else (*last_p)->next = n; *last_p = n; return RET_OK; } CFallSETPROC(distribution, suite) CFallSETPROC(distribution, version) CFallSETPROC(distribution, origin) CFallSETPROC(distribution, notautomatic) CFallSETPROC(distribution, butautomaticupgrades) CFtruthSETPROC2(distribution, readonly, readonly) CFallSETPROC(distribution, label) CFallSETPROC(distribution, description) CFsignwithSETPROC(distribution, signwith) CFfileSETPROC(distribution, deb_override) CFfileSETPROC(distribution, udeb_override) CFfileSETPROC(distribution, dsc_override) CFfileSETPROC(distribution, uploaders) CFuniqstrlistSETPROC(distribution, alsoaccept) CFstrlistSETPROC(distribution, updates) CFstrlistSETPROC(distribution, pulls) CFinternatomsSETPROC(distribution, components, checkforcomponent, at_component) CFinternatomsSETPROC(distribution, architectures, checkforarchitecture, at_architecture) CFatomsublistSETPROC(distribution, contents_architectures, at_architecture, architectures, "Architectures") CFatomsublistSETPROC(distribution, contents_components, at_component, components, "Components") CFatomsublistSETPROC(distribution, udebcomponents, at_component, components, "Components") CFatomsublistSETPROC(distribution, contents_ucomponents, at_component, udebcomponents, "UDebComponents") CFexportmodeSETPROC(distribution, udeb) CFexportmodeSETPROC(distribution, deb) 
CFexportmodeSETPROC(distribution, dsc) CFcheckvalueSETPROC(distribution, codename, checkforcodename) CFcheckvalueSETPROC(distribution, fakecomponentprefix, checkfordirectoryandidentifier) CFtimespanSETPROC(distribution, validfor) CFUSETPROC(distribution, Contents) { CFSETPROCVAR(distribution, d); return contentsoptions_parse(d, iter); } CFUSETPROC(distribution, logger) { CFSETPROCVAR(distribution, d); return logger_init(iter, &d->logger); } CFUSETPROC(distribution, Tracking) { CFSETPROCVAR(distribution, d); return tracking_parse(d, iter); } CFUSETPROC(distribution, byhandhooks) { CFSETPROCVAR(distribution, d); return byhandhooks_parse(iter, &d->byhandhooks); } static const struct configfield distributionconfigfields[] = { CF("AlsoAcceptFor", distribution, alsoaccept), CFr("Architectures", distribution, architectures), CF("ByHandHooks", distribution, byhandhooks), CFr("Codename", distribution, codename), CFr("Components", distribution, components), CF("ContentsArchitectures", distribution, contents_architectures), CF("ContentsComponents", distribution, contents_components), CF("Contents", distribution, Contents), CF("ContentsUComponents", distribution, contents_ucomponents), CF("DebIndices", distribution, deb), CF("DebOverride", distribution, deb_override), CF("Description", distribution, description), CF("DscIndices", distribution, dsc), CF("DscOverride", distribution, dsc_override), CF("FakeComponentPrefix", distribution, fakecomponentprefix), CF("Label", distribution, label), CF("Log", distribution, logger), CF("NotAutomatic", distribution, notautomatic), CF("ButAutomaticUpgrades", distribution, butautomaticupgrades), CF("Origin", distribution, origin), CF("Pull", distribution, pulls), CF("ReadOnly", distribution, readonly), CF("SignWith", distribution, signwith), CF("Suite", distribution, suite), CF("Tracking", distribution, Tracking), CF("UDebComponents", distribution, udebcomponents), CF("UDebIndices", distribution, udeb), CF("UDebOverride", distribution, 
udeb_override), CF("Update", distribution, updates), CF("Uploaders", distribution, uploaders), CF("ValidFor", distribution, validfor), CF("Version", distribution, version) }; /* read specification of all distributions */ retvalue distribution_readall(struct distribution **distributions) { struct read_distribution_data mydata; retvalue result; mydata.distributions = NULL; // TODO: readd some way to tell about -b or --confdir here? /* result = regularfileexists(fn); if (RET_WAS_ERROR(result)) { fprintf(stderr, "Could not find '%s'!\n" "(Have you forgotten to specify a basedir by -b?\n" "To only set the conf/ dir use --confdir)\n", fn); free(mydata.filter.found); free(fn); return RET_ERROR_MISSING; } */ result = configfile_parse("distributions", IGNORABLE(unknownfield), startparsedistribution, finishparsedistribution, "distribution definition", distributionconfigfields, ARRAYCOUNT(distributionconfigfields), &mydata); if (result == RET_ERROR_UNKNOWNFIELD) fprintf(stderr, "Use --ignore=unknownfield to ignore unknown fields\n"); if (RET_WAS_ERROR(result)) { distribution_freelist(mydata.distributions); return result; } if (mydata.distributions == NULL) { fprintf(stderr, "No distribution definitions found in %s/distributions!\n", global.confdir); distribution_freelist(mydata.distributions); return RET_ERROR_MISSING; } *distributions = mydata.distributions; return RET_OK; } /* call for each package */ retvalue distribution_foreach_package(struct distribution *distribution, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes, each_package_action action, each_target_action target_action, void *data) { retvalue result, r; struct target *t; struct target_cursor iterator; const char *package, *control; result = RET_NOTHING; for (t = distribution->targets ; t != NULL ; t = t->next) { if (!target_matches(t, components, architectures, packagetypes)) continue; if (target_action != NULL) { r = target_action(distribution, t, 
data); if (RET_WAS_ERROR(r)) return result; if (r == RET_NOTHING) continue; } r = target_openiterator(t, READONLY, &iterator); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) return result; while (target_nextpackage(&iterator, &package, &control)) { r = action(distribution, t, package, control, data); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; } r = target_closeiterator(&iterator); RET_ENDUPDATE(result, r); if (RET_WAS_ERROR(result)) return result; } return result; } retvalue distribution_foreach_package_c(struct distribution *distribution, const struct atomlist *components, architecture_t architecture, packagetype_t packagetype, each_package_action action, void *data) { retvalue result, r; struct target *t; const char *package, *control; struct target_cursor iterator; result = RET_NOTHING; for (t = distribution->targets ; t != NULL ; t = t->next) { if (components != NULL && !atomlist_in(components, t->component)) continue; if (limitation_missed(architecture, t->architecture)) continue; if (limitation_missed(packagetype, t->packagetype)) continue; r = target_openiterator(t, READONLY, &iterator); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) return result; while (target_nextpackage(&iterator, &package, &control)) { r = action(distribution, t, package, control, data); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; } r = target_closeiterator(&iterator); RET_ENDUPDATE(result, r); if (RET_WAS_ERROR(result)) return result; } return result; } struct target *distribution_gettarget(const struct distribution *distribution, component_t component, architecture_t architecture, packagetype_t packagetype) { struct target *t = distribution->targets; assert (atom_defined(component)); assert (atom_defined(architecture)); assert (atom_defined(packagetype)); // TODO: think about making read only access and only alowing readwrite when lookedat is set while (t != NULL && (t->component != component || t->architecture != architecture || t->packagetype != packagetype)) { t = 
t->next; } return t; } struct target *distribution_getpart(const struct distribution *distribution, component_t component, architecture_t architecture, packagetype_t packagetype) { struct target *t = distribution->targets; assert (atom_defined(component)); assert (atom_defined(architecture)); assert (atom_defined(packagetype)); while (t != NULL && (t->component != component || t->architecture != architecture || t->packagetype != packagetype)) { t = t->next; } if (t == NULL) { fprintf(stderr, "Internal error in distribution_getpart: Bogus request for c='%s' a='%s' t='%s' in '%s'!\n", atoms_components[component], atoms_architectures[architecture], atoms_packagetypes[packagetype], distribution->codename); abort(); } return t; } /* mark all distributions matching one of the first argc argv */ retvalue distribution_match(struct distribution *alldistributions, int argc, const char *argv[], bool lookedat, bool allowreadonly) { struct distribution *d; bool found[argc], unusable_as_suite[argc]; struct distribution *has_suite[argc]; int i; assert (alldistributions != NULL); if (argc <= 0) { for (d = alldistributions ; d != NULL ; d = d->next) { if (!allowreadonly && d->readonly) continue; d->selected = true; d->lookedat = lookedat; } return RET_OK; } memset(found, 0, sizeof(found)); memset(unusable_as_suite, 0, sizeof(unusable_as_suite)); memset(has_suite, 0, sizeof(has_suite)); for (d = alldistributions ; d != NULL ; d = d->next) { for (i = 0 ; i < argc ; i++) { if (strcmp(argv[i], d->codename) == 0) { assert (!found[i]); found[i] = true; d->selected = true; if (lookedat) d->lookedat = lookedat; if (!allowreadonly && d->readonly) { fprintf(stderr, "Error: %s is readonly, so operation not allowed!\n", d->codename); return RET_ERROR; } } else if (d->suite != NULL && strcmp(argv[i], d->suite) == 0) { if (has_suite[i] != NULL) unusable_as_suite[i] = true; has_suite[i] = d; } } } for (i = 0 ; i < argc ; i++) { if (!found[i]) { if (has_suite[i] != NULL && !unusable_as_suite[i]) { 
if (!allowreadonly && has_suite[i]->readonly) { fprintf(stderr, "Error: %s is readonly, so operation not allowed!\n", has_suite[i]->codename); return RET_ERROR; } has_suite[i]->selected = true; if (lookedat) has_suite[i]->lookedat = lookedat; continue; } fprintf(stderr, "No distribution definition of '%s' found in '%s/distributions'!\n", argv[i], global.confdir); if (unusable_as_suite[i]) fprintf(stderr, "(It is not the codename of any distribution and there are multiple\n" "distributions with this as suite name.)\n"); return RET_ERROR_MISSING; } } return RET_OK; } retvalue distribution_get(struct distribution *alldistributions, const char *name, bool lookedat, struct distribution **distribution) { struct distribution *d, *d2; d = alldistributions; while (d != NULL && strcmp(name, d->codename) != 0) d = d->next; if (d == NULL) { for (d2 = alldistributions; d2 != NULL ; d2 = d2->next) { if (d2->suite == NULL) continue; if (strcmp(name, d2->suite) != 0) continue; if (d != NULL) { fprintf(stderr, "No distribution has '%s' as codename, but multiple as suite name,\n" "thus it cannot be used to determine a distribution.\n", name); return RET_ERROR_MISSING; } d = d2; } } if (d == NULL) { fprintf(stderr, "Cannot find definition of distribution '%s'!\n", name); return RET_ERROR_MISSING; } d->selected = true; if (lookedat) d->lookedat = true; *distribution = d; return RET_OK; } retvalue distribution_snapshot(struct distribution *distribution, const char *name) { struct target *target; retvalue result, r; struct release *release; char *id; assert (distribution != NULL); r = release_initsnapshot(distribution->codename, name, &release); if (RET_WAS_ERROR(r)) return r; result = RET_NOTHING; for (target=distribution->targets; target != NULL ; target = target->next) { r = release_mkdir(release, target->relativedirectory); RET_ENDUPDATE(result, r); if (RET_WAS_ERROR(r)) break; r = target_export(target, false, true, release); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; if 
(target->exportmode->release != NULL) { r = release_directorydescription(release, distribution, target, target->exportmode->release, false); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; } } if (!RET_WAS_ERROR(result)) { result = release_prepare(release, distribution, false); assert (result != RET_NOTHING); } if (RET_WAS_ERROR(result)) { release_free(release); return result; } result = release_finish(release, distribution); if (RET_WAS_ERROR(result)) return r; id = mprintf("s=%s=%s", distribution->codename, name); if (FAILEDTOALLOC(id)) return RET_ERROR_OOM; r = distribution_foreach_package(distribution, atom_unknown, atom_unknown, atom_unknown, package_referenceforsnapshot, NULL, id); free(id); RET_UPDATE(result, r); return result; } static retvalue export(struct distribution *distribution, bool onlyneeded) { struct target *target; retvalue result, r; struct release *release; assert (distribution != NULL); if (distribution->readonly) { fprintf(stderr, "Error: trying to re-export read-only distribution %s\n", distribution->codename); return RET_ERROR; } r = release_init(&release, distribution->codename, distribution->suite, distribution->fakecomponentprefix); if (RET_WAS_ERROR(r)) return r; result = RET_NOTHING; for (target=distribution->targets; target != NULL ; target = target->next) { r = release_mkdir(release, target->relativedirectory); RET_ENDUPDATE(result, r); if (RET_WAS_ERROR(r)) break; r = target_export(target, onlyneeded, false, release); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; if (target->exportmode->release != NULL) { r = release_directorydescription(release, distribution, target, target->exportmode->release, onlyneeded); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; } } if (!RET_WAS_ERROR(result) && distribution->contents.flags.enabled) { r = contents_generate(distribution, release, onlyneeded); } if (!RET_WAS_ERROR(result)) { result = release_prepare(release, distribution, onlyneeded); if (result == RET_NOTHING) { 
release_free(release); return result; } } if (RET_WAS_ERROR(result)) { bool workleft = false; release_free(release); fprintf(stderr, "ERROR: Could not finish exporting '%s'!\n", distribution->codename); for (target=distribution->targets; target != NULL ; target = target->next) { workleft |= target->saved_wasmodified; } if (workleft) { (void)fputs( "This means that from outside your repository will still look like before (and\n" "should still work if this old state worked), but the changes intended with this\n" "call will not be visible until you call export directly (via reprepro export)\n" "Changes will also get visible when something else changes the same file and\n" "thus creates a new export of that file, but even changes to other parts of the\n" "same distribution will not!\n", stderr); } } else { r = release_finish(release, distribution); RET_UPDATE(result, r); } if (RET_IS_OK(result)) distribution->status = RET_NOTHING; return result; } retvalue distribution_fullexport(struct distribution *distribution) { return export(distribution, false); } retvalue distribution_freelist(struct distribution *distributions) { retvalue result, r; result = RET_NOTHING; while (distributions != NULL) { struct distribution *d = distributions->next; r = distribution_free(distributions); RET_UPDATE(result, r); distributions = d; } return result; } retvalue distribution_exportlist(enum exportwhen when, struct distribution *distributions) { retvalue result, r; bool todo = false; struct distribution *d; if (when == EXPORT_SILENT_NEVER) { for (d = distributions ; d != NULL ; d = d->next) { struct target *t; for (t = d->targets ; t != NULL ; t = t->next) t->wasmodified = false; } return RET_NOTHING; } if (when == EXPORT_NEVER) { if (verbose > 10) fprintf(stderr, "Not exporting anything as --export=never specified\n"); return RET_NOTHING; } for (d=distributions; d != NULL; d = d->next) { if (d->omitted || !d->selected) continue; if (d->lookedat && (RET_IS_OK(d->status) || (d->status == 
RET_NOTHING && when != EXPORT_CHANGED) || when == EXPORT_FORCE)) { todo = true; } } if (verbose >= 0 && todo) printf("Exporting indices...\n"); result = RET_NOTHING; for (d=distributions; d != NULL; d = d->next) { if (d->omitted || !d->selected) continue; if (!d->lookedat) { if (verbose >= 30) printf( " Not exporting %s because not looked at.\n", d->codename); } else if ((RET_WAS_ERROR(d->status)||interrupted()) && when != EXPORT_FORCE) { if (verbose >= 10) fprintf(stderr, " Not exporting %s because there have been errors and no --export=force.\n", d->codename); } else if (d->status==RET_NOTHING && when==EXPORT_CHANGED) { struct target *t; if (verbose >= 10) printf( " Not exporting %s because of no recorded changes and --export=changed.\n", d->codename); /* some paranoid check */ for (t = d->targets ; t != NULL ; t = t->next) { if (t->wasmodified) { fprintf(stderr, "A paranoid check found distribution %s would not have been exported,\n" "despite having parts that are marked changed by deeper code.\n" "Please report this and how you got this message as bugreport. Thanks.\n" "Doing a export despite --export=changed....\n", d->codename); r = export(d, true); RET_UPDATE(result, r); break; } } } else { assert (RET_IS_OK(d->status) || (d->status == RET_NOTHING && when != EXPORT_CHANGED) || when == EXPORT_FORCE); r = export(d, true); RET_UPDATE(result, r); } } return result; } /* get a pointer to the apropiate part of the linked list */ struct distribution *distribution_find(struct distribution *distributions, const char *name) { struct distribution *d = distributions, *r; while (d != NULL && strcmp(d->codename, name) != 0) d = d->next; if (d != NULL) return d; d = distributions; while (d != NULL && !strlist_in(&d->alsoaccept, name)) d = d->next; r = d; if (r != NULL) { d = d->next; while (d != NULL && ! 
strlist_in(&d->alsoaccept, name))
			d = d->next;
		if (d == NULL)
			return r;
		/* a second AlsoAcceptFor hit makes the name ambiguous */
		fprintf(stderr,
"No distribution has codename '%s' and multiple have it in AlsoAcceptFor!\n",
				name);
		return NULL;
	}
	/* last resort: match against suite names, again requiring the
	 * match to be unique */
	d = distributions;
	while (d != NULL && (d->suite == NULL
				|| strcmp(d->suite, name) != 0))
		d = d->next;
	r = d;
	if (r == NULL) {
		fprintf(stderr, "No distribution named '%s' found!\n",
				name);
		return NULL;
	}
	d = d->next;
	while (d != NULL && (d->suite == NULL
				|| strcmp(d->suite, name) != 0))
		d = d->next;
	if (d == NULL)
		return r;
	fprintf(stderr,
"No distribution has codename '%s' and multiple have it as suite-name!\n",
			name);
	return NULL;
}

/* Lazily load the deb/udeb/dsc override files of a distribution into
 * memory.  Already loaded tables are kept; on a read error the
 * corresponding slot is reset to NULL and the error returned.
 * Returns RET_OK if at least one table is loaded afterwards,
 * RET_NOTHING if none of the three is configured. */
retvalue distribution_loadalloverrides(struct distribution *distribution) {
	retvalue r;

	if (distribution->overrides.deb == NULL) {
		r = override_read(distribution->deb_override,
				&distribution->overrides.deb, false);
		if (RET_WAS_ERROR(r)) {
			distribution->overrides.deb = NULL;
			return r;
		}
	}
	if (distribution->overrides.udeb == NULL) {
		r = override_read(distribution->udeb_override,
				&distribution->overrides.udeb, false);
		if (RET_WAS_ERROR(r)) {
			distribution->overrides.udeb = NULL;
			return r;
		}
	}
	if (distribution->overrides.dsc == NULL) {
		/* note: dsc overrides are parsed in source mode */
		r = override_read(distribution->dsc_override,
				&distribution->overrides.dsc, true);
		if (RET_WAS_ERROR(r)) {
			distribution->overrides.dsc = NULL;
			return r;
		}
	}
	if (distribution->overrides.deb != NULL
			|| distribution->overrides.udeb != NULL
			|| distribution->overrides.dsc != NULL)
		return RET_OK;
	else
		return RET_NOTHING;
}

/* Lazily load the uploaders list, if an Uploaders file is configured.
 * Returns RET_NOTHING when no Uploaders file is set. */
retvalue distribution_loaduploaders(struct distribution *distribution) {
	if (distribution->uploaders != NULL) {
		if (distribution->uploaderslist != NULL)
			return RET_OK;
		return uploaders_get(&distribution->uploaderslist,
				distribution->uploaders);
	} else {
		distribution->uploaderslist = NULL;
		return RET_NOTHING;
	}
}

/* Release a previously loaded uploaders list (no-op if not loaded). */
void distribution_unloaduploaders(struct distribution *distribution) {
	if (distribution->uploaderslist != NULL) {
		uploaders_unlock(distribution->uploaderslist);
		distribution->uploaderslist = NULL;
	}
}
/* Prepare a distribution for modification: refuse read-only
 * distributions, make sure its logger (if any) is started, and mark
 * the distribution as looked at so its indices get re-exported. */
retvalue distribution_prepareforwriting(struct distribution *distribution) {
	retvalue r;

	if (distribution->readonly) {
		fprintf(stderr,
"Error: distribution %s is read-only.\n"
"Current operation not possible because it needs write access.\n",
			distribution->codename);
		return RET_ERROR;
	}
	if (distribution->logger != NULL) {
		r = logger_prepare(distribution->logger);
		if (RET_WAS_ERROR(r))
			return r;
	}
	distribution->lookedat = true;
	return RET_OK;
}

/* delete every package decider returns RET_OK for */
retvalue distribution_remove_packages(struct distribution *distribution,
		const struct atomlist *components,
		const struct atomlist *architectures,
		const struct atomlist *packagetypes,
		each_package_action decider,
		struct trackingdata *trackingdata, void *data) {
	retvalue result, r;
	struct target *t;
	struct target_cursor iterator;
	const char *package, *control;

	if (distribution->readonly) {
		fprintf(stderr,
"Error: trying to delete packages in read-only distribution %s.\n",
				distribution->codename);
		return RET_ERROR;
	}
	result = RET_NOTHING;
	/* walk all targets matching the component/architecture/type
	 * limits and remove every package the decider accepts */
	for (t = distribution->targets ; t != NULL ; t = t->next) {
		if (!target_matches(t, components, architectures,
					packagetypes))
			continue;
		r = target_openiterator(t, READWRITE, &iterator);
		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r))
			return result;
		while (target_nextpackage(&iterator, &package, &control)) {
			r = decider(distribution, t, package, control,
					data);
			RET_UPDATE(result, r);
			if (RET_WAS_ERROR(r))
				break;
			if (RET_IS_OK(r)) {
				/* removal status also marks the
				 * distribution itself as changed */
				r = target_removepackage_by_cursor(&iterator,
						distribution->logger,
						trackingdata);
				RET_UPDATE(result, r);
				RET_UPDATE(distribution->status, r);
			}
		}
		r = target_closeiterator(&iterator);
		RET_ENDUPDATE(result, r);
		if (RET_WAS_ERROR(result))
			return result;
	}
	return result;
}
reprepro-4.13.1/override.c0000644000175100017510000002347512152651661012364 00000000000000/* This file is part of "reprepro" * Copyright (C) 2004,2005,2007,2010 Bernhard R.
Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "chunks.h" #include "sources.h" #include "names.h" #include "globmatch.h" #include "override.h" #include "configparser.h" struct overridedata { struct strlist fields; }; struct overridepackage { char *packagename; struct overridedata data; }; struct overridepattern { struct overridepattern *next; char *pattern; struct overridedata data; }; struct overridefile { /* a tree root of struct overridepackage */ void *packages; struct overridepattern *patterns; }; #ifdef HAVE_TDESTROY static void freeoverridepackage(void *n) { struct overridepackage *p = n; free(p->packagename); strlist_done(&p->data.fields); free(p); } #endif void override_free(struct overridefile *info) { struct overridepattern *i; if (info == NULL) return; #ifdef HAVE_TDESTROY tdestroy(info->packages, freeoverridepackage); #endif while ((i = info->patterns) != NULL) { if (i == NULL) return; strlist_done(&i->data.fields); free(i->pattern); info->patterns = i->next; free(i); } free(info); } static bool forbidden_field_name(bool source, const char *field) { if (strcasecmp(field, "Package") == 0) return true; if (strcasecmp(field, "Version") == 0) return true; if (source) { if (strcasecmp(field, "Files") == 0) return true; if (strcasecmp(field, 
"Directory") == 0) return true; if (strcasecmp(field, "Checksums-Sha256") == 0) return true; if (strcasecmp(field, "Checksums-Sha1") == 0) return true; return false; } else { if (strcasecmp(field, "Filename") == 0) return true; if (strcasecmp(field, "MD5sum") == 0) return true; if (strcasecmp(field, "SHA1") == 0) return true; if (strcasecmp(field, "SHA256") == 0) return true; if (strcasecmp(field, "Size") == 0) return true; return false; } } static retvalue add_override_field(struct overridedata *data, const char *secondpart, const char *thirdpart, bool source) { retvalue r; char *p; if (forbidden_field_name(source, secondpart)) { fprintf(stderr, "Error: field '%s' not allowed in override files.\n", secondpart); return RET_ERROR; } if (secondpart[0] == '$') { if (strcasecmp(secondpart, "$Delete") == 0) { if (forbidden_field_name(source, thirdpart)) { fprintf(stderr, "Error: field '%s' not allowed in override files (not even as to be deleted).\n", thirdpart); return RET_ERROR; } } else if (strcasecmp(secondpart, "$Component") != 0) { fprintf(stderr, "Warning: special override field '%s' unknown and will be ignored\n", secondpart); } } p = strdup(secondpart); if (FAILEDTOALLOC(p)) return RET_ERROR_OOM; r = strlist_add(&data->fields, p); if (RET_WAS_ERROR(r)) return r; p = strdup(thirdpart); if (FAILEDTOALLOC(p)) return RET_ERROR_OOM; r = strlist_add(&data->fields, p); return r; } static struct overridepackage *new_package(const char *name) { struct overridepackage *p; p = zNEW(struct overridepackage); if (FAILEDTOALLOC(p)) return NULL; p->packagename = strdup(name); if (FAILEDTOALLOC(p->packagename)) { free(p); return NULL; } return p; } static int opackage_compare(const void *a, const void *b) { const struct overridepackage *p1 = a, *p2 = b; return strcmp(p1->packagename, p2->packagename); } static retvalue add_override(struct overridefile *i, const char *firstpart, const char *secondpart, const char *thirdpart, bool source) { struct overridepackage *pkg, **node; 
retvalue r; const char *c; struct overridepattern *p, **l; c = firstpart; while (*c != '\0' && *c != '*' && *c != '[' && *c != '?') c++; if (*c != '\0') { /* This is a pattern, put into the pattern list */ l = &i->patterns; while ((p = *l) != NULL && strcmp(p->pattern, firstpart) != 0) { l = &p->next; } if (p == NULL) { p = zNEW(struct overridepattern); if (FAILEDTOALLOC(p)) return RET_ERROR_OOM; p->pattern = strdup(firstpart); if (FAILEDTOALLOC(p->pattern)) { free(p); return RET_ERROR_OOM; } } r = add_override_field(&p->data, secondpart, thirdpart, source); if (RET_WAS_ERROR(r)) { if (*l != p) { free(p->pattern); free(p); } return r; } *l = p; return RET_OK; } pkg = new_package(firstpart); if (FAILEDTOALLOC(pkg)) return RET_ERROR_OOM; node = tsearch(pkg, &i->packages, opackage_compare); if (FAILEDTOALLOC(node)) return RET_ERROR_OOM; if (*node == pkg) { r = strlist_init_n(6, &pkg->data.fields); if (RET_WAS_ERROR(r)) return r; } else { free(pkg->packagename); free(pkg); pkg = *node; } return add_override_field(&(*node)->data, secondpart, thirdpart, source); } retvalue override_read(const char *filename, struct overridefile **info, bool source) { struct overridefile *i; FILE *file; char buffer[1001]; if (filename == NULL) { *info = NULL; return RET_OK; } char *fn = configfile_expandname(filename, NULL); if (FAILEDTOALLOC(fn)) return RET_ERROR_OOM; file = fopen(fn, "r"); free(fn); if (file == NULL) { int e = errno; fprintf(stderr, "Error %d opening override file '%s': %s\n", e, filename, strerror(e)); return RET_ERRNO(e); } i = zNEW(struct overridefile); if (FAILEDTOALLOC(i)) { (void)fclose(file); return RET_ERROR_OOM; } while (fgets(buffer, 1000, file) != NULL){ retvalue r; const char *firstpart, *secondpart, *thirdpart; char *p; size_t l = strlen(buffer); if (buffer[l-1] != '\n') { if (l >= 999) { fprintf(stderr, "Too long line in '%s'!\n", filename); override_free(i); (void)fclose(file); return RET_ERROR; } fprintf(stderr, "Missing line terminator in '%s'!\n", 
filename); } else { l--; buffer[l] = '\0'; } while (l>0 && xisspace(buffer[l])) { buffer[l] = '\0'; l--; } if (l== 0) continue; p = buffer; while (*p !='\0' && xisspace(*p)) *(p++)='\0'; firstpart = p; while (*p !='\0' && !xisspace(*p)) p++; while (*p !='\0' && xisspace(*p)) *(p++)='\0'; secondpart = p; while (*p !='\0' && !xisspace(*p)) p++; while (*p !='\0' && xisspace(*p)) *(p++)='\0'; thirdpart = p; r = add_override(i, firstpart, secondpart, thirdpart, source); if (RET_WAS_ERROR(r)) { override_free(i); (void)fclose(file); return r; } } (void)fclose(file); if (i->packages != NULL || i->patterns != NULL) { *info = i; return RET_OK; } else { override_free(i); *info = NULL; return RET_NOTHING; } } const struct overridedata *override_search(const struct overridefile *overrides, const char *package) { struct overridepackage pkg, **node; struct overridepattern *p; if (overrides == NULL) return NULL; pkg.packagename = (char*)package; node = tfind(&pkg, &overrides->packages, opackage_compare); if (node != NULL && *node != NULL) return &(*node)->data; for (p = overrides->patterns ; p != NULL ; p = p->next) { if (globmatch(package, p->pattern)) return &p->data; } return NULL; } const char *override_get(const struct overridedata *override, const char *field) { int i; if (override == NULL) return NULL; for (i = 0 ; i+1 < override->fields.count ; i+=2) { // TODO curently case-sensitiv. warn if otherwise? if (strcmp(override->fields.values[i], field) == 0) return override->fields.values[i+1]; } return NULL; } /* add new fields to otherreplaces, but not "Section", or "Priority". 
* incorporates otherreplaces, or frees them on error, * returns otherreplaces when nothing was to do, NULL on RET_ERROR_OOM*/ struct fieldtoadd *override_addreplacefields(const struct overridedata *override, struct fieldtoadd *otherreplaces) { int i; if (override == NULL) return otherreplaces; for (i = 0 ; i+1 < override->fields.count ; i+=2) { if (strcmp(override->fields.values[i], SECTION_FIELDNAME) != 0 && strcmp(override->fields.values[i], PRIORITY_FIELDNAME) != 0 && override->fields.values[i][0] != '$') { otherreplaces = addfield_new( override->fields.values[i], override->fields.values[i+1], otherreplaces); if (otherreplaces == NULL) return NULL; } else if (strcasecmp(override->fields.values[i], "$delete") == 0) { otherreplaces = deletefield_new( override->fields.values[i+1], otherreplaces); if (otherreplaces == NULL) return NULL; } } return otherreplaces; } retvalue override_allreplacefields(const struct overridedata *override, struct fieldtoadd **fields_p) { int i; struct fieldtoadd *fields = NULL; assert (override != NULL); for (i = 0 ; i+1 < override->fields.count ; i+=2) { if (override->fields.values[i][0] != '$') { fields = addfield_new( override->fields.values[i], override->fields.values[i+1], fields); if (FAILEDTOALLOC(fields)) return RET_ERROR_OOM; } else if (strcasecmp(override->fields.values[i], "$delete") == 0) { fields = deletefield_new( override->fields.values[i+1], fields); if (FAILEDTOALLOC(fields)) return RET_ERROR_OOM; } } if (fields == NULL) return RET_NOTHING; *fields_p = fields; return RET_OK; } reprepro-4.13.1/signature.c0000644000175100017510000003607312152651661012544 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003,2004,2005,2006,2007,2009,2010 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include "signature_p.h" #include "mprintf.h" #include "strlist.h" #include "dirs.h" #include "names.h" #include "chunks.h" #include "readtextfile.h" #ifdef HAVE_LIBGPGME gpgme_ctx_t context = NULL; retvalue gpgerror(gpg_error_t err) { if (err != 0) { fprintf(stderr, "gpgme gave error %s:%d: %s\n", gpg_strsource(err), gpg_err_code(err), gpg_strerror(err)); if (gpg_err_code(err) == GPG_ERR_ENOMEM) return RET_ERROR_OOM; else return RET_ERROR_GPGME; } else return RET_OK; } /* Quick&dirty passphrase asking */ static gpg_error_t signature_getpassphrase(UNUSED(void *hook), const char *uid_hint, UNUSED(const char *info), int prev_was_bad, int fd) { char *msg; const char *p; msg = mprintf("%s needs a passphrase\nPlease enter passphrase%s:", (uid_hint!=NULL)?uid_hint:"key", (prev_was_bad!=0)?" 
again":""); if (msg == NULL) return gpg_err_make(GPG_ERR_SOURCE_USER_1, GPG_ERR_ENOMEM); p = getpass(msg); write(fd, p, strlen(p)); write(fd, "\n", 1); free(msg); return GPG_ERR_NO_ERROR; } #endif /* HAVE_LIBGPGME */ retvalue signature_init(bool allowpassphrase){ #ifdef HAVE_LIBGPGME gpg_error_t err; if (context != NULL) return RET_NOTHING; gpgme_check_version(NULL); err = gpgme_engine_check_version(GPGME_PROTOCOL_OpenPGP); if (err != 0) return gpgerror(err); err = gpgme_new(&context); if (err != 0) return gpgerror(err); err = gpgme_set_protocol(context, GPGME_PROTOCOL_OpenPGP); if (err != 0) return gpgerror(err); if (allowpassphrase) gpgme_set_passphrase_cb(context, signature_getpassphrase, NULL); gpgme_set_armor(context, 1); #endif /* HAVE_LIBGPGME */ return RET_OK; } void signatures_done(void) { #ifdef HAVE_LIBGPGME if (context != NULL) { gpgme_release(context); context = NULL; } #endif /* HAVE_LIBGPGME */ } #ifdef HAVE_LIBGPGME /* retrieve a list of fingerprints of keys having signed (valid) or * which are mentioned in the signature (all). set broken if all signatures * was broken (hints to a broken file, as opposed to expired or whatever * else may make a signature invalid)). 
*/ static retvalue checksigs(const char *filename, struct signatures **signatures_p, bool *broken) { gpgme_verify_result_t result; gpgme_signature_t s; bool had_valid = false, had_broken = false; size_t count; struct signatures *signatures; struct signature *sig; result = gpgme_op_verify_result(context); if (result == NULL) { fprintf(stderr, "Internal error communicating with libgpgme: no result record!\n\n"); return RET_ERROR_GPGME; } if (signatures_p != NULL) { count = 0; for (s = result->signatures ; s != NULL ; s = s->next) { count++; } signatures = calloc(1, sizeof(struct signatures) + count * sizeof(struct signature)); if (FAILEDTOALLOC(signatures)) return RET_ERROR_OOM; signatures->count = count; signatures->validcount = 0; sig = signatures->signatures; } else { signatures = NULL; sig = NULL; } for (s = result->signatures ; s != NULL ; s = s->next) { enum signature_state state = sist_error; if (signatures_p != NULL) { sig->keyid = strdup(s->fpr); if (FAILEDTOALLOC(sig->keyid)) { signatures_free(signatures); return RET_ERROR_OOM; } } switch (gpg_err_code(s->status)) { case GPG_ERR_NO_ERROR: had_valid = true; state = sist_valid; if (signatures) signatures->validcount++; break; case GPG_ERR_KEY_EXPIRED: had_valid = true; if (verbose > 0) fprintf(stderr, "Ignoring signature with '%s' on '%s', as the key has expired.\n", s->fpr, filename); state = sist_mostly; if (sig != NULL) sig->expired_key = true; break; case GPG_ERR_CERT_REVOKED: had_valid = true; if (verbose > 0) fprintf(stderr, "Ignoring signature with '%s' on '%s', as the key is revoked.\n", s->fpr, filename); state = sist_mostly; if (sig != NULL) sig->revoced_key = true; break; case GPG_ERR_SIG_EXPIRED: had_valid = true; if (verbose > 0) { time_t timestamp = s->timestamp, exp_timestamp = s->exp_timestamp; fprintf(stderr, "Ignoring signature with '%s' on '%s', as the signature has expired.\n" " signature created %s, expired %s\n", s->fpr, filename, ctime(×tamp), ctime(&exp_timestamp)); } state = 
sist_mostly; if (sig != NULL) sig->expired_signature = true; break; case GPG_ERR_BAD_SIGNATURE: had_broken = true; if (verbose > 0) { fprintf(stderr, "WARNING: '%s' has a invalid signature with '%s'\n", filename, s->fpr); } state = sist_bad; break; case GPG_ERR_NO_PUBKEY: if (verbose > 0) { fprintf(stderr, "Could not check validity of signature with '%s' in '%s' as public key missing!\n", s->fpr, filename); } state = sist_missing; break; case GPG_ERR_GENERAL: fprintf(stderr, "gpgme returned an general error verifing signature with '%s' in '%s'!\n" "Try running gpg --verify '%s' manually for hints what is happening.\n" "If this does not print any errors, retry the command causing this message.\n", s->fpr, filename, filename); signatures_free(signatures); return RET_ERROR_GPGME; /* there sadly no more is a way to make sure we have * all possible ones handled */ default: break; } if (state == sist_error) { fprintf(stderr, "Error checking signature (gpgme returned unexpected value %d)!\n" "Please file a bug report, so reprepro can handle this in the future.\n", gpg_err_code(s->status)); signatures_free(signatures); return RET_ERROR_GPGME; } if (sig != NULL) { sig->state = state; sig++; } } if (broken != NULL && had_broken && ! had_valid) *broken = true; if (signatures_p != NULL) *signatures_p = signatures; return RET_OK; } static retvalue check_primary_keys(struct signatures *signatures) { /* Get the primary keys belonging to each signing key. 
This might also invalidate a signature previously believed valid if the primary key is expired */
	int i;

	/* iterate over every signature collected by checksigs() */
	for (i = 0 ; i < signatures->count ; i++) {
		gpg_error_t err;
		gpgme_key_t gpgme_key = NULL;
		gpgme_subkey_t subkey;
		struct signature *sig = &signatures->signatures[i];

		/* broken or unknown keys cannot be looked up in the keyring:
		 * fall back to the signing keyid itself */
		if (sig->state == sist_error || sig->state == sist_missing) {
			sig->primary_keyid = strdup(sig->keyid);
			if (FAILEDTOALLOC(sig->primary_keyid))
				return RET_ERROR_OOM;
			continue;
		}
		err = gpgme_get_key(context, sig->keyid, &gpgme_key, 0);
		if (err != 0) {
			fprintf(stderr, "gpgme error %s:%d retrieving key '%s': %s\n", gpg_strsource(err), (int)gpg_err_code(err), sig->keyid, gpg_strerror(err));
			if (gpg_err_code(err) == GPG_ERR_ENOMEM)
				return RET_ERROR_OOM;
			else
				return RET_ERROR_GPGME;
		}
		assert (gpgme_key != NULL);
		/* the first "sub"key is the primary key */
		subkey = gpgme_key->subkeys;
		/* a revoked or expired primary key downgrades a previously
		 * fully valid signature to "mostly" valid */
		if (subkey->revoked) {
			sig->revoced_key = true;
			if (sig->state == sist_valid) {
				sig->state = sist_mostly;
				signatures->validcount--;
			}
		}
		if (subkey->expired) {
			sig->expired_key = true;
			if (sig->state == sist_valid) {
				sig->state = sist_mostly;
				signatures->validcount--;
			}
		}
		sig->primary_keyid = strdup(subkey->keyid);
		gpgme_key_unref(gpgme_key);
		if (FAILEDTOALLOC(sig->primary_keyid))
			return RET_ERROR_OOM;
	}
	return RET_OK;
}
#endif /* HAVE_LIBGPGME */

/* release a signature list allocated by checksigs(); NULL is a no-op */
void signatures_free(struct signatures *signatures) {
	int i;

	if (signatures == NULL)
		return;

	for (i = 0 ; i < signatures->count ; i++) {
		free(signatures->signatures[i].keyid);
		free(signatures->signatures[i].primary_keyid);
	}
	free(signatures);
}

#ifdef HAVE_LIBGPGME
/* verify an in-memory clearsigned blob and extract the plain chunk;
 * RET_NOTHING when the data does not look signed at all */
static retvalue extract_signed_data(const char *buffer, size_t bufferlen, const char *filenametoshow, char **chunkread, /*@null@*/ /*@out@*/struct signatures **signatures_p, bool *brokensignature) {
	char *chunk;
	gpg_error_t err;
	gpgme_data_t dh, dh_gpg;
	size_t plain_len;
	char *plain_data;
	retvalue r;
	struct signatures *signatures = NULL;
	bool foundbroken = false;

	r = signature_init(false);
	if (RET_WAS_ERROR(r))
		return r;
	err 
= gpgme_data_new_from_mem(&dh_gpg, buffer, bufferlen, 0); if (err != 0) return gpgerror(err); err = gpgme_data_new(&dh); if (err != 0) { gpgme_data_release(dh_gpg); return gpgerror(err); } err = gpgme_op_verify(context, dh_gpg, NULL, dh); if (gpg_err_code(err) == GPG_ERR_NO_DATA) { if (verbose > 5) fprintf(stderr, "Data seems not to be signed trying to use directly....\n"); gpgme_data_release(dh); gpgme_data_release(dh_gpg); return RET_NOTHING; } else { if (err != 0) { gpgme_data_release(dh_gpg); gpgme_data_release(dh); return gpgerror(err); } if (signatures_p != NULL || brokensignature != NULL) { r = checksigs(filenametoshow, (signatures_p!=NULL)?&signatures:NULL, (brokensignature!=NULL)?&foundbroken:NULL); if (RET_WAS_ERROR(r)) { gpgme_data_release(dh_gpg); gpgme_data_release(dh); return r; } } gpgme_data_release(dh_gpg); plain_data = gpgme_data_release_and_get_mem(dh, &plain_len); if (plain_data == NULL) { fprintf(stderr, "(not yet fatal) ERROR: libgpgme failed to extract the plain data out of\n" "'%s'.\n" "While it did so in a way indicating running out of memory, experience says\n" "this also happens when gpg returns a error code it does not understand.\n" "To check this please try running gpg --verify '%s' manually.\n" "Continuing extracting it ignoring all signatures...", filenametoshow, filenametoshow); signatures_free(signatures); return RET_NOTHING; } if (signatures != NULL) { r = check_primary_keys(signatures); if (RET_WAS_ERROR(r)) { signatures_free(signatures); return r; } } } if (FAILEDTOALLOC(plain_data)) r = RET_ERROR_OOM; else { size_t len; const char *afterchanges; chunk = malloc(plain_len + 1); len = chunk_extract(chunk, plain_data, plain_len, false, &afterchanges); if (len == 0) { fprintf(stderr, "Could only find spaces within '%s'!\n", filenametoshow); free(chunk); r = RET_ERROR; } else if (afterchanges != plain_data + plain_len) { if (*afterchanges == '\0') fprintf(stderr, "Unexpected \\0 character within '%s'!\n", filenametoshow); else 
fprintf(stderr, "Unexpected data after ending empty line in '%s'!\n", filenametoshow); free(chunk); r = RET_ERROR; } else *chunkread = chunk; } #ifdef HAVE_GPGPME_FREE gpgme_free(plain_data); #else free(plain_data); #endif if (RET_IS_OK(r)) { if (signatures_p != NULL) *signatures_p = signatures; if (brokensignature != NULL) *brokensignature = foundbroken; } else { signatures_free(signatures); } return r; } #endif /* HAVE_LIBGPGME */ /* Read a single chunk from a file, that may be signed. */ retvalue signature_readsignedchunk(const char *filename, const char *filenametoshow, char **chunkread, /*@null@*/ /*@out@*/struct signatures **signatures_p, bool *brokensignature) { char *chunk; const char *startofchanges, *afterchunk; const char *endmarker; size_t chunklen, len; retvalue r; r = readtextfile(filename, filenametoshow, &chunk, &chunklen); if (!RET_IS_OK(r)) return r; if (chunklen == 0) { fprintf(stderr, "Unexpected empty file '%s'!\n", filenametoshow); free(chunk); return RET_ERROR; } startofchanges = chunk_getstart(chunk, chunklen, false); /* fast-track unsigned chunks: */ if (startofchanges[0] != '-') { const char *afterchanges; len = chunk_extract(chunk, chunk, chunklen, false, &afterchanges); if (len == 0) { fprintf(stderr, "Could only find spaces within '%s'!\n", filenametoshow); free(chunk); return RET_ERROR; } if (*afterchanges != '\0') { fprintf(stderr, "Error parsing '%s': Seems not to be signed but has spurious empty line.\n", filenametoshow); free(chunk); return RET_ERROR; } if (verbose > 5 && strncmp(chunk, "Format:", 7) != 0 && strncmp(chunk, "Source:", 7) != 0) fprintf(stderr, "Data seems not to be signed trying to use directly...\n"); assert (chunk[len] == '\0'); *chunkread = realloc(chunk, len + 1); if (FAILEDTOALLOC(*chunkread)) *chunkread = chunk; if (signatures_p != NULL) *signatures_p = NULL; if (brokensignature != NULL) *brokensignature = false; return RET_OK; } #ifdef HAVE_LIBGPGME r = extract_signed_data(chunk, chunklen, filenametoshow, 
chunkread, signatures_p, brokensignature); if (r != RET_NOTHING) { free(chunk); return r; } #endif /* We have no libgpgme, it failed, or could not find signature data, * trying to extract it manually, ignoring signatures: */ if (strncmp(startofchanges, "-----BEGIN", 10) != 0) { fprintf(stderr, "Strange content of '%s': First non-space character is '-',\n" "but it does not begin with '-----BEGIN'.\n", filenametoshow); free(chunk); return RET_ERROR; #ifndef HAVE_LIBGPGME } else { fprintf(stderr, "Cannot check signatures from '%s' as compiled without support for libgpgme!\n" "Extracting the content manually without looking at the signature...\n", filenametoshow); #endif } startofchanges = chunk_over(startofchanges); len = chunk_extract(chunk, startofchanges, chunklen - (startofchanges - chunk), false, &afterchunk); if (len == 0) { fprintf(stderr, "Could not find any data within '%s'!\n", filenametoshow); free(chunk); return RET_ERROR; } endmarker = strstr(chunk, "\n-----"); if (endmarker != NULL) { endmarker++; assert ((size_t)(endmarker-chunk) < len); len = endmarker-chunk; chunk[len] = '\0'; } else if (*afterchunk == '\0') { fprintf(stderr, "ERROR: Could not find end marker of signed data within '%s'.\n" "Cannot determine what is data and what is not!\n", filenametoshow); free(chunk); return RET_ERROR; } else if (strncmp(afterchunk, "-----", 5) != 0) { fprintf(stderr, "ERROR: Spurious empty line within '%s'.\n" "Cannot determine what is data and what is not!\n", filenametoshow); free(chunk); return RET_ERROR; } assert (chunk[len] == '\0'); if (signatures_p != NULL) { /* pointer to structure with count 0 to make clear * it is not unsigned */ *signatures_p = calloc(1, sizeof(struct signatures)); if (FAILEDTOALLOC(*signatures_p)) { free(chunk); return RET_ERROR_OOM; } } *chunkread = realloc(chunk, len + 1); if (FAILEDTOALLOC(*chunkread)) *chunkread = chunk; if (brokensignature != NULL) *brokensignature = false; return RET_OK; } 
reprepro-4.13.1/target.c0000644000175100017510000006634312152651661012034 00000000000000/* This file is part of "reprepro" * Copyright (C) 2004,2005,2007,2008 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "ignore.h" #include "mprintf.h" #include "strlist.h" #include "names.h" #include "chunks.h" #include "database.h" #include "reference.h" #include "binaries.h" #include "sources.h" #include "names.h" #include "dirs.h" #include "dpkgversions.h" #include "tracking.h" #include "log.h" #include "files.h" #include "target.h" static char *calc_identifier(const char *codename, component_t component, architecture_t architecture, packagetype_t packagetype) { assert (strchr(codename, '|') == NULL); assert (codename != NULL); assert (atom_defined(component)); assert (atom_defined(architecture)); assert (atom_defined(packagetype)); if (packagetype == pt_udeb) return mprintf("u|%s|%s|%s", codename, atoms_components[component], atoms_architectures[architecture]); else return mprintf("%s|%s|%s", codename, atoms_components[component], atoms_architectures[architecture]); } static retvalue target_initialize(/*@dependant@*/struct distribution *distribution, component_t component, architecture_t architecture, packagetype_t packagetype, get_version getversion, 
get_installdata getinstalldata, get_architecture getarchitecture, get_filekeys getfilekeys, get_checksums getchecksums, get_sourceandversion getsourceandversion, do_reoverride doreoverride, do_retrack doretrack, complete_checksums docomplete, /*@null@*//*@only@*/char *directory, /*@dependent@*/const struct exportmode *exportmode, bool readonly, /*@out@*/struct target **d) { struct target *t; assert(exportmode != NULL); if (FAILEDTOALLOC(directory)) return RET_ERROR_OOM; t = zNEW(struct target); if (FAILEDTOALLOC(t)) { free(directory); return RET_ERROR_OOM; } t->relativedirectory = directory; t->exportmode = exportmode; t->distribution = distribution; assert (atom_defined(component)); t->component = component; assert (atom_defined(architecture)); t->architecture = architecture; assert (atom_defined(packagetype)); t->packagetype = packagetype; t->identifier = calc_identifier(distribution->codename, component, architecture, packagetype); if (FAILEDTOALLOC(t->identifier)) { (void)target_free(t); return RET_ERROR_OOM; } t->getversion = getversion; t->getinstalldata = getinstalldata; t->getarchitecture = getarchitecture; t->getfilekeys = getfilekeys; t->getchecksums = getchecksums; t->getsourceandversion = getsourceandversion; t->doreoverride = doreoverride; t->doretrack = doretrack; t->completechecksums = docomplete; t->readonly = readonly; *d = t; return RET_OK; } static const char *dist_component_name(component_t component, /*@null@*/const char *fakecomponentprefix) { const char *c = atoms_components[component]; size_t len; if (fakecomponentprefix == NULL) return c; len = strlen(fakecomponentprefix); if (strncmp(c, fakecomponentprefix, len) != 0) return c; if (c[len] != '/') return c; return c + len + 1; } retvalue target_initialize_ubinary(struct distribution *d, component_t component, architecture_t architecture, const struct exportmode *exportmode, bool readonly, const char *fakecomponentprefix, struct target **target) { return target_initialize(d, component, 
architecture, pt_udeb, binaries_getversion, binaries_getinstalldata, binaries_getarchitecture, binaries_getfilekeys, binaries_getchecksums, binaries_getsourceandversion, ubinaries_doreoverride, binaries_retrack, binaries_complete_checksums, mprintf("%s/debian-installer/binary-%s", dist_component_name(component, fakecomponentprefix), atoms_architectures[architecture]), exportmode, readonly, target); } retvalue target_initialize_binary(struct distribution *d, component_t component, architecture_t architecture, const struct exportmode *exportmode, bool readonly, const char *fakecomponentprefix, struct target **target) { return target_initialize(d, component, architecture, pt_deb, binaries_getversion, binaries_getinstalldata, binaries_getarchitecture, binaries_getfilekeys, binaries_getchecksums, binaries_getsourceandversion, binaries_doreoverride, binaries_retrack, binaries_complete_checksums, mprintf("%s/binary-%s", dist_component_name(component, fakecomponentprefix), atoms_architectures[architecture]), exportmode, readonly, target); } retvalue target_initialize_source(struct distribution *d, component_t component, const struct exportmode *exportmode, bool readonly, const char *fakecomponentprefix, struct target **target) { return target_initialize(d, component, architecture_source, pt_dsc, sources_getversion, sources_getinstalldata, sources_getarchitecture, sources_getfilekeys, sources_getchecksums, sources_getsourceandversion, sources_doreoverride, sources_retrack, sources_complete_checksums, mprintf("%s/source", dist_component_name(component, fakecomponentprefix)), exportmode, readonly, target); } retvalue target_free(struct target *target) { retvalue result = RET_OK; if (target == NULL) return RET_OK; if (target->packages != NULL) { result = target_closepackagesdb(target); } else result = RET_OK; if (target->wasmodified) { fprintf(stderr, "Warning: database '%s' was modified but no index file was exported.\n" "Changes will only be visible after the next 
'export'!\n", target->identifier); }
	target->distribution = NULL;
	free(target->identifier);
	free(target->relativedirectory);
	free(target);
	return result;
}

/* This opens up the database, if db != NULL, *db will be set to it.. */
retvalue target_initpackagesdb(struct target *target, bool readonly) {
	retvalue r;

	/* a read-only distribution may never be opened for writing */
	if (!readonly && target->readonly) {
		fprintf(stderr,
"Error trying to open '%s' read-write in read-only distribution '%s'\n",
				target->identifier,
				target->distribution->codename);
		return RET_ERROR;
	}

	assert (target->packages == NULL);
	/* NOTE(review): unreachable in asserting builds; with NDEBUG it
	 * silently tolerates a double open instead */
	if (target->packages != NULL)
		return RET_OK;
	r = database_openpackages(target->identifier, readonly,
			&target->packages);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r)) {
		target->packages = NULL;
		return r;
	}
	return r;
}

/* this closes databases... */
retvalue target_closepackagesdb(struct target *target) {
	retvalue r;

	if (target->packages == NULL) {
		fprintf(stderr, "Internal Warning: Double close!\n");
		r = RET_OK;
	} else {
		r = table_close(target->packages);
		target->packages = NULL;
	}
	return r;
}

/* Remove a package from the given target. 
*/ retvalue target_removereadpackage(struct target *target, struct logger *logger, const char *name, const char *oldcontrol, struct trackingdata *trackingdata) { char *oldpversion = NULL; struct strlist files; retvalue result, r; char *oldsource, *oldsversion; assert (target != NULL && target->packages != NULL); assert (oldcontrol != NULL && name != NULL); if (logger != NULL) { /* need to get the version for logging, if not available */ r = target->getversion(oldcontrol, &oldpversion); if (!RET_IS_OK(r)) oldpversion = NULL; } r = target->getfilekeys(oldcontrol, &files); if (RET_WAS_ERROR(r)) { free(oldpversion); return r; } if (trackingdata != NULL) { r = target->getsourceandversion(oldcontrol, name, &oldsource, &oldsversion); if (!RET_IS_OK(r)) { oldsource = oldsversion = NULL; } } else { oldsource = oldsversion = NULL; } if (verbose > 0) printf("removing '%s' from '%s'...\n", name, target->identifier); result = table_deleterecord(target->packages, name, false); if (RET_IS_OK(result)) { target->wasmodified = true; if (oldsource!= NULL && oldsversion != NULL) { r = trackingdata_remove(trackingdata, oldsource, oldsversion, &files); RET_UPDATE(result, r); } if (trackingdata == NULL) target->staletracking = true; if (logger != NULL) logger_log(logger, target, name, NULL, oldpversion, NULL, oldcontrol, NULL, &files, NULL, NULL); r = references_delete(target->identifier, &files, NULL); RET_UPDATE(result, r); } strlist_done(&files); free(oldpversion); return result; } /* Remove a package from the given target. 
*/ retvalue target_removepackage(struct target *target, struct logger *logger, const char *name, struct trackingdata *trackingdata) { char *oldchunk; retvalue r; assert(target != NULL && target->packages != NULL && name != NULL); r = table_getrecord(target->packages, name, &oldchunk); if (RET_WAS_ERROR(r)) { return r; } else if (r == RET_NOTHING) { if (verbose >= 10) fprintf(stderr, "Could not find '%s' in '%s'...\n", name, target->identifier); return RET_NOTHING; } r = target_removereadpackage(target, logger, name, oldchunk, trackingdata); free(oldchunk); return r; } /* Like target_removepackage, but delete the package record by cursor */ retvalue target_removepackage_by_cursor(struct target_cursor *tc, struct logger *logger, struct trackingdata *trackingdata) { struct target * const target = tc->target; const char * const name = tc->lastname; const char * const control = tc->lastcontrol; char *oldpversion = NULL; struct strlist files; retvalue result, r; char *oldsource, *oldsversion; assert (target != NULL && target->packages != NULL); assert (name != NULL && control != NULL); if (logger != NULL) { /* need to get the version for logging, if not available */ r = target->getversion(control, &oldpversion); if (!RET_IS_OK(r)) oldpversion = NULL; } r = target->getfilekeys(control, &files); if (RET_WAS_ERROR(r)) { free(oldpversion); return r; } if (trackingdata != NULL) { r = target->getsourceandversion(control, name, &oldsource, &oldsversion); if (!RET_IS_OK(r)) { oldsource = oldsversion = NULL; } } else { oldsource = oldsversion = NULL; } if (verbose > 0) printf("removing '%s' from '%s'...\n", name, target->identifier); result = cursor_delete(target->packages, tc->cursor, tc->lastname, NULL); if (RET_IS_OK(result)) { target->wasmodified = true; if (oldsource != NULL && oldsversion != NULL) { r = trackingdata_remove(trackingdata, oldsource, oldsversion, &files); RET_UPDATE(result, r); } if (trackingdata == NULL) target->staletracking = true; if (logger != NULL) 
logger_log(logger, target, name, NULL, oldpversion, NULL, control, NULL, &files, NULL, NULL); r = references_delete(target->identifier, &files, NULL); RET_UPDATE(result, r); } strlist_done(&files); free(oldpversion); return result; } static retvalue addpackages(struct target *target, const char *packagename, const char *controlchunk, /*@null@*/const char *oldcontrolchunk, const char *version, /*@null@*/const char *oldversion, const struct strlist *files, /*@only@*//*@null@*/struct strlist *oldfiles, /*@null@*/struct logger *logger, /*@null@*/struct trackingdata *trackingdata, architecture_t architecture, /*@null@*//*@only@*/char *oldsource, /*@null@*//*@only@*/char *oldsversion, /*@null@*/const char *causingrule, /*@null@*/const char *suitefrom) { retvalue result, r; struct table *table = target->packages; enum filetype filetype; assert (atom_defined(architecture)); if (architecture == architecture_source) filetype = ft_SOURCE; else if (architecture == architecture_all) filetype = ft_ALL_BINARY; else filetype = ft_ARCH_BINARY; /* mark it as needed by this distribution */ r = references_insert(target->identifier, files, oldfiles); if (RET_WAS_ERROR(r)) { if (oldfiles != NULL) strlist_done(oldfiles); return r; } /* Add package to the distribution's database */ if (oldcontrolchunk != NULL) { result = table_replacerecord(table, packagename, controlchunk); } else { result = table_adduniqrecord(table, packagename, controlchunk); } if (RET_WAS_ERROR(result)) { if (oldfiles != NULL) strlist_done(oldfiles); return result; } if (logger != NULL) logger_log(logger, target, packagename, version, oldversion, controlchunk, oldcontrolchunk, files, oldfiles, causingrule, suitefrom); r = trackingdata_insert(trackingdata, filetype, files, oldsource, oldsversion, oldfiles); RET_UPDATE(result, r); /* remove old references to files */ if (oldfiles != NULL) { r = references_delete(target->identifier, oldfiles, files); RET_UPDATE(result, r); strlist_done(oldfiles); } return result; } 
retvalue target_addpackage(struct target *target, struct logger *logger, const char *name, const char *version, const char *control, const struct strlist *filekeys, bool downgrade, struct trackingdata *trackingdata, enum filetype filetype, const char *causingrule, const char *suitefrom) { struct strlist oldfilekeys, *ofk; char *oldcontrol, *oldsource, *oldsversion; char *oldpversion; retvalue r; assert(target->packages!=NULL); r = table_getrecord(target->packages, name, &oldcontrol); if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) { ofk = NULL; oldsource = NULL; oldsversion = NULL; oldpversion = NULL; oldcontrol = NULL; } else { r = target->getversion(oldcontrol, &oldpversion); if (RET_WAS_ERROR(r) && !IGNORING(brokenold, "Error parsing old version!\n")) { free(oldcontrol); return r; } if (RET_IS_OK(r)) { int versioncmp; r = dpkgversions_cmp(version, oldpversion, &versioncmp); if (RET_WAS_ERROR(r)) { if (!IGNORING(brokenversioncmp, "Parse errors processing versions of %s.\n", name)) { free(oldpversion); free(oldcontrol); return r; } } else { if (versioncmp <= 0) { /* new Version is not newer than * old version */ if (!downgrade) { fprintf(stderr, "Skipping inclusion of '%s' '%s' in '%s', as it has already '%s'.\n", name, version, target->identifier, oldpversion); free(oldpversion); free(oldcontrol); return RET_NOTHING; } else if (versioncmp < 0) { fprintf(stderr, "Warning: downgrading '%s' from '%s' to '%s' in '%s'!\n", name, oldpversion, version, target->identifier); } else { fprintf(stderr, "Warning: replacing '%s' version '%s' with equal version '%s' in '%s'!\n", name, oldpversion, version, target->identifier); } } } } else oldpversion = NULL; r = target->getfilekeys(oldcontrol, &oldfilekeys); ofk = &oldfilekeys; if (RET_WAS_ERROR(r)) { if (IGNORING(brokenold, "Error parsing files belonging to installed version of %s!\n", name)) { ofk = NULL; oldsversion = oldsource = NULL; } else { free(oldcontrol); free(oldpversion); return r; } } else if (trackingdata 
!= NULL) { r = target->getsourceandversion(oldcontrol, name, &oldsource, &oldsversion); if (RET_WAS_ERROR(r)) { strlist_done(ofk); if (IGNORING(brokenold, "Error searching for source name of installed version of %s!\n", name)) { // TODO: free something of oldfilekeys? ofk = NULL; oldsversion = oldsource = NULL; } else { free(oldcontrol); free(oldpversion); return r; } } } else { oldsversion = oldsource = NULL; } } r = addpackages(target, name, control, oldcontrol, version, oldpversion, filekeys, ofk, logger, trackingdata, filetype, oldsource, oldsversion, causingrule, suitefrom); if (RET_IS_OK(r)) { target->wasmodified = true; if (trackingdata == NULL) target->staletracking = true; } free(oldpversion); free(oldcontrol); return r; } retvalue target_checkaddpackage(struct target *target, const char *name, const char *version, bool tracking, bool permitnewerold) { struct strlist oldfilekeys, *ofk; char *oldcontrol, *oldsource, *oldsversion; char *oldpversion; retvalue r; assert(target->packages!=NULL); r = table_getrecord(target->packages, name, &oldcontrol); if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) { ofk = NULL; oldsource = NULL; oldsversion = NULL; oldpversion = NULL; oldcontrol = NULL; } else { int versioncmp; r = target->getversion(oldcontrol, &oldpversion); if (RET_WAS_ERROR(r)) { fprintf(stderr, "Error extracting version from old '%s' in '%s'. 
Database corrupted?\n", name, target->identifier); free(oldcontrol); return r; } assert (RET_IS_OK(r)); r = dpkgversions_cmp(version, oldpversion, &versioncmp); if (RET_WAS_ERROR(r)) { fprintf(stderr, "Parse error comparing version '%s' of '%s' with old version '%s' in '%s'\n.", version, name, oldpversion, target->identifier); free(oldpversion); free(oldcontrol); return r; } if (versioncmp <= 0) { r = RET_NOTHING; if (versioncmp < 0) { if (!permitnewerold) { fprintf(stderr, "Error: trying to put version '%s' of '%s' in '%s',\n" "while there already is the stricly newer '%s' in there.\n" "(To ignore this error add Permit: older_version.)\n", name, version, target->identifier, oldpversion); r = RET_ERROR; } else if (verbose >= 0) { printf( "Warning: trying to put version '%s' of '%s' in '%s',\n" "while there already is '%s' in there.\n", name, version, target->identifier, oldpversion); } } else if (verbose > 2) { printf( "Will not put '%s' in '%s', as already there with same version '%s'.\n", name, target->identifier, oldpversion); } free(oldpversion); free(oldcontrol); return r; } r = target->getfilekeys(oldcontrol, &oldfilekeys); ofk = &oldfilekeys; if (RET_WAS_ERROR(r)) { fprintf(stderr, "Error extracting installed files from old '%s' in '%s'.\nDatabase corrupted?\n", name, target->identifier); free(oldcontrol); free(oldpversion); return r; } if (tracking) { r = target->getsourceandversion(oldcontrol, name, &oldsource, &oldsversion); if (RET_WAS_ERROR(r)) { fprintf(stderr, "Error extracting source name and version from '%s' in '%s'. 
Database corrupted?\n", name, target->identifier); strlist_done(ofk); free(oldcontrol); free(oldpversion); return r; } /* TODO: check if tracking would succeed */ free(oldsversion); free(oldsource); } strlist_done(ofk); } free(oldpversion); free(oldcontrol); return RET_OK; } retvalue target_rereference(struct target *target) { retvalue result, r; struct target_cursor iterator; const char *package, *control; if (verbose > 1) { if (verbose > 2) printf("Unlocking dependencies of %s...\n", target->identifier); else printf("Rereferencing %s...\n", target->identifier); } result = references_remove(target->identifier); if (verbose > 2) printf("Referencing %s...\n", target->identifier); r = target_openiterator(target, READONLY, &iterator); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; while (target_nextpackage(&iterator, &package, &control)) { struct strlist filekeys; r = target->getfilekeys(control, &filekeys); RET_UPDATE(result, r); if (!RET_IS_OK(r)) continue; if (verbose > 10) { fprintf(stderr, "adding references to '%s' for '%s': ", target->identifier, package); (void)strlist_fprint(stderr, &filekeys); (void)putc('\n', stderr); } r = references_insert(target->identifier, &filekeys, NULL); strlist_done(&filekeys); RET_UPDATE(result, r); } r = target_closeiterator(&iterator); RET_ENDUPDATE(result, r); return result; } retvalue package_referenceforsnapshot(UNUSED(struct distribution *di), struct target *target, const char *package, const char *chunk, void *data) { const char *identifier = data; struct strlist filekeys; retvalue r; r = target->getfilekeys(chunk, &filekeys); if (RET_WAS_ERROR(r)) return r; if (verbose > 15) { fprintf(stderr, "adding references to '%s' for '%s': ", identifier, package); (void)strlist_fprint(stderr, &filekeys); (void)putc('\n', stderr); } r = references_add(identifier, &filekeys); strlist_done(&filekeys); return r; } retvalue package_check(UNUSED(struct distribution *di), struct target *target, const char *package, const char 
*chunk, UNUSED(void *pd)) { struct checksumsarray files; struct strlist expectedfilekeys; char *dummy, *version; retvalue result = RET_OK, r; architecture_t package_architecture; r = target->getversion(chunk, &version); if (!RET_IS_OK(r)) { fprintf(stderr, "Error extraction version number from package control info of '%s'!\n", package); if (r == RET_NOTHING) r = RET_ERROR_MISSING; return r; } r = target->getarchitecture(chunk, &package_architecture); if (!RET_IS_OK(r)) { fprintf(stderr, "Error extraction architecture from package control info of '%s'!\n", package); if (r == RET_NOTHING) r = RET_ERROR_MISSING; return r; } /* check if the architecture matches the architecture where this * package belongs to. */ if (target->architecture != package_architecture && package_architecture != architecture_all) { fprintf(stderr, "Wrong architecture '%s' of package '%s' in '%s'!\n", atoms_architectures[package_architecture], package, target->identifier); result = RET_ERROR; } r = target->getinstalldata(target, package, version, package_architecture, chunk, &dummy, &expectedfilekeys, &files); if (RET_WAS_ERROR(r)) { fprintf(stderr, "Error extracting information of package '%s'!\n", package); result = r; } free(version); if (RET_IS_OK(r)) { free(dummy); if (!strlist_subset(&expectedfilekeys, &files.names, NULL) || !strlist_subset(&expectedfilekeys, &files.names, NULL)) { (void)fprintf(stderr, "Reparsing the package information of '%s' yields to the expectation to find:\n", package); (void)strlist_fprint(stderr, &expectedfilekeys); (void)fputs("but found:\n", stderr); (void)strlist_fprint(stderr, &files.names); (void)putc('\n', stderr); result = RET_ERROR; } strlist_done(&expectedfilekeys); } else { r = target->getchecksums(chunk, &files); if (r == RET_NOTHING) r = RET_ERROR; if (RET_WAS_ERROR(r)) { fprintf(stderr, "Even more errors extracting information of package '%s'!\n", package); return r; } } if (verbose > 10) { fprintf(stderr, "checking files of '%s'\n", package); } r = 
files_expectfiles(&files.names, files.checksums); if (RET_WAS_ERROR(r)) { fprintf(stderr, "Files are missing for '%s'!\n", package); } RET_UPDATE(result, r); if (verbose > 10) { (void)fprintf(stderr, "checking references to '%s' for '%s': ", target->identifier, package); (void)strlist_fprint(stderr, &files.names); (void)putc('\n', stderr); } r = references_check(target->identifier, &files.names); RET_UPDATE(result, r); checksumsarray_done(&files); return result; } /* Reapply override information */ retvalue target_reoverride(struct target *target, struct distribution *distribution) { struct target_cursor iterator; retvalue result, r; const char *package, *controlchunk; assert(target->packages == NULL); assert(distribution != NULL); if (verbose > 1) { fprintf(stderr, "Reapplying overrides packages in '%s'...\n", target->identifier); } r = target_openiterator(target, READWRITE, &iterator); if (!RET_IS_OK(r)) return r; result = RET_NOTHING; while (target_nextpackage(&iterator, &package, &controlchunk)) { char *newcontrolchunk = NULL; r = target->doreoverride(target, package, controlchunk, &newcontrolchunk); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) { if (verbose > 0) (void)fputs( "target_reoverride: Stopping procession of further packages due to previous errors\n", stderr); break; } if (RET_IS_OK(r)) { r = cursor_replace(target->packages, iterator.cursor, newcontrolchunk, strlen(newcontrolchunk)); free(newcontrolchunk); if (RET_WAS_ERROR(r)) { result = r; break; } target->wasmodified = true; } } r = target_closeiterator(&iterator); RET_ENDUPDATE(result, r); return result; } /* Readd checksum information */ static retvalue complete_package_checksums(struct target *target, const char *control, char **n) { struct checksumsarray files; retvalue r; r = target->getchecksums(control, &files); if (!RET_IS_OK(r)) return r; r = files_checkorimprove(&files.names, files.checksums); if (!RET_IS_OK(r)) { checksumsarray_done(&files); return r; } r = 
target->completechecksums(control, &files.names, files.checksums, n); checksumsarray_done(&files); return r; } retvalue target_redochecksums(struct target *target, struct distribution *distribution) { struct target_cursor iterator; retvalue result, r; const char *package, *controlchunk; assert(target->packages == NULL); assert(distribution != NULL); if (verbose > 1) { fprintf(stderr, "Redoing checksum information for packages in '%s'...\n", target->identifier); } r = target_openiterator(target, READWRITE, &iterator); if (!RET_IS_OK(r)) return r; result = RET_NOTHING; while (target_nextpackage(&iterator, &package, &controlchunk)) { char *newcontrolchunk = NULL; r = complete_package_checksums(target, controlchunk, &newcontrolchunk); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; if (RET_IS_OK(r)) { r = cursor_replace(target->packages, iterator.cursor, newcontrolchunk, strlen(newcontrolchunk)); free(newcontrolchunk); if (RET_WAS_ERROR(r)) { result = r; break; } target->wasmodified = true; } } r = target_closeiterator(&iterator); RET_ENDUPDATE(result, r); return result; } /* export a database */ retvalue target_export(struct target *target, bool onlyneeded, bool snapshot, struct release *release) { retvalue result; bool onlymissing; if (verbose > 5) { if (onlyneeded) printf(" looking for changes in '%s'...\n", target->identifier); else printf(" exporting '%s'...\n", target->identifier); } /* not exporting if file is already there? 
*/ onlymissing = onlyneeded && !target->wasmodified; result = export_target(target->relativedirectory, target, target->exportmode, release, onlymissing, snapshot); if (!RET_WAS_ERROR(result) && !snapshot) { target->saved_wasmodified = target->saved_wasmodified || target->wasmodified; target->wasmodified = false; } return result; } retvalue package_rerunnotifiers(struct distribution *distribution, struct target *target, const char *package, const char *chunk, UNUSED(void *data)) { struct logger *logger = distribution->logger; struct strlist filekeys; char *version; retvalue r; r = target->getversion(chunk, &version); if (!RET_IS_OK(r)) { fprintf(stderr, "Error extraction version number from package control info of '%s'!\n", package); if (r == RET_NOTHING) r = RET_ERROR_MISSING; return r; } r = target->getfilekeys(chunk, &filekeys); if (RET_WAS_ERROR(r)) { fprintf(stderr, "Error extracting information about used files from package '%s'!\n", package); free(version); return r; } r = logger_reruninfo(logger, target, package, version, chunk, &filekeys); strlist_done(&filekeys); free(version); return r; } reprepro-4.13.1/diffindex.c0000644000175100017510000001412512152651661012475 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003,2004,2005,2007 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include "error.h" #include "names.h" #include "chunks.h" #include "readtextfile.h" #include "checksums.h" #include "diffindex.h" void diffindex_free(struct diffindex *diffindex) { int i; if (diffindex == NULL) return; checksums_free(diffindex->destination); for (i = 0 ; i < diffindex->patchcount ; i ++) { checksums_free(diffindex->patches[i].frompackages); free(diffindex->patches[i].name); checksums_free(diffindex->patches[i].checksums); } free(diffindex); } static void parse_sha1line(const char *p, /*@out@*/struct hashes *hashes, /*@out@*/const char **rest) { setzero(struct hashes, hashes); hashes->hashes[cs_sha1sum].start = p; while ((*p >= 'a' && *p <= 'f') || (*p >= 'A' && *p <= 'F') || (*p >= '0' && *p <= '9')) p++; hashes->hashes[cs_sha1sum].len = p - hashes->hashes[cs_sha1sum].start; while (*p == ' ' || *p == '\t') p++; hashes->hashes[cs_length].start = p; while (*p >= '0' && *p <= '9') p++; hashes->hashes[cs_length].len = p - hashes->hashes[cs_length].start; while (*p == ' ' || *p == '\t') p++; *rest = p; } static inline retvalue add_current(const char *diffindexfile, struct diffindex *n, const char *current) { struct hashes hashes; const char *p; retvalue r; parse_sha1line(current, &hashes, &p); if (hashes.hashes[cs_sha1sum].len == 0 || hashes.hashes[cs_length].len == 0 || *p != '\0') { r = RET_ERROR; } else r = checksums_initialize(&n->destination, hashes.hashes); ASSERT_NOT_NOTHING(r); if (RET_WAS_ERROR(r)) fprintf(stderr, "Error parsing SHA1-Current in '%s'!\n", diffindexfile); return r; } static inline retvalue add_patches(const char *diffindexfile, struct diffindex *n, const struct strlist *patches) { int i; assert (patches->count == n->patchcount); for (i = 0 ; i < n->patchcount; i++) { 
struct hashes hashes; const char *patchname; retvalue r; parse_sha1line(patches->values[i], &hashes, &patchname); if (hashes.hashes[cs_sha1sum].len == 0 || hashes.hashes[cs_length].len == 0 || *patchname == '\0') { r = RET_ERROR; } else r = checksums_initialize(&n->patches[i].checksums, hashes.hashes); ASSERT_NOT_NOTHING(r); if (RET_WAS_ERROR(r)) { fprintf(stderr, "Error parsing SHA1-Patches line %d in '%s':!\n'%s'\n", i, diffindexfile, patches->values[i]); return r; } n->patches[i].name = strdup(patchname); if (FAILEDTOALLOC(n)) return RET_ERROR_OOM; } return RET_OK; } static inline retvalue add_history(const char *diffindexfile, struct diffindex *n, const struct strlist *history) { int i, j; for (i = 0 ; i < history->count ; i++) { struct hashes hashes; const char *patchname; struct checksums *checksums; retvalue r; parse_sha1line(history->values[i], &hashes, &patchname); if (hashes.hashes[cs_sha1sum].len == 0 || hashes.hashes[cs_length].len == 0 || *patchname == '\0') { r = RET_ERROR; } else r = checksums_initialize(&checksums, hashes.hashes); ASSERT_NOT_NOTHING(r); if (RET_WAS_ERROR(r)) { fprintf(stderr, "Error parsing SHA1-History line %d in '%s':!\n'%s'\n", i, diffindexfile, history->values[i]); return r; } j = 0; while (j < n->patchcount && strcmp(n->patches[j].name, patchname) != 0) j++; if (j >= n->patchcount) { fprintf(stderr, "'%s' lists '%s' in history but not in patches!\n", diffindexfile, patchname); checksums_free(checksums); continue; } if (n->patches[j].frompackages != NULL) { fprintf(stderr, "Warning: '%s' lists multiple histories for '%s'!\nOnly using last one!\n", diffindexfile, patchname); checksums_free(n->patches[j].frompackages); } n->patches[j].frompackages = checksums; } return RET_OK; } retvalue diffindex_read(const char *diffindexfile, struct diffindex **out_p) { retvalue r; char *chunk, *current; struct strlist history, patches; struct diffindex *n; r = readtextfile(diffindexfile, diffindexfile, &chunk, NULL); ASSERT_NOT_NOTHING(r); if 
(RET_WAS_ERROR(r)) return r; r = chunk_getextralinelist(chunk, "SHA1-History", &history); if (r == RET_NOTHING) { fprintf(stderr, "'%s' misses SHA1-History field\n", diffindexfile); r = RET_ERROR; } if (RET_WAS_ERROR(r)) { free(chunk); return r; } r = chunk_getextralinelist(chunk, "SHA1-Patches", &patches); if (r == RET_NOTHING) { fprintf(stderr, "'%s' misses SHA1-Patches field\n", diffindexfile); r = RET_ERROR; } if (RET_WAS_ERROR(r)) { free(chunk); strlist_done(&history); return r; } r = chunk_getvalue(chunk, "SHA1-Current", ¤t); free(chunk); if (r == RET_NOTHING) { fprintf(stderr, "'%s' misses SHA1-Current field\n", diffindexfile); r = RET_ERROR; } if (RET_WAS_ERROR(r)) { strlist_done(&history); strlist_done(&patches); return r; } n = calloc(1, sizeof(struct diffindex) + patches.count * sizeof(struct diffindex_patch)); if (FAILEDTOALLOC(n)) { strlist_done(&history); strlist_done(&patches); free(current); return r; } n->patchcount = patches.count; r = add_current(diffindexfile, n, current); if (RET_IS_OK(r)) r = add_patches(diffindexfile, n, &patches); if (RET_IS_OK(r)) r = add_history(diffindexfile, n, &history); ASSERT_NOT_NOTHING(r); strlist_done(&history); strlist_done(&patches); free(current); if (RET_IS_OK(r)) *out_p = n; else diffindex_free(n); return r; } reprepro-4.13.1/checkindsc.h0000644000175100017510000000203612152651661012636 00000000000000#ifndef REPREPRO_CHECKINDSC_H #define REPREPRO_CHECKINDSC_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" #endif #ifndef REPREPRO_DATABASE_H #include "database.h" #endif #ifndef REPREPRO_DISTRIBUTION_H #include "distribution.h" #endif #ifndef REPREPRO_SOURCES_H #include "sources.h" #endif /* insert the given .dsc into the mirror in in the * if component is NULL, guess it from the section. 
*/ retvalue dsc_add(component_t, /*@null@*/const char * /*forcesection*/, /*@null@*/const char * /*forcepriority*/, struct distribution *, const char * /*dscfilename*/, int /*delete*/, /*@null@*/trackingdb); /* in two steps: * If basename, filekey and directory are != NULL, then they are used instead * of being newly calculated. * (And all files are expected to already be in the pool), * delete should be D_INPLACE then */ retvalue dsc_addprepared(const struct dsc_headers *, component_t, const struct strlist * /*filekeys*/, struct distribution *, /*@null@*/struct trackingdata *); #endif reprepro-4.13.1/printlistformat.h0000644000175100017510000000024712152651661014003 00000000000000#ifndef REPREPRO_PRINTLISTFORMAT #define REPREPRO_PRINTLISTFORMAT retvalue listformat_print(const char *, const struct target *, const char *, const char *); #endif reprepro-4.13.1/dpkgversions.h0000644000175100017510000000053512152651661013260 00000000000000#ifndef REPREPRO_DPKGVERSIONS_H #define REPREPRO_DPKGVERSIONS_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning wth? 
#endif /* return error if those are not proper versions, * otherwise RET_OK and result is <0, ==0 or >0, if first is smaller, equal or larger */ retvalue dpkgversions_cmp(const char *, const char *, /*@out@*/int *); #endif reprepro-4.13.1/filterlist.h0000644000175100017510000000203512152651661012720 00000000000000#ifndef REPREPRO_FILTERLIST_H #define REPREPRO_FILTERLIST_H enum filterlisttype { /* must be 0, so it is the default, when there is no list */ flt_install = 0, flt_unchanged, /* special value used by the cmdline lists */ flt_auto_hold, /* special value used by the cmdline lists */ flt_purge, flt_warning, flt_deinstall, flt_hold, flt_supersede, flt_upgradeonly, flt_error }; struct filterlistfile; struct filterlist { size_t count; struct filterlistfile **files; /* to be used when not found */ enum filterlisttype defaulttype; /* true if this is loaded from config */ bool set; }; struct configiterator; retvalue filterlist_load(/*@out@*/struct filterlist *, struct configiterator *); void filterlist_release(struct filterlist *list); enum filterlisttype filterlist_find(const char *name, const char *version, const struct filterlist *); extern struct filterlist cmdline_bin_filter, cmdline_src_filter; retvalue filterlist_cmdline_add_pkg(bool, const char *); retvalue filterlist_cmdline_add_file(bool, const char *); #endif reprepro-4.13.1/printlistformat.c0000644000175100017510000001317712152651661014004 00000000000000/* This file is part of "reprepro" * Copyright (C) 2009 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include "error.h" #include "atoms.h" #include "chunks.h" #include "target.h" #include "distribution.h" #include "printlistformat.h" #include "dirs.h" retvalue listformat_print(const char *listformat, const struct target *target, const char *package, const char *control) { retvalue r; const char *p, *q; if (listformat == NULL) { char *version; r = target->getversion(control, &version); if (RET_IS_OK(r)) { printf( "%s: %s %s\n", target->identifier, package, version); free(version); } else { printf("Could not retrieve version from %s in %s\n", package, target->identifier); } return r; } /* try to produce the same output dpkg-query --show produces: */ for (p = listformat ; *p != '\0' ; p++) { long length; char *value; const char *v; if (*p == '\\') { p++; if (*p == '\0') break; switch (*p) { case 'n': putchar('\n'); break; case 't': putchar('\t'); break; case 'r': putchar('\r'); break; /* extension \0 produces zero byte * (useful for xargs -0) */ case '0': putchar('\0'); break; default: putchar(*p); } continue; } if (*p != '$' || p[1] != '{') { putchar(*p); continue; } p++; /* substitute veriable */ q = p; while (*q != '\0' && *q != '}' && *q != ';') q++; if (*q == '\0' || q == p) { putchar('$'); putchar('{'); continue; } if (q - p == 12 && strncasecmp(p, "{$identifier", 12) == 0) { value = NULL; v = target->identifier; } else if ( (q - p == 10 && strncasecmp(p, "{$basename", 10) == 0) || (q - p == 14 && strncasecmp(p, "{$fullfilename", 14) == 0) || (q - p == 9 && strncasecmp(p, "{$filekey", 9) == 0)) { struct strlist filekeys; r = target->getfilekeys(control, &filekeys); if (RET_WAS_ERROR(r)) return r; if (RET_IS_OK(r) && filekeys.count > 0) { if (q - p == 9) { /* filekey */ value 
= filekeys.values[0]; filekeys.values[0] = NULL; v = value; } else if (q - p == 10) { /* basename */ value = filekeys.values[0]; filekeys.values[0] = NULL; v = dirs_basename(value);; } else { /* fullfilename */ value = calc_dirconcat(global.basedir, filekeys.values[0]); if (FAILEDTOALLOC(value)) return RET_ERROR_OOM; v = value; } strlist_done(&filekeys); } else { value = NULL; v = ""; } } else if (q - p == 6 && strncasecmp(p, "{$type", 6) == 0) { value = NULL; v = atoms_packagetypes[target->packagetype]; } else if (q - p == 10 && strncasecmp(p, "{$codename", 10) == 0) { value = NULL; v = target->distribution->codename; } else if (q - p == 14 && strncasecmp(p, "{$architecture", 14) == 0) { value = NULL; v = atoms_architectures[target->architecture]; } else if (q - p == 11 && strncasecmp(p, "{$component", 11) == 0) { value = NULL; v = atoms_components[target->component]; } else if (q - p == 8 && strncasecmp(p, "{$source", 8) == 0) { char *dummy = NULL; r = target->getsourceandversion(control, package, &value, &dummy); if (RET_WAS_ERROR(r)) return r; if (RET_IS_OK(r)) { free(dummy); v = value; } else { value = NULL; v = ""; } } else if (q - p == 15 && strncasecmp(p, "{$sourceversion", 15) == 0) { char *dummy = NULL; r = target->getsourceandversion(control, package, &dummy, &value); if (RET_WAS_ERROR(r)) return r; if (RET_IS_OK(r)) { free(dummy); v = value; } else { value = NULL; v = ""; } } else if (q - p == 8 && strncasecmp(p, "{package", 8) == 0) { value = NULL; v = package; } else { char *variable = strndup(p + 1, q - (p + 1)); if (FAILEDTOALLOC(variable)) return RET_ERROR_OOM; r = chunk_getwholedata(control, variable, &value); free(variable); if (RET_WAS_ERROR(r)) return r; if (RET_IS_OK(r)) { v = value; while (*v != '\0' && xisspace(*v)) v++; } else { value = NULL; v = ""; } } if (*q == ';') { /* dpkg-query allows octal an hexadecimal, * so we do, too */ length = strtol(q + 1, (char**)&p, 0); if (*p != '}') { free(value); putchar('$'); putchar('{'); continue; } } 
else { p = q; length = 0; } /* as in dpkg-query, length 0 means unlimited */ if (length == 0) { fputs(v, stdout); } else { long value_length = strlen(v); if (length < 0) { length = -length; while (value_length < length) { putchar(' '); length--; } } if (value_length > length) { fwrite(v, length, 1, stdout); length = 0; } else if (value_length > 0) { fwrite(v, value_length, 1, stdout); length -= value_length; } while (length-- > 0) putchar(' '); } free(value); } return RET_OK; } reprepro-4.13.1/chunkedit.c0000644000175100017510000002513312152651661012514 00000000000000/* This file is part of "reprepro" * Copyright (C) 2006 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include "error.h" #include "chunkedit.h" #include "names.h" void cef_free(struct chunkeditfield *f) { while (f != NULL) { int i; struct chunkeditfield *p = f; f = f->next; for (i = 0 ; i < p->linecount ; i++) { free(p->lines[i].words); free(p->lines[i].wordlen); } free(p); } } struct chunkeditfield *cef_newfield(const char *field, enum cefaction action, enum cefwhen when, unsigned int linecount, struct chunkeditfield *next) { struct chunkeditfield *n; n = calloc(1, sizeof(struct chunkeditfield) + linecount * sizeof(struct cef_line)); if (FAILEDTOALLOC(n)) { cef_free(next); return NULL; } assert(field != NULL); n->field = field; n->len_field = strlen(field); n->action = action; n->when = when; n->linecount = linecount; n->next = next; return n; } void cef_setdatalen(struct chunkeditfield *cef, const char *data, size_t len) { assert (data != NULL || len == 0); assert (cef->len_all_data >= cef->len_data); cef->len_all_data -= cef->len_data; cef->len_all_data += len; cef->data = data; cef->len_data = len; cef->words = NULL; } void cef_setdata(struct chunkeditfield *cef, const char *data) { cef_setdatalen(cef, data, strlen(data)); } void cef_setwordlist(struct chunkeditfield *cef, const struct strlist *words) { int i; size_t len = 0; for (i = 0 ; i < words->count ; i++) { len += 1+strlen(words->values[i]); } if (len > 0) len--; assert (cef->len_all_data >= cef->len_data); cef->len_all_data -= cef->len_data; cef->len_all_data += len; cef->data = NULL; cef->len_data = len; cef->words = words; } retvalue cef_setline(struct chunkeditfield *cef, int line, int wordcount, ...) 
{ va_list ap; int i; struct cef_line *l; const char *word; size_t len; assert (line < cef->linecount); assert (wordcount > 0); l = &cef->lines[line]; assert (l->wordcount == 0 && l->words == NULL && l->wordlen == NULL); l->wordcount = wordcount; l->words = nzNEW(wordcount, const char*); if (FAILEDTOALLOC(l->words)) return RET_ERROR_OOM; l->wordlen = nzNEW(wordcount, size_t); if (FAILEDTOALLOC(l->wordlen)) { free(l->words);l->words = NULL; return RET_ERROR_OOM; } va_start(ap, wordcount); len = 1; /* newline */ for (i = 0 ; i < wordcount; i++) { word = va_arg(ap, const char*); assert(word != NULL); l->words[i] = word; l->wordlen[i] = strlen(word); len += 1 + l->wordlen[i]; } word = va_arg(ap, const char*); assert (word == NULL); va_end(ap); cef->len_all_data += len; return RET_OK; } retvalue cef_setline2(struct chunkeditfield *cef, int line, const char *hash, size_t hashlen, const char *size, size_t sizelen, int wordcount, ...) { va_list ap; int i; struct cef_line *l; const char *word; size_t len; assert (line < cef->linecount); assert (wordcount >= 0); l = &cef->lines[line]; assert (l->wordcount == 0 && l->words == NULL && l->wordlen == NULL); l->wordcount = wordcount + 2; l->words = nzNEW(wordcount + 2, const char *); if (FAILEDTOALLOC(l->words)) return RET_ERROR_OOM; l->wordlen = nzNEW(wordcount + 2, size_t); if (FAILEDTOALLOC(l->wordlen)) { free(l->words); l->words = NULL; return RET_ERROR_OOM; } va_start(ap, wordcount); len = 1; /* newline */ l->words[0] = hash; l->wordlen[0] = hashlen; len += 1 + hashlen; l->words[1] = size; l->wordlen[1] = sizelen; len += 1 + sizelen; for (i = 0 ; i < wordcount; i++) { word = va_arg(ap, const char*); assert(word != NULL); l->words[i + 2] = word; l->wordlen[i + 2] = strlen(word); len += 1 + l->wordlen[i + 2]; } word = va_arg(ap, const char*); assert (word == NULL); va_end(ap); cef->len_all_data += len; return RET_OK; } static inline int findcef(const struct chunkeditfield *cef, const char *p, size_t len) { int result = 0; while 
(cef != NULL) { if (cef->len_field == len && strncasecmp(p, cef->field, len) == 0) { return result; } cef = cef->next; result++; } return -1; } retvalue chunk_edit(const char *chunk, char **result, size_t *rlen, const struct chunkeditfield *cefs) { size_t maxlen; int i, processed, count = 0; const struct chunkeditfield *cef; struct field { const struct chunkeditfield *cef; size_t startofs, endofs; /* next in original chunk */ int next; } *fields; const char *p, *q, *e; char *n; size_t len; maxlen = 1; /* a newline might get missed */ for (cef = cefs ; cef != NULL ; cef=cef->next) { maxlen += cef->len_field + cef->len_all_data + 3; /* ': \n' */ count ++; } fields = nzNEW(count, struct field); if (FAILEDTOALLOC(fields)) return RET_ERROR_OOM; i = 0; for (cef = cefs ; cef != NULL ; cef=cef->next) { assert (i < count); fields[i++].cef = cef; } assert (i == count); /* get rid of empty or strange lines at the beginning: */ while (*chunk == ' ' || *chunk == '\t') { while (*chunk != '\0' && *chunk != '\n') chunk++; if (*chunk == '\n') chunk++; } p = chunk; while (true) { q = p; while (*q != '\0' && *q != '\n' && *q != ':') q++; if (*q == '\0') break; if (*q == '\n') { /* header without colon? what kind of junk is this? 
*/ q++; while (*q == ' ' || *q == '\t') { while (*q != '\0' && *q != '\n') q++; if (*q == '\n') q++; } if (p == chunk) chunk = q; p = q; continue; } i = findcef(cefs, p, q-p); /* find begin and end of data */ q++; while (*q == ' ') q++; e = q; while (*e != '\0' && *e != '\n') e++; while (e[0] == '\n' && (e[1] == ' ' || e[1] == '\t')) { e++; while (*e != '\0' && *e != '\n') e++; } if (i < 0) { /* not known, we'll have to copy it */ maxlen += 1+e-p; if (*e == '\0') break; p = e+1; continue; } if (fields[i].endofs == 0) { fields[i].startofs = p-chunk; fields[i].endofs = e-chunk; if (fields[i].cef->action == CEF_KEEP || fields[i].cef->action == CEF_ADDMISSED) maxlen += 1+e-q; } if (*e == '\0') break; p = e+1; } n = malloc(maxlen + 1); if (FAILEDTOALLOC(n)) { free(fields); return RET_ERROR_OOM; } len = 0; for (processed = 0; processed < count && fields[processed].cef->when == CEF_EARLY; processed++) { struct field *f = &fields[processed]; const struct chunkeditfield *ef = f->cef; if (ef->action == CEF_DELETE) continue; if (ef->action == CEF_REPLACE && f->endofs == 0) continue; if (f->endofs != 0 && (ef->action == CEF_KEEP || ef->action == CEF_ADDMISSED)) { size_t l = f->endofs - f->startofs; assert (maxlen >= len + l); memcpy(n+len, chunk + f->startofs, l); len +=l; n[len++] = '\n'; continue; } if (ef->action == CEF_KEEP) continue; assert (maxlen >= len+ 3+ ef->len_field); memcpy(n+len, ef->field, ef->len_field); len += ef->len_field; n[len++] = ':'; n[len++] = ' '; if (ef->data != NULL) { assert (maxlen >= len+1+ef->len_data); memcpy(n+len, ef->data, ef->len_data); len += ef->len_data; } else if (ef->words != NULL) { int j; for (j = 0 ; j < ef->words->count ; j++) { const char *v = ef->words->values[j]; size_t l = strlen(v); if (j > 0) n[len++] = ' '; memcpy(n+len, v, l); len += l; } } for (i = 0 ; i < ef->linecount ; i++) { int j; n[len++] = '\n'; for (j = 0 ; j < ef->lines[i].wordcount ; j++) { n[len++] = ' '; memcpy(n+len, ef->lines[i].words[j], 
ef->lines[i].wordlen[j]); len += ef->lines[i].wordlen[j]; } } assert(maxlen > len); n[len++] = '\n'; } p = chunk; /* now add all headers in between */ while (true) { q = p; while (*q != '\0' && *q != '\n' && *q != ':') q++; if (*q == '\0') break; if (*q == '\n') { /* header without colon? what kind of junk is this? */ q++; while (*q == ' ' || *q == '\t') { while (*q != '\0' && *q != '\n') q++; if (*q == '\n') q++; } p = q; continue; } i = findcef(cefs, p, q-p); /* find begin and end of data */ q++; while (*q == ' ') q++; e = q; while (*e != '\0' && *e != '\n') e++; while (e[0] == '\n' && (e[1] == ' ' || e[1] == '\t')) { e++; while (*e != '\0' && *e != '\n') e++; } if (i < 0) { /* not known, copy it */ size_t l = e - p; assert (maxlen >= len + l); memcpy(n+len, p, l); len += l; n[len++] = '\n'; if (*e == '\0') break; p = e+1; continue; } if (*e == '\0') break; p = e+1; } for (; processed < count ; processed++) { struct field *f = &fields[processed]; const struct chunkeditfield *ef = f->cef; if (ef->action == CEF_DELETE) continue; if (ef->action == CEF_REPLACE && f->endofs == 0) continue; if (f->endofs != 0 && (ef->action == CEF_KEEP || ef->action == CEF_ADDMISSED)) { size_t l = f->endofs - f->startofs; assert (maxlen >= len + l); memcpy(n+len, chunk + f->startofs, l); len +=l; n[len++] = '\n'; continue; } if (ef->action == CEF_KEEP) continue; assert (maxlen >= len+ 3+ ef->len_field); memcpy(n+len, ef->field, ef->len_field); len += ef->len_field; n[len++] = ':'; n[len++] = ' '; if (ef->data != NULL) { assert (maxlen >= len+1+ef->len_data); memcpy(n+len, ef->data, ef->len_data); len += ef->len_data; } else if (ef->words != NULL) { int j; for (j = 0 ; j < ef->words->count ; j++) { const char *v = ef->words->values[j]; size_t l = strlen(v); if (j > 0) n[len++] = ' '; memcpy(n+len, v, l); len += l; } } for (i = 0 ; i < ef->linecount ; i++) { int j; n[len++] = '\n'; for (j = 0 ; j < ef->lines[i].wordcount ; j++) { n[len++] = ' '; memcpy(n+len, ef->lines[i].words[j], 
ef->lines[i].wordlen[j]); len += ef->lines[i].wordlen[j]; } } assert(maxlen > len); n[len++] = '\n'; } assert(maxlen >= len); n[len] = '\0'; free(fields); *result = realloc(n, len+1); if (*result == NULL) *result = n; *rlen = len; return RET_OK; } reprepro-4.13.1/descriptions.c0000644000175100017510000001257612152651661013253 00000000000000/* This file is part of "reprepro" * Copyright (C) 2012 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include "error.h" #include "ignore.h" #include "strlist.h" #include "chunks.h" #include "files.h" #include "debfile.h" #include "binaries.h" #include "descriptions.h" #include "md5.h" /* get the description from a .(u)deb file */ static retvalue description_from_package(const char *control, char **description_p) { struct strlist filekeys; char *filename; char *deb_control; retvalue r; r = binaries_getfilekeys(control, &filekeys); if (r == RET_NOTHING) r = RET_ERROR; if (RET_WAS_ERROR(r)) return r; if (filekeys.count != 1) { fprintf(stderr, "Strange number of files for binary package: %d\n", filekeys.count); strlist_done(&filekeys); return RET_ERROR; } filename = files_calcfullfilename(filekeys.values[0]); strlist_done(&filekeys); if (FAILEDTOALLOC(filename)) return RET_ERROR_OOM; r = extractcontrol(&deb_control, filename); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { 
free(filename); return r; } r = chunk_getwholedata(deb_control, "Description", description_p); if (r == RET_NOTHING) { fprintf(stderr, "Not found any Description within file '%s'!\n", filename); } free(filename); free(deb_control); return r; } static const char tab[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; /* This only matches the official one if the description is well-formed enough. * If it has less or more leading spaces or anything else our reading has stripped, * it will not match.... */ static void description_genmd5(const char *description, /*@out@*/ char *d, size_t len) { struct MD5Context context; unsigned char md5buffer[MD5_DIGEST_SIZE]; int i; assert (len == 2*MD5_DIGEST_SIZE + 1); MD5Init(&context); MD5Update(&context, (const unsigned char*)description, strlen(description)); MD5Update(&context, (const unsigned char*)"\n", 1); MD5Final(md5buffer, &context); for (i=0 ; i < MD5_DIGEST_SIZE ; i++) { *(d++) = tab[md5buffer[i] >> 4]; *(d++) = tab[md5buffer[i] & 0xF]; } *d = '\0'; } retvalue description_complete(const char *package, const char *control, bool force, char **control_p) { char *description, *description_md5, *deb_description, *newcontrol; struct fieldtoadd *todo; size_t dlen; retvalue r; r = chunk_getwholedata(control, "Description", &description); if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) { fprintf(stderr, "Strange control data for '%s': no Description at all\n", package); return RET_ERROR; } if (strchr(description, '\n') != NULL) { /* there already is a long description, nothing to do */ free(description); return RET_NOTHING; } dlen = strlen(description); r = chunk_getwholedata(control, "Description-md5", &description_md5); if (RET_WAS_ERROR(r)) { free(description); return r; } if (r == RET_NOTHING) { /* only short description and no -md5? try to complete anyway... 
*/ description_md5 = NULL; } r = description_from_package(control, &deb_description); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { fprintf(stderr, "Cannot retrieve long description for package '%s' out of package's files!\n", package); free(description); free(description_md5); /* not finding the .deb file is not fatal */ return RET_NOTHING; } /* check if the existing short description matches the found one */ if (strncmp(description, deb_description, dlen) != 0) { fprintf(stderr, "Short Description of package '%s' does not match\n" "the start of the long descriptiongfound in the .deb\n", package); if (!force) { free(description); free(description_md5); free(deb_description); /* not fatal, only not processed */ return RET_NOTHING; } } free(description); /* check if Description-md5 matches */ if (description_md5 != NULL) { char found[2 * MD5_DIGEST_SIZE + 1]; description_genmd5(deb_description, found, sizeof(found)); if (strcmp(found, description_md5) != 0) { fprintf(stderr, "Description-md5 of package '%s' does not match\n" "the md5 of the description found in the .deb\n" "('%s' != '%s')!\n", package, description_md5, found); if (!force) { free(description_md5); /* not fatal, only not processed */ free(deb_description); return RET_NOTHING; } } free(description_md5); } todo = deletefield_new("Description-md5", NULL); if (!FAILEDTOALLOC(todo)) todo = addfield_new("Description", deb_description, todo); newcontrol = chunk_replacefields(control, todo, "Description", false); addfield_free(todo); free(deb_description); if (FAILEDTOALLOC(newcontrol)) return RET_ERROR_OOM; *control_p = newcontrol; return RET_OK; } reprepro-4.13.1/tool.c0000644000175100017510000024123312152651661011514 00000000000000/* This file is part of "reprepro" * Copyright (C) 2006,2007,2008,2009,2010 Bernhard R. 
Link
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "error.h"
#include "filecntl.h"
#include "mprintf.h"
#include "strlist.h"
#include "names.h"
#include "dirs.h"
#include "checksums.h"
#include "chunks.h"
#include "chunkedit.h"
#include "signature.h"
#include "debfile.h"
#include "sourceextraction.h"
#include "uncompression.h"

/* for compatibility with used code */
int verbose=0;
bool interrupted(void) {
	return false;
}

static void about(bool help) NORETURN;
/* Print the usage text; exits with success when invoked for help,
 * with failure when invoked because of a usage error. */
static void about(bool help) {
	fprintf(help?stdout:stderr,
"changestool: Modify a Debian style .changes file\n"
"Syntax: changestool [--create] \n"
"Possible commands include:\n"
" verify\n"
" updatechecksums []\n"
" includeallsources []\n"
" adddeb <.deb filenames>\n"
" adddsc <.dsc filenames>\n"
" addrawfile \n"
" add \n"
" setdistribution \n"
" dumbremove \n"
);
	if (help)
		exit(EXIT_SUCCESS);
	else
		exit(EXIT_FAILURE);
}

/* one concrete .deb/.udeb file seen for a binary package name */
struct binaryfile {
	struct binaryfile *next; // in binaries.files list
	struct binary *binary; // parent
	struct fileentry *file;
	char *controlchunk;
	char *name, *version, *architecture;
	char *sourcename, *sourceversion;
	char *maintainer;
	char *section, *priority;
	char *shortdescription;
	bool hasmd5sums;
};

/* free a binaryfile and all the strings it owns
 * (f->file and f->binary are owned elsewhere) */
static void binaryfile_free(struct binaryfile *p) {
	if (p == NULL)
		return;
free(p->controlchunk); free(p->name); free(p->version); free(p->architecture); free(p->sourcename); free(p->sourceversion); free(p->maintainer); free(p->section); free(p->priority); free(p->shortdescription); free(p); } enum filetype { ft_UNKNOWN, ft_TAR, ft_ORIG_TAR, ft_DIFF, #define ft_MaxInSource ft_DSC-1 ft_DSC, ft_DEB, ft_UDEB , ft_Count}; #define ft_Max ft_Count-1 static const struct { const char *suffix; size_t len; bool allowcompressed; } typesuffix[ft_Count] = { { "?", -1, false}, { ".tar", 4, true}, { ".orig.tar", 9, true}, { ".diff", 5, true}, { ".dsc", 4, false}, { ".deb", 4, false}, { ".udeb", 5, false} }; struct dscfile { struct fileentry *file; char *name; char *version; struct strlist binaries; char *maintainer; char *controlchunk; // hard to get: char *section, *priority; // TODO: check Architectures? struct checksumsarray expected; struct fileentry **uplink; bool parsed, modified; }; static void dscfile_free(struct dscfile *p) { if (p == NULL) return; free(p->name); free(p->version); free(p->maintainer); free(p->controlchunk); free(p->section); free(p->priority); checksumsarray_done(&p->expected); free(p->uplink); free(p); } struct fileentry { struct fileentry *next; char *basename; size_t namelen; char *fullfilename; /* NULL means was not listed there yet: */ struct checksums *checksumsfromchanges, *realchecksums; char *section, *priority; enum filetype type; enum compression compression; /* only if type deb or udeb */ struct binaryfile *deb; /* only if type dsc */ struct dscfile *dsc; int refcount; }; struct changes; static struct fileentry *add_fileentry(struct changes *c, const char *basefilename, size_t len, bool source, /*@null@*//*@out@*/size_t *ofs_p); struct changes { /* the filename of the .changes file */ char *filename; /* directory of filename */ char *basedir; /* Contents of the .changes file: */ char *name; char *version; char *maintainer; char *control; struct strlist architectures; struct strlist distributions; size_t binarycount; 
struct binary { char *name; char *description; struct binaryfile *files; bool missedinheader, uncheckable; } *binaries; struct fileentry *files; bool modified; }; static void fileentry_free(/*@only@*/struct fileentry *f) { if (f == NULL) return; free(f->basename); free(f->fullfilename); checksums_free(f->checksumsfromchanges); checksums_free(f->realchecksums); free(f->section); free(f->priority); if (f->type == ft_DEB || f->type == ft_UDEB) { binaryfile_free(f->deb); } else if (f->type == ft_DSC) { dscfile_free(f->dsc); } free(f); } static void changes_free(struct changes *c) { unsigned int i; if (c == NULL) return; free(c->filename); free(c->basedir); free(c->name); free(c->version); free(c->maintainer); free(c->control); strlist_done(&c->architectures); strlist_done(&c->distributions); for (i = 0 ; i < c->binarycount ; i++) { free(c->binaries[i].name); free(c->binaries[i].description); // .files belongs elsewhere } free(c->binaries); while (c->files) { struct fileentry *f = c->files; c->files = f->next; fileentry_free(f); } free(c); } static struct fileentry **find_fileentry(struct changes *c, const char *basefilename, size_t basenamelen, size_t *ofs_p) { struct fileentry **fp = &c->files; struct fileentry *f; size_t ofs = 0; while ((f=*fp) != NULL) { if (f->namelen == basenamelen && strncmp(basefilename, f->basename, basenamelen) == 0) { break; } fp = &f->next; ofs++; } if (ofs_p != NULL) *ofs_p = ofs; return fp; } static struct fileentry *add_fileentry(struct changes *c, const char *basefilename, size_t len, bool source, size_t *ofs_p) { size_t ofs = 0; struct fileentry **fp = find_fileentry(c, basefilename, len, &ofs); struct fileentry *f = *fp; if (f == NULL) { enum compression; f = zNEW(struct fileentry); if (FAILEDTOALLOC(f)) return NULL; f->basename = strndup(basefilename, len); f->namelen = len; if (FAILEDTOALLOC(f->basename)) { free(f); return NULL; } *fp = f; /* guess compression */ f->compression = compression_by_suffix(f->basename, &len); /* guess 
type */ for (f->type = source?ft_MaxInSource:ft_Max ; f->type > ft_UNKNOWN ; f->type--) { size_t l = typesuffix[f->type].len; if (f->compression != c_none && !typesuffix[f->type].allowcompressed) continue; if (len <= l) continue; if (strncmp(f->basename + (len-l), typesuffix[f->type].suffix, l) == 0) break; } } if (ofs_p != NULL) *ofs_p = ofs; return f; } static retvalue searchforfile(const char *changesdir, const char *basefilename, /*@null@*/const struct strlist *searchpath, /*@null@*/const char *searchfirstin, char **result) { int i; bool found; char *fullname; if (searchfirstin != NULL) { fullname = calc_dirconcat(searchfirstin, basefilename); if (FAILEDTOALLOC(fullname)) return RET_ERROR_OOM; if (isregularfile(fullname)) { *result = fullname; return RET_OK; } free(fullname); } fullname = calc_dirconcat(changesdir, basefilename); if (FAILEDTOALLOC(fullname)) return RET_ERROR_OOM; found = isregularfile(fullname); i = 0; while (!found && searchpath != NULL && i < searchpath->count) { free(fullname); fullname = calc_dirconcat(searchpath->values[i], basefilename); if (FAILEDTOALLOC(fullname)) return RET_ERROR_OOM; if (isregularfile(fullname)) { found = true; break; } i++; } if (found) { *result = fullname; return RET_OK; } else { free(fullname); return RET_NOTHING; } } static retvalue findfile(const char *filename, const struct changes *c, /*@null@*/const struct strlist *searchpath, /*@null@*/const char *searchfirstin, char **result) { char *fullfilename; if (rindex(filename, '/') == NULL) { retvalue r; r = searchforfile(c->basedir, filename, searchpath, searchfirstin, &fullfilename); if (!RET_IS_OK(r)) return r; } else { if (!isregularfile(filename)) return RET_NOTHING; fullfilename = strdup(filename); if (FAILEDTOALLOC(fullfilename)) return RET_ERROR_OOM; } *result = fullfilename; return RET_OK; } static retvalue add_file(struct changes *c, /*@only@*/char *basefilename, /*@only@*/char *fullfilename, enum filetype type, struct fileentry **file) { size_t 
basenamelen = strlen(basefilename); struct fileentry **fp; struct fileentry *f; fp = find_fileentry(c, basefilename, basenamelen, NULL); f = *fp; if (f != NULL) { *file = f; free(basefilename); free(fullfilename); return RET_NOTHING; } assert (f == NULL); f = zNEW(struct fileentry); if (FAILEDTOALLOC(f)) { free(basefilename); free(fullfilename); return RET_ERROR_OOM; } f->basename = basefilename; f->namelen = basenamelen; f->fullfilename = fullfilename; f->type = type; f->compression = c_none; *fp = f; *file = f; return RET_OK; } static struct binary *get_binary(struct changes *c, const char *p, size_t len) { unsigned int j; for (j = 0 ; j < c->binarycount ; j++) { if (strncmp(c->binaries[j].name, p, len) == 0 && c->binaries[j].name[len] == '\0') break; } if (j == c->binarycount) { char *name = strndup(p, len); struct binary *n; if (FAILEDTOALLOC(name)) return NULL; n = realloc(c->binaries, (j+1)*sizeof(struct binary)); if (FAILEDTOALLOC(n)) { free(name); return NULL; } c->binaries = n; c->binarycount = j+1; c->binaries[j].name = name; c->binaries[j].description = NULL; c->binaries[j].files = NULL; c->binaries[j].missedinheader = true; c->binaries[j].uncheckable = false; } assert (j < c->binarycount); return &c->binaries[j]; } static retvalue parse_changes_description(struct changes *c, struct strlist *tmp) { int i; for (i = 0 ; i < tmp->count ; i++) { struct binary *b; const char *p = tmp->values[i]; const char *e = p; const char *d; while (*e != '\0' && *e != ' ' && *e != '\t') e++; d = e; while (*d == ' ' || *d == '\t') d++; if (*d == '-') d++; while (*d == ' ' || *d == '\t') d++; b = get_binary(c, p, e-p); if (FAILEDTOALLOC(b)) return RET_ERROR_OOM; b->description = strdup(d); if (FAILEDTOALLOC(b->description)) return RET_ERROR_OOM; } return RET_OK; } static retvalue parse_changes_files(struct changes *c, struct strlist filelines[cs_hashCOUNT]) { int i; struct fileentry *f; retvalue r; struct hashes *hashes; struct strlist *tmp; size_t ofs, count = 0; enum 
checksumtype cs; tmp = &filelines[cs_md5sum]; hashes = nzNEW(tmp->count, struct hashes); if (FAILEDTOALLOC(hashes)) return RET_ERROR_OOM; for (i = 0 ; i < tmp->count ; i++) { char *p; const char *md5start, *md5end, *sizestart, *sizeend, *sectionstart, *sectionend, *priostart, *prioend, *filestart, *fileend; p = tmp->values[i]; #undef xisspace #define xisspace(c) (c == ' ' || c == '\t') while (*p !='\0' && xisspace(*p)) p++; md5start = p; while ((*p >= '0' && *p <= '9') || (*p >= 'A' && *p <= 'F') || (*p >= 'a' && *p <= 'f')) { if (*p >= 'A' && *p <= 'F') (*p) += 'a' - 'A'; p++; } md5end = p; while (*p !='\0' && !xisspace(*p)) p++; while (*p !='\0' && xisspace(*p)) p++; while (*p == '0' && ('0' <= p[1] && p[1] <= '9')) p++; sizestart = p; while ((*p >= '0' && *p <= '9')) p++; sizeend = p; while (*p !='\0' && !xisspace(*p)) p++; while (*p !='\0' && xisspace(*p)) p++; sectionstart = p; while (*p !='\0' && !xisspace(*p)) p++; sectionend = p; while (*p !='\0' && xisspace(*p)) p++; priostart = p; while (*p !='\0' && !xisspace(*p)) p++; prioend = p; while (*p !='\0' && xisspace(*p)) p++; filestart = p; while (*p !='\0' && !xisspace(*p)) p++; fileend = p; while (*p !='\0' && xisspace(*p)) p++; if (*p != '\0') { fprintf(stderr, "Unexpected sixth argument in '%s'!\n", tmp->values[i]); free(hashes); return RET_ERROR; } if (fileend - filestart == 0) continue; f = add_fileentry(c, filestart, fileend-filestart, false, &ofs); assert (ofs <= count); if (ofs == count) count++; if (hashes[ofs].hashes[cs_md5sum].start != NULL) { fprintf(stderr, "WARNING: Multiple occourance of '%s' in .changes file!\nIgnoring all but the first one.\n", f->basename); continue; } hashes[ofs].hashes[cs_md5sum].start = md5start; hashes[ofs].hashes[cs_md5sum].len = md5end - md5start; hashes[ofs].hashes[cs_length].start = sizestart; hashes[ofs].hashes[cs_length].len = sizeend - sizestart; if (sectionend - sectionstart == 1 && *sectionstart == '-') { f->section = NULL; } else { f->section = 
strndup(sectionstart, sectionend - sectionstart); if (FAILEDTOALLOC(f->section)) return RET_ERROR_OOM; } if (prioend - priostart == 1 && *priostart == '-') { f->priority = NULL; } else { f->priority = strndup(priostart, prioend - priostart); if (FAILEDTOALLOC(f->priority)) return RET_ERROR_OOM; } } const char * const hashname[cs_hashCOUNT] = {"Md5", "Sha1", "Sha256" }; for (cs = cs_firstEXTENDED ; cs < cs_hashCOUNT ; cs++) { tmp = &filelines[cs]; for (i = 0 ; i < tmp->count ; i++) { char *p; const char *hashstart, *hashend, *sizestart, *sizeend, *filestart, *fileend; p = tmp->values[i]; while (*p !='\0' && xisspace(*p)) p++; hashstart = p; while ((*p >= '0' && *p <= '9') || (*p >= 'A' && *p <= 'F') || (*p >= 'a' && *p <= 'f') ) { if (*p >= 'A' && *p <= 'F') (*p) += 'a' - 'A'; p++; } hashend = p; while (*p !='\0' && !xisspace(*p)) p++; while (*p !='\0' && xisspace(*p)) p++; while (*p == '0' && ('0' <= p[1] && p[1] <= '9')) p++; sizestart = p; while ((*p >= '0' && *p <= '9')) p++; sizeend = p; while (*p !='\0' && !xisspace(*p)) p++; while (*p !='\0' && xisspace(*p)) p++; filestart = p; while (*p !='\0' && !xisspace(*p)) p++; fileend = p; while (*p !='\0' && xisspace(*p)) p++; if (*p != '\0') { fprintf(stderr, "Unexpected forth argument in '%s'!\n", tmp->values[i]); return RET_ERROR; } if (fileend - filestart == 0) continue; f = add_fileentry(c, filestart, fileend-filestart, false, &ofs); assert (ofs <= count); // until md5sums are no longer obligatory: if (ofs == count) continue; if (hashes[ofs].hashes[cs].start != NULL) { fprintf(stderr, "WARNING: Multiple occourance of '%s' in Checksums-'%s' of .changes file!\n" "Ignoring all but the first one.\n", f->basename, hashname[cs]); continue; } hashes[ofs].hashes[cs].start = hashstart; hashes[ofs].hashes[cs].len = hashend - hashstart; // TODO: compare instead: // hashes[ofs].hashes[cs_length].start = sizestart; // hashes[ofs].hashes[cs_length].len = sizeend - sizestart; } } ofs = 0; for (f = c->files ; f != NULL ; f = 
f->next, ofs++) { r = checksums_initialize(&f->checksumsfromchanges, hashes[ofs].hashes); if (RET_WAS_ERROR(r)) return r; } assert (count == ofs); free(hashes); return RET_OK; } static retvalue read_dscfile(const char *fullfilename, struct dscfile **dsc) { struct dscfile *n; struct strlist filelines[cs_hashCOUNT]; enum checksumtype cs; retvalue r; n = zNEW(struct dscfile); if (FAILEDTOALLOC(n)) return RET_ERROR_OOM; r = signature_readsignedchunk(fullfilename, fullfilename, &n->controlchunk, NULL, NULL); assert (r != RET_NOTHING); // TODO: can this be ignored sometimes? if (RET_WAS_ERROR(r)) { free(n); return r; } r = chunk_getname(n->controlchunk, "Source", &n->name, false); if (RET_WAS_ERROR(r)) { dscfile_free(n); return r; } r = chunk_getvalue(n->controlchunk, "Maintainer", &n->maintainer); if (RET_WAS_ERROR(r)) { dscfile_free(n); return r; } r = chunk_getvalue(n->controlchunk, "Version", &n->version); if (RET_WAS_ERROR(r)) { dscfile_free(n); return r; } /* unusally not here, but hidden in the contents */ r = chunk_getvalue(n->controlchunk, "Section", &n->section); if (RET_WAS_ERROR(r)) { dscfile_free(n); return r; } /* dito */ r = chunk_getvalue(n->controlchunk, "Priority", &n->priority); if (RET_WAS_ERROR(r)) { dscfile_free(n); return r; } for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) { assert (source_checksum_names[cs] != NULL); r = chunk_getextralinelist(n->controlchunk, source_checksum_names[cs], &filelines[cs]); if (r == RET_NOTHING) { if (cs == cs_md5sum) { fprintf(stderr, "Error: Missing 'Files' entry in '%s'!\n", fullfilename); r = RET_ERROR; } strlist_init(&filelines[cs]); } if (RET_WAS_ERROR(r)) { while (cs-- > cs_md5sum) { strlist_done(&filelines[cs]); } dscfile_free(n); return r; } } r = checksumsarray_parse(&n->expected, filelines, fullfilename); for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) { strlist_done(&filelines[cs]); } if (RET_WAS_ERROR(r)) { dscfile_free(n); return r; } if (n->expected.names.count > 0) { n->uplink = 
nzNEW(n->expected.names.count, struct fileentry *); if (FAILEDTOALLOC(n->uplink)) { dscfile_free(n); return RET_ERROR_OOM; } } *dsc = n; return RET_OK; } static retvalue parse_dsc(struct fileentry *dscfile, struct changes *changes) { struct dscfile *n; retvalue r; int i; if (dscfile->fullfilename == NULL) return RET_NOTHING; r = read_dscfile(dscfile->fullfilename, &n); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; for (i = 0 ; i < n->expected.names.count ; i++) { const char *basefilename = n->expected.names.values[i]; n->uplink[i] = add_fileentry(changes, basefilename, strlen(basefilename), true, NULL); if (FAILEDTOALLOC(n->uplink[i])) { dscfile_free(n); return RET_ERROR_OOM; } } dscfile->dsc = n; return RET_OK; } #define DSC_WRITE_FILES 1 #define DSC_WRITE_ALL 0xFFFF #define flagset(a) (flags & a) != 0 static retvalue write_dsc_file(struct fileentry *dscfile, unsigned int flags) { struct dscfile *dsc = dscfile->dsc; int i; struct chunkeditfield *cef; retvalue r; char *control; size_t controllen; struct checksums *checksums; char *destfilename; enum checksumtype cs; if (flagset(DSC_WRITE_FILES)) { cef = NULL; for (cs = cs_hashCOUNT ; (cs--) > cs_md5sum ; ) { cef = cef_newfield(source_checksum_names[cs], CEF_ADD, CEF_LATE, dsc->expected.names.count, cef); if (FAILEDTOALLOC(cef)) return RET_ERROR_OOM; for (i = 0 ; i < dsc->expected.names.count ; i++) { const char *basefilename = dsc->expected.names.values[i]; const char *hash, *size; size_t hashlen, sizelen; if (!checksums_gethashpart(dsc->expected.checksums[i], cs, &hash, &hashlen, &size, &sizelen)) { assert (cs != cs_md5sum); cef = cef_pop(cef); break; } cef_setline2(cef, i, hash, hashlen, size, sizelen, 1, basefilename, NULL); } } } else cef = NULL; r = chunk_edit(dsc->controlchunk, &control, &controllen, cef); cef_free(cef); if (RET_WAS_ERROR(r)) return r; assert (RET_IS_OK(r)); // TODO: try to add the signatures to it again... // TODO: add options to place changed files in different directory... 
if (dscfile->fullfilename != NULL) destfilename = strdup(dscfile->fullfilename); else destfilename = strdup(dscfile->basename); if (FAILEDTOALLOC(destfilename)) { free(control); return RET_ERROR_OOM; } r = checksums_replace(destfilename, control, controllen, &checksums); if (RET_WAS_ERROR(r)) { free(destfilename); free(control); return r; } assert (RET_IS_OK(r)); free(dscfile->fullfilename); dscfile->fullfilename = destfilename; checksums_free(dscfile->realchecksums); dscfile->realchecksums = checksums; free(dsc->controlchunk); dsc->controlchunk = control; return RET_OK; } static retvalue read_binaryfile(const char *fullfilename, struct binaryfile **result) { retvalue r; struct binaryfile *n; n = zNEW(struct binaryfile); if (FAILEDTOALLOC(n)) return RET_ERROR_OOM; r = extractcontrol(&n->controlchunk, fullfilename); if (!RET_IS_OK(r)) { free(n); if (r == RET_ERROR_OOM) return r; else return RET_NOTHING; } r = chunk_getname(n->controlchunk, "Package", &n->name, false); if (RET_WAS_ERROR(r)) { binaryfile_free(n); return r; } r = chunk_getvalue(n->controlchunk, "Version", &n->version); if (RET_WAS_ERROR(r)) { binaryfile_free(n); return r; } r = chunk_getnameandversion(n->controlchunk, "Source", &n->sourcename, &n->sourceversion); if (RET_WAS_ERROR(r)) { binaryfile_free(n); return r; } r = chunk_getvalue(n->controlchunk, "Maintainer", &n->maintainer); if (RET_WAS_ERROR(r)) { binaryfile_free(n); return r; } r = chunk_getvalue(n->controlchunk, "Architecture", &n->architecture); if (RET_WAS_ERROR(r)) { binaryfile_free(n); return r; } r = chunk_getvalue(n->controlchunk, "Section", &n->section); if (RET_WAS_ERROR(r)) { binaryfile_free(n); return r; } r = chunk_getvalue(n->controlchunk, "Priority", &n->priority); if (RET_WAS_ERROR(r)) { binaryfile_free(n); return r; } r = chunk_getvalue(n->controlchunk, "Description", &n->shortdescription); if (RET_WAS_ERROR(r)) { binaryfile_free(n); return r; } *result = n; return RET_OK; } static retvalue parse_deb(struct fileentry 
*debfile, struct changes *changes) { retvalue r; struct binaryfile *n; if (debfile->fullfilename == NULL) return RET_NOTHING; r = read_binaryfile(debfile->fullfilename, &n); if (!RET_IS_OK(r)) return r; if (n->name != NULL) { n->binary = get_binary(changes, n->name, strlen(n->name)); if (FAILEDTOALLOC(n->binary)) { binaryfile_free(n); return RET_ERROR_OOM; } n->next = n->binary->files; n->binary->files = n; } debfile->deb = n; return RET_OK; } static retvalue processfiles(const char *changesfilename, struct changes *changes, const struct strlist *searchpath) { char *dir; struct fileentry *file; retvalue r; r = dirs_getdirectory(changesfilename, &dir); if (RET_WAS_ERROR(r)) return r; for (file = changes->files; file != NULL ; file = file->next) { assert (file->fullfilename == NULL); r = searchforfile(dir, file->basename, searchpath, NULL, &file->fullfilename); if (RET_IS_OK(r)) { if (file->type == ft_DSC) r = parse_dsc(file, changes); else if (file->type == ft_DEB || file->type == ft_UDEB) r = parse_deb(file, changes); if (RET_WAS_ERROR(r)) { free(dir); return r; } } if (r == RET_NOTHING) { /* apply heuristics when not readable */ if (file->type == ft_DSC) { } else if (file->type == ft_DEB || file->type == ft_UDEB) { struct binary *b; size_t len; len = 0; while (file->basename[len] != '_' && file->basename[len] != '\0') len++; b = get_binary(changes, file->basename, len); if (FAILEDTOALLOC(b)) { free(dir); return RET_ERROR_OOM; } b->uncheckable = true; } } } free(dir); return RET_OK; } static retvalue parse_changes(const char *changesfile, const char *chunk, struct changes **changes, const struct strlist *searchpath) { retvalue r; struct strlist tmp; struct strlist filelines[cs_hashCOUNT]; enum checksumtype cs; #define R if (RET_WAS_ERROR(r)) { changes_free(n); return r; } struct changes *n = zNEW(struct changes); if (FAILEDTOALLOC(n)) return RET_ERROR_OOM; n->filename = strdup(changesfile); if (FAILEDTOALLOC(n->filename)) { changes_free(n); return RET_ERROR_OOM; } 
r = dirs_getdirectory(changesfile, &n->basedir); R; // TODO: do getname here? trim spaces? r = chunk_getvalue(chunk, "Source", &n->name); R; if (r == RET_NOTHING) { fprintf(stderr, "Missing 'Source:' field in %s!\n", changesfile); n->name = NULL; } r = chunk_getvalue(chunk, "Version", &n->version); R; if (r == RET_NOTHING) { fprintf(stderr, "Missing 'Version:' field in %s!\n", changesfile); n->version = NULL; } r = chunk_getwordlist(chunk, "Architecture", &n->architectures); R; if (r == RET_NOTHING) strlist_init(&n->architectures); r = chunk_getwordlist(chunk, "Distribution", &n->distributions); R; if (r == RET_NOTHING) strlist_init(&n->distributions); r = chunk_getvalue(chunk, "Maintainer", &n->maintainer); R; if (r == RET_NOTHING) { fprintf(stderr, "Missing 'Maintainer:' field in %s!\n", changesfile); n->maintainer = NULL; } r = chunk_getuniqwordlist(chunk, "Binary", &tmp); R; if (r == RET_NOTHING) { n->binaries = NULL; } else { int i; assert (RET_IS_OK(r)); n->binaries = nzNEW(tmp.count, struct binary); if (FAILEDTOALLOC(n->binaries)) { changes_free(n); return RET_ERROR_OOM; } for (i = 0 ; i < tmp.count ; i++) { n->binaries[i].name = tmp.values[i]; } n->binarycount = tmp.count; free(tmp.values); } r = chunk_getextralinelist(chunk, "Description", &tmp); R; if (RET_IS_OK(r)) { r = parse_changes_description(n, &tmp); strlist_done(&tmp); if (RET_WAS_ERROR(r)) { changes_free(n); return r; } } for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) { assert (changes_checksum_names[cs] != NULL); r = chunk_getextralinelist(chunk, changes_checksum_names[cs], &filelines[cs]); if (r == RET_NOTHING) { if (cs == cs_md5sum) break; strlist_init(&filelines[cs]); } if (RET_WAS_ERROR(r)) { while (cs-- > cs_md5sum) { strlist_done(&filelines[cs]); } changes_free(n); return r; } } if (cs == cs_hashCOUNT) { r = parse_changes_files(n, filelines); for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) { strlist_done(&filelines[cs]); } if (RET_WAS_ERROR(r)) { changes_free(n); return r; } } r = 
processfiles(changesfile, n, searchpath); R; *changes = n; return RET_OK; } #define CHANGES_WRITE_FILES 0x01 #define CHANGES_WRITE_BINARIES 0x02 #define CHANGES_WRITE_SOURCE 0x04 #define CHANGES_WRITE_VERSION 0x08 #define CHANGES_WRITE_ARCHITECTURES 0x10 #define CHANGES_WRITE_MAINTAINER 0x20 #define CHANGES_WRITE_DISTRIBUTIONS 0x40 #define CHANGES_WRITE_ALL 0xFFFF static retvalue write_changes_file(const char *changesfilename, struct changes *c, unsigned int flags, bool fakefields) { struct chunkeditfield *cef; char datebuffer[100]; retvalue r; char *control; size_t controllen; unsigned int filecount = 0; struct fileentry *f; struct tm *tm; time_t t; unsigned int i; struct strlist binaries; enum checksumtype cs; strlist_init(&binaries); for (f = c->files; f != NULL ; f = f->next) { if (f->checksumsfromchanges != NULL) filecount++; } if (flagset(CHANGES_WRITE_FILES)) { cef = NULL; for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) { cef = cef_newfield(changes_checksum_names[cs], CEF_ADD, CEF_LATE, filecount, cef); if (FAILEDTOALLOC(cef)) return RET_ERROR_OOM; i = 0; for (f = c->files; f != NULL ; f = f->next) { const char *hash, *size; size_t hashlen, sizelen; if (f->checksumsfromchanges == NULL) continue; if (!checksums_gethashpart(f->checksumsfromchanges, cs, &hash, &hashlen, &size, &sizelen)) { assert (cs != cs_md5sum); cef = cef_pop(cef); break; } if (cs == cs_md5sum) cef_setline2(cef, i, hash, hashlen, size, sizelen, 3, f->section?f->section:"-", f->priority?f->priority:"-", f->basename, NULL); else /* strange way, but as dpkg-genchanges * does it this way... 
*/ cef_setline2(cef, i, hash, hashlen, size, sizelen, 1, f->basename, NULL); i++; } assert (f != NULL || i == filecount); } } else { cef = cef_newfield("Files", CEF_KEEP, CEF_LATE, 0, NULL); if (FAILEDTOALLOC(cef)) return RET_ERROR_OOM; } if (fakefields) { cef = cef_newfield("Changes", CEF_ADDMISSED, CEF_LATE, 0, cef); if (FAILEDTOALLOC(cef)) return RET_ERROR_OOM; cef_setdata(cef, "\n Changes information missing, as not an original .changes file"); } else { cef = cef_newfield("Changes", CEF_KEEP, CEF_LATE, 0, cef); if (FAILEDTOALLOC(cef)) return RET_ERROR_OOM; } cef = cef_newfield("Closes", CEF_KEEP, CEF_LATE, 0, cef); if (FAILEDTOALLOC(cef)) return RET_ERROR_OOM; if (flagset(CHANGES_WRITE_BINARIES)) { unsigned int count = 0; for (i = 0 ; i < c->binarycount ; i++) { const struct binary *b = c->binaries + i; if (b->description != NULL) count++; } cef = cef_newfield("Description", CEF_ADD, CEF_LATE, count, cef); if (FAILEDTOALLOC(cef)) return RET_ERROR_OOM; count = 0; for (i = 0 ; i < c->binarycount ; i++) { const struct binary *b = c->binaries + i; if (b->description == NULL) continue; cef_setline(cef, count++, 3, b->name, "-", b->description, NULL); } } // Changed-by: line if (flagset(CHANGES_WRITE_MAINTAINER)) { cef = cef_newfield("Maintainer", CEF_ADD, CEF_EARLY, 0, cef); if (FAILEDTOALLOC(cef)) return RET_ERROR_OOM; cef_setdata(cef, c->maintainer); } else { cef = cef_newfield("Maintainer", CEF_KEEP, CEF_EARLY, 0, cef); if (FAILEDTOALLOC(cef)) return RET_ERROR_OOM; } if (fakefields) { cef = cef_newfield("Urgency", CEF_ADDMISSED, CEF_EARLY, 0, cef); if (FAILEDTOALLOC(cef)) { return RET_ERROR_OOM; } cef_setdata(cef, "low"); } else { cef = cef_newfield("Urgency", CEF_KEEP, CEF_EARLY, 0, cef); if (FAILEDTOALLOC(cef)) return RET_ERROR_OOM; } cef = cef_newfield("Distribution", CEF_KEEP, CEF_EARLY, 0, cef); if (FAILEDTOALLOC(cef)) return RET_ERROR_OOM; if (c->distributions.count > 0) { if (flagset(CHANGES_WRITE_DISTRIBUTIONS)) cef = cef_newfield("Distribution", CEF_ADD, 
CEF_EARLY, 0, cef); else cef = cef_newfield("Distribution", CEF_ADDMISSED, CEF_EARLY, 0, cef); if (FAILEDTOALLOC(cef)) return RET_ERROR_OOM; cef_setwordlist(cef, &c->distributions); } else if (flagset(CHANGES_WRITE_DISTRIBUTIONS)) { cef = cef_newfield("Distribution", CEF_DELETE, CEF_EARLY, 0, cef); if (FAILEDTOALLOC(cef)) return RET_ERROR_OOM; } if (c->version != NULL) { if (flagset(CHANGES_WRITE_VERSION)) cef = cef_newfield("Version", CEF_ADD, CEF_EARLY, 0, cef); else cef = cef_newfield("Version", CEF_ADDMISSED, CEF_EARLY, 0, cef); if (FAILEDTOALLOC(cef)) return RET_ERROR_OOM; cef_setdata(cef, c->version); } else if (flagset(CHANGES_WRITE_VERSION)) { cef = cef_newfield("Version", CEF_DELETE, CEF_EARLY, 0, cef); if (FAILEDTOALLOC(cef)) return RET_ERROR_OOM; } if (flagset(CHANGES_WRITE_ARCHITECTURES)) { cef = cef_newfield("Architecture", CEF_ADD, CEF_EARLY, 0, cef); if (FAILEDTOALLOC(cef)) return RET_ERROR_OOM; cef_setwordlist(cef, &c->architectures); } else { cef = cef_newfield("Architecture", CEF_KEEP, CEF_EARLY, 0, cef); if (FAILEDTOALLOC(cef)) return RET_ERROR_OOM; } if (flagset(CHANGES_WRITE_BINARIES)) { r = strlist_init_n(c->binarycount, &binaries); if (RET_WAS_ERROR(r)) { cef_free(cef); return r; } assert (RET_IS_OK(r)); for (i = 0 ; i < c->binarycount ; i++) { const struct binary *b = c->binaries + i; if (!b->missedinheader) { r = strlist_add_dup(&binaries, b->name); if (RET_WAS_ERROR(r)) { strlist_done(&binaries); cef_free(cef); return r; } } } cef = cef_newfield("Binary", CEF_ADD, CEF_EARLY, 0, cef); if (FAILEDTOALLOC(cef)) { strlist_done(&binaries); return RET_ERROR_OOM; } cef_setwordlist(cef, &binaries); } else { cef = cef_newfield("Binary", CEF_KEEP, CEF_EARLY, 0, cef); if (FAILEDTOALLOC(cef)) return RET_ERROR_OOM; } if (c->name != NULL) { if (flagset(CHANGES_WRITE_SOURCE)) cef = cef_newfield("Source", CEF_ADD, CEF_EARLY, 0, cef); else cef = cef_newfield("Source", CEF_ADDMISSED, CEF_EARLY, 0, cef); if (FAILEDTOALLOC(cef)) { strlist_done(&binaries); 
return RET_ERROR_OOM; } cef_setdata(cef, c->name); } else if (flagset(CHANGES_WRITE_SOURCE)) { cef = cef_newfield("Source", CEF_DELETE, CEF_EARLY, 0, cef); if (FAILEDTOALLOC(cef)) { strlist_done(&binaries); return RET_ERROR_OOM; } } // TODO: if localized make sure this uses C locale.... t = time(NULL); if ((tm = localtime(&t)) != NULL && strftime(datebuffer, sizeof(datebuffer)-1, "%a, %e %b %Y %H:%M:%S %Z", tm) > 0) { cef = cef_newfield("Date", CEF_ADD, CEF_EARLY, 0, cef); if (FAILEDTOALLOC(cef)) { strlist_done(&binaries); return RET_ERROR_OOM; } cef_setdata(cef, datebuffer); } else { cef = cef_newfield("Date", CEF_DELETE, CEF_EARLY, 0, cef); if (FAILEDTOALLOC(cef)) { strlist_done(&binaries); return RET_ERROR_OOM; } } cef = cef_newfield("Format", CEF_ADDMISSED, CEF_EARLY, 0, cef); if (FAILEDTOALLOC(cef)) { strlist_done(&binaries); return RET_ERROR_OOM; } cef_setdata(cef, "1.7"); r = chunk_edit((c->control==NULL)?"":c->control, &control, &controllen, cef); strlist_done(&binaries); cef_free(cef); if (RET_WAS_ERROR(r)) return r; assert (RET_IS_OK(r)); // TODO: try to add the signatures to it again... // TODO: add options to place changed files in different directory... r = checksums_replace(changesfilename, control, controllen, NULL); if (RET_WAS_ERROR(r)) { free(control); return r; } assert (RET_IS_OK(r)); free(c->control); c->control = control; return RET_OK; } static retvalue getchecksums(struct changes *changes) { struct fileentry *file; retvalue r; for (file = changes->files; file != NULL ; file = file->next) { if (file->fullfilename == NULL) continue; assert (file->realchecksums == NULL); r = checksums_read(file->fullfilename, &file->realchecksums); if (r == RET_ERROR_OOM) return r; else if (!RET_IS_OK(r)) { // assume everything else is not fatal and means // a file not readable... 
file->realchecksums = NULL; } } return RET_OK; } static bool may_be_type(const char *name, enum filetype ft) { enum compression c; size_t len = strlen(name); c = compression_by_suffix(name, &len); if (c != c_none && !typesuffix[ft].allowcompressed) return false; return strncmp(name + (len - typesuffix[ft].len), typesuffix[ft].suffix, typesuffix[ft].len) == 0; } static void verify_sourcefile_checksums(struct dscfile *dsc, int i, const char *dscfile) { const struct fileentry * const file = dsc->uplink[i]; const struct checksums * const expectedchecksums = dsc->expected.checksums[i]; const char * const basefilename = dsc->expected.names.values[i]; assert (file != NULL); if (file->checksumsfromchanges == NULL) { if (may_be_type(basefilename, ft_ORIG_TAR)) { fprintf(stderr, "Not checking checksums of '%s', as not included in .changes file.\n", basefilename); return; } else if (file->realchecksums == NULL) { fprintf(stderr, "ERROR: File '%s' mentioned in '%s' was not found and is not mentioned in the .changes!\n", basefilename, dscfile); return; } } if (file->realchecksums == NULL) /* there will be an message later about that */ return; if (checksums_check(expectedchecksums, file->realchecksums, NULL)) return; if (file->checksumsfromchanges != NULL && checksums_check(expectedchecksums, file->checksumsfromchanges, NULL)) fprintf(stderr, "ERROR: checksums of '%s' differ from the ones listed in both '%s' and the .changes file!\n", basefilename, dscfile); else { fprintf(stderr, "ERROR: checksums of '%s' differ from those listed in '%s':\n!\n", basefilename, dscfile); checksums_printdifferences(stderr, expectedchecksums, file->realchecksums); } } static void verify_binary_name(const char *basefilename, const char *name, const char *version, const char *architecture, enum filetype type, enum compression c) { size_t nlen, vlen, alen, slen; const char *versionwithoutepoch; if (name == NULL) return; nlen = strlen(name); if (strncmp(basefilename, name, nlen) != 0 || 
basefilename[nlen] != '_') { fprintf(stderr, "ERROR: '%s' does not start with '%s_' as expected!\n", basefilename, name); return; } if (version == NULL) return; versionwithoutepoch = strchr(version, ':'); if (versionwithoutepoch == NULL) versionwithoutepoch = version; else versionwithoutepoch++; vlen = strlen(versionwithoutepoch); if (strncmp(basefilename+nlen+1, versionwithoutepoch, vlen) != 0 || basefilename[nlen+1+vlen] != '_') { fprintf(stderr, "ERROR: '%s' does not start with '%s_%s_' as expected!\n", basefilename, name, version); return; } if (architecture == NULL) return; alen = strlen(architecture); slen = typesuffix[type].len; if (strncmp(basefilename+nlen+1+vlen+1, architecture, alen) != 0 || strncmp(basefilename+nlen+1+vlen+1+alen, typesuffix[type].suffix, slen) != 0 || strcmp(basefilename+nlen+1+vlen+1+alen+slen, uncompression_suffix[c]) != 0) fprintf(stderr, "ERROR: '%s' is not called '%s_%s_%s%s%s' as expected!\n", basefilename, name, versionwithoutepoch, architecture, typesuffix[type].suffix, uncompression_suffix[c]); } static retvalue verify(const char *changesfilename, struct changes *changes) { retvalue r; struct fileentry *file; size_t k; printf("Checking Source packages...\n"); for (file = changes->files; file != NULL ; file = file->next) { const char *name, *version, *p; size_t namelen, versionlen, l; bool has_tar, has_diff, has_orig, has_format_tar; int i; if (file->type != ft_DSC) continue; if (!strlist_in(&changes->architectures, "source")) { fprintf(stderr, "ERROR: '%s' contains a .dsc, but does not list Architecture 'source'!\n", changesfilename); } if (file->fullfilename == NULL) { fprintf(stderr, "ERROR: Could not find '%s'!\n", file->basename); continue; } if (file->dsc == NULL) { fprintf(stderr, "WARNING: Could not read '%s', thus it cannot be checked!\n", file->fullfilename); continue; } if (file->dsc->name == NULL) fprintf(stderr, "ERROR: '%s' does not contain a 'Source:' header!\n", file->fullfilename); else if (changes->name != 
NULL && strcmp(changes->name, file->dsc->name) != 0) fprintf(stderr, "ERROR: '%s' lists Source '%s' while .changes lists '%s'!\n", file->fullfilename, file->dsc->name, changes->name); if (file->dsc->version == NULL) fprintf(stderr, "ERROR: '%s' does not contain a 'Version:' header!\n", file->fullfilename); else if (changes->version != NULL && strcmp(changes->version, file->dsc->version) != 0) fprintf(stderr, "ERROR: '%s' lists Version '%s' while .changes lists '%s'!\n", file->fullfilename, file->dsc->version, changes->version); if (file->dsc->maintainer == NULL) fprintf(stderr, "ERROR: No maintainer specified in '%s'!\n", file->fullfilename); else if (changes->maintainer != NULL && strcmp(changes->maintainer, file->dsc->maintainer) != 0) fprintf(stderr, "Warning: '%s' lists Maintainer '%s' while .changes lists '%s'!\n", file->fullfilename, file->dsc->maintainer, changes->maintainer); if (file->dsc->section != NULL && file->section != NULL && strcmp(file->section, file->dsc->section) != 0) fprintf(stderr, "Warning: '%s' has Section '%s' while .changes says it is '%s'!\n", file->fullfilename, file->dsc->section, file->section); if (file->dsc->priority != NULL && file->priority != NULL && strcmp(file->priority, file->dsc->priority) != 0) fprintf(stderr, "Warning: '%s' has Priority '%s' while .changes says it is '%s'!\n", file->fullfilename, file->dsc->priority, file->priority); // Todo: check types of files it contains... // check names are sensible p = file->basename; while (*p != '\0' && *p != '_') p++; if (*p == '_') { l = strlen(p+1); assert (l >= 4); /* It ends in ".dsc" to come here */ } else l = 0; if (file->dsc->name != NULL) { name = file->dsc->name; namelen = strlen(name); } else { // TODO: more believe file name or changes name? 
if (changes->name != NULL) { name = changes->name; namelen = strlen(name); } else { if (*p != '_') { name = NULL; namelen = 0; fprintf(stderr, "Warning: '%s' does not contain a '_' separating name and version!\n", file->basename); }else { name = file->basename; namelen = p-name; } } } if (file->dsc->version != NULL) { version = file->dsc->version; versionlen = strlen(version); } else { // TODO: dito if (changes->version != NULL) { version = changes->version; versionlen = strlen(version); } else { if (*p != '_') { version = NULL; SETBUTNOTUSED( versionlen = 0; ) if (name != NULL) fprintf(stderr, "ERROR: '%s' does not contain a '_' separating name and version!\n", file->basename); } else { version = p+1; versionlen = l-4; } } } if (version != NULL) { const char *colon = strchr(version, ':'); if (colon != NULL) { colon++; versionlen -= (colon-version); version = colon; } } if (name != NULL && version != NULL) { if (*p != '_' || (size_t)(p-file->basename) != namelen || l-4 != versionlen || strncmp(p+1, version, versionlen) != 0 || strncmp(file->basename, name, namelen) != 0) fprintf(stderr, "ERROR: '%s' is not called '%*s_%*s.dsc' as expected!\n", file->basename, (unsigned int)namelen, name, (unsigned int)versionlen, version); } has_tar = false; has_format_tar = false; has_diff = false; has_orig = false; for (i = 0 ; i < file->dsc->expected.names.count ; i++) { const char *basefilename = file->dsc->expected.names.values[i]; const struct fileentry *sfile = file->dsc->uplink[i]; size_t expectedversionlen, expectedformatlen; const char *expectedformat; bool istar = false, versionok; switch (sfile->type) { case ft_UNKNOWN: fprintf(stderr, "ERROR: '%s' lists a file '%s' with unrecognized suffix!\n", file->fullfilename, basefilename); break; case ft_TAR: istar = true; has_tar = true; break; case ft_ORIG_TAR: if (has_orig) fprintf(stderr, "ERROR: '%s' lists multiple .orig..tar files!\n", file->fullfilename); has_orig = true; break; case ft_DIFF: if (has_diff) fprintf(stderr, 
"ERROR: '%s' lists multiple .diff files!\n", file->fullfilename); has_diff = true; break; default: assert (sfile->type == ft_UNKNOWN); } if (name == NULL) // TODO: try extracting it from this continue; if (strncmp(sfile->basename, name, namelen) != 0 || sfile->basename[namelen] != '_') { fprintf(stderr, "ERROR: '%s' does not begin with '%*s_' as expected!\n", sfile->basename, (unsigned int)namelen, name); /* cannot check further */ continue; } if (version == NULL) continue; /* versionlen is now always initialized */ if (sfile->type == ft_ORIG_TAR) { const char *q, *revision; revision = NULL; for (q = version; *q != '\0'; q++) { if (*q == '-') revision = q; } if (revision == NULL) expectedversionlen = versionlen; else expectedversionlen = revision - version; } else expectedversionlen = versionlen; versionok = strncmp(sfile->basename+namelen+1, version, expectedversionlen) == 0; if (istar) { if (!versionok) { fprintf(stderr, "ERROR: '%s' does not start with '%*s_%*s' as expected!\n", sfile->basename, (unsigned int)namelen, name, (unsigned int)expectedversionlen, version); continue; } expectedformat = sfile->basename + namelen + 1 + expectedversionlen; if (strncmp(expectedformat, ".tar.", 5) == 0) expectedformatlen = 0; else { const char *dot; dot = strchr(expectedformat + 1, '.'); if (dot == NULL) expectedformatlen = 0; else { expectedformatlen = dot - expectedformat; has_format_tar = true; } } } else { expectedformat = ""; expectedformatlen = 0; } if (sfile->type == ft_UNKNOWN) continue; if (versionok && strncmp(sfile->basename+namelen+1 +expectedversionlen +expectedformatlen, typesuffix[sfile->type].suffix, typesuffix[sfile->type].len) == 0 && strcmp(sfile->basename+namelen+1 +expectedversionlen +expectedformatlen +typesuffix[sfile->type].len, uncompression_suffix[sfile->compression]) == 0) continue; fprintf(stderr, "ERROR: '%s' is not called '%.*s_%.*s%.*s%s%s' as expected!\n", sfile->basename, (unsigned int)namelen, name, (unsigned int)expectedversionlen, 
version, (unsigned int)expectedformatlen, expectedformat, typesuffix[sfile->type].suffix, uncompression_suffix[sfile->compression]); } if (!has_tar && !has_orig) if (has_diff) fprintf(stderr, "ERROR: '%s' lists only a .diff, but no .orig.tar!\n", file->fullfilename); else fprintf(stderr, "ERROR: '%s' lists no source files!\n", file->fullfilename); else if (has_diff && !has_orig) fprintf(stderr, "ERROR: '%s' lists a .diff, but the .tar is not called .orig.tar!\n", file->fullfilename); else if (!has_format_tar && !has_diff && has_orig) fprintf(stderr, "ERROR: '%s' lists a .orig.tar, but no .diff!\n", file->fullfilename); } printf("Checking Binary consistency...\n"); for (k = 0 ; k < changes->binarycount ; k++) { struct binary *b = &changes->binaries[k]; if (b->files == NULL && !b->uncheckable) { /* no files - not even conjectured -, * headers must be wrong */ if (b->description != NULL && !b->missedinheader) { fprintf(stderr, "ERROR: '%s' has binary '%s' in 'Binary:' and 'Description:' header, but no files for it found!\n", changesfilename, b->name); } else if (b->description != NULL) { fprintf(stderr, "ERROR: '%s' has unexpected description of '%s'\n", changesfilename, b->name); } else { assert (!b->missedinheader); fprintf(stderr, "ERROR: '%s' has unexpected Binary: '%s'\n", changesfilename, b->name); } } if (b->files == NULL) continue; /* files are there, make sure they are listed and * have a description*/ if (b->description == NULL) { fprintf(stderr, "ERROR: '%s' has no description for '%s'\n", changesfilename, b->name); } if (b->missedinheader) { fprintf(stderr, "ERROR: '%s' does not list '%s' in its Binary header!\n", changesfilename, b->name); } // TODO: check if the files have the names they should // have an architectures as they are listed... 
} for (file = changes->files; file != NULL ; file = file->next) { const struct binary *b; const struct binaryfile *deb; if (file->type != ft_DEB && file->type != ft_UDEB) continue; if (file->fullfilename == NULL) { fprintf(stderr, "ERROR: Could not find '%s'!\n", file->basename); continue; } if (file->deb == NULL) { fprintf(stderr, "WARNING: Could not read '%s', thus it cannot be checked!\n", file->fullfilename); continue; } deb = file->deb; b = deb->binary; if (deb->shortdescription == NULL) fprintf(stderr, "Warning: '%s' contains no description!\n", file->fullfilename); else if (b->description != NULL && strcmp(b->description, deb->shortdescription) != 0) fprintf(stderr, "Warning: '%s' says '%s' has description '%s' while '%s' has '%s'!\n", changesfilename, b->name, b->description, file->fullfilename, deb->shortdescription); if (deb->name == NULL) fprintf(stderr, "ERROR: '%s' does not contain a 'Package:' header!\n", file->fullfilename); if (deb->sourcename != NULL) { if (strcmp(changes->name, deb->sourcename) != 0) fprintf(stderr, "ERROR: '%s' lists Source '%s' while .changes lists '%s'!\n", file->fullfilename, deb->sourcename, changes->name); } else if (deb->name != NULL && strcmp(changes->name, deb->name) != 0) { fprintf(stderr, "ERROR: '%s' lists Source '%s' while .changes lists '%s'!\n", file->fullfilename, deb->name, changes->name); } if (deb->version == NULL) fprintf(stderr, "ERROR: '%s' does not contain a 'Version:' header!\n", file->fullfilename); if (deb->sourceversion != NULL) { if (strcmp(changes->version, deb->sourceversion) != 0) fprintf(stderr, "ERROR: '%s' lists Source version '%s' while .changes lists '%s'!\n", file->fullfilename, deb->sourceversion, changes->version); } else if (deb->version != NULL && strcmp(changes->version, deb->version) != 0) { fprintf(stderr, "ERROR: '%s' lists Source version '%s' while .changes lists '%s'!\n", file->fullfilename, deb->version, changes->name); } if (deb->maintainer == NULL) fprintf(stderr, "ERROR: No 
maintainer specified in '%s'!\n", file->fullfilename); else if (changes->maintainer != NULL && strcmp(changes->maintainer, deb->maintainer) != 0) fprintf(stderr, "Warning: '%s' lists Maintainer '%s' while .changes lists '%s'!\n", file->fullfilename, deb->maintainer, changes->maintainer); if (deb->section == NULL) fprintf(stderr, "ERROR: No section specified in '%s'!\n", file->fullfilename); else if (file->section != NULL && strcmp(file->section, deb->section) != 0) fprintf(stderr, "Warning: '%s' has Section '%s' while .changes says it is '%s'!\n", file->fullfilename, deb->section, file->section); if (deb->priority == NULL) fprintf(stderr, "ERROR: No priority specified in '%s'!\n", file->fullfilename); else if (file->priority != NULL && strcmp(file->priority, deb->priority) != 0) fprintf(stderr, "Warning: '%s' has Priority '%s' while .changes says it is '%s'!\n", file->fullfilename, deb->priority, file->priority); verify_binary_name(file->basename, deb->name, deb->version, deb->architecture, file->type, file->compression); if (deb->architecture != NULL && !strlist_in(&changes->architectures, deb->architecture)) { fprintf(stderr, "ERROR: '%s' does not list Architecture: '%s' needed for '%s'!\n", changesfilename, deb->architecture, file->fullfilename); } // todo: check for md5sums file, verify it... } printf("Checking checksums...\n"); r = getchecksums(changes); if (RET_WAS_ERROR(r)) return r; for (file = changes->files; file != NULL ; file = file->next) { if (file->checksumsfromchanges == NULL) /* nothing to check here */ continue; if (file->fullfilename == NULL) { fprintf(stderr, "WARNING: Could not check checksums of '%s' as file not found!\n", file->basename); if (file->type == ft_DSC) { fprintf(stderr, "WARNING: This file most likely contains additional checksums which could also not be checked because it was not found!\n"); } continue; } if (file->realchecksums == NULL) { fprintf(stderr, "WARNING: Could not check checksums of '%s'! 
File vanished while checking or not readable?\n", file->basename); } else if (!checksums_check(file->realchecksums, file->checksumsfromchanges, NULL)) { fprintf(stderr, "ERROR: checksums of '%s' differ from those listed in .changes:\n", file->fullfilename); checksums_printdifferences(stderr, file->checksumsfromchanges, file->realchecksums); } if (file->type == ft_DSC) { int i; if (file->dsc == NULL) { fprintf(stderr, "WARNING: Could not read '%s', thus the content cannot be checked\n" " and may be faulty and other things depending on it may be incorrect!\n", file->basename); continue; } for (i = 0 ; i < file->dsc->expected.names.count ; i++) { verify_sourcefile_checksums(file->dsc, i, file->fullfilename); } } // TODO: check .deb files } return RET_OK; } static bool isarg(int argc, char **argv, const char *name) { while (argc > 0) { if (strcmp(*argv, name) == 0) return true; argc--; argv++; } return false; } static bool improvedchecksum_supported(const struct changes *c, bool improvedfilehashes[cs_hashCOUNT]) { enum checksumtype cs; struct fileentry *file; for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) { if (!improvedfilehashes[cs]) continue; for (file = c->files; file != NULL ; file = file->next) { const char *dummy1, *dummy3; size_t dummy2, dummy4; if (file->checksumsfromchanges == NULL) continue; if (!checksums_gethashpart(file->checksumsfromchanges, cs, &dummy1, &dummy2, &dummy3, &dummy4)) break; } if (file == NULL) return true; } return false; } static bool anyset(bool *list, size_t count) { while (count > 0) if (list[--count]) return true; return false; } static retvalue updatechecksums(const char *changesfilename, struct changes *c, int argc, char **argv) { retvalue r; struct fileentry *file; bool improvedfilehashes[cs_hashCOUNT]; r = getchecksums(c); if (RET_WAS_ERROR(r)) return r; /* first update all .dsc files and perhaps recalculate their checksums*/ for (file = c->files; file != NULL ; file = file->next) { int i; bool improvedhash[cs_hashCOUNT]; if 
(file->type != ft_DSC) continue; if (file->dsc == NULL) { fprintf(stderr, "WARNING: Could not read '%s', hopeing the content and its checksums are correct!\n", file->basename); continue; } memset(improvedhash, 0, sizeof(improvedhash)); assert (file->fullfilename != NULL); for (i = 0 ; i < file->dsc->expected.names.count ; i++) { const char *basefilename = file->dsc->expected.names.values[i]; const struct fileentry *sfile = file->dsc->uplink[i]; struct checksums **expected_p = &file->dsc->expected.checksums[i]; const struct checksums * const expected = *expected_p; const char *hashes1, *hashes2; size_t dummy; bool doit; bool improves; assert (expected != NULL); assert (basefilename != NULL); doit = isarg(argc, argv, basefilename); if (argc > 0 && !doit) continue; assert (sfile != NULL); if (sfile->checksumsfromchanges == NULL) { if (!doit) { fprintf(stderr, "Not checking/updating '%s' as not in .changes and not specified on command line.\n", basefilename); continue; } if (sfile->realchecksums == NULL) { fprintf(stderr, "WARNING: Could not check checksums of '%s'!\n", basefilename); continue; } } else { if (sfile->realchecksums == NULL) { fprintf(stderr, "WARNING: Could not check checksums of '%s'!\n", basefilename); continue; } } if (checksums_check(expected, sfile->realchecksums, &improves)) { if (!improves) { /* already correct */ continue; } /* future versions might be able to store them * in the dsc */ r = checksums_combine(expected_p, sfile->realchecksums, improvedhash); if (RET_WAS_ERROR(r)) return r; continue; } r = checksums_getcombined(expected, &hashes1, &dummy); if (!RET_IS_OK(r)) hashes1 = ""; r = checksums_getcombined(sfile->realchecksums, &hashes2, &dummy); if (!RET_IS_OK(r)) hashes2 = ""; fprintf(stderr, "Going to update '%s' in '%s'\nfrom '%s'\nto '%s'.\n", basefilename, file->fullfilename, hashes1, hashes2); checksums_free(*expected_p); *expected_p = checksums_dup(sfile->realchecksums); if (FAILEDTOALLOC(*expected_p)) return RET_ERROR_OOM; 
file->dsc->modified = true; } checksumsarray_resetunsupported(&file->dsc->expected, improvedhash); if (file->dsc->modified | anyset(improvedhash, cs_hashCOUNT)) { r = write_dsc_file(file, DSC_WRITE_FILES); if (RET_WAS_ERROR(r)) return r; } } memset(improvedfilehashes, 0, sizeof(improvedfilehashes)); for (file = c->files; file != NULL ; file = file->next) { bool improves; const char *hashes1, *hashes2; size_t dummy; if (file->checksumsfromchanges == NULL) /* nothing to check here */ continue; if (file->realchecksums == NULL) { fprintf(stderr, "WARNING: Could not check checksums of '%s'! Leaving it as it is.\n", file->basename); continue; } if (checksums_check(file->checksumsfromchanges, file->realchecksums, &improves)) { if (!improves) continue; /* future versions might store sha sums in .changes: */ r = checksums_combine(&file->checksumsfromchanges, file->realchecksums, improvedfilehashes); if (RET_WAS_ERROR(r)) return r; continue; } r = checksums_getcombined(file->checksumsfromchanges, &hashes1, &dummy); if (!RET_IS_OK(r)) hashes1 = ""; r = checksums_getcombined(file->realchecksums, &hashes2, &dummy); if (!RET_IS_OK(r)) hashes2 = ""; fprintf(stderr, "Going to update '%s' in '%s'\nfrom '%s'\nto '%s'.\n", file->basename, changesfilename, hashes1, hashes2); checksums_free(file->checksumsfromchanges); file->checksumsfromchanges = checksums_dup(file->realchecksums); if (FAILEDTOALLOC(file->checksumsfromchanges)) return RET_ERROR_OOM; c->modified = true; } if (c->modified) { return write_changes_file(changesfilename, c, CHANGES_WRITE_FILES, false); } else if (improvedchecksum_supported(c, improvedfilehashes)) { return write_changes_file(changesfilename, c, CHANGES_WRITE_FILES, false); } else return RET_NOTHING; } static retvalue includeallsources(const char *changesfilename, struct changes *c, int argc, char **argv) { struct fileentry *file; for (file = c->files; file != NULL ; file = file->next) { int i; if (file->type != ft_DSC) continue; if (file->dsc == NULL) { 
fprintf(stderr, "WARNING: Could not read '%s', thus cannot determine if it depends on unlisted files!\n", file->basename); continue; } assert (file->fullfilename != NULL); for (i = 0 ; i < file->dsc->expected.names.count ; i++) { const char *basefilename = file->dsc->expected.names.values[i]; struct fileentry * const sfile = file->dsc->uplink[i]; struct checksums **expected_p = &file->dsc->expected.checksums[i]; const struct checksums * const expected = *expected_p; assert (expected != NULL); assert (basefilename != NULL); assert (sfile != NULL); if (sfile->checksumsfromchanges != NULL) continue; if (argc > 0 && !isarg(argc, argv, basefilename)) continue; sfile->checksumsfromchanges = checksums_dup(expected); if (FAILEDTOALLOC(sfile->checksumsfromchanges)) return RET_ERROR_OOM; /* copy section and priority information from the dsc */ if (sfile->section == NULL && file->section != NULL) { sfile->section = strdup(file->section); if (FAILEDTOALLOC(sfile->section)) return RET_ERROR_OOM; } if (sfile->priority == NULL && file->priority != NULL) { sfile->priority = strdup(file->priority); if (FAILEDTOALLOC(sfile->priority)) return RET_ERROR_OOM; } fprintf(stderr, "Going to add '%s' to '%s'.\n", basefilename, changesfilename); c->modified = true; } } if (c->modified) { return write_changes_file(changesfilename, c, CHANGES_WRITE_FILES, false); } else return RET_NOTHING; } static retvalue adddsc(struct changes *c, const char *dscfilename, const struct strlist *searchpath) { retvalue r; struct fileentry *f; struct dscfile *dsc; char *fullfilename, *basefilename; char *origdirectory; const char *v; int i; r = findfile(dscfilename, c, searchpath, ".", &fullfilename); if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) { fprintf(stderr, "Cannot find '%s'!\n", dscfilename); return RET_ERROR_MISSING; } r = read_dscfile(fullfilename, &dsc); if (r == RET_NOTHING) { fprintf(stderr, "Error reading '%s'!\n", fullfilename); r = RET_ERROR; } if (RET_WAS_ERROR(r)) { free(fullfilename); 
return r; } if (dsc->name == NULL || dsc->version == NULL) { if (dsc->name == NULL) fprintf(stderr, "Could not extract name of '%s'!\n", fullfilename); else fprintf(stderr, "Could not extract version of '%s'!\n", fullfilename); dscfile_free(dsc); free(fullfilename); return RET_ERROR; } if (c->name != NULL) { if (strcmp(c->name, dsc->name) != 0) { fprintf(stderr, "ERROR: '%s' lists source '%s' while '%s' already is '%s'!\n", fullfilename, dsc->name, c->filename, c->name); dscfile_free(dsc); free(fullfilename); return RET_ERROR; } } else { c->name = strdup(dsc->name); if (FAILEDTOALLOC(c->name)) { dscfile_free(dsc); free(fullfilename); return RET_ERROR_OOM; } } if (c->version != NULL) { if (strcmp(c->version, dsc->version) != 0) fprintf(stderr, "WARNING: '%s' lists version '%s' while '%s' already lists '%s'!\n", fullfilename, dsc->version, c->filename, c->version); } else { c->version = strdup(dsc->version); if (FAILEDTOALLOC(c->version)) { dscfile_free(dsc); free(fullfilename); return RET_ERROR_OOM; } } // TODO: make sure if the .changes name/version are modified they will // also be written... v = strchr(dsc->version, ':'); if (v != NULL) v++; else v = dsc->version; basefilename = mprintf("%s_%s.dsc", dsc->name, v); if (FAILEDTOALLOC(basefilename)) { dscfile_free(dsc); free(fullfilename); return RET_ERROR_OOM; } r = dirs_getdirectory(fullfilename, &origdirectory); if (RET_WAS_ERROR(r)) { dscfile_free(dsc); free(origdirectory); free(fullfilename); return r; } // TODO: add rename/copy option to be activated when old and new // basefilename differ r = add_file(c, basefilename, fullfilename, ft_DSC, &f); if (RET_WAS_ERROR(r)) { dscfile_free(dsc); free(origdirectory); return r; } if (r == RET_NOTHING) { fprintf(stderr, "ERROR: '%s' already contains a file of the same name!\n", c->filename); dscfile_free(dsc); free(origdirectory); // TODO: check instead if it is already the same... 
return RET_ERROR; } /* f owns dsc, fullfilename and basefilename now */ f->dsc = dsc; /* now include the files needed by this */ for (i = 0 ; i < dsc->expected.names.count ; i++) { struct fileentry *file; const char *b = dsc->expected.names.values[i]; const struct checksums *checksums = dsc->expected.checksums[i]; file = add_fileentry(c, b, strlen(b), true, NULL); if (FAILEDTOALLOC(file)) { free(origdirectory); return RET_ERROR_OOM; } dsc->uplink[i] = file; /* make them appear in the .changes file if not there: */ // TODO: add missing checksums here from file if (file->checksumsfromchanges == NULL) { file->checksumsfromchanges = checksums_dup(checksums); if (FAILEDTOALLOC(file->checksumsfromchanges)) { free(origdirectory); return RET_ERROR_OOM; } } // TODO: otherwise warn if not the same } c->modified = true; r = checksums_read(f->fullfilename, &f->realchecksums); if (RET_WAS_ERROR(r)) { free(origdirectory); return r; } f->checksumsfromchanges = checksums_dup(f->realchecksums); if (FAILEDTOALLOC(f->checksumsfromchanges)) { free(origdirectory); return RET_ERROR_OOM; } /* for a "extended" dsc with section or priority */ if (dsc->section != NULL) { free(f->section); f->section = strdup(dsc->section); if (FAILEDTOALLOC(f->section)) { free(origdirectory); return RET_ERROR_OOM; } } if (dsc->priority != NULL) { free(f->priority); f->priority = strdup(dsc->priority); if (FAILEDTOALLOC(f->priority)) { free(origdirectory); return RET_ERROR_OOM; } } if (f->section == NULL || f->priority == NULL) { struct sourceextraction *extraction; int j; extraction = sourceextraction_init( (f->section == NULL)?&f->section:NULL, (f->priority == NULL)?&f->priority:NULL); if (FAILEDTOALLOC(extraction)) { free(origdirectory); return RET_ERROR_OOM; } for (j = 0 ; j < dsc->expected.names.count ; j++) { sourceextraction_setpart(extraction, j, dsc->expected.names.values[j]); } while (sourceextraction_needs(extraction, &j)) { if (dsc->uplink[j]->fullfilename == NULL) { /* look for file */ r = 
findfile(dsc->expected.names.values[j], c, searchpath, origdirectory, &dsc->uplink[j]->fullfilename); if (RET_WAS_ERROR(r)) { sourceextraction_abort(extraction); free(origdirectory); return r; } if (r == RET_NOTHING || dsc->uplink[j]->fullfilename == NULL) break; } r = sourceextraction_analyse(extraction, dsc->uplink[j]->fullfilename); if (RET_WAS_ERROR(r)) { sourceextraction_abort(extraction); free(origdirectory); return r; } } r = sourceextraction_finish(extraction); if (RET_WAS_ERROR(r)) { free(origdirectory); return r; } } free(origdirectory); /* update information in the main .changes file if not there already */ if (c->maintainer == NULL && dsc->maintainer != NULL) { c->maintainer = strdup(dsc->maintainer); if (FAILEDTOALLOC(c->maintainer)) return RET_ERROR_OOM; } if (!strlist_in(&c->architectures, "source")) { r = strlist_add_dup(&c->architectures, "source"); if (RET_WAS_ERROR(r)) return r; } return RET_OK; } static retvalue adddscs(const char *changesfilename, struct changes *c, int argc, char **argv, const struct strlist *searchpath, bool fakefields) { if (argc <= 0) { fprintf(stderr, "Filenames of .dsc files to include expected!\n"); return RET_ERROR; } while (argc > 0) { retvalue r = adddsc(c, argv[0], searchpath); if (RET_WAS_ERROR(r)) return r; argc--; argv++; } if (c->modified) { return write_changes_file(changesfilename, c, CHANGES_WRITE_ALL, fakefields); } else return RET_NOTHING; } static retvalue adddeb(struct changes *c, const char *debfilename, const struct strlist *searchpath) { retvalue r; struct fileentry *f; struct binaryfile *deb; const char *packagetype; enum filetype type; char *fullfilename, *basefilename; const char *v; r = findfile(debfilename, c, searchpath, ".", &fullfilename); if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) { fprintf(stderr, "Cannot find '%s'!\n", debfilename); return RET_ERROR_MISSING; } r = read_binaryfile(fullfilename, &deb); if (r == RET_NOTHING) { fprintf(stderr, "Error reading '%s'!\n", fullfilename); r 
= RET_ERROR; } if (RET_WAS_ERROR(r)) { free(fullfilename); return r; } // TODO: check if there are other things but the name to distinguish them if (strlen(fullfilename) > 5 && strcmp(fullfilename+strlen(fullfilename)-5, ".udeb") == 0) { packagetype = "udeb"; type = ft_UDEB; } else { packagetype = "deb"; type = ft_DEB; } if (deb->name == NULL || deb->version == NULL || deb->architecture == NULL) { if (deb->name == NULL) fprintf(stderr, "Could not extract packagename of '%s'!\n", fullfilename); else if (deb->version == NULL) fprintf(stderr, "Could not extract version of '%s'!\n", fullfilename); else fprintf(stderr, "Could not extract architecture of '%s'!\n", fullfilename); binaryfile_free(deb); free(fullfilename); return RET_ERROR; } if (c->name != NULL) { const char *sourcename; if (deb->sourcename != NULL) sourcename = deb->sourcename; else sourcename = deb->name; if (strcmp(c->name, sourcename) != 0) { fprintf(stderr, "ERROR: '%s' lists source '%s' while '%s' already is '%s'!\n", fullfilename, sourcename, c->filename, c->name); binaryfile_free(deb); free(fullfilename); return RET_ERROR; } } else { if (deb->sourcename != NULL) c->name = strdup(deb->sourcename); else c->name = strdup(deb->name); if (FAILEDTOALLOC(c->name)) { binaryfile_free(deb); free(fullfilename); return RET_ERROR_OOM; } } if (c->version != NULL) { const char *sourceversion; if (deb->sourceversion != NULL) sourceversion = deb->sourceversion; else sourceversion = deb->version; if (strcmp(c->version, sourceversion) != 0) fprintf(stderr, "WARNING: '%s' lists source version '%s' while '%s' already lists '%s'!\n", fullfilename, sourceversion, c->filename, c->version); } else { if (deb->sourceversion != NULL) c->version = strdup(deb->sourceversion); else c->version = strdup(deb->version); if (FAILEDTOALLOC(c->version)) { binaryfile_free(deb); free(fullfilename); return RET_ERROR_OOM; } } // TODO: make sure if the .changes name/version are modified they will // also be written... 
v = strchr(deb->version, ':'); if (v != NULL) v++; else v = deb->version; basefilename = mprintf("%s_%s_%s.%s", deb->name, v, deb->architecture, packagetype); if (FAILEDTOALLOC(basefilename)) { binaryfile_free(deb); free(fullfilename); return RET_ERROR_OOM; } // TODO: add rename/copy option to be activated when old and new // basefilename differ r = add_file(c, basefilename, fullfilename, type, &f); if (RET_WAS_ERROR(r)) { binaryfile_free(deb); return r; } if (r == RET_NOTHING) { fprintf(stderr, "ERROR: '%s' already contains a file of the same name!\n", c->filename); binaryfile_free(deb); // TODO: check instead if it is already the same... return RET_ERROR; } /* f owns deb, fullfilename and basefilename now */ f->deb = deb; deb->binary = get_binary(c, deb->name, strlen(deb->name)); if (FAILEDTOALLOC(deb->binary)) return RET_ERROR_OOM; deb->next = deb->binary->files; deb->binary->files = deb; deb->binary->missedinheader = false; c->modified = true; r = checksums_read(f->fullfilename, &f->realchecksums); if (RET_WAS_ERROR(r)) return r; f->checksumsfromchanges = checksums_dup(f->realchecksums); if (FAILEDTOALLOC(f->checksumsfromchanges)) return RET_ERROR_OOM; if (deb->shortdescription != NULL) { if (deb->binary->description == NULL) { deb->binary->description = strdup(deb->shortdescription); deb->binary->missedinheader = false; } else if (strcmp(deb->binary->description, deb->shortdescription) != 0) { fprintf(stderr, "WARNING: '%s' already lists a different description for '%s' than contained in '%s'!\n", c->filename, deb->name, fullfilename); } } if (deb->section != NULL) { free(f->section); f->section = strdup(deb->section); } if (deb->priority != NULL) { free(f->priority); f->priority = strdup(deb->priority); } if (c->maintainer == NULL && deb->maintainer != NULL) { c->maintainer = strdup(deb->maintainer); } if (deb->architecture != NULL && !strlist_in(&c->architectures, deb->architecture)) { strlist_add_dup(&c->architectures, deb->architecture); } return RET_OK; } 
static retvalue adddebs(const char *changesfilename, struct changes *c, int argc, char **argv, const struct strlist *searchpath, bool fakefields) { if (argc <= 0) { fprintf(stderr, "Filenames of .deb files to include expected!\n"); return RET_ERROR; } while (argc > 0) { retvalue r = adddeb(c, argv[0], searchpath); if (RET_WAS_ERROR(r)) return r; argc--; argv++; } if (c->modified) { return write_changes_file(changesfilename, c, CHANGES_WRITE_ALL, fakefields); } else return RET_NOTHING; } static retvalue addrawfile(struct changes *c, const char *filename, const struct strlist *searchpath) { retvalue r; struct fileentry *f; char *fullfilename, *basefilename; struct checksums *checksums; r = findfile(filename, c, searchpath, ".", &fullfilename); if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) { fprintf(stderr, "Cannot find '%s'!\n", filename); return RET_ERROR_MISSING; } basefilename = strdup(dirs_basename(filename)); if (FAILEDTOALLOC(basefilename)) { free(fullfilename); return RET_ERROR_OOM; } r = checksums_read(fullfilename, &checksums); if (RET_WAS_ERROR(r)) { free(fullfilename); free(basefilename); return r; } r = add_file(c, basefilename, fullfilename, ft_UNKNOWN, &f); // fullfilename and basefilename now belong to *f or are already free'd basefilename = NULL; fullfilename = NULL; if (RET_WAS_ERROR(r)) { checksums_free(checksums); return r; } if (r == RET_NOTHING) { assert (f != NULL); if (f->checksumsfromchanges != NULL) { /* already listed in .changes */ if (!checksums_check(f->checksumsfromchanges, checksums, NULL)) { fprintf(stderr, "ERROR: '%s' already contains a file with name '%s' but different checksums!\n", c->filename, f->basename); checksums_free(checksums); return RET_ERROR; } printf( "'%s' already lists '%s' with same checksums. Doing nothing.\n", c->filename, f->basename); checksums_free(checksums); return RET_NOTHING; } else { /* file already expected by some other part (e.g. 
a .dsc) */ // TODO: find out whom this files belong to and warn if different } } c->modified = true; assert (f->checksumsfromchanges == NULL); f->checksumsfromchanges = checksums; checksums = NULL; if (f->realchecksums == NULL) f->realchecksums = checksums_dup(f->checksumsfromchanges); if (FAILEDTOALLOC(f->realchecksums)) return RET_ERROR_OOM; return RET_OK; } static retvalue addrawfiles(const char *changesfilename, struct changes *c, int argc, char **argv, const struct strlist *searchpath, bool fakefields) { if (argc <= 0) { fprintf(stderr, "Filenames of files to add (without further parsing) expected!\n"); return RET_ERROR; } while (argc > 0) { retvalue r = addrawfile(c, argv[0], searchpath); if (RET_WAS_ERROR(r)) return r; argc--; argv++; } if (c->modified) { return write_changes_file(changesfilename, c, CHANGES_WRITE_FILES, fakefields); } else return RET_NOTHING; } static retvalue addfiles(const char *changesfilename, struct changes *c, int argc, char **argv, const struct strlist *searchpath, bool fakefields) { if (argc <= 0) { fprintf(stderr, "Filenames of files to add expected!\n"); return RET_ERROR; } while (argc > 0) { retvalue r; const char *filename = argv[0]; size_t l = strlen(filename); if ((l > 4 && strcmp(filename+l-4, ".deb") == 0) || (l > 5 && strcmp(filename+l-5, ".udeb") == 0)) r = adddeb(c, filename, searchpath); else if ((l > 4 && strcmp(filename+l-4, ".dsc") == 0)) r = adddsc(c, filename, searchpath); else r = addrawfile(c, argv[0], searchpath); if (RET_WAS_ERROR(r)) return r; argc--; argv++; } if (c->modified) { return write_changes_file(changesfilename, c, CHANGES_WRITE_ALL, fakefields); } else return RET_NOTHING; } static retvalue dumbremovefiles(const char *changesfilename, struct changes *c, int argc, char **argv) { if (argc <= 0) { fprintf(stderr, "Filenames of files to remove (without further parsing) expected!\n"); return RET_ERROR; } while (argc > 0) { struct fileentry **fp; /*@null@*/ struct fileentry *f; fp = find_fileentry(c, 
argv[0], strlen(argv[0]), NULL); f = *fp; if (f == NULL) { fprintf(stderr, "Not removing '%s' as not listed in '%s'!\n", argv[0], c->filename); } else if (f->checksumsfromchanges != NULL) { /* removing its checksums makes it vanish from the * .changes file generated, while still keeping pointers * from other files intact */ checksums_free(f->checksumsfromchanges); f->checksumsfromchanges = NULL; c->modified = true; } argc--; argv++; } if (c->modified) { return write_changes_file(changesfilename, c, CHANGES_WRITE_FILES, false); } else return RET_NOTHING; } static retvalue setdistribution(const char *changesfilename, struct changes *c, int argc, char **argv) { retvalue r; struct strlist distributions; int i; if (argc <= 0) { fprintf(stderr, "expected Distribution name to set!\n"); return RET_ERROR; } r = strlist_init_n(argc, &distributions); if (RET_WAS_ERROR(r)) return r; for (i = 0 ; i < argc ; i++) { r = strlist_add_dup(&distributions, argv[i]); if (RET_WAS_ERROR(r)) { strlist_done(&distributions); return r; } } strlist_done(&c->distributions); strlist_move(&c->distributions, &distributions); return write_changes_file(changesfilename, c, CHANGES_WRITE_DISTRIBUTIONS, false); } static int execute_command(int argc, char **argv, const char *changesfilename, const struct strlist *searchpath, bool file_exists, bool create_file, bool fakefields, struct changes *changesdata) { const char *command = argv[0]; retvalue r; assert (argc > 0); if (strcasecmp(command, "verify") == 0) { if (argc > 1) { fprintf(stderr, "Too many arguments!\n"); r = RET_ERROR; } else if (file_exists) r = verify(changesfilename, changesdata); else { fprintf(stderr, "No such file '%s'!\n", changesfilename); r = RET_ERROR; } } else if (strcasecmp(command, "updatechecksums") == 0) { if (file_exists) r = updatechecksums(changesfilename, changesdata, argc-1, argv+1); else { fprintf(stderr, "No such file '%s'!\n", changesfilename); r = RET_ERROR; } } else if (strcasecmp(command, "includeallsources") == 0) 
{ if (file_exists) r = includeallsources(changesfilename, changesdata, argc-1, argv+1); else { fprintf(stderr, "No such file '%s'!\n", changesfilename); r = RET_ERROR; } } else if (strcasecmp(command, "addrawfile") == 0) { if (file_exists || create_file) r = addrawfiles(changesfilename, changesdata, argc-1, argv+1, searchpath, fakefields); else { fprintf(stderr, "No such file '%s'!\n", changesfilename); r = RET_ERROR; } } else if (strcasecmp(command, "adddsc") == 0) { if (file_exists || create_file) r = adddscs(changesfilename, changesdata, argc-1, argv+1, searchpath, fakefields); else { fprintf(stderr, "No such file '%s'!\n", changesfilename); r = RET_ERROR; } } else if (strcasecmp(command, "adddeb") == 0) { if (file_exists || create_file) r = adddebs(changesfilename, changesdata, argc-1, argv+1, searchpath, fakefields); else { fprintf(stderr, "No such file '%s'!\n", changesfilename); r = RET_ERROR; } } else if (strcasecmp(command, "add") == 0) { if (file_exists || create_file) r = addfiles(changesfilename, changesdata, argc-1, argv+1, searchpath, fakefields); else { fprintf(stderr, "No such file '%s'!\n", changesfilename); r = RET_ERROR; } } else if (strcasecmp(command, "setdistribution") == 0) { if (file_exists) r = setdistribution(changesfilename, changesdata, argc-1, argv+1); else { fprintf(stderr, "No such file '%s'!\n", changesfilename); r = RET_ERROR; } } else if (strcasecmp(command, "dumbremove") == 0) { if (file_exists) r = dumbremovefiles(changesfilename, changesdata, argc-1, argv+1); else { fprintf(stderr, "No such file '%s'!\n", changesfilename); r = RET_ERROR; } } else { fprintf(stderr, "Unknown command '%s'\n", command); r = RET_ERROR; } return r; } static retvalue splitpath(struct strlist *list, const char *path) { retvalue r; const char *next; while ((next = index(path, ':')) != NULL) { if (next > path) { char *dir = strndup(path, next-path); if (FAILEDTOALLOC(dir)) { return RET_ERROR_OOM; } r = strlist_add(list, dir); if (RET_WAS_ERROR(r)) return 
r; } path = next+1; } return strlist_add_dup(list, path); } int main(int argc, char *argv[]) { static int longoption = 0; static const struct option longopts[] = { {"help", no_argument, NULL, 'h'}, {"create", no_argument, NULL, 'C'}, {"create-with-all-fields", no_argument, &longoption, 6}, {"searchpath", required_argument, NULL, 's'}, {"gunzip", required_argument, &longoption, 1}, {"bunzip2", required_argument, &longoption, 2}, {"unlzma", required_argument, &longoption, 3}, {"unxz", required_argument, &longoption, 4}, {"lunzip", required_argument, &longoption, 5}, {NULL, 0, NULL, 0}, }; int c; const char *changesfilename; bool file_exists; bool create_file = false; bool all_fields = false; struct strlist searchpath; struct changes *changesdata; char *gunzip = NULL, *bunzip2 = NULL, *unlzma = NULL, *unxz = NULL, *lunzip = NULL; retvalue r; strlist_init(&searchpath); while ((c = getopt_long(argc, argv, "+hi:s:", longopts, NULL)) != -1) { switch (c) { case '\0': switch (longoption) { case 1: gunzip = strdup(optarg); break; case 2: bunzip2 = strdup(optarg); break; case 3: unlzma = strdup(optarg); break; case 4: unxz = strdup(optarg); break; case 5: lunzip = strdup(optarg); break; case 6: create_file = true; all_fields = true; break; } break; case 'h': about(true); case 'C': create_file = true; break; case 's': r = splitpath(&searchpath, optarg); if (RET_WAS_ERROR(r)) { if (r == RET_ERROR_OOM) fprintf(stderr, "Out of memory!\n"); exit(EXIT_FAILURE); } break; } } if (argc - optind < 2) { about(false); } signature_init(false); uncompressions_check(gunzip, bunzip2, unlzma, unxz, lunzip); changesfilename = argv[optind]; if (strcmp(changesfilename, "-") != 0 && !endswith(changesfilename, ".changes")) { fprintf(stderr, "first argument not ending with '.changes'\n"); exit(EXIT_FAILURE); } file_exists = isregularfile(changesfilename); if (file_exists) { char *changes; r = signature_readsignedchunk(changesfilename, changesfilename, &changes, NULL, NULL); if (!RET_IS_OK(r)) { 
signatures_done(); if (r == RET_ERROR_OOM) fprintf(stderr, "Out of memory!\n"); exit(EXIT_FAILURE); } r = parse_changes(changesfilename, changes, &changesdata, &searchpath); if (RET_IS_OK(r)) changesdata->control = changes; else { free(changes); changesdata = NULL; } } else { changesdata = zNEW(struct changes); if (FAILEDTOALLOC(changesdata)) r = RET_ERROR_OOM; else { changesdata->filename = strdup(changesfilename); if (FAILEDTOALLOC(changesdata->filename)) r = RET_ERROR_OOM; else r = dirs_getdirectory(changesfilename, &changesdata->basedir); } } if (!RET_WAS_ERROR(r)) { argc -= (optind+1); argv += (optind+1); r = execute_command(argc, argv, changesfilename, &searchpath, file_exists, create_file, all_fields, changesdata); } changes_free(changesdata); signatures_done(); if (RET_IS_OK(r)) exit(EXIT_SUCCESS); if (r == RET_ERROR_OOM) fprintf(stderr, "Out of memory!\n"); exit(EXIT_FAILURE); } reprepro-4.13.1/outhook.c0000644000175100017510000001203112152651661012217 00000000000000/* This file is part of "reprepro" * Copyright (C) 2012 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "filecntl.h" #include "mprintf.h" #include "strlist.h" #include "dirs.h" #include "hooks.h" #include "outhook.h" static FILE *outlogfile = NULL; static char *outlogfilename = NULL; static bool outlognonempty = false; retvalue outhook_start(void) { retvalue r; int fd; char *template; assert (outlogfilename == NULL); assert (outlogfile == NULL); r = dirs_create(global.logdir); if (RET_WAS_ERROR(r)) return r; template = mprintf("%s/%010llu-XXXXXX.outlog", global.logdir, (unsigned long long)time(NULL)); if (FAILEDTOALLOC(template)) return RET_ERROR_OOM; fd = mkstemps(template, 7); if (fd < 0) { int e = errno; fprintf(stderr, "Error %d creating new file in %s: %s\n", e, global.logdir, strerror(e)); free(template); return RET_ERRNO(e); } outlogfile = fdopen(fd, "w"); if (outlogfile == NULL) { int e = errno; (void)close(fd); fprintf(stderr, "Error %d from fdopen: %s\n", e, strerror(e)); free(template); return RET_ERRNO(e); } outlogfilename = template; return RET_OK; } void outhook_send(const char *command, const char *arg1, const char *arg2, const char *arg3) { assert (command != NULL); assert (arg1 != NULL); assert (arg3 == NULL || arg2 != NULL); if (outlogfile == NULL) return; if (arg2 == NULL) fprintf(outlogfile, "%s\t%s\n", command, arg1); else if (arg3 == NULL) fprintf(outlogfile, "%s\t%s\t%s\n", command, arg1, arg2); else fprintf(outlogfile, "%s\t%s\t%s\t%s\n", command, arg1, arg2, arg3); outlognonempty = true; } void outhook_sendpool(component_t component, const char *sourcename, const char *name) { assert (name != NULL); if (outlogfile == NULL) return; if (sourcename == NULL || *sourcename == '\0') fprintf(outlogfile, "POOLNEW\t%s\n", name); 
else if (sourcename[0] == 'l' && sourcename[1] == 'i' && sourcename[2] == 'b' && sourcename[3] != '\0') fprintf(outlogfile, "POOLNEW\tpool/%s/lib%c/%s/%s\n", atoms_components[component], sourcename[3], sourcename, name); else fprintf(outlogfile, "POOLNEW\tpool/%s/%c/%s/%s\n", atoms_components[component], sourcename[0], sourcename, name); outlognonempty = true; } static retvalue callouthook(const char *scriptname, const char *logfilename) { pid_t child; child = fork(); if (child == 0) { /* Try to close all open fd but 0,1,2 */ closefrom(3); sethookenvironment(causingfile, NULL, NULL, NULL); (void)execl(scriptname, scriptname, logfilename, (char*)NULL); { int e = errno; fprintf(stderr, "Error %d executing '%s': %s\n", e, scriptname, strerror(e)); } _exit(255); } if (child < 0) { int e = errno; fprintf(stderr, "Error %d forking: %s!\n", e, strerror(e)); return RET_ERRNO(e); } while (true) { int status; pid_t pid; pid = waitpid(child, &status, 0); if (pid == child) { if (WIFEXITED(status)) { if (WEXITSTATUS(status) == 0) { return RET_OK; } fprintf(stderr, "Outhook '%s' '%s' failed with exit code %d!\n", scriptname, logfilename, (int)(WEXITSTATUS(status))); } else if (WIFSIGNALED(status)) { fprintf(stderr, "Outhook '%s' '%s' killed by signal %d!\n", scriptname, logfilename, (int)(WTERMSIG(status))); } else { fprintf(stderr, "Outhook '%s' '%s' failed!\n", scriptname, logfilename); } return RET_ERROR; } else if (pid == (pid_t)-1) { int e = errno; if (e == EINTR) continue; fprintf(stderr, "Error %d calling waitpid on outhook child: %s\n", e, strerror(e)); return RET_ERRNO(e); } } /* NOT REACHED */ } retvalue outhook_call(const char *scriptname) { retvalue result; assert (outlogfile != NULL); assert (outlogfilename != NULL); if (ferror(outlogfile) != 0) { (void)fclose(outlogfile); fprintf(stderr, "Errors creating '%s'!\n", outlogfilename); result = RET_ERROR; } else if (fclose(outlogfile) != 0) { fprintf(stderr, "Errors creating '%s'!\n", outlogfilename); result = 
RET_ERROR; } else if (!outlognonempty) { unlink(outlogfilename); result = RET_OK; } else { result = callouthook(scriptname, outlogfilename); } outlogfile = NULL; free(outlogfilename); outlogfilename = NULL; return result; } reprepro-4.13.1/optionsfile.c0000644000175100017510000000660312152651661013072 00000000000000/* This file is part of "reprepro" * Copyright (C) 2005,2006 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include "error.h" #include "names.h" #include "optionsfile.h" void optionsfile_parse(const char *directory, const struct option *longopts, void handle_option(int, const char *)) { FILE *f; char *filename; char buffer[1000]; int linenr = 0; const struct option *option; filename = calc_dirconcat(directory, "options"); if (FAILEDTOALLOC(filename)) { (void)fputs("Out of memory!\n", stderr); exit(EXIT_FAILURE); } f = fopen(filename, "r"); if (f == NULL) { free(filename); return; } while (fgets(buffer, 999, f) != NULL) { size_t l; char *optionname, *argument; linenr++; l = strlen(buffer); if (l == 0 || buffer[l-1] != '\n') { fprintf(stderr, "%s:%d: Ignoring too long (or incomplete) line.\n", filename, linenr); do { if (fgets(buffer, 999, f) == NULL) break; l = strlen(buffer); } while (l > 0 && buffer[l-1] != '\n'); continue; } do{ buffer[l-1] = '\0'; l--; } while (l > 0 && xisspace(buffer[l-1])); 
if (l == 0) continue; optionname = buffer; while (*optionname != '\0' && xisspace(*optionname)) optionname++; assert (*optionname != '\0'); if (*optionname == '#' || *optionname == ';') continue; argument = optionname; while (*argument != '\0' && !xisspace(*argument)) argument++; while (*argument != '\0' && xisspace(*argument)) { *argument = '\0'; argument++; } if (*argument == '\0') argument = NULL; option = longopts; while (option->name != NULL && strcmp(option->name, optionname) != 0) option++; if (option->name == NULL) { fprintf(stderr, "%s:%d: unknown option '%s'!\n", filename, linenr, optionname); exit(EXIT_FAILURE); } if (option->has_arg==no_argument && argument != NULL) { fprintf(stderr, "%s:%d: option '%s' has an unexpected argument '%s'!\n", filename, linenr, optionname, argument); exit(EXIT_FAILURE); } if (option->has_arg==required_argument && argument == NULL) { fprintf(stderr, "%s:%d: option '%s' is missing an argument!\n", filename, linenr, optionname); exit(EXIT_FAILURE); } if (option->flag == NULL) handle_option(option->val, argument); else { *option->flag = option->val; handle_option(0, argument); } } if (ferror(f) != 0) { int e = ferror(f); fprintf(stderr, "%s: error while reading config file: %d=%s\n", filename, e, strerror(e)); exit(EXIT_FAILURE); } if (fclose(f) != 0) { int e = errno; fprintf(stderr, "%s: error while reading config file: %d=%s\n", filename, e, strerror(e)); exit(EXIT_FAILURE); } free(filename); } reprepro-4.13.1/docs/0000755000175100017510000000000012152655346011402 500000000000000reprepro-4.13.1/docs/Makefile.am0000644000175100017510000000064012152651661013352 00000000000000 EXTRA_DIST = short-howto reprepro.1 changestool.1 rredtool.1 recovery bzip.example tiffany.example di.example/README di.example/DI-filter.sh di.example/distributions di.example/updates reprepro.bash_completion reprepro.zsh_completion FAQ changelogs.example manual.html copybyhand.example outstore.py sftp.py outsftphook.py man_MANS = reprepro.1 changestool.1 
rredtool.1 MAINTAINERCLEANFILES = $(srcdir)/Makefile.in reprepro-4.13.1/docs/tiffany.example0000755000175100017510000001755312152651661014351 00000000000000#!/usr/bin/env python ############################################################################# # generates partial package updates list as reprepro hook # (to be used by apt-get >= 0.6.44, apt-qupdate or things compatible with that) # changes Copyright 2005 Bernhard R. Link # as this is used as hook, it does not need any parsing of # Configuration or Handling of architectures and components. # Also reprepro will present old and new file, so it does not # need to store a permanent copy of the last version. # This needs either python-apt installed or you have to change # it to use another sha1 calculation method. # HOW TO USE: # - install python-apt # - make sure your paths contain no ' characters. # - be aware this is still quite experimental and might not # report some errors properly # - copy this file to your conf/ directory # - uncompress this file if it is compressed # - make it executeable # - add something like the following to the every distribution # in conf/distributions you want to have diffs for: # # DscIndices: Sources Release . .gz tiffany # DebIndices: Packages Release . .gz tiffany # # The first line is for source indices, the second for binary indices. # Make sure uncompressed index files are generated (the single dot in those # lines), as this version only diffs the uncompressed files. 
# This file is a heavily modified version of apt-qupdate's tiffany, # (downloaded from http://ftp-master.debian.org/~ajt/tiffani/tiffany # 2005-02-20)which says: #-------------------------------------------------------------------- # idea and basic implementation by Anthony, some changes by Andreas # parts are stolen from ziyi # # Copyright (C) 2004-5 Anthony Towns # Copyright (C) 2004-5 Andreas Barth #-------------------------------------------------------------------- # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################# import sys, os, time import apt_pkg ################################################################################ def usage (exit_code=0): print """Usage: tiffani directory newfile oldfile mode 3>releaselog Write out ed-style diffs to Packages/Source lists This file is intended to be called by reprepro as hook given to DebIndices, UDebIndices or DscIndices. 
""" sys.exit(exit_code); def tryunlink(file): try: os.unlink(file) except OSError: print "warning: removing of %s denied" % (file) class Updates: def __init__(self, readpath = None): self.can_path = None self.history = {} self.max = 14 self.readpath = readpath self.filesizesha1 = None if readpath: try: f = open(readpath + "/Index") x = f.readline() def read_hashs(ind, f, self, x=x): while 1: x = f.readline() if not x or x[0] != " ": break l = x.split() if not self.history.has_key(l[2]): self.history[l[2]] = [None,None] self.history[l[2]][ind] = (l[0], int(l[1])) return x while x: l = x.split() if len(l) == 0: x = f.readline() continue if l[0] == "SHA1-History:": x = read_hashs(0,f,self) continue if l[0] == "SHA1-Patches:": x = read_hashs(1,f,self) continue if l[0] == "Canonical-Name:" or l[0]=="Canonical-Path:": self.can_path = l[1] if l[0] == "SHA1-Current:" and len(l) == 3: self.filesizesha1 = (l[1], int(l[2])) x = f.readline() except IOError: 0 def dump(self, out=sys.stdout): if self.can_path: out.write("Canonical-Path: %s\n" % (self.can_path)) if self.filesizesha1: out.write("SHA1-Current: %s %7d\n" % (self.filesizesha1)) hs = self.history l = self.history.keys() l.sort() cnt = len(l) if cnt > self.max: for h in l[:cnt-self.max]: tryunlink("%s/%s.gz" % (self.readpath, h)) del hs[h] l = l[cnt-self.max:] out.write("SHA1-History:\n") for h in l: out.write(" %s %7d %s\n" % (hs[h][0][0], hs[h][0][1], h)) out.write("SHA1-Patches:\n") for h in l: out.write(" %s %7d %s\n" % (hs[h][1][0], hs[h][1][1], h)) def sizesha1(f): size = os.fstat(f.fileno())[6] f.seek(0) sha1sum = apt_pkg.sha1sum(f) return (sha1sum, size) def getsizesha1(name): f = open(name, "r") r = sizesha1(f) f.close() return r def main(): if len(sys.argv) != 5: usage(1) directory = sys.argv[1] newrelfile = sys.argv[2] oldrelfile = sys.argv[3] mode = sys.argv[4] # this is only needed with reprepro <= 0.7 if oldrelfile.endswith(".gz"): sys.exit(0); oldfile = "%s/%s" % (directory,oldrelfile) newfile= "%s/%s" % 
(directory,newrelfile) outdir = oldfile + ".diff" if mode == "old": # Nothing to do... if os.path.isfile(outdir + "/Index"): os.write(3,oldrelfile + ".diff/Index") sys.exit(0); if mode == "new": # TODO: delete possible existing Index and patch files? sys.exit(0); print "making diffs between %s and %s: " % (oldfile, newfile) o = os.popen("date +%Y-%m-%d-%H%M.%S") patchname = o.readline()[:-1] o.close() difffile = "%s/%s" % (outdir, patchname) upd = Updates(outdir) oldsizesha1 = getsizesha1(oldfile) # should probably early exit if either of these checks fail # alternatively (optionally?) could just trim the patch history if upd.filesizesha1: if upd.filesizesha1 != oldsizesha1: print "old file seems to have changed! %s %s => %s %s" % (upd.filesizesha1 + oldsizesha1) sys.exit(1); newsizesha1 = getsizesha1(newfile) if newsizesha1 == oldsizesha1: print "file unchanged, not generating diff" if os.path.isfile(outdir + "/Index"): os.write(3,oldrelfile + ".diff/Index\n") else: if not os.path.isdir(outdir): os.mkdir(outdir) print "generating diff" while os.path.isfile(difffile + ".gz"): print "This was too fast, diffile already there, waiting a bit..." time.sleep(2) o = os.popen("date +%Y-%m-%d-%H%M.%S") patchname = o.readline()[:-1] o.close() difffile = "%s/%s" % (outdir, patchname) # TODO make this without shell... os.system("diff --ed '%s' '%s' > '%s'" % (oldfile,newfile, difffile)) difsizesha1 = getsizesha1(difffile) # TODO dito os.system("gzip -9 '%s'" %difffile) upd.history[patchname] = (oldsizesha1, difsizesha1) upd.filesizesha1 = newsizesha1 f = open(outdir + "/Index.new", "w") upd.dump(f) f.close() # Specifing the index should be enough, it contains checksums for the diffs os.write(3,oldrelfile + ".diff/Index.new\n") ################################################################################ if __name__ == '__main__': main() reprepro-4.13.1/docs/manual.html0000644000175100017510000021366312152651661013474 00000000000000 reprepro manual

reprepro manual

This manual documents reprepro, a tool to generate and administer Debian package repositories.
Other useful resources:

Table of contents

Sections of this document:

Introduction

What reprepro does

Reprepro is a tool to take care of a repository of Debian packages (.dsc,.deb and .udeb). It installs them to the proper places, generates indices of packages (Packages and Sources and their compressed variants) and of index files (Release and optionally Release.gpg), so tools like apt know what is available and where to get it from. It will keep track which file belongs to where and remove files no longer needed (unless told to not do so). It can also make (partial) partial mirrors of remote repositories, including merging multiple sources and automatically (if explicitly requested) removing packages no longer available in the source. And many other things (sometimes I fear it got a few features too much).

What reprepro needs

It needs some libraries (zlib, libgpgme, libdb (Version 3, 4.3 or 4.4)) and can be compiled with some more for additional features (libarchive, libbz2). Otherwise it only needs apt's methods (only when downloading stuff), gpg (only when signing or checking signatures), and if compiled without libarchive it needs tar and ar installed.
If you tell reprepro to call scripts for you, you will of course need the interpreters for these scripts: The included example to generate pdiff files needs python. The example to extract changelogs needs dpkg-source.

What this manual aims to do

This manual aims to give some overview over the most important features, so people can use them and so that I do not implement something a second time because I forgot support is already there. For a full reference of all possible commands and config options take a look at the man page, as this manual might miss some of the more obscure options.

First steps

generate a repository with local packages

  • Choose a directory (or create it).
  • Create a subdirectory called conf in there.
  • In the conf/ subdirectory create a file called distributions, with content like:
    Codename: mystuff
    Components: main bad
    Architectures: sparc i386 source
    
    or with content like:
    Codename: andy
    Suite: rusty
    Components: main bad
    Architectures: sparc i386 source
    Origin: myorg
    Version: 20.3
    Description: my first little repository
    
    (Multiple distributions are separated by empty lines, Origin, Version and Description are just copied to the generated Release files, more things controlling reprepro can appear which are described later).
  • If your conf/distributions file contained a Suite: and you are too lazy to generate the symbolic links yourself, call:
    reprepro -b $YOURBASEDIR createsymlinks
    
  • Include some package, like:
    reprepro -b $YOURBASEDIR include mystuff mypackage.changes
    
    or:
    reprepro -b $YOURBASEDIR includedeb mystuff mypackage.deb
    
  • Take a look at the generated pool and dists directories. They contain everything needed to apt-get from. Tell apt to include it by adding the following to your sources.list:
    deb file:///$YOURBASEDIR mystuff main bad
    
    or make it available via http or ftp and do the same http:// or ftp:// source.

mirroring packages from other repositories

This example shows how to generate a mirror of a single architecture with all packages of etch plus security updates:
  • Choose a directory (or create it).
  • Create a subdirectory called conf in there (if not already existent).
  • In the conf/ subdirectory create a file called distributions, with content like (or add to that file after an empty line):
    Origin: Debian
    Label: Debian
    Suite: stable
    Version: 4.0
    Codename: etch
    Architectures: i386
    Components: main
    Description: Debian 4.0 etch + security updates
    Update: - debian security
    Log: logfile
    
    Actually only Codename, Components, Architecture and Update is needed, the rest is just information for clients. The Update line tells to delete everything no longer available (-), then add the debian and security rules, which still have to be defined:
  • In the conf/ subdirectory create a file called updates, with content like (or add to that file after an empty line:): or with content like:
    Name: security
    Method: http://security.debian.org/debian-security
    Fallback: ftp://klecker.debian.org/debian-security
    Suite: */updates
    VerifyRelease: A70DAF536070D3A1|B5D0C804ADB11277
    Architectures: i386
    Components: main
    UDebComponents:
    
    Name: debian
    Method: http://ftp2.de.debian.org/debian
    Config: Acquire::Http::Proxy=http://proxy.myorg.de:8080
    VerifyRelease: A70DAF536070D3A1|B5D0C804ADB11277
    
    (If there are no Architecture, Components or UDebComponents, it will try all the distribution to update has. Fallback means a URL to try when the first cannot offer some file (Has to be the same method)).
  • Tell reprepro to update:
    reprepro -b $YOURBASEDIR update etch
    
  • Take a look at at the generated pool and dists directories. They contain everything needed to apt-get from. Tell apt to include it by adding the following to your sources.list:
    deb file:///$YOURBASEDIR etch main
    
    or make it available via http or ftp.

Repository basics

An apt-getable repository of Debian packages consists of two parts: the index files describing what is available and where it is and the actual Debian binary (.deb), installer binary (.udeb), and source (.dsc together with .tar.gz or .orig.tar.gz and .diff.gz) packages.
While you do not need to know what these look like in order to use reprepro, it's always a good idea to know what you are creating.

Index files

All index files are in subdirectories of a directory called dists. Apt is very particular about what names those should have, including the name dists itself. Including all optional and extension files, the hierarchy looks like this:
dists
CODENAME
Each distribution has its own subdirectory here, named by its codename.
Release
This file describes what distribution this is and the checksums of all index files included.
Release.gpg
This is the optional detached gpg signature of the Release file. Take a look at the section about signing for how to activate this.
Contents-ARCHITECTURE.gz
This optional file lists all files and which packages they belong to. It's downloaded and used by tools like apt-file to allow users to determine which package to install to get a specific file.
To activate generating of these files by reprepro, you need a Contents header in your distribution declaration.
COMPONENT1
Each component has its own subdirectory here. They can be named whatever users can be bothered to write into their sources.list, but things like main, non-free and contrib are common. But funny names like bad or universe are just as possible.
source
If this distribution supports sources, this directory lists which source packages are available in this component.
Release
This file contains a copy of those information about the distribution applicable to this directory.
Sources
Sources.gz
Sources.bz2
These files contain the actual description of the source Packages. By default only the .gz file is created; to create all three, add the following to the declarations of the distributions:
DscIndices Sources Release . .gz .bz2
That header can also be used to name those files differently, but then apt will no longer find them...
Sources.diff
This optional directory contains diffs, so that only parts of the index file must be downloaded if it changed. While reprepro cannot generate these so-called pdiffs itself, it ships both with a program called rredtool and with an example python script to generate those.
binary-ARCHITECTURE
Each architecture has its own directory in each component.
Release
This file contains a copy of those information about the distribution applicable to this directory.
Packages
Packages.gz
Packages.bz2
These files contain the actual description of the binary Packages. By default only the uncompressed and .gz files are created. To create all three, add the following to the declarations of the distributions:
DebIndices Packages Release . .gz .bz2
That header can also be used to name those files differently, but then apt will no longer find them...
Packages.diff
This optional directory contains diffs, so that only parts of the index file must be downloaded if it changed. While reprepro cannot generate these so-called pdiffs itself, it ships both with a program called rredtool and with an example python script to generate those.
debian-installer
This directory contains information about the .udeb modules for the Debian-Installer. Those are actually just a very stripped down form of normal .deb packages and thus the hierarchy looks very similar:
binary-ARCHITECTURE
Packages
Packages.gz
COMPONENT2
There is one dir for every component. All look just the same.
To allow accessing distribution by function instead of by name, there are often symbolic links from suite to codenames. That way users can write
deb http://some.domain.tld/debian SUITE COMPONENT1 COMPONENT2
instead of
deb http://some.domain.tld/debian CODENAME COMPONENT1 COMPONENT2
in their /etc/apt/sources.list and totally get surprised by getting something new after a release.

Package pool

While the index files have required filenames, the actual files are given just as a relative path to the base directory you specify in your sources list. That means apt can get them no matter what scheme is used to place them. The classical way Debian used till woody was to just put them in subdirectories of the binary-ARCHITECTURE directories, with the exception of the architecture-independent packages, which were put into an artificial binary-all directory. This was replaced for the official repository with package pools, which reprepro also uses. (Actually reprepro stores everything in pool a bit longer than the official repositories, that's why it recalculates all filenames without exception).
In a package pool, all package files of all distributions in that repository are stored in a common directory hierarchy starting with pool/, only separated by the component they belong to and the source package name. As everything this has disadvantages and advantages:
  • disadvantages
    • different files in different distributions must have different filenames
    • it's impossible to determine which distribution a file belongs to by path and filename (think mirroring)
    • packages can no longer be grouped together in common subdirectories by having similar functions
  • advantages
    • the extremely confusing situation of having differently built packages with the same version in different distributions becomes impossible by design.
    • the source (well, if it exists) is in the same directory as the binaries generated from it
    • same files in different distributions need disk-space and bandwidth only once
    • each package can be found only knowing component and sourcename
Now let's look at the actual structure of a pool (there is currently no difference between the pool structure of official Debian repositories and those generated by reprepro):
pool
The directory all this resides in is normally called pool. That is nowhere hard coded in apt, which only looks at the relative directory names in the index files. But there is also no reason to name it differently.
COMPONENT1
Each component has it's own subdirectory here. They can be named whatever users can be bothered to write into their sources.list, but things like main, non-free and contrib are common. But funny names like bad or universe are just as possible.
a
As there are really many different source packages, the directory would be too full when all put here. So they are separated in different directories. Source packages starting with lib are put into a directory named after the first four letters of the source name. Everything else is put in a directory having the first letter as name.
asource
Then the source package name follows. So this directory pool/COMPONENT1/a/asource/ would contain all files of different versions of the hypothetical package asource.
asource
a-source_version.dsc
a-source_version.tar.gz
The actual source package consists of its description file (.dsc) and the files references by that.
binary_version_ARCH1.deb
binary_version_ARCH2.deb
binary2_version_all.deb
di-module_version_ARCH1.udeb
Binary packages are stored here too. So to know where a binary package is stored you need to know what its source package name is.
liba
As described before packages starting with lib are not stored in l but get a bit more context.
COMPONENT2
There is one dir for every component. All look just the same.
As said before, you don't need to know this hierarchy in normal operation. reprepro will put everything where it belongs, keep account of what is there and needed by which distribution or snapshot, and delete files no longer needed. (Unless told otherwise or when you are using the low-level commands).

Config files

Configuring a reprepro repository is done by writing some config files into a directory. This directory is currently the conf subdirectory of the base directory of the repository, unless you specify --confdir or set the environment variable REPREPRO_CONFIG_DIR.
options
If this file exists, reprepro will consider each line an additional command line option. Arguments must be in the same line after an equal sign. Options specified on the command line take precedence.
distributions
This is the main configuration file and the only one that is needed in all cases. It lists the distributions this repository contains and their properties.
See First steps for a short example or the manpage for a list of all possible fields.
updates
Rules about where to download packages from other repositories. See the section Mirroring / Updating for more examples or the man page for a full reference.
pulls
Rules about how to move packages in bulk between distributions within the same repository. See the section Propagation of packages for an example or the man page for full reference.
incoming
Rules for incoming queues as processed by processincoming. See Processing an incoming queue for more information.

Generation of index files

Deciding when to generate

As reprepro stores all state in its database, you can decide when you want them to be written to the dists/ directory. You can always tell reprepro to generate those files with the export command:
reprepro -b $YOURBASEDIR export $CODENAMES
This can be especially useful, if you just edited conf/distributions and want to test what it generates.

While that command regenerates all files, in normal operation reprepro will only regenerate files where something just changed or that are missing. With the --export option you can control when this will happen:

never
Don't touch any index files. This can be useful for doing multiple operations in a row and not wanting to regenerate the indices all the time. Note that unless you do an explicit export or change the same parts later without that option, the generated index files may be permanently out of date.
changed
This is the default behaviour since 3.0.1. Only export distributions where something changed (and no error occurred that makes an inconsistent state likely). And in those distributions only (re-)generate files whose content should have been changed by the current action or which are missing.
lookedat
New name for normal since 3.0.1.
normal
This was the default behaviour until 3.0.0 (changed in 3.0.1). In this mode all distributions are processed that were looked at without error (where error means only errors happening while the package database was open and thus having a chance to cause strange contents). This ensures that even after an operation that had nothing to do, the looked-at distribution has all the files exported that are needed to access it. (But still only files that are missing or whose content would change with this action are regenerated).
force
Also try to write the current state if some error occurred. In all other modes reprepro will not write the index files if there was a problem. While this keeps the repository usable for users, it means that you will need an explicit export to write possible other changes done before that in the same run. (reprepro will tell you that at the end of the run with error, but you should not miss it).

Distribution specific fields

There are a lot of conf/distributions headers to control what index files to generate for some distribution, how to name them, how to postprocess them and so on. The most important are:

Fields for the Release files

The following headers are copied verbatim to the Release file, if they exist: Origin, Label, Codename, Suite, Architectures (excluding a possible value "source"), Components, Description, and NotAutomatic, ButAutomaticUpgrades.

Choosing compression and file names

Depending on the type of the index files, different files are generated. Not specifying anything is equivalent to:
 DscIndices Sources Release .gz
 DebIndices Packages Release . .gz
 UDebIndices Packages . .gz
This means to generate Release, Sources.gz for sources, Release, Packages and Packages.gz for binaries and Packages and Packages.gz for installer modules.
The format of these headers is the name of the index file to generate, followed by the optional name for a per-directory release description (when no name is specified, no file is generated). Then a list of compressions: A single dot (.) means generating an uncompressed index, .gz means generating a gzipped output, while .bz2 requests a bzip2ed file. (.bz2 is not available when disabled at compile time). After the compressions a script can be given that is called to generate/update additional forms, see "Additional index files".

Signing

If there is a SignWith header, reprepro will try to generate a Release.gpg file using libgpgme. If the value of the header is yes it will use the first key it finds, otherwise it will give the option to libgpgme to determine the key. (Which means fingerprints and keyids work fine, and whatever libgpgme supports, which might include most that gpg supports to select a key).
The best way to deal with keys needing passphrases is to use gpg-agent. The only way to specify which keyring to use is to set the GNUPGHOME environment variable, which will affect all distributions.

Contents files

Reprepro can generate files called dists/CODENAME/Contents-ARCHITECTURE.gz listing all files in all binary packages available for the selected architecture in that distribution and which package they belong to.
This file can either be used by humans directly or downloaded and searched with tools like apt-file.
To activate generating of these files by reprepro, you need a Contents header in that distribution's declaration in conf/distributions, like:
Contents:
Versions before 3.0.0 need a ratio number there, like:
Contents: 1
The number is the inverse ratio of not yet looked at and cached files to process in every run. The larger the more packages are missing. 1 means to list everything.
The arguments of the Contents field and other fields control which Architectures to generate Contents files for and which Components to include in those. For example
Contents: udebs nodebs . .gz .bz2
ContentsArchitectures: ia64
ContentsComponents:
ContentsUComponents: main
means to not skip any packages, generate Contents for .udeb files, not generating Contents for .debs. Also it is only generated for the ia64 architecture and only packages in component main are included.

Additional index files (like .diff)

Index files reprepro cannot generate itself, can be generated by telling it to call a script.
using rredtool to generate pdiff files
Starting with version 4.1.0, the rredtool coming with reprepro can be used as hook to create and update Packages.diff/Index files.
Unlike dak (which created the official Debian repositories) or the tiffany.py script (see below) derived from dak, a user will only need to download one of those patches, as new changes are merged into the old files.
To use it, make sure you have diff and gzip installed. Then add something like the following to the headers of the distributions that should use this in conf/distributions:
 DscIndices: Sources Release . .gz /usr/bin/rredtool
 DebIndices: Packages Release . .gz /usr/bin/rredtool
the tiffany example hook script (generates pdiff files)
This example generates Packages.diff and/or Sources.diff directories containing a set of ed-style patches, so that people do not redownload the whole index for just some small changes.
To use it, copy tiffany.example from the examples directory into your conf directory. (or any other directory, then you will need to give an absolute path later). Unpack, if needed. Rename it to tiffany.py and make it executable. Make sure you have python-apt, diff and gzip installed. Then add something like the following to the headers of the distributions that should use this in conf/distributions:
 DscIndices: Sources Release . .gz tiffany.py
 DebIndices: Packages Release . .gz tiffany.py
More information can be found in the file itself. You should read it.
the bzip2 example hook script
This is a very simple example. Simple and mostly useless, as reprepro has built-in .bz2 generation support, unless you compiled it yourself with --without-libbz2 or with no libbz2-dev installed.
To use it, copy bzip.example from the examples directory into your conf directory. (or any other directory, then you will need to give an absolute path later). Unpack, if needed. Rename it to bzip2.sh and make it executable. Then add something like the following to the headers of the distributions that should use this in conf/distributions:
 DscIndices: Sources Release . .gz bzip2.sh
 DebIndices: Packages Release . .gz bzip2.sh
 UDebIndices: Packages . .gz bzip2.sh
The script will compress the index file using the bzip2 program and tell reprepro which files to include in the Release file of the distribution.
internals
TO BE CONTINUED

...

TO BE CONTINUED

Local packages

There are two ways to get packages not yet in any repository into yours.
includedsc, includedeb, include
These are for including packages at the command line. Many options are available to control what actually happens. You can easily force components, section and priority and/or choose to include only some files or only in specific architectures. (Can be quite useful for Architecture: all packages depending on some packages that you will only build some time later for some of your architectures). Files can be moved instead of copied and most sanity checks overridden. They are also optimized towards being fast and simply try things instead of checking a long time whether they would succeed.
processincoming
This command checks for changes files in an incoming directory. Being optimized for automatic processing (i.e. trying to check everything before actually doing anything), it can be slower (as every file is copied at least once to make sure the owner is correct, with multiple partitions another copy can follow). Component, section and priority can only be changed via the distribution's override files. Every inclusion needs a .changes file.
This method is also relatively new (only available since 2.0.0), thus optimisation for automatic procession will happen even more.

Including via command line

There are three commands to directly include packages into your repository: includedeb, includedsc and includechanges. Each needs the codename of the distribution you want to put your package into as first argument and a file of the appropriate type (.deb, .dsc or .changes, respectively) as second argument.
If no component is specified via --component (or short -C), it will be guessed looking at its section and the components of that distribution.
If there are no --section (or short -S) option, and it is not specified by the (binary or source, depending on the type) override file of the distribution, the value from the .changes-file is used (if the command is includechanges) or it is extracted out of the file (if it is a .deb-file, future versions might also try to extract it from a .dsc's diff or tarball).
Same with the priority and the --priority (or short -P) option.
With the --architecture (or short -A) option, the scope of the command is limited to that architecture. includedeb will add an Architecture: all package only to that architecture (and complain about Debian packages for other architectures). include will do the same and ignore packages for other architectures (source packages will only be included if the value for --architecture is source).
To limit the scope to a specify type of package, use the --packagetype or short -T option. Possible values are deb, udeb and dsc.
When using the --delete option, files will be moved or deleted after copying them. Repeating the --delete option will also delete unused files.
TO BE CONTINUED.

Processing an incoming queue

Using the processincoming command reprepro can automatically process incoming queues. While this is still improvable (reprepro still misses ways to send mails and especially an easy way to send rejection mails to the uploader directly), it makes it easy to have a directory where you place your packages and reprepro will automatically include them.
To get this working you need three things:

The file conf/incoming

describes the different incoming queues. As usual the different chunks are separated by empty lines. Each chunk can have the following fields:
Name
This is the name of the incoming queue, that processincoming wants as argument.
IncomingDir
The actual directory to look for .changes files.
TempDir
To ensure integrity of the processed files and their permissions, every file is first copied from the incoming directory to this directory. Only the user reprepro runs as needs write permissions here. It speeds things up if this directory is in the same partition as the pool.
Allow
This field lists the distributions this incoming queue might inject packages into. Each item can be a pair of a name of a distribution to accept and a distribution to put it into. Each upload has each item in its Distribution: field compared first to last to each of this items and is put in the first distribution accepting it. For example
Allow: stable>etch stable>etch-proposed-updates mystuff unstable>sid
will put a .changes file with Distribution: stable into etch. If that is not possible (e.g. because etch has a UploadersList option not allowing this) it will be put into etch-proposed-updates. And a .changes file with Distribution: unstable will be put into sid, while with Distribution: mystuff will end up in mystuff.
If there is a Default field, the Allow field is optional.
Default
Every upload not catched by an item of the Allow field is put into the distribution specified by this.
If there is a Allow field, the Default field is optional.
Multiple
This field only makes a difference if a .changes file has multiple distributions listed in its Distribution: field. Without this field each of those distributions is tried according to the above rules until the package is added to one (or none accepts it). With this field it is tried for each distribution, so a package can be uploaded to multiple distributions at the same time.
Permit
A list of options to allow things otherwise causing errors. (see the manpage for possible values).
This field is optional.
Cleanup
Determines when and what files to delete from the incoming queue. By default only successfully processed .changes files and the files referenced by those are deleted. For a list of possible options take a look into the man page.
This field is optional.

conf/distribution for processincoming

There are no special requirements on the conf/distribution file by processincoming. So even a simple
Codename: mystuff
Architectures: i386 source
Components: main non-free contrib bad
will work.
The Uploaders field can list a file limiting uploads to this distribution to specific keys and AlsoAcceptFor is used to resolve unknown names in conf/incoming's Allow and Default fields.

Getting processincoming called.

While you can just call reprepro processincoming manually, having an incoming queue needing manual intervention takes all the fun out of having an incoming queue, so usually some automatic way is chosen:
  • Dupload and dput have ways to call a hook after a package was uploaded. This can be an ssh to the host calling reprepro. The disadvantage is having to configure this in every .dupload.conf on every host you want to upload from and to give everyone who should upload ssh access and permissions on the archive. The advantage is you can configure reprepro to have interactive scripts or ask for passphrases.
  • Install a cron-job calling reprepro every 5 minutes. Cron is usually available everywhere and getting the output sent by mail to you or a mailing list is easy. The annoying part is having to wait almost 5 minutes for the processing.
  • Use something like inoticoming. Linux has a syscall called inotify, allowing a program to be run whenever something happens to a file. One program making use of this is inoticoming. It watches a directory using this facility and whenever a .changes file is completed it can call reprepro for you. (As this happens directly, make sure you always upload the .changes file last, dupload and dput always ensure this). This can be combined with Debian's cron-extension to have a program started at boot time with the @reboot directive. For example with a crontab like:
    MAILTO=myaddress@somewhere.tld
    
    @reboot inoticoming --logfile /my/basedir/logs/i.log /my/basedir/incoming/ --stderr-to-log --stdout-to-log --suffix '.changes' --chdir /my/basedir reprepro -b /my/basedir --waitforlock 100 processincoming local {} \;
    

Mirroring / Updating

Reprepro can fetch packages from other repositories. For this it uses apt's methods from /usr/lib/apt/methods/ so everything (http, ftp, ...) that works with apt should also work with reprepro. Note that this works on the level of packages, even though you can tell reprepro to create a distribution having always the same packages as some remote repository, the repository as a whole may not look exactly the same but only have the same set of packages in the same versions.
You can also only mirror a specific subset of packages, merge multiple repositories into one distribution, or even have distributions mixing remote and local packages.
Each distribution to receive packages from other repositories needs an Update: field listing the update rules applied to it. Those update rules are listed in conf/updates. There is also the magic - update rule, which tells reprepro to delete all packages not readded by later rules.
To make reprepro to update all distributions call reprepro update without further arguments, or give the distributions to update as additional arguments.
Let's start with some examples:

Updating examples

Let's assume you have the following conf/distributions
Codename: etch
Architectures: i386 source
Components: main contrib
Update: local - debian security

Codename: mystuff
Architectures: abacus source
Components: main bad
Update: debiantomystuff
and the following conf/updates
Name: local
Method: http://ftp.myorg.tld/debian

Name: debian
Method: http://ftp.de.debian.org/debian
VerifyRelease: A70DAF536070D3A1
Config: Acquire::Http::Proxy=http://proxy.yours.org:8080

Name: security
Suite: */updates
Method: http://security.eu.debian.org/
Fallback: http://security.debian.org/
VerifyRelease: A70DAF536070D3A1
Config: Acquire::Http::Proxy=http://proxy.yours.org:8080

Name: debiantomystuff
Suite: sid
Method: http://ftp.de.debian.org/debian
Architectures: i386>abacus source
Components: main non-free>bad contrib>bad
FilterFormula: Architecture (== all)| !Architecture
FilterList: deinstall list
and a file conf/list with some output as dpkg --get-selections is printing.
If you then run reprepro update etch or reprepro checkupdate etch, reprepro looks at etch's Update: line and finds four rules. The first is the local rule, which only has a method, so that means it will download the Release file from http://ftp.myorg.tld/debian/dists/etch/Release and (unless it already has downloaded them before or that repository does not have all of them) downloads the binary-i386/Packages.gz and source/Sources.gz files for main and contrib. The same is done for the debian and security rules. As they have a VerifyRelease field, Release.gpg is also downloaded and checked to be signed with the given key (which you should have imported into your gpg keyring before). As security has a Suite: field, not the codename, but the content of this field (with a possible * replaced by the codename), is used as distribution to get.
Then, for each part of the distribution, it will parse the files it got from left to right. For each package it starts with the version currently in the distribution; if there is a newer one in local it will mark this. Then there is the delete rule -, which will mark it to be deleted (but remembers what was there, so if later the version in the distribution or the version in local turns out newest, it will get them from here, avoiding slow downloads from far away). Then it will look into debian and then into security, whether they have a newer version (or the same version, clearing the deletion mark).
If you issued checkupdate reprepro will print what it would do now, otherwise it tries to download all the needed files and when it got all, change the packages in the distribution to the new ones, export the index files for this distribution and finally delete old files no longer needed.
TO BE CONTINUED.

Propagation of packages

You can copy packages between distributions using the pull and copy commands.
With the copy command you can copy packages by name from one distribution to the other within the same repository.
With the pull command you can pull all packages (or a subset defined by some list, or exceptions by some list, or by some formula, or ...) from one distribution to another within the same repository.
Note that both assume the filenames of the corresponding packages in the pool will not differ, so you cannot move packages from one component to another.
Let's just look at a little example, more information can be found in the man page.
Assume you upload all new packages to a distribution and you want another so you can keep using an old version until you know the newer works, too. One way would be to use something like the following conf/distributions:
Codename: development
Suite: unstable
Components: main extra
Architectures: i386 source

Codename: bla
Suite: testing
Components: main extra
Architectures: i386 source
Pull: from_development
and conf/pulls:
Name: from_development
From: development
i.e. you have two distributions, bla and development. Now you can just upload stuff to development (or it's alias unstable). And when you want a single package to go to testing, you can use the copy command:
reprepro copy bla development name1 name2 name3
If you do not want to copy all packages of a given name, but only some of them, you can use -A, -T and -C:
reprepro -T deb -A i386 copy bla development name1
will copy .deb packages called name1 from the i386 parts of the distribution.
TO BE CONTINUED

Snapshots

There is a gensnapshot command.
TO BE DOCUMENTED

Source package tracking

TO BE DOCUMENTED

Extending reprepro / Hooks and more

When reprepro misses some functionality, it often can be added by some kind of hook.
Currently you can execute your own scripts at the following occasions:

Scripts to be run when adding or removing packages

Whenever a package is added or removed, you can tell reprepro to log that to some file and/or call a script using the Log: directive in conf/distributions.
This script can send out mails and do other logging stuff, but despite the name, it is not restricted to logging.

Automatically extracting changelog and copyright information

reprepro ships with an example script to extract debian/changelog and debian/copyright files from source packages into a hierarchy loosely resembling the way changelogs are made available at http://packages.debian.org/changelogs/.
All you have to do is to copy (or unpack if compressed) the file changelogs.example from the examples directory in the reprepro source or /usr/share/doc/reprepro/examples/ of your installed reprepro package into your conf/ directory (or somewhere else, then you will need an absolute path later), perhaps change some directories specified in it and add something like the following lines to all distributions in conf/distributions that should use this feature:
Log:
 --type=dsc changelogs.example
If you still want to log to some file, just keep the filename there:
Log: mylogfilename
 --type=dsc changelogs.example
Then cause those files to be generated for all existing files via
reprepro rerunnotifiers
and all future source packages added or removed will get this list automatically updated.

Writing your own Log: scripts

You can list an arbitrary amount of scripts, to be called at specified times (which can overlap or even be the same):
Log: logfilename
 --type=dsc script-to-run-on-source-package-changes
 script-to-run-on-package-changes
 another-script-to-run-on-package-changes
 --type=dsc --component=main script-to-run-on-main-source-packages
 --architecture=i386 --type=udeb script-to-run-on-i386-udebs
 --changes script-to-run-on-include-or-processincoming
There are two kinds of scripts: The first one is called when a package was added or removed. Using the --architecture=, --component= and --type= options you can limit it to specific parts of the distribution. The second kind is marked with --changes and is called when a .changes-file was added with include or processincoming. Both are called asynchronously in the background after everything was done, but before no longer referenced files are deleted (so the files of the replaced or deleted package are still around).
Calling conventions for package addition/removal scripts
This type of script is called with a variable number of arguments. The first argument is the action. This is either add, remove or replace. The next four arguments are the codename of the affected distribution and the packagetype, component and architecture in that distribution affected. The sixth argument is the package's name. After that is the version of the added package (add and replace) and the version of the removed package (remove and replace). Finally the filekeys of the new (add and replace) and/or removed (remove and replace) package are listed starting with the marker "--" followed by each filekey (the name of the file in the pool/ relative to REPREPRO_OUT_DIR) as its own argument.
The environment variable REPREPRO_CAUSING_COMMAND contains the command of the action causing this change. The environment variable REPREPRO_CAUSING_FILE contains the name of the file given at the command line causing this package to be changed, if there is one. (i.e. with includedeb, includedsc and include). The environment variables REPREPRO_CAUSING_RULE and REPREPRO_FROM are the name of the update or pull rule pulling in a package and the name of the distribution a package is coming from. What this name is depends on the command and for most commands it is simply not set at all. And of course all the REPREPRO_*_DIR variables are set.
Calling conventions for .changes scripts
This type of script is called with 5 or 6 arguments. The first is always "accepted", to make it easier to check it is configured the right way. The second argument is the codename of the distribution the .changes-file was added to. The third argument is the source name, the fourth the version. The fifth argument is the .changes file itself (in case of processincoming the secure copy in the temporary dir). There is a sixth argument if the .changes-file was added to the pool/: The filekey of the added .changes file (i.e. the filename relative to REPREPRO_OUT_DIR).
The environment variable REPREPRO_CAUSING_COMMAND contains the command of the action causing this change. The environment variable REPREPRO_CAUSING_FILE contains the name of the file given at the command line, if there is one (e.g. with include). And of course all the REPREPRO_*_DIR variables are set.

Scripts to be run to process byhand files

.changes files can (beside the usual packages files to be included in the repository) contain additional files to be processed specially. Those are marked by the special section byhand (in Debian) or raw-something (in Ubuntu). Besides storing them just in the pool beside the packages using the includebyhand value in the Tracking settings you can also let reprepro process a hook to process them when encountering them in the processincoming action (Typical usages are uploading documentation files this way that are unpacked next to the repository, or installer images or stuff like that). To use them add to the distribution's defining stanza in conf/distributions a field like:
ByhandHooks:
 byhand * manifesto.txt handle-byhand.sh
This will call the hook script handle-byhand.sh for every byhand file with section byhand, any priority and filename manifesto.txt. (The first three fields allow glob characters for matching). The script will then be called with 5 arguments: the codename of the distribution, the section, the priority, the filename as found in the changes file and the filename of where the script can find the actual file.

Scripts to be run when creating index files (Packages.gz, Sources.gz)

this hook is described in the section "Additional index files".

Scripts to be run when signing releases

Instead of creating InRelease and Release.gpg files using libgpgme, the SignWith option can also contain an exclamation mark followed by a space and the name of a hook script to call. The script gets three arguments: The filename to sign, the filename of the InRelease file to create and the filename of the Release.gpg to create (a Release.gpg does not need to be created. reprepro will assume you do not care about that legacy file if it is not created).

Scripts to be run after changing the visible files of the repository managed

When using the --outhook command line option (or the corresponding outhook in the options file), reprepro will create a .outlog file in the log directory describing any changes done to the out dir and calls the hook script given as argument with this file as argument. The .outlog file consists of lines each starting with a keyword and then some arguments separated by tab characters. The possible keywords are:
  • POOLNEW: One argument is the filekey of a file newly added to the pool.
  • POOLDELETE: One argument is the filekey of a file removed from the pool.
  • START-DISTRIBUTION, END-DISTRIBUTION: two or three arguments: the codename, the directory, and the suite (if set).
  • START-SNAPSHOT, END-SNAPSHOT: three arguments: the codename, the directory, and the name of the snapshot generated.
  • DISTFILE: three arguments: the directory of the distribution (relative to out dir), the name relative to that directory, and the filename generated by reprepro.
  • DISTSYMLINK: three arguments: the directory of the distribution (relative to out dir), the name relative to that directory, and the symlink target (relative to that directory).
  • DISTDELETE: two arguments: the directory of the distribution (relative to out dir), the name relative to that directory of a file no longer there.
  • DISTKEEP (not yet generated): two arguments: the directory of the distribution (relative to out dir), the name relative to that directory.
All POOLNEW come before any distribution changes referencing them and all POOLDELETE will be afterwards. Each line belonging to a distribution is guaranteed to be between the corresponding START-DISTRIBUTION and END-DISTRIBUTION or between a START-SNAPSHOT and END-SNAPSHOT with the same directory (i.e. there is some redundancy so you can choose to parse the information where it is more convenient for you). The lines starting with DIST describe new or modified files in the distribution description exported by reprepro. No hint is given if that file was previously non-existent, a proper file or a symlink (i.e. if you copy stuff, do not make any assumptions about that). Future versions of reprepro might create DISTKEEP lines to denote files that have not changed (i.e. just ignore those lines to be future-proof). The directories for the distribution entries are what apt expects them (i.e. always starting with dists/), while the third argument to DISTFILE is the name reprepro generated (i.e. starts with the distdir value, which can be configured to not end with dists/).

when reprepro finished

With the --endhook command line option (or the corresponding endhook in the options file) you can specify a hook to be executed after reprepro finished but before reprepro returns to the calling process. The hook gets all the command line arguments after the options (i.e. starting with the name of the action) and the exit code reprepro would have produced. For an example see the man page.

Maintenance

This section lists some commands you can use to check and improve the health of your repository.
Normally nothing of this should be needed, but taking a look from time to time cannot harm.
reprepro -b $YOURBASEDIR dumpunreferenced
This lists all files reprepro knows about that are not marked as needed by anything. Unless you called reprepro with the --keepunreferenced option, those should never occur. Though if reprepro is confused or interrupted it may sometimes prefer keeping files around instead of deleting them.
reprepro -b $YOURBASEDIR deleteunreferenced
This is like the command before, only that such files are directly forgotten and deleted.
reprepro -b $YOURBASEDIR check
Look if all needed files are in fact marked needed and known.
reprepro -b $YOURBASEDIR checkpool
Make sure all known files are still there and still have the same checksum.
reprepro -b $YOURBASEDIR checkpool fast
As the command above, but do not compute checksums.
reprepro -b $YOURBASEDIR tidytracks
If you use source package tracking, check for files kept because of this that should no longer be kept by the current rules.
If you fear your tracking data could have become outdated, you can also try the retrack command:
reprepro -b $YOURBASEDIR retrack
That refreshes the tracking information about packages used and then runs a tidytracks. (Beware: don't do this with reprepro versions before 3.0.0).

Internals

reprepro stores the data it collects in Berkeley DB file (.db) in a directory called db/ or whatever you specified via command line. With a few exceptions, those files are NO CACHES, but the actual data. While some of those data can be regained when you lose those files, they are better not deleted.

packages.db

This file contains the actual package information.
It contains a database for every (codename,component,architecture,packagetype) quadruple available.
Each is indexed by package name and essentially contains the information written to the Packages and Sources files.
Note that if you change your conf/distributions to no longer list some codenames, architectures or components, that will not remove the associated databases in this file. That needs an explicit call to clearvanished.

references.db

This file contains a single database that lists for every file why this file is still needed. This is either an identifier for a package database, a tracked source package, or a snapshot.
Some low level commands to access this are (take a look at the manpage for how to use them):
rereference
recreate references (i.e. forget old and create newly)
dumpreferences
print a list of all references
_removereferences
remove everything referenced by a given identifier
_addreference
manually add a reference

files.db / checksums.db

These files contain what reprepro knows about your pool/ directory, i.e. what files it thinks are there with what sizes and checksums. The file files.db is used by reprepro before version 3.3 and kept for backwards compatibility. If your repository was only used with newer versions you can safely delete it. Otherwise you should run collectnewchecksums before deleting it. The file checksums.db is the new file used since version 3.3. It can store more checksums types (files.db only contained md5sums, checksums.db can store arbitrary checksums and reprepro can even cope with it containing checksum types it does not yet know of) but for compatibility with pre-3.3 versions is not the canonical source of information as long as a files.db file exists.
If you manually put files in the pool or remove them, you should tell reprepro about that. (it sometimes looks for files there without being told, but it never forgets files except when it would have deleted them anyway). Some low level commands (take a look at the man page for how to use them):
collectnewchecksums
Make sure every file is listed in checksums.db and with all checksum types your reprepro supports.
checkpool fast
Make sure all files are still there.
checkpool
Make sure all files are still there and correct.
dumpunreferenced
Show all known files without reference.
deleteunreferenced
Delete all known files without reference.
_listmd5sums
Dump this database (old style)
_listchecksums
Dump this database (new style)
_detect
Add files to the database
_forget
Forget that some file is there
_addmd5sums
Create the database from dumped data
_addchecksums
dito

release.cache.db

In this file reprepro remembers what it already wrote to the dists directory, so that it can write their checksums (including the checksums of the uncompressed variant, even if that was never written to disk) into a newly created Release file without having to trust those files or having to unpack them.

contents.cache.db

This file contains all the lists of files of binary package files where reprepro already needed them. (which can only happen if you requested Contents files to be generated).

tracking.db

This file contains the information of the source package tracking.

Disaster recovery

TO BE DOCUMENTED (see the recovery file until then)

Paranoia

As all software, reprepro might have bugs. And it uses libraries not written by myself, which I'm thus even more sure that they will have bugs. Some of those bugs might be security relevant. This section contains some tips, to reduce the impact of those.
  • Never run reprepro as root.
    All reprepro needs to work are permissions to files, there is no excuse for running it as root.
  • Don't publish your db/ directory.
    The contents of the db directory are not needed by everyone else. Having them available to everyone may make it easier for them to exploit some hypothetical problem in libdb and makes it easier to know in advance how exactly reprepro will act in given circumstances, thus easier to exploit some hypothetical problem.
  • Don't accept untrusted data without need.
    If an attacker cannot do anything, they cannot do anything harmful, either. So if there is no need, don't offer an anonymous incoming queue. dput supports uploading via scp, so just having an only group-writable incoming directory, or even better multiple incoming directories can be a better alternative.
External stuff being used and attack vectors opened by it:
libgpgme/gpg
Almost anything is run through libgpgme and thus gpg. It will be used to check the Release.gpg file, or to read .dsc and .changes files (even when there is no key to look for specified, as that is the best way to get the data from the signed block). Avoiding this by just accepting stuff without looking for signatures on untrusted data is not really an option, so I know of nothing to prevent this type of problem.
libarchive
The .tar files within .deb files are normally (unless that library was not available while compiling) read using libarchive. This happens when a .deb file is to be added (though only after deciding if it should be added, so if it does not have the correct checksum or the .changes did not have the signatures you specified, it is not) or when the file list is to be extracted (when creating Contents files). Note that they are not processed when only mirroring them (of course unless Contents files are generated), as then only the information from the Packages file is copied.
dpkg-deb/tar
If reprepro was compiled without libarchive, dpkg-deb is used instead, which most likely will call tar. Otherwise just the same like the last item.
zlib
When mirroring packages, the downloaded Packages.gz and Sources.gz files are read using zlib. Also the generated .gz files are generated using it. There is no option but hoping there is no security relevant problem in that library.
libbz2
Only used to generate .bz2 files. If you fear simple blockwise writing using that library has a security problem that can be exploited by data looking harmless enough to be written to the generated index files, you can always decide to not tell reprepro to generate .bz2 files.

What reprepro cannot do

There are some things reprepro does not do:
Verbatim mirroring
Reprepro aims to put all files into a coherent pool/ hierarchy. Thus it cannot guarantee that files will have the same relative paths as in the original repository (especially if those have no pool). It also creates the index files from its own indices. While this leads to a tidy repository and possible savings of disk-space, the signatures of the repositories you mirror cannot be used to authenticate the mirror, but you will have to sign (or tell reprepro to sign for you) the result. While this is perfect when you only mirror some parts or specific packages or also have local packages that need local signing anyway, reprepro is no suitable tool for creating a full mirror that can be authenticated without adding the key of this repository.
Placing your files on your own
Reprepro does all the calculation of filenames to save files as, bookkeeping what files are there and what are needed and so on. This cannot be switched off or disabled. You can place files where reprepro will expect them and reprepro will use them if their md5sum matches. But reprepro is not suited if you want those files outside of a pool or in places reprepro does not consider their canonical ones.
Having different files with the same name
take a look in the FAQ (currently question 1.2) why and how to avoid the problem.
reprepro-4.13.1/docs/Makefile.in0000644000175100017510000003305112152655327013370 00000000000000# Makefile.in generated by automake 1.11.6 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software # Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__make_dryrun = \ { \ am__dry=no; \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ echo 'am--echo: ; @echo "AM" OK' | $(MAKE) -f - 2>/dev/null \ | grep '^AM OK$$' >/dev/null || am__dry=yes;; \ *) \ for am__flg in $$MAKEFLAGS; do \ case $$am__flg in \ *=*|--*) ;; \ *n*) am__dry=yes; break;; \ esac; \ done;; \ esac; \ test $$am__dry = yes; \ } pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : subdir = docs DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = 
CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } man1dir = $(mandir)/man1 am__installdirs = "$(DESTDIR)$(man1dir)" NROFF = nroff MANS = $(man_MANS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ ARCHIVECPP = @ARCHIVECPP@ ARCHIVELIBS = @ARCHIVELIBS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CYGPATH_W = @CYGPATH_W@ DBLIBS = @DBLIBS@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build_alias = @build_alias@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host_alias = @host_alias@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir 
= @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ EXTRA_DIST = short-howto reprepro.1 changestool.1 rredtool.1 recovery bzip.example tiffany.example di.example/README di.example/DI-filter.sh di.example/distributions di.example/updates reprepro.bash_completion reprepro.zsh_completion FAQ changelogs.example manual.html copybyhand.example outstore.py sftp.py outsftphook.py man_MANS = reprepro.1 changestool.1 rredtool.1 MAINTAINERCLEANFILES = $(srcdir)/Makefile.in all: all-am .SUFFIXES: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu docs/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu docs/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-man1: $(man_MANS) @$(NORMAL_INSTALL) @list1=''; \ list2='$(man_MANS)'; \ test -n "$(man1dir)" \ && test -n "`echo $$list1$$list2`" \ || exit 0; \ echo " $(MKDIR_P) '$(DESTDIR)$(man1dir)'"; \ $(MKDIR_P) "$(DESTDIR)$(man1dir)" || exit 1; \ { for i in $$list1; do echo "$$i"; done; \ if test -n "$$list2"; then \ for i in $$list2; do echo "$$i"; done \ | sed -n '/\.1[a-z]*$$/p'; \ fi; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \ done; } uninstall-man1: @$(NORMAL_UNINSTALL) @list=''; test -n "$(man1dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; 
done | \ sed -n '/\.1[a-z]*$$/p'; \ } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ dir='$(DESTDIR)$(man1dir)'; $(am__uninstall_files_from_dir) tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @list='$(MANS)'; if test -n "$$list"; then \ list=`for p in $$list; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \ if test -n "$$list" && \ grep 'ab help2man is required to generate this page' $$list >/dev/null; then \ echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \ grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \ echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \ echo " typically \`make maintainer-clean' will remove them" >&2; \ exit 1; \ else :; fi; \ else :; fi @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(MANS) installdirs: for dir in "$(DESTDIR)$(man1dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) clean: clean-am clean-am: clean-generic mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man1 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-man uninstall-man: uninstall-man1 .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic distclean \ distclean-generic distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-man1 install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic pdf pdf-am ps ps-am uninstall \ uninstall-am uninstall-man uninstall-man1 # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: reprepro-4.13.1/docs/short-howto0000644000175100017510000001455512152651661013550 00000000000000This short HOW-TO describes how to setup a repository using reprepro. First choose a directory where you want to store your repository, 1) Configuration: Generate a directory named conf/. Create a file named "distributions" there. 
Add entries such as: Origin: Debian Label: Debian-All Suite: stable Codename: woody Version: 3.0 Architectures: i386 sparc mips source Components: main non-free contrib Description: Debian woody + woody/non-US + woody/updates #Update: debian non-US security #SignWith: yes Or: Origin: PCPool Label: PCPool Suite: stable Codename: pcpool Version: 3.0 Architectures: i386 source Components: main non-free contrib bad protected server UDebComponents: main Description: PCPool specific (or backported) packages SignWith: yes DebOverride: override UDebOverride: override DscOverride: srcoverride Multiple entries are separated with an empty line. The codename of the distribution is specified with Codename:. It is the primary name of a distribution and e.g. used to determine the directory to create and put the index files into. Update: is described later. If SignWith: is there, it will try to sign it: either use "yes" or give something gpg can use to identify the key you want to use. The other fields are copied into the appropriate "Release" files generated. 2) Adding files to the repository: To add a .deb manually: reprepro -Vb . includedeb pcpool /var/cache/apt/archives/libc6_2.2.5-11.8_i386.deb to add a .changes file: reprepro -Vb . include pcpool test.changes Hint: you can add "-C component", "-A architecture", "-S section" and "-P priority" to give additional hints where it should go. Note -A will not overwrite something to go into another architecture, but simply ignore those not fitting, only "Architecture: all" packages are placed exactly in these architecture. Helps when it is not available for all architectures and each binary version needs a fitting version of the "Architecture: all" package. 3) Removing files from the repository: reprepro -Vb . remove pcpool libc6 to only remove from a specific component or architecture: reprepro -Vb . 
-C main -A i386 remove pcpool libc6 4) Getting information about a package: To see in which architectures/components a package exists and which version it uses. reprepro -b . list pcpool libc6 5) Override-Files: When including packages via "includedeb", "includedsc" or "include" the applicable override file from the distribution it is placed into is used. The file given by DebOverride: for ".deb"s, the file given by UDebOverride: for ".udeb"s and the file given by DscOverride: for ".dsc"s. If the filename starts with a slash (/) it is not relative to the conf directory given with --conf, defaulting to "conf" in the current directory (or in the directory specified with --basedir, if that is given). Note that the Format is those of apt-ftparchive's ExtraOverride, not the old format. An (stupid) example line for that file would be: libc6 Priority extra 6) importing from upstream repositories: The file conf/updates can contain entries like this: Name: debian Method: http://ftp.debian.de/debian VerifyRelease: F1D53D8C4F368D5D Name: non-US Method: http://ftp.debian.de/debian-non-US Suite: */non-US Architectures: i386 sparc mips source Components: main>main non-free>non-free contrib>contrib UDebComponents: VerifyRelease: B629A24C38C6029A Name: security Method: http://security.debian.org/debian-security Suite: */updates UDebComponents: VerifyRelease: F1D53D8C4F368D5D Which of those are used is determined by the Update: line in the description in conf/distributions. When Suite:, Architecture:, Components: or UDebComponents: are not given, those of the distribution to be added are used. The suite of the target can be used as "*" in the Suite: here. VerifyRelease: tells which GPG key to use checking the Release.gpg. Add a "IgnoreRelease: yes" to ignore any Release files. To import components in other components, use the source>target syntax. Method: describes an apt-method, for which the programs from /usr/lib/apt/methods are used... 
To update everything possible do: reprepro -b . update To only update some distributions do: reprepro -b . update woody There is no support for updating a distribution from only specific upstreams yet. You will have to edit conf/distributions for that. The value for VerifyRelease: can be retrieved using: gpg --with-colons --list-keys =============================================================================== The following is from V. Stanley Jaddoe . Make sure to include all sources when allowing everyone access to software only available under GPL to you. Well, you should always supply sources, but in some cases not doing so might cause you trouble. Using reprepro with apache2 (sarge, etch, sid) This example assumes the reprepro repository is under /srv/reprepro/ and that apache2 has been correctly installed and configured. The first step is to create a virtual directory called debian/. Assuming your server runs the host http://www.example.com/, the web repository will be placed at http://www.example.com/debian/. Create an apache2 config file in the conf dir of your reprepro repository, using the following command: cat > /srv/reprepro/conf/apache.conf << EOF Alias /debian /srv/reprepro/ Options +Indexes AllowOverride None order allow,deny allow from all EOF To enable this virtual directory, a symlink has to be created. This can be done using the following command: ln -s /srv/reprepro/conf/apache.conf /etc/apache2/conf.d/reprepro.conf The second step is setting the permissions in such a way that web users can browse the repository, but cannot view the reprepro specific configuration. 
This can be done using the following commands: chown -R root:root /srv/reprepro/ chmod 755 /srv/reprepro/ chown -R root:www-data /srv/reprepro/dists/ /srv/reprepro/pool/ chmod 750 /srv/reprepro/* Reload apache2: /etc/init.d/apache2 reload Check if the repository is viewable by web-users, by pointing your browser to http://www.example.com/debian/ If there are no problems with your reprepro repository and the apache2 configuration, you should see two directories, dists/ and pool/. The last step is to add this new repository to your sources.list. This is as easy as: echo "deb http://www.example.com/debian pcpool main non-free contrib" >> /etc/apt/sources.list reprepro-4.13.1/docs/outstore.py0000755000175100017510000002006212152651661013557 00000000000000#!/usr/bin/python3 # Copyright (C) 2012 Bernhard R. Link # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA # This is an example outhook script. # Actually it is part of the testsuite and does many things # an actual outhook script would never do. # But it checks so many aspects of how a outhook script is called # that it should make quite clear what a outhookscript can expect. 
import sys, os, subprocess, select, dbm def poolfile(outdir, name): s = os.lstat(outdir + '/' + name) return "poolfile %d bytes" % s.st_size def distfile(outdir, name): s = os.lstat(outdir + '/' + name) return "distfile %d bytes" % s.st_size def distsymlink(distdir, target): return "distsymlink -> %s/%s" % (distdir,target) def collecteddistfile(outdir, name): if os.path.islink(outdir + '/' + name): l = os.readlink(outdir + '/' + name) d = os.path.dirname(name) while d and l[0:3] == '../': d = os.path.dirname(d) l = l[3:] if d: d = d + '/' return "distsymlink -> %s%s" % (d,l) else: return distfile(outdir, name) def processfile(logfile, donefile, db): # print("Parsing '%s'" % logfile) lf = open(logfile, 'r', encoding='utf-8') newpoolfiles = [] distributions = [] deletepoolfiles = [] mode = 'POOLNEW' # This parser is wasteful and unnecessarily complicated, but it's # purpose is mainly making sure the output of reprepro is # well-formed and no so much targeted at doing actual work. for l in lf: if l[-1] != '\n': raise CriticalError("Malformed file '%s' (not a text file)" % logfile) l = l[:-1] fields = l.split('\t') if fields[0] != 'POOLNEW': break if len(fields) != 2: raise CriticalError("Malformed file '%s': POOLNEW with more than one argument" % logfile) newpoolfiles.append(fields[1]) else: fields = ['EOF'] while fields[0] == 'BEGIN-DISTRIBUTION' or fields[0] == 'BEGIN-SNAPSHOT': beginmarker = fields[0] endmarker = 'END-' + beginmarker[6:] if len(fields) != 3 and len(fields) != 4: raise CriticalError("Malformed file '%s': wrong number of arguments for %s" % (logfile,beginmarker)) distname = fields[1] distdir = fields[2] distfiles = [] distsymlinks = [] distdeletes = [] for l in lf: if l[-1] != '\n': raise CriticalError("Malformed file '%s' (not a text file)" % logfile) l = l[:-1] fields = l.split('\t') if fields[0] == endmarker: if len(fields) != 3 and len(fields) != 4: raise CriticalError("Malformed file '%s': wrong number of arguments for %s" % (logfile, 
endmarker)) if fields[1] != distname or fields[2] != distdir: raise CriticalError("Malformed file '%s': %s not matching previous %s" % (logfile, endmarker, beginmarker)) break elif fields[0] == 'DISTKEEP': continue elif not fields[0] in ['DISTFILE', 'DISTSYMLINK', 'DISTDELETE']: raise CriticalError("Malformed file '%s': Unexpected '%s'" % (logfile, fields[0])) if len(fields) < 3: raise CriticalError("Malformed file '%s': wrong number of arguments for %s" % (logfile, fields[0])) if fields[1] != distdir: raise CriticalError("Malformed file '%s': wrong distdir '%s' in '%s'" %(logfile, fields[1], fields[0])) if fields[0] == 'DISTFILE': if len(fields) != 4: raise CriticalError("Malformed file '%s': wrong number of arguments for %s" % (logfile, fields[0])) distfiles.append((fields[2], fields[3])) elif fields[0] == 'DISTDELETE': if len(fields) != 3: raise CriticalError("Malformed file '%s': wrong number of arguments for %s" % (logfile, fields[0])) distdeletes.append(fields[2]) elif fields[0] == 'DISTSYMLINK': if len(fields) != 4: raise CriticalError("Malformed file '%s': wrong number of arguments for %s" % (logfile, fields[0])) distsymlinks.append((fields[2], fields[3])) else: raise CriticalError("Malformed file '%s': unexpected end of file (%s missing)" % (logfile, endmarker)) distributions.append((distname, distdir, distfiles, distsymlinks, distdeletes)) l = next(lf, 'EOF\n') if l[-1] != '\n': raise CriticalError("Malformed file '%s' (not a text file)" % logfile) l = l[:-1] fields = l.split('\t') while fields[0] == 'POOLDELETE': if len(fields) != 2: raise CriticalError("Malformed file '%s': wrong number of arguments for POOLDELETE" % logfile) deletepoolfiles.append(fields[1]) l = next(lf, 'EOF\n') if l[-1] != '\n': raise CriticalError("Malformed file '%s' (not a text file)" % logfile) l = l[:-1] fields = l.split('\t') if fields[0] != 'EOF' or next(lf, None) != None: raise CriticalError("Malformed file '%s': Unexpected command '%s'" % (logfile, fields[0])) # 
print("Processing '%s'" % logfile) # Checked input to death, no actualy do something outdir = os.environ['REPREPRO_OUT_DIR'] for p in newpoolfiles: if p in db: raise Exception("duplicate pool file %s" % p) db[p] = poolfile(outdir, p) for distname, distdir, distfiles, distsymlinks, distdeletes in distributions: for name, orig in distfiles: db[distdir + '/' + name] = distfile(outdir, orig) for name, target in distsymlinks: db[distdir + '/' + name] = distsymlink(distdir, target) for name in distdeletes: del db[distdir + '/' + name] for p in deletepoolfiles: if not p in db: raise Exception("deleting non-existant pool file %s" % p) del db[p] def collectfiles(dir, name): for l in os.listdir(dir + '/' + name): n = name + '/' + l if os.path.isdir(dir + '/' + n): for x in collectfiles(dir, n): yield x else: yield n def collectpool(outdir): if os.path.isdir(outdir + '/pool'): return ["%s: %s" % (filename, poolfile(outdir, filename)) for filename in collectfiles(outdir, 'pool')] else: return [] def collectdists(outdir): if os.path.isdir(outdir + '/dists'): return ["%s: %s" % (filename, collecteddistfile(outdir, filename)) for filename in collectfiles(outdir, 'dists')] else: return [] def showdiff(i1, i2): clean = True l1 = next(i1, None) l2 = next(i2, None) while l1 or l2: if l1 == l2: l1 = next(i1, None) l2 = next(i2, None) elif l1 != None and (l2 == None or l1 < l2): print("+ %s" % l1) clean = False l1 = next(i1, None) elif l2 != None and (l1 == None or l1 > l2): print("- %s" % l2) clean = False l2 = next(i2, None) else: raise("unexpected") return clean def check(db): outdir = os.environ['REPREPRO_OUT_DIR'] actualfiles = collectpool(outdir) actualfiles.extend(collectdists(outdir)) expectedfiles = [] for k in db.keys(): expectedfiles.append("%s: %s" % (k.decode(encoding='utf-8'), db[k].decode(encoding='utf-8'))) expectedfiles.sort() actualfiles.sort() if not showdiff(iter(expectedfiles), iter(actualfiles)): raise CriticalError("outdir does not match expected state") class 
CriticalError(Exception): pass def main(args): if len(args) <= 0: raise CriticalError("No .outlog files given at command line!") if len(args) == 1 and args[0] == '--print': db = dbm.open(os.environ['REPREPRO_OUT_DB'], 'r') for k in sort(db.keys()): print("%s: %s" % (k, db[k])) return if len(args) == 1 and args[0] == '--check': db = dbm.open(os.environ['REPREPRO_OUT_DB'], 'r') check(db) return for f in args: if len(f) < 8 or f[-7:] != ".outlog": raise CriticalError("command line argument '%s' does not look like a .outlog file!" % f) db = dbm.open(os.environ['REPREPRO_OUT_DB'], 'c') for f in args: donefile = f[:-7] + ".outlogdone" if os.path.exists(donefile): print("Ignoring '%s' as '%s' already exists!" % (f,donefile), file=sys.stderr) continue processfile(f, donefile, db) try: main(sys.argv[1:]) except CriticalError as e: print(str(e), file=sys.stderr) raise SystemExit(1) reprepro-4.13.1/docs/rredtool.10000644000175100017510000000512612152651661013236 00000000000000.TH RREDTOOL 1 "2009-11-12" "reprepro" REPREPRO .SH NAME rredtool \- merge or apply a very restricted subset of ed patches .SH SYNOPSIS .B rredtool \-\-help .B rredtool [ \fIoptions\fP ] .B \-\-merge .I patches... .B rredtool [ \fIoptions\fP ] .B \-\-patch .IR file-to-patch " " patches... .B rredtool .IR directory " " newfile " " oldfile " " mode .SH DESCRIPTION rredtool is a tool to handle a subset of ed patches in a safe way. It is especially targeted at ed patches as used in Packages.diff and Sources.diff. Is also has a mode supposed to be called from reprepro as Index Hook to generate and update a \fBPackages.diff/Index\fP file. .SH "MODI" One of the following has to be given, so that rredtool know that to do. .TP .B \-\-version Print the version of this tool (or rather the version of reprepro which it is coming with). .TP .B \-\-help Print a short overview of the modi. .TP .B \-\-patch The first argument of rredtool is the file to patch, the other arguments are ed patches to apply on this one. 
.TP .B \-\-merge The arguments are treated as ed patches, which are merged into a single one. .TP .BR \-\-reprepro\-hook " (or no other mode flag) Act as reprepro index hook to manage a \fBPackages.diff/index\fP file. That means it expects to get exactly 4 arguments and writes the names of files to place into filedescriptor 3. If neither \-\-patch nor \-\-merge is given, this mode is used, so you can just put \fBDebIndices: Packages Release . .gz /usr/bin/rredtool\fP into reprepro's \fBconf/distributions\fP file to have a Packages.diff directory generated. (Note that you have to generate an uncompressed file (the single dot). You will need to have patch, gzip and gunzip available in your path.) .SH "OPTIONS" .TP .B \-\-debug Print intermediate results or other details that might be interesting when trying to track down bugs in rredtool but not intresting otherwise. .TP .B \-\-max\-patch\-count=\fIcount\fP When generating a \fIPackages\fP\fB.diff/Index\fP file, put at most \fIcount\fP patches in it (not counting possible apt workaround patches). .TP .BR \-o | \-\-output Not yet implemented. .SH "ENVIRONMENT" .TP .BR TMPDIR ", " TEMPDIR temporary files are created in $\fITEMPDIR\fP if set, otherwise in $\fITMPDIR\fP if set, otherwise in \fB/tmp/\fP. .SH "REPORTING BUGS" Report bugs or wishlist requests the Debian BTS (e.g. by using \fBreportbug reperepro\fP) or directly to . .br .SH COPYRIGHT Copyright \(co 2009 Bernhard R. Link .br This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. reprepro-4.13.1/docs/recovery0000644000175100017510000000627012152651661013104 00000000000000Some tips what to do if (hopefully never), your database gets corrupted: First there are three different databases used, residing in three files in your --dbdir (normally db/): 1) references.db This file only contains the information which file in the pool/ is needed by which target (i.e. 
which type/distribution/ component/architecture quadruple). This is simply repairable by deleting references.db and running "rereference". The current state of this database can be seen with "dumpreferences". All references from some specific target can be removed with "_removereferences". 2) files.db and checksums.db These files contain the information about which files in the pool/ dir are known and what checksums they have. Files not in here will not be deleted with "deleteunreferenced". Files being wrong here will not realized (and thus not corrected even if told to be newly included) If both files exist, files.db is the canonical information and checksums.db can be regenerated with a call to collectnewchecksums. If only checksums.db is there, only that it used. (This means: if you have called collectnewchecksums since you last used a version prior to 3.3 with this repository, you can just delete files.db. But make sure to never ever use a version prior to 3.0 on this repository after that.) To get this database in text form use "_listchecksums" without argument, to add items manually pipe it into "_addchecksums". (Filenames are handled as strings, so be careful). If the database is completely lost or broken, you can regain it by moving files.db and checksums.db out of the way and running: find $BASEDIR/pool -type f -printf "pool/%P\n" | reprepro -b $BASEDIR _detect (or cd $BASEDIR && find pool -type f -print | reprepro -b . _detect) Also single files can be removed or added by "_forget" and "_detect". (Again note filekeys will be handled as strings, so leading "./", double slashes, "/./", symlinks and the like make them differ). 4) packages.db This file contains multiple databases, one for each target, containing the chunks from the Packages or Sources files, indexed by package name. This one is the hardest to reconstruct. If you have still an uncorrupted "dists/" directory around, (e.g. 
you just deleted db/ accidentally), it can be reconstructed by moving your dists/ directory to some other place, moving the packages.db file (if still existent) away, and set every distribution in conf/distributions a "Update: localreadd" with localreadd in conf/updates like: Name: localreadd Suite: * Method: copy:/ with otherplace being the place you moved the dists/ directory too. If the packages database is corrupt, the described way can at least reconstruct the Packages still landing in the Packages.gz and Sources.gz files. If references.db is still accessible via dumpreferences, it can give hints where the other files belong to. Otherwise removing references.db and calling "rereference" and then "dumpunreferenced" will give you a list of files not yet anywhere. Last but not least, there are also the "check" and "checkpool" commands, which can give some hints about inconsistencies. (Check will also read files missing from files.db+checksums.db if they are needed by packages but in the pool). reprepro-4.13.1/docs/changestool.10000644000175100017510000001357612152651661013722 00000000000000.TH CHANGESTOOL 1 "2010-03-19" "reprepro" REPREPRO .SH NAME changestool \- verify, dump, modify, create or fix Debian .changes files .SH SYNOPSIS .B changestool \-\-help .B changestool [ \fIoptions\fP ] \fI.changes-filename\fP \fIcommand\fP [ \fIper-command-arguments\fP ] .SH DESCRIPTION changestool is a little program to operate on Debian .changes files, as they are produced by \fBdpkg\-genchanges\fP(1) and used to feed built Debian packages into Debian repository managers like .BR reprepro (1) or .BR dak . .SH EXAMPLES .P .B changestool \fIbloat.changes\fP setdistribution \fIlocal\fP .br will modify the \fBDistribution:\fP header inside \fIbloat.changes\fP to say \fIlocal\fP instead of what was there before. 
.P .B changestool \fIreprepro_1.2.0\-1.local_sparc.changes\fP includeallsources .br will modify the given file to also list \fB.orig.tar.gz\fP it does not list because you forgot to build it with .BR "dpkg\-buildpackage \-sa" . .P .B changestool \fIblafasel_1.0_abacus.changes\fP updatechecksums .br will update the md5sums to those of the files referenced by this file. (So one can do quick'n'dirty corrections to them before uploading to your private package repository) .P .B changestool \-\-create \fItest.changes\fP add \fIbla_1\-1.dsc bla_1\-1_abacus.deb\fP .br will add the specified files (format detected by filename, use \fBadddeb\fP or \fBadddsc\fP if you know it). If the file \fItest.changes\fP does not exist yet, a minimal one will be generated. Though that might be too minimal for most direct uses. .SH "GLOBAL OPTIONS" Options can be specified before the command. Each affects a different subset of commands and is ignored by other commands. .TP .B \-h \-\-help Displays a short list of options and commands with description. .TP .B \-o \-\-outputdir \fIdir\fP Not yet implemented. .TP .B \-s \-\-searchpath \fIpath\fP A colon-separated list of directories to search for files if they are not found in the directory of the .changes file. .TP .B \-\-create Flag for the commands starting with \fBadd\fP to create the \fB.changes\fP file if it does not yet exists. .TP .B \-\-create\-with\-all\-fields Flag for the commands starting with \fBadd\fP to create the \fB.changes\fP file if it does not yet exists. Unlike \fB\-\-create\fP, this creates more fields to make things like dupload happier. Currently that creates fake \fBUrgency\fP and \fBChanges\fP fields. .TP .B \-\-unlzma \fIcommand\fP External uncompressor used to uncompress lzma files to look into .diff.lzma, .tar.lzma or .tar.lzma within .debs. .TP .B \-\-unxz \fIcommand\fP External uncompressor used to uncompress xz files to look into .diff.xz, .tar.xz or .tar.xz within .debs. 
.TP .B \-\-lunzip \fIcommand\fP External uncompressor used to uncompress lzip files to look into .diff.lz, .tar.lz or .tar.lz within .debs. .TP .B \-\-bunzip2 \fIcommand\fP External uncompressor used to uncompress bz2 when compiled without libbz2. .SH COMMANDS .TP .BR verify Check for inconsistencies in the specified \fB.changes\fP file and the files referenced by it. .TP .BR updatechecksums " [ " \fIfilename\fP " ]" Update the checksum (md5sum and size) information within the specified \fB.changes\fP file and all \fB.dsc\fP files referenced by it. Without arguments, all files will be updated. To only update specific files, give their filename (without path) as arguments. .TP .BR setdistribution " [ " \fIdistributions\fP " ]" Change the \fBDistribution:\fP header to list the remaining arguments. .TP .BR includeallsources " [ " \fIfilename\fP " ]" List all files referenced by \fB.dsc\fP files mentioned in the \fB.changes\fP file in said file. Without arguments, all missing files will be included. To only include specific files, give their filename (without path) as arguments. Take a look at the description of \fB\-si\fP, \fB\-sa\fP and \fB\-sd\fP in the manpage of \fBdpkg\-genchanges\fP/\fBdpkg\-buildpackage\fP how to avoid to have to do this at all. Note that while \fBreprepro\fP will just ignore files listed in a \fB.changes\fP file when it already has the file with the same size and md5sum, \fBdak\fP might choke in that case. .TP .B adddeb \fIfilenames\fP Add the \fB.deb\fP and \fB.udeb\fP files specified by their filenames to the \fB.changes\fP file. Filenames without a slash will be searched in the current directory, the directory the changes file resides in and in the directories specified by the \fB\-\-searchpath\fP. .TP .B adddsc \fIfilenames\fP Add the \fB.dsc\fP files specified by their filenames to the \fB.changes\fP file. 
Filenames without a slash will be searched in the the current directory, in the directory the changes file resides in and in the directories specified by the \fB\-\-searchpath\fP. .TP .B addrawfile \fIfilenames\fP Add the files specified by their filenames to the \fB.changes\fP file. Filenames without a slash will be searched in the current directory, in the directory the changes file resides in and in the directories specified by the \fB\-\-searchpath\fP. .TP .B add \fIfilenames\fP Behave like \fBadddsc\fP for filenames ending in \fB.dsc\fP, like \fBadddeb\fP for filenames ending in \fB.deb\fP or \fB.udeb\fP, and like \fBaddrawfile\fP for all other filenames .TP .B dumbremove \fIfilenames\fP Remove the specified files from the .changes file. No other fields (Architectures, Binaries, ...) are updated and no related files is removed. Just the given files (which must be specified without any \fB/\fP) are no longer listen in the .changes file (and only no longer in the changes file). .SH "SEE ALSO" .BR reprepro (1), .BR dpkg\-genchanges (1), .BR dpkg\-buildpackage (1), .BR md5sum (1). .SH "REPORTING BUGS" Report bugs or wishlist requests the Debian BTS (e.g. by using \fBreportbug reperepro\fP) or directly to . .br .SH COPYRIGHT Copyright \(co 2006-2009 Bernhard R. Link .br This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. reprepro-4.13.1/docs/bzip.example0000755000175100017510000000241512152651661013644 00000000000000#!/bin/sh # since reprepro 0.8 this is no longer needed, as it can # create .bz2 files on its own (when compiled with libbz2-dev # present). It's still here for reference how to such a filter works. # Copy this script to your conf/ dir as bzip2.sh, make it executable # and add to some definition in conf/distributions # DscIndices: Sources Release . .gz bzip2.sh # DebIndices: Packages Release . .gz bzip2.sh # UDebIndices: Packages . 
.gz bzip2.sh # and you have .bz2'd Packages and Sources. # (alternatively, if you are very brave, put the full path to this file in there) DIROFDIST="$1" NEWNAME="$2" OLDNAME="$3" # this can be old($3 exists), new($2 exists) or change (both): STATUS="$4" BASENAME="`basename "$OLDNAME"`" # with reprepro <= 0.7 this could also be Packages.gz or Sources.gz, # but now only the uncompressed name is given. (even if not generated) if [ "xPackages" = "x$BASENAME" ] || [ "xSources" = "x$BASENAME" ] ; then if [ "x$STATUS" = "xold" ] ; then if [ -f "$DIROFDIST/$OLDNAME.bz2" ] ; then echo "$OLDNAME.bz2" >&3 else bzip2 -c -- "$DIROFDIST/$OLDNAME" >"$DIROFDIST/$OLDNAME.bz2.new" 3>/dev/null echo "$OLDNAME.bz2.new" >&3 fi else bzip2 -c -- "$DIROFDIST/$NEWNAME" >"$DIROFDIST/$OLDNAME.bz2.new" 3>/dev/null echo "$OLDNAME.bz2.new" >&3 fi fi reprepro-4.13.1/docs/copybyhand.example0000755000175100017510000000130612152651661015036 00000000000000#!/bin/sh # This is an example script for a byhandhook. # Add to you conf/distributions something like ##ByhandHooks: ## * * * copybyhand.sh # and copy this script as copybyhand.sh in your conf/ # directory (or give the full path), and processincoming # will copy all byhand/raw files to dists/codename/extra/* set -e if [ $# != 5 ] ; then echo "to be called by reprepro as byhandhook" >&2 exit 1 fi if [ -z "$REPREPRO_DIST_DIR" ] ; then echo "to be called by reprepro as byhandhook" >&2 exit 1 fi codename="$1" section="$2" priority="$3" basefilename="$4" fullfilename="$5" mkdir -p "$REPREPRO_DIST_DIR/$codename/extra" install -T -- "$fullfilename" "$REPREPRO_DIST_DIR/$codename/extra/$basefilename" reprepro-4.13.1/docs/sftp.py0000755000175100017510000006262712152651661012664 00000000000000# -*- coding: utf-8 -*- # # Copyright 2013 Bernhard R. 
Link # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # SOFTWARE IN THE PUBLIC INTEREST, INC. BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # """ This is a sftp module to be used in reprepro's outsftphook example. Like the sftp binary it calls ssh to do the connection in a secure way and then speaks the sftp subsystem language over that connection. """ import os, subprocess, select class EnumInternException(Exception): def __init__(self, v): super().__init__(v) self.value = v class _EnumType(type): """ Metaclass for Enum. Allows to set values as parameters. """ def __new__(cls, name, bases, namespace, **values): return type.__new__(cls, name, bases, namespace) def __init__(self, name, bases, namespace, **values): super().__init__(name, bases, namespace) if bases: self._byvalue = dict() self._byname = dict() if values: for k,v in values.items(): self._create_instance(k, v) class Enum(metaclass=_EnumType): """ An enum is a class with a fixed set of instances. Each instance has a name and a integer value. 
If a new new instance is to be created, one of those fix instances is returned instead. """ @classmethod def _create_instance(cls, name, value): # create a new instance: result = super(Enum, cls).__new__(cls) if isinstance(name, str): result.name = name else: result.name = name[0] result.__name__ = result.name result.value = value cls._byvalue[value] = result if isinstance(name, str): cls._byname[name] = result setattr(cls, name, result) else: for n in name: cls._byname[n] = result setattr(cls, n, result) return result def __new__(cls, l): try: if isinstance(l, cls): return l elif isinstance(l, int): return cls._byvalue[l] elif isinstance(l, str): return cls._byname[l] else: raise EnumInternException(repr(l)) except KeyError: raise EnumInternException(repr(l)) def __init__(self, l): pass def __int__(self): return self.value def __str__(self): return self.name def __repr__(self): return "%s.%s.%s" % (type(self).__module__, type(self).__name__, self.name) class _BitmaskType(type): """ Metaclass for Bitmask types. Allows to set values as parameters. 
""" @classmethod def __prepare__(cls, name, bases, **values): namespace = type.__prepare__(cls, name, bases) if values: flagcls = _EnumType.__new__(type, "flags of " + name, (Enum,), dict()) flagcls._byvalue = dict() flagcls._byname = dict() namespace["_Values"] = flagcls for (k,v) in values.items(): if isinstance(v, int): e = flagcls._create_instance(k, v) e.mask = v else: e = flagcls._create_instance(k, v[0]) e.mask = v[1] namespace[k] = e return namespace def __new__(cls, name, bases, namespace, **values): return type.__new__(cls, name, bases, namespace) def __init__(self, name, bases, namespace, **values): return super().__init__(name, bases, namespace) class Bitmask(set, metaclass=_BitmaskType): def __init__(self, l): if isinstance(l, int): super().__init__([i for (k,i) in self._Values._byvalue.items() if (l & i.mask) == k]) if l != int(self): raise Exception("Unrepresentable number %d (got parsed as %s = %d)" % (l, str(self), int(self))) elif isinstance(l, str): try: super().__init__([self._Values(i) for i in l.split("|")]) # test for inconsistencies: type(self)(int(self)) except EnumInternException as e: raise Exception("Invalid value '%s' in value '%s' for %s" % (e.value, str(l), type(self).__name__)) else: try: super().__init__([self._Values(i) for i in l]) # test for inconsistencies: type(self)(int(self)) except EnumInternException as e: raise Exception("Invalid value '%s' in value '%s' for %s" % (e.value, str(l), type(self).__name__)) def __int__(self): v = 0 for i in self: v = v | int(i) return v def __str__(self): return "|".join([str(i) for i in self]) class SSH_FILEXFER(Bitmask, ATTR_SIZE = 0x00000001, ATTR_UIDGID = 0x00000002, ATTR_PERMISSIONS = 0x00000004, ATTR_ACMODTIME = 0x00000008, ATTR_EXTENDED = 0x80000000): pass def ssh_data(b): return len(b).to_bytes(4, byteorder='big') + b def ssh_string(s): b = str(s).encode(encoding='utf-8') return len(b).to_bytes(4, byteorder='big') + b def ssh_u8(i): return int(i).to_bytes(1, byteorder='big') def 
ssh_u32(i): return int(i).to_bytes(4, byteorder='big') def ssh_u64(i): return int(i).to_bytes(8, byteorder='big') def ssh_attrs(**opts): flags = SSH_FILEXFER(0) extended = [] for key in opts: if key == 'size': flags.add(SSH_FILEXFER.ATTR_SIZE) elif key == 'uid' or key == 'gid': flags.add(SSH_FILEXFER.ATTR_UIDGID) elif key == 'permissions': flags.add(SSH_FILEXFER.ATTR_PERMISSIONS) elif key == 'atime' or key == 'mtime': flags.add(SSH_FILEXFER.ATTR_ACMODTIME) elif '@' in key: extended.add(opts[key]) else: raise SftpException("Unsupported file attribute type %s" % repr(key)) if extended: flags.add(SSH_FILEXFER.ATTR_EXTENDED) b = ssh_u32(int(flags)) if SSH_FILEXFER.ATTR_SIZE in flags: b = b + ssh_u64(opts['size']) if SSH_FILEXFER.ATTR_UIDGID in flags: b = b + ssh_u32(opts['uid']) b = b + ssh_u32(opts['gid']) if SSH_FILEXFER.ATTR_PERMISSIONS in flags: b = b + ssh_u32(opts['permissions']) if SSH_FILEXFER.ATTR_ACMODTIME in flags: b = b + ssh_u32(opts['atime']) b = b + ssh_u32(opts['mtime']) if SSH_FILEXFER.ATTR_EXTENDED in flags: b = b + ssh_u32(len(extended)) for key in extended: b = b + ssh_string(key) b = b + ssh_data(opts[key]) return b def ssh_getu32(m): v = int.from_bytes(m[:4], byteorder='big') return v, m[4:] def ssh_getstring(m): l = int.from_bytes(m[:4], byteorder='big') return (m[4:4+l].decode(encoding='utf-8'), m[4+l:]) def ssh_getdata(m): l = int.from_bytes(m[:4], byteorder='big') return (m[4:4+l], m[4+l:]) def ssh_getattrs(m): attrs = dict() flags, m = ssh_getu32(m) flags = SSH_FILEXFER(flags) if SSH_FILEXFER.ATTR_SIZE in flags: attrs['size'], m = ssh_getu64(m) if SSH_FILEXFER.ATTR_UIDGID in flags: attrs['uid'], m = ssh_getu32(m) attrs['gid'], m = ssh_getu32(m) if SSH_FILEXFER.ATTR_PERMISSIONS in flags: attrs['permissions'], m = ssh_getu32(m) if SSH_FILEXFER.ATTR_ACMODTIME in flags: attrs['atime'], m = ssh_getu32(m) attrs['mtime'], m = ssh_getu32(m) if SSH_FILEXFER.ATTR_EXTENDED in flags: count, m = ssh_getu32(m) while count > 0: count -= 1 key, m = 
ssh_getstring(m) attrs[key], m = ssh_getdata(m) return (attrs, m) class SftpException(Exception): pass class SftpStrangeException(SftpException): """Unparseable stuff from server""" pass class SftpUnexpectedAnswerException(SftpStrangeException): def __init__(self, answer, request): super().__init__("Unexpected answer '%s' to request '%s'" % (str(answer), str(request))) class SftpTooManyRequestsException(SftpException): def __init__(self): super().__init__("Too many concurrent requests (out of request ids)") class SftpInternalException(SftpException): """a programming or programmer mistake""" pass class Request: def __init__(self, **args): self.data = args pass def __int__(self): return self.requestid def __str__(self): return type(self).__name__ + "(" + " ".join(["%s=%s" % (key, repr(val)) for (key, val) in self.data.items()]) + ")" @classmethod def bin(cls, conn, req, *payload): s = 5 for b in payload: s = s + len(b) # print("Sending packet of type %d and size %d" % (cls.typeid, s)) r = ssh_u32(s) + ssh_u8(cls.typeid) + ssh_u32(int(req)) for b in payload: r = r + b return r def send(self, conn): conn.requests[int(self)] = self self.conn = conn conn.send(self.bin(conn, self, **self.data)) def done(self): if self.requestid != None: del self.conn.requests[self.requestid] self.requestid = None class NameRequest(Request): """Base class for requests with a single name as argument""" def __init__(self, name): super().__init__(name = name) @classmethod def bin(cls, conn, req, name): return super().bin(conn, req, ssh_string(name)) class HandleRequest(Request): """Base class for requests with a single name as argument""" def __init__(self, handle): super().__init__(handle = handle) @classmethod def bin(cls, conn, req, handle): return super().bin(conn, req, ssh_data(handle)) class NameAttrRequest(Request): """Base class for requests with a name and attributes as argument""" def __init__(self, name, **attrs): super().__init__(name = name, attrs = attrs) @classmethod def 
bin(cls, conn, req, name, attrs): return super().bin(conn, req, ssh_string(name), ssh_attrs(**attrs)) class INIT(Request): typeid = 1 @classmethod def bin(cls, conn, version): # INIT has no request id but instead sends a protocol version return super().bin(conn, int(version)) class SSH_FXF(Bitmask, READ = 0x00000001, WRITE = 0x00000002, APPEND = 0x00000004, CREAT = 0x00000008, TRUNC = 0x00000010, EXCL = 0x00000020): pass class OPEN(Request): typeid = 3 def __init__(self, name, flags, **attributes): super().__init__(name = name, flags = SSH_FXF(flags), attrs = attributes) @classmethod def bin(cls, conn, req, name, flags, attrs): return super().bin(conn, req, ssh_string(name), ssh_u32(flags), ssh_attrs(**attrs)) class CLOSE(HandleRequest): typeid = 4 class READ(Request): typeid = 5 def __init__(self, handle, start, length): super().__init__(handle = handle, start = start, length = int(length)) @classmethod def bin(cls, conn, req, handle, start, length): return super().bin(conn, req, ssh_data(handle), ssh_u64(start), ssh_u32(length)) class WRITE(Request): typeid = 6 def __init__(self, handle, start, data): super().__init__(handle = handle, start = start, data = bytes(data)) @classmethod def bin(cls, conn, req, handle, start, data): return super().bin(conn, req, ssh_data(handle), ssh_u64(start), ssh_data(data)) class LSTAT(NameRequest): typeid = 7 class FSTAT(HandleRequest): typeid = 8 class SETSTAT(NameAttrRequest): typeid = 9 class FSETSTAT(Request): typeid = 10 def __init__(self, handle, **attrs): super().__init__(handle = handle, attrs = attrs) @classmethod def bin(cls, conn, req, name, attrs): return super().bin(conn, req, ssh_data(handle), ssh_attrs(**attrs)) class OPENDIR(NameRequest): typeid = 11 class READDIR(HandleRequest): typeid = 12 class REMOVE(NameRequest): typeid = 13 class MKDIR(NameAttrRequest): typeid = 14 class RMDIR(NameRequest): typeid = 15 class REALPATH(NameRequest): typeid = 16 class STAT(NameRequest): typeid = 17 class SSH_FXF_RENAME(Bitmask, 
OVERWRITE = 0x00000001, ATOMIC = 0x00000002, NATIVE = 0x00000004): pass class RENAME(Request): typeid = 18 def __init__(self, src, dst, flags): if not isinstance(flags, SSH_FXF_RENAME): flags = SSH_FXF_RENAME(flags) super().__init__(src = src, dst = dst, flags = flags) @classmethod def bin(cls, conn, req, src, dst, flags): # TODO: Version 3 has no flags (though they do not seem to harm) return super().bin(conn, req, ssh_string(src), ssh_string(dst), ssh_u32(flags)) class READLINK(NameRequest): typeid = 19 class SYMLINK(Request): typeid = 20 def __init__(self, name, dest): super().__init__(name = name, dest = dest) @classmethod def bin(cls, conn, req, name, dest): # TODO: this is openssh and not the standard (they differ) return super().bin(conn, req, ssh_string(dest), ssh_string(name)) class EXTENDED(Request): typeid = 200 # TODO? ################ Answers ################ class Answer: def __int__(self): return self.id # Fallbacks, can be removed once all are done: def __init__(self, m): self.data = m def __str__(self): return "%s %s" % (type(self).__name__, repr(self.data)) class VERSION(Answer): id = 2 class SSH_FX(Enum, OK = 0, EOF = 1, NO_SUCH_FILE = 2, PERMISSION_DENIED = 3, FAILURE = 4, BAD_MESSAGE = 5, NO_CONNECTION = 6, CONNECTION_LOST = 7, OP_UNSUPPORTED = 8, INVALID_HANDLE = 9, NO_SUCH_PATH = 10, FILE_ALREADY_EXISTS = 11, WRITE_PROTECT = 12, NO_MEDIA = 13 ): pass class STATUS(Answer): id = 101 def __init__(self, m): s, m = ssh_getu32(m) self.status = SSH_FX(s) self.message, m = ssh_getstring(m) self.lang, m = ssh_getstring(m) def __str__(self): return "STATUS %s: %s[%s]" % ( str(self.status), self.message, self.lang) class HANDLE(Answer): id = 102 def __init__(self, m): self.handle, m = ssh_getdata(m) def __str__(self): return "HANDLE %s" % repr(self.handle) class DATA(Answer): id = 103 def __init__(self, m): self.data, m = ssh_getdata(m) def __str__(self): return "DATA %s" % repr(self.data) class NAME(Answer): id = 104 def __init__(self, m): count, m = 
ssh_getu32(m) self.names = [] while count > 0: count -= 1 filename, m = ssh_getstring(m) longname, m = ssh_getstring(m) attrs, m = ssh_getattrs(m) self.append((filename, longname, attrs)) def __str__(self): return "NAME" + "".join(("%s:%s:%s" % (repr(fn), repr(ln), str(attrs)) for (fn,ln,attrs) in self.names)) class ATTRS(Answer): id = 105 def __init__(self, m): self.attrs, m = ssh_getattrs(m) def __str__(self): return "ATTRS %s" % str(self.attrs) class EXTENDED_REPLY(Answer): id = 201 # TODO? ################ Tasks ################ class Task: """A task is everything that sends requests, receives answers, uses collectors or is awakened by collectors. """ def start(self, connection): self.connection = connection def enqueueRequest(self, request): request.task = self self.connection.enqueueRequest(request) def sftpanswer(self, a): raise SftpInternalException("unimplemented sftpanswer called") def writeready(self): raise SftpInternalException("unimplemented writeready called") def parentinfo(self, command): raise SftpInternalException("unimplemented parentinfo called") class TaskFromGenerator(Task): """A wrapper around a python corotine (generator)""" def __init__(self, gen): super().__init__() self.gen = gen def start(self, connection): super().start(connection) self.enqueue(next(self.gen)) def parentinfo(self, command): self.enqueue(self.gen.send(command)) def sftpanswer(self, answer): self.enqueue(self.gen.send(answer)) def writeready(self): self.enqueue(self.gen.send('canwrite')) def __str__(self): return "Task(by %s)" % self.gen def enqueue(self, joblist): if len(joblist) == 0: return for job in joblist: if isinstance(job, Request): self.enqueueRequest(job) elif job == 'wantwrite': self.connection.enqueueTask(self) elif (isinstance(job, tuple) and len(job) == 2 and isinstance(job[0], Task)): job[0].parentinfo(job[1]) elif (isinstance(job, tuple) and len(job) >= 2 and issubclass(job[1], Collector)): self.connection.collect(self, *job) elif isinstance(job, Task): 
self.connection.start(job) else: raise SftpInternalException("strange result from generator") class Collector(Task): """ Collectors collect information from Tasks and send them triggers at requested events (parent directory created, another file can be processed, ...) """ def childinfo(self, who, command): raise SftpInternalException("unimplemented parentinfo called") class DebugMode(Bitmask, **{ 'COOKED_IN': 1, 'COOKED_OUT': 2, 'RAW_IN_STAT': 4, 'RAW_OUT_STAT': 8, 'RAW_IN': 16, 'RAW_OUT': 32, 'ENQUEUE': 64, 'LOCKS': 128, }): pass class Connection: def next_request_id(self): i = self.requestid_try_next while i in self.requests: i = (i + 1) % 0x100000000 if i == self.requestid_try_next: raise SftpTooManyRequestsException() self.requestid_try_next = (i + 1) % 0x100000000 return i def __init__(self, servername, sshcommand="ssh", username=None, ssh_options=[], debug=0, debugopts=dict(), maxopenfiles=10): self.debug = DebugMode(debug) self.debugopts = debugopts self.requests = dict() self.collectors = dict() self.queue = list() self.wantwrite = list() self.requestid_try_next = 17 self.semaphores = {'openfile': maxopenfiles} commandline = [sshcommand] if ssh_options: commandline.extend(ssh_options) # those defaults are after the user-supplied ones so they can be overriden. # (earlier ones win with ssh). 
commandline.extend(["-oProtocol 2", # "-oLogLevel DEBUG", "-oForwardX11 no", "-oForwardAgent no", "-oPermitLocalCommand no", "-oClearAllForwardings yes"]) if username: commandline.extend(["-l", username]) commandline.extend(["-s", "--", servername, "sftp"]) self.connection = subprocess.Popen(commandline, close_fds = True, stdin = subprocess.PIPE, stdout = subprocess.PIPE) self.poll = select.poll() self.poll.register(self.connection.stdout, select.POLLIN) self.inbuffer = bytes() self.send(INIT.bin(self, 3)) t,b = self.getpacket() if t != VERSION.id: raise SftpUnexpectedAnswerException(b, "INIT") # TODO: parse answer data (including available extensions) def close(self): self.connection.send_signal(15) def getmoreinput(self, minlen): while len(self.inbuffer) < minlen: o = self.connection.stdout.read(minlen - len(self.inbuffer)) if o == None: continue if len(o) == 0: raise SftpStrangeException("unexpected EOF") self.inbuffer = self.inbuffer + o def getpacket(self): self.getmoreinput(5) s = int.from_bytes(self.inbuffer[:4], byteorder='big') if s < 1: raise SftpStrangeException("Strange size field in Paket from server!") t = self.inbuffer[4] if DebugMode.RAW_IN_STAT in self.debug: print("receiving packet of length %d and type %d " % (s, t), **self.debugopts) s = s - 1 self.inbuffer = self.inbuffer[5:] self.getmoreinput(s) d = self.inbuffer[:s] self.inbuffer = self.inbuffer[s:] if DebugMode.RAW_IN in self.debug: print("received packet(type %d):" % t, repr(d), **self.debugopts) return (t, d) def send(self, b): if not isinstance(b, bytes): raise SftpInternalException("send not given byte sequence") if DebugMode.RAW_OUT_STAT in self.debug: print("sending packet of %d bytes" % len(b), **self.debugopts) if DebugMode.RAW_OUT in self.debug: print("sending packet:", repr(b), **self.debugopts) self.connection.stdin.write(b) def enqueueRequest(self, job): if DebugMode.ENQUEUE in self.debug: print("enqueue", job, **self.debugopts) if len(self.queue) == 0 and len(self.wantwrite) == 
0: self.poll.register(self.connection.stdin, select.POLLOUT) job.requestid = self.next_request_id() self.queue.append(job) def enqueueTask(self, task): if DebugMode.ENQUEUE in self.debug: print("enqueue", task, **self.debugopts) if len(self.queue) == 0 and len(self.wantwrite) == 0: self.poll.register(self.connection.stdin, select.POLLOUT) self.wantwrite.append(task) def collect(self, who, command, collectortype, *collectorargs): """Tell the (possibly to be generated) """ collectorid = (collectortype, collectorargs) if not collectorid in self.collectors: l = collectortype(*collectorargs) self.collectors[collectorid] = l l.start(self) else: l = self.collectors[collectorid] l.childinfo(who, command) def start(self, task): task.start(self) def dispatchanswer(self, answer): task = answer.forr.task try: task.sftpanswer(answer) except StopIteration: orphanreqs = [ r for r in self.requests.values() if r.task == task ] for r in orphanreqs: r.done() def readdata(self): t,m = self.getpacket() for answer in Answer.__subclasses__(): if t == answer.id: break else: raise SftpUnexpectedAnswerException("Unknown answer type %d" % t, "") id, m = ssh_getu32(m) a = answer(m) if DebugMode.COOKED_IN in self.debug: print("got answer for request %d: %s" % (id, str(a)), **self.debugopts) if not id in self.requests: raise SftpUnexpectedAnswerException(a, "unknown-id-%d" % id) else: a.forr = self.requests[id] self.dispatchanswer(a) def senddata(self): if len(self.queue) == 0: while len(self.wantwrite) > 0: w = self.wantwrite.pop(0) if len(self.wantwrite) == 0 and len(self.queue) == 0: self.poll.unregister(self.connection.stdin) w.writeready() if len(self.queue) > 0: request = self.queue.pop(0) if len(self.queue) == 0 and len(self.wantwrite) == 0: self.poll.unregister(self.connection.stdin) if DebugMode.COOKED_OUT in self.debug: print("sending request %d: %s" % (request.requestid, str(request)), **self.debugopts) request.send(self) def dispatch(self): while self.requests or self.queue: for 
(fd, event) in self.poll.poll(): if event == select.POLLIN: self.readdata() elif event == select.POLLHUP: raise SftpStrangeException( "Server disconnected unexpectedly" " or ssh client process terminated") elif event == select.POLLOUT: self.senddata() else: raise SftpException("Unexpected event %d from poll" % event) class Dirlock(Collector): def __init__(self, name): super().__init__() self.name = name self.dirname = os.path.dirname(name) self.queue = [] def start(self, connection): super().start(connection) if self.dirname: self.mode = "wait-for-parent" self.connection.collect(self, 'waitingfor', Dirlock, self.dirname) else: self.tellparent = False self.mode = "wait-for-client" self.isnew = False def sftpanswer(self, a): assert(self.mode == "creating") if not isinstance(a, STATUS): raise SftpUnexpectedAnswer(a, a.forr) # Only one answer is expected: a.forr.done() if a.status == SSH_FX.OK: self.mode = "exists" self.isnew = True self.releaseallqueued() elif self.tellparent and a.status == SSH_FX.NO_SUCH_FILE: self.mode = "wait-for-parent" self.connection.collect(self, 'missing', Dirlock, self.dirname) else: raise SftpException("Cannot create directory %s: %s" % (self.name, a)) def parentinfo(self, command): assert(self.mode == "wait-for-parent") if command == "createnew": self.tellparent = False self.isnew = True self.createdir() return if command != "tryandtell" and command != "ready": raise SftpInternalException( "Unexpected parent info %s" % command) self.tellparent = command == "tryandtell" if len(self.queue) > 0: self.mode = "testing" self.queue.pop(0).parentinfo("tryandtell") else: self.mode = "wait-for-client" def childinfo(self, who, command): if command == "waitingfor": if self.mode == "exists": if self.isnew: who.parentinfo("createnew") else: who.parentinfo("ready") elif self.mode == "wait-for-client": self.mode = "testing" who.parentinfo("tryandtell") else: self.queue.append(who) elif command == "found": assert(self.mode == "testing") self.mode = 
"exists" self.isnew = False self.releaseallqueued() elif command == "missing": self.queue.append(who) self.mode = "creating" self.createdir() else: raise SftpInternalException( "Unexpected child information: %s" % command) def createdir(self): self.mode = "creating" self.enqueueRequest(MKDIR(self.name)) def releaseallqueued(self): if self.tellparent: self.connection.collect(self, 'found', Dirlock, self.dirname) self.tellparent = False if self.isnew: command = "createnew" else: command = "ready" # This assumes out mode cannot change any more: while self.queue: self.queue.pop(0).parentinfo(command) class Semaphore(Collector): def __init__(self, name): super().__init__() self.name = name self.queue = [] self.allowed = 10 def start(self, connection): self.allowed = connection.semaphores[self.name] def childinfo(self, who, command): if command == "lock": if self.allowed > 0: self.allowed -= 1 who.parentinfo("unlock") else: self.queue.append(who) elif command == "release": if self.allowed == 0 and self.queue: self.queue.pop(0).parentinfo("unlock") else: self.allowed += 1 else: raise SftpInternalException("Semaphore.childinfo called with invalid command") reprepro-4.13.1/docs/di.example/0000755000175100017510000000000012152655346013430 500000000000000reprepro-4.13.1/docs/di.example/distributions0000644000175100017510000000072412152651661016174 00000000000000Origin: Debian-Installer Label: Debian-Installer Suite: testing Codename: sarge Version: 3.1 Architectures: sparc i386 Components: main UDebComponents: main Description: Debian Installer partial mirror Update: - debian #SignWith: yes Origin: Debian-Installer Label: Debian-Installer Suite: unstable Codename: sid Version: 3.2 Architectures: sparc i386 Components: main UDebComponents: main Description: Debian Installer partial mirror Update: - debian #SignWith: yes reprepro-4.13.1/docs/di.example/README0000644000175100017510000000104112152651661014220 00000000000000This is an example from Goswin Brederlow for the ListHook 
 directive. He describes the example as: > attached a sample config that mirrors only packages from the debian-cd > netinstall CD image task. I think this would make a useful example > for making a partial mirror by filtering. The speciality of the example needing the ListHook and not easier possible with FilterList is the need for different packages in different architectures. (Though extending FilterList to support this is on my TODO-List) reprepro-4.13.1/docs/di.example/DI-filter.sh0000644000175100017510000000214312152651661015457 00000000000000#!/bin/sh # # Select only debs needed for a D-I netinstall cd IN="$1" OUT="$2" DIR=`dirname "$IN"` FILE=`basename "$IN"` CODENAME=`echo $FILE | cut -d"_" -f1` COMPONENT=`echo $FILE | cut -d"_" -f4` ARCH=`echo $FILE | cut -d"_" -f5` echo "### $IN" echo "# Source: $IN" echo "# Debs: $DIR/$FILE.debs" echo "# Out: $OUT" echo # generate list of packages needed DEBCDDIR="/usr/share/debian-cd" export ARCH CODENAME DEBCDDIR DIR make -f "$DEBCDDIR/Makefile" \ BDIR='$(DIR)' \ INSTALLER_CD='2' \ TASK='$(DEBCDDIR)/tasks/debian-installer+kernel' \ BASEDIR='$(DEBCDDIR)' \ forcenonusoncd1='0' \ VERBOSE_MAKE='yes' \ "$DIR/list" # hotfix abi name for sparc kernel sed -e 's/-1-/-2-/' "$DIR/list" > "$DIR/$FILE.debs" rm -f "$DIR/list" # filter only needed packages grep-dctrl `cat "$DIR/$FILE.debs" | while read P; do echo -n " -o -X -P $P"; done | cut -b 5-` "$IN" >"$OUT" # cleanup rm -f "$DIR/$FILE.debs" reprepro-4.13.1/docs/di.example/updates0000644000175100017510000000022612152651661014734 00000000000000Name: debian Architectures: sparc i386 Method: http://ftp.de.debian.org/debian #VerifyRelease: FBC60EA91B67D3C0 ListHook: /mnt/mirror/DI/DI-filter.sh reprepro-4.13.1/docs/reprepro.10000644000175100017510000030364312152651661013247 00000000000000.TH REPREPRO 1 "2013-05-04" "reprepro" REPREPRO .SH NAME reprepro \- produce, manage and sync a local repository of Debian packages .mso www.tmac .SH SYNOPSIS .B reprepro \-\-help .B reprepro [ 
\fIoptions\fP ] \fIcommand\fP [ \fIper\-command\-arguments\fP ] .SH DESCRIPTION reprepro is a tool to manage a repository of Debian packages (.deb, .udeb, .dsc, ...). It stores files either being injected manually or downloaded from some other repository (partially) mirrored into a pool/ hierarchy. Managed packages and checksums of files are stored in a Berkeley DB database file, so no database server is needed. Checking signatures of mirrored repositories and creating signatures of the generated Package indices is supported. Former working title of this program was mirrorer. .SH "GLOBAL OPTIONS" Options can be specified before the command. Each affects a different subset of commands and is ignored by other commands. .TP .B \-h \-\-help Displays a short list of options and commands with description. .TP .B \-v, \-V, \-\-verbose Be more verbose. Can be applied multiple times. One uppercase .B \-V counts as five lowercase .B \-v. .TP .B \-\-silent Be less verbose. Can be applied multiple times. One .B \-v and one .B \-s cancel each other out. .TP .B \-f, \-\-force This option is ignored, as it no longer exists. .TP .B \-b, \-\-basedir \fIbasedir\fP Sets the base\-dir all other default directories are relative to. If none is supplied and the .B REPREPRO_BASE_DIR environment variable is not set either, the current directory will be used. .TP .B \-\-outdir \fIoutdir\fP Sets the base\-dir of the repository to manage, i.e. where the .B pool/ subdirectory resides. And in which the .B dists/ directory is placed by default. If this starts with '\fB+b/\fP', it is relative to basedir. The default for this is \fIbasedir\fP. .TP .B \-\-confdir \fIconfdir\fP Sets the directory where the configuration is searched in. If this starts with '\fB+b/\fP', it is relative to basedir. If none is given, \fB+b/conf\fP (i.e. \fIbasedir\fP\fB/conf\fP) will be used. .TP .B \-\-distdir \fIdistdir\fP Sets the directory to generate index files relatively to. (i.e. 
things like Packages.gz, Sources.gz and Release.gpg) If this starts with '\fB+b/\fP', it is relative to basedir, if starting with '\fB+o/\fP' relative to outdir. If none is given, \fB+o/dists\fP (i.e. \fIoutdir\fP\fB/dists\fP) is used. .B Note: apt has .B dists hard-coded in it, so this is mostly only useful for testing or when your webserver pretends another directory structure than your physical layout. .B Warning: Beware when changing this forth and back between two values not ending in the same directory. Reprepro only looks if files it wants are there. If nothing of the content changed and there is a file it will not touch it, assuming it is the one it wrote last time, assuming any different \fB\-\-distdir\fP ended in the same directory. So either clean a directory before setting \fB\-\-distdir\fP to it or do an \fBexport\fP with the new one first to have a consistent state. .TP .B \-\-logdir \fIlogdir\fP The directory where files generated by the \fBLog:\fP directive are stored if they have no absolute path. If this starts with '\fB+b/\fP', it is relative to basedir, if starting with '\fB+o/\fP' relative to outdir, with '\fB+c/\fP' relative to confdir. If none is given, \fB+b/logs\fP (i.e. \fIbasedir\fP\fB/logs\fP) is used. .TP .B \-\-dbdir \fIdbdir\fP Sets the directory where reprepro keeps its databases. If this starts with '\fB+b/\fP', it is relative to basedir, if starting with '\fB+o/\fP' relative to outdir, with '\fB+c/\fP' relative to confdir. If none is given, \fB+b/db\fP (i.e. \fIbasedir\fP\fB/db\fP) is used. .B Note: This is permanent data, no cache. One has almost to regenerate the whole repository when this is lost. .TP .B \-\-listdir \fIlistdir\fP Sets the directory where downloads it downloads indices to when importing from other repositories. This is temporary data and can be safely deleted when not in an update run. 
If this starts with '\fB+b/\fP', it is relative to basedir, if starting with '\fB+o/\fP' relative to outdir, with '\fB+c/\fP' relative to confdir. If none is given, \fB+b/lists\fP (i.e. \fIbasedir\fP\fB/lists\fP) is used. .TP .B \-\-morguedir \fImorguedir\fP Files deleted from the pool are stored into \fImorguedir\fP. If this starts with '\fB+b/\fP', it is relative to basedir, if starting with '\fB+o/\fP' relative to outdir, with '\fB+c/\fP' relative to confdir. If none is given, deleted files are just deleted. .TP .B \-\-methoddir \fImethoddir\fP Look in \fImethoddir\fP instead of .B /usr/lib/apt/methods for methods to call when importing from other repositories. .TP .B \-C, \-\-component \fIcomponents\fP Limit the specified command to this components only. This will force added packages to this components, limit removing packages from this components, only list packages in this components, and/or otherwise only look at packages in this components, depending on the command in question. Multiple components are specified by separating them with \fB|\fP, as in \fB\-C 'main|contrib'\fP. .TP .B \-A, \-\-architecture \fIarchitectures\fP Limit the specified command to this architectures only. (i.e. only list such packages, only remove packages from the specified architectures, or otherwise only look at/act on this architectures depending on the specific command). Multiple architectures are specified by separating them with \fB|\fP, as in \fB\-A 'sparc|i386'\fP. Note that architecture \fBall\fP packages can be included to each architecture but are then handled separately. Thus by using \fB\-A\fP in a specific way one can have different versions of an architecture \fBall\fP package in different architectures of the same distribution. .TP .B \-T, \-\-type \fRdsc|deb|udeb Limit the specified command to this packagetypes only. (i.e. only list such packages, only remove such packages, only include such packages, ...) 
.TP .B \-S, \-\-section \fIsection\fP Overrides the section of inclusions. (Also override possible override files) .TP .B \-P, \-\-priority \fIpriority\fP Overrides the priority of inclusions. (Also override possible override files) .TP .BR \-\-export= ( never | changed | lookedat | force ) This option specify whether and how the high level actions (e.g. install, update, pull, delete) should export the index files of the distributions they work with. .TP .BR \-\-export=lookedat In this mode every distribution the action handled will be exported, unless there was an error possibly corrupting it. .br \fINote\fP that only missing files and files whose intended content changed between before and after the action will be written. To get a guaranteed current export, use the \fBexport\fP action. .br For backwards compatibility, \fBlookedat\fP is also available under the old name \fBnormal\fP. The name \fBnormal\fP is deprecated and will be removed in future versions. .TP .BR \-\-export=changed In this mode every distribution actually changed will be exported, unless there was an error possibly corrupting it. (i.e. if nothing changed, not even missing files will be created.) .br \fINote\fP that only missing files and files whose intended content changed between before and after the action will be written. To get a guaranteed current export, use the \fBexport\fP action. .TP .BR \-\-export=force Always export all distributions looked at, even if there was some error possibly bringing it into a inconsistent state. .TP .BR \-\-export=never No index files are exported. You will have to call \fBexport\fP later. .br \fINote\fP that you most likely additionally need the \fB\-\-keepunreferencedfiles\fP option, if you do not want some of the files pointed to by the untouched index files to vanish. .TP .B \-\-ignore=\fIwhat\fP Ignore errors of type \fIwhat\fP. See the section \fBERROR IGNORING\fP for possible values. 
.TP .B \-\-nolistsdownload When running \fBupdate\fP, \fBcheckupdate\fP or \fBpredelete\fP do not download any Release or index files. This is hardly useful except when you just run one of those commands for the same distributions. And even then reprepro is usually good in not downloading except \fBRelease\fP and \fBRelease.gpg\fP files again. .TP .B \-\-nothingiserror If nothing was done, return with exitcode 1 instead of the usual 0. Note that "nothing was done" means the primary purpose of the action in question. Auxiliary actions (opening and closing the database, exporting missing files with \-\-export=lookedat, ...) usually do not count. Also note that this is not very well tested. If you find an action that claims to have done something in some cases where you think it should not, please let me know. .TP .B \-\-keeptemporaries Do not delete temporary \fB.new\fP files when exporting a distribution fails. (reprepro first creates \fB.new\fP files in the \fBdists\fP directory and only if everything is generated, all files are put into their final place at once. If this option is not specified and something fails, all are deleted to keep \fBdists\fP clean). .TP .B \-\-keepunreferencedfiles Do not delete files that are no longer used because the package they are from is deleted/replaced with a newer version from the last distribution it was in. .TP .B \-\-keepunusednewfiles The include, includedsc, includedeb and processincoming by default delete any file they added to the pool that is not marked used at the end of the operation. While this keeps the pool clean and allows changing before trying to add again, this needs copying and checksum calculation every time one tries to add a file. .TP .B \-\-keepdirectories Do not try to rmdir parent directories after files or directories have been removed from them. 
(Do this if your directories have special permissions you want keep, do not want to be pestered with warnings about errors to remove them, or have a buggy rmdir call deleting non-empty directories.) .TP .B \-\-keeptemporaries If an export of an distribution fails, this option causes reprepro to not delete the temporary \fB.new\fP files in the \fBdists/\fP directory, so one can look at the partial result. .TP .B \-\-ask\-passphrase Ask for passphrases when signing things and one is needed. This is a quick and dirty implementation using the obsolete \fBgetpass(3)\fP function with the description gpgme is supplying. So the prompt will look quite funny and support for passphrases with more than 8 characters depend on your libc. I suggest using gpg\-agent or something like that instead. .TP .B \-\-noskipold When updating do not skip targets where no new index files and no files marked as already processed are available. If you changed a script to preprocess downloaded index files or changed a Listfilter, you most likely want to call reprepro with \-\-noskipold. .TP .B \-\-waitforlock \fIcount If there is a lockfile indicating another instance of reprepro is currently using the database, retry \fIcount\fP times after waiting for 10 seconds each time. The default is 0 and means to error out instantly. .TP .B \-\-spacecheck full\fR|\fPnone The default is \fBfull\fR: .br In the update commands, check for every to be downloaded file which filesystem it is on and how much space is left. .br To disable this behaviour, use \fBnone\fP. .TP .BI \-\-dbsafetymargin " bytes-count" If checking for free space, reserve \fIbyte-count\fP bytes on the filesystem containing the \fBdb/\fP directory. The default is 104857600 (i.e. 100MB), which is quite large. But as there is no way to know in advance how large the databases will grow and libdb is extremely touchy in that regard, lower only when you know what you do. 
.TP .BI \-\-safetymargin " bytes-count" If checking for free space, reserve \fIbyte-count\fP bytes on filesystems not containing the \fBdb/\fP directory. The default is 1048576 (i.e. 1MB). .TP .B \-\-noguessgpgtty Don't set the environment variable .BR GPG_TTY , even when it is not set, stdin is terminal and .B /proc/self/fd/0 is a readable symbolic link. .TP .B \-\-gnupghome Set the .B GNUPGHOME evnironment variable to the given directory as argument to this option. And your gpg will most likely use the content of this variable instead of "~/.gnupg". Take a look at .BR gpg (1) to be sure. This option in the command line is usually not very useful, as it is possible to set the environment variable directly. Its main reason for existance is that it can be used in \fIconf\fP\fB/options\fP. .TP .BI \-\-gunzip " gz-uncompressor" While reprepro links against \fBlibz\fP, it will look for the program given with this option (or \fBgunzip\fP if not given) and use that when uncompressing index files while downloading from remote repositories. (So that downloading and uncompression can happen at the same time). If the program is not found or is \fBNONE\fP (all-uppercase) then uncompressing will always be done using the built in uncompression method. The program has to accept the compressed file as stdin and write the uncompressed file into stdout. .TP .BI \-\-bunzip2 " bz2-uncompressor" When uncompressing downloaded index files or when not linked against \fBlibbz2\fP reprepro will use this program to uncompress \fB.bz2\fP files. The default value is \fBbunzip2\fP. If the program is not found or is \fBNONE\fP (all-uppercase) then uncompressing will always be done using the built in uncompression method or not be possible when not linked against \fBlibbz2\fP. The program has to accept the compressed file as stdin and write the uncompressed file into stdout. 
.TP .BI \-\-unlzma " lzma-uncompressor" When trying to uncompress or read \fBlzma\fP compressed files, this program will be used. The default value is \fBunlzma\fP. If the program is not found or is \fBNONE\fP (all-uppercase) then uncompressing lzma files will not be possible. The program has to accept the compressed file as stdin and write the uncompressed file into stdout. .TP .BI \-\-unxz " xz-uncompressor" When trying to uncompress or read \fBxz\fP compressed files, this program will be used. The default value is \fBunxz\fP. If the program is not found or is \fBNONE\fP (all-uppercase) then uncompressing xz files will not be possible. The program has to accept the compressed file as stdin and write the uncompressed file into stdout. .TP .BI \-\-lunzip " lzip-uncompressor" When trying to uncompress or read \fBlzip\fP compressed files, this program will be used. The default value is \fBlunzip\fP. If the program is not found or is \fBNONE\fP (all-uppercase) then uncompressing lz files will not be possible. The program has to accept the compressed file as stdin and write the uncompressed file into stdout. .TP .BI \-\-list\-max " count" Limits the output of \fBlist\fP, \fBlistmatched\fP and \fBlistfilter\fP to the first \fIcount\fP results. The default is 0, which means unlimited. .TP .BI \-\-list\-skip " count" Omits the first \fIcount\fP results from the output of \fBlist\fP, \fBlistmatched\fP and \fBlistfilter\fP. .TP .BI \-\-list\-format " format" Set the output format of \fBlist\fP, \fBlistmatched\fP and \fBlistfilter\fP commands. The format is similar to dpkg\-query's \fB\-\-showformat\fP: fields are specified as .BI ${ fieldname } or .BI ${ fieldname ; length }\fR.\fP Zero length or no length means unlimited. Positive numbers mean fill with spaces right, negative fill with spaces left. .BR \[rs]n ", " \[rs]r ", " \[rs]t ", " \[rs]0 are new-line, carriage-return, tabulator and zero-byte. Backslash (\fB\[rs]\fP) can be used to escape every non-letter-or-digit. 
The special field names .BR $identifier ", " $architecture ", " $component ", " $type ", " $codename denote where the package was found. The special field names .BR $source " and " $sourceversion denote the source and source version a package belongs to. (i.e. .B ${$source} will either be the same as .B ${source} (without a possible version in parentheses at the end) or the same as .BR ${package} . The special field names .BR $basename ", " $filekey " and " $fullfilename denote the first package file part of this entry (i.e. usually the .deb, .udeb or .dsc file) as basename, as filekey (filename relative to the outdir) and the full filename with outdir prepended (i.e. as relative or absolute as your outdir (or basedir if you did not set outdir) is). When \fB\-\-list\-format\fP is not given or \fBNONE\fP, then the default is equivalent to .br .BR "${$identifier} ${package} ${version}\[rs]n" . Escaping digits or letters not in above list, using dollars not escaped outside specified constructs, or any field names not listed as special and not consisting entirely out of letters, digits and minus signs have undefined behaviour and might change meaning without any further notice. .TP .B \-\-show\-percent When downloading packages, show each completed percent of completed package downloads together with the size of completely downloaded packages. (Repeating this option increases the frequency of this output). .TP .B \-\-onlysmalldeletes The pull and update commands will skip every distribution in which one target loses more than 20% of its packages (and at least 10). Using this option (or putting it in the options config file) can avoid removing large quantities of data but means you might often give .B \-\-noonlysmalldeletes to override it. .TP .B \-\-restrict \fIsrc\fP\fR[\fP=\fIversion\fP\fR|\fP:\fItype\fP\fR]\fP Restrict a \fBpull\fP or \fBupdate\fP to only act on packages belonging to source-package \fIsrc\fP. 
Any other package will not be updated (unless it matches a \fB\-\-restrict\-bin\fP). Only packages that would otherwise be updated or are at least marked with \fBhold\fP in a \fBFilterList\fP or \fBFilterSrcList\fP will be updated. The action can be restricted to a source version using an equal sign or changed to another type (see FilterList) using a colon. This option can be given multiple times to list multiple packages, but each package may only be named once (even when there are different versions or types). .TP .B \-\-restrict\-binary \fIname\fP\fR[\fP=\fIversion\fP\fR|\fP:\fItype\fP\fR]\fP Like \fB\-\-restrict\fP but restrict to binary packages (\fB.deb\fP and \fB.udeb\fP). Source packages are not upgraded unless they appear in a \fB\-\-restrict\fP. .TP .B \-\-restrict\-file \fIfilename\fP Like \fB\-\-restrict\fP but read a whole file in the \fBFilterSrcList\fP format. .TP .B \-\-restrict\-file\-bin \fIfilename\fP Like \fB\-\-restrict\-bin\fP but read a whole file in the \fBFilterList\fP format. .TP .B \-\-endhook \fIhookscript\fP Run the specified \fIhookscript\fP once reprepro exits. It will get the usual \fBREPREPRO_\fP* environment variables set (or unset) and additionally a variable \fBREPREPRO_EXIT_CODE\fP that is the exit code with which reprepro would have exited (the hook is always called once the initial parsing of global options and the command name is done, no matter if reprepro did anything or not). Reprepro will return to the calling process with the exitcode of this script. Reprepro has closed all its databases and removed all its locks, so you can run reprepro again in this script (unless someone else did so in the same repository before, of course). The only advantage over running that command always directly after reprepro is that you can have some environment variables set and cannot so easily forget it if this option is in conf/options. 
The script is supposed to be located relative to \fIconfdir\fP, unless its name starts with \fB/\fP, \fB./\fP, \fB+b/\fP, \fB+o/\fP, or \fB+c/\fP and the name may not start (except in the cases given before) with a \fB+\fP. An example script looks like: \fB #!/bin/sh if [ "$REPREPRO_EXIT_CODE" -ne 0 ] ; then exit "$REPREPRO_EXIT_CODE" fi echo "congratulations, reprepro with arguments: $*" echo "seems to have run successfully. REPREPRO_ part of the environment is:" set | grep ^REPREPRO_ exit 0 \fP .TP .B \-\-outhook \fIhookscript\fP \fIhookscript\fP is called with a \fB.outlog\fP file as argument (located in \fIlogdir\fP) containing a description of all changes made to \fIoutdir\fP. The script is supposed to be located relative to \fIconfdir\fP, unless its name starts with \fB/\fP, \fB./\fP, \fB+b/\fP, \fB+o/\fP, or \fB+c/\fP and the name may not start (except in the cases given before) with a \fB+\fP. For a format of the \fB.outlog\fP files generated for this script see the \fBmanual.html\fP shipped with reprepro. .SH COMMANDS .TP .BR export " [ " \fIcodenames\fP " ]" Generate all index files for the specified distributions. This regenerates all files unconditionally. It is only useful if you want to be sure \fBdists\fP is up to date, you called some other actions with \fB\-\-export=never\fP before or you want to create an initial empty but fully equipped .BI dists/ codename directory. .TP .RB " [ " \-\-delete " ] " createsymlinks " [ " \fIcodenames\fP " ]" Creates \fIsuite\fP symbolic links in the \fBdists/\fP-directory pointing to the corresponding \fIcodename\fP. 
It will not create links, when multiple of the given codenames would be linked from the same suite name, or if the link already exists (though when \fB\-\-delete\fP is given it will delete already existing symlinks) .TP .B list \fIcodename\fP \fR[\fP \fIpackagename\fP \fR]\fP List all packages (source and binary, except when .B \-T or .B \-A is given) with the given name in all components (except when .B \-C is given) and architectures (except when .B \-A is given) of the specified distribution. If no package name is given, list everything. The format of the output can be changed with \fB\-\-list\-format\fP. To only get parts of the result, use \fB\-\-list\-max\fP and \fB\-\-list\-skip\fP. .TP .B listmatched \fIcodename\fP \fIglob\fP as list, but does not list a single package, but all packages matching the given shell-like \fIglob\fP. (i.e. \fB*\fP, \fB?\fP and \fB[\fP\fIchars\fP\fB]\fP are allowed). Examples: .B reprepro \-b . listmatched test2 'linux\-*' lists all packages starting with \fBlinux\-\fP. .TP .B listfilter \fIcodename\fP \fIcondition\fP as list, but does not list a single package, but all packages matching the given condition. The format of the formulas is those of the dependency lines in Debian packages' control files with some extras. That means a formula consists of names of fields with a possible condition for its content in parentheses. These atoms can be combined with an exclamation mark '!' (meaning not), a pipe symbol '|' (meaning or) and a comma ',' (meaning and). Additionally parentheses can be used to change binding (otherwise '!' binds more than '|' than ','). The values given in the search expression are directly alphabetically compared to the headers in the respective index file. 
That means that each part \fIFieldname\fP\fB (\fP\fIcmp\fP\fB \fP\fIvalue\fP\fB)\fP of the formula will be true for exactly those packages that have in the \fBPackage\fP or \fBSources\fP file a line starting with \fIfieldname\fP and a value is alphabetically \fIcmp\fP to \fIvalue\fP. Additionally since reprepro 3.11.0, '\fB%\fP' can be used as comparison operator, denoting matching a name with shell like wildcard (with '\fB*\fP', '\fB?\fP' and '\fB[\fP..\fB]\fP'). The special field names starting with '\fB$\fP' have special meaning (available since 3.11.1): .B $Version The version of the package, comparison is not alphabetically, but as Debian version strings. .B $Source The source name of the package. .B $SourceVersion The source version of the package. .B $Architecture The architecture the package is in (listfilter) or to be put into. .B $Component The component the package is in (listfilter) or to be put into. .B $Packagetype The packagetype of the package. Examples: .B reprepro \-b . listfilter test2 'Section (== admin)' will list all packages in distribution test2 with a Section field and the value of that field being \fBadmin\fP. .B reprepro \-b . \-T deb listfilter test2 'Source (== \fIblub\fP) | ( !Source , Package (== \fIblub\fP) )' will find all .deb Packages with either a Source field blub or no Source field and a Package field blub. (That means all packages generated by a source package \fIblub\fP, except those also specifying a version number with its Source). .B reprepro \-b . \-T deb listfilter test2 '$Source (==\fIblub\fP)' is the better way to do this (but only available since 3.11.1). .B reprepro \-b . listfilter test2 '$PackageType (==deb), $Source (==\fIblub\fP)' is another (less efficient) way. .B reprepro \-b . listfilter test2 'Package (% linux\-*\-2.6*)' lists all packages with names starting with \fBlinux\-\fP and later having an \fB\-2.6\fP. .TP .B ls \fIpackage-name\fP List the versions of the specified package in all distributions. 
.TP .B lsbycomponent \fIpackage-name\fP Like ls, but group by component (and print component names). .TP .B remove \fIcodename\fP \fIpackage-names\fP Delete all packages in the specified distribution, that have their package name listed as argument. (i.e. remove all packages \fBlist\fP with the same arguments and options would list, except that an empty package list is not allowed.) Note that like any other operation removing or replacing a package, the old package's files are unreferenced and thus may be automatically deleted if this was their last reference and no \fB\-\-keepunreferencedfiles\fP specified. .TP .B removematched \fIcodename\fP \fIglob\fP Delete all packages \fBlistmatched\fP with the same arguments would list. .TP .B removefilter \fIcodename\fP \fIcondition\fP Delete all packages \fBlistfilter\fP with the same arguments would list. .TP .B removesrc \fIcodename\fP \fIsource-name\fP \fR[\fP\fIversion\fP\fR]\fP Remove all packages in distribution \fIcodename\fP belonging to source package \fIsource-name\fP. (Limited to those with source version \fIversion\fP if specified). If package tracking is activated, it will use that information to find the packages, otherwise it traverses all package indices for the distribution. .TP .B removesrcs \fIcodename\fP \fIsource-name\fP\fR[\fP=\fIversion\fP\fR]\fP \fI...\fP Like \fBremovesrc\fP, but can be given multiple source names and source versions must be specified by appending '\fB=\fP' and the version to the name (without spaces). .TP .BR update " [ " \fIcodenames\fP " ]" Sync the specified distributions (all if none given) as specified in the config with their upstreams. See the description of .B conf/updates below. .TP .BR checkupdate " [ " \fIcodenames\fP " ]" Same like .BR update , but will show what it will change instead of actually changing it. .TP .BR dumpupdate " [ " \fIcodenames\fP " ]" Same like .BR checkupdate , but less suitable for humans and more suitable for computers. 
.TP .BR predelete " [ " \fIcodenames\fP " ]" This will determine which packages an \fBupdate\fP would delete or replace and remove those packages. This can be useful for reducing space needed while upgrading, but there will be some time where packages have vanished from the lists so clients will mark them as obsolete. Plus if you cannot download an updated package in the (hopefully) following update run, you will end up with no package at all instead of an old one. This will also blow up \fB.diff\fP files if you are using the tiffany example or something similar. So be careful when using this option or better get some more space so that update works. .TP .B cleanlists Delete all files in \fIlistdir\fP (default \fIbasedir\fP\fB/lists\fP) that do not belong to any update rule for any distribution. I.e. all files are deleted in that directory that no \fBupdate\fP command in the current configuration can use. (The files are usually left there, so if they are needed again they do not need to be downloaded again. Though in many easy cases not even those files will be needed.) .TP .BR pull " [ " \fIcodenames\fP " ]" pull in newer packages into the specified distributions (all if none given) from other distributions in the same repository. See the description of .B conf/pulls below. .TP .BR checkpull " [ " \fIcodenames\fP " ]" Same like .BR pull , but will show what it will change instead of actually changing it. .TP .BR dumppull " [ " \fIcodenames\fP " ]" Same like .BR checkpull , but less suitable for humans and more suitable for computers. .TP .B includedeb \fIcodename\fP \fI.deb-filename\fP Include the given binary Debian package (.deb) in the specified distribution, applying override information and guessing all values not given and guessable. .TP .B includeudeb \fIcodename\fP \fI.udeb-filename\fP Same like \fBincludedeb\fP, but for .udeb files. 
.TP .B includedsc \fIcodename\fP \fI.dsc-filename\fP Include the given Debian source package (.dsc, including other files like .orig.tar.gz, .tar.gz and/or .diff.gz) in the specified distribution, applying override information and guessing all values not given and guessable. Note that .dsc files do not contain section or priority, but the Sources.gz file needs them. reprepro tries to parse .diff and .tar files for it, but is only able to resolve easy cases. If reprepro fails to extract those automatically, you have to either specify a DscOverride or give them via .B \-S and .B \-P .TP .B include \fIcodename\fP \fI.changes-filename\fP Include in the specified distribution all packages found and suitable in the \fI.changes\fP file, applying override information and guessing all values not given and guessable. .TP .B processincoming \fIrulesetname\fP \fR[\fP\fI.changes-file\fP\fR]\fP Scan an incoming directory and process the .changes files found there. If a filename is supplied, processing is limited to that file. .I rulesetname identifies which rule-set in .B conf/incoming determines which incoming directory to use and in what distributions to allow packages into. See the section about this file for more information. .TP .BR check " [ " \fIcodenames\fP " ]" Check if all packages in the specified distributions have all files needed properly registered. .TP .BR checkpool " [ " fast " ]" Check if all files believed to be in the pool are actually still there and have the known md5sum. When .B fast is specified md5sum is not checked. .TP .BR collectnewchecksums Calculate all supported checksums for all files in the pool. (Versions prior to 3.3 did only store md5sums, 3.3 added sha1, 3.5 added sha256). .TP .BR translatelegacychecksums Remove the legacy \fBfiles.db\fP file after making sure all information is also found in the new \fBchecksums.db\fP file. (Alternatively you can call \fBcollectnewchecksums\fP and remove the file on your own.) 
.TP .B rereference Forget which files are needed and recollect this information. .TP .B dumpreferences Print out which files are marked to be needed by whom. .TP .B dumpunreferenced Print a list of all files believed to be in the pool, that are not known to be needed. .TP .B deleteunreferenced Remove all known files (and forget them) in the pool not marked to be needed by anything. .TP .BR deleteifunreferenced " [ " \fIfilekeys\fP " ]" Remove the given files (and forget them) in the pool if they are not marked to be used by anything. If no command line arguments are given, stdin is read and every line treated as one filekey. This is mostly useful together with \fB\-\-keepunreferenced\fP in \fBconf/options\fP or in situations where one does not want to run \fBdeleteunreferenced\fP, which removes all files eligible to be deleted with this command. .TP .BR reoverride " [ " \fIcodenames\fP " ]" Reapply the override files to the given distributions (Or only parts thereof given by \fB\-A\fP,\fB\-C\fP or \fB\-T\fP). Note: only the control information is changed. Changing a section to a value, that would cause another component to be guessed, will not cause any warning. .TP .BR redochecksums " [ " \fIcodenames\fP " ]" Readd the information about file checksums to the package indices. Usually the package's control information is created at inclusion time or imported from some remote source and not changed later. This command modifies it to readd missing checksum types. Only checksums already known are used. To update known checksums about files run \fBcollectnewchecksums\fP first. .TP .BR dumptracks " [ " \fIcodenames\fP " ]" Print out all information about tracked source packages in the given distributions. .TP .BR retrack " [ " \fIcodenames\fP " ]" Recreate a tracking database for the specified distributions. This consists of three steps. First all files marked as part of a source package are set to unused. Then all files actually used are marked as thus. 
Finally tidytracks is called to remove everything no longer needed with the new information about used files. (This behaviour, though a bit longsome, keeps even files only kept because of tracking mode \fBkeep\fP and files not otherwise used but kept due to \fBincludechanges\fP or its relatives. Before version 3.0.0 such files were lost by running retrack). .TP .BR removealltracks " [ " \fIcodenames\fP " ]" Removes all source package tracking information for the given distributions. .TP .B removetrack " " \fIcodename\fP " " \fIsourcename\fP " " \fIversion\fP Remove the trackingdata of the given version of a given sourcepackage from a given distribution. This also removes the references for all used files. .TP .BR tidytracks " [ " \fIcodenames\fP " ]" Check all source package tracking information for the given distributions for files no longer to keep. .TP .B copy \fIdestination-codename\fP \fIsource-codename\fP \fIpackages...\fP Copy the given packages from one distribution to another. The packages are copied verbatim, no override files are consulted. Only components and architectures present in the source distribution are copied. .TP .B copysrc \fIdestination-codename\fP \fIsource-codename\fP \fIsource-package\fP \fR[\fP\fIversions\fP\fR]\fP look at each package (where package means, as usual, every package be it dsc, deb or udeb) in the distribution specified by \fIsource-codename\fP and identifies the relevant source package for each. All packages matching the specified \fIsource-package\fP name (and any \fIversion\fP if specified) are copied to the \fIdestination-codename\fP distribution. The packages are copied verbatim, no override files are consulted. Only components and architectures present in the source distribution are copied. .TP .B copymatched \fIdestination-codename\fP \fIsource-codename\fP \fIglob\fP Copy packages matching the given glob (see \fBlistmatched\fP). The packages are copied verbatim, no override files are consulted. 
Only components and architectures present in the source distribution are copied. .TP .B copyfilter \fIdestination-codename\fP \fIsource-codename\fP \fIformula\fP Copy packages matching the given formula (see \fBlistfilter\fP). (all versions if no version is specified). The packages are copied verbatim, no override files are consulted. Only components and architectures present in the source distribution are copied. .TP .B restore \fIcodename\fP \fIsnapshot\fP \fIpackages...\fP .TP .B restoresrc \fIcodename\fP \fIsnapshot\fP \fIsource-package\fP \fR[\fP\fIversions\fP\fR]\fP .TP .B restorefilter \fIdestination-codename\fP \fIsnapshot\fP \fIformula\fP Like the copy commands, but do not copy from another distribution, but from a snapshot generated with \fBgensnapshot\fP. Note that this blindly trusts the contents of the files in your \fBdists/\fP directory and does no checking. .TP .B clearvanished Remove all package databases that no longer appear in \fBconf/distributions\fP. If \fB\-\-delete\fP is specified, it will not stop if there are still packages left. Even without \fB\-\-delete\fP it will unreference files still marked as needed by this target. (Use \fB\-\-keepunreferenced\fP to not delete them if that was the last reference.) Do not forget to remove all exported package indices manually. .TP .B gensnapshot " " \fIcodename\fP " " \fIdirectoryname\fP Generate a snapshot of the distribution specified by \fIcodename\fP in the directory \fIdists\fB/\fIcodename\fB/snapshots/\fIdirectoryname\fB/\fR and reference all needed files in the pool as needed by that. No Content files are generated and no export hooks are run. Note that there is currently no automated way to remove that snapshot again (not even clearvanished will unlock the referenced files after the distribution itself vanished). 
You will have to remove the directory yourself and tell reprepro to \fB_removereferences s=\fP\fIcodename\fP\fB=\fP\fIdirectoryname\fP before \fBdeleteunreferenced\fP will delete the files from the pool locked by this. To access such a snapshot with apt, add something like the following to your sources.list file: .br \fBdeb method://as/without/snapshot \fIcodename\fB/snapshots/\fIname\fB main\fR .TP .BR rerunnotifiers " [ " \fIcodenames\fP " ]" Run all external scripts specified in the \fBLog:\fP options of the specified distributions. .TP .B build\-needing \fIcodename\fP \fIarchitecture\fP \fR[\fP \fIglob\fP \fR]\fP List source packages (matching \fIglob\fP) that likely need a build on the given architecture. List all source packages in the given distribution without a binary package of the given architecture built from that version of the source, without a \fB.changes\fP or \fB.log\fP file for the given architecture, with an Architecture field including \fBany\fP, \fIos\fP\fB-any\fP (with \fIos\fP being the part before the hyphen in the architecture or \fBlinux\fP if there is no hyphen) or the architecture and at least one package in the Binary field not yet available. If instead of \fIarchitecture\fP the term \fBany\fP is used, all architectures are iterated and the architecture is printed as fourth field in every line. If the \fIarchitecture\fP is \fBall\fP, then only source packages with an Architecture field including \fBall\fP are considered (i.e. as above with real architectures but \fBany\fP does not suffice). Note that dpkg\-dev << 1.16.1 does not both set \fBany\fP and \fBall\fP so source packages building both architecture dependent and independent packages will never show up unless built with a new enough dpkg\-source). .TP .B translatefilelists Translate the file list cache within .IB db /contents.cache.db into the new format used since reprepro 3.0.0. 
Make sure you have at least half of the space of the current .IB db /contents.cache.db file size available in that partition. .TP .B flood \fIdistribution\fP \fR[\fP\fIarchitecture\fP\fR]\fP For each architecture of \fIdistribution\fP or for the one specified add architecture \fBall\fP packages from other architectures (but the same component or packagetype) under the following conditions: Packages are only upgraded, never downgraded. If there is a package not being architecture \fBall\fP, then architecture \fBall\fP packages of the same source from the same source version are preferred over those that have no such binary sibling. Otherwise the package with the highest version wins. You can restrict which architectures are searched for architecture \fBall\fP packages using \fB\-A\fP and which components/packagetypes are flooded by \fB\-C\fP/\fB\-T\fP as usual. There are mostly two use cases for this command: If you added a new distribution and want to copy all architecture all packages to it. Or if you included some architecture all packages only to some architectures using \fB\-A\fP to avoid breaking the other architectures for which the binary packages were still missing and now want to copy it to those architectures where they are unlikely to break something (because a new binary is already available). .TP .B unusedsources \fR[\fP\fIdistributions\fP\fR]\fP List all source packages for which no binary package built from them is found. .TP .B sourcemissing \fR[\fP\fIdistributions\fP\fR]\fP List all binary packages for which no source package is found (the source package must be in the same distribution, but source packages only kept by package tracking is enough). .TP .B reportcruft \fR[\fP\fIdistributions\fP\fR]\fP List all source package versions that either have a source package and no longer a binary package or binary packages left without source package in the index. 
(Unlike sourcemissing, also list packages where the source package is only in the pool due to enabled tracking but no longer in the index). .TP .BR sizes " [ " \fIcodenames\fP " ]" List the size of all packages in the distributions specified or in all distributions. Each row contains 4 numbers, each being a number of bytes in a set of packages, which are: The packages in this distribution (including anything only kept because of tracking), the packages only in this distribution (anything in this distribution and a snapshot of this distribution counts as only in this distribution), the packages in this distribution and its snapshots, the packages only in this distribution or its snapshots. If more than one distribution is selected, also list a sum of those (in which 'Only' means only in selected ones, and not only only in one of the selected ones). .TP .BR repairdescriptions " [ " \fIcodenames\fP " ]" Look for binary packages only having a short description and try to get the long description from the .deb file (and also remove a possible Description-md5 in this case). The variant \fBforcerepairdescriptions\fP also replaces descriptions that do not match the previous short Description or the Description-md5 header. .SS internal commands These are hopefully never needed, but allow manual intervention. .B WARNING: It is quite easy to get into an inconsistent and/or unfixable state. .TP .BR _detect " [ " \fIfilekeys\fP " ]" Look for the files, which \fIfilekey\fP is given as argument or as a line of the input (when run without arguments), and calculate their md5sum and add them to the list of known files. (Warning: this is a low level operation, no input validation or normalization is done.) .TP .BR _forget " [ " \fIfilekeys\fP " ]" Like .B _detect but remove the given \fIfilekey\fP from the list of known files. (Warning: this is a low level operation, no input validation or normalization is done.) .TP .B _listmd5sums Print a list of all known files and their md5sums. 
.TP .B _listchecksums Print a list of all known files and their recorded checksums. .TP .B _addmd5sums alias for the newer .TP .B _addchecksums Add information of known files (without any check done) in the strict format of _listchecksums output (i.e. don't dare to use a single space anywhere more than needed). .TP .BI _dumpcontents " identifier" Print out all the stored information of the specified part of the repository. (Or in other words, the content the corresponding Packages or Sources file would get) .TP .BI "_addreference " filekey " " identifier Manually mark \fIfilekey\fP to be needed by \fIidentifier\fP .TP .BI "_removereferences " identifier Remove all references to what is needed by .I identifier. .TP .BI __extractcontrol " .deb-filename" Look what reprepro believes to be the content of the .B control file of the specified .deb-file. .TP .BI __extractfilelist " .deb-filename" Look what reprepro believes to be the list of files of the specified .deb-file. .TP .BI _fakeemptyfilelist " filekey" Insert an empty filelist for \fIfilekey\fP. This is an evil hack around broken .deb files that cannot be read by reprepro. .TP .B _addpackage \fIcodename\fP \fIfilename\fP \fIpackages...\fP Add packages from the specified filename to part specified by \fB\-C\fP \fB\-A\fP and \fB\-T\fP of the specified distribution. Very strange things can happen if you use it improperly. .TP .B __dumpuncompressors List which compression formats can be uncompressed and how. .TP .BI __uncompress " format compressed-file uncompressed-file" Use builtin or external uncompression to uncompress the specified file of the specified format into the specified target. .TP .B _listconfidentifiers \fIidentifier\fP \fR[\fP \fIdistributions...\fP \fR]\fP Print - one per line - all identifiers of subdatabases as derived from the configuration. If a list of distributions is given, only identifiers of those are printed. 
.TP .B _listdbidentifiers \fIidentifier\fP \fR[\fP \fIdistributions...\fP \fR]\fP Print - one per line - all identifiers of subdatabases in the current database. This will be a subset of the ones printed by \fB_listconfidentifiers\fP or most commands but \fBclearvanished\fP will refuse to run, and depending on the database compatibility version, will include all those if reprepro was run since the config was last changed. .SH "CONFIG FILES" .B reprepro uses three config files, which are searched in the directory specified with .B \-\-confdir or in the .B conf/ subdirectory of the \fIbasedir\fP. If a file .B options exists, it is parsed line by line. Each line can be the long name of a command line option (without the \-\-) plus an argument, where possible. Those are handled as if they were command line options given before (and thus lower priority than) any other command line option. (and also lower priority than any environment variable). To allow command line options to override options file options, most boolean options also have a corresponding form starting with \fB\-\-no\fP. (The only exception is when the path to look for config files changes, the options file will only be opened once and of course before any options within the options file are parsed.) The file .B distributions is always needed and describes what distributions to manage, while .B updates is only needed when syncing with external repositories and .B pulls is only needed when syncing with repositories in the same reprepro database. The last three are in the format control files in Debian are in, i.e. paragraphs separated by empty lines consisting of fields. Each field consists of a fieldname, followed by a colon, possible whitespace and the data. A field ends with a newline not followed by a space or tab. Lines starting with # as first character are ignored, while in other lines the # character and everything after it till the newline character are ignored. 
A paragraph can also consist of only a single field .RB \(dq !include: \(dq which causes the named file (relative to confdir unless starting with .BR ~/ ", " +b/ ", " +c/ " or " / " )" to be read as if it was found at this place. Each of the three files or a file included as described above can also be a directory, in which case all files it contains with a filename ending in .B .conf and not starting with .B . are read. .SS conf/distributions .TP .B Codename This required field is the unique identifier of a distribution and used as directory name within .B dists/ It is also copied into the Release files. Note that this name is not supposed to change. You most likely \fBnever ever\fP want a name like \fBtesting\fP or \fBstable\fP here (those are suite names and supposed to point to another distribution later). .TP .B Suite This optional field is simply copied into the Release files. In Debian it contains names like stable, testing or unstable. To create symlinks from the Suite to the Codename, use the \fBcreatesymlinks\fP command of reprepro. .TP .B FakeComponentPrefix If this field is present, its argument is added - separated by a slash - before every Component written to the main Release file (unless the component already starts with it), and removed from the end of the Codename and Suite fields in that file. Also if a component starts with it, its directory in the dists dir is shortened by this. 
.br So \fB Codename: bla/updates Suite: foo/updates FakeComponentPrefix: updates Components: main bad \fP will create a Release file with \fB Codename: bla Suite: foo Components: updates/main updates/bad \fP in it, but otherwise nothing is changed, while\fB Codename: bla/updates Suite: foo/updates FakeComponentPrefix: updates Components: updates/main updates/bad \fP will also create a Release file with \fB Codename: bla Suite: foo Components: updates/main updates/bad \fP but the packages will actually be in the components \fBupdates/main\fP and \fBupdates/bad\fP, most likely causing the same file using duplicate storage space. This makes the distribution look more like Debian's security archive, thus work around problems with apt's workarounds for that. .TP .B AlsoAcceptFor A list of distribution names. When a \fB.changes\fP file is told to be included into this distribution with the \fBinclude\fP command and the distribution header of that file is neither the codename, nor the suite name, nor any name from the list, a \fBwrongdistribution\fP error is generated. The \fBprocess_incoming\fP command will also use this field, see the description of \fBAllow\fP and \fBDefault\fP from the \fBconf/incoming\fP file for more information. .TP .B Version This optional field is simply copied into the Release files. .TP .B Origin This optional field is simply copied into the Release files. .TP .B Label This optional field is simply copied into the Release files. .TP .B NotAutomatic This optional field is simply copied into the Release files. (The value is handled as an arbitrary string, though anything but \fByes\fP does not make much sense right now.) .TP .B ButAutomaticUpgrades This optional field is simply copied into the Release files. (The value is handled as an arbitrary string, though anything but \fByes\fP does not make much sense right now.) .TP .B Description This optional field is simply copied into the Release files. 
.TP .B Architectures This required field lists the binary architectures within this distribution and if it contains .B source (i.e. if there is an item .B source in this line this Distribution has source. All other items specify things to be put after "binary\-" to form directory names and be checked against "Architecture:" fields.) This will also be copied into the Release files. (With exception of the .B source item, which will not occur in the topmost Release file whether it is present here or not) .TP .B Components This required field lists the components of a distribution. See .B GUESSING for rules which component packages are included into by default. This will also be copied into the Release files. .TP .B UDebComponents Components with a debian\-installer subhierarchy containing .udebs. (E.g. simply "main") .TP .B Update When this field is present, it describes which update rules are used for this distribution. There also can be a magic rule minus ("\-"), see below. .TP .B Pull When this field is present, it describes which pull rules are used for this distribution. Pull rules are like Update rules, but get their stuff from other distributions and not from external sources. See the description for \fBconf/pulls\fP. .TP .B SignWith When this field is present, a Release.gpg file will be generated. If the value is "yes" or "default", the default key of gpg is used. If the field starts with an exclamation mark ("!"), the given script is executed to do the signing. Otherwise the value will be given to libgpgme to determine the key to use. If there are problems with signing, you can try .br .B gpg \-\-list\-secret\-keys \fIvalue\fP .br to see how gpg could interpret the value. If that command does not list any keys or multiple ones, try to find some other value (like the keyid), that gpg can more easily associate with a unique key. If this key has a passphrase, you need to use gpg\-agent or the insecure option \fB\-\-ask\-passphrase\fP. 
A '\fB!\fP' hook script is looked for in the confdir, unless it starts with .BR ~/ ", " ./ ", " +b/ ", " +o/ ", " +c/ " or " / " ." It gets three command line arguments: The filename to sign, an empty argument or the filename to create with an inline signature (i.e. InRelease) and an empty argument or the filename to create a detached signature (i.e. Release.gpg). The script may generate no Release.gpg file if it chooses to (then the repository will look like unsigned for older clients), but generating empty files is not allowed. Reprepro waits for the script to finish and will abort the exporting of the distribution this signing is part of unless the script returns normally with exit code 0. Using a space after ! is recommended to avoid incompatibilities with possible future extensions. .TP .B DebOverride When this field is present, it describes the override file used when including .deb files. .TP .B UDebOverride When this field is present, it describes the override file used when including .udeb files. .TP .B DscOverride When this field is present, it describes the override file used when including .dsc files. .TP .B DebIndices\fR, \fBUDebIndices\fR, \fBDscIndices Choose what kind of Index files to export. The first part describes what the Index file shall be called. The second argument determines the name of a Release file to generate or not to generate if missing. Then at least one of "\fB.\fP", "\fB.gz\fP" or "\fB.bz2\fP" specifying whether to generate uncompressed output, gzipped output, bzip2ed output or any combination. (bzip2 is only available when compiled with bzip2 support, so it might not be available when you compiled it on your own). If an argument not starting with dot follows, it will be executed after all index files are generated. (See the examples for what argument this gets). The default is: .br DebIndices: Packages Release . .gz .br UDebIndices: Packages . 
.gz .br DscIndices: Sources Release .gz .TP .B Contents Enable the creation of Contents files listing all the files within the binary packages of a distribution. (Which is quite slow, you have been warned). In earlier versions, the first argument was a rate at which to extract file lists. As this did not work and was no longer easily possible after some factorisation, this is no longer supported. The arguments of this field is a space separated list of options. If there is a \fBudebs\fP keyword, \fB.udeb\fPs are also listed (in a file called \fBuContents\-\fP\fIarchitecture\fP.) If there is a \fBnodebs\fP keyword, \fB.deb\fPs are not listed. (Only useful together with \fBudebs\fP) If there is at least one of the keywords \fB.\fP, \fB.gz\fP and/or \fB.bz2\fP, the Contents files are written uncompressed, gzipped and/or bzip2ed instead of only gzipped. If there is a \fBpercomponent\fP then one Contents\-\fIarch\fP file per component is created. If there is a \fBallcomponents\fP then one global Contents\-\fIarch\fP file is generated. If both are given, both are created. If none of both is specified then \fBpercomponent\fP is taken as default (earlier versions had other defaults). The switches \fBcompatsymlink\fP or \fBnocompatsymlink\fP (only possible if \fBallcomponents\fP was not specified explicitly) control whether a compatibility symlink is created so old versions of apt\-file looking for the component independent filenames at least see the contents of the first component. Unless \fBallcomponents\fP is given, \fBcompatsymlinks\fP currently is the default, but that will change in some future (current estimate: after wheezy was released) .TP .B ContentsArchitectures Limit generation of Contents files to the architectures given. If this field is not there, all architectures are processed. An empty field means no architectures are processed, thus not very useful. 
.TP .B ContentsComponents Limit what components are processed for the \fBContents\-\fP\fIarch\fP files to the components given. If this field is not there, all components are processed. An empty field is equivalent to specifying \fBnodebs\fP in the \fBContents\fP field, while a non-empty field overrides a \fBnodebs\fP there. .TP .B ContentsUComponents Limit what components are processed for the uContents files to the components given. If this field is not there and there is the \fBudebs\fP keyword in the Contents field, all .udebs of all components are put in the \fBuContents\-\fP\fIarch\fP files. If this field is not there and there is no \fBudebs\fP keyword in the Contents field, no \fBuContents\-\fP\fIarch\fP files are generated at all. A non-empty field implies generation of \fBuContents\-\fP\fIarch\fP files (just like the \fBudebs\fP keyword in the Contents field), while an empty one causes no \fBuContents\-\fP\fIarch\fP files to be generated. .TP .B Uploaders Specifies a file (relative to confdir if not starting with .BR ~/ ", " +b/ ", " +c/ " or " / " )" to specify who is allowed to upload packages. Without this there are no limits, and this file can be ignored via \fB\-\-ignore=uploaders\fP. See the section \fBUPLOADERS FILES\fP below. .TP .B Tracking Enable the (experimental) tracking of source packages. The argument list needs to contain exactly one of the following: .br .B keep Keeps all files of a given source package, until that is deleted explicitly via \fBremovetrack\fP. This is currently the only possibility to keep older packages around when all indices contain newer files. .br .B all Keep all files belonging to a given source package until the last file of it is no longer used within that distribution. .br .B minimal Remove files no longer included in the tracked distribution. (Remove changes, logs and includebyhand files once no file is in any part of the distribution). 
.br And any number of the following (or none): .br .B includechanges Add the .changes file to the tracked files of a source package. Thus it is also put into the pool. .br .B includebyhand Add \fBbyhand\fP and \fBraw\-\fP\fI*\fP files to the tracked files and thus in the pool. .br .B includelogs Add log files to the tracked files and thus in the pool. (Note that putting log files in changes files is a reprepro extension not found in normal changes files) .br .B embargoalls Not yet implemented. .br .B keepsources Even when using minimal mode, do not remove source files until no file is needed any more. .br .B needsources Not yet implemented. .TP .B Log Specify a file to log additions and removals of this distribution into and/or external scripts to call when something is added or removed. The rest of the \fBLog:\fP line is the filename, every following line (as usual, have to begin with a single space) the name of a script to call. The name of the script may be preceded with options of the form \fB\-\-type=\fP(\fBdsc\fP|\fBdeb\fP|\fBudeb\fP), \fB\-\-architecture=\fP\fIname\fP or \fB\-\-component=\fP\fIname\fP to only call the script for some parts of the distribution. A script with argument \fB\-\-changes\fP is called when a \fB.changes\fP file was accepted by \fBinclude\fP or \fBprocessincoming\fP (and with other arguments). Both types of scripts can have a \fB\-\-via=\fP\fIcommand\fP specified, in which case it is only called when caused by reprepro command \fIcommand\fP. For information how it is called and some examples take a look at manual.html in reprepro's source or .B /usr/share/doc/reprepro/ If the filename for the log files does not start with a slash, it is relative to the directory specified with \fB\-\-logdir\fP, the scripts are relative to \fB\-\-confdir\fP unless starting with .BR ~/ ", " +b/ ", " +c/ " or " / . 
.TP .B ValidFor If this field exists, a Valid\-Until field is put into generated .B Release files for this distribution with a date as much in the future as the argument specifies. The argument has to be a number followed by one of the units .BR d ", " m " or " y , where \fBd\fP means days, \fBm\fP means 31 days and \fBy\fP means 365 days. So .B ValidFor: 1m 11 d causes the generation of a .B Valid\-Until: header in Release files that points 42 days into the future. .TP .B ReadOnly Disallow all modifications of this distribution or its directory in \fBdists/\fP\fIcodename\fP (with the exception of snapshot subdirectories). .TP .B ByHandHooks This specifies hooks to call for handling byhand/raw files by processincoming (and in future versions perhaps by include). Each line consists of 4 arguments: A glob pattern for the section (classically \fBbyhand\fP, though Ubuntu uses \fBraw\-\fP*), a glob pattern for the priority (not usually used), and a glob pattern for the filename. The 4th argument is the script to be called when all of the above match. It gets 5 arguments: the codename of the distribution, the section (usually \fBbyhand\fP), the priority (usually only \fB\-\fP), the filename in the changes file and the full filename (with processincoming in the secure TmpDir). .SS conf/updates .TP .B Name The name of this update\-upstream as it can be used in the .B Update field in conf/distributions. .TP .B Method A URI as one could also give it apt, e.g. .I http://ftp.debian.de/debian which is simply given to the corresponding .B apt\-get method. (So either .B apt\-get has to be installed, or you have to point with .B \-\-methoddir to a place where such methods are found.) .TP .B Fallback (Still experimental:) A fallback URI, where all files are tried that failed the first one. They are given to the same method as the previous URI (e.g. both http://), and the fallback-server must have everything at the same place. 
No recalculation is done, but single files are just retried from this location. .TP .B Config This can contain any number of lines, each in the format .B apt\-get \-\-option would expect. (Multiple lines \(hy as always \(hy marked with leading spaces). .P For example: Config: Acquire::Http::Proxy=http://proxy.yours.org:8080 .TP .B From The name of another update rule this rule derives from. The rule containing the \fBFrom\fP may not contain .BR Method ", " Fallback " or " Config "." All other fields are used from the rule referenced in \fBFrom\fP, unless found in this containing the \fBFrom\fP. The rule referenced in \fBFrom\fP may itself contain a \fBFrom\fP. Reprepro will only assume two remote index files are the same, if both get their \fBMethod\fP information from the same rule. .TP .B Suite The suite to update from. If this is not present, the codename of the distribution using this one is used. Also "*/whatever" is replaced by "\fIcodename\fP/whatever" .TP .B Components The components to update. Each item can be either the name of a component or a pair of an upstream component and a local component separated with ">". (e.g. "main>all contrib>all non\-free>notall") If this field is not there, all components from the distribution to update are tried. An empty field means no source or .deb packages are updated by this rule, but only .udeb packages, if there are any. A rule might list components not available in all distributions using this rule. In this case unknown components are silently ignored. (Unless you start reprepro with the \fB\-\-fast\fP option, it will warn about components unusable in all distributions using that rule. As exceptions, unusable components called \fBnone\fP are never warned about, for compatibility with versions prior to 3.0.0 where an empty field had a different meaning.) .TP .B Architectures The architectures to update. If omitted all from the distribution to update from. 
(As with components, you can use ">" to download from one architecture and add into another one. (This only determines in which Package list they land, it neither overwrites the Architecture line in its description, nor the one in the filename determined from this one. In other words, it is not really useful without additional filtering)) .TP .B UDebComponents Like .B Components but for the udebs. .TP .B VerifyRelease Download the .B Release.gpg file and check if it is a signature of the .B Release file with the key given here. (In the Format as "gpg \-\-with\-colons \-\-list\-key" prints it, i.e. the last 16 hex digits of the fingerprint) Multiple keys can be specified by separating them with a "\fB|\fP" sign. Then finding a signature from one of them will suffice. To allow revoked or expired keys, add a "\fB!\fP" behind a key. (but to accept such signatures, the appropriate \fB\-\-ignore\fP is also needed). To also allow subkeys of a specified key, add a "\fB+\fP" behind a key. .TP .B IgnoreRelease: yes If this is present, no .B InRelease or .B Release file will be downloaded and thus the md5sums of the other index files will not be checked. .TP .B GetInRelease: no If this is present, no .B InRelease file is downloaded but only .B Release (and .B Release.gpg ) are tried. .TP .B Flat If this field is in an update rule, it is supposed to be a flat repository, i.e. a repository without a \fBdists\fP dir and no subdirectories for the index files. (If the corresponding \fBsources.list\fP line has the suite end with a slash, then you might need this one.) The argument for the \fBFlat:\fP field is the Component to put those packages into. No \fBComponents\fP or \fBUDebComponents\fP fields are allowed in a flat update rule. If the \fBArchitectures\fP field has any \fB>\fP items, the part left of the "\fB>\fP" is ignored. 
.br For example the \fBsources.list\fP line deb http://cran.r\-project.org/bin/linux/debian etch\-cran/ .br would translate to .br Name: R Method: http://cran.r\-project.org/bin/linux/debian Suite: etch\-cran Flat: whatevercomponentyoudlikethepackagesin .TP .B IgnoreHashes This directive tells reprepro to not check the listed hashes in the downloaded Release file (and only in the Release file). Possible values are currently \fBmd5\fP, \fBsha1\fP and \fBsha256\fP. Note that this does not speed anything up in any measurable way. The only reason to specify this if the Release file of the distribution you want to mirror from uses a faulty algorithm implementation. Otherwise you will gain nothing and only lose security. .TP .B FilterFormula This can be a formula to specify which packages to accept from this source. The format is misusing the parser intended for Dependency lines. To get only architecture all packages use "architecture (== all)", to get only at least important packages use "priority (==required) | priority (==important)". See the description of the listfilter command for the semantics of formulas. .TP .B FilterList\fR, \fPFilterSrcList These take at least two arguments: The first one is the default action when something is not found in the list, then a list of filenames (relative to .B \-\-confdir\fR, if not starting with .BR ~/ ", " +b/ ", " +c/ " or " / " )" in the format of dpkg \-\-get\-selections and only packages listed in there as .B install or that are already there and are listed with .B upgradeonly will be installed. Things listed as .B deinstall or .B purge will be ignored. Packages having .B supersede will not be installed but instead cause the removal of packages with strictly smaller version (i.e. if a package would be replaced by this package if this was .BR install , it will be removed instead and no new package being installed). Things listed with .B warning are also ignored, but a warning message is printed to stderr. 
A package being .B hold will not be upgraded but also not downgraded or removed by previous delete rules. To abort the whole upgrade/pull if a package is available, use .B error\fR. Instead of a keyword you can also use "\fB= \fP\fIversion\fP" which is treated like \fBinstall\fP if the version matches and like no entry if it does not match. Only one such entry per package is currently supported and the version is currently compared as string. If there is both \fBFilterList\fP and \fBFilterSrcList\fP then the first is used for \fB.deb\fP and \fB.udeb\fP and the second for \fB.dsc\fP packages. If there is only \fBFilterList\fP that is applied to everything. If there is only \fBFilterSrcList\fP that is applied to everything, too, but the source package name (and source version) is used to do the lookup. .TP .B ListHook If this is given, it is executed for all downloaded index files with the downloaded list as first and a filename that will be used instead of this. (e.g. "ListHook: /bin/cp" works but does nothing.) If a file will be read multiple times, it is processed multiple times, with the environment variables .BR REPREPRO_FILTER_CODENAME ", " REPREPRO_FILTER_PACKAGETYPE ", " .BR REPREPRO_FILTER_COMPONENT " and " REPREPRO_FILTER_ARCHITECTURE set to the where this file will be added and .B REPREPRO_FILTER_PATTERN to the name of the update rule causing it. .TP .B ListShellHook This is like ListHook, but the whole argument is given to the shell as argument, and the input and output file are stdin and stdout. i.e.: .br ListShellHook: cat .br works but does nothing but useless use of a shell and cat, while .br ListShellHook: grep\-dctrl \-X \-S apt \-o \-X \-S dpkg || [ $? \-eq 1 ] .br will limit the update rule to packages from the specified source packages. .TP .B DownloadListsAs The arguments of this field specify which index files reprepro will download. Allowed values are .BR . 
", " .gz ", " .bz2 ", " .lzma ", " .xz ", " .lz ", " .diff ", " .BR force.gz ", " force.bz2 ", " force.lzma ", " force.xz ", " .BR force.lz ", and " force.diff "." Reprepro will try the first supported variant in the list given: Only compressions compiled in or for which an uncompressor was found are used. Unless the value starts with \fBforce.\fP, it is only tried if it is found in the Release or InRelease file. The default value is \fB.diff .xz .lzma .bz2 .gz .\fP, i.e. download Packages.diff if listed in the Release file, otherwise or if not usable download .xz if listed in the Release file and there is a way to uncompress it, then .lzma if usable, then .bz2 if usable, then .gz and then uncompressed. Note there is no way to see if an uncompressed variant of the file is available (as the Release file always lists their checksums, even if not there), so putting '\fB.\fP' anywhere but as the last argument can mean trying to download a file that does not exist. Together with \fBIgnoreRelease\fP reprepro will download the first in this list that could be unpacked (i.e. \fBforce\fP is always assumed) and the default value is \fB.gz .bz2 . .lzma .xz\fP. .SS conf/pulls This file contains the rules for pulling packages from one distribution to another. While this can also be done with update rules using the file or copy method and using the exported indices of that other distribution, this way is faster. It also ensures the current files are used and no copies are made. (This also leads to the limitation that pulling from one component to another is not possible.) Each rule consists of the following fields: .TP .B Name The name of this pull rule as it can be used in the .B Pull field in conf/distributions. .TP .B From The codename of the distribution to pull packages from. .TP .B Components The components of the distribution to get from. If this field is not there, all components from the distribution to update are tried. 
A rule might list components not available in all distributions using this rule. In this case unknown components are silently ignored. (Unless you start reprepro with the \-\-fast option, it will warn about components unusable in all distributions using that rule. As an exception, unusable components called \fBnone\fP are never warned about, for compatibility with versions prior to 3.0.0 where an empty field had a different meaning.) .TP .B Architectures The architectures to update. If omitted all from the distribution to pull from. As in .BR conf/updates , you can use ">" to download from one architecture and add into another one. (And again, only useful with filtering to avoid packages not architecture \fBall\fP to migrate). .TP .B UDebComponents Like .B Components but for the udebs. .TP .B FilterFormula .TP .B FilterList .TP .B FilterSrcList The same as with update rules. .SH "OVERRIDE FILES" The format of override files used by reprepro should resemble the extended ftp\-archive format, to be specific it is: .B \fIpackagename\fP \fIfield name\fP \fInew value\fP For example: .br .B kernel\-image\-2.4.31\-yourorga Section protected/base .br .B kernel\-image\-2.4.31\-yourorga Priority standard .br .B kernel\-image\-2.4.31\-yourorga Maintainer That's me .br .B reprepro Priority required All fields of a given package will be replaced by the new value specified in the override file with the exception of special fields starting with a dollar sign ($). While the field name is compared case-insensitive, it is copied in exactly the form given in the override file. (Thus I suggest to keep to the exact case it is normally found in index files in case some other tool confuses them.) More than copied is the Section header (unless \fB\-S\fP is supplied), which is also used to guess the component (unless \fB\-C\fP is there). Some values like \fBPackage\fP, \fBFilename\fP, \fBSize\fP or \fBMD5sum\fP are forbidden, as their usage would severely confuse reprepro. 
As an extension reprepro also supports patterns instead of packagenames. If the package name contains '*', '[' or '?', it is considered a pattern and applied to each package that is not matched by any non-pattern override nor by any previous pattern. Fieldnames starting with a dollar ($) are not placed in the exported control data but have special meaning. Unknown ones are loudly ignored. Special fields are: \fB$Component\fP: includedeb, includedsc, include and processincoming will put the package in the component given as value (unless itself overridden with \fB\-C\fP). Note that the proper way to specify the component is by setting the section field and using this extension will most likely confuse people. \fB$Delete\fP: the value is treated as a fieldname and fields of that name are removed. (This way one can remove fields previously added without removing and readding the package. And fields already included in the package can be removed, too). .SS conf/incoming Every chunk is a rule set for the .B processincoming command. Possible fields are: .TP .B Name The name of the rule-set, used as argument to the scan command to specify to use this rule. .TP .B IncomingDir The Name of the directory to scan for .B .changes files. .TP .B TempDir A directory where the files listed in the processed .changes files are copied into before they are read. You can avoid some copy operations by placing this directory within the same mount point the pool hierarchy is (at least partially) in. .TP .B LogDir A directory where .changes files, .log files and otherwise unused .byhand files are stored upon processing. .TP .B Allow Each argument is either a pair \fIname1\fB>\fIname2\fR or simply \fIname\fP which is short for \fIname\fB>\fIname\fR. Each \fIname2\fP must identify a distribution, either by being Codename, a unique Suite, or a unique AlsoAcceptFor from \fBconf/distributions\fP. 
Each upload has each item in its .B Distribution: header compared first to last with each \fIname1\fP in the rules and is put in the first one accepting this package. e.g.: .br Allow: local unstable>sid .br or .br Allow: stable>security\-updates stable>proposed\-updates .br (Note that this makes only sense if Multiple is set to true or if there are people only allowed to upload to proposed\-updates but not to security\-updates). .TP .B Default \fIdistribution Every upload not put into any other distribution because of an Allow argument is put into \fIdistribution\fP if that accepts it. .TP .B Multiple Allow putting an upload in multiple distributions if it lists more than one. (Without this field, processing stops after the first success). .TP .B Options A list of options .br .B multiple_distributions .br Allow putting an upload in multiple distributions if it lists more than one. (Without this field, processing stops after the first success). .br .B limit_arch_all .br If an upload contains binaries from some architecture and architecture all packages, the architecture all packages are only put into the architectures within this upload. Useful to combine with the \fBflood\fP command. .TP .B Permit A list of options to allow things otherwise causing errors: .br .B unused_files .br Do not stop with error if the \fB.changes\fP file lists files not belonging to any package in it. .br .B older_version .br Ignore a package not added because there already is a strictly newer version available instead of treating this as an error. .TP .B Cleanup \fIoptions A list of options to cause more files in the incoming directory to be deleted: .br .B unused_files .br If there is \fBunused_files\fP in \fBPermit\fP then also delete those files when the package is deleted after successful processing. 
.br .B on_deny .br If a \fB.changes\fP file is denied processing because of missing signatures or allowed distributions to be put in, delete it and all the files it references. .br .B on_error .br If a \fB.changes\fP file causes errors while processing, delete it and the files it references. Note that allowing cleanup in publicly accessible incoming queues allows a denial of service by sending in .changes files deleting other people's files before they are completed. Especially when .changes files are handled directly (e.g. by inoticoming). .TP .B MorgueDir If files are to be deleted by Cleanup, they are instead moved to a subdirectory of the directory given as value to this field. This directory has to be on the same partition as the incoming directory and files are moved (i.e. owner and permission stay the same) and never copied. .SH "UPLOADERS FILES" These files specified by the \fBUploaders\fP header in the distribution definition as explained above describe what key a \fB.changes\fP file has to be signed with to be included in that distribution. .P Empty lines and lines starting with a hash are ignored, every other line must be of one of the following nine forms or an include directive: .TP .B allow \fIcondition\fP by anybody which allows everyone to upload packages matching \fIcondition\fP, .TP .B allow \fIcondition\fP by unsigned which allows everything matching that has no pgp/gpg header, .TP .B allow \fIcondition\fP by any key which allows everything matching with any valid signature, or .TP .B allow \fIcondition\fP by key \fIkey-id\fP which allows everything matching signed by this \fIkey-id\fP (to be specified without any spaces). If the \fIkey-id\fP ends with a \fB+\fP (plus), a signature with a subkey of this primary key also suffices. \fIkey-id\fP must be a suffix of the id libgpgme uses to identify this key, i.e. a number of hexdigits from the end of the fingerprint of the key, but no more than what libgpgme uses. 
(The maximal number should be what gpg \-\-list-key \-\-with\-colons prints, as of the time of this writing that is at most 16 hex-digits). .TP .B allow \fIcondition\fP by group \fIgroupname\fP which allows every member of group \fIgroupname\fP. Groups can be manipulated by .TP .B group \fIgroupname\fP add \fIkey-id\fP to add a \fIkey-id\fP (see above for details) to this group, or .TP .B group \fIgroupname\fP contains \fIgroupname\fP to add a whole group to a group. To avoid warnings in incomplete config files there is also .TP .B group \fIgroupname\fP empty to declare a group has no members (avoids warnings that it is used without those) and .TP .B group \fIgroupname\fP unused to declare that a group is not yet used (avoid warnings that it is not used). .PP A line starting with \fBinclude\fP causes the rest of the line to be interpreted as filename, which is opened and processed before the rest of the file is processed. The only conditions currently supported are: .TP .B * which means any package, .TP .BI "source '" name ' which means any package with source \fIname\fP. ('\fB*\fP', '\fB?\fP' and '\fB[\fP..\fB]\fP' are treated as in shell wildcards). .TP .B sections '\fIname\fP'\fR(\fP|'\fIname\fP'\fR)*\fP matches an upload in which each section matches one of the names given. As upload conditions are checked very early, this is the section listed in the .changes file, not the one from the override file. (But this might change in the future, if you have the need for the one or the other behavior, let me know). .TP .B sections contain '\fIname\fP'\fR(\fP|'\fIname\fP'\fR)*\fP The same, but not all sections must be from the given set, but at least one source or binary package needs to have one of those given. .TP .B binaries '\fIname\fP'\fR(\fP|'\fIname\fP'\fR)*\fP matches an upload in which each binary (type deb or udeb) matches one of the names given. 
.TP .B binaries contain '\fIname\fP'\fR(\fP|'\fIname\fP'\fR)*\fP again only at least one instead of all is required. .TP .B architectures '\fIarchitecture\fP'\fR(\fP|'\fIname\fP'\fR)*\fP matches an upload in which each package has only architectures from the given set. \fBsource\fP and \fBall\fP are treated as unique architectures. Wildcards are not allowed. .TP .B architectures contain '\fIarchitecture\fP'\fR(\fP|'\fIarchitecture\fP'\fR)*\fP again only at least one instead of all is required. .TP .B byhand matches an upload with at least one byhand file (i.e. a file with section \fBbyhand\fP or \fBraw\-\fP\fIsomething\fP). .TP .B byhand '\fIsection\fP'\fR(\fP|'\fIsection\fP'\fR)*\fP matches an upload with at least one byhand file and all byhand files having a section listed in the list of given section. (i.e. \fBbyhand 'byhand'|'raw\-*'\fP is currently is the same as \fBbyhand\fP). .TP .BI "distribution '" codename ' which means any package when it is to be included in \fIcodename\fP. As the uploaders file is given by distribution, this is only useful to reuse a complex uploaders file for multiple distributions. .PP Putting \fBnot\fP in front of a condition, inverses it's meaning. For example .br \fBallow not source 'r*' by anybody\fP .br means anybody may upload packages which source name does not start with an 'r'. .PP Multiple conditions can be connected with \fBand\fP and \fBor\fP, with \fBor\fP binding stronger (but both weaker than \fBnot\fP). That means .br \fBallow source 'r*' and source '*xxx' or source '*o' by anybody\fP .br is equivalent to .br \fBallow source 'r*xxx' by anybody\fP .br \fBallow source 'r*o' by anybody\fP (Other conditions will follow once somebody tells me what restrictions are useful. Currently planned is only something for architectures). 
.SH "ERROR IGNORING" With \fB\-\-ignore\fP on the command line or an \fIignore\fP line in the options file, the following type of errors can be ignored: .TP .B brokenold \fR(hopefully never seen) If there are errors parsing an installed version of package, do not error out, but assume it is older than anything else, has not files or no source name. .TP .B brokensignatures If a .changes or .dsc file contains at least one invalid signature and no valid signature (not even expired or from an expired or revoked key), reprepro assumes the file got corrupted and refuses to use it unless this ignore directive is given. .TP .B brokenversioncmp \fR(hopefully never seen) If comparing old and new version fails, assume the new one is newer. .TP .B dscinbinnmu If a .changes file has an explicit Source version that is different the to the version header of the file, than reprepro assumes it is binary non maintainer upload (NMU). In that case, source files are not permitted in .changes files processed by .B include or .BR processincoming . Adding \fB\-\-ignore=dscinbinnmu\fP allows it for the \fBinclude\fP command. .TP .B emptyfilenamepart \fR(insecure) Allow strings to be empty that are used to construct filenames. (like versions, architectures, ...) .TP .B extension Allow to \fBincludedeb\fP files that do not end with \fB.deb\fP, to \fBincludedsc\fP files not ending in \fB.dsc\fP and to \fBinclude\fP files not ending in \fB.changes\fP. .TP .B forbiddenchar \fR(insecure) Do not insist on Debian policy for package and source names and versions. Thus allowing all 7-bit characters but slashes (as they would break the file storage) and things syntactically active (spaces, underscores in filenames in .changes files, opening parentheses in source names of binary packages). To allow some 8-bit chars additionally, use \fB8bit\fP additionally. 
.TP .B 8bit \fR(more insecure) Allow 8-bit characters not looking like overlong UTF-8 sequences in filenames and things used as parts of filenames. Though it hopefully rejects overlong UTF-8 sequences, there might be other characters your filesystem confuses with special characters, thus creating filenames possibly equivalent to \fB/mirror/pool/main/../../../etc/shadow\fP (Which should be safe, as you do not run reprepro as root, do you?) or simply overwriting your conf/distributions file adding some commands in there. So do not use this if you are paranoid, unless you are paranoid enough to have checked the code of your libs, kernel and filesystems. .TP .B ignore \fR(for forward compatibility) Ignore unknown ignore types given to \fI\-\-ignore\fP. .TP .B flatandnonflat \fR(only supresses a warning) Do not warn about a flat and a non-flat distribution from the same source with the same name when updating. (Hopefully never ever needed.) .TP .B malformedchunk \fR(I hope you know what you do) Do not stop when finding a line not starting with a space but no colon(:) in it. These are otherwise rejected as they have no defined meaning. .TP .B missingfield \fR(safe to ignore) Ignore missing fields in a .changes file that are only checked but not processed. Those include: Format, Date, Urgency, Maintainer, Description, Changes .TP .B missingfile \fR(might be insecure) When including a .dsc file from a .changes file, try to get files needed but not listed in the .changes file (e.g. when someone forgot to specify \-sa to dpkg\-buildpackage) from the directory the .changes file is in instead of erroring out. (\fB\-\-delete\fP will not work with those files, though.) .TP .B spaceonlyline \fR(I hope you know what you do) Allow lines containing only (but non-zero) spaces. As these do not separate chunks as thus will cause reprepro to behave unexpected, they cause error messages by default. 
.TP .B surprisingarch Do not reject a .changes file containing files for a architecture not listed in the Architecture-header within it. .TP .B surprisingbinary Do not reject a .changes file containing .deb files containing packages whose name is not listed in the "Binary:" header of that changes file. .TP .B undefinedtarget \fR(hope you are not using the wrong db directory) Do not stop when the packages.db file contains databases for codename/packagetype/component/architectures combinations that are not listed in your distributions file. This allows you to temporarily remove some distribution from the config files, without having to remove the packages in it with the \fBclearvanished\fP command. You might even temporarily remove single architectures or components, though that might cause inconsistencies in some situations. .TP .B undefinedtracking \fR(hope you are not using the wrong db directory) Do not stop when the tracking file contains databases for distributions that are not listed in your \fBdistributions\fP file. This allows you to temporarily remove some distribution from the config files, without having to remove the packages in it with the \fBclearvanished\fP command. You might even temporarily disable tracking in some distribution, but that is likely to cause inconsistencies in there, if you do not know, what you are doing. .TP .B unknownfield \fR(for forward compatibility) Ignore unknown fields in the config files, instead of refusing to run then. .TP .B unusedarch \fR(safe to ignore) No longer reject a .changes file containing no files for any of the architectures listed in the Architecture-header within it. .TP .B unusedoption Do not complain about command line options not used by the specified action (like \fB\-\-architecture\fP). .TP .B uploaders The include command will accept packages that would otherwise been rejected by the uploaders file. 
.TP .B wrongarchitecture \fR(safe to ignore) Do not warn about wrong "Architecture:" lines in downloaded Packages files. (Note that wrong Architectures are always ignored when getting stuff from flat repostories or importing stuff from one architecture to another). .TP .B wrongdistribution \fR(safe to ignore) Do not error out if a .changes file is to be placed in a distribution not listed in that files' Distributions: header. .TP .B wrongsourceversion Do not reject a .changes file containing .deb files with a different opinion on what the version of the source package is. .br (Note: reprepro only compares literally here, not by meaning.) .TP .B wrongversion Do not reject a .changes file containing .dsc files with a different version. .br (Note: reprepro only compares literally here, not by meaning.) .TP .B expiredkey \fR(I hope you know what you do) Accept signatures with expired keys. (Only if the expired key is explicitly requested). .TP .B expiredsignature \fR(I hope you know what you do) Accept expired signatures with expired keys. (Only if the key is explicitly requested). .TP .B revokedkey \fR(I hope you know what you do) Accept signatures with revoked keys. (Only if the revoked key is explicitly requested). .SH GUESSING When including a binary or source package without explicitly declaring a component with .B \-C it will take the first component with the name of the section, being prefix to the section, being suffix to the section or having the section as prefix or any. (In this order) Thus having specified the components: "main non\-free contrib non\-US/main non\-US/non\-free non\-US/contrib" should map e.g. "non\-US" to "non\-US/main" and "contrib/editors" to "contrib", while having only "main non\-free and contrib" as components should map "non\-US/contrib" to "contrib" and "non\-US" to "main". .B NOTE: Always specify main as the first component, if you want things to end up there. .B NOTE: unlike in dak, non\-US and non\-us are different things... 
.SH NOMENCLATURE .B Codename the primary identifier of a given distribution. This are normally things like \fBsarge\fP, \fBetch\fP or \fBsid\fP. .TP .B basename the name of a file without any directory information. .TP .B byhand Changes files can have files with section 'byhand' (Debian) or 'raw\-' (Ubuntu). Those files are not packages but other data generated (usually together with packages) and then uploaded together with this changes files. With reprepro those can be stored in the pool next to their packages whith tracking, put in some log directory when using processincoming, or given to an hook script (currently only possible with processincoming). .TP .B filekey the position relative to the outdir. (as found in "Filename:" in Packages.gz) .TP .B "full filename" the position relative to / .TP .B architecture The term like \fBsparc\fP, \fBi386\fP, \fBmips\fP, ... . To refer to the source packages, \fBsource\fP is sometimes also treated as architecture. .TP .B component Things like \fBmain\fP, \fBnon\-free\fP and \fBcontrib\fP (by policy and some other programs also called section, reprepro follows the naming scheme of apt here.) .TP .B section Things like \fBbase\fP, \fBinterpreters\fP, \fBoldlibs\fP and \fBnon\-free/math\fP (by policy and some other programs also called subsections). .TP .B md5sum The checksum of a file in the format "\fI\fP \fI\fP" .SH Some note on updates .SS A version is not overwritten with the same version. .B reprepro will never update a package with a version it already has. This would be equivalent to rebuilding the whole database with every single upgrade. To force the new same version in, remove it and then update. (If files of the packages changed without changing their name, make sure the file is no longer remembered by reprepro. Without \fB\-\-keepunreferencedfiled\fP and without errors while deleting it should already be forgotten, otherwise a \fBdeleteunreferenced\fP or even some \fB__forget\fP might help.) 
.SS The magic delete rule ("\-"). A minus as a single word in the .B Update: line of a distribution marks everything to be deleted. The mark causes later rules to get packages even if they have (strict) lower versions. The mark will get removed if a later rule sets the package on hold (hold is not yet implemented, in case you might wonder) or would get a package with the same version (Which it will not, see above). If the mark is still there at the end of the processing, the package will get removed. .P Thus the line "Update: \- .I rules " will cause all packages to be exactly the highest Version found in .I rules. The line "Update: .I near \- .I rules " will do the same, except if it needs to download packages, it might download it from .I near except when too confused. (It will get too confused e.g. when .I near or .I rules have multiple versions of the package and the highest in .I near is not the first one in .I rules, as it never remember more than one possible spring for a package. .P Warning: This rule applies to all type/component/architecture triplets of a distribution, not only those some other update rule applies to. (That means it will delete everything in those!) .SH ENVIRONMENT VARIABLES Environment variables are always overwritten by command line options, but overwrite options set in the \fBoptions\fP file. (Even when the options file is obviously parsed after the environment variables as the environment may determine the place of the options file). .TP .B REPREPRO_BASE_DIR The directory in this variable is used instead of the current directory, if no \fB\-b\fP or \fB\-\-basedir\fP options are supplied. .br It is also set in all hook scripts called by reprepro (relative to the current directory or absolute, depending on how reprepro got it). .TP .B REPREPRO_CONFIG_DIR The directory in this variable is used when no \fB\-\-confdir\fP is supplied. 
.br It is also set in all hook scripts called by reprepro (relative to the current directory or absolute, depending on how reprepro got it). .TP .B REPREPRO_OUT_DIR This is not used, but only set in hook scripts called by reprepro to the directory in which the \fBpool\fP subdirectory resides (relative to the current directory or absolute, depending on how reprepro got it). .TP .B REPREPRO_DIST_DIR This is not used, but only set in hook scripts called by reprepro to the \fBdists\fP directory (relative to the current directory or absolute, depending on how reprepro got it). .TP .B REPREPRO_LOG_DIR This is not used, but only set in hook scripts called by reprepro to the value setable by \fB\-\-logdir\fP. .TP .B REPREPRO_CAUSING_COMMAND .TP .B REPREPRO_CAUSING_FILE Those two environment variable are set (or unset) in \fBLog:\fP and \fBByHandHooks:\fP scripts and hint what command and what file caused the hook to be called (if there is some). .TP .B REPREPRO_CAUSING_RULE This environment variable is set (or unset) in \fBLog:\fP scripts and hint what update or pull rule caused this change. .TP .B REPREPRO_FROM This environment variable is set (or unset) in \fBLog:\fP scripts and denotes what other distribution a package is copied from (with pull and copy commands). .TP .B REPREPRO_FILTER_ARCHITECTURE .TP .B REPREPRO_FILTER_CODENAME .TP .B REPREPRO_FILTER_COMPONENT .TP .B REPREPRO_FILTER_PACKAGETYPE .TP .B REPREPRO_FILTER_PATTERN Set in \fBFilterList:\fP and \fBFilterSrcList:\fP scripts. .TP .B GNUPGHOME Not used by reprepro directly. But reprepro uses libgpgme, which calls gpg for signing and verification of signatures. And your gpg will most likely use the content of this variable instead of "~/.gnupg". Take a look at .BR gpg (1) to be sure. You can also tell reprepro to set this with the \fB\-\-gnupghome\fP option. 
.TP .B GPG_TTY When there is a gpg\-agent running that does not have the passphrase cached yet, gpg will most likely try to start some pinentry program to get it. If that is pinentry\-curses, that is likely to fail without this variable, because it cannot find a terminal to ask on. In this cases you might set this variable to something like the value of .B $(tty) or .B $SSH_TTY or anything else denoting a usable terminal. (You might also want to make sure you actually have a terminal available. With ssh you might need the .B \-t option to get a terminal even when telling gpg to start a specific command). By default, reprepro will set this variable to what the symbolic link .B /proc/self/fd/0 points to, if stdin is a terminal, unless you told with .B \-\-noguessgpgtty to not do so. .SH BUGS Increased verbosity always shows those things one does not want to know. (Though this might be inevitable and a corollary to Murphy) Reprepro uses berkeley db, which was a big mistake. The most annoying problem not yet worked around is database corruption when the disk runs out of space. (Luckily if it happens while downloading packages while updating, only the files database is affected, which is easy (though time consuming) to rebuild, see \fBrecovery\fP file in the documentation). Ideally put the database on another partition to avoid that. While the source part is mostly considered as the architecture .B source some parts may still not use this notation. .SH "WORK-AROUNDS TO COMMON PROBLEMS" .TP .B gpgme returned an impossible condition With the woody version this normally meant that there was no .gnupg directory in $HOME, but it created one and reprepro succeeds when called again with the same command. Since sarge the problem sometimes shows up, too. But it is no longer reproducible and it does not fix itself, neither. Try running \fBgpg \-\-verify \fP\fIfile-you-had-problems-with\fP manually as the user reprepro is running and with the same $HOME. 
This alone might fix the problem. It should not print any messages except perhaps .br gpg: no valid OpenPGP data found. .br gpg: the signature could not be verified. .br if it was an unsigned file. .TP .B not including .orig.tar.gz when a .changes file's version does not end in \-0 or \-1 If dpkg\-buildpackage is run without the \fB\-sa\fP option to build a version with a Debian revision not being \-0 or \-1, it does not list the \fB.orig.tar.gz\fP file in the \fB.changes\fP file. If you want to \fBinclude\fP such a file with reprepro when the .orig.tar.gz file does not already exist in the pool, reprepro will report an error. This can be worked around by: .br call \fBdpkg\-buildpackage\fP with \fB\-sa\fP (recommended) .br copy the .orig.tar.gz file to the proper place in the pool before .br call reprepro with \-\-ignore=missingfile (discouraged) .TP .B leftover files in the pool directory. reprepro is sometimes a bit too timid of deleting stuff. When things go wrong and there have been errors it sometimes just leaves everything where it is. To see what files reprepro remembers to be in your pool directory but does not know anything needing them right know, you can use .br \fBreprepro dumpunreferenced\fP .br To delete them: .br \fBreprepro deleteunreferenced\fP .SH INTERRUPTING Interrupting reprepro has its problems. Some things (like speaking with apt methods, database stuff) can cause problems when interrupted at the wrong time. Then there are design problems of the code making it hard to distinguish if the current state is dangerous or non-dangerous to interrupt. Thus if reprepro receives a signal normally sent to tell a process to terminate itself softly, it continues its operation, but does not start any new operations. (I.e. it will not tell the apt\-methods any new file to download, it will not replace a package in a target, unless it already had started with it, it will not delete any files gotten dereferenced, and so on). 
\fBIt only catches the first signal of each type. The second signal of a given type will terminate reprepro. You will risk database corruption and have to remove the lockfile manually.\fP Also note that even normal interruption leads to code-paths mostly untested and thus expose a multitude of bugs including those leading to data corruption. Better think a second more before issuing a command than risking the need for interruption. .SH "REPORTING BUGS" Report bugs or wishlist requests to the Debian BTS .br (e.g. by using \fBreportbug reprepro\fP under Debian) .br or directly to .MTO brlink@debian.org .SH COPYRIGHT Copyright \(co 2004,2005,2006,2007,2008,2009,2010,2011,2012 .URL http://www.brlink.eu "Bernhard R. Link" .br This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. reprepro-4.13.1/docs/reprepro.zsh_completion0000644000175100017510000005335312152651661016144 00000000000000#compdef reprepro # This is a zsh completion script for reprepro. # To make use of it make sure it is stored as _reprepro in your # zsh's fpath (like /usr/local/share/zsh/site-functions/). 
# # to install as user: # # mkdir ~/.zshfiles # cp reprepro.zsh_completion ~/.zshfiles/_reprepro # echo 'fpath=(~/.zshfiles $fpath)' >> ~/.zshrc # echo 'autoload -U ~/.zshfiles*(:t)' >> ~/.zshrc # # make sure compinit is called after those lines in .zshrc local context state line confdir distfile incomingfile incomingdir outdir basedir confdirset basedirset typeset -A opt_args local -a codenames architectures list commands hiddencommands function _reprepro_calcbasedir () { if [[ -n "$opt_args[-b]" ]]; then basedir=${opt_args[-b]} basedirset=true elif [[ -n "$opt_args[--basedir]" ]]; then basedir=${opt_args[--basedir]} basedirset=true elif [[ -n "$REPREPRO_BASE_DIR" ]]; then basedir=${REPREPRO_BASE_DIR} basedirset=true else basedir=$PWD basedirset=false fi if [[ -n "$opt_args[--confdir]" ]]; then confdir=${opt_args[--confdir]} elif [[ -n "$REPREPRO_CONFIG_DIR" ]]; then confdir=${REPREPRO_CONFIG_DIR} else confdir=$basedir/conf fi if [[ -e "$confdir/options" ]] ; then if [ "$basedirset" != "true" ] && grep -q '^basedir ' -- "$confdir/options" 2>/dev/null ; then basedir="$(grep '^basedir ' -- "$confdir/options" 2>/dev/null | sed -e 's/^basedir *//')" fi fi } function _reprepro_filekeys () { _reprepro_calcbasedir if [[ -n "$opt_args[--outdir]" ]]; then outdir=${opt_args[--outdir]} else outdir=$basedir fi list=( $outdir ) _files -W list } function _reprepro_calcconfdir () { if [[ -n "$opt_args[--confdir]" ]]; then confdir=${opt_args[--confdir]} confdirset=direct elif [[ -n "$REPREPRO_CONFIG_DIR" ]]; then confdir=${REPREPRO_CONFIG_DIR} confdirset=direct elif [[ -n "$opt_args[-b]" ]]; then confdir=${opt_args[-b]}/conf confdirset=basedir basedirset=true elif [[ -n "$opt_args[--basedir]" ]]; then confdir=${opt_args[--basedir]}/conf confdirset=basedir basedirset=true elif [[ -n "$REPREPRO_BASE_DIR" ]]; then confdir=${REPREPRO_BASE_DIR}/conf confdirset=basedir basedirset=true else confdir=$PWD/conf confdirset=default basedirset=false fi if [ "$confdirset" != "direct" ] && [[ 
-e "$confdir/options" ]] ; then if grep -q '^confdir ' -- "$confdir/options" 2>/dev/null ; then confdir="$(grep '^confdir ' -- "$confdir/options" 2>/dev/null | sed -e 's/^confdir *//')" elif [ "$basedirset" = "false" ] \ && grep -q '^basedir ' -- "$confdir/options" 2>/dev/null ; then confdir="$(grep '^basedir ' -- "$confdir/options" 2>/dev/null | sed -e 's/^basedir *//')/conf" fi fi } function _reprepro_finddistributions () { _reprepro_calcconfdir distfile="$confdir"/distributions test -e "$distfile" } function _reprepro_findincoming () { _reprepro_calcconfdir incomingfile="$confdir"/incoming test -e "$incomingfile" } function _reprepro_grepdistfile () { _reprepro_finddistributions && if test -d "$distfile" ; then sed -n -e 's#^'"$1"': \(.*\)#\1#p' "$distfile"/*.conf else sed -n -e 's#^'"$1"': \(.*\)#\1#p' "$distfile" fi } function _reprepro_architectures () { architectures=($(_reprepro_grepdistfile '[Aa][Rr][Cc][Hh][Ii][Tt][Ee][Cc][Tt][Uu][Rr][Ee][Ss]')) \ || architectures=(i386 m68k sparc alpha powerpc arm mips mipsel hppa ia64 s390 amd64 ppc64 sh armeb m32r hurd-i386 netbsd-i386 netbsd-alpha kfreebsd-gnu) _wanted -V 'architectures' expl 'architecture' compadd -a architectures } function _reprepro_components () { components=($(_reprepro_grepdistfile '[Cc][Oo][Mm][Pp][Oo][Nn][Ee][Nn][Tt][Ss]')) \ || components=(main contrib non-free bad) _wanted -V 'components' expl 'component' compadd -a components } function _reprepro_codenames () { codenames=($(_reprepro_grepdistfile '[Cc][Oo][Dd][Ee][Nn][Aa][Mm][Ee]')) \ || codenames=(sid lenny etch sarge unstable testing stable local) _wanted -V 'codenames' expl 'codename' compadd -a codenames } function _reprepro_identifiers () { _reprepro_finddistributions \ && list=($(if test -d "$distfile" ; then set -- "$distfile"/*.conf ; else set -- "$distfile" ; fi && awk ' /^$/ {for(a=2;a<=acount;a++){ for(c=2;c<=ccount;c++){ print codename "|" components[c] "|" architectures[a] } if( architectures[a] != "source" ) { 
for(c=2;c<=uccount;c++){ print "u|" codename "|" ucomponents[c] "|" architectures[a] } } }; acount=0;ccount=0;ucount=0} /^[Cc][Oo][Mm][Pp][Oo][Nn][Ee][Nn][Tt][Ss]: / {ccount = split($0,components); next} /^[Uu][Dd][Ee][Bb][Cc][Oo][Mm][Pp][Oo][Nn][Ee][Nn][Tt][Ss]: / {uccount = split($0,ucomponents); next} /^[Aa][Rr][Cc][Hh][Ii][Tt][Ee][Cc][Tt][Uu][Rr][Ee][Ss]: / {acount = split($0,architectures); next} /^[Cc][Oo][Dd][Ee][Nn][Aa][Mm][Ee]: / {codename = $2; next} END {for(a=2;a<=acount;a++){ for(c=2;c<=ccount;c++){ print codename "|" components[c] "|" architectures[a] } if( architectures[a] != "source" ) { for(c=2;c<=uccount;c++){ print "u|" codename "|" ucomponents[c] "|" architectures[a] } } }; acount=0;ccount=0;ucount=0} {next} ' "$@" )) \ || list=(identifier) _wanted -V 'identifiers' expl 'identifier' compadd -a list } function _reprepro_incomings () { _reprepro_findincoming \ && list=($(if test -d "$incomingfile" ; then set -- "$incomingfile"/*.conf ; else set -- "$incomingfile" ; fi && awk '/^[Nn][Aa][Mm][Ee]: / {print $2}' "$@")) \ || list=(rule-name) _wanted -V 'rule names' expl 'rule name' compadd -a list } function _reprepro_incomingdir () { local rulename=$1 shift _reprepro_findincoming \ && incomingdir=($(if test -d "$incomingfile" ; then set -- "$incomingfile"/*.conf ; else set -- "$incomingfile" ; fi && awk ' /^[Ii][Nn][Cc][Oo][Mm][Ii][Nn][Gg][Dd][Ii][Rr]: / {dir=$2; next} /^[Nn][Aa][Mm][Ee]: / {name=$2; next} /^$/ { if( name="'"$rulename"'" ) { print dir } ; next } END { if( name="'"$rulename"'" ) { print dir }} {next} ' "$@")) # needs to be an array, as it might not be absolute... list=( $incomingdir ) } function _reprepro_package_names () { #todo get package names?... _wanted -V 'package names' expl 'package name' compadd name } function _reprepro_source_package_names () { #todo get package names?... 
_wanted -V 'source package names' expl 'source package name' compadd name } commands=( build-needing:"list packages likely needing a build" check:"check if all references are correct" checkpool:"check if all files are still there and correct" checkpull:"check what would be pulled" checkupdate:"check what would be updated" cleanlists:"clean unneeded downloaded list files" clearvanished:"remove empty databases" collectnewchecksums:"calculate missing file hashes" copy:"copy a package from one distribution to another" copyfilter:"copy packages from one distribution to another" copymatched:"copy packages from one distribution to another" copysrc:"copy packages belonging to a specific source from one distribution to another" createsymlinks:"create suite symlinks" deleteunreferenced:"delete files without reference" dumpreferences:"dump reference information" dumppull:"dump what would be pulled" dumptracks:"dump tracking information" dumpupdate:"dump what would be updated" dumpunreferenced:"dump files without reference (i.e. 
unneded)" export:"export index files" forcerepairdescriptions:"forcefully readd lost long descriptions from .deb file" flood:"copy architecture all packages within a distribution" generatefilelists:"pre-prepare filelist caches for all binary packages" gensnapshot:"generate a snapshot" includedeb:"include a .deb file" includedsc:"include a .dsc file" include:"include a .changes file" includeudeb:"include a .udeb file" listfilter:"list packages matching filter" listmatched:"list packages matching filter" list:"list packages" ls:"list versions of package" lsbycomponent:"list versions of package (grouped by component)" predelete:"delete what would be removed or superseeded by an update" processincoming:"process files from an incoming directory" pull:"update from another local distribtuion" removealltracks:"remove tracking information" remove:"remove packages" removefilter:"remove packages matching a formula" removematched:"remove packages matching a glob" removesrc:"remove packages belonging to a source package" removesrcs:"remove packages belonging to names source packages" removetrack:"remove a single tracking data" reoverride:"apply override information to already existing packages" repairdescriptions:"readd lost long descriptions from .deb file" reportcruft:"report source packages without binaries and vice versa" rereference:"recreate references" rerunnotifiers:"call notificators as if all packages were just included" restore:"restore a package from a distribution's snapshot" restorefilter:"restore packages matching a filter from a snapshot" restorematched:"restore packages matching a glob from a snapshot" restoresrc:"restore packages belonging to a specific source from a snapshot" retrack:"refresh tracking information" sourcemissing:"list binary packages with no source package" tidytracks:"look for files referened by tracks but no longer needed" translatefilelists:"translate pre-3.0.0 contents.cache.db into new format" translatelegacychecksums:"get rid of obsolete 
files.db" unusedsources:"list source packages with no binary packages" update:"update from external source" ) hiddencommands=( __dumpuncompressors:"list what external uncompressors are available" __extractcontrol:"extract the control file from a .deb file" __extractfilelist:"extract the filelist from a .deb file" __extractsourcesection:"extract source and priority from a .dsc" __uncompress:"uncompress a file" _addchecksums:"add checksums to database" _addmd5sums:"add checksums to database" _addreference:"mark a filekey needed by an identifier" _detect:"look if the file belonging to a filekey exists and add to the database." _dumpcontents:"output contents of a part of the repository" _fakeemptyfilelist:"create an empty fake filelist cache item for a filekey" _forget:"forget a file specified by filekey." _listchecksums:"print a list of filekeys and their checksums" _listconfidentifiers:"list parts of the repository in the configuration" _listdbidentifiers:"list parts of the repository in the database" _listmd5sums:"print a list of filekeys and their md5 hashes" _removereferences:"remove all references by an identifer" ) _arguments \ '*'{-v,-V,--verbose}'[be more verbose]' \ '*--silent[be less verbose]' \ '*--delete[Delete files after inclusion]' \ '(-b --basedir)'{-b,--basedir}'[Base drectory]:basedir:_files -/' \ '--outdir[Directory where pool and dist are in]:out dir:_files -/' \ '--confdir[Directory where config files are]:config dir:_files -/' \ '--distdir[Directory where index files will be exported to]:dist dir:_files -/' \ '--logdir[Directory where log files will be generated]:log dir:_files -/' \ '--morguedir[Directory where files removed from the pool are stored]:morgue dir:_files -/' \ '--dbdir[Directory where the database is stored]:database dir:_files -/' \ '--listdir[Directory where downloaded index files will be stored]:list dir:_files -/' \ '--methoddir[Directory to search apt methods in]:method dir:_files -/' \ '(-C 
--component)'{-C,--component}'[Override component]:component:{_reprepro_components}' \ '(-A --architecture)'{-A,--architecture}'[Limit to a specific architecture]:architecture:{_reprepro_architectures}' \ '(-T --type)'{-T,--type}'[Limit to a specific type]:file type:(dsc deb udeb)' \ '(-S --section)'{-S,--section}'[Override section]:section:(admin base comm contrib devel doc editors electronics embedded games gnome graphics hamradio interpreters kde libs libdevel mail math misc net news non-free oldlibs otherosfs perl python science shells sound tex text utils web x11 contrib/admin contrib/base contrib/comm contrib/contrib contrib/devel contrib/doc contrib/editors contrib/electronics contrib/embedded contrib/games contrib/gnome contrib/graphics contrib/hamradio contrib/interpreters contrib/kde contrib/libs contrib/libdevel contrib/mail contrib/math contrib/misc contrib/net contrib/news contrib/non-free contrib/oldlibs contrib/otherosfs contrib/perl contrib/python contrib/science contrib/shells contrib/sound contrib/tex contrib/text contrib/utils contrib/web contrib/x11 non-free/admin non-free/base non-free/comm non-free/contrib non-free/devel non-free/doc non-free/editors non-free/electronics non-free/embedded non-free/games non-free/gnome non-free/graphics non-free/hamradio non-free/interpreters non-free/kde non-free/libs non-free/libdevel non-free/mail non-free/math non-free/misc non-free/net non-free/news non-free/non-free non-free/oldlibs non-free/otherosfs non-free/perl non-free/python non-free/science non-free/shells non-free/sound non-free/tex non-free/text non-free/utils non-free/web non-free/x11)' \ '(-P --priority)'{-P,--priority}'[Override priority]:priority:(required important standard optional extra)' \ '--export=[]:when:(never changed normal force)' \ '*--ignore=[Do ignore errors of some type]:error type:((\ ignore\:"ignore unknown ignore tags"\ flatandnonflat\:"ignore warnings about flat and non-flat distribution"\ forbiddenchar\:"allow more 7bit 
characters for names and versions"\ 8bit\:"allow 8 bit characters"\ emptyfilenamepart\:"allow strings used to construct filenames to be empty"\ spaceonlyline\:"do not warn about lines containing only spaces"\ malformedchunk\:"ignore lines without colons"\ unknownfield\:"ignore unknown fields"\ wrongdistribution\:"put .changes files in distributed they were not made for"\ wrongarchitecture\:"do not warn about wrong Architecture fields in downloaded Packages files"\ missingfield\:"allow missing fields"\ brokenold\:"ignore broken packages in database"\ brokenversioncmp\:"ignore versions not parseable"\ extension\:"ignore unexpected suffixes of files"\ unusedarch\:"allow changes files to list achitectures not used"\ unusedoption\:"ignore command line options not used by an action"\ undefinedtarget\:"allow unspecified package databases"\ undefinedtracking\:"allow unspecified tracking databases"\ surprisingarch\:"do not protest when a changes file does not list a architecture it has files for"\ surprisingbinary\:"do not demand a .changes Binaries header to list all binaries"\ wrongsourceversion\:"do not demand coherent source versions in a .changes"\ wrongversion\:"do not demand coherent version of source packages in a .changes"\ dscinbinnmu\:"do not reject source files in what looks like a binMNU"\ brokensignatures\:"ignore corrupted signatures if there is a valid one"\ uploaders\:"allow even when forbidden by uploaders file"\ missingfile\:"include commands search harder for missing files like .orig.tar.gz"\ expiredkey\:"allow signatures with expired keys"\ expiredsignature\:"allow expired signatures"\ revokedkey\:"allow signatures with revoked keys"\ oldfile\:"silence warnings about strange old files in dists"\ longkeyid\:"do not warn about keyid in uploaders files gpgme might not accept"\ ))' \ '*--unignore=[Do not ignore errors of type]:error type:( ignore flatandnonflat forbiddenchar 8bit emptyfilenamepart\ spaceonlyline malformedchunk unknownfield unusedoption\ 
wrongdistribution missingfield brokenold brokenversioncmp\ extension unusedarch surprisingarch surprisingbinary\ wrongsourceversion wrongversion brokensignatures\ missingfile uploaders undefinedtarget undefinedtracking\ expiredkey expiredsignature revokedkey wrongarchitecture)' \ '--waitforlock=[Time to wait if database is locked]:count:(0 3600)' \ '--spacecheck[Mode for calculating free space before downloading packages]:behavior:(full none)' \ '--dbsafetymargin[Safety margin for the partition with the database]:bytes count:' \ '--safetymargin[Safety margin per partition]:bytes count:' \ '--gunzip[external Program to extract .gz files]:gunzip binary:_files' \ '--bunzip2[external Program to extract .bz2 files]:bunzip binary:_files' \ '--unlzma[external Program to extract .lzma files]:unlzma binary:_files' \ '--unxz[external Program to extract .xz files]:unxz binary:_files' \ '--lunzip[external Program to extract .lz files]:lunzip binary:_files' \ '--list-format[Format for list output]:listfilter format:' \ '--list-skip[Number of packages to skip in list output]:list skip:' \ '--list-max[Maximum number of packages in list output]:list max:' \ '(--nonothingiserror)--nothingiserror[Return error code when nothing was done]' \ '(--listsdownload --nonolistsdownload)--nolistsdownload[Do not download Release nor index files]' \ '(--nokeepunneededlists)--keepunneededlists[Do not delete list/ files that are no longer needed]' \ '(--nokeepunreferencedfiles)--keepunreferencedfiles[Do not delete files that are no longer used]' \ '(--nokeepunusednewfiles)--keepunusednewfiles[Do not delete newly added files that later were found to not be used]' \ '(--nokeepdirectories)--keepdirectories[Do not remove directories when they get emtpy]' \ '(--nokeeptemporaries)--keeptemporaries[When exporting fail do not remove temporary files]' \ '(--noask-passphrase)--ask-passphrase[Ask for passphrases (insecure)]' \ '(--nonoskipold --skipold)--noskipold[Do not ignore parts where no new index file 
is available]' \ '(--guessgpgtty --nonoguessgpgtty)--noguessgpgtty[Do not set GPG_TTY variable even when unset and stdin is a tty]' \ ':reprepro command:->commands' \ '2::arguments:->first' \ '3::arguments:->second' \ '4::arguments:->third' \ '*::arguments:->argument' && return 0 case "$state" in (commands) if [[ -prefix _* ]] ; then _describe "reprepro command" hiddencommands else _describe "reprepro command" commands fi ;; (first argument|second argument|third argument|argument) case "$words[1]" in (export|update|checkupdate|predelete|pull|checkpull|check|reoverride|repairdescriptions|forcerepairdescriptions|rereference|dumptracks|retrack|removealltracks|tidytracks|dumppull|dumpupdate|rerunnotifiers|unusedsources|sourcemissing|reportcruft) _reprepro_codenames ;; (checkpool) if [[ "$state" = "first argument" ]] ; then _wanted -V 'modifiers' expl 'modifier' compadd fast fi ;; (cleanlists|clearvanished|dumpreferences|dumpunreferened|deleteunreferenced|_listmd5sums|_listchecksums|_addmd5sums|_addchecksums|__dumpuncompressors|transatelegacychecksums) ;; (_dumpcontents|_removereferences) if [[ "$state" = "first argument" ]] ; then _reprepro_identifiers fi ;; (list|listfilter|listmatched) if [[ "$state" = "first argument" ]] ; then _reprepro_codenames fi ;; (remove) if [[ "$state" = "first argument" ]] ; then _reprepro_codenames else _reprepro_package_names "$words[2]" fi ;; # removesrcs might be improveable... 
(removesrc|removesrcs) if [[ "$state" = "first argument" ]] ; then _reprepro_codenames else _reprepro_source_package_names "$words[2]" fi ;; (removefilter|removematched) if [[ "$state" = "first argument" ]] ; then _reprepro_codenames fi ;; (gensnapshot) if [[ "$state" = "first argument" ]] ; then _reprepro_codenames elif [[ "$state" = "second argument" ]] ; then _wanted -V 'snapshot names' expl 'snapshot name' compadd $(date -I) fi ;; (removetrack) if [[ "$state" = "first argument" ]] ; then _reprepro_codenames elif [[ "$state" = "second argument" ]] ; then _reprepro_source_package_names "$words[2]" elif [[ "$state" = "third argument" ]] ; then #and version... fi ;; (includedeb) if [[ "$state" = "first argument" ]] ; then _reprepro_codenames elif [[ "$state" = "second argument" ]] ; then _files -g "*.deb" fi ;; (includedsc) if [[ "$state" = "first argument" ]] ; then _reprepro_codenames elif [[ "$state" = "second argument" ]] ; then _files -g "*.dsc" fi ;; (__extractsourcesection) if [[ "$state" = "first argument" ]] ; then _files -g "*.dsc" fi ;; (copy|copysrc|copyfilter|copymatched) if [[ "$state" = "first argument" ]] ; then _reprepro_codenames elif [[ "$state" = "second argument" ]] ; then _reprepro_codenames fi ;; (restore|restoresrc|restorefilter|restorematched) if [[ "$state" = "first argument" ]] ; then _reprepro_codenames # TODO: # elif [[ "$state" = "second argument" ]] ; then # _reprepro_codenames fi ;; (include) if [[ "$state" = "first argument" ]] ; then _reprepro_codenames elif [[ "$state" = "second argument" ]] ; then _files -g "*.changes" fi ;; (__extractfilelist|__extractcontrol) _files -g "*.deb" ;; (processincoming) if [[ "$state" = "first argument" ]] ; then _reprepro_incomings elif [[ "$state" = "second argument" ]] ; then _reprepro_incomingdir "$words[2]" \ && _files -g "*.changes" -W list \ || _files -g "*.changes" fi ;; (_detect|_forget) _reprepro_filekeys ;; (_fakeemptyfilelist) if [[ "$state" = "first argument" ]] ; then _reprepro_filekeys 
fi ;; (_addreference) if [[ "$state" = "first argument" ]] ; then _reprepro_filekeys elif [[ "$state" = "second argument" ]] ; then _reprepro_identifiers fi ;; (__uncompress) if [[ "$state" = "first argument" ]] ; then uncompressions=(.gz .bz2 .lzma .xz .lz) _wanted -V 'uncompressions' expl 'uncompression' compadd -a uncompressions elif [[ "$state" = "second argument" ]] ; then _files elif [[ "$state" = "third argument" ]] ; then _files fi ;; (build-needing) if [[ "$state" = "first argument" ]] ; then _reprepro_codenames elif [[ "$state" = "second argument" ]] ; then _reprepro_architectures ##TODO elif [[ "$state" = "third argument" ]] ; then ##TODO _reprepro_glob fi ;; (flood) if [[ "$state" = "first argument" ]] ; then _reprepro_codenames elif [[ "$state" = "second argument" ]] ; then _reprepro_architectures fi ;; (*) _files ;; esac ;; esac reprepro-4.13.1/docs/outsftphook.py0000755000175100017510000004536612152651661014276 00000000000000#!/usr/bin/python3 # Copyright (C) 2013 Bernhard R. Link # # This example script is free software; you can redistribute it # and/or modify it under the terms of the GNU General Public License # version 2 as published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA

# Those can be set here or in conf/outsftphook.conf:
servername = None
username = None
targetdir = ""

import sys, os, subprocess, select, sftp

class Round(sftp.Enum,
		DONE = -2,
		INDIRECT = -1,
		POOLFILES = 0,
		DISTFILES = 1,
		DELETES = 2,
		):
	pass

# number of non-fatal errors seen so far; a non-zero value makes the
# driver exit with status 1 after the current round.
errors = 0

def printe(s):
	"""Print an error message to stderr and record that one happened."""
	global errors
	print(s, file=sys.stderr)
	errors += 1

# renaming file, assuming all directories exist...
def renamefile(dst, src, donefunc):
	"""Generator task: replace dst by renaming src over it.

	Yields sftp requests (REMOVE of the target, then RENAME with
	OVERWRITE) and is resumed with each answer.  On a successful
	RENAME, donefunc(dst) is called and its return value (a list of
	follow-up tasks) is handed back to the scheduler.
	"""
	a = yield [sftp.REMOVE(targetdir + dst),
			sftp.RENAME(targetdir + src, targetdir + dst,
				[sftp.SSH_FXF_RENAME.OVERWRITE])]
	while True:
		l = []
		if not isinstance(a, sftp.STATUS):
			# BUG FIX: was bare SftpUnexpectedAnswer, a NameError in
			# this module (nothing is imported from sftp by name).
			# NOTE(review): assuming the class lives in the sftp
			# module like all other sftp.* names used here -- confirm.
			raise sftp.SftpUnexpectedAnswer(a, "expecting STATUS")
		if isinstance(a.forr, sftp.REMOVE):
			# a missing target is fine, we were about to replace it anyway
			if a.status != sftp.SSH_FX.OK and a.status != sftp.SSH_FX.NO_SUCH_FILE:
				printe("%s failed: %s" % (a.forr, a))
		elif isinstance(a.forr, sftp.RENAME):
			if a.status != sftp.SSH_FX.OK:
				printe("%s failed: %s" % (a.forr, a))
			else:
				l = donefunc(dst)
		else:
			raise sftp.SftpUnexpectedAnswer(a, a.forr)
		a.forr.done()
		a = yield l

# create symlink, assuming all directories exist...
def symlinkfile(dst, src, donefunc): a = yield [sftp.REMOVE(targetdir + dst), sftp.SYMLINK(targetdir + dst, targetdir + src)] while True: l = [] if not isinstance(a, sftp.STATUS): raise SftpUnexpectedAnswer(a, "expecting STATUS") if isinstance(a.forr, sftp.REMOVE): if a.status != sftp.SSH_FX.OK and a.status != sftp.SSH_FX.NO_SUCH_FILE: printe("%s failed: %s" % (a.forr, a)) elif isinstance(a.forr, sftp.SYMLINK): if a.status != sftp.SSH_FX.OK: printe("%s failed: %s" % (a.forr, a)) else: l = donefunc(dst, message="symlink done") else: raise SftpUnexpectedAnswer(a, a.forr) a.forr.done() a = yield l def deletefile(dst, donefunc): a = yield [sftp.REMOVE(targetdir + dst)] if not isinstance(a, sftp.STATUS): raise SftpUnexpectedAnswer(a, "expecting STATUS") if a.status == sftp.SSH_FX.OK: l = donefunc(dst, message="deleted") elif a.status == sftp.SSH_FX.NO_SUCH_FILE: l = donefunc(dst, message="already deleted") else: printe("%s failed: %s" % (a.forr, a)) l = [] a.forr.done() a = yield l raise SftpUnexpectedAnswer(a, a.forr) def writefile(fname, filetocopy, donefunc): filename = targetdir + fname fd = open(filetocopy, 'rb') dirname = os.path.dirname(filename) if dirname: mode = yield [('waitingfor', sftp.Dirlock, dirname)] else: mode = "top-level" a = yield [('lock', sftp.Semaphore, 'openfile')] if a != "unlock": raise SftpUnexpectedAnswer(a, "waiting for unlock event") a = yield [sftp.OPEN(filename, "CREAT|WRITE", permissions=0o0700)] if mode == "tryandtell" and isinstance(a, sftp.STATUS) and a.status == a.status.NO_SUCH_FILE: a.forr.done() a = yield [('missing', sftp.Dirlock, dirname), ('release', sftp.Semaphore, 'openfile')] if a != "createnew": raise SftpUnexpectedAnswer(a, "waiting for %s" % dirname) mode = a a = yield [('lock', sftp.Semaphore, 'openfile')] if a != "unlock": raise SftpUnexpectedAnswer(a, "waiting for unlock event") a = yield [sftp.OPEN(filename, "CREAT|WRITE")] if not isinstance(a, sftp.HANDLE): a.forr.done() printe("Failed to create %s: %s" % (filename, 
a)) return # raise SftpException("Failed to create %s: %s" % (filename, a)) h = a.handle a.forr.done() if mode == "tryandtell": f = [('found', sftp.Dirlock, dirname), 'wantwrite'] else: f = ['wantwrite'] a = yield f if a != 'canwrite': raise SftpUnexpectedAnswer(a, "waiting for 'canwrite'") ofs = 0 while True: b = fd.read(16376) if len(b) == 0: break a = yield [sftp.WRITE(h, ofs, b), 'wantwrite'] ofs += len(b) b = None while a != 'canwrite': a.forr.done() fd.close() a = yield [sftp.CLOSE(h), ('release', sftp.Semaphore, 'openfile')] while True: if type(a.forr) == sftp.CLOSE: if a.status != sftp.SSH_FX.OK: printe("%s failed: %s" % (a.forr, a)) l = donefunc(fname) else: if a.status != sftp.SSH_FX.OK: printe("%s failed: %s" % (a.forr, a)) l = [] a.forr.done() a = yield l class CriticalError(Exception): pass class ParseError(CriticalError): pass class ParseErrorWrongCount(ParseError): def __init__(field): super().__init__("Wrong number of arguments for %s" % field) class CollectedDistDir: def __init__(self, dir): self.done = False self.failed = False self.dir = dir self.files = dict() self.deletes = dict() self.symlinks = dict() self.transfered = 0 def onedone(self, filename): assert(filename.endswith(".new")) filename = filename[:-4] assert (filename in self.files) self.transfered += 1 self.files[filename].markpartial(filename, "asdotnew") return self.finalizeifready() def finalizeifready(self): assert (not self.done) if len(self.files) != self.transfered: assert (len(self.files) > self.transfered) return [] # everything copied as .new as needed, let's start finalisation self.done = True l = [] for m,e in self.files.items(): l.append(sftp.TaskFromGenerator(renamefile(m, m + ".new", e.doneone))) for m,e in self.deletes.items(): l.append(sftp.TaskFromGenerator(deletefile(m, e.doneone))) for m,(t,e) in self.symlinks.items(): l.append(sftp.TaskFromGenerator(symlinkfile(m, t, e.doneone))) return l class DistDir: def __init__(self, dir, onelog=True): self.dir = dir 
self.files = [] self.deletes = [] self.symlinks = [] def queue(self, todo, distdirs, logfile): if not self.dir in distdirs: collection = CollectedDistDir(self.dir) distdirs[self.dir] = collection else: collection = distdirs[self.dir] for fn, fr in self.files: ffn = self.dir + "/" + fn if logfile.alreadydone.get(ffn, "") == "asdotnew": if logfile.enqueue(todo, ffn, Round.INDIRECT): collection.files[ffn] = logfile collection.transfered += 1 else: if logfile.enqueue(todo, ffn, Round.DISTFILES, ffn + ".new", fr, collection.onedone): collection.files[ffn] = logfile for fn in self.deletes: ffn = self.dir + "/" + fn if logfile.enqueue(todo, ffn, Round.INDIRECT): collection.deletes[ffn] = logfile for fn, flt in self.symlinks: ffn = self.dir + "/" + fn if logfile.enqueue(todo, ffn, Round.INDIRECT): collection.symlinks[ffn] = (flt, logfile) class LogFile: def parselogline(self, fields): if fields[0] == 'POOLNEW': if len(fields) != 2: raise ParseErrorWrongCount(fields[0]) self.newpoolfiles.append(fields[1]) elif fields[0] == 'POOLDELETE': if len(fields) != 2: raise ParseErrorWrongCount(fields[0]) self.deletepoolfiles.append(fields[1]) elif fields[0].startswith('BEGIN-'): pass elif fields[0].startswith('END-'): pass elif fields[0].startswith('DIST'): command = fields[0][4:] if command not in ['KEEP', 'FILE', 'DELETE', 'SYMLINK']: raise ParseError("Unknown command %s" % command) if not fields[1] in self.dists: d = self.dists[fields[1]] = DistDir(fields[1]) else: d = self.dists[fields[1]] if command == 'FILE': if len(fields) != 4: raise ParseErrorWrongCount(fields[0]) d.files.append((fields[2], fields[3])) elif command == 'DELETE': if len(fields) != 3: raise ParseErrorWrongCount(fields[0]) d.deletes.append(fields[2]) elif command == 'SYMLINK': if len(fields) != 4: raise ParseErrorWrongCount(fields[0]) d.symlinks.append((fields[2], fields[3])) elif fields[0] == "DONE": self.alreadydone[fields[2]] = fields[1] else: raise ParseError("Unknown command %s" % fields[0]) def 
__init__(self, logfile, donefile): self.alreadydone = dict() self.logfile = logfile self.donefile = donefile try: lf = open(logfile, 'r', encoding='utf-8') except Exception as e: raise CriticalError("Cannot open %s: %s" % (repr(logfile), e)) self.newpoolfiles = [] self.dists = {} self.deletepoolfiles = [] self.todocount = 0 for l in lf: if l[-1] != '\n': raise ParseError("not a text file") self.parselogline(l[:-1].split('\t')) lf.close() def queue(self, todo, distdirs): self.todo = set() for f in self.deletepoolfiles: self.enqueue(todo, f, Round.DELETES, f, None, self.doneone) for f in self.newpoolfiles: self.enqueue(todo, f, Round.POOLFILES, f, f, self.doneone) for d in self.dists.values(): d.queue(todo, distdirs, self) if not self.todocount: # nothing to do left, mark as done: os.rename(self.logfile, self.donefile) del self.todo return self.todocount > 0 def enqueue(self, dic, elem, *something): if elem in self.alreadydone and self.alreadydone[elem] != "asdotnew": if not elem in dic: dic[elem] = (Round.DONE,) return False elif not elem in dic: self.todo.add(elem) self.todocount += 1 dic[elem] = something return True else: self.markpartial(elem, "obsoleted") return False def markpartial(self, filename, message="done"): if options.verbose: print("%s: %s" % (message, repr(filename))) f = open(self.logfile, "a", encoding="utf-8") print("DONE\t%s\t%s" % (message, filename), file=f) f.close() def doneone(self, filename, message="done"): assert (filename in self.todo) self.todo.discard(filename) assert (self.todocount > 0) self.todocount -= 1 self.markpartial(filename, message=message) if self.todocount == 0: os.rename(self.logfile, self.donefile) return [] def doround(s, r, todo): for p,v in todo.items(): assert (isinstance(v[0], Round)) if v[0] != r: continue round, filename, source, donefunc = v if round != r: continue if source is None: s.start(sftp.TaskFromGenerator(deletefile(filename, donefunc))) else: s.start(sftp.TaskFromGenerator(writefile(filename, 
options.outdir + "/" + source, donefunc))) s.dispatch() class Options: def __init__(self): self.verbose = None self.pending = False self.autoretry = None self.ignorepending = False self.forceorder = False self.confdir = None self.basedir = None self.outdir = None self.logdir = None self.confdir = None self.debugsftp = 0 options = Options() def parseoptions(args): while args and args[0].startswith("--"): arg = args.pop(0) if arg == "--verbose" or arg == "-v": options.verbose = True elif arg.startswith("--debug-sftp="): options.debugsftp = int(arg[13:]) elif arg == "--pending": options.pending = True elif arg == "--ignore-pending": options.ignorepending = True elif arg == "--force-order": options.forceorder = True elif arg == "--basedir=": options.basedir = arg[:10] elif arg == "--basedir": options.basedir = args.pop(0) elif arg == "--outdir=": options.outdir = arg[:9] elif arg == "--outdir": options.outdir = args.pop(0) elif arg == "--logdir=": options.logdir = arg[:9] elif arg == "--logdir": options.logdir = args.pop(0) elif arg == "--help": print("""outsftphook.py: an reprepro outhook example using sftp This hook sends changed files over sftp to a remote host. It is usually put into conf/options as outhook, but may also be called manually. 
Options: --verbose tell what you did --basedir DIR sets the following to default values --outdir DIR directory to find pool/ and dist/ directories in --logdir DIR directory to check for unprocessed outlog files --pending process pending files instead of arguments --autoretry reprocess older pending files, too --ignore-pending ignore pending files --force-order do not bail out if the given files are not ordered --debug-sftp=N debug sftp.py (or your remote sftp server) """) raise SystemExit(0) else: raise CriticalError("Unexpected command line option %s" %repr(arg)) if options.pending and options.ignorepending: raise CriticalError("Cannot do both --pending and --ignore-pending") if options.autoretry and options.forceorder: raise CriticalError("Cannot do both --pending and --force-order") if options.autoretry and options.ignorepending: raise CriticalError("Cannot do both --autoretry and --ignore-pending") # we need confdir, logdir and outdir, if they are given, all is done if options.logdir is not None and options.outdir is not None and options.confdir is not None: return # otherwise it gets more complicated... 
preconfdir = options.confdir if preconfdir is None: preconfdir = os.environ.get("REPREPRO_CONFIG_DIR", None) if preconfdir is None: if options.basedir is not None: preconfdir = options.basedir + "/conf" elif "REPREPRO_BASE_DIR" in os.environ: preconfdir = os.environ["REPREPRO_BASE_DIR"] + "/conf" else: raise CriticalError("If not called by reprepro, please either give (--logdir and --outdir) or --basedir!") optionsfile = preconfdir + "/options" if os.path.exists(optionsfile): f = open(optionsfile, "r") for line in f: line = line.strip() if len(line) == 0 or line[0] == '#' or line[0] == ';': continue line = line.split() if line[0] == "basedir" and options.basedir is None: options.basedir = line[1] elif line[0] == "confdir" and options.confdir is None: options.confdir = line[1] elif line[0] == "logdir" and options.logdir is None: options.logdir = line[1] elif line[0] == "outdir" and options.outdir is None: options.outdir = line[1] f.close() if options.basedir is None: options.basedir = os.environ.get("REPREPRO_BASE_DIR", None) if options.outdir is None: if options.basedir is None: raise CriticalError("Need --basedir if not called by reprepro") options.outdir = options.basedir if options.logdir is None: if options.basedir is None: raise CriticalError("Need --basedir if not called by reprepro") options.logdir = options.basedir + "/logs" if options.confdir is None: if "REPREPRO_CONFIG_DIR" in os.environ: options.confdir = os.environ["REPREPRO_CONFIG_DIR"] else: if options.basedir is None: raise CriticalError("Need --basedir if not called by reprepro") options.confdir = options.basedir + "/conf" def main(args): global errors, servername, username, targetdir if "REPREPRO_OUT_DIR" in os.environ or "REPREPRO_LOG_DIR" in os.environ: # assume being called by reprepro if one of those variable # is set, so they all should be set: options.outdir = os.environ["REPREPRO_OUT_DIR"] options.logdir = os.environ["REPREPRO_LOG_DIR"] options.confdir = os.environ["REPREPRO_CONFIG_DIR"] 
else: parseoptions(args) assert (options.outdir and (options.ignorepending or options.logdir) and options.confdir) conffilename = options.confdir + "/outsftphook.conf" if os.path.exists(conffilename): conffile = open(conffilename, "r") for line in conffile: line = line.strip().split(None, 1) if len(line) == 0 or line[0].startswith("#"): continue if line[0] == "servername": servername = line[1] elif line[0] == "username": username = line[1] elif line[0] == "targetdir": targetdir = line[1] elif line[0] == "verbose": if line[1].lower() in {'yes', 'on', '1', 'true'}: if options.verbose is None: options.verbose = True elif line[1].lower() in {'no', 'off', '0', 'false'}: if options.verbose is None: options.verbose = False else: raise CriticalError("Cannot parse %s: " + "unparseable truth value %s" % (repr(conffilename), repr(line[1]))) elif line[0] == "autoretry": if line[1].lower() in {'yes', 'on', '1', 'true'}: if options.autoretry is None: options.autoretry = True elif line[1].lower() in {'no', 'off', '0', 'false'}: if options.autoretry is None: options.autoretry = False else: raise CriticalError("Cannot parse %s: " + "unparseable truth value %s" % (repr(conffilename), repr(line[1]))) else: raise CriticalError("Cannot parse %s: unknown option %s" % (repr(conffilename), repr(line[0]))) conffile.close() if targetdir and not targetdir.endswith("/"): targetdir = targetdir + "/" if not servername: raise CriticalError("No servername configured!") if not username: raise CriticalError("No username configured!") if len(args) <= 0: if not options.pending: raise CriticalError("No .outlog files given at command line!") else: if options.pending: raise CriticalError("--pending might not be combined with arguments!") if options.ignorepending: pendinglogs = set() else: pendinglogs = set(name for name in os.listdir(options.logdir) if name.endswith(".outlog")) maxbasename = None for f in args: if len(f) < 8 or f[-7:] != ".outlog": raise CriticalError("command line argument '%s' does 
not look like a .outlog file!" % f) bn = os.path.basename(f) pendinglogs.discard(bn) if maxbasename: if maxbasename < bn: maxbasename = bn elif not options.forceorder: raise CriticalError("The arguments are not in order (%s <= %s). Applying in this order might not be safe. (use --force-order to proceed in this order anyway)" % (bn, maxbasename)) else: maxbasename = bn if options.pending: pendinglogs = sorted(pendinglogs) else: pendinglogs = sorted(filter(lambda bn: bn < maxbasename, pendinglogs)) if pendinglogs and not options.autoretry: raise CriticalError("Unprocessed earlier outlogs found: %s\nYou need to process them first (or use --autoretry or autoretry true in outsftphook.conf to automatically process them)") if pendinglogs and len(args) > 1: raise CriticalError("autoretry does not work with multiple log files given (yet).") args = list(map(lambda bn: options.logdir + "/" + bn, pendinglogs)) + args outlogfiles = [] for f in args: donefile = f[:-7] + ".done" if options.verbose: print("Parsing '%s'" % f) try: outlogfiles.append(LogFile(f, donefile)) except ParseError as e: raise CriticalError("Error parsing %s: %s" %(f, str(e))) todo = {} distdirs = {} workpending = False for o in reversed(outlogfiles): workpending |= o.queue(todo, distdirs) if not workpending: if options.verbose: print("Nothing to do") raise SystemExit(0) s = sftp.Connection(servername=servername, username=username, debug=options.debugsftp) doround(s, Round.POOLFILES, todo) if errors: raise SystemExit(1) for d in distdirs.values(): for t in d.finalizeifready(): s.start(t) doround(s, Round.DISTFILES, todo) if errors: raise SystemExit(1) doround(s, Round.DELETES, todo) if errors: raise SystemExit(1) try: main(sys.argv[1:]) except CriticalError as e: print(str(e), file=sys.stderr) raise SystemExit(1) reprepro-4.13.1/docs/FAQ0000644000175100017510000003042512152651661011654 00000000000000This is a list of "frequently" asked questions. 
1.1) What can I do when reprepro complains about a missing .orig.tar.gz?
1.2) Why does it refuse a file when one in another suite has the same name?
1.4) The key to sign my Release files needs a passphrase, what to do?
1.5) How do I change how files are downloaded?
1.6) How to omit packages missing files when updating.
2.1) Does reprepro support to generate Release.gpg files?
2.2) Does reprepro support tildes ('~') in versions?
2.3) Does reprepro support generation of Contents-<arch>.gz files?
3.1) Can I have two versions of a package in the same distribution?
3.2) Can reprepro pass through a server-supplied Release.gpg?
9) Feature ... is missing, can you add it?

1.1) What can I do when reprepro complains about a missing .orig.tar.gz?
------------------------------------------------------------------------
When 'include'ing a .changes file reprepro by default only adds files
referenced in the .changes file into the pool/-hierarchy and does not
search for files referenced in a .dsc file and thus fails if this
.orig.tar.gz is not already in the pool. You are facing the choice:
- copy the .orig.tar.gz by hand into the appropriate place within pool/
  and try again. reprepro will find it there when you try it the next
  time and add it to its database.
- use --ignore=missingfile to tell reprepro to search for such files in
  the directory the .changes file resides in.
- modify the .changes file by hand to reference the .orig.tar.gz
- use changestool (comes with reprepro since version 1.3.0) to list the
  file. ("changestool <.changesfile> includeallsources")
- use dpkg-buildpackage -sa the next time you build a package so that it
  calls dpkg-genchanges with -sa which then always lists .orig.tar.gz
  and not only if it ends in -0 or -1.

1.2) Why does it refuse a file when one in another suite has the same name?
----------------------------------------------------------------------------
Reprepro uses Debian's way to organize the pool hierarchy, which means
that the directory and name a file is saved under is only determined by
its sourcename, its name and its version and especially not by the
distribution it belongs to. (This is the intent of having a pool
directory, so that if two distributions have the same version, the disk
space is only used once). This means that if different versions of a
package having the same version string are put in the same reprepro
repository (even if put into different distributions within that),
reprepro will refuse to do so. (Unless you have an md5sum collision, in
which case it will put the one and just replace the second with the
first). The only way to work around this is to put the different
distributions into different repositories. But in general it is really
worth the effort to get the versioning right instead: Having multiple
packages with the same version makes it hard to track down problems,
because it is easy to mix them up. Also up/downgrading a host from one
distribution to the other will not change the package but just keep the
old (as they are the same version, so they have to be the same, apt and
dpkg will think). How to deal with this without separating repositories
depends on how you reached this situation:
- in the past Debian's stable and stable-security buildds sometimes both
  built a package and for some architectures the one version entered
  security.debian.org and the other ftp.debian.org with the next point
  release. (This should be fixed now. And it is always considered a bug,
  so if you hit this, please report it). If you mirror such a situation,
  just update one of the distributions and manually include the package
  into the other distribution. As the versions are the same, reprepro
  will keep with this and not try to download the other version, err
  other same version, err ...
- backports (i.e.
packages rebuilt for older distributions) Common practice is to append the version with a reducing ~, i.e.
(Simply implementing an ignore for that is not that easy, as I would have to special-case holding an old version when unavailable packages should be deleted, and add some costly information-pushing between layers (after all, each file can belong to multiple packages and packages can have more than one file, so keeping track of which package should get a mark that files were missing needs an n-to-n relation that should never be used except in the case where such an error happens)).
Easiest is adding something like FilterFormula: package (!= xen-shell) or FilterFormula: package (!= xen-shell) | version (!=1.0-2) | !files to your rule in conf/updates. ( the "| ! files" tells it to only omit the source package xen-shell, as source packages have a files field. Make sure the package in question does not require you to make the source available or you are not making your repository accessible to others). * Another way is adding something like FilterList: install excludefile and adding a file conf/excludefile with content xen-shell deinstall (the install will tell it to install what is not listed in the file, the deinstall on xen-shell will tell it to not install that package) * Finally you can also supply a ListHook: with a script copying its first argument to the second argument, removing all occurrences of the package you do not want (take a look intro the dctrl-tool package for tools helping you with this). - the worst solution is to just propagate the problem further, by just telling reprepro the file is there with the correct md5sum while it is not. (Via the _addmd5sums command of reprepro). Unless you run checkpool reprepro will not notice what you have done and will not even try to download that file once it becomes available. So don't do this. 2.1) Does reprepro support to generate Release.gpg files? --------------------------------------------------------- Yes, add a SignWith in the suite's definition in conf/distributions. (and take a look what the man page says about SignWith) 2.2) Does reprepro support tildes ('~') in versions? ---------------------------------------------------- Yes, but in .changes files only since version 0.5. (You can work around this in older versions by using includedeb and includedsc on the .deb and .dsc files within the .changes file, though) 2.3) Does reprepro support generation of Contents-.gz files? 
------------------------------------------------------------------ Yes, since version 1.1.0 (well, actually since 0.8.2 but a bug caused the generated files to not be up to date unless manually exporting the distributions in question). Look for "Contents" in the man page. 3.1) Can I have two versions of a package in the same distribution? ------------------------------------------------------------------- Sorry, this is not possible right now, as reprepro heavily optimizes at only having one version of a package in a suite-type-component-architecture quadruple. You can have different versions in different architectures and/or components within the same suite. (Even different versions of a architecture all package in different architectures of the same suite). But within the same architecture and the same component of a distribution it is not possible. 3.2) Can reprepro pass through a server-supplied Release.gpg? ------------------------------------------------------------------- No. The reason for this is that the Release file will be different, so a Release.gpg would not match. The reason that the Release file looks differently is that reprepro mirrors packages. While it can create a distribution with the same packages as a distribution it mirrors. It will decide on its own where to put the files, so their Filename: or Directory: might differ. It may create a different set of compressions for the generated index files. It does not mirror Packages.diff directories (but only comes with helpers to create diffs between different states of the mirror). It does not mirror Contents files but creates them; and so on. So to be able to mirror distribution signatures almost all the functionality of reprepro would need to be duplicated (once supporting literal mirroring, once support local packages, partial mirroring, merging mirroring, pool condensing), thus I decided that this is better a task for another program. 
(Note that if you already have a local literal mirror, you can also use that as upstream for partial/merged/extended mirrored distributions of that. If you use the file:/// in Method: (as opposed to copy:///), reprepro will make hardlinks for files in pool/ if possible). 9) Feature ... is missing, can you add it? ------------------------------------------ First, please take another look at the man page. My documentation is not very good, so it is easy to overlook some feature even when it is described already. If it is not there, just write me a mail (or better write a wishlist report to the Debian BTS, then it cannot get lost). Some things I add quite fast, other stuff takes a bit. Things incompatible with the current underlying infrastructures or past design decisions may never come, but if I have it on the TODO list of things to add, it help the code to develop in a direction that things like that might be possible in the future. reprepro-4.13.1/docs/reprepro.bash_completion0000644000175100017510000005040612152651661016251 00000000000000_reprepro() { local cur prev commands options noargoptions i state cmd ignores hiddencommands commands codenames confdir outdir basedir architectures components importrules snapshots confdir="" basedir="" outdir="" function parse_config() { local conffile distfile if [[ -n "$confdir" ]] ; then conffile="$confdir/options" distfile="$confdir/distributions" elif [[ -n "$basedir" ]] ; then conffile="$basedir/conf/options" distfile="$basedir/conf/distributions" else conffile="./conf/options" distfile="./conf/distributions" fi if [ -z "$confdir" ] && [[ -e "$conffile" ]] ; then if grep -q '^confdir ' -- "$conffile" 2>/dev/null ; then distfile="$(grep '^confdir ' -- "$conffile" 2>/dev/null | sed -e 's/^confdir *//')/distributions" elif [ -z "$basedir" ] && grep -q '^basedir ' -- "$conffile" 2>/dev/null ; then distfile="$(grep '^basedir ' -- "$conffile" 2>/dev/null | sed -e 's/^basedir *//')/conf/distributions" fi fi if [[ -d "$distfile" 
]] ; then codenames="$(awk -- '/^[Cc][Oo][Dd][Ee][Nn][Aa][Mm][Ee]: / {$1="";print}' "$distfile"/*.conf)" architectures="$(awk -- '/^[Aa][Rr][Cc][Hh][Ii][Tt][Ee][Cc][Tt][Uu][Rr][Ee][Ss]: / {$1="";print}' "$distfile"/*.conf)" components="$(awk -- '/^[Cc][Oo][Mm][Pp][Oo][Nn][Ee][Nn][Tt][Ss]: / {$1="";print}' "$distfile"/*.conf)" elif [[ -e "$distfile" ]] ; then codenames="$(awk -- '/^[Cc][Oo][Dd][Ee][Nn][Aa][Mm][Ee]: / {$1="";print}' "$distfile")" architectures="$(awk -- '/^[Aa][Rr][Cc][Hh][Ii][Tt][Ee][Cc][Tt][Uu][Rr][Ee][Ss]: / {$1="";print}' "$distfile")" components="$(awk -- '/^[Cc][Oo][Mm][Pp][Oo][Nn][Ee][Nn][Tt][Ss]: / {$1="";print}' "$distfile")" else codenames="experimental woody sarge sid etch whatever-you-defined" architectures="source i386 abacus whatever-you-defined" components="main contrib non-free whatever-you-defined" fi } function parse_config_for_distdir() { local conffile if [[ -n "$confdir" ]] ; then conffile="$confdir/options" elif [[ -n "$basedir" ]] ; then conffile="$basedir/conf/options" else conffile="./conf/options" fi if [ -z "$basedir" ] && [[ -e "$conffile" ]] ; then if grep -q '^basedir ' -- "$conffile" 2>/dev/null ; then basedir="$(grep '^basedir ' -- "$conffile" 2>/dev/null | sed -e 's/^basedir *//')" fi fi if [ -z "$outdir" ] && [[ -e "$conffile" ]] ; then if grep -q '^outdir ' -- "$conffile" 2>/dev/null ; then outdir="$(grep '^outdir ' -- "$conffile" 2>/dev/null | sed -e 's/^outdir *//')" fi fi if [ -z "$distdir" ] && [[ -e "$conffile" ]] ; then if grep -q '^distdir ' -- "$conffile" 2>/dev/null ; then distdir="$(grep '^distdir ' -- "$conffile" 2>/dev/null | sed -e 's/^distdir *//')" fi fi if [ -z "$basedir" ] ; then basedir="." fi if [ -z "$outdir" ] && ! [ -z "$basedir" ] ; then outdir="$basedir" fi if [ -z "$distdir" ] && ! 
[ -z "$outdir" ] ; then distdir="$outdir/dists" fi } function parse_incoming() { local conffile incomingfile if [[ -n "$confdir" ]] ; then conffile="$confdir/options" incomingfile="$confdir/incoming" elif [[ -n "$basedir" ]] ; then conffile="$basedir/conf/options" incomingfile="$basedir/conf/incoming" else conffile="./conf/options" incomingfile="./conf/incoming" fi if [ -z "$confdir" ] && [[ -e "$conffile" ]] ; then if grep -q '^confdir ' -- "$conffile" 2>/dev/null ; then incomingfile="$(grep '^confdir ' -- "$conffile" 2>/dev/null | sed -e 's/^confdir //')/incoming" elif [ -z "$basedir" ] && grep -q '^basedir ' -- "$conffile" 2>/dev/null ; then incoming="$(grep '^basedir ' -- "$conffile" 2>/dev/null | sed -e 's/^basedir //')/conf/incoming" fi fi if [[ -d "$incomingfile" ]] ; then importrules="$(awk -- '/^[Nn][Aa][Mm][Ee]: / {$1="";print}' "$incomingfile"/*.conf)" elif [[ -e "$incomingfile" ]] ; then importrules="$(awk -- '/^[Nn][Aa][Mm][Ee]: / {$1="";print}' "$incomingfile")" else importrules="rule-name" fi } COMPREPLY=() ignores='ignore flatandnonflat forbiddenchar 8bit emptyfilenamepart\ spaceonlyline malformedchunk unknownfield\ wrongdistribution missingfield brokenold\ undefinedtracking undefinedtarget unusedoption\ brokenversioncmp extension unusedarch surprisingarch\ surprisingbinary wrongsourceversion wrongversion dscinbinnmu\ brokensignatures uploaders missingfile longkeyid\ expiredkey expiredsignature revokedkey oldfile wrongarchitecture' noargoptions='--delete --nodelete --help -h --verbose -v\ --nothingiserror --nolistsdownload --keepunreferencedfiles --keepunusednewfiles\ --keepdirectories --keeptemporaries --keepuneededlists\ --ask-passphrase --nonothingiserror --listsdownload\ --nokeepunreferencedfiles --nokeepdirectories --nokeeptemporaries\ --nokeepuneededlists --nokeepunusednewfiles\ --noask-passphrase --skipold --noskipold --show-percent \ --version --guessgpgtty --noguessgpgtty --verbosedb --silent -s --fast' options='-b -i --basedir --outdir 
--ignore --unignore --methoddir --distdir --dbdir\ --listdir --confdir --logdir --morguedir \ --section -S --priority -P --component -C\ --architecture -A --type -T --export --waitforlock \ --spacecheck --safetymargin --dbsafetymargin\ --gunzip --bunzip2 --unlzma --unxz --lunzip --gnupghome --list-format --list-skip --list-max\ --outhook --endhook'
	i=1
	prev=""
	cmd="XYZnoneyetXYZ"
	# Pre-scan the words already on the command line: remember directory
	# options we need later (basedir/outdir/distdir/confdir) and find the
	# command word, skipping over options and their arguments.
	while [[ $i -lt $COMP_CWORD ]] ; do
		cur=${COMP_WORDS[i]}
		prev=""
		case "$cur" in
			--basedir=*)
				basedir="${cur#--basedir=}"
				i=$((i+1))
				;;
			--outdir=*)
				# bugfix: strip the --outdir= prefix (previously
				# stripped --basedir=, leaving the value unchanged)
				outdir="${cur#--outdir=}"
				i=$((i+1))
				;;
			--distdir=*)
				# bugfix: strip the --distdir= prefix (previously
				# stripped --basedir=, leaving the value unchanged)
				distdir="${cur#--distdir=}"
				i=$((i+1))
				;;
			--confdir=*)
				confdir="${cur#--confdir=}"
				i=$((i+1))
				;;
			--*=*)
				i=$((i+1))
				;;
			-b|--basedir)
				prev="$cur"
				basedir="${COMP_WORDS[i+1]}"
				i=$((i+2))
				;;
			--outdir)
				prev="$cur"
				outdir="${COMP_WORDS[i+1]}"
				i=$((i+2))
				;;
			--distdir)
				prev="$cur"
				distdir="${COMP_WORDS[i+1]}"
				i=$((i+2))
				;;
			--confdir)
				prev="$cur"
				confdir="${COMP_WORDS[i+1]}"
				i=$((i+2))
				;;
			-i|--ignore|--unignore|--methoddir|--distdir|--dbdir|--listdir|--section|-S|--priority|-P|--component|-C|--architecture|-A|--type|-T|--export|--waitforlock|--spacecheck|--checkspace|--safetymargin|--dbsafetymargin|--logdir|--gunzip|--bunzip2|--unlzma|--unxz|--lunzip|--gnupghome|--morguedir)
				# option taking an argument we do not need to remember
				prev="$cur"
				i=$((i+2))
				;;
			--*|-*)
				i=$((i+1))
				;;
			*)
				# first non-option word is the command
				cmd="$cur"
				i=$((i+1))
				break
				;;
		esac
	done
	cur=${COMP_WORDS[COMP_CWORD]}
	# If the cursor is on the argument of the last seen option,
	# complete that option's argument:
	if [[ $i -gt $COMP_CWORD && -n "$prev" ]]; then
		case "$prev" in
			-b|--basedir|--outdir|--methoddir|--distdir|--dbdir|--listdir|--confdir)
				COMPREPLY=( $( compgen -d -- $cur ) )
				return 0
				;;
			-T|--type)
				COMPREPLY=( $( compgen -W "dsc deb udeb" -- $cur ) )
				return 0
				;;
			-i|--ignore|--unignore)
				COMPREPLY=( $( compgen -W "$ignores" -- $cur ) )
				return 0
				;;
			-P|--priority)
				COMPREPLY=( $( compgen -W "required important standard optional extra" -- $cur ) )
				return 0
				;;
			-S|--section)
				COMPREPLY=( $( compgen -W "admin base comm contrib devel doc editors electronics embedded games gnome graphics hamradio interpreters kde libs
libdevel mail math misc net news non-free oldlibs otherosfs perl python science shells sound tex text utils web x11 contrib/admin contrib/base contrib/comm contrib/contrib contrib/devel contrib/doc contrib/editors contrib/electronics contrib/embedded contrib/games contrib/gnome contrib/graphics contrib/hamradio contrib/interpreters contrib/kde contrib/libs contrib/libdevel contrib/mail contrib/math contrib/misc contrib/net contrib/news contrib/non-free contrib/oldlibs contrib/otherosfs contrib/perl contrib/python contrib/science contrib/shells contrib/sound contrib/tex contrib/text contrib/utils contrib/web contrib/x11 non-free/admin non-free/base non-free/comm non-free/contrib non-free/devel non-free/doc non-free/editors non-free/electronics non-free/embedded non-free/games non-free/gnome non-free/graphics non-free/hamradio non-free/interpreters non-free/kde non-free/libs non-free/libdevel non-free/mail non-free/math non-free/misc non-free/net non-free/news non-free/non-free non-free/oldlibs non-free/otherosfs non-free/perl non-free/python non-free/science non-free/shells non-free/sound non-free/tex non-free/text non-free/utils non-free/web non-free/x11" -- $cur ) ) return 0 ;; -A|--architecture) parse_config COMPREPLY=( $( compgen -W "$architectures" -- $cur ) ) return 0 ;; -C|--component) parse_config COMPREPLY=( $( compgen -W "$components" -- $cur ) ) return 0 ;; --export) COMPREPLY=( $( compgen -W "never changed normal force" -- $cur ) ) return 0 ;; --waitforlock) COMPREPLY=( $( compgen -W "0 60 3600 86400" -- $cur ) ) return 0 ;; --spacecheck) COMPREPLY=( $( compgen -W "none full" -- $cur ) ) return 0 ;; --safetymargin) COMPREPLY=( $( compgen -W "0 1048576" -- $cur ) ) return 0 ;; --dbsafetymargin) COMPREPLY=( $( compgen -W "0 104857600" -- $cur ) ) return 0 ;; esac fi if [[ "XYZnoneyetXYZ" = "$cmd" ]] ; then commands='build-needing\ check\ checkpool\ checkpull\ checkupdate\ cleanlists\ clearvanished\ collectnewchecksums\ copy\ copyfilter\ copymatched\ 
copysrc\ createsymlinks\ deleteunreferenced\ deleteifunreferenced\ dumpreferences\ dumptracks\ dumppull\ dumpunreferenced\ dumpupdate\ export\ forcerepairdescriptions\ flood\ generatefilelists\ gensnapshot\ include\ includedeb\ includedsc\ includeudeb\ list\ listfilter\ listmatched\ ls\ lsbycomponent\ predelete\ processincoming\ pull\ remove\ removealltracks\ removefilter\ removematched\ removesrc\ removesrcs\ removetrack\ reoverride\ repairdescriptions\ reportcruft\ rereference\ rerunnotifiers\ restore\ restorefilter\ restorematched\ restoresrc\ retrack\ sourcemissing\ tidytracks\ translatefilelists\ translatelegacychecksums\ unusedsources\ update'
		hiddencommands='__d\ __dumpuncompressors __extractcontrol\ __extractfilelist\ __extractsourcesection\ __uncompress\ _addchecksums\ _addpackage\ _addreference\ _detect\ _dumpcontents\ _fakeemptyfilelist\ _forget\ _listchecksums\ _listconfidentifiers\ _listdbidentifiers\ _listmd5sums\ _removereferences\ _versioncompare'
		# No command given yet: complete options or command names.
		if [[ "$cur" == -* ]]; then
			case "$cur" in
				--ignore=*)
					COMPREPLY=( $( compgen -W "$ignores" -- ${cur#--ignore=} ) )
					;;
				--unignore=*)
					COMPREPLY=( $( compgen -W "$ignores" -- ${cur#--unignore=} ) )
					;;
				--component=*)
					parse_config
					# bugfix: the $ was missing, so the literal text
					# {cur#--component=} was completed against instead
					# of the prefix the user already typed
					COMPREPLY=( $( compgen -W "$components" -- ${cur#--component=} ) )
					;;
				--architectures=*)
					parse_config
					# bugfix: same missing $ as in --component= above
					COMPREPLY=( $( compgen -W "$architectures" -- ${cur#--architectures=} ) )
					;;
				*)
					COMPREPLY=( $( compgen -W "$options $noargoptions" -- $cur ) )
					;;
			esac
		elif [[ "$cur" == _* ]]; then
			COMPREPLY=( $( compgen -W "$hiddencommands" -- $cur ) )
		else
			COMPREPLY=( $( compgen -W "$commands" -- $cur ) )
		fi
		return 0
	fi
	# A command was found: complete its arguments.
	case "$cmd" in
		remove|list|listfilter|removefilter|removetrack|listmatched|removematched|removesrc|removesrcs)
			# first argument is the codename
			if [[ $i -eq $COMP_CWORD ]] ; then
				parse_config
				COMPREPLY=( $( compgen -W "$codenames" -- $cur ) )
				return 0
			fi
			# these later could also look for stuff, but
			# that might become a bit slow
			;;
export|update|checkupdate|pull|checkpull|rereference|retrack|removealltracks|tidytracks|dumptracks|check|repairdescriptions|forcerepairdescriptions|reoverride|rerunnotifiers|dumppull|dumpupdate|unusedsources|sourcemissing|reportcruft) # all arguments are codenames parse_config COMPREPLY=( $( compgen -W "$codenames" -- $cur ) ) return 0 ;; processincoming) # arguments are rule-name from conf/incoming parse_config parse_incoming if [[ $i -eq $COMP_CWORD ]] ; then COMPREPLY=( $( compgen -W "$importrules" -- $cur ) ) return 0 fi ;; collectnewchecksums|cleanlists) return 0 ;; checkpool) # first argument can be fast if [[ $i -eq $COMP_CWORD ]] ; then COMPREPLY=( $( compgen -W "fast" -- $cur ) ) return 0 fi return 0 ;; flood) # first argument is the codename if [[ $i -eq $COMP_CWORD ]] ; then parse_config COMPREPLY=( $( compgen -W "$codenames" -- $cur ) ) return 0 fi # then an architecture might follow if [[ $(( $i + 1 )) -eq $COMP_CWORD ]] ; then parse_config COMPREPLY=( $( compgen -W "$architectures" -- $cur ) ) return 0 fi # then nothing else return 0 ;; build-needing) # first argument is the codename if [[ $i -eq $COMP_CWORD ]] ; then parse_config COMPREPLY=( $( compgen -W "$codenames" -- $cur ) ) return 0 fi # then an architecture if [[ $(( $i + 1 )) -eq $COMP_CWORD ]] ; then parse_config COMPREPLY=( $( compgen -W "$architectures" -- $cur ) ) return 0 fi # then a glob if [[ $(( $i + 2 )) -eq $COMP_CWORD ]] ; then COMPREPLY=( $( compgen -W "$cur'\*'" -- $cur ) ) return 0 fi return 0 ;; __uncompress) # first argument is method if [[ $i -eq $COMP_CWORD ]] ; then COMPREPLY=( $( compgen -W ".gz .bz2 .lzma .xz .lz" -- $cur ) ) return 0 fi if [[ $(( $i + 1 )) -eq $COMP_CWORD ]] ; then COMPREPLY=( $( compgen -f -- $cur ) ) return 0 fi if [[ $(( $i + 2 )) -eq $COMP_CWORD ]] ; then COMPREPLY=( $( compgen -f -- $cur ) ) return 0 fi return 0 ;; __extractsourcesection) if [[ $i -eq $COMP_CWORD ]] ; then _filedir dsc fi return 0 ;; includedeb) # first argument is the codename if 
[[ $i -eq $COMP_CWORD ]] ; then parse_config COMPREPLY=( $( compgen -W "$codenames" -- $cur ) ) return 0 fi # then one .deb file follows if [[ $(( $i + 1 )) -eq $COMP_CWORD ]] ; then _filedir deb fi return 0 ;; includedsc) # first argument is the codename if [[ $i -eq $COMP_CWORD ]] ; then parse_config COMPREPLY=( $( compgen -W "$codenames" -- $cur ) ) return 0 fi # then one .dsc file follows if [[ $(( $i + 1 )) -eq $COMP_CWORD ]] ; then _filedir dsc fi return 0 ;; include) # first argument is the codename if [[ $i -eq $COMP_CWORD ]] ; then parse_config COMPREPLY=( $( compgen -W "$codenames" -- $cur ) ) return 0 fi # then one .changes file follows if [[ $(( $i + 1 )) -eq $COMP_CWORD ]] ; then _filedir changes fi return 0 ;; gensnapshot) # first argument is a codename if [[ $i -eq $COMP_CWORD ]] ; then parse_config COMPREPLY=( $( compgen -W "$codenames" -- $cur ) ) return 0 fi # then the name of a snapshot, add a suggestion if [[ $(( $i + 1 )) -eq $COMP_CWORD ]] ; then COMPREPLY=( $( compgen -W "$(date +%Y/%m/%d)" -- $cur ) ) return 0 fi return 0; ;; copy|copysrc|copyfilter|copymatched) # first argument is a codename if [[ $i -eq $COMP_CWORD ]] ; then parse_config COMPREPLY=( $( compgen -W "$codenames" -- $cur ) ) return 0 fi # second argument is a codename if [[ $(( $i + 1 )) -eq $COMP_CWORD ]] ; then parse_config COMPREPLY=( $( compgen -W "$codenames" -- $cur ) ) return 0 fi # here we could look for package names existing in # that distribution, but that would be slow... 
;; restore|restoresrc|restorefilter|restorematched) # first argument is a codename if [[ $i -eq $COMP_CWORD ]] ; then parse_config COMPREPLY=( $( compgen -W "$codenames" -- $cur ) ) return 0 fi # second argument is snapshot of that name if [[ $(( $i + 1 )) -eq $COMP_CWORD ]] ; then parse_config_for_distdir snapshots="$( ls "$distdir/${COMP_WORDS[i]}/snapshots" )" COMPREPLY=( $( compgen -W "$snapshots" -- $cur ) ) return 0 fi # here we could look for package names existing in # that distribution, but that would be slow... ;; __dumpuncompressors|translatelageacychecksums|deleteunreferenced) # no arguments return 0 ;; deleteifunreferenced) # less usefull than the output of dumpunreferenced, # but this way it should be massively faster: parse_config_for_distdir COMPREPLY=( $(cd "$outdir" && compgen -o filenames -f -- $cur) ) return 0 ;; esac COMPREPLY=( $( compgen -f -- $cur ) ) return 0 } # This -o filename has its problems when there are directories named like # commands in you current directory. But it makes adding filenames so much # easier. I wished I knew a way to only active it for those parts that are # filenames. 
complete -o filenames -F _reprepro reprepro _changestool() { local cur prev commands options noargoptions i j cmd ignores wascreate changesfilename COMPREPLY=() ignores=' notyetimplemented ' noargoptions='--help --create' options='--ignore --searchpath' wascreate=no i=1 prev="" while [[ $i -lt $COMP_CWORD ]] ; do cur=${COMP_WORDS[i]} prev="" case "$cur" in --*=*) i=$((i+1)) ;; -i|--ignore|--unignore|-s|--searchpath) prev="$cur" i=$((i+2)) ;; --create|-c) i=$((i+1)) wascreate=yes ;; --*|-*) i=$((i+1)) ;; *) break ;; esac done cur=${COMP_WORDS[COMP_CWORD]} if [[ $i -gt $COMP_CWORD && -n "$prev" ]]; then case "$prev" in -i|--ignore|--unignore) COMPREPLY=( $( compgen -W "$ignores" -- $cur ) ) return 0 ;; -s|--searchpath) COMPREPLY=( $( compgen -d -- $cur ) ) return 0 ;; esac fi if [[ $i -ge $COMP_CWORD ]] ; then # No changes filename yet specified: commands='addrawfile adddsc adddeb add includeallsources setdistribution updatechecksums verify' if [[ "$cur" == -* ]]; then case "$cur" in *) COMPREPLY=( $( compgen -W "$options $noargoptions" -- $cur ) ) ;; esac return 0 fi if [ "$wascreate" = "yes" ] ; then _filedir else _filedir changes fi return 0 fi changesfilename=${COMP_WORDS[i]} i=$((i+1)) if [[ $i -ge $COMP_CWORD ]] ; then # No command yet specified: commands='addrawfile adddsc adddeb add includeallsources setdistribution updatechecksums verify' # todo: restrict to add commands when --create and file not yet existing? COMPREPLY=( $( compgen -W "$commands" -- $cur ) ) return 0 fi cmd=${COMP_WORDS[i]} case "$cmd" in # with searchpath it should also list the files available there, # but I know no easy way to get that done... 
addrawfile) _filedir return 0 ;; adddsc) _filedir dsc return 0 ;; adddeb) _filedir deb return 0 ;; adddeb) _filedir return 0 ;; includeallsources) prev="$(grep '^ [0-9a-f]\{32\} \+[0-9]\+ \+[a-zA-Z/0-9.:-]\+ \+[a-zA-Z/0-9.:-]\+ \+[^ ]\+\.dsc$' -- "$changesfilename" | sed -e 's/^ [0-9a-f]\+ \+[0-9]\+ \+[^ ]\+ \+[^ ]\+ \+//')" j=0 options=() for i in $prev ; do if [ -f "$i" ] ; then options=(${options[@]:-} $(grep '^ [0-9a-f]\{32\} \+[0-9]\+ \+[^ ]\+$' -- "$i" | sed -e 's/^ [0-9a-f]\+ \+[0-9]\+ \+//') ) elif [ -f "$(dirname $changesfilename)/$i" ] ; then options=(${options[@]:-} $(grep '^ [0-9a-f]\{32\} \+[0-9]\+ \+[^ ]\+$' -- "$(dirname $changesfilename)/$i" | sed -e 's/^ [0-9a-f]\+ \+[0-9]\+ \+//') ) else cmd="missing" fi done COMPREPLY=( $( compgen -W "${options[@]}" -- $cur ) ) # if some .dsc cannot be found or read, offer everythin additionally if [ "$cmd" = "missing" ] ; then _filedir fi return 0 ;; setdistribution) COMPREPLY=( $( compgen -W "unstable testing stable sarge etch lenny sid backports local" -- $cur ) ) return 0 ;; updatechecksums) options="$(grep '^ [0-9a-f]\{32\} \+[0-9]\+ \+[a-zA-Z/0-9.:-]\+ \+[a-zA-Z/0-9.:-]\+ \+[^ ]\+$' -- "$changesfilename" | sed -e 's/^ [0-9a-f]\+ \+[0-9]\+ \+[^ ]\+ \+[^ ]\+ \+//')" if [ -n "$options" ] ; then COMPREPLY=( $( compgen -W "$options" -- $cur ) ) else _filedir fi return 0 ;; verify) return 0 ;; esac COMPREPLY=( $( compgen -f -- $cur ) ) return 0 } # same problem as above with -o filenames, # but I guess still better than without... complete -o filenames -F _changestool changestool reprepro-4.13.1/docs/changelogs.example0000755000175100017510000001511712152651661015015 00000000000000#!/bin/sh # This is an example script that can be hooked into reprepro # to either generate a hierachy like packages.debian.org/changelogs/ # or to generate changelog files in the "third party sites" # location apt-get changelogs looks if it is not found in # Apt::Changelogs::Server. 
# # All you have to do is to: # - copy it into you conf/ directory, # - if you want "third party site" style changelogs, edit the # CHANGELOGDIR variable below, # and # - add the following to any distribution in conf/distributions # you want to have changelogs and copyright files extracted: #Log: # --type=dsc changelogs.example # (note the space at the beginning of the second line). # This will cause this script to extract changelogs for all # newly added source packages. (To generate them for already # existing packages, call "reprepro rerunnotifiers"). # DEPENDENCIES: dpkg >= 1.13.9 if test "x${REPREPRO_OUT_DIR:+set}" = xset ; then # Note: due to cd, REPREPRO_*_DIR will no longer # be usable. And only things relative to outdir will work... cd "${REPREPRO_OUT_DIR}" || exit 1 else # this will also trigger if reprepro < 3.5.1 is used, # in that case replace this with a manual cd to the # correct directory... cat "changelog.example needs to be run by reprepro!" >&2 exit 1 fi # CHANGELOGDIR set means generate full hierachy # (clients need to set Apt::Changelogs::Server to use that) CHANGELOGDIR=changelogs # CHANGELOGDIR empty means generate changelog (and only changelog) files # in the new "third party site" place apt-get changelog is using as fallback: #CHANGELOGDIR= # Set to avoid using some predefined TMPDIR or even /tmp as # tempdir: # TMPDIR=/var/cache/whateveryoucreated if test -z "$CHANGELOGDIR" ; then addsource() { DSCFILE="$1" CANONDSCFILE="$(readlink --canonicalize "$DSCFILE")" CHANGELOGFILE="${DSCFILE%.dsc}.changelog" BASEDIR="$(dirname "$CHANGELOGFILE")" if ! 
[ -f "$CHANGELOGFILE" ] ; then EXTRACTDIR="$(mktemp -d)" (cd -- "$EXTRACTDIR" && dpkg-source --no-copy -x "$CANONDSCFILE" > /dev/null) install --mode=644 -- "$EXTRACTDIR"/*/debian/changelog "$CHANGELOGFILE" chmod -R u+rwX -- "$EXTRACTDIR" rm -r -- "$EXTRACTDIR" fi if [ -L "$BASEDIR"/current."$CODENAME" ] ; then # should not be there, just to be sure rm -f -- "$BASEDIR"/current."$CODENAME" fi # mark this as needed by this distribution ln -s -- "$(basename "$CHANGELOGFILE")" "$BASEDIR/current.$CODENAME" JUSTADDED="$CHANGELOGFILE" } delsource() { DSCFILE="$1" CHANGELOGFILE="${DSCFILE%.dsc}.changelog" BASEDIR="$(dirname "$CHANGELOGFILE")" BASENAME="$(basename "$CHANGELOGFILE")" if [ "x$JUSTADDED" = "x$CHANGELOGFILE" ] ; then exit 0 fi # echo "delete, basedir=$BASEDIR changelog=$CHANGELOGFILE, dscfile=$DSCFILE, " if [ "x$(readlink "$BASEDIR/current.$CODENAME")" = "x$BASENAME" ] ; then rm -- "$BASEDIR/current.$CODENAME" fi NEEDED=0 for c in "$BASEDIR"/current.* ; do if [ "x$(readlink -- "$c")" = "x$BASENAME" ] ; then NEEDED=1 fi done if [ "$NEEDED" -eq 0 -a -f "$CHANGELOGFILE" ] ; then rm -r -- "$CHANGELOGFILE" # to remove the directory if now empty rmdir --ignore-fail-on-non-empty -- "$BASEDIR" fi } else # "$CHANGELOGDIR" set: addsource() { DSCFILE="$1" CANONDSCFILE="$(readlink --canonicalize "$DSCFILE")" TARGETDIR="${CHANGELOGDIR}/${DSCFILE%.dsc}" SUBDIR="$(basename $TARGETDIR)" BASEDIR="$(dirname $TARGETDIR)" if ! 
[ -d "$TARGETDIR" ] ; then echo "extract $CANONDSCFILE information to $TARGETDIR" mkdir -p -- "$TARGETDIR" EXTRACTDIR="$(mktemp -d)" (cd -- "$EXTRACTDIR" && dpkg-source --no-copy -x "$CANONDSCFILE" > /dev/null) install --mode=644 -- "$EXTRACTDIR"/*/debian/copyright "$TARGETDIR/copyright" install --mode=644 -- "$EXTRACTDIR"/*/debian/changelog "$TARGETDIR/changelog" chmod -R u+rwX -- "$EXTRACTDIR" rm -r -- "$EXTRACTDIR" fi if [ -L "$BASEDIR"/current."$CODENAME" ] ; then # should not be there, just to be sure rm -f -- "$BASEDIR"/current."$CODENAME" fi # mark this as needed by this distribution ln -s -- "$SUBDIR" "$BASEDIR/current.$CODENAME" JUSTADDED="$TARGETDIR" } delsource() { DSCFILE="$1" TARGETDIR="${CHANGELOGDIR}/${DSCFILE%.dsc}" SUBDIR="$(basename $TARGETDIR)" BASEDIR="$(dirname $TARGETDIR)" if [ "x$JUSTADDED" = "x$TARGETDIR" ] ; then exit 0 fi # echo "delete, basedir=$BASEDIR targetdir=$TARGETDIR, dscfile=$DSCFILE, " if [ "x$(readlink "$BASEDIR/current.$CODENAME")" = "x$SUBDIR" ] ; then rm -- "$BASEDIR/current.$CODENAME" fi NEEDED=0 for c in "$BASEDIR"/current.* ; do if [ "x$(readlink -- "$c")" = "x$SUBDIR" ] ; then NEEDED=1 fi done if [ "$NEEDED" -eq 0 -a -d "$TARGETDIR" ] ; then rm -r -- "$TARGETDIR" # to remove the directory if now empty rmdir --ignore-fail-on-non-empty -- "$BASEDIR" fi } fi # CHANGELOGDIR ACTION="$1" CODENAME="$2" PACKAGETYPE="$3" if [ "x$PACKAGETYPE" != "xdsc" ] ; then # the --type=dsc should cause this to never happen, but better safe than sorry. 
exit 1 fi COMPONENT="$4" ARCHITECTURE="$5" if [ "x$ARCHITECTURE" != "xsource" ] ; then exit 1 fi NAME="$6" shift 6 JUSTADDED="" if [ "x$ACTION" = "xadd" -o "x$ACTION" = "xinfo" ] ; then VERSION="$1" shift if [ "x$1" != "x--" ] ; then exit 2 fi shift while [ "$#" -gt 0 ] ; do case "$1" in *.dsc) addsource "$1" ;; --) exit 2 ;; esac shift done elif [ "x$ACTION" = "xremove" ] ; then OLDVERSION="$1" shift if [ "x$1" != "x--" ] ; then exit 2 fi shift while [ "$#" -gt 0 ] ; do case "$1" in *.dsc) delsource "$1" ;; --) exit 2 ;; esac shift done elif [ "x$ACTION" = "xreplace" ] ; then VERSION="$1" shift OLDVERSION="$1" shift if [ "x$1" != "x--" ] ; then exit 2 fi shift while [ "$#" -gt 0 -a "x$1" != "x--" ] ; do case "$1" in *.dsc) addsource "$1" ;; esac shift done if [ "x$1" != "x--" ] ; then exit 2 fi shift while [ "$#" -gt 0 ] ; do case "$1" in *.dsc) delsource "$1" ;; --) exit 2 ;; esac shift done fi exit 0 # Copyright 2007,2008,2012 Bernhard R. Link # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA reprepro-4.13.1/optionsfile.h0000644000175100017510000000047612152651661013101 00000000000000#ifndef REPREPRO_OPTIONSFILE_H #define REPREPRO_OPTIONSFILE_H #include #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" 
#endif void optionsfile_parse(const char * /*directory*/, const struct option *, void handle_option(int, const char *)); #endif /*REPREPRO_OPTIONSFILE_H*/ reprepro-4.13.1/terms.h0000644000175100017510000000361312152651661011674 00000000000000#ifndef REPREPRO_TERMS_H #define REPREPRO_TERMS_H enum term_comparison { tc_none=0, tc_equal, tc_strictless, tc_strictmore, tc_lessorequal, tc_moreorequal, tc_notequal, tc_globmatch, tc_notglobmatch}; struct term_special; typedef struct term_atom { /* global list to allow freeing them all */ struct term_atom *next; /* the next atom to look at if this is true, resp. false, * nextiftrue == NULL means total result is true, * nextiffalse == NULL means total result is false. */ /*@dependent@*/struct term_atom *nextiftrue, *nextiffalse; bool negated, isspecial; /* architecture requirements */ bool architectures_negated; struct strlist architectures; /* version/value requirement */ enum term_comparison comparison; union { struct { /* package-name or key */ char *key; /* version/value requirement */ char *comparewith; } generic; struct { const struct term_special *type; struct compare_with { void *pointer; long number; } comparewith; } special; }; } term; struct term_special { const char *name; retvalue (*parse)(enum term_comparison, const char *, size_t len, struct compare_with *); bool (*compare)(enum term_comparison, const struct compare_with *, const void*, const void*); void (*done)(enum term_comparison, struct compare_with *); }; /* | is allowed in terms */ #define T_OR 0x01 /* () are allowed to build sub-expressions */ #define T_BRACKETS 0x02 /* expressions may be negated */ #define T_NEGATION 0x04 /* ( ) is allowed */ #define T_VERSION 0x10 /* [archlist] is allowed */ #define T_ARCHITECTURES 0x20 /* (!= value) is allowed */ #define T_NOTEQUAL 0x40 /* (% ) and (!% globpattern) are allowed */ #define T_GLOBMATCH 0x80 retvalue term_compile(/*@out@*/term **, const char * /*formula*/, int /*options*/, /*@null@*/const struct 
term_special *specials); void term_free(/*@null@*//*@only@*/term *); #endif reprepro-4.13.1/globals.h0000644000175100017510000000507612152651661012172 00000000000000#ifndef REPREPRO_GLOBALS_H #define REPREPRO_GLOBALS_H #ifdef AVOID_CHECKPROBLEMS # define bool _Bool # define true (1==1) # define false (0==42) /* avoid problems with __builtin_expect being long instead of boolean */ # define __builtin_expect(a, b) (a) # define __builtin_constant_p(a) (__builtin_constant_p(a) != 0) #else # if HAVE_STDBOOL_H # include # else # if ! HAVE__BOOL typedef int _Bool; # endif # define true (1==1) # define false (0==42) # endif #endif #define xisspace(c) (isspace(c)!=0) #define xisblank(c) (isblank(c)!=0) #define xisdigit(c) (isdigit(c)!=0) #define READONLY true #define READWRITE false #define ISSET(a, b) ((a & b) != 0) #define NOTSET(a, b) ((a & b) == 0) /* sometimes something is initializes though the value is never used to * work around some gcc uninitialized-use false-positives */ #define SETBUTNOTUSED(a) a #ifdef SPLINT #define UNUSED(a) /*@unused@*/ a #define NORETURN #define likely(a) (a) #define unlikely(a) (a) #else #define likely(a) (!(__builtin_expect(!(a), false))) #define unlikely(a) __builtin_expect(a, false) #define NORETURN __attribute((noreturn)) #ifndef NOUNUSEDATTRIBUTE #define UNUSED(a) a __attribute((unused)) #else #define UNUSED(a) a #endif #endif #define ARRAYCOUNT(a) (sizeof(a)/sizeof(a[0])) enum config_option_owner { CONFIG_OWNER_DEFAULT=0, CONFIG_OWNER_FILE, CONFIG_OWNER_ENVIRONMENT, CONFIG_OWNER_CMDLINE}; #ifndef _D_EXACT_NAMLEN #define _D_EXACT_NAMLEN(r) (strlen((r)->d_name)) #endif /* for systems defining NULL to 0 instead of the nicer (void*)0 */ #define ENDOFARGUMENTS ((char *)0) /* global information */ extern int verbose; extern struct global_config { const char *basedir; const char *dbdir; const char *outdir; const char *distdir; const char *confdir; const char *methoddir; const char *logdir; const char *listdir; const char *morguedir; /* 
flags: */ bool keepdirectories; bool keeptemporaries; bool onlysmalldeletes; /* verbosity of downloading statistics */ int showdownloadpercent; } global; enum compression { c_none, c_gzip, c_bzip2, c_lzma, c_xz, c_lunzip, c_COUNT }; #define setzero(type, pointer) ({type *__var = pointer; memset(__var, 0, sizeof(type));}) #define NEW(type) ((type *)malloc(sizeof(type))) #define nNEW(num, type) ((type *)malloc((num) * sizeof(type))) #define zNEW(type) ((type *)calloc(1, sizeof(type))) #define nzNEW(num, type) ((type *)calloc(num, sizeof(type))) #define arrayinsert(type, array, position, length) ({type *__var = array; memmove(__var + (position) + 1, __var + (position), sizeof(type) * ((length) - (position)));}) #endif reprepro-4.13.1/indexfile.c0000644000175100017510000001702612152651661012507 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003,2004,2005,2007,2008,2010 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include "error.h" #include "ignore.h" #include "chunks.h" #include "names.h" #include "uncompression.h" #include "indexfile.h" /* the purpose of this code is to read index files, either from a snapshot * previously generated or downloaded while updating. 
*/ struct indexfile { struct compressedfile *f; char *filename; int linenumber, startlinenumber; retvalue status; char *buffer; int size, ofs, content; bool failed; }; retvalue indexfile_open(struct indexfile **file_p, const char *filename, enum compression compression) { struct indexfile *f = zNEW(struct indexfile); retvalue r; if (FAILEDTOALLOC(f)) return RET_ERROR_OOM; f->filename = strdup(filename); if (FAILEDTOALLOC(f->filename)) { free(f); return RET_ERROR_OOM; } r = uncompress_open(&f->f, filename, compression); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { free(f->filename); free(f); return RET_ERRNO(errno); } f->linenumber = 0; f->startlinenumber = 0; f->status = RET_OK; f->size = 256*1024; f->ofs = 0; f->content = 0; /* +1 for *d = '\0' in eof case */ f->buffer = malloc(f->size + 1); if (FAILEDTOALLOC(f->buffer)) { uncompress_abort(f->f); free(f->filename); free(f); return RET_ERROR_OOM; } *file_p = f; return RET_OK; } retvalue indexfile_close(struct indexfile *f) { retvalue r; r = uncompress_close(f->f); free(f->filename); free(f->buffer); RET_UPDATE(r, f->status); free(f); return r; } static retvalue indexfile_get(struct indexfile *f) { char *p, *d, *e, *start; bool afternewline, nothingyet; int bytes_read; if (f->failed) return RET_ERROR; d = f->buffer; afternewline = true; nothingyet = true; do { start = f->buffer + f->ofs; p = start ; e = p + f->content; // TODO: if the chunk_get* are more tested with strange // input, this could be kept in-situ and only chunk_edit // beautifying this chunk... while (p < e) { /* just ignore '\r', even if not line-end... 
*/ if (*p == '\r') { p++; continue; } if (*p == '\n') { f->linenumber++; if (afternewline) { p++; f->content -= (p - start); f->ofs += (p - start); assert (f->ofs == (p - f->buffer)); if (nothingyet) /* restart */ return indexfile_get(f); if (d > f->buffer && *(d-1) == '\n') d--; *d = '\0'; return RET_OK; } afternewline = true; nothingyet = false; } else afternewline = false; if (unlikely(*p == '\0')) { *(d++) = ' '; p++; } else *(d++) = *(p++); } /* ** out of data, read new ** */ /* start at beginning of free space */ f->ofs = (d - f->buffer); f->content = 0; if (f->size - f->ofs <= 2048) { /* Adding code to enlarge the buffer in this case * is risky as hard to test properly. * * Also it is almost certainly caused by some * mis-representation of the file or perhaps * some attack. Requesting all existing memory in * those cases does not sound very useful. */ fprintf(stderr, "Error parsing %s line %d: Ridiculous long (>= 256K) control chunk!\n", f->filename, f->startlinenumber); f->failed = true; return RET_ERROR; } bytes_read = uncompress_read(f->f, d, f->size - f->ofs); if (bytes_read < 0) return RET_ERROR; else if (bytes_read == 0) break; f->content = bytes_read; } while (true); if (d == f->buffer) return RET_NOTHING; /* end of file reached, return what we got so far */ assert (f->content == 0); assert (d-f->buffer <= f->size); if (d > f->buffer && *(d-1) == '\n') d--; *d = '\0'; return RET_OK; } bool indexfile_getnext(struct indexfile *f, char **name_p, char **version_p, const char **control_p, architecture_t *architecture_p, const struct target *target, bool allowwrongarchitecture) { retvalue r; bool ignorecruft = false; // TODO char *packagename, *version, *architecture; const char *control; architecture_t atom; packagename = NULL; version = NULL; do { free(packagename); packagename = NULL; free(version); version = NULL; f->startlinenumber = f->linenumber + 1; r = indexfile_get(f); if (!RET_IS_OK(r)) break; control = f->buffer; r = chunk_getvalue(control, 
"Package", &packagename); if (r == RET_NOTHING) { fprintf(stderr, "Error parsing %s line %d to %d: Chunk without 'Package:' field!\n", f->filename, f->startlinenumber, f->linenumber); if (!ignorecruft) r = RET_ERROR_MISSING; else continue; } if (RET_WAS_ERROR(r)) break; r = chunk_getvalue(control, "Version", &version); if (r == RET_NOTHING) { fprintf(stderr, "Error parsing %s line %d to %d: Chunk without 'Version:' field!\n", f->filename, f->startlinenumber, f->linenumber); if (!ignorecruft) r = RET_ERROR_MISSING; else continue; } if (RET_WAS_ERROR(r)) break; r = chunk_getvalue(control, "Architecture", &architecture); if (RET_WAS_ERROR(r)) break; if (r == RET_NOTHING) architecture = NULL; if (target->packagetype == pt_dsc) { free(architecture); atom = architecture_source; } else { /* check if architecture fits for target and error out if not ignorewrongarchitecture */ if (architecture == NULL) { fprintf(stderr, "Error parsing %s line %d to %d: Chunk without 'Architecture:' field!\n", f->filename, f->startlinenumber, f->linenumber); if (!ignorecruft) { r = RET_ERROR_MISSING; break; } else continue; } else if (strcmp(architecture, "all") == 0) { atom = architecture_all; } else if (strcmp(architecture, atoms_architectures[ target->architecture ]) == 0) { atom = target->architecture; } else if (!allowwrongarchitecture && !ignore[IGN_wrongarchitecture]) { fprintf(stderr, "Warning: ignoring package because of wrong 'Architecture:' field '%s'" " (expected 'all' or '%s') in %s lines %d to %d!\n", architecture, atoms_architectures[ target->architecture], f->filename, f->startlinenumber, f->linenumber); if (ignored[IGN_wrongarchitecture] == 0) { fprintf(stderr, "This either mean the repository you get packages from is of an extremly\n" "low quality, or something went wrong. 
Trying to ignore it now, though.\n" "To no longer get this message use '--ignore=wrongarchitecture'.\n"); } ignored[IGN_wrongarchitecture]++; free(architecture); continue; } else { /* just ignore this because of wrong * architecture */ free(architecture); continue; } free(architecture); } if (RET_WAS_ERROR(r)) break; *control_p = control; *name_p = packagename; *version_p = version; *architecture_p = atom; return true; } while (true); free(packagename); free(version); RET_UPDATE(f->status, r); return false; } reprepro-4.13.1/changes.h0000644000175100017510000000153112152651661012147 00000000000000#ifndef REPREPRO_CHANGES_H #define REPREPRO_CHANGES_H #ifndef REPREPRO_ATOMS_H #include "atoms.h" #endif typedef enum { fe_UNKNOWN=0, fe_DEB, fe_UDEB, fe_DSC, fe_DIFF, fe_ORIG, fe_TAR, fe_ALTSRC, fe_BYHAND, fe_LOG, fe_CHANGES } filetype; #define FE_PACKAGE(ft) ((ft) == fe_DEB || (ft) == fe_UDEB || (ft) == fe_DSC) #define FE_BINARY(ft) ((ft) == fe_DEB || (ft) == fe_UDEB) #define FE_SOURCE(ft) ((ft) == fe_DIFF || (ft) == fe_ORIG || (ft) == fe_TAR || (ft) == fe_DSC || (ft) == fe_UNKNOWN || (ft) == fe_ALTSRC) struct hash_data; retvalue changes_parsefileline(const char * /*fileline*/, /*@out@*/filetype *, /*@out@*/char ** /*result_basename*/, /*@out@*/struct hash_data *, /*@out@*/struct hash_data *, /*@out@*/char ** /*result_section*/, /*@out@*/char ** /*result_priority*/, /*@out@*/architecture_t *, /*@out@*/char ** /*result_name*/); #endif reprepro-4.13.1/uncompression.h0000644000175100017510000000444412152651661013451 00000000000000#ifndef REPREPRO_UNCOMPRESS_H #define REPREPRO_UNCOMPRESS_H /* "", ".gz", ... 
*/ extern const char * const uncompression_suffix[c_COUNT]; extern /*@null@*/ char *extern_uncompressors[c_COUNT]; /* so help messages know which option to cite: */ extern const char * const uncompression_option[c_COUNT]; extern const char * const uncompression_config[c_COUNT]; /* there are two different modes: uncompress a file to memory, * or uncompress (possibly multiple files) on the filesystem, * controled by aptmethods */ #ifdef HAVE_LIBBZ2 #define uncompression_builtin(c) ((c) == c_bzip2 || (c) == c_gzip) #else #define uncompression_builtin(c) ((c) == c_gzip) #endif #define uncompression_supported(c) ((c) == c_none || \ uncompression_builtin(c) || \ extern_uncompressors[c] != NULL) enum compression compression_by_suffix(const char *, size_t *); /**** functions for aptmethod.c ****/ /* we got an pid, check if it is a uncompressor we care for */ retvalue uncompress_checkpid(pid_t, int); /* still waiting for a client to exit */ bool uncompress_running(void); typedef retvalue finishaction(void *, const char *, bool /*failed*/); /* uncompress and call action when finished */ retvalue uncompress_queue_file(const char *, const char *, enum compression, finishaction *, void *); /**** functions for update.c (uncompressing an earlier downloaded file) ****/ retvalue uncompress_file(const char *, const char *, enum compression); /**** functions for indexfile.c (uncompressing to memory) and ar.c ****/ // and perhaps also sourceextraction.c struct compressedfile; retvalue uncompress_open(/*@out@*/struct compressedfile **, const char *, enum compression); int uncompress_read(struct compressedfile *, void *buffer, int); retvalue uncompress_error(/*@const@*/struct compressedfile *); void uncompress_abort(/*@only@*/struct compressedfile *); retvalue uncompress_close(/*@only@*/struct compressedfile *); retvalue uncompress_fdclose(/*@only@*/struct compressedfile *, int *, const char **); retvalue uncompress_fdopen(/*@out@*/struct compressedfile **, int, off_t, enum compression, 
int *, const char **); /**** general initialisation ****/ /* check for existance of external programs */ void uncompressions_check(const char *gunzip, const char *bunzip2, const char *unlzma, const char *unxz, const char *lunzip); #endif reprepro-4.13.1/donefile.h0000644000175100017510000000147012152651661012326 00000000000000#ifndef REPREPRO_DONEFILE_H #define REPREPRO_DONEFILE_H #ifndef REPREPRO_ERROR_H #include "error.h" #endif struct checksums; struct markdonefile; retvalue markdone_create(const char *, /*@out@*/struct markdonefile **); void markdone_finish(/*@only@*/struct markdonefile *); void markdone_target(struct markdonefile *, const char *); void markdone_index(struct markdonefile *, const char *, const struct checksums *); void markdone_cleaner(struct markdonefile *); struct donefile; retvalue donefile_open(const char *, /*@out@*/struct donefile **); void donefile_close(/*@only@*/struct donefile *); retvalue donefile_nexttarget(struct donefile *, /*@out@*/const char **); bool donefile_nextindex(struct donefile *, /*@out@*/const char **, /*@out@*/struct checksums **); bool donefile_iscleaner(struct donefile *); #endif reprepro-4.13.1/target.h0000644000175100017510000001664412152651661012040 00000000000000#ifndef REPREPRO_TARGET_H #define REPREPRO_TARGET_H #ifndef REPREPRO_STRLIST_H #include "strlist.h" #endif #ifndef REPREPRO_NAMES_H #include "names.h" #endif #ifndef REPREPRO_ATOMS_H #include "atoms.h" #endif #ifndef REPREPRO_DATABASE_H #include "database.h" #endif #ifndef REPREPRO_TRACKINGT_H #include "trackingt.h" #endif #ifndef REPREPRO_CHECKSUMS_H #include "checksums.h" #endif #ifndef REPREPRO_EXPORTS_H #include "exports.h" #endif struct target; struct alloverrides; typedef retvalue get_version(const char *, /*@out@*/char **); typedef retvalue get_architecture(const char *, /*@out@*/architecture_t *); typedef retvalue get_installdata(const struct target *, const char *, const char *, architecture_t, const char *, /*@out@*/char **, /*@out@*/struct 
strlist *, /*@out@*/struct checksumsarray *); /* md5sums may be NULL */ typedef retvalue get_filekeys(const char *, /*@out@*/struct strlist *); typedef retvalue get_checksums(const char *, /*@out@*/struct checksumsarray *); typedef retvalue do_reoverride(const struct target *, const char * /*packagename*/, const char *, /*@out@*/char **); typedef retvalue do_retrack(const char * /*packagename*/, const char * /*controlchunk*/, trackingdb); typedef retvalue get_sourceandversion(const char *, const char * /*packagename*/, /*@out@*/char ** /*source_p*/, /*@out@*/char ** /*version_p*/); typedef retvalue complete_checksums(const char *, const struct strlist *, struct checksums **, /*@out@*/char **); struct distribution; struct target { struct distribution *distribution; component_t component; architecture_t architecture; packagetype_t packagetype; char *identifier; /* links into the correct description in distribution */ /*@dependent@*/const struct exportmode *exportmode; /* the directory relative to // to use */ char *relativedirectory; /* functions to use on the packages included */ get_version *getversion; /* binary packages might be "all" or the architecture of the target */ get_architecture *getarchitecture; get_installdata *getinstalldata; get_filekeys *getfilekeys; get_checksums *getchecksums; get_sourceandversion *getsourceandversion; do_reoverride *doreoverride; do_retrack *doretrack; complete_checksums *completechecksums; bool wasmodified, saved_wasmodified; /* set when existed at startup time, only valid in --nofast mode */ bool existed; /* the next one in the list of targets of a distribution */ struct target *next; /* is initialized as soon as needed: */ struct table *packages; /* do not allow write operations */ bool readonly; /* was updated without tracking data (no problem when distribution * has no tracking, otherwise cause warning later) */ bool staletracking; }; retvalue target_initialize_ubinary(/*@dependant@*/struct distribution *, component_t, 
architecture_t, /*@dependent@*/const struct exportmode *, bool /*readonly*/, /*@NULL@*/const char *fakecomponentprefix, /*@out@*/struct target **); retvalue target_initialize_binary(/*@dependant@*/struct distribution *, component_t, architecture_t, /*@dependent@*/const struct exportmode *, bool /*readonly*/, /*@NULL@*/const char *fakecomponentprefix, /*@out@*/struct target **); retvalue target_initialize_source(/*@dependant@*/struct distribution *, component_t, /*@dependent@*/const struct exportmode *, bool /*readonly*/, /*@NULL@*/const char *fakecomponentprefix, /*@out@*/struct target **); retvalue target_free(struct target *); retvalue target_export(struct target *, bool /*onlyneeded*/, bool /*snapshot*/, struct release *); /* This opens up the database, if db != NULL, *db will be set to it.. */ retvalue target_initpackagesdb(struct target *, bool /*readonly*/); /* this closes databases... */ retvalue target_closepackagesdb(struct target *); struct target_cursor { /*@temp@*/struct target *target; struct cursor *cursor; const char *lastname; const char *lastcontrol; }; #define TARGET_CURSOR_ZERO {NULL, NULL, NULL, NULL} /* wrapper around initpackagesdb and table_newglobalcursor */ static inline retvalue target_openiterator(struct target *t, bool readonly, /*@out@*/struct target_cursor *tc) { retvalue r, r2; struct cursor *c; r = target_initpackagesdb(t, readonly); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; r = table_newglobalcursor(t->packages, &c); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { r2 = target_closepackagesdb(t); RET_UPDATE(r, r2); return r; } tc->target = t; tc->cursor = c; return RET_OK; } /* wrapper around cursor_nexttemp */ static inline bool target_nextpackage(struct target_cursor *tc, /*@out@*/const char **packagename_p, /*@out@*/const char **chunk_p) { bool success; success = cursor_nexttemp(tc->target->packages, tc->cursor, &tc->lastname, &tc->lastcontrol); if (success) { *packagename_p = tc->lastname; *chunk_p = 
tc->lastcontrol; } else { tc->lastname = NULL; tc->lastcontrol = NULL; } return success; } /* wrapper around cursor_nexttemp */ static inline bool target_nextpackage_len(struct target_cursor *tc, /*@out@*//*@null@*/const char **packagename_p, /*@out@*/const char **chunk_p, /*@out@*/size_t *len_p) { tc->lastname = NULL; tc->lastcontrol = NULL; return cursor_nexttempdata(tc->target->packages, tc->cursor, packagename_p, chunk_p, len_p); } /* wrapper around cursor_close and target_closepackagesdb */ static inline retvalue target_closeiterator(struct target_cursor *tc) { retvalue result, r; result = cursor_close(tc->target->packages, tc->cursor); r = target_closepackagesdb(tc->target); RET_UPDATE(result, r); return result; } /* The following calls can only be called if target_initpackagesdb was called before: */ struct logger; retvalue target_addpackage(struct target *, /*@null@*/struct logger *, const char *name, const char *version, const char *control, const struct strlist *filekeys, bool downgrade, /*@null@*/struct trackingdata *, enum filetype, /*@null@*/const char *causingrule, /*@null@*/const char *suitefrom); retvalue target_checkaddpackage(struct target *, const char *name, const char *version, bool tracking, bool permitnewerold); retvalue target_removepackage(struct target *, /*@null@*/struct logger *, const char *name, struct trackingdata *); /* like target_removepackage, but do not read control data yourself but use available */ retvalue target_removereadpackage(struct target *, /*@null@*/struct logger *, const char *name, const char *oldcontrol, /*@null@*/struct trackingdata *); /* Like target_removepackage, but delete the package record by cursor */ retvalue target_removepackage_by_cursor(struct target_cursor *, /*@null@*/struct logger *, /*@null@*/struct trackingdata *); retvalue package_check(struct distribution *, struct target *, const char *, const char *, void *); retvalue target_rereference(struct target *); retvalue 
package_referenceforsnapshot(struct distribution *, struct target *, const char *, const char *, void *); retvalue target_reoverride(struct target *, struct distribution *); retvalue target_redochecksums(struct target *, struct distribution *); retvalue package_rerunnotifiers(struct distribution *, struct target *, const char *, const char *, void *); static inline bool target_matches(const struct target *t, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes) { if (limitations_missed(components, t->component)) return false; if (limitations_missed(architectures, t->architecture)) return false; if (limitations_missed(packagetypes, t->packagetype)) return false; return true; } #endif reprepro-4.13.1/copypackages.h0000644000175100017510000000337512152651661013220 00000000000000#ifndef REPREPRO_COPYPACKAGES_H #define REPREPRO_COPYPACKAGES_H #ifndef REPREPRO_STRLIST_H #include "strlist.h" #endif retvalue copy_by_name(struct distribution * /*into*/, struct distribution * /*from*/, int, const char **, const struct atomlist *, const struct atomlist *, const struct atomlist *); retvalue copy_by_source(struct distribution * /*into*/, struct distribution * /*from*/, int, const char **, const struct atomlist *, const struct atomlist *, const struct atomlist *); retvalue copy_by_formula(struct distribution * /*into*/, struct distribution * /*from*/, const char *formula, const struct atomlist *, const struct atomlist *, const struct atomlist *); retvalue copy_by_glob(struct distribution * /*into*/, struct distribution * /*from*/, const char * /*glob*/, const struct atomlist *, const struct atomlist *, const struct atomlist *); retvalue copy_from_file(struct distribution * /*into*/, component_t, architecture_t, packagetype_t, const char * /*filename*/ , int, const char **); /* note that snapshotname must live till logger_wait has run */ retvalue restore_by_name(struct distribution *, const struct atomlist *, const struct 
atomlist *, const struct atomlist *, const char * /*snapshotname*/, int, const char **); retvalue restore_by_source(struct distribution *, const struct atomlist *, const struct atomlist *, const struct atomlist *, const char * /*snapshotname*/, int, const char **); retvalue restore_by_formula(struct distribution *, const struct atomlist *, const struct atomlist *, const struct atomlist *, const char * /*snapshotname*/, const char *filter); retvalue restore_by_glob(struct distribution *, const struct atomlist *, const struct atomlist *, const struct atomlist *, const char * /*snapshotname*/, const char * /*glob*/); #endif reprepro-4.13.1/termdecide.c0000644000175100017510000002265612152651661012652 00000000000000/* This file is part of "reprepro" * Copyright (C) 2004,2005,2007,2009 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include "error.h" #include "mprintf.h" #include "strlist.h" #include "names.h" #include "chunks.h" #include "globmatch.h" #include "dpkgversions.h" #include "terms.h" #include "termdecide.h" static inline bool check_field(enum term_comparison c, const char *value, const char *with) { if (c == tc_none) { return true; } else if (c == tc_globmatch) { return globmatch(value, with); } else if (c == tc_notglobmatch) { return !globmatch(value, with); } else { int i; i = strcmp(value, with); if (i < 0) return c == tc_strictless || c == tc_lessorequal || c == tc_notequal; else if (i > 0) return c == tc_strictmore || c == tc_moreorequal || c == tc_notequal; else return c == tc_lessorequal || c == tc_moreorequal || c == tc_equal; } } retvalue term_decidechunk(const term *condition, const char *controlchunk, const void *privdata) { const struct term_atom *atom = condition; while (atom != NULL) { bool correct; char *value; enum term_comparison c = atom->comparison; retvalue r; if (atom->isspecial) { correct = atom->special.type->compare(c, &atom->special.comparewith, controlchunk, privdata); } else { r = chunk_getvalue(controlchunk, atom->generic.key, &value); if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) { correct = (c == tc_notequal || c == tc_notglobmatch); } else { correct = check_field(c, value, atom->generic.comparewith); free(value); } } if (atom->negated) correct = !correct; if (correct) { atom = atom->nextiftrue; } else { atom = atom->nextiffalse; if (atom == NULL) { /* do not include */ return RET_NOTHING; } } } /* do include */ return RET_OK; } static retvalue parsestring(enum term_comparison c, const char *value, size_t len, struct compare_with *v) { if (c == tc_none) { 
fprintf(stderr, "Error: Special formula predicates (those starting with '$') are always\n" "defined, thus specifying them without parameter to compare against\n" "makes not sense!\n"); return RET_ERROR; } v->pointer = strndup(value, len); if (FAILEDTOALLOC(v->pointer)) return RET_ERROR_OOM; return RET_OK; } // TODO: check for well-formed versions #define parseversion parsestring static bool comparesource(enum term_comparison c, const struct compare_with *v, const void *d1, const void *d2) { const char *control = d1; const struct target *target = d2; char *package, *source, *version; retvalue r; bool matches; // TODO: make more efficient r = chunk_getvalue(control, "Package", &package); if (!RET_IS_OK(r)) return false; r = target->getsourceandversion(control, package, &source, &version); free(package); if (!RET_IS_OK(r)) return false; free(version); matches = check_field(c, source, v->pointer); free(source); return matches; } static inline bool compare_dpkgversions(enum term_comparison c, const char *version, const char *param) { if (c != tc_globmatch && c != tc_notglobmatch) { int cmp; retvalue r; r = dpkgversions_cmp(version, param, &cmp); if (RET_IS_OK(r)) { if (cmp < 0) return c == tc_strictless || c == tc_lessorequal || c == tc_notequal; else if (cmp > 0) return c == tc_strictmore || c == tc_moreorequal || c == tc_notequal; else return c == tc_lessorequal || c == tc_moreorequal || c == tc_equal; } else return false; } else return check_field(c, version, param); } static bool compareversion(enum term_comparison c, const struct compare_with *v, const void *d1, const void *d2) { const char *control = d1; const struct target *target = d2; char *version; retvalue r; bool matches; r = target->getversion(control, &version); if (!RET_IS_OK(r)) return false; matches = compare_dpkgversions(c, version, v->pointer); free(version); return matches; } static bool comparesourceversion(enum term_comparison c, const struct compare_with *v, const void *d1, const void *d2) { const 
char *control = d1; const struct target *target = d2; char *package, *source, *version; retvalue r; bool matches; // TODO: make more efficient r = chunk_getvalue(control, "Package", &package); if (!RET_IS_OK(r)) return false; r = target->getsourceandversion(control, package, &source, &version); free(package); if (!RET_IS_OK(r)) return false; free(source); matches = compare_dpkgversions(c, version, v->pointer); free(version); return matches; } static void freestring(UNUSED(enum term_comparison c), struct compare_with *d) { free(d->pointer); } static void freeatom(enum term_comparison c, struct compare_with *d) { if (c != tc_equal && c != tc_notequal) free(d->pointer); } static retvalue parsetype(enum term_comparison c, const char *value, size_t len, struct compare_with *v) { if (c == tc_none) { fprintf(stderr, "Error: $Type is always defined, it does not make sense without parameter\n" "to compare against!\n"); return RET_ERROR; } if (c != tc_equal && c != tc_notequal) { v->pointer = strndup(value, len); if (FAILEDTOALLOC(v->pointer)) return RET_ERROR_OOM; return RET_OK; } v->number = packagetype_find_l(value, len); if (atom_defined(v->number)) return RET_OK; fprintf(stderr, "Unknown package type '%.*s' in formula!\n", (int)len, value); return RET_ERROR; } static retvalue parsearchitecture(enum term_comparison c, const char *value, size_t len, struct compare_with *v) { if (c == tc_none) { fprintf(stderr, "Error: $Architecture is always defined, it does not make sense without parameter\n" "to compare against!\n"); return RET_ERROR; } if (c != tc_equal && c != tc_notequal) { v->pointer = strndup(value, len); if (FAILEDTOALLOC(v->pointer)) return RET_ERROR_OOM; return RET_OK; } v->number = architecture_find_l(value, len); if (atom_defined(v->number)) return RET_OK; fprintf(stderr, "Unknown architecture '%.*s' in formula (must be listed in conf/distributions to be known)!\n", (int)len, value); return RET_ERROR; } static retvalue parsecomponent(enum term_comparison c, 
const char *value, size_t len, struct compare_with *v) { if (c == tc_none) { fprintf(stderr, "Error: $Component is always defined, it does not make sense without parameter\n" "to compare against!\n"); return RET_ERROR; } if (c != tc_equal && c != tc_notequal) { v->pointer = strndup(value, len); if (FAILEDTOALLOC(v->pointer)) return RET_ERROR_OOM; return RET_OK; } v->number = component_find_l(value, len); if (atom_defined(v->number)) return RET_OK; fprintf(stderr, "Unknown component '%.*s' in formula (must be listed in conf/distributions to be known)!\n", (int)len, value); return RET_ERROR; } static bool comparetype(enum term_comparison c, const struct compare_with *v, UNUSED(const void *d1), const void *d2) { const struct target *target = d2; if (c == tc_equal) return v->number == target->packagetype; else if (c == tc_notequal) return v->number != target->packagetype; else return check_field(c, atoms_packagetypes[target->packagetype], v->pointer); } static bool comparearchitecture(enum term_comparison c, const struct compare_with *v, UNUSED(const void *d1), const void *d2) { const struct target *target = d2; if (c == tc_equal) return v->number == target->architecture; else if (c == tc_notequal) return v->number != target->architecture; else return check_field(c, atoms_architectures[target->architecture], v->pointer); } static bool comparecomponent(enum term_comparison c, const struct compare_with *v, UNUSED(const void *d1), const void *d2) { const struct target *target = d2; if (c == tc_equal) return v->number == target->component; else if (c == tc_notequal) return v->number != target->component; else return check_field(c, atoms_components[target->component], v->pointer); } static struct term_special targetdecisionspecial[] = { {"$Source", parsestring, comparesource, freestring}, {"$SourceVersion", parseversion, comparesourceversion, freestring}, {"$Version", parseversion, compareversion, freestring}, {"$Architecture", parsearchitecture, comparearchitecture, 
freeatom}, {"$Component", parsecomponent, comparecomponent, freeatom}, {"$Type", parsetype, comparetype, freeatom}, {"$PackageType", parsetype, comparetype, freeatom}, {NULL, NULL, NULL, NULL} }; retvalue term_compilefortargetdecision(term **term_p, const char *formula) { return term_compile(term_p, formula, T_GLOBMATCH|T_OR|T_BRACKETS|T_NEGATION|T_VERSION|T_NOTEQUAL, targetdecisionspecial); } retvalue term_decidechunktarget(const term *condition, const char *controlchunk, const struct target *target) { return term_decidechunk(condition, controlchunk, target); } reprepro-4.13.1/names.c0000644000175100017510000000764612152651661011652 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003,2004,2005,2006,2007,2008 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include "error.h" #include "mprintf.h" #include "strlist.h" #include "names.h" char *calc_addsuffix(const char *str1, const char *str2) { return mprintf("%s.%s", str1, str2); } char *calc_dirconcat(const char *str1, const char *str2) { return mprintf("%s/%s", str1, str2); } char *calc_dirconcat3(const char *str1, const char *str2, const char *str3) { return mprintf("%s/%s/%s", str1, str2, str3); } /* Create a strlist consisting out of calc_dirconcat'ed entries of the old */ retvalue calc_dirconcats(const char *directory, const struct strlist *basefilenames, struct strlist *files) { retvalue r; int i; assert (directory != NULL && basefilenames != NULL && files != NULL); r = strlist_init_n(basefilenames->count, files); if (RET_WAS_ERROR(r)) return r; r = RET_NOTHING; for (i = 0 ; i < basefilenames->count ; i++) { char *file; file = calc_dirconcat(directory, basefilenames->values[i]); if (FAILEDTOALLOC(file)) { strlist_done(files); return RET_ERROR_OOM; } r = strlist_add(files, file); if (RET_WAS_ERROR(r)) { strlist_done(files); return r; } } return r; } retvalue calc_inplacedirconcats(const char *directory, struct strlist *files) { int i; assert (directory != NULL && files != NULL ); for (i = 0 ; i < files->count ; i++) { char *file; file = calc_dirconcat(directory, files->values[i]); if (FAILEDTOALLOC(file)) return RET_ERROR_OOM; free(files->values[i]); files->values[i] = file; } return RET_OK; } void names_overversion(const char **version, bool epochsuppressed) { const char *n = *version; bool hadepoch = epochsuppressed; if (*n < '0' || *n > '9') { if ((*n < 'a' || *n > 'z') && (*n < 'A' || *n > 'Z')) return; } else n++; while (*n >= '0' && *n <= '9') n++; if (*n == ':') { hadepoch = true; n++; } 
while ((*n >= '0' && *n <= '9') || (*n >= 'a' && *n <= 'z') || (*n >= 'A' && *n <= 'Z') || *n == '.' || *n == '~' || *n == '-' || *n == '+' || (hadepoch && *n == ':')) n++; *version = n; } char *calc_trackreferee(const char *codename, const char *sourcename, const char *sourceversion) { return mprintf("%s %s %s", codename, sourcename, sourceversion); } char *calc_changes_basename(const char *name, const char *version, const struct strlist *architectures) { size_t name_l, version_l, l; int i; char *n, *p; name_l = strlen(name); version_l = strlen(version); l = name_l + version_l + sizeof("__.changes"); for (i = 0 ; i < architectures->count ; i++) { l += strlen(architectures->values[i]); if (i != 0) l++; } n = malloc(l); if (FAILEDTOALLOC(n)) return n; p = n; memcpy(p, name, name_l); p+=name_l; *(p++) = '_'; memcpy(p, version, version_l); p+=version_l; *(p++) = '_'; for (i = 0 ; i < architectures->count ; i++) { size_t a_l = strlen(architectures->values[i]); if (i != 0) *(p++) = '+'; assert ((size_t)((p+a_l)-n) < l); memcpy(p, architectures->values[i], a_l); p += a_l; } assert ((size_t)(p-n) < l-8); memcpy(p, ".changes", 9); p += 9; assert (*(p-1) == '\0'); assert ((size_t)(p-n) == l); return n; } reprepro-4.13.1/COPYING0000644000175100017510000004313312152651661011425 00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. 
(Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. 
The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. 
You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. 
Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. 
However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. 
If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. 
If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. 
reprepro-4.13.1/signature.h0000644000175100017510000000440112152651661012537 00000000000000#ifndef REPREPRO_SIGNATURE_H #define REPREPRO_SIGNATURE_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" #endif /* does not need to be called if allowpassphrase if false, * argument will only take effect if called the first time */ retvalue signature_init(bool allowpassphrase); struct signature_requirement; void signature_requirements_free(/*@only@*/struct signature_requirement *); retvalue signature_requirement_add(struct signature_requirement **, const char *); void free_known_keys(void); retvalue signature_check(const struct signature_requirement *, const char *, const char *, const char *, size_t); retvalue signature_check_inline(const struct signature_requirement *, const char *, /*@out@*/char **); struct signatures { int count, validcount; struct signature { char *keyid; char *primary_keyid; /* valid is only true if none of the others is true, all may be false due to non-signing keys used for signing or things like that */ enum signature_state { /* internal error: */ sist_error=0, /* key missing, can not be checked: */ sist_missing, /* broken signature, content may be corrupt: */ sist_bad, /* good signature, but may not sign or al: */ sist_invalid, /* good signature, but check expire bits: */ sist_mostly, /* good signature, no objections: */ sist_valid } state; /* subkey or primary key are expired */ bool expired_key; /* signature is expired */ bool expired_signature; /* key or primary key revoced */ bool revoced_key; } signatures[]; }; void signatures_free(/*@null@*//*@only@*/struct signatures *); /* Read a single chunk from a file, that may be signed. 
*/ retvalue signature_readsignedchunk(const char *filename, const char *filenametoshow, char **chunkread, /*@null@*/ /*@out@*/struct signatures **signatures, bool *brokensignature); struct signedfile; struct strlist; retvalue signature_startsignedfile(/*@out@*/struct signedfile **); void signedfile_write(struct signedfile *, const void *, size_t); /* generate signature in temporary file */ retvalue signedfile_create(struct signedfile *, const char *, char **, char **, const struct strlist *, bool /*willcleanup*/); void signedfile_free(/*@only@*/struct signedfile *); void signatures_done(void); #endif reprepro-4.13.1/updates.c0000644000175100017510000022672312152655314012212 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003,2004,2005,2006,2007,2008,2009 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ /* This module handles the updating of distribtions from remote repositories. * It's using apt's methods (the files in /usr/lib/apt/methods) for the * actuall getting of needed lists and package files. 
* * It's only task is to request the right actions in the right order, * almost everything is done in other modules: * * aptmethod.c start, feed and take care of the apt methods * downloadcache.c keep track of what is downloaded to avoid duplicates * signature.c verify Release.gpg files, if requested * remoterepository.c cache remote index files and decide which to download * upgradelist.c decide which packages (and version) should be installed * * An update run consists of the following steps, in between done some * downloading, checking and so on: * * Step 1: parsing the conf/updates file with the patterns * Step 2: create rules for some distribution based on those patterns * Step 3: calculate which remote indices are to be retrieved and processed * Step 4: * Step 5: preperations for actually doing anything * Step 6: queue downloading of list of lists (Release, Release.gpg, ...) * Step 7: queue downloading of lists (Packages.gz, Sources.gz, ...) * Step 8: call possible list hooks allowing them to modify the lists * Step 9: search for missing packages i.e. 
needing to be added or upgraded * Step 10: enqueue downloading of missing packages * Step 11: install the missing packages * Step 12: remember processed index files as processed * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "ignore.h" #include "mprintf.h" #include "strlist.h" #include "atoms.h" #include "dirs.h" #include "names.h" #include "signature.h" #include "aptmethod.h" #include "downloadcache.h" #include "updates.h" #include "upgradelist.h" #include "distribution.h" #include "tracking.h" #include "termdecide.h" #include "filterlist.h" #include "log.h" #include "donefile.h" #include "freespace.h" #include "configparser.h" #include "filecntl.h" #include "remoterepository.h" #include "uncompression.h" /* The data structures of this one: ("u_" is short for "update_") updates_getpatterns read a list of patterns from /updates: u_pattern --> u_pattern --> u_pattern --> NULL / \ / \ / \ / \ | \ | | \ ----\ | | ------------ | | | \ . | . | | updates_getupstreams instances them for a given distribution: | | u_distribution --> u_origin -> u_origin --> NULL | | / \ / \ / \ / \ | \ / | | | | | u_target -> u_index -> u_index -> NULL | | | | | \ / | | | u_target -> u_index -> u_index -> NULL | | | \ / | NULL . . \ / | | u_distribution ---> u_origin -> u_origin -> NULL | | / \ / \ | \ / | | | u_target --> u_index ---> u_index -> NULL | | | \ / | NULL omitted in this image: | not every target must have an index in each \ / origin. (Some origin might only support a NULL limited number of architectures or components) also omitted are delete rules, i.e. markers that all versions previously found are not to be kept or even installed, unless a later index again adds them. */ /* the data for some upstream part to get updates from, some * some fields can be NULL or empty */ struct update_pattern { struct update_pattern *next; //e.g. 
"Name: woody" char *name; /* another pattern to take value from */ char *from; /*@dependent@*/struct update_pattern *pattern_from; //e.g. "Method: ftp://ftp.uni-freiburg.de/pub/linux/debian" /*@null@*/ char *method; //e.g. "Fallback: ftp://ftp.debian.org/pub/linux/debian" /*@null@*/ char *fallback; // can be other server or dir, but must be same method //e.g. "Config: Dir=/" struct strlist config; //e.g. "Suite: woody" or "Suite: /updates" (NULL means "*") /*@null@*/char *suite_from; //e.g. "VerifyRelease: B629A24C38C6029A" (NULL means not check) /*@null@*/char *verifyrelease; //e.g. "Architectures: i386 sparc mips" (not set means all) struct strlist architectures_from; struct strlist architectures_into; //e.g. "Components: main>main non-free>non-free contrib>contrib" // (empty means all) struct strlist components_from; struct strlist components_into; //e.g. "UDebComponents: main>main" // (empty means all) struct strlist udebcomponents_from; struct strlist udebcomponents_into; // NULL means no condition /*@null@*/term *includecondition; struct filterlist filterlist; struct filterlist filtersrclist; // NULL means nothing to execute after lists are downloaded... /*@null@*/char *listhook; /*@null@*/char *shellhook; /* checksums to not read check in Release file: */ bool ignorehashes[cs_hashCOUNT]; /* the name of the flat component, causing flat mode if non-NULL*/ component_t flat; //e.g. "IgnoreRelease: Yes" for 1 (default is 0) bool ignorerelease; //e.g. 
"GetInRelease: No" for 0 (default is 1) bool getinrelease; /* the form in which index files are preferably downloaded */ struct encoding_preferences downloadlistsas; /* if the specific field is there (to destinguish from an empty one) */ bool ignorehashes_set; bool ignorerelease_set; bool getinrelease_set; bool architectures_set; bool components_set; bool udebcomponents_set; bool includecondition_set; bool config_set; bool downloadlistsas_set; /* to check circular references */ bool visited; bool used; struct remote_repository *repository; }; struct update_origin { struct update_origin *next; /* all following are NULL when this is a delete rule */ /*@null@*/const struct update_pattern *pattern; /*@null@*/char *suite_from; /*@null@*/const struct distribution *distribution; /*@null@*/struct remote_distribution *from; /* cache for flat mode */ bool flat; /* set when there was a error and it should no longer be used */ bool failed; }; struct update_index_connector { struct update_index_connector *next; /* NULL when this is a delete rule */ /*@null@*/ struct remote_index *remote; /*@null@*/ struct update_origin *origin; /*@null@*/char *afterhookfilename; /* ignore wrong architecture packages (arch1>arch2 or flat) */ bool ignorewrongarchitecture; /* if newly downloaded or not in done file */ bool new; /* content needed (i.e. 
listhooks have to be run) */ bool needed; /* there was something missed here */ bool failed; /* do not generate 'done' file */ bool incomplete; }; struct update_target { /*@null@*/struct update_target *next; /*@null@*/struct update_index_connector *indices; /*@dependent@*/struct target *target; /*@null@*/struct upgradelist *upgradelist; /* Ignore delete marks (as some lists were missing) */ bool ignoredelete; /* don't do anything because of --skipold */ bool nothingnew; /* if true do not generate donefiles */ bool incomplete; }; struct update_distribution { struct update_distribution *next; struct distribution *distribution; struct update_pattern **patterns; struct update_origin *origins; struct update_target *targets; }; static void update_pattern_free(/*@only@*/struct update_pattern *update) { if (update == NULL) return; free(update->name); free(update->from); free(update->method); free(update->fallback); free(update->suite_from); free(update->verifyrelease); strlist_done(&update->config); strlist_done(&update->architectures_from); strlist_done(&update->architectures_into); strlist_done(&update->components_from); strlist_done(&update->components_into); strlist_done(&update->udebcomponents_from); strlist_done(&update->udebcomponents_into); term_free(update->includecondition); filterlist_release(&update->filterlist); filterlist_release(&update->filtersrclist); free(update->listhook); free(update->shellhook); remote_repository_free(update->repository); free(update); } void updates_freepatterns(struct update_pattern *p) { while (p != NULL) { struct update_pattern *pattern; pattern = p; p = pattern->next; update_pattern_free(pattern); } } static void updates_freeorigins(/*@only@*/struct update_origin *o) { while (o != NULL) { struct update_origin *origin; origin = o; o = origin->next; free(origin->suite_from); free(origin); } } static void updates_freetargets(/*@only@*/struct update_target *t) { while (t != NULL) { struct update_target *ut; ut = t; t = ut->next; while 
(ut->indices != NULL) { struct update_index_connector *ui; ui = ut->indices; ut->indices = ui->next; free(ui->afterhookfilename); free(ui); } free(ut); } } void updates_freeupdatedistributions(struct update_distribution *d) { while (d != NULL) { struct update_distribution *next; next = d->next; free(d->patterns); updates_freetargets(d->targets); updates_freeorigins(d->origins); free(d); d = next; } } static inline retvalue newupdatetarget(struct update_target **ts, /*@dependent@*/struct target *target) { struct update_target *ut; ut = malloc(sizeof(struct update_target)); if (FAILEDTOALLOC(ut)) return RET_ERROR_OOM; ut->target = target; ut->next = *ts; ut->indices = NULL; ut->upgradelist = NULL; ut->ignoredelete = false; ut->nothingnew = false; ut->incomplete = false; *ts = ut; return RET_OK; } /**************************************************************************** * Step 1: parsing the conf/updates file with the patterns * ****************************************************************************/ CFlinkedlistinit(update_pattern) CFvalueSETPROC(update_pattern, name) CFvalueSETPROC(update_pattern, suite_from) CFatomSETPROC(update_pattern, flat, at_component) CFvalueSETPROC(update_pattern, from) CFurlSETPROC(update_pattern, method) CFurlSETPROC(update_pattern, fallback) /* what here? 
*/ CFallSETPROC(update_pattern, verifyrelease) CFlinelistSETPROC(update_pattern, config) CFtruthSETPROC(update_pattern, ignorerelease) CFtruthSETPROC(update_pattern, getinrelease) CFscriptSETPROC(update_pattern, listhook) CFallSETPROC(update_pattern, shellhook) CFfilterlistSETPROC(update_pattern, filterlist) CFfilterlistSETPROC(update_pattern, filtersrclist) CFtermSSETPROC(update_pattern, includecondition) CFUSETPROC(update_pattern, downloadlistsas) { CFSETPROCVAR(update_pattern, this); char *word; const char *u; retvalue r; unsigned int e = 0; enum compression c; this->downloadlistsas_set = true; r = config_getword(iter, &word); while (RET_IS_OK(r)) { bool force; if (e >= ARRAYCOUNT(this->downloadlistsas.requested)) { fprintf(stderr, "%s:%d:%d: Ignoring all but first %d entries...\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter), (int)(ARRAYCOUNT( this->downloadlistsas.requested))); free(word); break; } if (strncmp(word, "force.", 6) == 0) { u = word + 5; force = true; } else { u = word; force = false; } for (c = 0 ; c < c_COUNT ; c++) { if (strcmp(uncompression_config[c], u) == 0 || strcmp(uncompression_config[c]+1, u) == 0) { break; } } if (c < c_COUNT) { this->downloadlistsas.requested[e].compression = c; this->downloadlistsas.requested[e].diff = false; this->downloadlistsas.requested[e].force = force; e++; free(word); r = config_getword(iter, &word); continue; } if (strcmp(u, ".diff") == 0 || strcmp(u, "diff") == 0) { this->downloadlistsas.requested[e].compression = c_gzip; this->downloadlistsas.requested[e].diff = true; this->downloadlistsas.requested[e].force = force; e++; free(word); r = config_getword(iter, &word); continue; } fprintf(stderr, "%s:%d:%d: Error: unknown list download mode '%s'!\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter), u); free(word); return RET_ERROR; } if (RET_WAS_ERROR(r)) return r; this->downloadlistsas.count = e; return RET_OK; } CFUSETPROC(update_pattern, components) { 
CFSETPROCVAR(update_pattern, this); retvalue r; int i; this->components_set = true; r = config_getsplitwords(iter, "Components", &this->components_from, &this->components_into); if (RET_IS_OK(r)) { // TODO: instead of this save numbers directly... for (i = 0 ; i < this->components_into.count ; i++) { component_t c; c = component_find(this->components_into.values[i]); if (c == atom_unknown) { fprintf(stderr, "Warning parsing %s, line %u: unknown component '%s' will be ignored!\n", config_filename(iter), config_markerline(iter), this->components_into.values[i]); } } } return r; } CFUSETPROC(update_pattern, udebcomponents) { CFSETPROCVAR(update_pattern, this); retvalue r; int i; this->udebcomponents_set = true; r = config_getsplitwords(iter, "UdebComponents", &this->udebcomponents_from, &this->udebcomponents_into); if (RET_IS_OK(r)) { // TODO: instead of this save numbers directly... for (i = 0 ; i < this->udebcomponents_into.count ; i++) { component_t c; c = component_find(this->udebcomponents_into.values[i]); if (c == atom_unknown) { fprintf(stderr, "Warning parsing %s, line %u: unknown udeb component '%s' will be ignored!\n", config_filename(iter), config_markerline(iter), this->udebcomponents_into.values[i]); } } } return r; } CFUSETPROC(update_pattern, architectures) { CFSETPROCVAR(update_pattern, this); retvalue r; int i; this->architectures_set = true; r = config_getsplitwords(iter, "Architectures", &this->architectures_from, &this->architectures_into); if (r == RET_NOTHING) { strlist_init(&this->architectures_from); strlist_init(&this->architectures_into); fprintf(stderr, "Warning parsing %s, line %u: an empty Architectures field\n" "causes the whole pattern to do nothing.\n", config_filename(iter), config_markerline(iter)); } if (RET_IS_OK(r)) { // TODO: instead of this save numbers directly... 
for (i = 0 ; i < this->architectures_into.count ; i++) { architecture_t a; a = architecture_find(this->architectures_into.values[i]); if (a == atom_unknown) { fprintf(stderr, "Warning parsing %s, line %u: unknown architecture '%s' will be ignored!\n", config_filename(iter), config_markerline(iter), this->architectures_into.values[i]); } } } return r; } CFhashesSETPROC(update_pattern, ignorehashes); static const struct configfield updateconfigfields[] = { CFr("Name", update_pattern, name), CF("From", update_pattern, from), CF("Method", update_pattern, method), CF("Fallback", update_pattern, fallback), CF("Config", update_pattern, config), CF("Suite", update_pattern, suite_from), CF("Architectures", update_pattern, architectures), CF("Components", update_pattern, components), CF("Flat", update_pattern, flat), CF("UDebComponents", update_pattern, udebcomponents), CF("GetInRelease", update_pattern, getinrelease), CF("IgnoreRelease", update_pattern, ignorerelease), CF("IgnoreHashes", update_pattern, ignorehashes), CF("VerifyRelease", update_pattern, verifyrelease), CF("ListHook", update_pattern, listhook), CF("ListShellHook", update_pattern, shellhook), CF("FilterFormula", update_pattern, includecondition), CF("FilterList", update_pattern, filterlist), CF("FilterSrcList", update_pattern, filtersrclist), CF("DownloadListsAs", update_pattern, downloadlistsas) }; CFfinishparse(update_pattern) { CFUfinishparseVARS(update_pattern, n, last_p, mydata); if (complete) { if (n->components_set && atom_defined(n->flat)) { fprintf(stderr, "%s:%u to %u: Update pattern may not contain Components and Flat fields ad the same time.\n", config_filename(iter), config_firstline(iter), config_line(iter)); return RET_ERROR; } if (n->udebcomponents_set && atom_defined(n->flat)) { fprintf(stderr, "%s:%u to %u: Update pattern may not contain UDebComponents and Flat fields ad the same time.\n", config_filename(iter), config_firstline(iter), config_line(iter)); return RET_ERROR; } if (n->from != 
NULL && n->method != NULL) { fprintf(stderr, "%s:%u to %u: Update pattern may not contain From: and Method: fields ad the same time.\n", config_filename(iter), config_firstline(iter), config_line(iter)); return RET_ERROR; } if (n->from == NULL && n->method == NULL) { fprintf(stderr, "%s:%u to %u: Update pattern must either contain a Methods: field or reference another one with a From: field.\n", config_filename(iter), config_firstline(iter), config_line(iter)); return RET_ERROR; } if (n->from != NULL && n->fallback != NULL) { fprintf(stderr, "%s:%u to %u: Update pattern may not contain From: and Fallback: fields ad the same time.\n", config_filename(iter), config_firstline(iter), config_line(iter)); return RET_ERROR; } if (n->from != NULL && n->config_set) { fprintf(stderr, "%s:%u to %u: Update pattern may not contain From: and Config: fields ad the same time.\n", config_filename(iter), config_firstline(iter), config_line(iter)); return RET_ERROR; } if (n->suite_from != NULL && strcmp(n->suite_from, "*") != 0 && strncmp(n->suite_from, "*/", 2) != 0 && strchr(n->suite_from, '*') != NULL) { fprintf(stderr, "%s:%u to %u: Unsupported suite pattern '%s'\n", config_filename(iter), config_firstline(iter), config_line(iter), n->suite_from); return RET_ERROR; } if (n->listhook != NULL && n->shellhook != NULL) { fprintf(stderr, "%s:%u to %u: Only one of ListHook and ListShellHook allowed per update rule\n", config_filename(iter), config_firstline(iter), config_line(iter)); return RET_ERROR; } } return linkedlistfinish(privdata_update_pattern, thisdata_update_pattern, lastdata_p_update_pattern, complete, iter); } retvalue updates_getpatterns(struct update_pattern **patterns) { struct update_pattern *update = NULL, *u, *v; bool progress; int i; retvalue r; r = configfile_parse("updates", IGNORABLE(unknownfield), configparser_update_pattern_init, finishparseupdate_pattern, "update rule", updateconfigfields, ARRAYCOUNT(updateconfigfields), &update); if (RET_IS_OK(r)) { for (u = 
update ; u != NULL ; u = u->next) { v = update; while (v != NULL && (v == u || strcmp(v->name, u->name) != 0)) v = v->next; if (v != NULL) { // TODO: store line information... fprintf(stderr, "%s/updates: Multiple update patterns named '%s'!\n", global.confdir, u->name); updates_freepatterns(update); return RET_ERROR; } if (u->from == NULL) continue; v = update; while (v != NULL && strcmp(v->name, u->from) != 0) v = v->next; if (v == NULL) { fprintf(stderr, "%s/updates: Update pattern '%s' references unknown pattern '%s' via From!\n", global.confdir, u->name, u->from); updates_freepatterns(update); return RET_ERROR; } u->pattern_from = v; } /* check for circular references */ do { progress = false; for (u = update ; u != NULL ; u = u->next) { if (u->visited) continue; if (u->pattern_from == NULL || u->pattern_from->visited) { u->visited = true; progress = true; } } } while (progress); u = update; while (u != NULL && u->visited) u = u->next; if (u != NULL) { /* The actual error is more likely found later. * If someone creates a cycle and a chain into that * more than 1000 rules long, having a slightly * misleading error message will be the last of * their problems... 
*/ for (i = 0 ; i < 1000 ; i++) { u = u->pattern_from; assert (u != NULL && !u->visited); } fprintf(stderr, "Error: Update rule '%s' part of circular From-referencing.\n", u->name); updates_freepatterns(update); return RET_ERROR; } *patterns = update; } else if (r == RET_NOTHING) { assert (update == NULL); *patterns = NULL; r = RET_OK; } else { if (r == RET_ERROR_UNKNOWNFIELD) (void)fputs( "To ignore unknown fields use --ignore=unknownfield\n", stderr); updates_freepatterns(update); } return r; } static inline void markfound(int count, struct update_pattern * const *patterns, const struct update_pattern *lookfor, const struct strlist *searched, const struct strlist *have, bool *found, bool (*hasattribute)(const struct update_pattern*)) { int i, j, o; for (i = 0 ; i < count ; i++) { const struct update_pattern *p = patterns[i]; /* check if this uses this attribute */ while (p != NULL && !hasattribute(p)) p = p->pattern_from; if (p != lookfor) continue; for (j = 0 ; j < have->count ; j++) { o = strlist_ofs(searched, have->values[j]); if (o >= 0) found[o] = true; } break; } } static inline bool hasarchitectures(const struct update_pattern *p) { return p->architectures_set; } static inline bool hascomponents(const struct update_pattern *p) { return p->components_set; } static inline bool hasudebcomponents(const struct update_pattern *p) { return p->udebcomponents_set; } /**************************************************************************** * Step 2: create rules for some distribution based on those patterns * ****************************************************************************/ static retvalue new_deleterule(struct update_origin **origins) { struct update_origin *update; update = zNEW(struct update_origin); if (FAILEDTOALLOC(update)) return RET_ERROR_OOM; *origins = update; return RET_OK; } static inline char *translate_suite_pattern(const struct update_pattern *p, const char *codename) { /* look for first specified suite: */ while (p != NULL && 
p->suite_from == NULL) p = p->pattern_from; if (p == NULL || strcmp(p->suite_from, "*") == 0) return strdup(codename); if (p->suite_from[0] == '*' && p->suite_from[1] == '/') return calc_dirconcat(codename, p->suite_from + 2); else if (strchr(p->suite_from, '*') == NULL) return strdup(p->suite_from); //TODO: implement this // but already checked in parsing... assert(0); return NULL; } static retvalue instance_pattern(struct update_pattern *pattern, const struct distribution *distribution, struct update_origin **origins) { struct update_origin *update; /*@dependant@*/struct update_pattern *declaration, *p, *listscomponents; bool ignorehashes[cs_hashCOUNT], ignorerelease, getinrelease; const char *verifyrelease; retvalue r; update = zNEW(struct update_origin); if (FAILEDTOALLOC(update)) return RET_ERROR_OOM; update->suite_from = translate_suite_pattern(pattern, distribution->codename); if (FAILEDTOALLOC(update->suite_from)) { free(update); return RET_ERROR_OOM; } if (!pattern->used) { declaration = pattern; while (declaration->pattern_from != NULL) declaration = declaration->pattern_from; if (declaration->repository == NULL) declaration->repository = remote_repository_prepare( declaration->name, declaration->method, declaration->fallback, &declaration->config); if (FAILEDTOALLOC(declaration->repository)) { free(update->suite_from); free(update); return RET_ERROR_OOM; } pattern->used = true; } else { declaration = pattern; while (declaration->pattern_from != NULL) declaration = declaration->pattern_from; assert (declaration->repository != NULL); } update->distribution = distribution; update->pattern = pattern; update->failed = false; p = pattern; while (p != NULL && !p->ignorerelease_set) p = p->pattern_from; if (p == NULL) ignorerelease = false; else ignorerelease = p->ignorerelease; p = pattern; while (p != NULL && !p->getinrelease_set) p = p->pattern_from; if (p == NULL) getinrelease = true; else getinrelease = p->getinrelease; /* find the first set values: */ p = 
pattern; while (p != NULL && p->verifyrelease == NULL) p = p->pattern_from; if (p == NULL) verifyrelease = NULL; else verifyrelease = p->verifyrelease; if (!ignorerelease && verifyrelease == NULL && verbose >= 0) { fprintf(stderr, "Warning: No VerifyRelease line in '%s' or any rule it includes via 'From:'.\n" "Release.gpg cannot be checked unless you tell which key to check with.\n" "(To avoid this warning and not check signatures add 'VerifyRelease: blindtrust').\n", pattern->name); } p = pattern; while (p != NULL && !p->ignorehashes_set) p = p->pattern_from; if (p == NULL) memset(ignorehashes, 0, sizeof(ignorehashes)); else { assert (sizeof(ignorehashes) == sizeof(p->ignorehashes)); memcpy(ignorehashes, p->ignorehashes, sizeof(ignorehashes)); } listscomponents = NULL; p = pattern; while (p != NULL && !atom_defined(p->flat)) { if (p->components_set || p->udebcomponents_set) listscomponents = p; p = p->pattern_from; } update->flat = p != NULL; if (update->flat && listscomponents != NULL) { fprintf(stderr, "WARNING: update pattern '%s' (first encountered via '%s' in '%s')\n" "sets components that are always ignored as '%s' sets Flat mode.\n", listscomponents->name, pattern->name, distribution->codename, p->name); } if (p != NULL && !atomlist_in(&distribution->components, p->flat)) { fprintf(stderr, "Error: distribution '%s' uses flat update pattern '%s'\n" "with target component '%s' which it does not contain!\n", distribution->codename, pattern->name, atoms_components[p->flat]); updates_freeorigins(update); return RET_ERROR; } r = remote_distribution_prepare(declaration->repository, update->suite_from, ignorerelease, getinrelease, verifyrelease, update->flat, ignorehashes, &update->from); if (RET_WAS_ERROR(r)) { updates_freeorigins(update); return r; } *origins = update; return RET_OK; } static retvalue findpatterns(struct update_pattern *patterns, const struct distribution *distribution, struct update_pattern ***patterns_p) { int i; struct update_pattern 
**used_patterns; if (distribution->updates.count == 0) return RET_NOTHING; used_patterns = nzNEW(distribution->updates.count, struct update_pattern *); if (FAILEDTOALLOC(used_patterns)) return RET_ERROR_OOM; for (i = 0; i < distribution->updates.count ; i++) { const char *name = distribution->updates.values[i]; struct update_pattern *pattern; if (strcmp(name, "-") == 0) continue; pattern = patterns; while (pattern != NULL && strcmp(name, pattern->name) != 0) pattern = pattern->next; if (pattern == NULL) { fprintf(stderr, "Cannot find definition of upgrade-rule '%s' for distribution '%s'!\n", name, distribution->codename); if (distribution->selected) { free(used_patterns); return RET_ERROR; } else fprintf(stderr, "This is no error now as '%s' is not used, bug might cause spurious warnings...\n", distribution->codename); } used_patterns[i] = pattern; } *patterns_p = used_patterns; return RET_OK; } static retvalue getorigins(struct update_distribution *d) { const struct distribution *distribution = d->distribution; struct update_origin *updates = NULL; retvalue result; int i; assert (d->patterns != NULL); result = RET_NOTHING; for (i = 0; i < distribution->updates.count ; i++) { struct update_pattern *pattern = d->patterns[i]; struct update_origin *update SETBUTNOTUSED(= NULL); retvalue r; if (pattern == NULL) { assert (strcmp(distribution->updates.values[i], "-") == 0); r = new_deleterule(&update); } else { r = instance_pattern(pattern, distribution, &update); } RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; if (RET_IS_OK(r)) { assert (update != NULL); update->next = updates; updates = update; } } if (RET_WAS_ERROR(result)) { updates_freeorigins(updates); } else { d->origins = updates; } return result; } /**************************************************************************** * Step 3: calculate which remote indices are to be retrieved and processed * ****************************************************************************/ static inline bool 
addremoteindex(struct update_origin *origin, struct target *target, struct update_target *updatetargets, const char *architecture, const char *component) { struct update_index_connector *uindex; const struct update_pattern *p; uindex = zNEW(struct update_index_connector); if (FAILEDTOALLOC(uindex)) return false; p = origin->pattern; while (p != NULL && !p->downloadlistsas_set) p = p->pattern_from; uindex->origin = origin; uindex->remote = remote_index(origin->from, architecture, component, target->packagetype, (p == NULL)?NULL:&p->downloadlistsas); if (FAILEDTOALLOC(uindex->remote)) { free(uindex); return false; } assert (!origin->flat); uindex->next = updatetargets->indices; uindex->ignorewrongarchitecture = strcmp(architecture, atoms_architectures[ target->architecture]) != 0; updatetargets->indices = uindex; return true; } static retvalue addorigintotarget(struct update_origin *origin, struct target *target, struct distribution *distribution, struct update_target *updatetargets) { const struct update_pattern *p; const struct strlist *c_from = NULL, *c_into = NULL; const struct strlist *a_from = NULL, *a_into = NULL; const char *architecture = atoms_architectures[target->architecture]; const char *component = atoms_components[target->component]; int ai, ci; assert (origin != NULL && origin->pattern != NULL); p = origin->pattern; while (p != NULL && !p->architectures_set) p = p->pattern_from; if (p != NULL) { a_from = &p->architectures_from; a_into = &p->architectures_into; } p = origin->pattern; if (target->packagetype == pt_udeb) { while (p != NULL && !p->udebcomponents_set) p = p->pattern_from; if (p != NULL) { c_from = &p->udebcomponents_from; c_into = &p->udebcomponents_into; } } else { while (p != NULL && !p->components_set) p = p->pattern_from; if (p != NULL) { c_from = &p->components_from; c_into = &p->components_into; } } if (a_into == NULL) { assert (atomlist_in(&distribution->architectures, target->architecture)); if (c_into == NULL) { if 
(!addremoteindex(origin, target, updatetargets, architecture, component)) return RET_ERROR_OOM; return RET_OK; } for (ci = 0 ; ci < c_into->count ; ci++) { if (strcmp(c_into->values[ci], component) != 0) continue; if (!addremoteindex(origin, target, updatetargets, architecture, c_from->values[ci])) return RET_ERROR_OOM; } return RET_OK; } for (ai = 0 ; ai < a_into->count ; ai++) { if (strcmp(architecture, a_into->values[ai]) != 0) continue; if (c_into == NULL) { if (!addremoteindex(origin, target, updatetargets, a_from->values[ai], component)) return RET_ERROR_OOM; continue; } for (ci = 0 ; ci < c_into->count ; ci++) { if (strcmp(component, c_into->values[ci]) != 0) continue; if (!addremoteindex(origin, target, updatetargets, a_from->values[ai], c_from->values[ci])) return RET_ERROR_OOM; } } return RET_OK; } static retvalue addflatorigintotarget(struct update_origin *origin, struct target *target, struct update_target *updatetargets) { const struct update_pattern *p; const struct strlist *a_into; const struct encoding_preferences *downloadlistsas; int ai; assert (origin != NULL); if (target->packagetype == pt_udeb) return RET_NOTHING; p = origin->pattern; while (p != NULL && !p->downloadlistsas_set) p = p->pattern_from; if (p == NULL) downloadlistsas = NULL; else downloadlistsas = &p->downloadlistsas; p = origin->pattern; while (p != NULL && !atom_defined(p->flat)) p = p->pattern_from; assert (p != NULL); if (p->flat != target->component) return RET_NOTHING; p = origin->pattern; while (p != NULL && !p->architectures_set) p = p->pattern_from; if (p == NULL) { struct update_index_connector *uindex; uindex = zNEW(struct update_index_connector); if (FAILEDTOALLOC(uindex)) return RET_ERROR_OOM; uindex->origin = origin; uindex->remote = remote_flat_index(origin->from, target->packagetype, downloadlistsas); if (FAILEDTOALLOC(uindex->remote)) { free(uindex); return RET_ERROR_OOM; } uindex->next = updatetargets->indices; assert (origin->flat); 
uindex->ignorewrongarchitecture = true; updatetargets->indices = uindex; return RET_OK; } a_into = &p->architectures_into; for (ai = 0 ; ai < a_into->count ; ai++) { struct update_index_connector *uindex; const char *a = atoms_architectures[target->architecture]; if (strcmp(a_into->values[ai], a) != 0) continue; uindex = zNEW(struct update_index_connector); if (FAILEDTOALLOC(uindex)) return RET_ERROR_OOM; uindex->origin = origin; uindex->remote = remote_flat_index(origin->from, target->packagetype, downloadlistsas); if (FAILEDTOALLOC(uindex->remote)) { free(uindex); return RET_ERROR_OOM; } uindex->next = updatetargets->indices; assert (origin->flat); uindex->ignorewrongarchitecture = true; updatetargets->indices = uindex; } return RET_OK; } static retvalue adddeleteruletotarget(struct update_target *updatetargets) { struct update_index_connector *uindex; uindex = zNEW(struct update_index_connector); if (FAILEDTOALLOC(uindex)) return RET_ERROR_OOM; uindex->next = updatetargets->indices; updatetargets->indices = uindex; return RET_OK; } static retvalue gettargets(struct update_origin *origins, struct distribution *distribution, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *types, struct update_target **ts) { struct target *target; struct update_origin *origin; struct update_target *updatetargets; retvalue r; updatetargets = NULL; for (target = distribution->targets ; target != NULL ; target = target->next) { if (!target_matches(target, components, architectures, types)) continue; r = newupdatetarget(&updatetargets, target); if (RET_WAS_ERROR(r)) { updates_freetargets(updatetargets); return r; } for (origin = origins ; origin != NULL ; origin=origin->next) { if (origin->pattern == NULL) r = adddeleteruletotarget(updatetargets); else if (!origin->flat) r = addorigintotarget(origin, target, distribution, updatetargets); else r = addflatorigintotarget(origin, target, updatetargets); if (RET_WAS_ERROR(r)) { 
updates_freetargets(updatetargets); return r; } } } *ts = updatetargets; return RET_OK; } static inline retvalue findmissingupdate(const struct distribution *distribution, struct update_origin *updates) { retvalue result; struct update_origin *last; int count; assert (updates != NULL); last = updates; count = 1; while (last->next != NULL) { last = last->next; count++; } result = RET_OK; if (count != distribution->updates.count) { int i; // TODO: why is this here? can this actually happen? for (i=0; iupdates.count; i++){ const char *update = distribution->updates.values[i]; struct update_origin *u; u = updates; while (u != NULL && strcmp(u->pattern->name, update) != 0) u = u->next; if (u == NULL) { fprintf(stderr, "Update '%s' is listed in distribution '%s', but was not found!\n", update, distribution->codename); result = RET_ERROR_MISSING; break; } } if (RET_IS_OK(result)) { fprintf(stderr, "Did you write an update two times in the update-line of '%s'?\n", distribution->codename); result = RET_NOTHING; } } return result; } retvalue updates_calcindices(struct update_pattern *patterns, struct distribution *distributions, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *types, struct update_distribution **update_distributions) { struct distribution *distribution; struct update_distribution *u_ds; retvalue result, r; u_ds = NULL; result = RET_NOTHING; for (distribution = distributions ; distribution != NULL ; distribution = distribution->next) { struct update_distribution *u_d; struct update_pattern **translated_updates; if (!distribution->selected) continue; r = findpatterns(patterns, distribution, &translated_updates); if (r == RET_NOTHING) continue; if (RET_WAS_ERROR(r)) { result = r; break; } u_d = zNEW(struct update_distribution); if (FAILEDTOALLOC(u_d)) { free(translated_updates); result = RET_ERROR_OOM; break; } u_d->distribution = distribution; u_d->patterns = translated_updates; u_d->next = u_ds; u_ds = u_d; r = 
getorigins(u_d); if (RET_WAS_ERROR(r)) { result = r; break; } if (RET_IS_OK(r)) { /* Check if we got all: */ r = findmissingupdate(distribution, u_d->origins); if (RET_WAS_ERROR(r)) { result = r; break; } r = gettargets(u_d->origins, distribution, components, architectures, types, &u_d->targets); if (RET_WAS_ERROR(r)) { result = r; break; } } result = RET_OK; } if (RET_IS_OK(result)) { *update_distributions = u_ds; } else updates_freeupdatedistributions(u_ds); return result; } /**************************************************************************** * Step 5: preperations for actually doing anything: * * - printing some warnings * * - prepare distribution for writing * * - rest moved to remote_startup * ****************************************************************************/ static retvalue updates_startup(struct aptmethodrun *run, struct update_distribution *distributions, bool willwrite) { retvalue r; struct update_distribution *d; for (d=distributions ; d != NULL ; d=d->next) { if (willwrite) { r = distribution_prepareforwriting(d->distribution); if (RET_WAS_ERROR(r)) return r; } r = distribution_loadalloverrides(d->distribution); if (RET_WAS_ERROR(r)) return r; } return remote_startup(run); } /**************************************************************************** * Step 6: queue downloading of list of lists (Release, Release.gpg, ...) 
* **************************************************************************** -> moved to remoterepository.c */ /**************************************************************************** * Step 7: queue downloading of lists * * (using information from previously downloaded meta-lists) * **************************************************************************** -> moved to remoterepository.c */ /**************************************************************************** * Step 8: call possible list hooks allowing them to modify the lists * ****************************************************************************/ static retvalue calllisthook(struct update_target *ut, struct update_index_connector *f, const char *listhook) { struct update_origin *origin = f->origin; const char *oldfilename = remote_index_file(f->remote); const char *oldbasefilename = remote_index_basefile(f->remote); char *newfilename; pid_t child, c; int status; /* distribution, component, architecture and pattern specific... 
*/ newfilename = genlistsfilename(oldbasefilename, 5, "", ut->target->distribution->codename, atoms_components[ut->target->component], atoms_architectures[ut->target->architecture], origin->pattern->name, ENDOFARGUMENTS); if (FAILEDTOALLOC(newfilename)) return RET_ERROR_OOM; child = fork(); if (child < 0) { int e = errno; free(newfilename); fprintf(stderr, "Error %d while forking for listhook: %s\n", e, strerror(e)); return RET_ERRNO(e); } if (child == 0) { int e; (void)closefrom(3); sethookenvironment(NULL, NULL, NULL, NULL); setenv("REPREPRO_FILTER_CODENAME", ut->target->distribution->codename, true); setenv("REPREPRO_FILTER_PACKAGETYPE", atoms_architectures[ut->target->packagetype], true); setenv("REPREPRO_FILTER_COMPONENT", atoms_components[ut->target->component], true); setenv("REPREPRO_FILTER_ARCHITECTURE", atoms_architectures[ut->target->architecture], true); setenv("REPREPRO_FILTER_PATTERN", origin->pattern->name, true); execl(listhook, listhook, oldfilename, newfilename, ENDOFARGUMENTS); e = errno; fprintf(stderr, "Error %d while executing '%s': %s\n", e, listhook, strerror(e)); exit(255); } if (verbose > 5) fprintf(stderr, "Called %s '%s' '%s'\n", listhook, oldfilename, newfilename); f->afterhookfilename = newfilename; do { c = waitpid(child, &status, WUNTRACED); if (c < 0) { int e = errno; fprintf(stderr, "Error %d while waiting for hook '%s' to finish: %s\n", e, listhook, strerror(e)); return RET_ERRNO(e); } } while (c != child); if (WIFEXITED(status)) { if (WEXITSTATUS(status) == 0) { if (verbose > 5) fprintf(stderr, "Listhook successfully returned!\n"); return RET_OK; } else { fprintf(stderr, "Listhook failed with exitcode %d!\n", (int)WEXITSTATUS(status)); return RET_ERROR; } } else { fprintf(stderr, "Listhook terminated abnormally. 
(status is %x)!\n", status); return RET_ERROR; } return RET_OK; } static retvalue callshellhook(struct update_target *ut, struct update_index_connector *f, const char *shellhook) { struct update_origin *origin = f->origin; const char *oldfilename = remote_index_file(f->remote); const char *oldbasefilename = remote_index_basefile(f->remote); char *newfilename; pid_t child, c; int status; int infd, outfd; /* distribution, component, architecture and pattern specific... */ newfilename = genlistsfilename(oldbasefilename, 5, "", ut->target->distribution->codename, atoms_components[ut->target->component], atoms_architectures[ut->target->architecture], origin->pattern->name, ENDOFARGUMENTS); if (FAILEDTOALLOC(newfilename)) return RET_ERROR_OOM; infd = open(oldfilename, O_RDONLY|O_NOCTTY|O_NOFOLLOW); if (infd < 0) { int e = errno; fprintf(stderr, "Error %d opening expected file '%s': %s!\n" "Something strange must go on!\n", e, oldfilename, strerror(e)); return RET_ERRNO(e); } (void)unlink(newfilename); outfd = open(newfilename, O_WRONLY|O_NOCTTY|O_NOFOLLOW|O_CREAT|O_EXCL, 0666); if (outfd < 0) { int e = errno; fprintf(stderr, "Error %d creating '%s': %s!\n", e, newfilename, strerror(e)); close(infd); return RET_ERRNO(e); } child = fork(); if (child < 0) { int e = errno; free(newfilename); fprintf(stderr, "Error %d while forking for shell hook: %s\n", e, strerror(e)); (void)close(infd); (void)close(outfd); (void)unlink(newfilename); return RET_ERRNO(e); } if (child == 0) { int e; assert (dup2(infd, 0) == 0); assert (dup2(outfd, 1) == 1); close(infd); close(outfd); (void)closefrom(3); sethookenvironment(NULL, NULL, NULL, NULL); setenv("REPREPRO_FILTER_CODENAME", ut->target->distribution->codename, true); setenv("REPREPRO_FILTER_PACKAGETYPE", atoms_architectures[ut->target->packagetype], true); setenv("REPREPRO_FILTER_COMPONENT", atoms_components[ut->target->component], true); setenv("REPREPRO_FILTER_ARCHITECTURE", atoms_architectures[ut->target->architecture], true); 
setenv("REPREPRO_FILTER_PATTERN", origin->pattern->name, true); execlp("sh", "sh", "-c", shellhook, ENDOFARGUMENTS); e = errno; fprintf(stderr, "Error %d while executing sh -c '%s': %s\n", e, shellhook, strerror(e)); exit(255); } (void)close(infd); (void)close(outfd); if (verbose > 5) fprintf(stderr, "Called sh -c '%s' <'%s' >'%s'\n", shellhook, oldfilename, newfilename); f->afterhookfilename = newfilename; do { c = waitpid(child, &status, WUNTRACED); if (c < 0) { int e = errno; fprintf(stderr, "Error %d while waiting for shell hook '%s' to finish: %s\n", e, shellhook, strerror(e)); return RET_ERRNO(e); } } while (c != child); if (WIFEXITED(status)) { if (WEXITSTATUS(status) == 0) { if (verbose > 5) fprintf(stderr, "shell hook successfully returned!\n"); return RET_OK; } else { fprintf(stderr, "shell hook '%s' failed with exitcode %d!\n", shellhook, (int)WEXITSTATUS(status)); return RET_ERROR; } } else { fprintf(stderr, "shell hook '%s' terminated abnormally. (status is %x)!\n", shellhook, status); return RET_ERROR; } return RET_OK; } static retvalue calllisthooks(struct update_distribution *d) { retvalue result, r; struct update_target *target; struct update_index_connector *uindex; result = RET_NOTHING; for (target = d->targets; target != NULL ; target = target->next) { if (target->nothingnew) continue; /* if anything is new, we will to need to look at * all (in case there are delete rules) */ for (uindex = target->indices ; uindex != NULL ; uindex = uindex->next) { const struct update_pattern *p; if (uindex->remote == NULL) continue; if (uindex->failed) continue; p = uindex->origin->pattern; while (p != NULL && p->listhook == NULL && p->shellhook == NULL) p = p->pattern_from; if (p == NULL) continue; if (p->listhook != NULL) r = calllisthook(target, uindex, p->listhook); else { assert (p->shellhook != NULL); r = callshellhook(target, uindex, p->shellhook); } if (RET_WAS_ERROR(r)) { uindex->failed = true; return r; } RET_UPDATE(result, r); } } return result; } 
/* Run all configured list/shell hooks for every distribution to be updated. */
static retvalue updates_calllisthooks(struct update_distribution *distributions) {
	retvalue result, r;
	struct update_distribution *d;

	result = RET_NOTHING;
	for (d=distributions ; d != NULL ; d=d->next) {
		r = calllisthooks(d);
		RET_UPDATE(result, r);
	}
	return result;
}

/****************************************************************************
 * Step 9: search for missing packages i.e. needing to be added or upgraded *
 * (all the logic in upgradelist.c, this is only glue code)                 *
 ****************************************************************************/

/* Decide whether a candidate package from a downloaded index should be
 * installed/upgraded/held.  Called by upgradelist_update() per candidate;
 * privdata is the update_pattern the index belongs to.  Layers consulted
 * in order: the pattern's src/bin filter lists (following From:
 * inheritance), the command-line source filter, the command-line binary
 * filter (non-dsc only), and finally the pattern's include condition. */
static upgrade_decision ud_decide_by_pattern(void *privdata, const struct target *target, const char *package, const char *source, /*@null@*/const char *old_version, const char *new_version, const char *new_src_version, const char *newcontrolchunk) {
	const struct update_pattern *pattern = privdata, *p;
	retvalue r;
	upgrade_decision decision = UD_UPGRADE;
	enum filterlisttype listdecision;
	bool cmdline_still_undecided;

	if (target->packagetype == pt_dsc) {
		/* source packages: prefer the src filter list, falling
		 * back to the plain filter list (both possibly inherited
		 * via From:): */
		p = pattern;
		while (p != NULL && !p->filtersrclist.set)
			p = p->pattern_from;
		if (p != NULL)
			listdecision = filterlist_find(package, new_version,
					&p->filtersrclist);
		else {
			p = pattern;
			while (p != NULL && !p->filterlist.set)
				p = p->pattern_from;
			if (p == NULL)
				listdecision = flt_install;
			else
				listdecision = filterlist_find(package,
						new_version, &p->filterlist);
		}
	} else {
		/* binary packages: prefer the plain filter list, falling
		 * back to the src filter list applied to the source: */
		p = pattern;
		while (p != NULL && !p->filterlist.set)
			p = p->pattern_from;
		if (p != NULL)
			listdecision = filterlist_find(package, new_version,
					&p->filterlist);
		else {
			p = pattern;
			while (p != NULL && !p->filtersrclist.set)
				p = p->pattern_from;
			if (p == NULL)
				listdecision = flt_install;
			else
				listdecision = filterlist_find(source,
						new_src_version,
						&p->filtersrclist);
		}
	}

	switch (listdecision) {
		case flt_deinstall:
		case flt_purge:
			return UD_NO;
		case flt_warning:
			return UD_LOUDNO;
		case flt_supersede:
			decision = UD_SUPERSEDE;
			break;
		case flt_hold:
			decision = UD_HOLD;
			break;
		case flt_error:
			/* cannot yet be handled! */
			fprintf(stderr,
"Package name marked to be unexpected('error'): '%s'!\n", package);
			return UD_ERROR;
		case flt_upgradeonly:
			if (old_version == NULL)
				return UD_NO;
			break;
		case flt_install:
			break;
		case flt_unchanged:
		case flt_auto_hold:
			/* always-false assert: these values are not
			 * expected from the pattern's filter lists */
			assert (listdecision != listdecision);
	}

	/* then apply the command-line source filter: */
	cmdline_still_undecided = false;
	switch (filterlist_find(source, new_src_version,
				&cmdline_src_filter)) {
		case flt_deinstall:
		case flt_purge:
			return UD_NO;
		case flt_warning:
			return UD_LOUDNO;
		case flt_auto_hold:
			cmdline_still_undecided = true;
			decision = UD_HOLD;
			break;
		case flt_hold:
			decision = UD_HOLD;
			break;
		case flt_supersede:
			decision = UD_SUPERSEDE;
			break;
		case flt_error:
			/* cannot yet be handled! */
			fprintf(stderr,
"Package name marked to be unexpected('error'): '%s'!\n", package);
			return UD_ERROR;
		case flt_upgradeonly:
			if (old_version == NULL)
				return UD_NO;
			break;
		case flt_install:
			decision = UD_UPGRADE;
			break;
		case flt_unchanged:
			cmdline_still_undecided = true;
			break;
	}

	/* and the command-line binary filter (not for source packages): */
	if (target->packagetype != pt_dsc) {
		switch (filterlist_find(package, new_version,
					&cmdline_bin_filter)) {
			case flt_deinstall:
			case flt_purge:
				return UD_NO;
			case flt_warning:
				return UD_LOUDNO;
			case flt_supersede:
				decision = UD_SUPERSEDE;
				break;
			case flt_hold:
				decision = UD_HOLD;
				break;
			case flt_error:
				/* cannot yet be handled! */
				fprintf(stderr,
"Package name marked to be unexpected('error'): '%s'!\n", package);
				return UD_ERROR;
			case flt_upgradeonly:
				if (old_version == NULL)
					return UD_NO;
				break;
			case flt_install:
				decision = UD_UPGRADE;
				break;
			case flt_unchanged:
				break;
			case flt_auto_hold:
				/* hold only if it was not in the src-filter */
				if (cmdline_still_undecided)
					decision = UD_HOLD;
				break;
		}
	}

	/* finally the pattern's include condition (inherited via From:): */
	p = pattern;
	while (p != NULL && !p->includecondition_set)
		p = p->pattern_from;
	if (p != NULL) {
		r = term_decidechunktarget(p->includecondition,
				newcontrolchunk, target);
		if (RET_WAS_ERROR(r))
			return UD_ERROR;
		if (r == RET_NOTHING) {
			return UD_NO;
		}
	}

	return decision;
}

/* Build the upgrade list of one target: read all (possibly hook-
 * processed) index files in configured order and record what to add,
 * upgrade or delete.  A connector without origin is the magic '-'
 * delete rule.  Progress goes to <out> (may be NULL). */
static inline retvalue searchformissing(/*@null@*/FILE *out, struct update_target *u) {
	struct update_index_connector *uindex;
	retvalue result, r;

	if (u->nothingnew) {
		if (u->indices == NULL && verbose >= 4 && out != NULL)
			fprintf(out, " nothing to do for '%s'\n",
					u->target->identifier);
		else if (u->indices != NULL && verbose >= 0 && out != NULL)
			fprintf(out,
" nothing new for '%s' (use --noskipold to process anyway)\n",
					u->target->identifier);
		return RET_NOTHING;
	}
	if (verbose > 2 && out != NULL)
		fprintf(out, " processing updates for '%s'\n",
				u->target->identifier);
	r = upgradelist_initialize(&u->upgradelist, u->target);
	if (RET_WAS_ERROR(r))
		return r;

	result = RET_NOTHING;
	for (uindex = u->indices ; uindex != NULL ; uindex = uindex->next) {
		const char *filename;

		if (uindex->origin == NULL) {
			/* the magic '-' rule: delete everything not
			 * re-listed by a later rule */
			if (verbose > 4 && out != NULL)
				fprintf(out,
" marking everything to be deleted\n");
			r = upgradelist_deleteall(u->upgradelist);
			if (RET_WAS_ERROR(r))
				u->incomplete = true;
			RET_UPDATE(result, r);
			if (RET_WAS_ERROR(r))
				return result;
			u->ignoredelete = false;
			continue;
		}

		/* prefer the output of a list/shell hook if there was one: */
		if (uindex->afterhookfilename != NULL)
			filename = uindex->afterhookfilename;
		else
			filename = remote_index_file(uindex->remote);

		if (uindex->failed || uindex->origin->failed) {
			if (verbose >= 1)
				fprintf(stderr, " missing '%s'\n", filename);
			/* never delete packages based on incomplete data: */
			u->incomplete = true;
			u->ignoredelete = true;
			continue;
		}

		if (verbose > 4 && out != NULL)
			fprintf(out, " reading '%s'\n", filename);
		r = upgradelist_update(u->upgradelist, uindex, filename,
				ud_decide_by_pattern,
				(void*)uindex->origin->pattern,
				uindex->ignorewrongarchitecture);
		if (RET_WAS_ERROR(r)) {
			u->incomplete = true;
			u->ignoredelete = true;
		}
		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r))
			return result;
	}
	return result;
}

/* Run searchformissing for every target of one distribution. */
static retvalue updates_readindices(/*@null@*/FILE *out, struct update_distribution *d) {
	retvalue result, r;
	struct update_target *u;

	result = RET_NOTHING;
	for (u=d->targets ; u != NULL ; u=u->next) {
		r = searchformissing(out, u);
		if (RET_WAS_ERROR(r))
			u->incomplete = true;
		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r))
			break;
	}
	return result;
}

/****************************************************************************
 * Step 10: enqueue downloading of missing packages                         *
 ****************************************************************************/

/* upgradelist callback: queue the files of one chosen package for
 * download via the apt method of the origin it came from. */
static retvalue enqueue_upgrade_package(void *calldata, const struct checksumsarray *origfiles, const struct strlist *filekeys, void *privdata) {
	struct update_index_connector *uindex = privdata;
	struct aptmethod *aptmethod;
	struct downloadcache *cache = calldata;

	assert(privdata != NULL);
	aptmethod = remote_aptmethod(uindex->origin->from);
	assert(aptmethod != NULL);
	return downloadcache_addfiles(cache, aptmethod, origfiles,
			filekeys);
}

/* Queue all missing files of all targets of one distribution. */
static retvalue updates_enqueue(struct downloadcache *cache, struct update_distribution *distribution) {
	retvalue result, r;
	struct update_target *u;

	result = RET_NOTHING;
	for (u=distribution->targets ; u != NULL ; u=u->next) {
		if (u->nothingnew)
			continue;
		r = upgradelist_enqueue(u->upgradelist,
				enqueue_upgrade_package, cache);
		if (RET_WAS_ERROR(r))
			u->incomplete = true;
		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r))
			break;
	}
	return result;
}

/****************************************************************************
 * Step 11: install the missing packages                                    *
 * (missing files should have been downloaded
 first)                                                                    *
 ****************************************************************************/

/* Check whether applying the upgrade lists of <d> would delete "too
 * much" (as judged by upgradelist_isbigdelete()).  If so, mark the
 * whole distribution omitted, free its upgrade lists and return true.
 * Only consulted when --onlysmalldeletes is active (see updates_update). */
static bool isbigdelete(struct update_distribution *d) {
	struct update_target *u, *v;

	for (u = d->targets ; u != NULL ; u=u->next) {
		if (u->nothingnew || u->ignoredelete)
			continue;
		if (upgradelist_isbigdelete(u->upgradelist)) {
			d->distribution->omitted = true;
			for (v = d->targets ; v != NULL ; v = v->next) {
				upgradelist_free(v->upgradelist);
				v->upgradelist = NULL;
			}
			return true;
		}
	}
	return false;
}

/* logger callback: report the update rule and source suite a package
 * originated from. */
static void updates_from_callback(void *privdata, const char **rule_p, const char **from_p) {
	struct update_index_connector *uindex = privdata;

	*from_p = uindex->origin->suite_from;
	*rule_p = uindex->origin->pattern->name;
}

/* Apply the computed upgrade list of every target of one distribution,
 * then retrack if tracking is enabled.  Frees the upgrade lists as it
 * goes. */
static retvalue updates_install(struct update_distribution *distribution) {
	retvalue result, r;
	struct update_target *u;
	struct distribution *d = distribution->distribution;

	assert (logger_isprepared(d->logger));

	result = RET_NOTHING;
	for (u=distribution->targets ; u != NULL ; u=u->next) {
		if (u->nothingnew)
			continue;
		r = upgradelist_install(u->upgradelist, d->logger,
				u->ignoredelete, updates_from_callback);
		RET_UPDATE(d->status, r);
		if (RET_WAS_ERROR(r))
			u->incomplete = true;
		RET_UPDATE(result, r);
		upgradelist_free(u->upgradelist);
		u->upgradelist = NULL;
		if (RET_WAS_ERROR(r))
			break;
	}
	if (RET_IS_OK(result) && d->tracking != dt_NONE) {
		r = tracking_retrack(d, false);
		RET_ENDUPDATE(result, r);
	}
	return result;
}

/****************************************************************************
 * Step 12: mark index files as processed, so they won't process a second   *
 * time, unless --noskipold is given                                        *
 ****************************************************************************/

/* Record every completely processed target and its index files in the
 * distribution's done-file, so a later run with skipold can skip them. */
static void markdone(struct update_distribution *d) {
	struct markdonefile *done;
	struct update_index_connector *i;
	struct update_target *t;
	retvalue r;

	r = markdone_create(d->distribution->codename, &done);
	if (!RET_IS_OK(r))
		return;
	for (t = d->targets ; t != NULL ; t = t->next) {
		/* incomplete targets must be looked at again next time: */
		if (t->incomplete)
			continue;
		markdone_target(done, t->target->identifier);
		for (i = t->indices ; i != NULL ; i = i->next)
			if (i->remote == NULL)
				markdone_cleaner(done);
			else
				remote_index_markdone(i->remote, done);
	}
	markdone_finish(done);
}

/****************************************************************************
 * All together now: everything done step after step, in between telling    *
 * the apt methods to actually download what was enqueued.                  *
 ****************************************************************************/

/* Read the distribution's done-file and set nothingnew on every target
 * whose recorded index files are unchanged since the last run. */
static retvalue markold(struct update_distribution *ud) {
	struct update_target *ut;
	struct update_index_connector *ui;
	retvalue r;
	struct donefile *donefile;
	const char *identifier;

	r = donefile_open(ud->distribution->codename, &donefile);
	if (!RET_IS_OK(r))
		return r;
	while (donefile_nexttarget(donefile, &identifier)) {
		ut = ud->targets;
		while (ut != NULL && strcmp(identifier,
					ut->target->identifier) != 0)
			ut = ut->next;
		if (ut == NULL)
			continue;
		ut->nothingnew = true;
		for (ui = ut->indices ; ui != NULL ; ui = ui->next) {
			/* if the order does not match, it does not matter
			 * if they are new or not, they should be processed
			 * anyway */
			if (ui->remote == NULL) {
				if (!donefile_iscleaner(donefile)) {
					ut->nothingnew = false;
					break;
				}
				continue;
			}
			if (remote_index_isnew(ui->remote, donefile)) {
				ut->nothingnew = false;
				break;
			}
		}
	}
	donefile_close(donefile);
	return RET_OK;
}

/* Determine what is old (if skipold) and tell the remote repository
 * code which meta and index lists are needed; *anythingtodo is set if
 * at least one index has to be looked at again. */
static retvalue updates_preparelists(struct aptmethodrun *run, struct update_distribution *distributions, bool nolistsdownload, bool skipold, bool *anythingtodo) {
	struct update_distribution *d;
	struct update_target *ut;
	struct update_index_connector *ui;
	retvalue r;

	r = remote_preparemetalists(run, nolistsdownload);
	if (RET_WAS_ERROR(r))
		return r;

	for (d = distributions ; d != NULL ; d = d->next) {
		/* first check what is old */
		if (skipold) {
			r = markold(d);
			if (RET_WAS_ERROR(r))
				return r;
		}
		/* we need anything that is needed in a target
		 * where something is new (as new might mean
		 * a package is left hiding leftmore packages,
		 * and everything in rightmore packages is needed
		 * to see what in the new takes effect) */
		for (ut = d->targets; ut != NULL ; ut = ut->next) {
			if (ut->nothingnew)
				continue;
			if (ut->indices == NULL) {
				ut->nothingnew = true;
				continue;
			}
			for (ui = ut->indices ; ui != NULL ; ui = ui->next) {
				if (ui->remote == NULL)
					continue;
				remote_index_needed(ui->remote);
				*anythingtodo = true;
			}
		}
	}
	r = remote_preparelists(run, nolistsdownload);
	if (RET_WAS_ERROR(r))
		return r;
	return RET_OK;
}

/* Common preparation for update/checkupdate/dumpupdate/predelete: start
 * the apt methods, fetch (or locate) the index lists and run the list
 * hooks.  On success the still-running apt method run is returned in
 * *run_p; RET_NOTHING means nothing is to be done at all. */
static retvalue updates_prepare(struct update_distribution *distributions, bool willwrite, bool nolistsdownload, bool skipold, struct aptmethodrun **run_p) {
	retvalue result, r;
	struct aptmethodrun *run;
	bool anythingtodo = !skipold;

	r = aptmethod_initialize_run(&run);
	if (RET_WAS_ERROR(r))
		return r;

	/* preparations */
	result = updates_startup(run, distributions, willwrite);
	if (RET_WAS_ERROR(result)) {
		aptmethod_shutdown(run);
		return result;
	}
	r = updates_preparelists(run, distributions, nolistsdownload,
			skipold, &anythingtodo);
	RET_UPDATE(result, r);
	if (RET_WAS_ERROR(result)) {
		aptmethod_shutdown(run);
		return result;
	}
	if (!anythingtodo && skipold) {
		if (verbose >= 0) {
			if (willwrite)
				printf(
"Nothing to do found. (Use --noskipold to force processing)\n");
			else
				fprintf(stderr,
"Nothing to do found. (Use --noskipold to force processing)\n");
		}
		aptmethod_shutdown(run);
		return RET_NOTHING;
	}

	/* Call ListHooks (if given) on the downloaded index files.
	 * (This is done even when nolistsdownload is given, as otherwise
	 * the filename to look in is not calculated) */
	r = updates_calllisthooks(distributions);
	RET_UPDATE(result, r);
	if (RET_WAS_ERROR(result)) {
		aptmethod_shutdown(run);
		return result;
	}
	*run_p = run;
	return RET_OK;
}

/* The full update run: prepare the lists, compute what to get,
 * download it, install/delete, and finally mark everything done. */
retvalue updates_update(struct update_distribution *distributions, bool nolistsdownload, bool skipold, enum spacecheckmode mode, off_t reserveddb, off_t reservedother) {
	retvalue result, r;
	struct update_distribution *d;
	struct downloadcache *cache;
	struct aptmethodrun *run;
	bool todo;

	causingfile = NULL;

	result = updates_prepare(distributions, true, nolistsdownload,
			skipold, &run);
	if (!RET_IS_OK(result))
		return result;

	/* Then get all packages */
	if (verbose >= 0)
		printf("Calculating packages to get...\n");
	r = downloadcache_initialize(mode, reserveddb, reservedother,
			&cache);
	if (!RET_IS_OK(r)) {
		aptmethod_shutdown(run);
		RET_UPDATE(result, r);
		return result;
	}

	todo = false;
	for (d=distributions ; d != NULL ; d=d->next) {
		r = updates_readindices(stdout, d);
		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r))
			break;
		if (global.onlysmalldeletes) {
			if (isbigdelete(d))
				continue;
		}
		r = updates_enqueue(cache, d);
		if (RET_IS_OK(r))
			todo = true;
		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r))
			break;
	}
	if (!RET_WAS_ERROR(result)) {
		r = space_check(cache->devices);
		RET_ENDUPDATE(result, r);
	}

	/* nothing to download, but perhaps deletions to do: */
	if (!RET_WAS_ERROR(result) && !todo) {
		for (d=distributions ; !todo && d != NULL ; d=d->next) {
			struct update_target *u;

			if (d->distribution->omitted)
				continue;
			for (u = d->targets ; u != NULL ; u = u->next) {
				if (u->nothingnew || u->ignoredelete)
					continue;
				if (upgradelist_woulddelete(
							u->upgradelist)) {
					todo = true;
					break;
				}
			}
		}
	}

	if (RET_WAS_ERROR(result) || !todo) {
		/* error or nothing at all to do: clean up and stop here */
		for (d=distributions ; d != NULL ; d=d->next) {
			struct update_target *u;

			if (d->distribution->omitted) {
				fprintf(stderr,
"Not processing updates for '%s' because of --onlysmalldeletes!\n",
					d->distribution->codename);
			} else if (RET_IS_OK(result))
				markdone(d);
			for (u=d->targets ; u != NULL ; u=u->next) {
				upgradelist_free(u->upgradelist);
				u->upgradelist = NULL;
			}
		}
		r = downloadcache_free(cache);
		RET_UPDATE(result, r);
		aptmethod_shutdown(run);
		return result;
	}

	if (verbose >= 0)
		printf("Getting packages...\n");
	r = aptmethod_download(run);
	RET_UPDATE(result, r);
	r = downloadcache_free(cache);
	RET_ENDUPDATE(result, r);

	if (verbose > 0)
		printf("Shutting down aptmethods...\n");
	r = aptmethod_shutdown(run);
	RET_UPDATE(result, r);
	if (RET_WAS_ERROR(result)) {
		for (d=distributions ; d != NULL ; d=d->next) {
			struct update_target *u;

			for (u=d->targets ; u != NULL ; u=u->next) {
				upgradelist_free(u->upgradelist);
				u->upgradelist = NULL;
			}
		}
		return result;
	}

	if (verbose >= 0)
		printf("Installing (and possibly deleting) packages...\n");
	for (d=distributions ; d != NULL ; d=d->next) {
		if (d->distribution->omitted)
			continue;
		r = updates_install(d);
		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r))
			break;
	}

	for (d=distributions ; d != NULL ; d=d->next) {
		if (d->distribution->omitted) {
			fprintf(stderr,
"Not processing updates for '%s' because of --onlysmalldeletes!\n",
				d->distribution->codename);
		} else
			markdone(d);
	}
	logger_wait();

	return result;
}

/****************************************************************************
 * Alternatively, don't download and install, but list what is needed to be *
 * done.
(For the checkupdate command) * ****************************************************************************/ static void upgrade_dumppackage(const char *packagename, /*@null@*/const char *oldversion, /*@null@*/const char *newversion, /*@null@*/const char *bestcandidate, /*@null@*/const struct strlist *newfilekeys, /*@null@*/const char *newcontrol, void *privdata) { struct update_index_connector *uindex = privdata; if (newversion == NULL) { if (oldversion != NULL && bestcandidate != NULL) { printf("'%s': '%s' will be deleted" " (best new: '%s')\n", packagename, oldversion, bestcandidate); } else if (oldversion != NULL) { printf("'%s': '%s' will be deleted" " (no longer available or superseded)\n", packagename, oldversion); } else { printf("'%s': will NOT be added as '%s'\n", packagename, bestcandidate); } } else if (newversion == oldversion) { if (bestcandidate != NULL) { if (verbose > 1) printf("'%s': '%s' will be kept" " (best new: '%s')\n", packagename, oldversion, bestcandidate); } else { if (verbose > 0) printf("'%s': '%s' will be kept" " (unavailable for reload)\n", packagename, oldversion); } } else { const char *via = uindex->origin->pattern->name; assert (newfilekeys != NULL); assert (newcontrol != NULL); if (oldversion != NULL) (void)printf( "'%s': '%s' will be upgraded to '%s' (from '%s'):\n files needed: ", packagename, oldversion, newversion, via); else (void)printf( "'%s': newly installed as '%s' (from '%s'):\n files needed: ", packagename, newversion, via); (void)strlist_fprint(stdout, newfilekeys); if (verbose > 2) (void)printf("\n installing as: '%s'\n", newcontrol); else (void)putchar('\n'); } } static void updates_dump(struct update_distribution *distribution) { struct update_target *u; for (u=distribution->targets ; u != NULL ; u=u->next) { if (u->nothingnew) continue; printf("Updates needed for '%s':\n", u->target->identifier); upgradelist_dump(u->upgradelist, upgrade_dumppackage); upgradelist_free(u->upgradelist); u->upgradelist = NULL; } } 
/* upgradelist_dump callback: print one package's fate in the terse,
 * machine-parseable dumpupdate format (delete/keep/update/add lines). */
static void upgrade_dumplistpackage(const char *packagename, /*@null@*/const char *oldversion, /*@null@*/const char *newversion, /*@null@*/const char *bestcandidate, /*@null@*/const struct strlist *newfilekeys, /*@null@*/const char *newcontrol, void *privdata) {
	struct update_index_connector *uindex = privdata;

	if (newversion == NULL) {
		if (oldversion == NULL)
			return;
		printf("delete '%s' '%s'\n", packagename, oldversion);
	} else if (newversion == oldversion) {
		/* pointer equality marks a package that is kept as-is */
		if (bestcandidate != NULL)
			printf("keep '%s' '%s' '%s'\n", packagename,
					oldversion, bestcandidate);
		else
			printf("keep '%s' '%s' unavailable\n", packagename,
					oldversion);
	} else {
		const char *via = uindex->origin->pattern->name;

		assert (newfilekeys != NULL);
		assert (newcontrol != NULL);
		if (oldversion != NULL)
			(void)printf("update '%s' '%s' '%s' '%s'\n",
					packagename, oldversion, newversion,
					via);
		else
			(void)printf("add '%s' - '%s' '%s'\n",
					packagename, newversion, via);
	}
}

/* Print all pending changes of one distribution in dumpupdate format
 * and release the upgrade lists. */
static void updates_dumplist(struct update_distribution *distribution) {
	struct update_target *u;

	for (u=distribution->targets ; u != NULL ; u=u->next) {
		if (u->nothingnew)
			continue;
		printf("Updates needed for '%s':\n", u->target->identifier);
		upgradelist_dump(u->upgradelist, upgrade_dumplistpackage);
		upgradelist_free(u->upgradelist);
		u->upgradelist = NULL;
	}
}

/* The checkupdate command: like update, but only report what would be
 * done (progress on stderr, so stdout stays parseable). */
retvalue updates_checkupdate(struct update_distribution *distributions, bool nolistsdownload, bool skipold) {
	struct update_distribution *d;
	retvalue result, r;
	struct aptmethodrun *run;

	result = updates_prepare(distributions, false, nolistsdownload,
			skipold, &run);
	if (!RET_IS_OK(result))
		return result;

	if (verbose > 0)
		fprintf(stderr, "Shutting down aptmethods...\n");
	r = aptmethod_shutdown(run);
	RET_UPDATE(result, r);
	if (RET_WAS_ERROR(result)) {
		return result;
	}

	/* Then look what packages to get */
	if (verbose >= 0)
		fprintf(stderr, "Calculating packages to get...\n");
	for (d=distributions ; d != NULL ; d=d->next) {
		r = updates_readindices(stderr, d);
		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r))
			break;
		updates_dump(d);
	}
	return result;
}

/* The dumpupdate command: like checkupdate, but with list-style output
 * and no progress messages at all. */
retvalue updates_dumpupdate(struct update_distribution *distributions, bool nolistsdownload, bool skipold) {
	struct update_distribution *d;
	retvalue result, r;
	struct aptmethodrun *run;

	result = updates_prepare(distributions, false, nolistsdownload,
			skipold, &run);
	if (!RET_IS_OK(result))
		return result;

	r = aptmethod_shutdown(run);
	RET_UPDATE(result, r);
	if (RET_WAS_ERROR(result)) {
		return result;
	}

	for (d=distributions ; d != NULL ; d=d->next) {
		r = updates_readindices(NULL, d);
		RET_UPDATE(result, r);
		if (RET_WAS_ERROR(r))
			break;
		updates_dumplist(d);
	}
	return result;
}

/******************************************************************************
 * For the predelete command: delete everything a following update run would *
 * delete. (Assuming no unexpected errors occur, like a file missing upstream.*
 *****************************************************************************/

retvalue updates_predelete(struct update_distribution *distributions, bool nolistsdownload, bool skipold) {
	retvalue result, r;
	struct update_distribution *d;
	struct aptmethodrun *run;

	causingfile = NULL;

	result = updates_prepare(distributions, true, nolistsdownload,
			skipold, &run);
	if (!RET_IS_OK(result))
		return result;

	if (verbose > 0)
		printf("Shutting down aptmethods...\n");
	r = aptmethod_shutdown(run);
	RET_UPDATE(result, r);
	if (RET_WAS_ERROR(result)) {
		return result;
	}

	if (verbose >= 0)
		printf("Removing obsolete or to be replaced packages...\n");
	for (d=distributions ; d != NULL ; d=d->next) {
		struct distribution *dd = d->distribution;
		struct update_target *u;

		for (u=d->targets ; u != NULL ; u=u->next) {
			r = searchformissing(stdout, u);
			RET_UPDATE(result, r);
			if (RET_WAS_ERROR(r)) {
				u->incomplete = true;
				continue;
			}
			if (u->nothingnew || u->ignoredelete) {
				upgradelist_free(u->upgradelist);
				u->upgradelist = NULL;
				continue;
			}
			r = upgradelist_predelete(u->upgradelist,
					dd->logger);
			RET_UPDATE(dd->status, r);
			if (RET_WAS_ERROR(r))
				u->incomplete = true;
			RET_UPDATE(result, r);
			upgradelist_free(u->upgradelist);
			u->upgradelist = NULL;
			if (RET_WAS_ERROR(r))
				return r;
			if (RET_IS_OK(result) && dd->tracking != dt_NONE) {
				r = tracking_retrack(dd, false);
				RET_ENDUPDATE(result, r);
			}
		}
	}
	logger_wait();
	return result;
}

/******************************************************************************
 * The cleanlists command has to mark all files that might be scheduled to be *
 * downloaded again, so that the rest can be deleted                          *
 ******************************************************************************/

/* Mark every cached list file that a future update run of <d> via this
 * rule could need again, so cachedlistfile_deleteunneeded can remove
 * the rest. */
static void marktargetsneeded(struct cachedlistfile *files, const struct distribution *d, component_t flat, /*@null@*/const struct strlist *a_from, /*@null@*/const struct strlist *a_into, /*@null@*/const struct strlist *c_from, /*@null@*/const struct strlist *uc_from, const char *repository, const char *suite) {
	struct target *t;
	int i, ai;

	if (atom_defined(flat)) {
		/* flat repositories have one index per packagetype: */
		bool deb_needed = false, dsc_needed = false;

		for (t = d->targets ; t != NULL ; t = t->next) {
			if (t->packagetype == pt_udeb)
				continue;
			if (flat != t->architecture)
				continue;
			if (a_into != NULL && !strlist_in(a_into,
						atoms_architectures[
						t->architecture]))
				continue;
			if (t->packagetype == pt_deb)
				deb_needed = true;
			else if (t->packagetype == pt_dsc)
				dsc_needed = true;
		}
		if (deb_needed)
			cachedlistfile_need_flat_index(files,
					repository, suite, pt_deb);
		if (dsc_needed)
			cachedlistfile_need_flat_index(files,
					repository, suite, pt_dsc);
		return;
	}
	/* .dsc */
	if ((a_into != NULL && strlist_in(a_into, "source")) ||
			(a_into == NULL && atomlist_in(&d->architectures,
				architecture_source))) {
		if (c_from != NULL)
			for (i = 0 ; i < c_from->count ; i++)
				cachedlistfile_need_index(files, repository,
						suite, "source",
						c_from->values[i], pt_dsc);
		else
			for (i = 0 ; i < d->components.count ; i++)
				cachedlistfile_need_index(files, repository,
						suite, "source",
						atoms_components[
						d->components.atoms[i]],
						pt_dsc);
	}
	/* .deb and .udeb */
	if (a_into != NULL) {
		for (ai = 0 ; ai < a_into->count ; ai++) {
			const char *a = a_from->values[ai];

			if (strcmp(a_into->values[ai], "source") == 0)
				continue;
			if (c_from != NULL)
				for (i = 0 ; i < c_from->count ; i++)
					cachedlistfile_need_index(files,
						repository, suite, a,
						c_from->values[i], pt_deb);
			else
				for (i = 0 ; i < d->components.count ; i++)
					cachedlistfile_need_index(files,
						repository, suite, a,
						atoms_components[
						d->components.atoms[i]],
						pt_deb);
			if (uc_from != NULL)
				for (i = 0 ; i < uc_from->count ; i++)
					cachedlistfile_need_index(files,
						repository, suite, a,
						uc_from->values[i], pt_udeb);
			else
				/* NOTE(review): loop bound is
				 * udebcomponents.count but the index goes
				 * into components.atoms — looks like it
				 * should be udebcomponents.atoms; verify */
				for (i = 0 ; i < d->udebcomponents.count ; i++)
					cachedlistfile_need_index(files,
						repository, suite, a,
						atoms_components[
						d->components.atoms[i]],
						pt_udeb);
		}
	} else {
		for (ai = 0 ; ai < d->architectures.count ; ai++) {
			const char *a = atoms_architectures[
				d->architectures.atoms[ai]];

			if (d->architectures.atoms[ai] == architecture_source)
				continue;
			if (c_from != NULL)
				for (i = 0 ; i < c_from->count ; i++)
					cachedlistfile_need_index(files,
						repository, suite, a,
						c_from->values[i], pt_deb);
			else
				for (i = 0 ; i < d->components.count ; i++)
					cachedlistfile_need_index(files,
						repository, suite, a,
						atoms_components[
						d->components.atoms[i]],
						pt_deb);
			if (uc_from != NULL)
				for (i = 0 ; i < uc_from->count ; i++)
					cachedlistfile_need_index(files,
						repository, suite, a,
						uc_from->values[i], pt_udeb);
			else
				/* NOTE(review): same suspected
				 * components/udebcomponents mismatch as
				 * above; verify */
				for (i = 0 ; i < d->udebcomponents.count ; i++)
					cachedlistfile_need_index(files,
						repository, suite, a,
						atoms_components[
						d->components.atoms[i]],
						pt_udeb);
		}
	}
}

/* The cleanlists command: scan the lists directory, mark everything a
 * configured update rule might need again, delete the rest. */
retvalue updates_cleanlists(const struct distribution *distributions, const struct update_pattern *patterns) {
	retvalue result;
	const struct distribution *d;
	const struct update_pattern *p, *q;
	struct cachedlistfile *files;
	int i;
	bool isflat;
	const struct strlist *uc_from = NULL;
	const struct strlist *c_from = NULL;
	const struct strlist *a_from = NULL, *a_into = NULL;
	const char *repository;
	char *suite;

	result = cachedlists_scandir(&files);
	if
(!RET_IS_OK(result)) return result; result = RET_OK; for (d = distributions ; d != NULL ; d = d->next) { if (d->updates.count == 0) continue; cachedlistfile_need(files, "lastseen", 2, "", d->codename, NULL); for (i = 0; i < d->updates.count ; i++) { const char *name = d->updates.values[i]; if (strcmp(name, "-") == 0) continue; p = patterns; while (p != NULL && strcmp(name, p->name) != 0) p = p->next; if (p == NULL) { fprintf(stderr, "Cannot find definition of upgrade-rule '%s' for distribution '%s'!\n", name, d->codename); result = RET_ERROR; continue; } q = p; while (q != NULL && q->pattern_from != NULL) q = q->pattern_from; repository = q->name; q = p; while (q != NULL && !atom_defined(q->flat)) q = q->pattern_from; isflat = q != NULL; q = p; while (q != NULL && !q->architectures_set) q = q->pattern_from; if (q != NULL) { a_from = &q->architectures_from; a_into = &q->architectures_into; } q = p; while (q != NULL && !q->components_set) q = q->pattern_from; if (q != NULL) c_from = &q->components_from; q = p; while (q != NULL && !q->udebcomponents_set) q = q->pattern_from; if (q != NULL) uc_from = &q->udebcomponents_from; suite = translate_suite_pattern(p, d->codename); if (FAILEDTOALLOC(suite)) { cachedlistfile_freelist(files); return RET_ERROR_OOM; } /* Only index files are intresting, everything else * Release, Release.gpg, compressed files, hook processed * files is deleted */ marktargetsneeded(files, d, isflat, a_from, a_into, c_from, uc_from, repository, suite); free(suite); } } cachedlistfile_deleteunneeded(files); cachedlistfile_freelist(files); return RET_OK; } reprepro-4.13.1/chunkedit.h0000644000175100017510000000365212152651661012523 00000000000000#ifndef REPREPRO_CHUNKEDIT_H #define REPREPRO_CHUNKEDIT_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" 
#endif #ifndef REPREPRO_STRLIST_H #include "strlist.h" #endif /* modifications of a chunk: */ struct chunkeditfield { /*@null@*/struct chunkeditfield *next; /* The name of the field: */ const char *field; size_t len_field; enum cefaction { CEF_DELETE, /* delete if there */ CEF_ADDMISSED, /* add if not there */ CEF_REPLACE, /* replace if there */ CEF_ADD, /* add if not there or replace if there */ CEF_KEEP /* keep it */ } action; enum cefwhen { CEF_EARLY, CEF_LATE } when; /* the following must be 0 or NULL for CEF_DELETE */ size_t len_all_data; /*@null@*/const char *data; size_t len_data; const struct strlist *words; int linecount; struct cef_line { int wordcount; const char **words; size_t *wordlen; } lines[]; }; /* those return NULL on out of memory and free next in that case */ /*@null@*/struct chunkeditfield *cef_newfield(const char *, enum cefaction, enum cefwhen, unsigned int /*linecount*/, /*@only@*//*@null@*/struct chunkeditfield *); void cef_setdata(struct chunkeditfield *, const char *); void cef_setdatalen(struct chunkeditfield *, const char *, size_t); /* calculate the length, do not change the strlist after that before free */ void cef_setwordlist(struct chunkeditfield *, const struct strlist *); retvalue cef_setline(struct chunkeditfield *, int /*line*/, int /*wordcount*/, ...); retvalue cef_setline2(struct chunkeditfield *, int, const char *, size_t, const char *, size_t, int, ...); retvalue chunk_edit(const char *, char **, size_t *, const struct chunkeditfield *); void cef_free(/*@only@*//*@null@*/struct chunkeditfield *); static inline struct chunkeditfield *cef_pop(/*@only@*/struct chunkeditfield *cef) { struct chunkeditfield *next = cef->next; cef->next = NULL; cef_free(cef); return next; } #endif reprepro-4.13.1/sizes.c0000644000175100017510000001510612152651661011672 00000000000000/* This file is part of "reprepro" * Copyright (C) 2011 Bernhard R. 
Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include "error.h" #include "strlist.h" #include "distribution.h" #include "database.h" #include "database_p.h" #include "files.h" #include "sizes.h" struct distribution_sizes { struct distribution_sizes *next; const char *codename; char *v; size_t codename_len; struct { unsigned long long all, onlyhere; } this, withsnapshots; bool seen, seensnapshot; }; static void distribution_sizes_freelist(struct distribution_sizes *ds) { while (ds != NULL) { struct distribution_sizes *s = ds; ds = ds->next; free(s->v); free(s); } } static bool fromdist(struct distribution_sizes *dist, const char *data, size_t len, bool *snapshot_p) { if (len < dist->codename_len + 1) return false; if (data[dist->codename_len] == '=') *snapshot_p = true; else if (data[dist->codename_len] == '|' || data[dist->codename_len] == ' ') *snapshot_p = false; else return false; return memcmp(data, dist->codename, dist->codename_len) == 0; } static retvalue count_sizes(struct cursor *cursor, bool specific, struct distribution_sizes *ds, unsigned long long *all_p, unsigned long long *onlyall_p) { const char *key, *data; size_t len; char *last_file = NULL; unsigned long long filesize = 0; bool onlyone = true; struct distribution_sizes *last_dist; struct distribution_sizes *s; bool snapshot; unsigned long long all = 
0, onlyall = 0; while (cursor_nexttempdata(rdb_references, cursor, &key, &data, &len)) { if (last_file == NULL || strcmp(last_file, key) != 0) { if (last_file != NULL) { free(last_file); for (s = ds ; s != NULL ; s = s->next) { s->seen = false; s->seensnapshot = false; } } last_file = strdup(key); if (FAILEDTOALLOC(last_file)) return RET_ERROR_OOM; onlyone = true; filesize = 0; last_dist = NULL; } if (data[0] == 'u' && data[1] == '|') { data += 2; len -= 2; } else if (data[0] == 's' && data[1] == '=') { data += 2; len -= 2; } if (last_dist != NULL && fromdist(last_dist, data, len, &snapshot)) { /* same distribution again */ if (!snapshot && !last_dist->seen) { last_dist->seen = true; last_dist->this.all += filesize; if (onlyone) last_dist->this.onlyhere += filesize; } continue; } s = ds; while (s != NULL && !fromdist(s, data, len, &snapshot)) s = s->next; if (s == NULL) { if (onlyone && last_dist != NULL) { if (!last_dist->seen) last_dist->this.onlyhere -= filesize; last_dist->withsnapshots.onlyhere -= filesize; } if (last_dist != NULL) onlyall -= filesize; onlyone = false; if (!specific) { struct distribution_sizes **s_p = &ds->next; const char *p; p = data; while (*p != '\0' && *p != ' ' && *p != '|' && *p != '=') p++; if (*p == '\0') continue; while (*s_p != NULL) s_p = &(*s_p)->next; s = zNEW(struct distribution_sizes); if (FAILEDTOALLOC(s)) { free(last_file); return RET_ERROR_OOM; } *s_p = s; s->v = strndup(data, (p-data) + 1); if (FAILEDTOALLOC(s)) { free(last_file); return RET_ERROR_OOM; } s->v[p-data] = '*'; s->codename = s->v; s->codename_len = p-data; snapshot = *p == '='; } else /* last_dist not changed on purpose */ continue; } /* found it to belong to distribution s */ if (s->seen) { assert (last_dist != NULL); assert (!onlyone); continue; } if (s->seensnapshot && !snapshot) { s->seen = true; s->this.all += filesize; assert (last_dist != NULL); assert (!onlyone); continue; } /* distribution seen for this file the first time */ if (last_dist != NULL) { 
if (onlyone) { last_dist->withsnapshots.onlyhere -= filesize; if (last_dist->seen) last_dist->this.onlyhere -= filesize; onlyone = false; } assert (filesize != 0); } else { /* and this is the first time * we are interested in the file */ filesize = files_getsize(key); assert (filesize != 0); if (onlyone) onlyall += filesize; all += filesize; } last_dist = s; if (snapshot) { s->seensnapshot = true; } else { s->seen = true; last_dist->this.all += filesize; if (onlyone) last_dist->this.onlyhere += filesize; } last_dist->withsnapshots.all += filesize; if (onlyone) last_dist->withsnapshots.onlyhere += filesize; } free(last_file); *all_p = all; *onlyall_p = onlyall; return RET_OK; } retvalue sizes_distributions(struct distribution *alldistributions, bool specific) { struct cursor *cursor; retvalue result, r; struct distribution_sizes *ds = NULL, **lds = &ds, *s; struct distribution *d; unsigned long long all = 0, onlyall = 0; for (d = alldistributions ; d != NULL ; d = d->next) { if (!d->selected) continue; s = zNEW(struct distribution_sizes); if (FAILEDTOALLOC(s)) { distribution_sizes_freelist(ds); return RET_ERROR_OOM; } s->codename = d->codename; s->codename_len = strlen(d->codename); *lds = s; lds = &s->next; } if (ds == NULL) return RET_NOTHING; r = table_newglobalcursor(rdb_references, &cursor); if (!RET_IS_OK(r)) { distribution_sizes_freelist(ds); return r; } result = count_sizes(cursor, specific, ds, &all, &onlyall); r = cursor_close(rdb_references, cursor); RET_ENDUPDATE(result, r); if (RET_IS_OK(result)) { printf("%-15s %13s %13s %13s %13s\n", "Codename", "Size", "Only", "Size(+s)", "Only(+s)"); for (s = ds ; s != NULL ; s = s->next) { printf("%-15s %13llu %13llu %13llu %13llu\n", s->codename, s->this.all, s->this.onlyhere, s->withsnapshots.all, s->withsnapshots.onlyhere); } if (specific && ds->next != NULL) printf("%-15s %13s %13s %13llu %13llu\n", " ", "", "", all, onlyall); } distribution_sizes_freelist(ds); return result; } 
reprepro-4.13.1/reference.h0000644000175100017510000000305512152651661012500 00000000000000#ifndef REPREPRO_REFERENCE_H #define REPREPRO_REFERENCE_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's happening?" #endif #ifndef REPREPRO_STRLIST_H #include "strlist.h" #warning "What's happening?" #endif #ifndef REPREPRO_DATABASE_H #include "database.h" #endif struct references; /* remove all references from a given identifier */ retvalue references_remove(const char *neededby); /* Add an reference by for the given , * excluding , if it is nonNULL. */ retvalue references_insert(const char *, const struct strlist *, const struct strlist * /*exclude*/); /* Add an reference by for the given , * do not error out if reference already exists */ retvalue references_add(const char *, const struct strlist *); /* Remove reference by for the given , * excluding , if it is nonNULL. */ retvalue references_delete(const char *, struct strlist *, /*@null@*/const struct strlist * /*exclude*/); /* add an reference to a file for an identifier. */ retvalue references_increment(const char * /*needed*/, const char * /*needey*/); /* delete reference to a file for an identifier */ retvalue references_decrement(const char * /*needed*/, const char * /*needey*/); /* check if an item is needed, returns RET_NOTHING if not */ retvalue references_isused(const char *); /* check if a reference is found as expected */ retvalue references_check(const char * /*referee*/, const struct strlist */*what*/); /* output all references to stdout */ retvalue references_dump(void); #endif reprepro-4.13.1/guesscomponent.h0000644000175100017510000000061312152651661013610 00000000000000#ifndef REPREPRO_GUESSCOMPONENT_H #define REPREPRO_GUESSCOMPONENT_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" 
#endif #ifndef REPREPRO_ATOMS_H #include "atoms.h" #endif retvalue guess_component(const char * /*codename*/, const struct atomlist * /*components*/, const char * /*package*/, const char * /*section*/, component_t, /*@out@*/component_t *); #endif reprepro-4.13.1/md5.h0000644000175100017510000000251012152651661011222 00000000000000/* * This is the header file for the MD5 message-digest algorithm. * The algorithm is due to Ron Rivest. This code was * written by Colin Plumb in 1993, no copyright is claimed. * This code is in the public domain; do with it what you wish. * * Equivalent code is available from RSA Data Security, Inc. * This code has been tested against that, and is equivalent, * except that you don't need to include two pages of legalese * with every copy. * * To compute the message digest of a chunk of bytes, declare an * MD5Context structure, pass it to MD5Init, call MD5Update as * needed on buffers full of bytes, and then call MD5Final, which * will fill a supplied 16-byte array with the digest. * * Changed so as no longer to depend on Colin Plumb's `usual.h' * header definitions; now uses stuff from dpkg's config.h * - Ian Jackson . * Still in the public domain. */ #ifndef MD5_H #define MD5_H #define MD5_DIGEST_SIZE 16 #define md5byte unsigned char #define UWORD32 unsigned int struct MD5Context { UWORD32 buf[4]; UWORD32 bytes[2]; UWORD32 in[16]; }; void MD5Init(/*@out@*/struct MD5Context *context); void MD5Update(struct MD5Context *context, md5byte const *buf, unsigned int len); void MD5Final(/*@out@*/unsigned char digest[MD5_DIGEST_SIZE], struct MD5Context *context); #endif /* !MD5_H */ reprepro-4.13.1/tracking.c0000644000175100017510000011377412152651661012351 00000000000000/* This file is part of "reprepro" * Copyright (C) 2005,2006,2007,2008,2009 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include "error.h" #include "names.h" #include "dirs.h" #include "names.h" #include "reference.h" #include "ignore.h" #include "configparser.h" #include "database_p.h" #include "tracking.h" #ifndef NOPARANOIA #define PARANOIA #endif struct s_tracking { char *codename; struct table *table; enum trackingtype type; struct trackingoptions options; }; retvalue tracking_done(trackingdb db) { retvalue r; if (db == NULL) return RET_OK; r = table_close(db->table); free(db->codename); free(db); return r; } retvalue tracking_initialize(/*@out@*/trackingdb *db, const struct distribution *distribution, bool readonly) { struct s_tracking *t; retvalue r; t = zNEW(struct s_tracking); if (FAILEDTOALLOC(t)) return RET_ERROR_OOM; t->codename = strdup(distribution->codename); if (FAILEDTOALLOC(t->codename)) { free(t); return RET_ERROR_OOM; } assert (distribution->tracking != dt_NONE || readonly); t->type = distribution->tracking; t->options = distribution->trackingoptions; r = database_opentracking(t->codename, readonly, &t->table); if (!RET_IS_OK(r)) { free(t->codename); free(t); return r; } *db = t; return RET_OK; } static inline enum filetype filetypechar(enum filetype filetype) { switch (filetype) { case ft_LOG: case ft_CHANGES: case ft_ALL_BINARY: case ft_ARCH_BINARY: case ft_SOURCE: case ft_XTRA_DATA: return filetype; } assert(false); return ft_XTRA_DATA; } retvalue trackedpackage_addfilekey(trackingdb tracks, struct trackedpackage *pkg, enum filetype filetype, char 
*filekey, bool used) { char *id; enum filetype ft = filetypechar(filetype); int i, *newrefcounts; enum filetype *newfiletypes; retvalue r; if (FAILEDTOALLOC(filekey)) return RET_ERROR_OOM; for (i = 0 ; i < pkg->filekeys.count ; i++) { if (strcmp(pkg->filekeys.values[i], filekey) == 0) { if (pkg->filetypes[i] != ft) { /* if old file has refcount 0, just repair: */ if (pkg->refcounts[i] <= 0) { free(filekey); pkg->filetypes[i] = ft; if (used) pkg->refcounts[i] = 1; return RET_OK; } fprintf(stderr, "Filekey '%s' already registered for '%s_%s' as type '%c' is tried to be reregistered as type '%c'!\n", filekey, pkg->sourcename, pkg->sourceversion, pkg->filetypes[i], ft); free(filekey); return RET_ERROR; } free(filekey); if (used) pkg->refcounts[i]++; return RET_OK; } } newrefcounts = realloc(pkg->refcounts, (pkg->filekeys.count + 1) * sizeof(int)); if (FAILEDTOALLOC(newrefcounts)) { free(filekey); return RET_ERROR_OOM; } if (used) newrefcounts[pkg->filekeys.count]=1; else newrefcounts[pkg->filekeys.count]=0; pkg->refcounts = newrefcounts; newfiletypes = realloc(pkg->filetypes, (pkg->filekeys.count + 1) * sizeof(enum filetype)); if (FAILEDTOALLOC(newfiletypes)) { free(filekey); return RET_ERROR_OOM; } newfiletypes[pkg->filekeys.count] = filetype; pkg->filetypes = newfiletypes; r = strlist_add(&pkg->filekeys, filekey); if (RET_WAS_ERROR(r)) return r; id = calc_trackreferee(tracks->codename, pkg->sourcename, pkg->sourceversion); if (FAILEDTOALLOC(id)) return RET_ERROR_OOM; r = references_increment(filekey, id); free(id); return r; } retvalue trackedpackage_adddupfilekeys(trackingdb tracks, struct trackedpackage *pkg, enum filetype filetype, const struct strlist *filekeys, bool used) { int i; retvalue result, r; assert (filekeys != NULL); result = RET_OK; for (i = 0 ; i < filekeys->count ; i++) { char *filekey = strdup(filekeys->values[i]); r = trackedpackage_addfilekey(tracks, pkg, filetype, filekey, used); RET_UPDATE(result, r); } return result; } static inline retvalue 
trackedpackage_removefilekey(trackingdb tracks, struct trackedpackage *pkg, const char *filekey) { int i; for (i = 0 ; i < pkg->filekeys.count ; i++) { if (strcmp(pkg->filekeys.values[i], filekey) == 0) { if (pkg->refcounts[i] > 0) { pkg->refcounts[i]--; } else fprintf(stderr, "Warning: tracking database of %s has inconsistent refcounts of %s_%s.\n", tracks->codename, pkg->sourcename, pkg->sourceversion); return RET_OK; } } fprintf(stderr, "Warning: tracking database of %s missed files for %s_%s.\n", tracks->codename, pkg->sourcename, pkg->sourceversion); return RET_OK; } retvalue trackedpackage_removefilekeys(trackingdb tracks, struct trackedpackage *pkg, const struct strlist *filekeys) { int i; retvalue result, r; assert (filekeys != NULL); result = RET_OK; for (i = 0 ; i < filekeys->count ; i++) { const char *filekey = filekeys->values[i]; r = trackedpackage_removefilekey(tracks, pkg, filekey); RET_UPDATE(result, r); } return result; } void trackedpackage_free(struct trackedpackage *pkg) { if (pkg != NULL) { free(pkg->sourcename); free(pkg->sourceversion); strlist_done(&pkg->filekeys); free(pkg->refcounts); free(pkg->filetypes); free(pkg); } } static inline int parsenumber(const char **d, size_t *s) { int count; count = 0; do { if (**d < '0' || **d > '7') return -1; count = (count*8) + (**d-'0'); (*d)++; (*s)--; if (*s == 0) return -1; } while (**d != '\0'); (*d)++; (*s)--; return count; } static retvalue tracking_new(const char *sourcename, const char *version, /*@out@*/struct trackedpackage **pkg) { struct trackedpackage *p; assert (pkg != NULL && sourcename != NULL && version != NULL); p = zNEW(struct trackedpackage); if (FAILEDTOALLOC(p)) return RET_ERROR_OOM; p->sourcename = strdup(sourcename); p->sourceversion = strdup(version); p->flags.isnew = true; if (FAILEDTOALLOC(p->sourcename) || FAILEDTOALLOC(p->sourceversion)) { trackedpackage_free(p); return RET_ERROR_OOM; } *pkg = p; return RET_OK; } static inline retvalue parse_data(const char *name, const char 
*version, const char *data, size_t datalen, /*@out@*/struct trackedpackage **pkg) { struct trackedpackage *p; int i; p = zNEW(struct trackedpackage); if (FAILEDTOALLOC(p)) return RET_ERROR_OOM; p->sourcename = strdup(name); p->sourceversion = strdup(version); if (FAILEDTOALLOC(p->sourcename) || FAILEDTOALLOC(p->sourceversion) /* || FAILEDTOALLOC(p->sourcedir) */) { trackedpackage_free(p); return RET_ERROR_OOM; } while (datalen > 0 && *data != '\0') { char *filekey; const char *separator; size_t filekeylen; retvalue r; if (((p->filekeys.count)&31) == 0) { enum filetype *n = realloc(p->filetypes, (p->filekeys.count+32)*sizeof(enum filetype)); if (FAILEDTOALLOC(n)) { trackedpackage_free(p); return RET_ERROR_OOM; } p->filetypes = n; } p->filetypes[p->filekeys.count] = *data; data++; datalen--; separator = memchr(data, '\0', datalen); if (separator == NULL) { fprintf(stderr, "Internal Error: Corrupt tracking data for %s %s\n", name, version); trackedpackage_free(p); return RET_ERROR; } filekeylen = separator - data; filekey = strndup(data, filekeylen); if (FAILEDTOALLOC(filekey)) { trackedpackage_free(p); return RET_ERROR_OOM; } r = strlist_add(&p->filekeys, filekey); if (RET_WAS_ERROR(r)) { trackedpackage_free(p); return r; } data += filekeylen + 1; datalen -= filekeylen + 1; } data++; datalen--; p->refcounts = nzNEW(p->filekeys.count, int); if (FAILEDTOALLOC(p->refcounts)) { trackedpackage_free(p); return RET_ERROR_OOM; } for (i = 0 ; i < p->filekeys.count ; i++) { if ((p->refcounts[i] = parsenumber(&data, &datalen)) < 0) { fprintf(stderr, "Internal Error: Corrupt tracking data for %s %s\n", name, version); trackedpackage_free(p); return RET_ERROR; } } if (datalen > 0) { fprintf(stderr, "Internal Error: Trailing garbage in tracking data for %s %s\n (%ld bytes)", name, version, (long)datalen); trackedpackage_free(p); return RET_ERROR; } p->flags.isnew = false; p->flags.deleted = false; *pkg = p; return RET_OK; } retvalue tracking_get(trackingdb t, const char 
*sourcename, const char *version, /*@out@*/struct trackedpackage **pkg) { const char *data; size_t datalen; retvalue r; assert (pkg != NULL && sourcename != NULL && version != NULL); r = table_getpair(t->table, sourcename, version, &data, &datalen); if (!RET_IS_OK(r)) return r; return parse_data(sourcename, version, data, datalen, pkg); } retvalue tracking_getornew(trackingdb tracks, const char *name, const char *version, /*@out@*/struct trackedpackage **pkg) { retvalue r; r = tracking_get(tracks, name, version, pkg); if (r == RET_NOTHING) r = tracking_new(name, version, pkg); return r; } static retvalue gen_data(struct trackedpackage *pkg, /*@out@*/char **newdata_p, /*@out@*/size_t *newdatalen_p) { size_t versionsize = strlen(pkg->sourceversion)+1; int i; char *d, *data; size_t datalen; datalen = versionsize + 1; for (i = 0 ; i < pkg->filekeys.count ; i++) { size_t l; l = strlen(pkg->filekeys.values[i]); if (l > 0) datalen += l+9; } data = malloc(datalen + 1); if (FAILEDTOALLOC(data)) return RET_ERROR_OOM; memcpy(data, pkg->sourceversion, versionsize); d = data + versionsize; for (i = 0 ; i < pkg->filekeys.count ; i++) { size_t l; l = strlen(pkg->filekeys.values[i]); if (l > 0) { *d = pkg->filetypes[i]; d++; memcpy(d, pkg->filekeys.values[i], l + 1); d+=l+1; } } *d ='\0'; d++; for (i = 0 ; i < pkg->filekeys.count ; i++) { int j; #define MAXREFCOUNTOCTETS 7 char countstring[MAXREFCOUNTOCTETS]; size_t count = pkg->refcounts[i]; countstring[MAXREFCOUNTOCTETS-1] = '\0'; for (j = MAXREFCOUNTOCTETS-2 ; j >= 0 ; j--) { countstring[j] = '0' + (count & 7); count >>= 3; if (count == 0) break; } #undef MAXREFCOUNTOCTETS assert (count == 0); memcpy(d, countstring+j, 7 - j); d+=7-j; datalen -= j; } *d ='\0'; assert ((size_t)(d-data) == datalen); *newdata_p = data; *newdatalen_p = datalen; return RET_OK; } static retvalue tracking_saveatcursor(trackingdb t, struct cursor *cursor, struct trackedpackage *pkg) { if (pkg->flags.deleted) { /* delete if delete is requested * (all 
unreferencing has to be done before) */ return cursor_delete(t->table, cursor, pkg->sourcename, pkg->sourceversion); } else { char *newdata; size_t newdatalen; retvalue r; r = gen_data(pkg, &newdata, &newdatalen); if (RET_IS_OK(r)) { r = cursor_replace(t->table, cursor, newdata, newdatalen); free(newdata); } return r; } } static retvalue tracking_saveonly(trackingdb t, struct trackedpackage *pkg) { retvalue r, r2; char *newdata; size_t newdatalen; assert (pkg != NULL); if (!pkg->flags.isnew) { struct cursor *cursor; r = table_newpairedcursor(t->table, pkg->sourcename, pkg->sourceversion, &cursor, NULL, NULL); if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) { fprintf(stderr, "Internal error: tracking_save with isnew=false called but could not find %s_%s in %s!\n", pkg->sourcename, pkg->sourceversion, t->codename); pkg->flags.isnew = true; } else { r = tracking_saveatcursor(t, cursor, pkg); r2 = cursor_close(t->table, cursor); RET_ENDUPDATE(r, r2); return r; } } if (pkg->flags.deleted) return RET_OK; r = gen_data(pkg, &newdata, &newdatalen); assert (r != RET_NOTHING); if (!RET_IS_OK(r)) return r; r = table_addrecord(t->table, pkg->sourcename, newdata, newdatalen, false); free(newdata); if (verbose > 18) fprintf(stderr, "Adding tracked package '%s'_'%s' to '%s'\n", pkg->sourcename, pkg->sourceversion, t->codename); return r; } retvalue tracking_save(trackingdb t, struct trackedpackage *pkg) { retvalue r = tracking_saveonly(t, pkg); trackedpackage_free(pkg); return r; } retvalue tracking_listdistributions(struct strlist *distributions) { return database_listsubtables("tracking.db", distributions); } retvalue tracking_drop(const char *codename) { retvalue result, r; result = database_dropsubtable("tracking.db", codename); r = references_remove(codename); RET_UPDATE(result, r); return result; } static retvalue tracking_recreatereferences(trackingdb t) { struct cursor *cursor; retvalue result, r; struct trackedpackage *pkg; char *id; int i; const char *key, *value, 
*data; size_t datalen; r = table_newglobalcursor(t->table, &cursor); if (!RET_IS_OK(r)) return r; result = RET_NOTHING; while (cursor_nextpair(t->table, cursor, &key, &value, &data, &datalen)) { r = parse_data(key, value, data, datalen, &pkg); if (RET_WAS_ERROR(r)) { (void)cursor_close(t->table, cursor); return r; } id = calc_trackreferee(t->codename, pkg->sourcename, pkg->sourceversion); if (FAILEDTOALLOC(id)) { trackedpackage_free(pkg); (void)cursor_close(t->table, cursor); return RET_ERROR_OOM; } for (i = 0 ; i < pkg->filekeys.count ; i++) { const char *filekey = pkg->filekeys.values[i]; r = references_increment(filekey, id); RET_UPDATE(result, r); } free(id); trackedpackage_free(pkg); } r = cursor_close(t->table, cursor); RET_UPDATE(result, r); return result; } retvalue tracking_rereference(struct distribution *distribution) { retvalue result, r; trackingdb tracks; result = references_remove(distribution->codename); if (distribution->tracking == dt_NONE) return result; r = tracking_initialize(&tracks, distribution, true); RET_UPDATE(result, r); if (!RET_IS_OK(r)) return result; r = tracking_recreatereferences(tracks); RET_UPDATE(result, r); r = tracking_done(tracks); RET_ENDUPDATE(result, r); return result; } retvalue tracking_remove(trackingdb t, const char *sourcename, const char *version) { retvalue result, r; struct cursor *cursor; const char *data; size_t datalen; char *id; struct trackedpackage *pkg SETBUTNOTUSED(= NULL); r = table_newpairedcursor(t->table, sourcename, version, &cursor, &data, &datalen); if (!RET_IS_OK(r)) return r; id = calc_trackreferee(t->codename, sourcename, version); if (FAILEDTOALLOC(id)) { (void)cursor_close(t->table, cursor); return RET_ERROR_OOM; } result = parse_data(sourcename, version, data, datalen, &pkg); if (RET_IS_OK(r)) { assert (pkg != NULL); r = references_delete(id, &pkg->filekeys, NULL); RET_UPDATE(result, r); trackedpackage_free(pkg); } else { RET_UPDATE(result, r); fprintf(stderr, "Could not parse data, removing 
all references blindly...\n"); r = references_remove(id); RET_UPDATE(result, r); } free(id); r = cursor_delete(t->table, cursor, sourcename, version); if (RET_IS_OK(r)) fprintf(stderr, "Removed %s_%s from %s.\n", sourcename, version, t->codename); RET_UPDATE(result, r); r = cursor_close(t->table, cursor); RET_ENDUPDATE(result, r); return result; } static void print(const char *codename, const struct trackedpackage *pkg){ int i; printf("Distribution: %s\n", codename); printf("Source: %s\n", pkg->sourcename); printf("Version: %s\n", pkg->sourceversion); printf("Files:\n"); for (i = 0 ; i < pkg->filekeys.count ; i++) { const char *filekey = pkg->filekeys.values[i]; printf(" %s %c %d\n", filekey, pkg->filetypes[i], pkg->refcounts[i]); } (void)fputs("\n", stdout); } retvalue tracking_printall(trackingdb t) { struct cursor *cursor; retvalue result, r; struct trackedpackage *pkg; const char *key, *value, *data; size_t datalen; r = table_newglobalcursor(t->table, &cursor); if (!RET_IS_OK(r)) return r; result = RET_NOTHING; while (cursor_nextpair(t->table, cursor, &key, &value, &data, &datalen)) { r = parse_data(key, value, data, datalen, &pkg); if (RET_IS_OK(r)) { print(t->codename, pkg); trackedpackage_free(pkg); } RET_UPDATE(result, r); } r = cursor_close(t->table, cursor); RET_ENDUPDATE(result, r); return result; } retvalue tracking_foreach_ro(struct distribution *d, tracking_foreach_ro_action *action) { trackingdb t; struct cursor *cursor; retvalue result, r; struct trackedpackage *pkg; const char *key, *value, *data; size_t datalen; r = tracking_initialize(&t, d, true); if (!RET_IS_OK(r)) return r; r = table_newglobalcursor(t->table, &cursor); if (!RET_IS_OK(r)) { (void)tracking_done(t); return r; } result = RET_NOTHING; while (cursor_nextpair(t->table, cursor, &key, &value, &data, &datalen)) { r = parse_data(key, value, data, datalen, &pkg); if (RET_IS_OK(r)) { r = action(d, pkg); trackedpackage_free(pkg); } RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; } r = 
cursor_close(t->table, cursor); RET_ENDUPDATE(result, r); r = tracking_done(t); RET_ENDUPDATE(result, r); return result; } retvalue tracking_parse(struct distribution *d, struct configiterator *iter) { enum trackingflags { tf_keep, tf_all, tf_minimal, tf_includechanges, tf_includebyhand, tf_includelogs, tf_keepsources, tf_needsources, tf_embargoalls, tf_COUNT /* must be last */ }; static const struct constant trackingflags[] = { {"keep", tf_keep}, {"all", tf_all}, {"minimal", tf_minimal}, {"includechanges", tf_includechanges}, {"includelogs", tf_includelogs}, {"includebyhand", tf_includebyhand}, {"keepsources", tf_keepsources}, {"needsources", tf_needsources}, {"embargoalls", tf_embargoalls}, {NULL, -1} }; bool flags[tf_COUNT]; retvalue r; int modecount; assert (d->tracking == dt_NONE); memset(flags, 0, sizeof(flags)); r = config_getflags(iter, "Tracking", trackingflags, flags, IGNORABLE(unknownfield), ""); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; modecount = flags[tf_keep]?1:0 + flags[tf_minimal]?1:0 + flags[tf_all]?1:0; if (modecount > 1) { fprintf(stderr, "Error parsing config file %s, line %u:\n" "Only one of 'keep','all' or 'minimal' can be in one Tracking header.\n", config_filename(iter), config_line(iter)); return RET_ERROR; } if (modecount < 1) { fprintf(stderr, "Error parsing config file %s, line %u, column %u:\n" "Tracking mode ('keep','all' or 'minimal') expected.\n", config_filename(iter), config_line(iter), config_column(iter)); return RET_ERROR; } if (flags[tf_keep]) d->tracking = dt_KEEP; else if (flags[tf_minimal]) d->tracking = dt_MINIMAL; else d->tracking = dt_ALL; d->trackingoptions.includechanges = flags[tf_includechanges]; d->trackingoptions.includebyhand = flags[tf_includebyhand]; d->trackingoptions.includelogs = flags[tf_includelogs]; d->trackingoptions.keepsources = flags[tf_keepsources]; d->trackingoptions.needsources = flags[tf_needsources]; if (flags[tf_needsources]) fprintf(stderr, "Warning parsing config file %s, line 
%u:\n" "'needsources' ignored as not yet supported.\n", config_filename(iter), config_line(iter)); d->trackingoptions.embargoalls = flags[tf_embargoalls]; if (flags[tf_embargoalls]) fprintf(stderr, "Warning parsing config file %s, line %u:\n" "'embargoall' ignored as not yet supported.\n", config_filename(iter), config_line(iter)); return RET_OK; } static retvalue trackingdata_remember(struct trackingdata *td, /*@only@*/char*name, /*@only@*/char*version) { struct trackingdata_remember *r; r = NEW(struct trackingdata_remember); if (FAILEDTOALLOC(r)) return RET_ERROR_OOM; r->name = name; r->version = version; r->next = td->remembered; td->remembered = r; return RET_OK; } retvalue trackingdata_summon(trackingdb tracks, const char *name, const char *version, struct trackingdata *data) { struct trackedpackage *pkg; retvalue r; r = tracking_getornew(tracks, name, version, &pkg); assert (r != RET_NOTHING); if (RET_IS_OK(r)) { data->tracks = tracks; data->pkg = pkg; data->remembered = NULL; return r; } return r; } retvalue trackingdata_new(trackingdb tracks, struct trackingdata *data) { data->tracks = tracks; data->pkg = NULL; data->remembered = NULL; return RET_OK; } retvalue trackingdata_switch(struct trackingdata *data, const char *source, const char *version) { retvalue r; if (data->pkg != NULL) { if (strcmp(data->pkg->sourcename, source) == 0 && strcmp(data->pkg->sourceversion, version) == 0) return RET_OK; r = tracking_saveonly(data->tracks, data->pkg); if (RET_WAS_ERROR(r)) return r; r = trackingdata_remember(data, data->pkg->sourcename, data->pkg->sourceversion); strlist_done(&data->pkg->filekeys); free(data->pkg->refcounts); free(data->pkg->filetypes); free(data->pkg); data->pkg = NULL; if (RET_WAS_ERROR(r)) return r; } r = tracking_getornew(data->tracks, source, version, &data->pkg); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; return RET_OK; } retvalue trackingdata_insert(struct trackingdata *data, enum filetype filetype, const struct strlist 
*filekeys, /*@null@*//*@only@*/char*oldsource, /*@null@*//*@only@*/char*oldversion, /*@null@*/const struct strlist *oldfilekeys) { retvalue result, r; struct trackedpackage *pkg; if (data == NULL) { assert(oldversion == NULL && oldsource == NULL); free(oldversion); free(oldsource); return RET_OK; } assert(data->pkg != NULL); result = trackedpackage_adddupfilekeys(data->tracks, data->pkg, filetype, filekeys, true); if (RET_WAS_ERROR(result)) { free(oldsource); free(oldversion); return result; } if (oldsource == NULL || oldversion == NULL || oldfilekeys == NULL) { assert(oldsource==NULL&&oldversion==NULL&&oldfilekeys==NULL); return RET_OK; } if (strcmp(oldversion, data->pkg->sourceversion) == 0 && strcmp(oldsource, data->pkg->sourcename) == 0) { /* Unlikely, but it may also be the same source version as * the package we are currently adding */ free(oldsource); free(oldversion); return trackedpackage_removefilekeys(data->tracks, data->pkg, oldfilekeys); } r = tracking_get(data->tracks, oldsource, oldversion, &pkg); if (RET_WAS_ERROR(r)) { free(oldsource); free(oldversion); return r; } if (r == RET_NOTHING) { fprintf(stderr, "Could not found tracking data for %s_%s in %s to remove old files from it.\n", oldsource, oldversion, data->tracks->codename); free(oldsource); free(oldversion); return result; } r = trackedpackage_removefilekeys(data->tracks, pkg, oldfilekeys); RET_UPDATE(result, r); r = tracking_save(data->tracks, pkg); RET_UPDATE(result, r); r = trackingdata_remember(data, oldsource, oldversion); RET_UPDATE(result, r); return result; } retvalue trackingdata_remove(struct trackingdata *data, /*@only@*/char*oldsource, /*@only@*/char*oldversion, const struct strlist *oldfilekeys) { retvalue result, r; struct trackedpackage *pkg; assert(oldsource != NULL && oldversion != NULL && oldfilekeys != NULL); if (data->pkg != NULL && strcmp(oldversion, data->pkg->sourceversion) == 0 && strcmp(oldsource, data->pkg->sourcename) == 0) { /* Unlikely, but it may also be the same 
source version as * the package we are currently adding */ free(oldsource); free(oldversion); return trackedpackage_removefilekeys(data->tracks, data->pkg, oldfilekeys); } result = tracking_get(data->tracks, oldsource, oldversion, &pkg); if (RET_WAS_ERROR(result)) { free(oldsource); free(oldversion); return result; } if (result == RET_NOTHING) { fprintf(stderr, "Could not found tracking data for %s_%s in %s to remove old files from it.\n", oldsource, oldversion, data->tracks->codename); free(oldsource); free(oldversion); return RET_OK; } r = trackedpackage_removefilekeys(data->tracks, pkg, oldfilekeys); RET_UPDATE(result, r); r = tracking_save(data->tracks, pkg); RET_UPDATE(result, r); r = trackingdata_remember(data, oldsource, oldversion); RET_UPDATE(result, r); return result; } void trackingdata_done(struct trackingdata *d) { trackedpackage_free(d->pkg); d->pkg = NULL; d->tracks = NULL; while (d->remembered != NULL) { struct trackingdata_remember *h = d->remembered; d->remembered = h->next; free(h->name); free(h->version); free(h); } } static inline retvalue trackedpackage_removeall(trackingdb tracks, struct trackedpackage *pkg) { retvalue result = RET_OK, r; char *id; // printf("[trackedpackage_removeall %s %s %s]\n", tracks->codename, pkg->sourcename, pkg->sourceversion); id = calc_trackreferee(tracks->codename, pkg->sourcename, pkg->sourceversion); if (FAILEDTOALLOC(id)) return RET_ERROR_OOM; pkg->flags.deleted = true; r = references_delete(id, &pkg->filekeys, NULL); RET_UPDATE(result, r); free(id); strlist_done(&pkg->filekeys); strlist_init(&pkg->filekeys); free(pkg->refcounts); pkg->refcounts = NULL; return result; } static inline bool tracking_needed(trackingdb tracks, struct trackedpackage *pkg, int ofs) { if (pkg->refcounts[ofs] > 0) return true; // TODO: add checks so that only .changes and .log files belonging // to still existing binaries are kept in minimal mode if (pkg->filetypes[ofs] == ft_LOG && tracks->options.includelogs) return true; if 
(pkg->filetypes[ofs] == ft_CHANGES && tracks->options.includechanges) return true; if (pkg->filetypes[ofs] == ft_XTRA_DATA) return true; if (pkg->filetypes[ofs] == ft_SOURCE && tracks->options.keepsources) return true; return false; } static inline retvalue trackedpackage_removeunneeded(trackingdb tracks, struct trackedpackage *pkg) { retvalue result = RET_OK, r; char *id = NULL; int i, j, count; assert(tracks->type == dt_MINIMAL); count = pkg->filekeys.count; j = 0; for (i = 0 ; i < count ; i++) { if (tracking_needed(tracks, pkg, i)) { if (j < i) { pkg->filekeys.values[j] = pkg->filekeys.values[i]; pkg->refcounts[j] = pkg->refcounts[i]; pkg->filetypes[j] = pkg->filetypes[i]; } j++; } else { char *filekey = pkg->filekeys.values[i]; pkg->filekeys.values[i] = NULL; if (FAILEDTOALLOC(id)) { id = calc_trackreferee(tracks->codename, pkg->sourcename, pkg->sourceversion); if (id == NULL) result = RET_ERROR_OOM; } if (id != NULL) { // printf("[trackedpackage_removeunneeded %s %s %s: '%s']\n", tracks->codename, pkg->sourcename, pkg->sourceversion, filekey); r = references_decrement(filekey, id); RET_UPDATE(result, r); } free(filekey); } } assert (j <= pkg->filekeys.count); pkg->filekeys.count = j; free(id); return result; } static inline retvalue trackedpackage_tidy(trackingdb tracks, struct trackedpackage *pkg) { int i; if (tracks->type == dt_KEEP) return RET_OK; /* look if anything clings to this package */ for (i = 0 ; i < pkg->filekeys.count ; i++) { if (pkg->refcounts[i] > 0) break; } if (i >= pkg->filekeys.count) /* nothing left, remove it all */ return trackedpackage_removeall(tracks, pkg); else if (tracks->type == dt_MINIMAL) /* remove all files no longer needed */ return trackedpackage_removeunneeded(tracks, pkg); else return RET_OK; } retvalue trackingdata_finish(trackingdb tracks, struct trackingdata *d) { retvalue r; assert (d->tracks == tracks); if (d->pkg != NULL) { r = trackedpackage_tidy(tracks, d->pkg); r = tracking_save(tracks, d->pkg); } else r = RET_OK; 
d->pkg = NULL; /* call for all remembered actions... */ while (d->remembered != NULL) { struct trackingdata_remember *h = d->remembered; struct trackedpackage *pkg; d->remembered = h->next; r = tracking_get(tracks, h->name, h->version, &pkg); free(h->name); free(h->version); free(h); if (RET_IS_OK(r)) { r = trackedpackage_tidy(tracks, pkg); r = tracking_save(tracks, pkg); } } d->tracks = NULL; return r; } retvalue tracking_tidyall(trackingdb t) { struct cursor *cursor; retvalue result, r; struct trackedpackage *pkg; const char *key, *value, *data; size_t datalen; r = table_newglobalcursor(t->table, &cursor); if (!RET_IS_OK(r)) return r; result = RET_NOTHING; while (cursor_nextpair(t->table, cursor, &key, &value, &data, &datalen)) { r = parse_data(key, value, data, datalen, &pkg); if (RET_WAS_ERROR(r)) { result = r; break; } r = trackedpackage_tidy(t, pkg); RET_UPDATE(result, r); r = tracking_saveatcursor(t, cursor, pkg); RET_UPDATE(result, r); trackedpackage_free(pkg); } r = cursor_close(t->table, cursor); RET_UPDATE(result, r); return result; } retvalue tracking_reset(trackingdb t) { struct cursor *cursor; retvalue result, r; struct trackedpackage *pkg; const char *key, *value, *data; char *newdata; size_t datalen, newdatalen; int i; r = table_newglobalcursor(t->table, &cursor); if (!RET_IS_OK(r)) return r; result = RET_NOTHING; while (cursor_nextpair(t->table, cursor, &key, &value, &data, &datalen)) { // this would perhaps be more stable if it just replaced // everything within the string just received... 
result = parse_data(key, value, data, datalen, &pkg); if (RET_WAS_ERROR(result)) break; for (i = 0 ; i < pkg->filekeys.count ; i++) { pkg->refcounts[i] = 0; } result = gen_data(pkg, &newdata, &newdatalen); trackedpackage_free(pkg); if (RET_IS_OK(result)) result = cursor_replace(t->table, cursor, newdata, newdatalen); free(newdata); if (RET_WAS_ERROR(result)) break; } r = cursor_close(t->table, cursor); RET_UPDATE(result, r); return result; } static retvalue tracking_foreachversion(trackingdb t, struct distribution *distribution, const char *sourcename, retvalue (action)(trackingdb t, struct trackedpackage *, struct distribution *)) { struct cursor *cursor; retvalue result, r; struct trackedpackage *pkg; const char *value, *data; size_t datalen; r = table_newduplicatecursor(t->table, sourcename, &cursor, &value, &data, &datalen); if (!RET_IS_OK(r)) return r; result = RET_NOTHING; do { r = parse_data(sourcename, value, data, datalen, &pkg); if (RET_WAS_ERROR(r)) { result = r; break; } if (verbose > 10) printf("Processing track of '%s' version '%s'\n", pkg->sourcename, pkg->sourceversion); r = action(t, pkg, distribution); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) { (void)cursor_close(t->table, cursor); trackedpackage_free(pkg); return r; } r = trackedpackage_tidy(t, pkg); RET_ENDUPDATE(result, r); r = tracking_saveatcursor(t, cursor, pkg); RET_UPDATE(result, r); trackedpackage_free(pkg); } while (cursor_nextpair(t->table, cursor, NULL, &value, &data, &datalen)); r = cursor_close(t->table, cursor); RET_UPDATE(result, r); return result; } static retvalue targetremovesourcepackage(trackingdb t, struct trackedpackage *pkg, struct distribution *distribution, struct target *target) { size_t component_len, arch_len; retvalue result, r; int i; const char *packagetype = atoms_packagetypes[target->packagetype]; const char *architecture = atoms_architectures[target->architecture]; const char *component = atoms_components[target->component]; result = RET_NOTHING; 
component_len = strlen(component); arch_len = strlen(architecture); for (i = 0 ; i < pkg->filekeys.count ; i++) { const char *s, *basefilename, *filekey = pkg->filekeys.values[i]; char *package, *control, *source, *version; struct strlist filekeys; bool savedstaletracking; if (pkg->refcounts[i] <= 0) continue; if (strncmp(filekey, "pool/", 5) != 0) continue; if (strncmp(filekey+5, component, component_len) != 0) continue; if (filekey[5+component_len] != '/') continue; /* check this file could actuall be in this target */ if (pkg->filetypes[i] == ft_ALL_BINARY) { if (target->packagetype == pt_dsc) continue; s = strrchr(filekey, '.'); if (s == NULL) continue; if (strcmp(s+1, packagetype) != 0) continue; } else if (pkg->filetypes[i] == ft_SOURCE) { if (target->packagetype != pt_dsc) continue; s = strrchr(filekey, '.'); if (s == NULL) continue; if (strcmp(s+1, "dsc") != 0) continue; } else if (pkg->filetypes[i] == ft_ARCH_BINARY) { if (target->packagetype == pt_dsc) continue; s = strrchr(filekey, '_'); if (s == NULL) continue; s++; if (strncmp(s, architecture, arch_len) != 0 || s[arch_len] != '.' || strcmp(s+arch_len+1, packagetype) != 0) continue; } else continue; /* get this package, check it has the right source and version, * and if yes, remove... 
*/ basefilename = strrchr(filekey, '/'); if (basefilename == NULL) basefilename = filekey; else basefilename++; s = strchr(basefilename, '_'); package = strndup(basefilename, s - basefilename); if (FAILEDTOALLOC(package)) return RET_ERROR_OOM; r = table_getrecord(target->packages, package, &control); if (RET_WAS_ERROR(r)) { free(package); return r; } if (r == RET_NOTHING) { if (pkg->filetypes[i] != ft_ALL_BINARY && verbose >= -1) { fprintf(stderr, "Warning: tracking data might be incosistent:\n" "cannot find '%s' in '%s', but '%s' should be there.\n", package, target->identifier, filekey); } free(package); continue; } r = target->getsourceandversion(control, package, &source, &version); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { free(package); free(control); return r; } if (strcmp(source, pkg->sourcename) != 0) { if (pkg->filetypes[i] != ft_ALL_BINARY && verbose >= -1) { fprintf(stderr, "Warning: tracking data might be incosistent:\n" "'%s' has '%s' of source '%s', but source '%s' contains '%s'.\n", target->identifier, package, source, pkg->sourcename, filekey); } free(source); free(version); free(package); free(control); continue; } free(source); if (strcmp(version, pkg->sourceversion) != 0) { if (pkg->filetypes[i] != ft_ALL_BINARY && verbose >= -1) { fprintf(stderr, "Warning: tracking data might be incosistent:\n" "'%s' has '%s' of source version '%s', but version '%s' contains '%s'.\n", target->identifier, package, version, pkg->sourceversion, filekey); } free(package); free(version); free(control); continue; } free(version); r = target->getfilekeys(control, &filekeys); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { free(package); free(control); return r; } /* we remove the tracking data outself, so this is not * told to remove the tracking data, so it might mark things * as stale, which we do not want.. 
*/ savedstaletracking = target->staletracking; /* that is a bit wasteful, as it parses some stuff again, but * but that is better than reimplementing logger here */ r = target_removereadpackage(target, distribution->logger, package, control, NULL); target->staletracking = savedstaletracking; free(control); free(package); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { strlist_done(&filekeys); return r; } trackedpackage_removefilekeys(t, pkg, &filekeys); strlist_done(&filekeys); result = RET_OK; } return result; } /* Try to remove all packages causing refcounts in this tracking record */ static retvalue removesourcepackage(trackingdb t, struct trackedpackage *pkg, struct distribution *distribution) { struct target *target; retvalue result, r; int i; result = RET_NOTHING; for (target = distribution->targets ; target != NULL ; target = target->next) { r = target_initpackagesdb(target, READWRITE); RET_ENDUPDATE(result, r); if (RET_IS_OK(r)) { r = targetremovesourcepackage(t, pkg, distribution, target); RET_UPDATE(result, r); RET_UPDATE(distribution->status, r); r = target_closepackagesdb(target); RET_ENDUPDATE(result, r); RET_ENDUPDATE(distribution->status, r); if (RET_WAS_ERROR(result)) return result; } } for (i = 0 ; i < pkg->filekeys.count ; i++) { const char *filekey = pkg->filekeys.values[i]; if (pkg->refcounts[i] <= 0) continue; if (pkg->filetypes[i] != ft_ALL_BINARY && pkg->filetypes[i] != ft_SOURCE && pkg->filetypes[i] != ft_ARCH_BINARY) continue; fprintf(stderr, "There was an inconsistency in the tracking data of '%s':\n" "'%s' has refcount > 0, but was nowhere found.\n", distribution->codename, filekey); pkg->refcounts[i] = 0; } return result; } retvalue tracking_removepackages(trackingdb t, struct distribution *distribution, const char *sourcename, /*@null@*/const char *version) { struct trackedpackage *pkg; retvalue result, r; if (version == NULL) return tracking_foreachversion(t, distribution, sourcename, removesourcepackage); result = tracking_get(t, 
sourcename, version, &pkg); if (RET_IS_OK(result)) { result = removesourcepackage(t, pkg, distribution); if (RET_IS_OK(result)) { r = trackedpackage_tidy(t, pkg); RET_ENDUPDATE(result, r); r = tracking_save(t, pkg); RET_ENDUPDATE(result, r); } else trackedpackage_free(pkg); } return result; } static retvalue package_retrack(UNUSED(struct distribution *di), struct target *target, const char *packagename, const char *controlchunk, void *data) { trackingdb tracks = data; return target->doretrack(packagename, controlchunk, tracks); } retvalue tracking_retrack(struct distribution *d, bool needsretrack) { struct target *t; trackingdb tracks; retvalue r, rr; if (d->tracking == dt_NONE) return RET_NOTHING; for (t = d->targets ; !needsretrack && t != NULL ; t = t->next) { if (t->staletracking) needsretrack = true; } if (!needsretrack) return RET_NOTHING; if (verbose > 0) printf("Retracking %s...\n", d->codename); r = tracking_initialize(&tracks, d, false); if (!RET_IS_OK(r)) return r; /* first forget that any package is there*/ r = tracking_reset(tracks); if (!RET_WAS_ERROR(r)) { /* add back information about actually used files */ r = distribution_foreach_package(d, atom_unknown, atom_unknown, atom_unknown, package_retrack, NULL, tracks); } if (RET_IS_OK(r)) { for (t = d->targets ; t != NULL ; t = t->next) { t->staletracking = false; } } if (!RET_WAS_ERROR(r)) { /* now remove everything no longer needed */ r = tracking_tidyall(tracks); } rr = tracking_done(tracks); RET_ENDUPDATE(r, rr); return r; } reprepro-4.13.1/copypackages.c0000644000175100017510000007127012152651661013212 00000000000000/* This file is part of "reprepro" * Copyright (C) 2008,2009 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include "error.h" #include "ignore.h" #include "strlist.h" #include "indexfile.h" #include "files.h" #include "target.h" #include "terms.h" #include "termdecide.h" #include "dpkgversions.h" #include "tracking.h" #include "filecntl.h" #include "mprintf.h" #include "globmatch.h" #include "copypackages.h" struct target_package_list { struct target_package_list *next; struct target *target; struct package { /*@null@*/struct package *next; char *name; char *version; char *sourcename; char *sourceversion; char *control; struct checksumsarray origfiles; struct strlist filekeys; architecture_t architecture; } *packages; }; struct package_list { /*@null@*/struct target_package_list *targets; }; static retvalue list_newpackage(struct package_list *list, struct target *target, const char *sourcename, const char *sourceversion, const char *packagename, const char *packageversion, /*@out@*/struct package **package_p) { struct target_package_list *t, **t_p; struct package *package, **p_p; int c; t_p = &list->targets; while (*t_p != NULL && (*t_p)->target != target) t_p = &(*t_p)->next; if (*t_p == NULL) { t = zNEW(struct target_package_list); if (FAILEDTOALLOC(t)) return RET_ERROR_OOM; t->target = target; t->next = *t_p; *t_p = t; } else t = *t_p; p_p = &t->packages; while (*p_p != NULL && (c = strcmp(packagename, (*p_p)->name)) < 0) p_p = &(*p_p)->next; if (*p_p != NULL && c == 0) { // TODO: improve this message..., or some context elsewhere 
fprintf(stderr, "Multiple occurences of package '%s'!\n", packagename); return RET_ERROR_EXIST; } package = zNEW(struct package); if (FAILEDTOALLOC(package)) return RET_ERROR_OOM; package->name = strdup(packagename); if (FAILEDTOALLOC(package->name)) { free(package); return RET_ERROR_OOM; } package->version = strdup(packageversion); if (FAILEDTOALLOC(package->version)) { free(package->name); free(package); return RET_ERROR_OOM; } package->sourcename = strdup(sourcename); if (FAILEDTOALLOC(package->sourcename)) { free(package->name); free(package->version); free(package); return RET_ERROR_OOM; } package->sourceversion = strdup(sourceversion); if (FAILEDTOALLOC(package->sourceversion)) { free(package->name); free(package->version); free(package->sourcename); free(package); return RET_ERROR_OOM; } package->next = *p_p; *p_p = package; *package_p = package; return RET_OK; } static void package_free(/*@only@*/struct package *package) { if (package == NULL) return; free(package->name); free(package->version); free(package->sourcename); free(package->sourceversion); free(package->control); checksumsarray_done(&package->origfiles); strlist_done(&package->filekeys); free(package); } static void list_cancelpackage(struct package_list *list, /*@only@*/struct package *package) { struct target_package_list *target; struct package **p_p; assert (package != NULL); for (target = list->targets ; target != NULL ; target = target->next) { p_p = &target->packages; while (*p_p != NULL && *p_p != package) p_p = &(*p_p)->next; if (*p_p == package) { *p_p = package->next; package_free(package); return; } } assert (package == NULL); } static retvalue list_prepareadd(struct package_list *list, struct target *target, const char *packagename, /*@null@*/const char *v, architecture_t package_architecture, const char *chunk) { char *version; char *source, *sourceversion; struct package *new SETBUTNOTUSED(= NULL); retvalue r; int i; if (v == NULL) { r = target->getversion(chunk, &version); assert 
(r != RET_NOTHING); if (RET_WAS_ERROR(r)) { return r; } } r = target->getsourceandversion(chunk, packagename, &source, &sourceversion); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { free(version); return r; } r = list_newpackage(list, target, source, sourceversion, packagename, (v==NULL)?version:v, &new); free(source); source = NULL; free(sourceversion); sourceversion = NULL; if (v == NULL) free(version); version = NULL; assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; assert (new != NULL); new->architecture = package_architecture; r = target->getinstalldata(target, new->name, new->version, package_architecture, chunk, &new->control, &new->filekeys, &new->origfiles); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { list_cancelpackage(list, new); return r; } assert (new->filekeys.count == new->origfiles.names.count); for (i = 0 ; i < new->filekeys.count ; i++) { const char *newfilekey = new->filekeys.values[i]; const char *oldfilekey = new->origfiles.names.values[i]; const struct checksums *checksums = new->origfiles.checksums[i]; r = files_canadd(newfilekey, checksums); /* normaly it should just already have that file, * in which case we have nothing to do: */ if (r == RET_NOTHING) continue; /* otherwise try to cope with it */ if (r == RET_ERROR_WRONG_MD5) { if (strcmp(newfilekey, oldfilekey) == 0) { fprintf(stderr, "Error: package %s version %s lists different checksums than in the pool!\n", new->name, new->version); } else { fprintf(stderr, "Error: package %s version %s needs '%s' which previously was '%s',\n" "but the new file is already listed with different checksums!\n", new->name, new->version, newfilekey, oldfilekey); } } if (RET_WAS_ERROR(r)) { list_cancelpackage(list, new); return r; } assert (RET_IS_OK(r)); if (strcmp(newfilekey, oldfilekey) == 0) { fprintf(stderr, "Error: package %s version %s lists file %s not yet in the pool!\n", new->name, new->version, newfilekey); list_cancelpackage(list, new); return RET_ERROR_MISSING; } // TODO: 
// check new // - if exists and other checksums delete // - if exists and correct checksums use // otherwise check old // - if exists and other checksums bail out // - if exists and correct checksum, hardlink/copy fprintf(stderr, "Error: cannot yet deal with files changing their position\n" "(%s vs %s in %s version %s)\n", newfilekey, oldfilekey, new->name, new->version); list_cancelpackage(list, new); return RET_ERROR_MISSING; } return RET_OK; } static retvalue package_add(struct distribution *into, /*@null@*/trackingdb tracks, struct target *target, const struct package *package, /*@null@*/ const char *suitefrom) { struct trackingdata trackingdata; retvalue r; if (verbose >= 1) { printf("Adding '%s' '%s' to '%s'.\n", package->name, package->version, target->identifier); } r = files_expectfiles(&package->filekeys, package->origfiles.checksums); if (RET_WAS_ERROR(r)) return r; if (interrupted()) return RET_ERROR_INTERRUPTED; if (tracks != NULL) { r = trackingdata_summon(tracks, package->sourcename, package->version, &trackingdata); if (RET_WAS_ERROR(r)) return r; } r = target_addpackage(target, into->logger, package->name, package->version, package->control, &package->filekeys, true, (tracks != NULL)? 
&trackingdata:NULL, package->architecture, NULL, suitefrom); RET_UPDATE(into->status, r); if (tracks != NULL) { retvalue r2; r2 = trackingdata_finish(tracks, &trackingdata); RET_ENDUPDATE(r, r2); } return r; } static retvalue packagelist_add(struct distribution *into, const struct package_list *list, /*@null@*/const char *suitefrom) { retvalue result, r; struct target_package_list *tpl; struct package *package; trackingdb tracks; r = distribution_prepareforwriting(into); if (RET_WAS_ERROR(r)) return r; if (into->tracking != dt_NONE) { r = tracking_initialize(&tracks, into, false); if (RET_WAS_ERROR(r)) return r; } else tracks = NULL; result = RET_NOTHING; for (tpl = list->targets; tpl != NULL ; tpl = tpl->next) { struct target *target = tpl->target; r = target_initpackagesdb(target, READWRITE); RET_ENDUPDATE(result, r); if (RET_WAS_ERROR(r)) break; for (package = tpl->packages; package != NULL ; package = package->next) { r = package_add(into, tracks, target, package, suitefrom); RET_UPDATE(result, r); } r = target_closepackagesdb(target); RET_UPDATE(into->status, r); RET_ENDUPDATE(result, r); } r = tracking_done(tracks); RET_ENDUPDATE(result, r); return result; } static retvalue copy_by_func(struct package_list *list, struct distribution *into, struct distribution *from, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes, retvalue action(struct package_list*, struct distribution *, struct distribution *, struct target *, struct target *, void *), void *data) { retvalue result, r; struct target *origtarget, *desttarget; result = RET_NOTHING; for (origtarget = from->targets ; origtarget != NULL ; origtarget = origtarget->next) { if (!target_matches(origtarget, components, architectures, packagetypes)) continue; desttarget = distribution_gettarget(into, origtarget->component, origtarget->architecture, origtarget->packagetype); if (desttarget == NULL) { if (verbose > 2) printf( "Not looking into '%s' as no 
matching target in '%s'!\n", origtarget->identifier, into->codename); continue; } r = action(list, into, from, desttarget, origtarget, data); RET_UPDATE(result, r); if (RET_WAS_ERROR(result)) return result; } return result; } struct namelist { int argc; const char **argv; bool *warnedabout; bool *found; }; static retvalue by_name(struct package_list *list, UNUSED(struct distribution *into), UNUSED(struct distribution *from), struct target *desttarget, struct target *fromtarget, void *data) { struct namelist *d = data; retvalue result, r; int i, j; r = target_initpackagesdb(fromtarget, READONLY); if (RET_WAS_ERROR(r)) return r; result = RET_NOTHING; for (i = 0 ; i < d->argc ; i++) { const char *name = d->argv[i]; char *chunk; architecture_t package_architecture; for (j = 0 ; j < i ; j++) if (strcmp(d->argv[i], d->argv[j]) == 0) break; if (j < i) { if (verbose >= 0 && ! d->warnedabout[j]) fprintf(stderr, "Hint: '%s' was listed multiple times, ignoring all but first!\n", d->argv[i]); d->warnedabout[j] = true; /* do not complain second is missing if we ignore it: */ d->found[i] = true; continue; } r = table_getrecord(fromtarget->packages, name, &chunk); if (r == RET_NOTHING) continue; RET_ENDUPDATE(result, r); if (RET_WAS_ERROR(r)) break; r = fromtarget->getarchitecture(chunk, &package_architecture); RET_ENDUPDATE(result, r); if (RET_WAS_ERROR(r)) break; r = list_prepareadd(list, desttarget, name, NULL, package_architecture, chunk); free(chunk); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; d->found[i] = true; } r = target_closepackagesdb(fromtarget); RET_ENDUPDATE(result, r); return result; } static void packagelist_done(struct package_list *list) { struct target_package_list *target; struct package *package; while ((target = list->targets) != NULL) { list->targets = target->next; while ((package = target->packages) != NULL) { target->packages = package->next; package_free(package); } free(target); } } retvalue copy_by_name(struct distribution *into, struct 
distribution *from, int argc, const char **argv, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes) { struct package_list list; struct namelist names = { argc, argv, nzNEW(argc, bool), nzNEW(argc, bool) }; retvalue r; if (FAILEDTOALLOC(names.warnedabout) || FAILEDTOALLOC(names.found)) { free(names.found); free(names.warnedabout); return RET_ERROR_OOM; } memset(&list, 0, sizeof(list)); r = copy_by_func(&list, into, from, components, architectures, packagetypes, by_name, &names); free(names.warnedabout); if (verbose >= 0 && !RET_WAS_ERROR(r)) { int i; bool first = true; assert(names.found != NULL); for (i = 0 ; i < argc ; i++) { if (names.found[i]) continue; if (first) (void)fputs( "Will not copy as not found: ", stderr); else (void)fputs(", ", stderr); first = false; (void)fputs(argv[i], stderr); } if (!first) { (void)fputc('.', stderr); (void)fputc('\n', stderr); } } free(names.found); if (!RET_IS_OK(r)) return r; r = packagelist_add(into, &list, from->codename); packagelist_done(&list); return r; } static retvalue by_source(struct package_list *list, UNUSED(struct distribution *into), UNUSED(struct distribution *from), struct target *desttarget, struct target *fromtarget, void *data) { struct namelist *d = data; struct target_cursor iterator; const char *packagename, *chunk; retvalue result, r; assert (d->argc > 0); r = target_openiterator(fromtarget, READONLY, &iterator); assert (r != RET_NOTHING); if (!RET_IS_OK(r)) return r; result = RET_NOTHING; while (target_nextpackage(&iterator, &packagename, &chunk)) { int i; char *source, *sourceversion; architecture_t package_architecture; r = fromtarget->getsourceandversion(chunk, packagename, &source, &sourceversion); if (r == RET_NOTHING) continue; if (RET_WAS_ERROR(r)) { result = r; break; } /* only include if source name matches */ if (strcmp(source, d->argv[0]) != 0) { free(source); free(sourceversion); continue; } i = 0; if (d->argc > 1) { int c; i = 
d->argc; while (--i > 0) { r = dpkgversions_cmp(sourceversion, d->argv[i], &c); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { free(source); free(sourceversion); (void)target_closeiterator(&iterator); return r; } if (c == 0) break; } /* there are source versions specified and * the source version of this package differs */ if (i == 0) { free(source); free(sourceversion); continue; } } free(source); free(sourceversion); r = fromtarget->getarchitecture(chunk, &package_architecture); if (RET_WAS_ERROR(r)) { result = r; break; } r = list_prepareadd(list, desttarget, packagename, NULL, package_architecture, chunk); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; d->found[0] = true; d->found[i] = true; } r = target_closeiterator(&iterator); RET_ENDUPDATE(result, r); return result; } retvalue copy_by_source(struct distribution *into, struct distribution *from, int argc, const char **argv, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes) { struct package_list list; struct namelist names = { argc, argv, NULL, nzNEW(argc, bool) }; retvalue r; if (FAILEDTOALLOC(names.found)) { free(names.found); return RET_ERROR_OOM; } memset(&list, 0, sizeof(list)); // TODO: implement fast way by looking at source tracking // (also allow copying .changes and .logs) r = copy_by_func(&list, into, from, components, architectures, packagetypes, by_source, &names); if (argc == 1 && !RET_WAS_ERROR(r) && verbose >= 0) { assert(names.found != NULL); if (!names.found[0]) { assert (r == RET_NOTHING); fprintf(stderr, "Nothing to do as no package with source '%s' found!\n", argv[0]); free(names.found); return RET_NOTHING; } } else if (!RET_WAS_ERROR(r) && verbose >= 0) { int i; bool first = true, anything = false; for (i = 1 ; i < argc ; i++) { if (names.found[i]) anything = true; } if (!anything) { assert (r == RET_NOTHING); fprintf(stderr, "Nothing to do as no packages with source '%s' and a requested source version found!\n", 
argv[0]); free(names.found); return RET_NOTHING; } for (i = 1 ; i < argc ; i++) { if (names.found[i]) continue; if (first) (void)fputs( "Will not copy as not found: ", stderr); else (void)fputs(", ", stderr); first = false; (void)fputs(argv[i], stderr); } if (!first) { (void)fputc('.', stderr); (void)fputc('\n', stderr); } if (verbose > 5) { (void)fputs("Found versions are: ", stderr); first = true; for (i = 1 ; i < argc ; i++) { if (!names.found[i]) continue; if (!first) (void)fputs(", ", stderr); first = false; (void)fputs(argv[i], stderr); } (void)fputc('.', stderr); (void)fputc('\n', stderr); } } free(names.found); if (!RET_IS_OK(r)) return r; r = packagelist_add(into, &list, from->codename); packagelist_done(&list); return r; } static retvalue by_formula(struct package_list *list, UNUSED(struct distribution *into), UNUSED(struct distribution *from), struct target *desttarget, struct target *fromtarget, void *data) { term *condition = data; struct target_cursor iterator; const char *packagename, *chunk; architecture_t package_architecture; retvalue result, r; r = target_openiterator(fromtarget, READONLY, &iterator); assert (r != RET_NOTHING); if (!RET_IS_OK(r)) return r; result = RET_NOTHING; while (target_nextpackage(&iterator, &packagename, &chunk)) { r = term_decidechunktarget(condition, chunk, desttarget); if (r == RET_NOTHING) continue; if (RET_WAS_ERROR(r)) { result = r; break; } r = fromtarget->getarchitecture(chunk, &package_architecture); if (RET_WAS_ERROR(r)) { result = r; break; } r = list_prepareadd(list, desttarget, packagename, NULL, package_architecture, chunk); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; } r = target_closeiterator(&iterator); RET_ENDUPDATE(result, r); return result; } static retvalue by_glob(struct package_list *list, UNUSED(struct distribution *into), UNUSED(struct distribution *from), struct target *desttarget, struct target *fromtarget, void *data) { const char *glob = data; struct target_cursor iterator; const char 
*packagename, *chunk; architecture_t package_architecture; retvalue result, r; r = target_openiterator(fromtarget, READONLY, &iterator); assert (r != RET_NOTHING); if (!RET_IS_OK(r)) return r; result = RET_NOTHING; while (target_nextpackage(&iterator, &packagename, &chunk)) { if (!globmatch(packagename, glob)) continue; r = fromtarget->getarchitecture(chunk, &package_architecture); if (RET_WAS_ERROR(r)) { result = r; break; } r = list_prepareadd(list, desttarget, packagename, NULL, package_architecture, chunk); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; } r = target_closeiterator(&iterator); RET_ENDUPDATE(result, r); return result; } retvalue copy_by_glob(struct distribution *into, struct distribution *from, const char *glob, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes) { struct package_list list; retvalue r; memset(&list, 0, sizeof(list)); r = copy_by_func(&list, into, from, components, architectures, packagetypes, by_glob, (void*)glob); if (!RET_IS_OK(r)) return r; r = packagelist_add(into, &list, from->codename); packagelist_done(&list); return r; } retvalue copy_by_formula(struct distribution *into, struct distribution *from, const char *filter, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes) { struct package_list list; term *condition; retvalue r; memset(&list, 0, sizeof(list)); r = term_compilefortargetdecision(&condition, filter); if (!RET_IS_OK(r)) { return r; } r = copy_by_func(&list, into, from, components, architectures, packagetypes, by_formula, condition); term_free(condition); if (!RET_IS_OK(r)) return r; r = packagelist_add(into, &list, from->codename); packagelist_done(&list); return r; } static retvalue choose_by_name(UNUSED(struct target *target), const char *packagename, UNUSED(const char *version), UNUSED(const char *chunk), void *privdata) { const struct namelist *l = privdata; int i; for (i = 0 ; i < l->argc ; 
i++) { if (strcmp(packagename, l->argv[i]) == 0) break; } if (i >= l->argc) return RET_NOTHING; return RET_OK; } static retvalue choose_by_source(struct target *target, const char *packagename, UNUSED(const char *versiondummy), const char *chunk, void *privdata) { const struct namelist *l = privdata; char *source, *sourceversion; retvalue r; // TODO: why doesn't this use version? r = target->getsourceandversion(chunk, packagename, &source, &sourceversion); if (!RET_IS_OK(r)) { return r; } assert (l->argc > 0); /* only include if source name matches */ if (strcmp(source, l->argv[0]) != 0) { free(source); free(sourceversion); return RET_NOTHING; } if (l->argc > 1) { int i, c; i = l->argc; while (--i > 0) { r = dpkgversions_cmp(sourceversion, l->argv[i], &c); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { free(source); free(sourceversion); return r; } if (c == 0) break; } /* there are source versions specified and * the source version of this package differs */ if (i == 0) { free(source); free(sourceversion); return RET_NOTHING; } } free(source); free(sourceversion); return RET_OK; } static retvalue choose_by_condition(struct target *target, UNUSED(const char *packagename), UNUSED(const char *version), const char *chunk, void *privdata) { term *condition = privdata; return term_decidechunktarget(condition, chunk, target); } static retvalue choose_by_glob(UNUSED(struct target *target), const char *packagename, UNUSED(const char *version), UNUSED(const char *chunk), void *privdata) { const char *glob = privdata; if (globmatch(packagename, glob)) return RET_OK; else return RET_NOTHING; } retvalue copy_from_file(struct distribution *into, component_t component, architecture_t architecture, packagetype_t packagetype, const char *filename, int argc, const char **argv) { struct indexfile *i; retvalue result, r; struct target *target; struct package_list list; struct namelist d = {argc, argv, NULL, NULL}; char *packagename, *version; architecture_t package_architecture; 
const char *control; assert (atom_defined(architecture)); assert (atom_defined(component)); assert (atom_defined(packagetype)); memset(&list, 0, sizeof(list)); target = distribution_gettarget(into, component, architecture, packagetype); if (target == NULL) { if (!atomlist_in(&into->architectures, architecture)) { fprintf(stderr, "Distribution '%s' does not contain architecture '%s!'\n", into->codename, atoms_architectures[architecture]); } if (packagetype != pt_udeb) { if (!atomlist_in(&into->components, component)) { fprintf(stderr, "Distribution '%s' does not contain component '%s!'\n", into->codename, atoms_components[component]); } } else { if (!atomlist_in(&into->udebcomponents, component)) { fprintf(stderr, "Distribution '%s' does not contain udeb component '%s!'\n", into->codename, atoms_components[component]); } } /* -A source needing -T dsc and vice versa already checked * in main.c */ fprintf(stderr, "No matching part of distribution '%s' found!\n", into->codename); return RET_ERROR; } result = indexfile_open(&i, filename, c_none); if (!RET_IS_OK(result)) return result; result = RET_NOTHING; while (indexfile_getnext(i, &packagename, &version, &control, &package_architecture, target, false)) { r = choose_by_name(target, packagename, version, control, &d); if (RET_IS_OK(r)) r = list_prepareadd(&list, target, packagename, version, package_architecture, control); free(packagename); free(version); RET_UPDATE(result, r); if (RET_WAS_ERROR(result)) break; } r = indexfile_close(i); RET_ENDUPDATE(result, r); if (RET_IS_OK(result)) result = packagelist_add(into, &list, NULL); packagelist_done(&list); return result; } typedef retvalue chooseaction(struct target *, const char *, const char *, const char *, void *); static retvalue restore_from_snapshot(struct distribution *into, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes, const char *snapshotname, chooseaction action, void *d) { retvalue result, r; 
struct package_list list; struct target *target; char *basedir; enum compression compression; architecture_t package_architecture; basedir = calc_snapshotbasedir(into->codename, snapshotname); if (FAILEDTOALLOC(basedir)) return RET_ERROR_OOM; memset(&list, 0, sizeof(list)); result = RET_NOTHING; for (target = into->targets ; target != NULL ; target = target->next) { char *filename, *packagename, *version; const char *control; struct indexfile *i; if (!target_matches(target, components, architectures, packagetypes)) continue; /* we do not know what compressions where used back then * and not even how the file was named, just look for * how the file is named now and try all readable * compressions */ compression = c_none; filename = calc_dirconcat3( basedir, target->relativedirectory, target->exportmode->filename); if (filename != NULL && !isregularfile(filename)) { /* no uncompressed file found, try .gz */ free(filename); compression = c_gzip; filename = mprintf("%s/%s/%s.gz", basedir, target->relativedirectory, target->exportmode->filename); } #ifdef HAVE_LIBBZ2 if (filename != NULL && !isregularfile(filename)) { /* no uncompressed or .gz file found, try .bz2 */ free(filename); compression = c_bzip2; filename = mprintf("%s/%s/%s.bz2", basedir, target->relativedirectory, target->exportmode->filename); } #endif if (filename != NULL && !isregularfile(filename)) { free(filename); fprintf(stderr, "Could not find '%s/%s/%s' nor '%s/%s/%s.gz',\n" "ignoring that part of the snapshot.\n", basedir, target->relativedirectory, target->exportmode->filename, basedir, target->relativedirectory, target->exportmode->filename); continue; } if (FAILEDTOALLOC(filename)) { result = RET_ERROR_OOM; break; } result = indexfile_open(&i, filename, compression); if (!RET_IS_OK(result)) break; while (indexfile_getnext(i, &packagename, &version, &control, &package_architecture, target, false)) { result = action(target, packagename, version, control, d); if (RET_IS_OK(result)) result = 
list_prepareadd(&list, target, packagename, version, package_architecture, control); free(packagename); free(version); if (RET_WAS_ERROR(result)) break; } r = indexfile_close(i); RET_ENDUPDATE(result, r); free(filename); if (RET_WAS_ERROR(result)) break; } free(basedir); if (RET_WAS_ERROR(result)) return result; r = packagelist_add(into, &list, snapshotname); packagelist_done(&list); return r; } retvalue restore_by_name(struct distribution *into, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes, const char *snapshotname, int argc, const char **argv) { struct namelist d = {argc, argv, NULL, NULL}; return restore_from_snapshot(into, components, architectures, packagetypes, snapshotname, choose_by_name, &d); } retvalue restore_by_source(struct distribution *into, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes, const char *snapshotname, int argc, const char **argv) { struct namelist d = {argc, argv, NULL, NULL}; return restore_from_snapshot(into, components, architectures, packagetypes, snapshotname, choose_by_source, &d); } retvalue restore_by_formula(struct distribution *into, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes, const char *snapshotname, const char *filter) { term *condition; retvalue r; r = term_compilefortargetdecision(&condition, filter); if (!RET_IS_OK(r)) { return r; } r = restore_from_snapshot(into, components, architectures, packagetypes, snapshotname, choose_by_condition, condition); term_free(condition); return r; } retvalue restore_by_glob(struct distribution *into, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes, const char *snapshotname, const char *glob) { return restore_from_snapshot(into, components, architectures, packagetypes, snapshotname, choose_by_glob, (void*)glob); } 
reprepro-4.13.1/uncompression.c0000644000175100017510000007054012152651661013444 00000000000000/* This file is part of "reprepro" * Copyright (C) 2008 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_LIBBZ2 #include #endif #include "globals.h" #include "error.h" #include "mprintf.h" #include "filecntl.h" #include "uncompression.h" const char * const uncompression_suffix[c_COUNT] = { "", ".gz", ".bz2", ".lzma", ".xz", ".lz" }; /* So help messages can hint what option to try */ const char * const uncompression_option[c_COUNT] = { NULL, NULL, "--bunzip2", "--unlzma", "--unxz", "--lunzip" }; /* how those are called in the config file */ const char * const uncompression_config[c_COUNT] = { ".", ".gz", ".bz2", ".lzma", ".xz", "lz" }; /*@null@*/ char *extern_uncompressors[c_COUNT] = { NULL, NULL, NULL, NULL, NULL}; /*@null@*/ static struct uncompress_task { struct uncompress_task *next; enum compression compression; char *compressedfilename; char *uncompressedfilename; /* when != NULL, call when finished */ /*@null@*/finishaction *callback; /*@null@*/void *privdata; /* if already started, the pid > 0 */ pid_t pid; } *tasks = NULL; static void uncompress_task_free(/*@only@*/struct uncompress_task *t) { free(t->compressedfilename); 
free(t->uncompressedfilename); free(t); } static retvalue startchild(enum compression c, int stdinfd, int stdoutfd, /*@out@*/pid_t *pid_p) { int e, i; pid_t pid; pid = fork(); if (pid < 0) { e = errno; fprintf(stderr, "Error %d forking: %s\n", e, strerror(e)); (void)close(stdinfd); (void)close(stdoutfd); return RET_ERRNO(e); } if (pid == 0) { /* setup child */ i = dup2(stdoutfd, 1); if (i < 0) { e = errno; fprintf(stderr, "Error %d in dup(%d, 1): %s\n", e, stdoutfd, strerror(e)); raise(SIGUSR2); } i = dup2(stdinfd, 0); if (i < 0) { e = errno; fprintf(stderr, "Error %d in dup(%d, 0): %s\n", e, stdinfd, strerror(e)); raise(SIGUSR2); } closefrom(3); execlp(extern_uncompressors[c], extern_uncompressors[c], ENDOFARGUMENTS); e = errno; fprintf(stderr, "Error %d starting '%s': %s\n", e, extern_uncompressors[c], strerror(e)); raise(SIGUSR2); exit(EXIT_FAILURE); } (void)close(stdinfd); (void)close(stdoutfd); *pid_p = pid; return RET_OK; } static retvalue startpipeoutchild(enum compression c, int fd, /*@out@*/int *pipefd, /*@out@*/pid_t *pid_p) { int i, e, filedes[2]; retvalue r; i = pipe(filedes); if (i < 0) { e = errno; fprintf(stderr, "Error %d creating pipe: %s\n", e, strerror(e)); (void)close(fd); return RET_ERRNO(e); } markcloseonexec(filedes[0]); r = startchild(c, fd, filedes[1], pid_p); if (RET_WAS_ERROR(r)) (void)close(filedes[0]); else *pipefd = filedes[0]; return r; } static retvalue startpipeinoutchild(enum compression c, /*@out@*/int *infd, /*@out@*/int *outfd, /*@out@*/pid_t *pid_p) { int i, e, infiledes[2]; retvalue r; i = pipe(infiledes); if (i < 0) { e = errno; fprintf(stderr, "Error %d creating pipe: %s\n", e, strerror(e)); return RET_ERRNO(e); } markcloseonexec(infiledes[1]); r = startpipeoutchild(c, infiledes[0], outfd, pid_p); if (RET_WAS_ERROR(r)) (void)close(infiledes[1]); else *infd = infiledes[1]; return r; } static void uncompress_start_queued(void) { struct uncompress_task *t; int running_count = 0; int e, stdinfd, stdoutfd; for (t = tasks ; t != 
NULL ; t = t->next) { if (t->pid > 0) running_count++; } // TODO: make the maximum number configurable, // until that 1 is the best guess... if (running_count >= 1) return; t = tasks; while (t != NULL && t->pid > 0) t = t->next; if (t == NULL) /* nothing to do... */ return; if (verbose > 1) { fprintf(stderr, "Uncompress '%s' into '%s' using '%s'...\n", t->compressedfilename, t->uncompressedfilename, extern_uncompressors[t->compression]); } stdinfd = open(t->compressedfilename, O_RDONLY|O_NOCTTY); if (stdinfd < 0) { e = errno; fprintf(stderr, "Error %d opening %s: %s\n", e, t->compressedfilename, strerror(e)); return ; // RET_ERRNO(e); } stdoutfd = open(t->uncompressedfilename, O_WRONLY|O_CREAT|O_EXCL|O_NOFOLLOW, 0666); if (stdoutfd < 0) { close(stdinfd); e = errno; fprintf(stderr, "Error %d creating %s: %s\n", e, t->uncompressedfilename, strerror(e)); return ; // RET_ERRNO(e); } // return startchild(t->compression, stdinfd, stdoutfd, &t->pid); } static inline retvalue builtin_uncompress(const char *compressed, const char *destination, enum compression compression); /* we got an pid, check if it is a uncompressor we care for */ retvalue uncompress_checkpid(pid_t pid, int status) { struct uncompress_task *t, **t_p; retvalue r; bool error = false; if (pid <= 0) return RET_NOTHING; t_p = &tasks; while ((t = (*t_p)) != NULL && t->pid != pid) t_p = &t->next; if (t == NULL) { /* not one we started */ return RET_NOTHING; } if (WIFEXITED(status)) { if (WEXITSTATUS(status) != 0) { fprintf(stderr, "'%s' < %s > %s exited with errorcode %d!\n", extern_uncompressors[t->compression], t->compressedfilename, t->uncompressedfilename, (int)(WEXITSTATUS(status))); error = true; } } else if (WIFSIGNALED(status)) { if (WTERMSIG(status) != SIGUSR2) fprintf(stderr, "'%s' < %s > %s killed by signal %d!\n", extern_uncompressors[t->compression], t->compressedfilename, t->uncompressedfilename, (int)(WTERMSIG(status))); error = true; } else { fprintf(stderr, "'%s' < %s > %s terminated 
abnormally!\n", extern_uncompressors[t->compression], t->compressedfilename, t->uncompressedfilename); error = true; } if (error) { /* no need to leave partial stuff around */ (void)unlink(t->uncompressedfilename); } if (!error && verbose > 10) printf("'%s' < %s > %s finished successfully!\n", extern_uncompressors[t->compression], t->compressedfilename, t->uncompressedfilename); if (error && uncompression_builtin(t->compression)) { /* try builtin method instead */ r = builtin_uncompress(t->compressedfilename, t->uncompressedfilename, t->compression); if (RET_WAS_ERROR(r)) { (void)unlink(t->uncompressedfilename); } else if (RET_IS_OK(r)) { error = false; } } /* call the notification, if asked for */ if (t->callback != NULL) { r = t->callback(t->privdata, t->compressedfilename, error); if (r == RET_NOTHING) r = RET_OK; } else if (error) r = RET_ERROR; else r = RET_OK; /* take out of the chain and free */ *t_p = t->next; uncompress_task_free(t); uncompress_start_queued(); return r; } bool uncompress_running(void) { uncompress_start_queued(); return tasks != NULL; } /* check if a program is available. This is needed because things like execlp * are to late (we want to know if downloading a Packages.bz2 does make sense * when compiled without libbz2 before actually calling the uncompressor) */ static void search_binary(/*@null@*/const char *setting, const char *default_program, /*@out@*/char **program_p) { char *program; const char *path, *colon; /* not set or empty means default */ if (setting == NULL || setting[0] == '\0') setting = default_program; /* all-caps NONE means I do not want any... 
*/ if (strcmp(setting, "NONE") == 0) return; /* look for the file, look in $PATH if not qualified, * only check existance, if someone it putting files not executable * by us there it is their fault (as being executeable by us is hard * to check) */ if (strchr(setting, '/') != NULL) { if (!isregularfile(setting)) return; if (access(setting, X_OK) != 0) return; program = strdup(setting); } else { path = getenv("PATH"); if (path == NULL) return; program = NULL; while (program == NULL && path[0] != '\0') { if (path[0] == ':') { path++; continue; } colon = strchr(path, ':'); if (colon == NULL) colon = path + strlen(path); assert (colon > path); program = mprintf("%.*s/%s", (int)(colon - path), path, setting); if (program == NULL) return; if (!isregularfile(program) || access(program, X_OK) != 0) { free(program); program = NULL; } if (*colon == ':') path = colon + 1; else path = colon; } } if (program == NULL) return; *program_p = program; } /* check for existance of external programs */ void uncompressions_check(const char *gunzip, const char *bunzip2, const char *unlzma, const char *unxz, const char *lunzip) { search_binary(gunzip, "gunzip", &extern_uncompressors[c_gzip]); search_binary(bunzip2, "bunzip2", &extern_uncompressors[c_bzip2]); search_binary(unlzma, "unlzma", &extern_uncompressors[c_lzma]); search_binary(unxz, "unxz", &extern_uncompressors[c_xz]); search_binary(lunzip, "lunzip", &extern_uncompressors[c_lunzip]); } static inline retvalue builtin_uncompress(const char *compressed, const char *destination, enum compression compression) { struct compressedfile *f; char buffer[4096]; int bytes_read, bytes_written, written; int destfd; int e; retvalue r; r = uncompress_open(&f, compressed, compression); if (!RET_IS_OK(r)) return r; destfd = open(destination, O_WRONLY|O_CREAT|O_EXCL|O_NOCTTY, 0666); if (destfd < 0) { e = errno; fprintf(stderr, "Error %d creating '%s': %s\n", e, destination, strerror(e)); uncompress_abort(f); return RET_ERRNO(e); } do { bytes_read = 
uncompress_read(f, buffer, 4096); if (bytes_read <= 0) break; bytes_written = 0; while (bytes_written < bytes_read) { written = write(destfd, buffer + bytes_written, bytes_read - bytes_written); if (written < 0) { e = errno; fprintf(stderr, "Error %d writing to '%s': %s\n", e, destination, strerror(e)); close(destfd); uncompress_abort(f); return RET_ERRNO(e); } bytes_written += written; } } while (true); r = uncompress_close(f); if (RET_WAS_ERROR(r)) { (void)close(destfd); return r; } if (close(destfd) != 0) { e = errno; fprintf(stderr, "Error %d writing to '%s': %s!\n", e, destination, strerror(e)); return RET_ERROR; } return RET_OK; } static retvalue uncompress_queue_external(enum compression compression, const char *compressed, const char *uncompressed, /*@null@*/finishaction *action, /*@null@*/void *privdata) { struct uncompress_task *t, **t_p; t_p = &tasks; while ((t = (*t_p)) != NULL) t_p = &t->next; t = zNEW(struct uncompress_task); if (FAILEDTOALLOC(t)) return RET_ERROR_OOM; t->compressedfilename = strdup(compressed); t->uncompressedfilename = strdup(uncompressed); if (FAILEDTOALLOC(t->compressedfilename) || FAILEDTOALLOC(t->uncompressedfilename)) { uncompress_task_free(t); return RET_ERROR_OOM; } t->compression = compression; t->callback = action; t->privdata = privdata; *t_p = t; uncompress_start_queued(); return RET_OK; } retvalue uncompress_queue_file(const char *compressed, const char *destination, enum compression compression, finishaction *action, void *privdata) { retvalue r; (void)unlink(destination); if (extern_uncompressors[compression] != NULL) { r = uncompress_queue_external(compression, compressed, destination, action, privdata); if (r != RET_NOTHING) { return r; } if (!uncompression_builtin(compression)) return RET_ERROR; } if (verbose > 1) { fprintf(stderr, "Uncompress '%s' into '%s'...\n", compressed, destination); } assert (uncompression_builtin(compression)); r = builtin_uncompress(compressed, destination, compression); if 
(RET_WAS_ERROR(r)) { (void)unlink(destination); return r; } return action(privdata, compressed, false); } retvalue uncompress_file(const char *compressed, const char *destination, enum compression compression) { retvalue r; /* not allowed within a aptmethod session */ assert (tasks == NULL); (void)unlink(destination); if (uncompression_builtin(compression)) { if (verbose > 1) { fprintf(stderr, "Uncompress '%s' into '%s'...\n", compressed, destination); } r = builtin_uncompress(compressed, destination, compression); } else if (extern_uncompressors[compression] != NULL) { r = uncompress_queue_external(compression, compressed, destination, NULL, NULL); if (r == RET_NOTHING) r = RET_ERROR; if (RET_IS_OK(r)) { /* wait for the child to finish... */ assert (tasks != NULL && tasks->next == NULL); do { int status; pid_t pid; pid = wait(&status); if (pid < 0) { int e = errno; if (interrupted()) { r = RET_ERROR_INTERRUPTED; break; } if (e == EINTR) continue; fprintf(stderr, "Error %d waiting for uncompression child %lu: %s\n", e, (unsigned long)pid, strerror(e)); r = RET_ERRNO(e); } else r = uncompress_checkpid(pid, status); } while (r == RET_NOTHING); } } else { assert ("Impossible uncompress error" == NULL); r = RET_ERROR; } if (RET_WAS_ERROR(r)) { (void)unlink(destination); return r; } return RET_OK; } struct compressedfile { char *filename; enum compression compression; int error; pid_t pid; int fd, infd, pipeinfd; off_t len; struct intermediate_buffer { char *buffer; int ofs; int ready; } intermediate; union { gzFile gz; #ifdef HAVE_LIBBZ2 BZFILE *bz; #endif }; }; retvalue uncompress_open(/*@out@*/struct compressedfile **file_p, const char *filename, enum compression compression) { struct compressedfile *f; int fd, e; retvalue r; f = zNEW(struct compressedfile); if (FAILEDTOALLOC(f)) return RET_ERROR_OOM; f->filename = strdup(filename); if (FAILEDTOALLOC(f->filename)) { free(f); return RET_ERROR_OOM; } f->compression = compression; f->fd = -1; f->infd = -1; f->pipeinfd = 
-1; f->len = -1; switch (compression) { case c_none: f->fd = open(filename, O_RDONLY|O_NOCTTY); if (f->fd < 0) { e = errno; free(f->filename); free(f); // if (e == || e ==) // return RET_NOTHING; fprintf(stderr, "Error %d opening '%s': %s!\n", e, filename, strerror(e)); return RET_ERRNO(e); } *file_p = f; return RET_OK; case c_gzip: f->gz = gzopen(filename, "r"); if (f->gz == NULL) { // TODO: better error message... fprintf(stderr, "Could not read %s\n", filename); free(f->filename); free(f); return RET_ERROR; } *file_p = f; return RET_OK; #ifdef HAVE_LIBBZ2 case c_bzip2: f->bz = BZ2_bzopen(filename, "r"); if (f->bz == NULL) { // TODO: better error message... fprintf(stderr, "Could not read %s\n", filename); free(f->filename); free(f); return RET_ERROR; } *file_p = f; return RET_OK; #endif default: assert (extern_uncompressors[compression] != NULL); } /* call external helper instead */ fd = open(f->filename, O_RDONLY|O_NOCTTY); if (fd < 0) { e = errno; fprintf(stderr, "Error %d opening '%s': %s\n", e, f->filename, strerror(e)); return RET_ERRNO(e); } r = startpipeoutchild(compression, fd, &f->fd, &f->pid); if (RET_WAS_ERROR(r)) return r; *file_p = f; return RET_OK; } static int intermediate_size = 0; retvalue uncompress_fdopen(struct compressedfile **file_p, int fd, off_t len, enum compression compression, int *errno_p, const char **msg_p) { struct compressedfile *f; retvalue r; f = zNEW(struct compressedfile); if (FAILEDTOALLOC(f)) { *errno_p = ENOMEM; *msg_p = "Out of memory"; return RET_ERROR_OOM; } f->filename = NULL; f->compression = compression; f->infd = fd; f->fd = -1; f->pipeinfd = -1; f->len = len; switch (compression) { case c_none: f->fd = fd; f->infd = -1; break; case c_gzip: // TODO: perhaps rather implement your own reading and // uncompression, this way length read cannot be controlled f->gz = gzdopen(dup(fd), "r"); if (f->gz == NULL) { *errno_p = errno; *msg_p = strerror(errno); // TODO: better error message... 
fprintf(stderr, "Error opening internal gz uncompression using zlib...\n"); free(f); return RET_ERROR; } break; #ifdef HAVE_LIBBZ2 case c_bzip2: f->bz = BZ2_bzdopen(dup(fd), "r"); if (f->bz == NULL) { *errno_p = errno; *msg_p = strerror(errno); // TODO: better error message... fprintf(stderr, "Error opening internal bz2 uncompression using libbz2\n"); free(f); return RET_ERROR; } break; #endif default: if (intermediate_size == 0) { /* pipes are guaranteed to swallow a full * page without blocking if poll * tells you can write */ long l = sysconf(_SC_PAGESIZE); if (l <= 0) intermediate_size = 512; else if (l > 4096) intermediate_size = 4096; else intermediate_size = l; } f->intermediate.buffer = malloc(intermediate_size); f->intermediate.ready = 0; f->intermediate.ofs = 0; if (FAILEDTOALLOC(f->intermediate.buffer)) { *errno_p = ENOMEM; *msg_p = "Out of memory"; free(f); return RET_ERROR_OOM; } r = startpipeinoutchild(f->compression, &f->pipeinfd, &f->fd, &f->pid); if (RET_WAS_ERROR(r)) { *errno_p = -EINVAL; *msg_p = "Error starting external uncompressor"; free(f->intermediate.buffer); free(f); return r; } } *file_p = f; return RET_OK; } static inline int pipebackforth(struct compressedfile *file, void *buffer, int size) { /* we have to make sure we only read when things are available and only * write when there is still space in the pipe, otherwise we can end up * in a because we are waiting for the output of a program that cannot * generate output because it needs more input from us first or because * we wait for a program to accept input that waits for us to consume * the output... 
*/ struct pollfd p[2]; ssize_t written; int i; do { p[0].fd = file->pipeinfd; p[0].events = POLLOUT; p[1].fd = file->fd; p[1].events = POLLIN; /* wait till there is something to do */ i = poll(p, 2, -1); if (i < 0) { if (errno == EINTR) continue; file->error = errno; return -1; } if ((p[0].revents & POLLERR) != 0) { file->error = EIO; return -1; } if ((p[0].revents & POLLHUP) != 0) { /* not being able to send when we have something * is an error */ if (file->len > 0 || file->intermediate.ready > 0) { file->error = EIO; return -1; } (void)close(file->pipeinfd); file->pipeinfd = -1; /* wait for the rest */ return read(file->fd, buffer, size); } if ((p[0].revents & POLLOUT) != 0) { struct intermediate_buffer *im = &file->intermediate; if (im->ready < 0) return -1; if (im->ready == 0) { // TODO: check if splice is safe or will create // dead-locks... int isize = intermediate_size; im->ofs = 0; if (file->len >= 0 && isize > file->len) isize = file->len; if (isize == 0) im->ready = 0; else im->ready = read(file->infd, im->buffer + im->ofs, isize); if (im->ready < 0) { file->error = errno; return -1; } if (im->ready == 0) { (void)close(file->pipeinfd); file->pipeinfd = -1; /* wait for the rest */ return read(file->fd, buffer, size); } file->len -= im->ready; } written = write(file->pipeinfd, im->buffer + im->ofs, im->ready); if (written < 0) { file->error = errno; return -1; } im->ofs += written; im->ready -= written; } if ((p[1].revents & POLLIN) != 0) return read(file->fd, buffer, size); } while (true); } int uncompress_read(struct compressedfile *file, void *buffer, int size) { ssize_t s; int i; switch (file->compression) { case c_none: if (file->len == 0) return 0; if (file->len > 0 && size > file->len) size = file->len; s = read(file->fd, buffer, size); if (s < 0) file->error = errno; file->len -= s; return s; case c_gzip: i = gzread(file->gz, buffer, size); file->error = errno; return i; #ifdef HAVE_LIBBZ2 case c_bzip2: i = BZ2_bzread(file->bz, buffer, size); 
file->error = errno; return i; #endif default: if (file->pipeinfd != -1) { /* things more complicated, as perhaps something needs writing first... */ return pipebackforth(file, buffer, size); } s = read(file->fd, buffer, size); if (s < 0) file->error = errno; return s; } } static retvalue uncompress_commonclose(struct compressedfile *file, int *errno_p, const char **msg_p) { retvalue result; const char *msg; int zerror, e; pid_t pid; int status; #define ERRORBUFFERSIZE 100 static char errorbuffer[ERRORBUFFERSIZE]; if (file == NULL) return RET_OK; free(file->intermediate.buffer); switch (file->compression) { case c_none: if (file->error != 0) { *errno_p = file->error; *msg_p = strerror(file->error); return RET_ERRNO(file->error); } else return RET_OK; case c_gzip: file->fd = -1; msg = gzerror(file->gz, &zerror); if (zerror == Z_ERRNO) { *errno_p = file->error; (void)gzclose(file->gz); *msg_p = strerror(file->error); return RET_ERRNO(file->error); } else if (zerror < 0) { *errno_p = -EINVAL; snprintf(errorbuffer, ERRORBUFFERSIZE, "Zlib error %d: %s", zerror, msg); *msg_p = errorbuffer; (void)gzclose(file->gz); return RET_ERROR_Z; } zerror = gzclose(file->gz); if (zerror == Z_ERRNO) { *errno_p = file->error; *msg_p = strerror(file->error); return RET_ERRNO(file->error); } if (zerror < 0) { *errno_p = -EINVAL; snprintf(errorbuffer, ERRORBUFFERSIZE, "Zlib error %d", zerror); *msg_p = errorbuffer; return RET_ERROR_Z; } else return RET_OK; #ifdef HAVE_LIBBZ2 case c_bzip2: file->fd = -1; msg = BZ2_bzerror(file->bz, &zerror); if (zerror < 0) { *errno_p = -EINVAL; snprintf(errorbuffer, ERRORBUFFERSIZE, "libbz2 error %d: %s", zerror, msg); *msg_p = errorbuffer; BZ2_bzclose(file->bz); return RET_ERROR_BZ2; } /* no return value? does this mean no checksums? 
*/ BZ2_bzclose(file->bz); return RET_OK; #endif default: (void)close(file->fd); if (file->pipeinfd != -1) (void)close(file->pipeinfd); file->fd = file->infd; file->infd = -1; result = RET_OK; if (file->pid <= 0) return RET_OK; do { pid = waitpid(file->pid, &status, 0); e = errno; if (interrupted()) { *errno_p = EINTR; *msg_p = "Interrupted"; result = RET_ERROR_INTERRUPTED; } } while (pid == -1 && (e == EINTR || e == EAGAIN)); if (pid == -1) { *errno_p = e; *msg_p = strerror(file->error); return RET_ERRNO(e); } if (WIFEXITED(status)) { if (WEXITSTATUS(status) == 0) return result; else { *errno_p = -EINVAL; snprintf(errorbuffer, ERRORBUFFERSIZE, "%s exited with code %d", extern_uncompressors[file->compression], (int)(WEXITSTATUS(status))); *msg_p = errorbuffer; return RET_ERROR; } } else if (WIFSIGNALED(status) && WTERMSIG(status) != SIGUSR2) { *errno_p = -EINVAL; snprintf(errorbuffer, ERRORBUFFERSIZE, "%s killed by signal %d", extern_uncompressors[file->compression], (int)(WTERMSIG(status))); *msg_p = errorbuffer; return RET_ERROR; } else { *errno_p = -EINVAL; snprintf(errorbuffer, ERRORBUFFERSIZE, "%s failed", extern_uncompressors[file->compression]); *msg_p = errorbuffer; return RET_ERROR; } return result; } /* not reached */ } /* check if there has been an error yet for this stream */ retvalue uncompress_error(struct compressedfile *file) { int e, zerror, status; const char *msg; pid_t pid; if (file == NULL) return RET_NOTHING; switch (file->compression) { case c_none: if (file->error == 0) return RET_OK; break; case c_gzip: msg = gzerror(file->gz, &zerror); if (zerror >= 0) return RET_OK; if (zerror != Z_ERRNO) { fprintf(stderr, "Zlib error %d uncompressing file '%s': %s\n", zerror, file->filename, msg); return RET_ERROR_Z; } break; #ifdef HAVE_LIBBZ2 case c_bzip2: msg = BZ2_bzerror(file->bz, &zerror); if (zerror < 0) { fprintf(stderr, "libbz2 error %d uncompressing file '%s': %s\n", zerror, file->filename, msg); return RET_ERROR_BZ2; } else return RET_OK; 
#endif default: if (file->error != 0) break; if (file->pid <= 0) return RET_OK; pid = waitpid(file->pid, &status, WNOHANG); if (pid < 0) { e = errno; fprintf(stderr, "Error looking for child %lu (a '%s'): %s\n", (long unsigned)file->pid, extern_uncompressors[file->compression], strerror(e)); return RET_ERRNO(e); } if (pid != file->pid) { /* still running */ return RET_OK; } file->pid = -1; if (WIFEXITED(status)) { if (WEXITSTATUS(status) == 0) return RET_OK; else { fprintf(stderr, "%s exited with code %d\n", extern_uncompressors[file->compression], (int)(WEXITSTATUS(status))); return RET_ERROR; } } else if (WIFSIGNALED(status) && WTERMSIG(status) != SIGUSR2) { fprintf(stderr, "%s killed by signal %d\n", extern_uncompressors[file->compression], (int)(WTERMSIG(status))); return RET_ERROR; } else { fprintf(stderr, "%s failed\n", extern_uncompressors[file->compression]); return RET_ERROR; } } /* an error, but which? */ if (file->error != 0) { fprintf(stderr, "Error %d uncompressing file '%s': %s\n", file->error, file->filename, strerror(file->error)); return RET_ERRNO(file->error); } else return RET_ERROR; } void uncompress_abort(struct compressedfile *file) { pid_t pid; int e, status; if (file == NULL) return; switch (file->compression) { case c_none: if (file->fd >= 0) (void)close(file->fd); break; case c_gzip: (void)gzclose(file->gz); break; #ifdef HAVE_LIBBZ2 case c_bzip2: BZ2_bzclose(file->bz); break; #endif default: /* kill before closing, to avoid it getting * a sigpipe */ if (file->pid > 0) kill(file->pid, SIGTERM); if (file->infd >= 0) (void)close(file->infd); if (file->pipeinfd != -1) (void)close(file->pipeinfd); do { pid = waitpid(file->pid, &status, 0); e = errno; if (interrupted()) { break; } } while (pid == -1 && (e == EINTR || e == EAGAIN)); if (pid == -1) break; if (file->fd >= 0) (void)close(file->fd); if (WIFEXITED(status)) { break; } else if (WIFSIGNALED(status) && WTERMSIG(status) != SIGTERM && WTERMSIG(status) != SIGUSR2) { fprintf(stderr, "%s 
killed by signal %d\n", extern_uncompressors[file->compression], (int)(WTERMSIG(status))); } } free(file->filename); free(file); } retvalue uncompress_fdclose(struct compressedfile *file, int *errno_p, const char **msg_p) { retvalue result; if (file == NULL) return RET_OK; assert (file->filename == NULL); result = uncompress_commonclose(file, errno_p, msg_p); free(file); return result; } retvalue uncompress_close(struct compressedfile *file) { const char *msg; retvalue r; int e; if (file == NULL) return RET_OK; assert (file->filename != NULL); r = uncompress_commonclose(file, &e, &msg); if (RET_IS_OK(r)) { if (file->fd >= 0 && close(file->fd) != 0) { e = errno; fprintf(stderr, "Error %d reading from %s: %s!\n", e, file->filename, strerror(e)); } free(file->filename); free(file); return r; } if (file->fd >= 0) (void)close(file->fd); if (e == -EINVAL) { fprintf(stderr, "Error reading from %s: %s!\n", file->filename, msg); } else { fprintf(stderr, "Error %d reading from %s: %s!\n", e, file->filename, msg); } free(file->filename); free(file); return r; } enum compression compression_by_suffix(const char *name, size_t *len_p) { enum compression c; size_t len = *len_p; for (c = c_COUNT - 1 ; c > c_none ; c--) { size_t l = strlen(uncompression_suffix[c]); if (len <= l) continue; if (strncmp(name + len - l, uncompression_suffix[c], l) == 0) { *len_p -= l; return c; } } return c_none; } reprepro-4.13.1/aclocal.m40000644000175100017510000011041712152655325012233 00000000000000# generated automatically by aclocal 1.11.6 -*- Autoconf -*- # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, # 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, # Inc. # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.69],, [m4_warning([this file was generated for autoconf 2.69. You have another version of autoconf. It may work, but is not guaranteed to. If you have problems, you may need to regenerate the build system entirely. To do so, use the procedure documented by the package, typically `autoreconf'.])]) # Copyright (C) 2002, 2003, 2005, 2006, 2007, 2008, 2011 Free Software # Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 1 # AM_AUTOMAKE_VERSION(VERSION) # ---------------------------- # Automake X.Y traces this macro to ensure aclocal.m4 has been # generated from the m4 files accompanying Automake X.Y. # (This private macro should not be called outside this file.) AC_DEFUN([AM_AUTOMAKE_VERSION], [am__api_version='1.11' dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to dnl require some minimum version. Point them to the right macro. m4_if([$1], [1.11.6], [], [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl ]) # _AM_AUTOCONF_VERSION(VERSION) # ----------------------------- # aclocal traces this macro to find the Autoconf version. # This is a private macro too. Using m4_define simplifies # the logic in aclocal, which can simply ignore this definition. m4_define([_AM_AUTOCONF_VERSION], []) # AM_SET_CURRENT_AUTOMAKE_VERSION # ------------------------------- # Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. # This function is AC_REQUIREd by AM_INIT_AUTOMAKE. 
AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], [AM_AUTOMAKE_VERSION([1.11.6])dnl m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl _AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) # AM_AUX_DIR_EXPAND -*- Autoconf -*- # Copyright (C) 2001, 2003, 2005, 2011 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 1 # For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets # $ac_aux_dir to `$srcdir/foo'. In other projects, it is set to # `$srcdir', `$srcdir/..', or `$srcdir/../..'. # # Of course, Automake must honor this variable whenever it calls a # tool from the auxiliary directory. The problem is that $srcdir (and # therefore $ac_aux_dir as well) can be either absolute or relative, # depending on how configure is run. This is pretty annoying, since # it makes $ac_aux_dir quite unusable in subdirectories: in the top # source directory, any form will work fine, but in subdirectories a # relative path needs to be adjusted first. # # $ac_aux_dir/missing # fails when called from a subdirectory if $ac_aux_dir is relative # $top_srcdir/$ac_aux_dir/missing # fails if $ac_aux_dir is absolute, # fails when called from a subdirectory in a VPATH build with # a relative $ac_aux_dir # # The reason of the latter failure is that $top_srcdir and $ac_aux_dir # are both prefixed by $srcdir. In an in-source build this is usually # harmless because $srcdir is `.', but things will broke when you # start a VPATH build or use an absolute $srcdir. # # So we could use something similar to $top_srcdir/$ac_aux_dir/missing, # iff we strip the leading $srcdir from $ac_aux_dir. 
That would be: # am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` # and then we would define $MISSING as # MISSING="\${SHELL} $am_aux_dir/missing" # This will work as long as MISSING is not called from configure, because # unfortunately $(top_srcdir) has no meaning in configure. # However there are other variables, like CC, which are often used in # configure, and could therefore not use this "fixed" $ac_aux_dir. # # Another solution, used here, is to always expand $ac_aux_dir to an # absolute PATH. The drawback is that using absolute paths prevent a # configured tree to be moved without reconfiguration. AC_DEFUN([AM_AUX_DIR_EXPAND], [dnl Rely on autoconf to set up CDPATH properly. AC_PREREQ([2.50])dnl # expand $ac_aux_dir to an absolute path am_aux_dir=`cd $ac_aux_dir && pwd` ]) # AM_CONDITIONAL -*- Autoconf -*- # Copyright (C) 1997, 2000, 2001, 2003, 2004, 2005, 2006, 2008 # Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 9 # AM_CONDITIONAL(NAME, SHELL-CONDITION) # ------------------------------------- # Define a conditional. AC_DEFUN([AM_CONDITIONAL], [AC_PREREQ(2.52)dnl ifelse([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl AC_SUBST([$1_TRUE])dnl AC_SUBST([$1_FALSE])dnl _AM_SUBST_NOTMAKE([$1_TRUE])dnl _AM_SUBST_NOTMAKE([$1_FALSE])dnl m4_define([_AM_COND_VALUE_$1], [$2])dnl if $2; then $1_TRUE= $1_FALSE='#' else $1_TRUE='#' $1_FALSE= fi AC_CONFIG_COMMANDS_PRE( [if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then AC_MSG_ERROR([[conditional "$1" was never defined. Usually this means the macro was only invoked conditionally.]]) fi])]) # Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2009, # 2010, 2011 Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 12 # There are a few dirty hacks below to avoid letting `AC_PROG_CC' be # written in clear, in which case automake, when reading aclocal.m4, # will think it sees a *use*, and therefore will trigger all it's # C support machinery. Also note that it means that autoscan, seeing # CC etc. in the Makefile, will ask for an AC_PROG_CC use... # _AM_DEPENDENCIES(NAME) # ---------------------- # See how the compiler implements dependency checking. # NAME is "CC", "CXX", "GCJ", or "OBJC". # We try a few techniques and use that to set a single cache variable. # # We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was # modified to invoke _AM_DEPENDENCIES(CC); we would have a circular # dependency, and given that the user is not expected to run this macro, # just rely on AC_PROG_CC. AC_DEFUN([_AM_DEPENDENCIES], [AC_REQUIRE([AM_SET_DEPDIR])dnl AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl AC_REQUIRE([AM_MAKE_INCLUDE])dnl AC_REQUIRE([AM_DEP_TRACK])dnl ifelse([$1], CC, [depcc="$CC" am_compiler_list=], [$1], CXX, [depcc="$CXX" am_compiler_list=], [$1], OBJC, [depcc="$OBJC" am_compiler_list='gcc3 gcc'], [$1], UPC, [depcc="$UPC" am_compiler_list=], [$1], GCJ, [depcc="$GCJ" am_compiler_list='gcc3 gcc'], [depcc="$$1" am_compiler_list=]) AC_CACHE_CHECK([dependency style of $depcc], [am_cv_$1_dependencies_compiler_type], [if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named `D' -- because `-MD' means `put the output # in D'. 
rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_$1_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp` fi am__universal=false m4_case([$1], [CC], [case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac], [CXX], [case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac]) for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with # Solaris 8's {/usr,}/bin/sh. touch sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with `-c' and `-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle `-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. 
test "$am__universal" = false || continue ;; nosideeffect) # after this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok `-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_$1_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_$1_dependencies_compiler_type=none fi ]) AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type]) AM_CONDITIONAL([am__fastdep$1], [ test "x$enable_dependency_tracking" != xno \ && test "$am_cv_$1_dependencies_compiler_type" = gcc3]) ]) # AM_SET_DEPDIR # ------------- # Choose a directory name for dependency files. 
# This macro is AC_REQUIREd in _AM_DEPENDENCIES AC_DEFUN([AM_SET_DEPDIR], [AC_REQUIRE([AM_SET_LEADING_DOT])dnl AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl ]) # AM_DEP_TRACK # ------------ AC_DEFUN([AM_DEP_TRACK], [AC_ARG_ENABLE(dependency-tracking, [ --disable-dependency-tracking speeds up one-time build --enable-dependency-tracking do not reject slow dependency extractors]) if test "x$enable_dependency_tracking" != xno; then am_depcomp="$ac_aux_dir/depcomp" AMDEPBACKSLASH='\' am__nodep='_no' fi AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) AC_SUBST([AMDEPBACKSLASH])dnl _AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl AC_SUBST([am__nodep])dnl _AM_SUBST_NOTMAKE([am__nodep])dnl ]) # Generate code to set up dependency tracking. -*- Autoconf -*- # Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008 # Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. #serial 5 # _AM_OUTPUT_DEPENDENCY_COMMANDS # ------------------------------ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], [{ # Autoconf 2.62 quotes --file arguments for eval, but not when files # are listed without --file. Let's play safe and only enable the eval # if we detect the quoting. case $CONFIG_FILES in *\'*) eval set x "$CONFIG_FILES" ;; *) set x $CONFIG_FILES ;; esac shift for mf do # Strip MF so we end up with the name of the file. mf=`echo "$mf" | sed -e 's/:.*$//'` # Check whether this is an Automake generated Makefile or not. # We used to match only the files named `Makefile.in', but # some people rename them; so instead we look at the file content. # Grep'ing the first line is not enough: some people post-process # each Makefile.in and add a new line on top of each file to say so. # Grep'ing the whole file is not good either: AIX grep has a line # limit of 2048, but all sed's we know have understand at least 4000. 
if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then dirpart=`AS_DIRNAME("$mf")` else continue fi # Extract the definition of DEPDIR, am__include, and am__quote # from the Makefile without running `make'. DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` test -z "$DEPDIR" && continue am__include=`sed -n 's/^am__include = //p' < "$mf"` test -z "am__include" && continue am__quote=`sed -n 's/^am__quote = //p' < "$mf"` # When using ansi2knr, U may be empty or an underscore; expand it U=`sed -n 's/^U = //p' < "$mf"` # Find all dependency output files, they are included files with # $(DEPDIR) in their names. We invoke sed twice because it is the # simplest approach to changing $(DEPDIR) to its actual value in the # expansion. for file in `sed -n " s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do # Make sure the directory exists. test -f "$dirpart/$file" && continue fdir=`AS_DIRNAME(["$file"])` AS_MKDIR_P([$dirpart/$fdir]) # echo "creating $dirpart/$file" echo '# dummy' > "$dirpart/$file" done done } ])# _AM_OUTPUT_DEPENDENCY_COMMANDS # AM_OUTPUT_DEPENDENCY_COMMANDS # ----------------------------- # This macro should only be invoked once -- use via AC_REQUIRE. # # This code is only required when automatic dependency tracking # is enabled. FIXME. This creates each `.P' file that we will # need in order to bootstrap the dependency handling code. AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], [AC_CONFIG_COMMANDS([depfiles], [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], [AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"]) ]) # Copyright (C) 1996, 1997, 2000, 2001, 2003, 2005 # Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 8 # AM_CONFIG_HEADER is obsolete. 
It has been replaced by AC_CONFIG_HEADERS. AU_DEFUN([AM_CONFIG_HEADER], [AC_CONFIG_HEADERS($@)]) # Do all the work for Automake. -*- Autoconf -*- # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, # 2005, 2006, 2008, 2009 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 16 # This macro actually does too much. Some checks are only needed if # your package does certain things. But this isn't really a big deal. # AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) # AM_INIT_AUTOMAKE([OPTIONS]) # ----------------------------------------------- # The call with PACKAGE and VERSION arguments is the old style # call (pre autoconf-2.50), which is being phased out. PACKAGE # and VERSION should now be passed to AC_INIT and removed from # the call to AM_INIT_AUTOMAKE. # We support both call styles for the transition. After # the next Automake release, Autoconf can make the AC_INIT # arguments mandatory, and then we can depend on a new Autoconf # release and drop the old call support. AC_DEFUN([AM_INIT_AUTOMAKE], [AC_PREREQ([2.62])dnl dnl Autoconf wants to disallow AM_ names. We explicitly allow dnl the ones we care about. m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl AC_REQUIRE([AC_PROG_INSTALL])dnl if test "`cd $srcdir && pwd`" != "`pwd`"; then # Use -I$(srcdir) only when $(srcdir) != ., so that make's output # is not polluted with repeated "-I." 
AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl # test to see if srcdir already configured if test -f $srcdir/config.status; then AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) fi fi # test whether we have cygpath if test -z "$CYGPATH_W"; then if (cygpath --version) >/dev/null 2>/dev/null; then CYGPATH_W='cygpath -w' else CYGPATH_W=echo fi fi AC_SUBST([CYGPATH_W]) # Define the identity of the package. dnl Distinguish between old-style and new-style calls. m4_ifval([$2], [m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl AC_SUBST([PACKAGE], [$1])dnl AC_SUBST([VERSION], [$2])], [_AM_SET_OPTIONS([$1])dnl dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. m4_if(m4_ifdef([AC_PACKAGE_NAME], 1)m4_ifdef([AC_PACKAGE_VERSION], 1), 11,, [m4_fatal([AC_INIT should be called with package and version arguments])])dnl AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl _AM_IF_OPTION([no-define],, [AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE", [Name of package]) AC_DEFINE_UNQUOTED(VERSION, "$VERSION", [Version number of package])])dnl # Some tools Automake needs. AC_REQUIRE([AM_SANITY_CHECK])dnl AC_REQUIRE([AC_ARG_PROGRAM])dnl AM_MISSING_PROG(ACLOCAL, aclocal-${am__api_version}) AM_MISSING_PROG(AUTOCONF, autoconf) AM_MISSING_PROG(AUTOMAKE, automake-${am__api_version}) AM_MISSING_PROG(AUTOHEADER, autoheader) AM_MISSING_PROG(MAKEINFO, makeinfo) AC_REQUIRE([AM_PROG_INSTALL_SH])dnl AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl AC_REQUIRE([AM_PROG_MKDIR_P])dnl # We need awk for the "check" target. The system "awk" is bad on # some platforms. 
AC_REQUIRE([AC_PROG_AWK])dnl AC_REQUIRE([AC_PROG_MAKE_SET])dnl AC_REQUIRE([AM_SET_LEADING_DOT])dnl _AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], [_AM_PROG_TAR([v7])])]) _AM_IF_OPTION([no-dependencies],, [AC_PROVIDE_IFELSE([AC_PROG_CC], [_AM_DEPENDENCIES(CC)], [define([AC_PROG_CC], defn([AC_PROG_CC])[_AM_DEPENDENCIES(CC)])])dnl AC_PROVIDE_IFELSE([AC_PROG_CXX], [_AM_DEPENDENCIES(CXX)], [define([AC_PROG_CXX], defn([AC_PROG_CXX])[_AM_DEPENDENCIES(CXX)])])dnl AC_PROVIDE_IFELSE([AC_PROG_OBJC], [_AM_DEPENDENCIES(OBJC)], [define([AC_PROG_OBJC], defn([AC_PROG_OBJC])[_AM_DEPENDENCIES(OBJC)])])dnl ]) _AM_IF_OPTION([silent-rules], [AC_REQUIRE([AM_SILENT_RULES])])dnl dnl The `parallel-tests' driver may need to know about EXEEXT, so add the dnl `am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This macro dnl is hooked onto _AC_COMPILER_EXEEXT early, see below. AC_CONFIG_COMMANDS_PRE(dnl [m4_provide_if([_AM_COMPILER_EXEEXT], [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl ]) dnl Hook into `_AC_COMPILER_EXEEXT' early to learn its expansion. Do not dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further dnl mangled by Autoconf and run in a shell conditional statement. m4_define([_AC_COMPILER_EXEEXT], m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])]) # When config.status generates a header, we must update the stamp-h file. # This file resides in the same directory as the config header # that is generated. The stamp files are numbered to have different names. # Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the # loop where config.status creates the headers, so we can generate # our stamp files there. AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], [# Compute $1's index in $config_headers. 
_am_arg=$1 _am_stamp_count=1 for _am_header in $config_headers :; do case $_am_header in $_am_arg | $_am_arg:* ) break ;; * ) _am_stamp_count=`expr $_am_stamp_count + 1` ;; esac done echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) # Copyright (C) 2001, 2003, 2005, 2008, 2011 Free Software Foundation, # Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 1 # AM_PROG_INSTALL_SH # ------------------ # Define $install_sh. AC_DEFUN([AM_PROG_INSTALL_SH], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl if test x"${install_sh}" != xset; then case $am_aux_dir in *\ * | *\ *) install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; *) install_sh="\${SHELL} $am_aux_dir/install-sh" esac fi AC_SUBST(install_sh)]) # Copyright (C) 2003, 2005 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 2 # Check whether the underlying file-system supports filenames # with a leading dot. For instance MS-DOS doesn't. AC_DEFUN([AM_SET_LEADING_DOT], [rm -rf .tst 2>/dev/null mkdir .tst 2>/dev/null if test -d .tst; then am__leading_dot=. else am__leading_dot=_ fi rmdir .tst 2>/dev/null AC_SUBST([am__leading_dot])]) # Add --enable-maintainer-mode option to configure. -*- Autoconf -*- # From Jim Meyering # Copyright (C) 1996, 1998, 2000, 2001, 2002, 2003, 2004, 2005, 2008, # 2011 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 5 # AM_MAINTAINER_MODE([DEFAULT-MODE]) # ---------------------------------- # Control maintainer-specific portions of Makefiles. 
# Default is to disable them, unless `enable' is passed literally. # For symmetry, `disable' may be passed as well. Anyway, the user # can override the default with the --enable/--disable switch. AC_DEFUN([AM_MAINTAINER_MODE], [m4_case(m4_default([$1], [disable]), [enable], [m4_define([am_maintainer_other], [disable])], [disable], [m4_define([am_maintainer_other], [enable])], [m4_define([am_maintainer_other], [enable]) m4_warn([syntax], [unexpected argument to AM@&t@_MAINTAINER_MODE: $1])]) AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles]) dnl maintainer-mode's default is 'disable' unless 'enable' is passed AC_ARG_ENABLE([maintainer-mode], [ --][am_maintainer_other][-maintainer-mode am_maintainer_other make rules and dependencies not useful (and sometimes confusing) to the casual installer], [USE_MAINTAINER_MODE=$enableval], [USE_MAINTAINER_MODE=]m4_if(am_maintainer_other, [enable], [no], [yes])) AC_MSG_RESULT([$USE_MAINTAINER_MODE]) AM_CONDITIONAL([MAINTAINER_MODE], [test $USE_MAINTAINER_MODE = yes]) MAINT=$MAINTAINER_MODE_TRUE AC_SUBST([MAINT])dnl ] ) AU_DEFUN([jm_MAINTAINER_MODE], [AM_MAINTAINER_MODE]) # Check to see how 'make' treats includes. -*- Autoconf -*- # Copyright (C) 2001, 2002, 2003, 2005, 2009 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 4 # AM_MAKE_INCLUDE() # ----------------- # Check to see how make treats includes. AC_DEFUN([AM_MAKE_INCLUDE], [am_make=${MAKE-make} cat > confinc << 'END' am__doit: @echo this is the am__doit target .PHONY: am__doit END # If we don't find an include directive, just comment out the code. AC_MSG_CHECKING([for style of include used by $am_make]) am__include="#" am__quote= _am_result=none # First try GNU make style include. echo "include confinc" > confmf # Ignore all kinds of additional output from `make'. 
case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=include am__quote= _am_result=GNU ;; esac # Now try BSD make style include. if test "$am__include" = "#"; then echo '.include "confinc"' > confmf case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=.include am__quote="\"" _am_result=BSD ;; esac fi AC_SUBST([am__include]) AC_SUBST([am__quote]) AC_MSG_RESULT([$_am_result]) rm -f confinc confmf ]) # Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- # Copyright (C) 1997, 1999, 2000, 2001, 2003, 2004, 2005, 2008 # Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 6 # AM_MISSING_PROG(NAME, PROGRAM) # ------------------------------ AC_DEFUN([AM_MISSING_PROG], [AC_REQUIRE([AM_MISSING_HAS_RUN]) $1=${$1-"${am_missing_run}$2"} AC_SUBST($1)]) # AM_MISSING_HAS_RUN # ------------------ # Define MISSING if not defined so far and test if it supports --run. # If it does, set am_missing_run to use it, otherwise, to nothing. AC_DEFUN([AM_MISSING_HAS_RUN], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl AC_REQUIRE_AUX_FILE([missing])dnl if test x"${MISSING+set}" != xset; then case $am_aux_dir in *\ * | *\ *) MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; *) MISSING="\${SHELL} $am_aux_dir/missing" ;; esac fi # Use eval to expand $SHELL if eval "$MISSING --run true"; then am_missing_run="$MISSING --run " else am_missing_run= AC_MSG_WARN([`missing' script is too old or missing]) fi ]) # Copyright (C) 2003, 2004, 2005, 2006, 2011 Free Software Foundation, # Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 1 # AM_PROG_MKDIR_P # --------------- # Check for `mkdir -p'. 
AC_DEFUN([AM_PROG_MKDIR_P], [AC_PREREQ([2.60])dnl AC_REQUIRE([AC_PROG_MKDIR_P])dnl dnl Automake 1.8 to 1.9.6 used to define mkdir_p. We now use MKDIR_P, dnl while keeping a definition of mkdir_p for backward compatibility. dnl @MKDIR_P@ is magic: AC_OUTPUT adjusts its value for each Makefile. dnl However we cannot define mkdir_p as $(MKDIR_P) for the sake of dnl Makefile.ins that do not define MKDIR_P, so we do our own dnl adjustment using top_builddir (which is defined more often than dnl MKDIR_P). AC_SUBST([mkdir_p], ["$MKDIR_P"])dnl case $mkdir_p in [[\\/$]]* | ?:[[\\/]]*) ;; */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;; esac ]) # Helper functions for option handling. -*- Autoconf -*- # Copyright (C) 2001, 2002, 2003, 2005, 2008, 2010 Free Software # Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 5 # _AM_MANGLE_OPTION(NAME) # ----------------------- AC_DEFUN([_AM_MANGLE_OPTION], [[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) # _AM_SET_OPTION(NAME) # -------------------- # Set option NAME. Presently that only means defining a flag for this option. AC_DEFUN([_AM_SET_OPTION], [m4_define(_AM_MANGLE_OPTION([$1]), 1)]) # _AM_SET_OPTIONS(OPTIONS) # ------------------------ # OPTIONS is a space-separated list of Automake options. AC_DEFUN([_AM_SET_OPTIONS], [m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) # _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) # ------------------------------------------- # Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. AC_DEFUN([_AM_IF_OPTION], [m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) # Check to make sure that the build environment is sane. -*- Autoconf -*- # Copyright (C) 1996, 1997, 2000, 2001, 2003, 2005, 2008 # Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 5 # AM_SANITY_CHECK # --------------- AC_DEFUN([AM_SANITY_CHECK], [AC_MSG_CHECKING([whether build environment is sane]) # Just in case sleep 1 echo timestamp > conftest.file # Reject unsafe characters in $srcdir or the absolute working directory # name. Accept space and tab only in the latter. am_lf=' ' case `pwd` in *[[\\\"\#\$\&\'\`$am_lf]]*) AC_MSG_ERROR([unsafe absolute working directory name]);; esac case $srcdir in *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*) AC_MSG_ERROR([unsafe srcdir value: `$srcdir']);; esac # Do `set' in a subshell so we don't clobber the current shell's # arguments. Must try -L first in case configure is actually a # symlink; some systems play weird games with the mod time of symlinks # (eg FreeBSD returns the mod time of the symlink's containing # directory). if ( set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` if test "$[*]" = "X"; then # -L didn't work. set X `ls -t "$srcdir/configure" conftest.file` fi rm -f conftest.file if test "$[*]" != "X $srcdir/configure conftest.file" \ && test "$[*]" != "X conftest.file $srcdir/configure"; then # If neither matched, then we have a broken ls. This can happen # if, for instance, CONFIG_SHELL is bash and it inherits a # broken ls alias from the environment. This has actually # happened. Such a system could not be considered "sane". AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken alias in your environment]) fi test "$[2]" = conftest.file ) then # Ok. : else AC_MSG_ERROR([newly created file is older than distributed files! Check your system clock]) fi AC_MSG_RESULT(yes)]) # Copyright (C) 2001, 2003, 2005, 2011 Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 1 # AM_PROG_INSTALL_STRIP # --------------------- # One issue with vendor `install' (even GNU) is that you can't # specify the program used to strip binaries. This is especially # annoying in cross-compiling environments, where the build's strip # is unlikely to handle the host's binaries. # Fortunately install-sh will honor a STRIPPROG variable, so we # always use install-sh in `make install-strip', and initialize # STRIPPROG with the value of the STRIP variable (set by the user). AC_DEFUN([AM_PROG_INSTALL_STRIP], [AC_REQUIRE([AM_PROG_INSTALL_SH])dnl # Installed binaries are usually stripped using `strip' when the user # run `make install-strip'. However `strip' might not be the right # tool to use in cross-compilation environments, therefore Automake # will honor the `STRIP' environment variable to overrule this program. dnl Don't test for $cross_compiling = yes, because it might be `maybe'. if test "$cross_compiling" != no; then AC_CHECK_TOOL([STRIP], [strip], :) fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" AC_SUBST([INSTALL_STRIP_PROGRAM])]) # Copyright (C) 2006, 2008, 2010 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 3 # _AM_SUBST_NOTMAKE(VARIABLE) # --------------------------- # Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in. # This macro is traced by Automake. AC_DEFUN([_AM_SUBST_NOTMAKE]) # AM_SUBST_NOTMAKE(VARIABLE) # -------------------------- # Public sister of _AM_SUBST_NOTMAKE. AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) # Check how to create a tarball. -*- Autoconf -*- # Copyright (C) 2004, 2005, 2012 Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 2 # _AM_PROG_TAR(FORMAT) # -------------------- # Check how to create a tarball in format FORMAT. # FORMAT should be one of `v7', `ustar', or `pax'. # # Substitute a variable $(am__tar) that is a command # writing to stdout a FORMAT-tarball containing the directory # $tardir. # tardir=directory && $(am__tar) > result.tar # # Substitute a variable $(am__untar) that extract such # a tarball read from stdin. # $(am__untar) < result.tar AC_DEFUN([_AM_PROG_TAR], [# Always define AMTAR for backward compatibility. Yes, it's still used # in the wild :-( We should find a proper way to deprecate it ... AC_SUBST([AMTAR], ['$${TAR-tar}']) m4_if([$1], [v7], [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'], [m4_case([$1], [ustar],, [pax],, [m4_fatal([Unknown tar format])]) AC_MSG_CHECKING([how to create a $1 tar archive]) # Loop over all known methods to create a tar archive until one works. _am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none' _am_tools=${am_cv_prog_tar_$1-$_am_tools} # Do not fold the above two line into one, because Tru64 sh and # Solaris sh will not grok spaces in the rhs of `-'. for _am_tool in $_am_tools do case $_am_tool in gnutar) for _am_tar in tar gnutar gtar; do AM_RUN_LOG([$_am_tar --version]) && break done am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"' am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"' am__untar="$_am_tar -xf -" ;; plaintar) # Must skip GNU tar: if it does not support --format= it doesn't create # ustar tarball either. 
(tar --version) >/dev/null 2>&1 && continue am__tar='tar chf - "$$tardir"' am__tar_='tar chf - "$tardir"' am__untar='tar xf -' ;; pax) am__tar='pax -L -x $1 -w "$$tardir"' am__tar_='pax -L -x $1 -w "$tardir"' am__untar='pax -r' ;; cpio) am__tar='find "$$tardir" -print | cpio -o -H $1 -L' am__tar_='find "$tardir" -print | cpio -o -H $1 -L' am__untar='cpio -i -H $1 -d' ;; none) am__tar=false am__tar_=false am__untar=false ;; esac # If the value was cached, stop now. We just wanted to have am__tar # and am__untar set. test -n "${am_cv_prog_tar_$1}" && break # tar/untar a dummy directory, and stop if the command works rm -rf conftest.dir mkdir conftest.dir echo GrepMe > conftest.dir/file AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar]) rm -rf conftest.dir if test -s conftest.tar; then AM_RUN_LOG([$am__untar /dev/null 2>&1 && break fi done rm -rf conftest.dir AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool]) AC_MSG_RESULT([$am_cv_prog_tar_$1])]) AC_SUBST([am__tar]) AC_SUBST([am__untar]) ]) # _AM_PROG_TAR m4_include([acinclude.m4]) reprepro-4.13.1/rredtool.c0000644000175100017510000011013412152651661012364 00000000000000/* This file is part of "reprepro" * Copyright (C) 2009 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "globals.h" #include "error.h" #include "mprintf.h" #include "sha1.h" #include "filecntl.h" #include "rredpatch.h" #include "time.h" /* apt had a bug, http://bugs.debian.org/545694 * to fail if a patch file only prepends text. * This if fixed in apt version 0.7.24, * so this workaround can be disabled when older apt * versions are no longer expected (i.e. sqeeze is oldstable) */ #define APT_545694_WORKAROUND /* apt always wants to apply the last patch * (see http://bugs.debian.org/545699), so * always create an fake-empty patch last */ #define APT_545699_WORKAROUND static int max_patch_count = 20; static const struct option options[] = { {"version", no_argument, NULL, 'V'}, {"help", no_argument, NULL, 'h'}, {"debug", no_argument, NULL, 'D'}, {"merge", no_argument, NULL, 'm'}, {"max-patch-count", required_argument, NULL, 'N'}, {"reprepro-hook", no_argument, NULL, 'R'}, {"patch", no_argument, NULL, 'p'}, {NULL, 0, NULL, 0} }; static void usage(FILE *f) { fputs( "rredtool: handle the restricted subset of ed patches\n" " as used by Debian {Packages,Sources}.diff files.\n" "Syntax:\n" " rredtool \n" " update .diff directory (to be called from reprepro)\n" " rredtool --merge \n" " merge patches into one patch\n" " rredtool --patch \n" " apply patches to file\n", f); } static const char tab[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; struct hash { char sha1[2*SHA1_DIGEST_SIZE+1]; off_t len; }; /* we only need sha1 sum and we need it a lot, so implement a "only sha1" */ static void finalize_sha1(struct SHA1_Context *context, off_t len, /*@out@*/struct hash *hash){ char *sha1; 
unsigned char sha1buffer[SHA1_DIGEST_SIZE]; int i; SHA1Final(context, sha1buffer); sha1 = hash->sha1; for (i = 0 ; i < SHA1_DIGEST_SIZE ; i++) { *(sha1++) = tab[sha1buffer[i] >> 4]; *(sha1++) = tab[sha1buffer[i] & 0xF]; } *sha1 = '\0'; hash->len = len; } static retvalue gen_sha1sum(const char *fullfilename, /*@out@*/struct hash *hash) { struct SHA1_Context context; static const size_t bufsize = 16384; unsigned char *buffer = malloc(bufsize); ssize_t sizeread; int e, i; int infd; struct stat s; if (FAILEDTOALLOC(buffer)) return RET_ERROR_OOM; SHA1Init(&context); infd = open(fullfilename, O_RDONLY); if (infd < 0) { e = errno; if ((e == EACCES || e == ENOENT) && !isregularfile(fullfilename)) { free(buffer); return RET_NOTHING; } fprintf(stderr, "Error %d opening '%s': %s\n", e, fullfilename, strerror(e)); free(buffer); return RET_ERRNO(e); } i = fstat(infd, &s); if (i != 0) { e = errno; fprintf(stderr, "Error %d getting information about '%s': %s\n", e, fullfilename, strerror(e)); (void)close(infd); free(buffer); return RET_ERRNO(e); } do { sizeread = read(infd, buffer, bufsize); if (sizeread < 0) { e = errno; fprintf(stderr, "Error %d while reading %s: %s\n", e, fullfilename, strerror(e)); free(buffer); (void)close(infd); return RET_ERRNO(e); } SHA1Update(&context, buffer, (size_t)sizeread); } while (sizeread > 0); free(buffer); i = close(infd); if (i != 0) { e = errno; fprintf(stderr, "Error %d reading %s: %s\n", e, fullfilename, strerror(e)); return RET_ERRNO(e); } finalize_sha1(&context, s.st_size, hash); return RET_OK; } struct fileandhash { FILE *f; off_t len; struct SHA1_Context context; }; static void hash_and_write(const void *data, size_t len, void *p) { struct fileandhash *fh = p; fwrite(data, len, 1, fh->f); SHA1Update(&fh->context, data, len); fh->len += len; } #define DATEFMT "%Y-%m-%d-%H%M.%S" #define DATELEN (4 + 1 + 2 + 1 + 2 + 1 + 2 + 2 + 1 + 2) static retvalue get_date_string(char *date, size_t max) { struct tm *tm; time_t current_time; size_t len; 
assert (max == DATELEN + 1); current_time = time(NULL); if (current_time == ((time_t) -1)) { int e = errno; fprintf(stderr, "rredtool: Error %d from time: %s\n", e, strerror(e)); return RET_ERROR; } tm = gmtime(¤t_time); if (tm == NULL) { int e = errno; fprintf(stderr, "rredtool: Error %d from gmtime: %s\n", e, strerror(e)); return RET_ERROR; } errno = 0; len = strftime(date, max, DATEFMT, tm); if (len == 0 || len != DATELEN) { fprintf(stderr, "rredtool: internal problem calling strftime!\n"); return RET_ERROR; } return RET_OK; } static int create_temporary_file(void) { const char *tempdir; char *filename; int fd; tempdir = getenv("TMPDIR"); if (tempdir == NULL) tempdir = getenv("TEMPDIR"); if (tempdir == NULL) tempdir = "/tmp"; filename = mprintf("%s/XXXXXX", tempdir); if (FAILEDTOALLOC(filename)) { errno = ENOMEM; return -1; } #ifdef HAVE_MKOSTEMP fd = mkostemp(filename, 0600); #else #ifdef HAVE_MKSTEMP fd = mkstemp(filename); #else #error Need mkostemp or mkstemp #endif #endif if (fd >= 0) unlink(filename); free(filename); return fd; } static retvalue execute_into_file(const char * const argv[], /*@out@*/int *fd_p, int expected_exit_code) { pid_t child, pid; int fd, status; fd = create_temporary_file(); if (fd < 0) { int e = errno; fprintf(stderr, "Error %d creating temporary file: %s\n", e, strerror(e)); return RET_ERRNO(e); } child = fork(); if (child == (pid_t)-1) { int e = errno; fprintf(stderr, "rredtool: Error %d forking: %s\n", e, strerror(e)); return RET_ERRNO(e); } if (child == 0) { int e, i; do { i = dup2(fd, 1); e = errno; } while (i < 0 && (e == EINTR || e == EBUSY)); if (i < 0) { fprintf(stderr, "rredtool: Error %d in dup2(%d, 0): %s\n", e, fd, strerror(e)); raise(SIGUSR1); exit(EXIT_FAILURE); } close(fd); closefrom(3); execvp(argv[0], (char * const *)argv); fprintf(stderr, "rredtool: Error %d executing %s: %s\n", e, argv[0], strerror(e)); raise(SIGUSR1); exit(EXIT_FAILURE); } do { pid = waitpid(child, &status, 0); } while (pid == (pid_t)-1 && errno 
== EINTR); if (pid == (pid_t)-1) { int e = errno; fprintf(stderr, "rredtool: Error %d waiting for %s child %lu: %s!\n", e, argv[0], (unsigned long)child, strerror(e)); (void)close(fd); return RET_ERROR; } if (WIFEXITED(status) && WEXITSTATUS(status) == expected_exit_code) { if (lseek(fd, 0, SEEK_SET) == (off_t)-1) { int e = errno; fprintf(stderr, "rredtool: Error %d rewinding temporary file to start: %s!\n", e, strerror(e)); (void)close(fd); return RET_ERROR; } *fd_p = fd; return RET_OK; } close(fd); if (WIFEXITED(status)) { fprintf(stderr, "rredtool: %s returned with unexpected exit code %d\n", argv[0], (int)(WEXITSTATUS(status))); return RET_ERROR; } if (WIFSIGNALED(status)) { if (WTERMSIG(status) != SIGUSR1) fprintf(stderr, "rredtool: %s killed by signal %d\n", argv[0], (int)(WTERMSIG(status))); return RET_ERROR; } fprintf(stderr, "rredtool: %s child dies mysteriously (status=%d)\n", argv[0], status); return RET_ERROR; } struct old_index_file { struct old_patch { struct old_patch *next, *prev; char *basefilename; /* part until the + in the name */ char *nameprefix; struct hash hash; } *first, *last; struct hash hash; }; static void old_index_done(/*@only@*/struct old_index_file *o) { while (o->first != NULL) { struct old_patch *p = o->first; o->first = p->next; free(p->basefilename); free(p->nameprefix); free(p); } o->last = NULL; } static retvalue make_prefix_uniq(struct old_patch *o) { struct old_patch *p, *last = NULL; const char *lookfor = o->nameprefix; /* make the prefix uniq by extending all previous occurences * of this prefix with an additional +. 
As this might already * have happened, this has to be possibly repeated */ while (true) { for (p = o->prev ; p != NULL ; p = p->prev) { if (p == last) continue; if (strcmp(p->nameprefix, lookfor) == 0) { char *h; size_t l = strlen(p->nameprefix); h = realloc(p->nameprefix, l+2); if (FAILEDTOALLOC(h)) return RET_ERROR_OOM; h[l] = '+' ; h[l+1] = '\0'; p->nameprefix = h; lookfor = h; last = p; break; } } if (p == NULL) return RET_OK; } } static inline retvalue parse_old_index(char *p, size_t len, struct old_index_file *oldindex) { char *q, *e = p + len; off_t filesize; struct old_patch *o; retvalue r; /* This is only supposed to parse files it wrote itself * (otherwise not having merged patches would most likely break * things in ugly ways), so parsing it can be very strict and easy: */ #define checkorfail(val) if (e - p < (intptr_t)strlen(val) || memcmp(p, val, strlen(val)) != 0) return RET_NOTHING; else { p += strlen(val); } checkorfail("SHA1-Current: "); q = strchr(p, '\n'); if (q != NULL && q - p > 2 * SHA1_DIGEST_SIZE) q = memchr(p, ' ', q - p); if (q == NULL || q - p != 2 * SHA1_DIGEST_SIZE) return RET_NOTHING; memcpy(oldindex->hash.sha1, p, 2 * SHA1_DIGEST_SIZE); oldindex->hash.sha1[2 * SHA1_DIGEST_SIZE] = '\0'; p = q; if (*p == ' ') { p++; filesize = 0; while (*p >= '0' && *p <= '9') { filesize = 10 * filesize + (*p - '0'); p++; } oldindex->hash.len = filesize; } else oldindex->hash.len = (off_t)-1; checkorfail("\nSHA1-History:\n"); while (*p == ' ') { p++; q = p; while ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f')) { p++; } if (p - q != 2 * SHA1_DIGEST_SIZE) return RET_NOTHING; o = zNEW(struct old_patch); if (FAILEDTOALLOC(o)) return RET_ERROR_OOM; o->prev = oldindex->last; oldindex->last = o; if (o->prev == NULL) oldindex->first = o; else o->prev->next = o; memcpy(o->hash.sha1, q, 2 * SHA1_DIGEST_SIZE); while (*p == ' ') p++; if (*p < '0' || *p > '9') return RET_NOTHING; filesize = 0; while (*p >= '0' && *p <= '9') { filesize = 10 * filesize + (*p - 
'0'); p++; } o->hash.len = filesize; if (*p != ' ') return RET_NOTHING; p++; q = strchr(p, '\n'); if (q == NULL) return RET_NOTHING; o->basefilename = strndup(p, (size_t)(q-p)); if (FAILEDTOALLOC(o->basefilename)) return RET_ERROR_OOM; p = q + 1; q = strchr(o->basefilename, '+'); if (q == NULL) o->nameprefix = mprintf("%s+", o->basefilename); else o->nameprefix = strndup(o->basefilename, 1 + (size_t)(q - o->basefilename)); if (FAILEDTOALLOC(o->nameprefix)) return RET_ERROR_OOM; r = make_prefix_uniq(o); if (RET_WAS_ERROR(r)) return r; /* allow pseudo-empty fake patches */ if (memcmp(o->hash.sha1, oldindex->hash.sha1, 2 * SHA1_DIGEST_SIZE) == 0) continue; // TODO: verify filename and create prefix... } checkorfail("SHA1-Patches:\n"); o = oldindex->first; while (*p == ' ') { p++; if (o == NULL) return RET_NOTHING; q = p; while ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f')) { p++; } if (p - q != 2 * SHA1_DIGEST_SIZE) return RET_NOTHING; while (*p == ' ') p++; if (*p < '0' || *p > '9') return RET_NOTHING; while (*p >= '0' && *p <= '9') { p++; } if (*p != ' ') return RET_NOTHING; p++; q = strchr(p, '\n'); if (q == NULL) return RET_NOTHING; if (strncmp(o->basefilename, p, (size_t)(q-p)) != 0 || o->basefilename[q-p] != '\0') return RET_NOTHING; p = q + 1; o = o->next; } checkorfail("X-Patch-Precedence: merged\n"); if (*p != '\0' || p != e) return RET_NOTHING; // TODO: check for dangerous stuff (like ../ in basename) // TODO: ignore patches where the filename is missing? 
return RET_OK; #undef checkorfail } static retvalue read_old_index(const char *fullfilename, /*@out@*/struct old_index_file *oldindex) { int fd, i; char *buffer; size_t buffersize = 102400, available = 0; ssize_t bytes_read; retvalue r; setzero(struct old_index_file, oldindex); if (!isregularfile(fullfilename)) return RET_NOTHING; fd = open(fullfilename, O_RDONLY); if (fd < 0) { int e = errno; fprintf(stderr, "rredtool: Error %d opening '%s': %s\n", e, fullfilename, strerror(e)); return RET_ERRNO(e); } /* index file should not be that big, so read into memory as a whole */ buffer = malloc(buffersize); if (FAILEDTOALLOC(buffer)) { close(fd); return RET_ERROR_OOM; } do { bytes_read = read(fd, buffer + available, buffersize - available - 1); if (bytes_read < 0) { int e = errno; fprintf(stderr, "rredtool: Error %d reading '%s': %s\n", e, fullfilename, strerror(e)); (void)close(fd); free(buffer); return RET_ERRNO(e); } assert ((size_t)bytes_read < buffersize - available); available += bytes_read; if (available + 1 >= buffersize) { fprintf(stderr, "rredtool: Ridicilous long '%s' file!\n", fullfilename); (void)close(fd); free(buffer); return RET_ERROR; } } while (bytes_read > 0); i = close(fd); if (i != 0) { int e = errno; fprintf(stderr, "rredtool: Error %d reading '%s': %s\n", e, fullfilename, strerror(e)); free(buffer); return RET_ERRNO(e); } buffer[available] = '\0'; r = parse_old_index(buffer, available, oldindex); free(buffer); if (r == RET_NOTHING) { /* wrong format, most likely a left over file */ fprintf(stderr, "rredtool: File '%s' does not look like created by rredtool, ignoring!\n", fullfilename); old_index_done(oldindex); setzero(struct old_index_file, oldindex); return RET_NOTHING; } if (RET_WAS_ERROR(r)) { old_index_done(oldindex); setzero(struct old_index_file, oldindex); return r; } return RET_OK; } struct patch { struct patch *next; char *basefilename; size_t basefilename_len; char *fullfilename; struct hash hash, from; }; static void patches_free(struct 
patch *r) { while (r != NULL) { struct patch *n = r->next; free(r->basefilename); if (r->fullfilename != NULL) { (void)unlink(r->fullfilename); free(r->fullfilename); } free(r); r = n; } } static retvalue new_diff_file(struct patch **root_p, const char *directory, const char *relfilename, const char *since, const char date[DATELEN+1], struct modification *r) { struct patch *p; int i, status, fd, pipefds[2], tries = 3; pid_t child, pid; retvalue result; struct fileandhash fh; p = zNEW(struct patch); if (FAILEDTOALLOC(p)) return RET_ERROR_OOM; if (since == NULL) since = ""; p->basefilename = mprintf("%s%s", since, date); if (FAILEDTOALLOC(p->basefilename)) { patches_free(p); return RET_ERROR_OOM; } p->basefilename_len = strlen(p->basefilename); p->fullfilename = mprintf("%s/%s.diff/%s.gz.new", directory, relfilename, p->basefilename); if (FAILEDTOALLOC(p->fullfilename)) { patches_free(p); return RET_ERROR_OOM; } /* create the file */ while (tries-- > 0) { int e; fd = open(p->fullfilename, O_CREAT|O_EXCL|O_NOCTTY|O_WRONLY, 0666); if (fd >= 0) break; e = errno; if (e == EEXIST && tries > 0) unlink(p->fullfilename); else { fprintf(stderr, "rredtool: Error %d creating '%s': %s\n", e, p->fullfilename, strerror(e)); return RET_ERROR; } } assert (fd > 0); /* start an child to compress connected via a pipe */ i = pipe(pipefds); assert (pipefds[0] > 0); if (i != 0) { int e = errno; fprintf(stderr, "rredtool: Error %d creating pipe: %s\n", e, strerror(e)); unlink(p->fullfilename); return RET_ERROR; } child = fork(); if (child == (pid_t)-1) { int e = errno; fprintf(stderr, "rredtool: Error %d forking: %s\n", e, strerror(e)); unlink(p->fullfilename); return RET_ERROR; } if (child == 0) { int e; close(pipefds[1]); do { i = dup2(pipefds[0], 0); e = errno; } while (i < 0 && (e == EINTR || e == EBUSY)); if (i < 0) { fprintf(stderr, "rredtool: Error %d in dup2(%d, 0): %s\n", e, pipefds[0], strerror(e)); raise(SIGUSR1); exit(EXIT_FAILURE); } do { i = dup2(fd, 1); e = errno; } while (i 
< 0 && (e == EINTR || e == EBUSY)); if (i < 0) { fprintf(stderr, "rredtool: Error %d in dup2(%d, 0): %s\n", e, fd, strerror(e)); raise(SIGUSR1); exit(EXIT_FAILURE); } close(pipefds[0]); close(fd); closefrom(3); execlp("gzip", "gzip", "-9", (char*)NULL); fprintf(stderr, "rredtool: Error %d executing gzip: %s\n", e, strerror(e)); raise(SIGUSR1); exit(EXIT_FAILURE); } close(pipefds[0]); close(fd); /* send the data to the child */ fh.f = fdopen(pipefds[1], "w"); if (fh.f == NULL) { int e = errno; fprintf(stderr, "rredtool: Error %d fdopen'ing write end of pipe to compressor: %s\n", e, strerror(e)); close(pipefds[1]); unlink(p->fullfilename); patches_free(p); kill(child, SIGTERM); waitpid(child, NULL, 0); return RET_ERROR; } SHA1Init(&fh.context); fh.len = 0; modification_printaspatch(&fh, r, hash_and_write); result = RET_OK; i = ferror(fh.f); if (i != 0) { fprintf(stderr, "rredtool: Error sending data to gzip!\n"); (void)fclose(fh.f); result = RET_ERROR; } else { i = fclose(fh.f); if (i != 0) { int e = errno; fprintf(stderr, "rredtool: Error %d sending data to gzip: %s!\n", e, strerror(e)); result = RET_ERROR; } } do { pid = waitpid(child, &status, 0); } while (pid == (pid_t)-1 && errno == EINTR); if (pid == (pid_t)-1) { int e = errno; fprintf(stderr, "rredtool: Error %d waiting for gzip child %lu: %s!\n", e, (unsigned long)child, strerror(e)); return RET_ERROR; } if (WIFEXITED(status) && WEXITSTATUS(status) == 0) { if (RET_IS_OK(result)) { finalize_sha1(&fh.context, fh.len, &p->hash); p->next = *root_p; *root_p = p; } return result; } unlink(p->fullfilename); patches_free(p); if (WIFEXITED(status)) { fprintf(stderr, "rredtool: gzip returned with non-zero exit code %d\n", (int)(WEXITSTATUS(status))); return RET_ERROR; } if (WIFSIGNALED(status)) { fprintf(stderr, "rredtool: gzip killed by signal %d\n", (int)(WTERMSIG(status))); return RET_ERROR; } fprintf(stderr, "rredtool: gzip child dies mysteriously (status=%d)\n", status); return RET_ERROR; } static retvalue 
write_new_index(const char *newindexfilename, const struct hash *newhash, const struct patch *root) { int tries, fd, i; const struct patch *p; tries = 2; while (tries > 0) { errno = 0; fd = open(newindexfilename, O_WRONLY|O_CREAT|O_EXCL|O_NOCTTY, 0666); if (fd >= 0) break; if (errno == EINTR) continue; tries--; if (errno != EEXIST) break; unlink(newindexfilename); } if (fd < 0) { int e = errno; fprintf(stderr, "Error %d creating '%s': %s\n", e, newindexfilename, strerror(e)); return RET_ERROR; } i = dprintf(fd, "SHA1-Current: %s %lld\n" "SHA1-History:\n", newhash->sha1, (long long)newhash->len); for (p = root ; i >= 0 && p != NULL ; p = p->next) { i = dprintf(fd, " %s %7ld %s\n", p->from.sha1, (long int)p->from.len, p->basefilename); } if (i >= 0) i = dprintf(fd, "SHA1-Patches:\n"); for (p = root ; i >= 0 && p != NULL ; p = p->next) { i = dprintf(fd, " %s %7ld %s\n", p->hash.sha1, (long int)p->hash.len, p->basefilename); } if (i >= 0) i = dprintf(fd, "X-Patch-Precedence: merged\n"); if (i >= 0) { i = close(fd); fd = -1; } if (i < 0) { int e = errno; fprintf(stderr, "Error %d writing to '%s': %s\n", e, newindexfilename, strerror(e)); if (fd >= 0) (void)close(fd); unlink(newindexfilename); return RET_ERRNO(e); } return RET_OK; } static void remove_old_diffs(const char *relfilename, const char *diffdirectory, const char *indexfilename, const struct patch *keep) { struct dirent *de; DIR *dir; const struct patch *p; if (!isdirectory(diffdirectory)) return; dir = opendir(diffdirectory); if (dir == NULL) return; while ((de = readdir(dir)) != NULL) { size_t len = strlen(de->d_name); /* special rule for that */ if (len == 5 && strcmp(de->d_name, "Index") == 0) continue; /* if it does not end with .gz or .gz.new, ignore */ if (len >= 4 && memcmp(de->d_name + len - 4, ".new", 4) == 0) len -= 4; if (len < 3) continue; if (memcmp(de->d_name + len - 3, ".gz", 3) != 0) continue; len -= 3; /* do not mark files to be deleted we still need: */ for (p = keep ; p != NULL ; p = 
p->next) { if (p->basefilename_len != len) continue; if (memcmp(p->basefilename, de->d_name, len) == 0) break; } if (p != NULL) continue; /* otherwise, tell reprepro this file is no longer needed: */ dprintf(3, "%s.diff/%s.tobedeleted\n", relfilename, de->d_name); } closedir(dir); if (isregularfile(indexfilename) && keep == NULL) dprintf(3, "%s.diff/Index.tobedeleted\n", relfilename); } static retvalue ed_diff(const char *oldfullfilename, const char *newfullfilename, /*@out@*/struct rred_patch **rred_p) { const char *argv[6]; int fd; retvalue r; argv[0] = "diff"; argv[1] = "--ed"; argv[2] = "--minimal"; argv[3] = oldfullfilename; argv[4] = newfullfilename; argv[5] = NULL; r = execute_into_file(argv, &fd, 1); if (RET_WAS_ERROR(r)) return r; return patch_loadfd("", fd, -1, rred_p); } static retvalue read_old_patch(const char *directory, const char *relfilename, const struct old_patch *o, /*@out@*/struct rred_patch **rred_p) { retvalue r; const char *args[4]; char *filename; int fd; filename = mprintf("%s/%s.diff/%s.gz", directory, relfilename, o->basefilename); if (!isregularfile(filename)) return RET_NOTHING; args[0] = "gunzip"; args[1] = "-c"; args[2] = filename; args[3] = NULL; r = execute_into_file(args, &fd, 0); free(filename); if (RET_WAS_ERROR(r)) return r; return patch_loadfd("", fd, -1, rred_p); } static retvalue handle_diff(const char *directory, const char *mode, const char *relfilename, const char *fullfilename, const char *fullnewfilename, const char *diffdirectory, const char *indexfilename, const char *newindexfilename) { retvalue r; int patch_count; struct hash oldhash, newhash; char date[DATELEN + 1]; struct patch *p, *root = NULL; enum {mode_OLD, mode_NEW, mode_CHANGE} m; struct rred_patch *new_rred_patch; struct modification *new_modifications; struct old_index_file old_index; struct old_patch *o; #if defined(APT_545694_WORKAROUND) || defined(APT_545699_WORKAROUND) char *line; struct modification *newdup; #endif if (strcmp(mode, "new") == 0) m = 
mode_NEW; else if (strcmp(mode, "old") == 0) m = mode_OLD; else if (strcmp(mode, "change") == 0) m = mode_CHANGE; else { usage(stderr); fprintf(stderr, "Error: 4th argument to rredtool in .diff maintenance mode must be 'new', 'old' or 'change'!\n"); return RET_ERROR; } if (m == mode_NEW) { /* There is no old file, nothing to do. * except checking for old diff files * and marking them to be deleted */ remove_old_diffs(relfilename, diffdirectory, indexfilename, NULL); return RET_OK; } r = get_date_string(date, sizeof(date)); if (RET_WAS_ERROR(r)) return r; assert (m == mode_OLD || m == mode_CHANGE); /* calculate sha1 checksum of old file */ r = gen_sha1sum(fullfilename, &oldhash); if (r == RET_NOTHING) { fprintf(stderr, "rredtool: expected file '%s' is missing!\n", fullfilename); r = RET_ERROR; } if (RET_WAS_ERROR(r)) return r; if (m == mode_CHANGE) { /* calculate sha1 checksum of the new file */ r = gen_sha1sum(fullnewfilename, &newhash); if (r == RET_NOTHING) { fprintf(stderr, "rredtool: expected file '%s' is missing!\n", fullnewfilename); r = RET_ERROR; } if (RET_WAS_ERROR(r)) return r; /* if new == old, nothing to do */ if (newhash.len == oldhash.len && strcmp(newhash.sha1, oldhash.sha1) == 0) { m = mode_OLD; } } if (oldhash.len == 0 || (m == mode_CHANGE && newhash.len == 0)) { /* Old or new file empty. treat as mode_NEW. 
* (checked here instead of letting later * more general optimisations catch this as * this garantees there are enough lines to * make patches longer to work around apt bugs, * and because no need to parse Index if we want to delete * it anyway) */ remove_old_diffs(relfilename, diffdirectory, indexfilename, NULL); return RET_OK; } r = read_old_index(indexfilename, &old_index); if (RET_WAS_ERROR(r)) return r; /* ignore old Index file if it does not match the old file */ if (old_index.hash.len != (off_t)-1 && old_index.hash.len != oldhash.len) { old_index_done(&old_index); memset(&old_index, 0, sizeof(old_index)); } if (memcmp(old_index.hash.sha1, oldhash.sha1, 2*SHA1_DIGEST_SIZE) != 0) { old_index_done(&old_index); memset(&old_index, 0, sizeof(old_index)); } if (m == mode_OLD) { /* this index file did not change. * keep old or delete if not current */ if (old_index.hash.sha1[0] != '\0') { for (o = old_index.first ; o != NULL ; o = o->next) dprintf(3, "%s.diff/%s.gz.keep\n", relfilename, o->basefilename); dprintf(3, "%s.diff/Index\n", relfilename); } else { remove_old_diffs(relfilename, diffdirectory, indexfilename, NULL); } old_index_done(&old_index); return RET_OK; } assert (m == mode_CHANGE); mkdir(diffdirectory, 0777); #ifdef APT_545699_WORKAROUND /* create a fake diff to work around http://bugs.debian.org/545699 */ newdup = NULL; r = modification_addstuff(fullnewfilename, &newdup, &line); if (RET_WAS_ERROR(r)) { modification_freelist(newdup); old_index_done(&old_index); return r; } /* save this compressed and store it's sha1sum */ r = new_diff_file(&root, directory, relfilename, "aptbug545699+", date, newdup); modification_freelist(newdup); free(line); if (RET_WAS_ERROR(r)) { old_index_done(&old_index); return r; } root->from = newhash; #endif /* create new diff calling diff --ed */ r = ed_diff(fullfilename, fullnewfilename, &new_rred_patch); if (RET_WAS_ERROR(r)) { old_index_done(&old_index); patches_free(root); return r; } new_modifications = 
patch_getmodifications(new_rred_patch); assert (new_modifications != NULL); #ifdef APT_545694_WORKAROUND newdup = modification_dup(new_modifications); if (RET_WAS_ERROR(r)) { modification_freelist(new_modifications); patch_free(new_rred_patch); old_index_done(&old_index); patches_free(root); return r; } r = modification_addstuff(fullnewfilename, &newdup, &line); if (RET_WAS_ERROR(r)) { modification_freelist(newdup); modification_freelist(new_modifications); patch_free(new_rred_patch); old_index_done(&old_index); patches_free(root); return r; } #endif /* save this compressed and store it's sha1sum */ r = new_diff_file(&root, directory, relfilename, NULL, date, #ifdef APT_545694_WORKAROUND newdup); modification_freelist(newdup); free(line); #else new_modifications); #endif // TODO: special handling of misparsing to cope with that better? if (RET_WAS_ERROR(r)) { modification_freelist(new_modifications); patch_free(new_rred_patch); old_index_done(&old_index); patches_free(root); return r; } root->from = oldhash; /* if the diff is bigger than the new file, * there is no point in not getting the full file. * And as in all but extremly strange situations this * also means all older patches will get bigger when merged, * do not even bother to calculate them but remove all. 
*/ if (root->hash.len > newhash.len) { modification_freelist(new_modifications); patch_free(new_rred_patch); old_index_done(&old_index); patches_free(root); remove_old_diffs(relfilename, diffdirectory, indexfilename, NULL); return RET_OK; } patch_count = 1; /* merge this into the old patches */ for (o = old_index.last ; o != NULL ; o = o->prev) { struct rred_patch *old_rred_patch; struct modification *d, *merged; /* ignore old and new hash, to filter out old * pseudo-empty patches and to reduce the number * of patches in case the file is reverted to an * earlier state */ if (memcmp(o->hash.sha1, old_index.hash.sha1, sizeof(old_index.hash.sha1)) == 0) continue; if (memcmp(o->hash.sha1, newhash.sha1, sizeof(newhash.sha1)) == 0) continue; /* limit number of patches * (Index needs to be downloaded, too) */ if (patch_count >= max_patch_count) continue; /* empty files only make problems. * If you have a non-empty file with the sha1sum of an empty * one: Congratulations */ if (strcmp(o->hash.sha1, "da39a3ee5e6b4b0d3255bfef95601890afd80709") == 0) continue; r = read_old_patch(directory, relfilename, o, &old_rred_patch); if (r == RET_NOTHING) continue; // TODO: special handling of misparsing to cope with that better? 
if (RET_WAS_ERROR(r)) { modification_freelist(new_modifications); patch_free(new_rred_patch); old_index_done(&old_index); patches_free(root); return r; } d = modification_dup(new_modifications); if (RET_WAS_ERROR(r)) { patch_free(old_rred_patch); patch_free(new_rred_patch); old_index_done(&old_index); patches_free(root); return r; } r = combine_patches(&merged, patch_getmodifications(old_rred_patch), d); if (RET_WAS_ERROR(r)) { modification_freelist(new_modifications); patch_free(old_rred_patch); patch_free(new_rred_patch); old_index_done(&old_index); patches_free(root); return r; } if (merged == NULL) { /* this should never happen as the sha1sum should * already be the same, but better safe than sorry */ patch_free(old_rred_patch); continue; } #ifdef APT_545694_WORKAROUND r = modification_addstuff(fullnewfilename, &merged, &line); if (RET_WAS_ERROR(r)) { modification_freelist(merged); patch_free(old_rred_patch); modification_freelist(new_modifications); patch_free(new_rred_patch); old_index_done(&old_index); patches_free(root); return r; } #endif r = new_diff_file(&root, directory, relfilename, o->nameprefix, date, merged); modification_freelist(merged); #ifdef APT_545694_WORKAROUND free(line); #endif patch_free(old_rred_patch); if (RET_WAS_ERROR(r)) { modification_freelist(new_modifications); patch_free(new_rred_patch); old_index_done(&old_index); patches_free(root); return r; } root->from = o->hash; /* remove patches that are bigger than the new file */ if (root->hash.len >= newhash.len) { struct patch *n; n = root; root = n->next; n->next = NULL; patches_free(n); } patch_count++; } modification_freelist(new_modifications); patch_free(new_rred_patch); old_index_done(&old_index); assert (root != NULL); #ifdef APT_545699_WORKAROUND assert (root->next != NULL); #endif /* write new Index file */ r = write_new_index(newindexfilename, &newhash, root); if (RET_WAS_ERROR(r)) { patches_free(root); return r; } /* tell reprepro to remove all no longer needed files */ 
remove_old_diffs(relfilename, diffdirectory, indexfilename, root); /* tell reprepro to move those files to their final place * and include the Index in the Release file */ for (p = root ; p != NULL ; p = p->next) { /* the trailing . means add but do not put in Release */ dprintf(3, "%s.diff/%s.gz.new.\n", relfilename, p->basefilename); /* no longer delete: */ free(p->fullfilename); p->fullfilename = NULL; } dprintf(3, "%s.diff/Index.new\n", relfilename); patches_free(root); return RET_OK; } static retvalue handle_diff_dir(const char *args[4]) { const char *directory = args[0]; const char *mode = args[3]; const char *relfilename = args[2]; const char *relnewfilename = args[1]; char *fullfilename, *fullnewfilename; char *diffdirectory; char *indexfilename; char *newindexfilename; retvalue r; fullfilename = mprintf("%s/%s", directory, relfilename); fullnewfilename = mprintf("%s/%s", directory, relnewfilename); if (FAILEDTOALLOC(fullfilename) || FAILEDTOALLOC(fullnewfilename)) { free(fullfilename); free(fullnewfilename); return RET_ERROR_OOM; } diffdirectory = mprintf("%s.diff", fullfilename); indexfilename = mprintf("%s.diff/Index", fullfilename); newindexfilename = mprintf("%s.diff/Index.new", fullfilename); if (FAILEDTOALLOC(diffdirectory) || FAILEDTOALLOC(indexfilename) || FAILEDTOALLOC(newindexfilename)) { free(diffdirectory); free(indexfilename); free(newindexfilename); free(fullfilename); free(fullnewfilename); return RET_ERROR_OOM; } r = handle_diff(directory, mode, relfilename, fullfilename, fullnewfilename, diffdirectory, indexfilename, newindexfilename); free(diffdirectory); free(indexfilename); free(newindexfilename); free(fullfilename); free(fullnewfilename); return r; } static void write_to_file(const void *data, size_t len, void *to) { FILE *f = to; fwrite(data, len, 1, f); } int main(int argc, const char *argv[]) { struct rred_patch *patches[argc]; struct modification *m; retvalue r; bool mergemode = false; bool patchmode = false; bool repreprohook = 
false; int i, count; const char *sourcename; int debug = 0; while ((i = getopt_long(argc, (char**)argv, "+hVDmpR", options, NULL)) != -1) { switch (i) { case 'h': usage(stdout); return EXIT_SUCCESS; case 'V': printf( "rred-tool from " PACKAGE_NAME " version " PACKAGE_VERSION); return EXIT_SUCCESS; case 'D': debug++; break; case 'm': mergemode = 1; break; case 'p': patchmode = 1; break; case 'N': max_patch_count = atoi(optarg); break; case 'R': repreprohook = 1; break; case '?': default: return EXIT_FAILURE; } } if (repreprohook && mergemode) { fprintf(stderr, "Cannot do --reprepro-hook and --merge at the same time!\n"); return EXIT_FAILURE; } if (repreprohook && patchmode) { fprintf(stderr, "Cannot do --reprepro-hook and --patch at the same time!\n"); return EXIT_FAILURE; } if (repreprohook || (!mergemode && !patchmode)) { if (optind + 4 != argc) { usage(stderr); return EXIT_FAILURE; } r = handle_diff_dir(argv + optind); if (r == RET_ERROR_OOM) { fputs("Out of memory!\n", stderr); } if (RET_WAS_ERROR(r)) return EXIT_FAILURE; return EXIT_SUCCESS; } i = optind; if (!mergemode) { if (i >= argc) { fprintf(stderr, "Not enough arguments!\n"); return EXIT_FAILURE; } sourcename = argv[i++]; } else { SETBUTNOTUSED( sourcename = NULL; ) } if (mergemode && patchmode) { fprintf(stderr, "Cannot do --merge and --patch at the same time!\n"); return EXIT_FAILURE; } count = 0; while (i < argc) { r = patch_load(argv[i], -1, &patches[count]); if (RET_IS_OK(r)) count++; if (RET_WAS_ERROR(r)) { if (r == RET_ERROR_OOM) fputs("Out of memory!\n", stderr); else fputs("Aborting...\n", stderr); return EXIT_FAILURE; } i++; } if (count <= 0) { fprintf(stderr, "Not enough patches for operation...\n"); return EXIT_FAILURE; } m = patch_getmodifications(patches[0]); for (i = 1; i < count ; i++) { struct modification *a = patch_getmodifications(patches[i]); if (debug) { fputs("--------RESULT SO FAR--------\n", stderr); modification_printaspatch(stderr, m, write_to_file); fputs("--------TO BE MERGED 
WITH-----\n", stderr); modification_printaspatch(stderr, a, write_to_file); fputs("-------------END--------------\n", stderr); } r = combine_patches(&m, m, a); if (RET_WAS_ERROR(r)) { for (i = 0 ; i < count ; i++) { patch_free(patches[i]); } if (r == RET_ERROR_OOM) fputs("Out of memory!\n", stderr); else fputs("Aborting...\n", stderr); return EXIT_FAILURE; } } r = RET_OK; if (mergemode) { modification_printaspatch(stdout, m, write_to_file); } else { r = patch_file(stdout, sourcename, m); } if (ferror(stdout)) { fputs("Error writing to stdout!\n", stderr); r = RET_ERROR; } modification_freelist(m); for (i = 0 ; i < count ; i++) patch_free(patches[i]); if (r == RET_ERROR_OOM) fputs("Out of memory!\n", stderr); if (RET_WAS_ERROR(r)) return EXIT_FAILURE; return EXIT_SUCCESS; } reprepro-4.13.1/checkindeb.c0000644000175100017510000002715512152651661012623 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003,2004,2005,2006,2007,2009,2012 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "ignore.h" #include "filecntl.h" #include "strlist.h" #include "checksums.h" #include "names.h" #include "checkindeb.h" #include "reference.h" #include "binaries.h" #include "files.h" #include "guesscomponent.h" #include "tracking.h" #include "override.h" #include "hooks.h" /* This file includes the code to include binaries, i.e. to create the chunk for the Packages.gz-file and to put it in the various databases. Things to do with .deb's checkin by hand: (by comparison with apt-ftparchive) - extract the control file (that's the hard part -> extractcontrol.c ) - check for Package, Version, Architecture, Maintainer, Description - apply overwrite if neccesary (section, priority and perhaps maintainer). - add Size, MD5sum, Filename, Priority, Section - remove Status (warning if existant?) - check for Optional-field and reject then.. 
*/ struct debpackage { /* things to be set by deb_read: */ struct deb_headers deb; /* things that will still be NULL then: */ component_t component; /* with deb_calclocations: */ const char *filekey; struct strlist filekeys; }; void deb_free(/*@only@*/struct debpackage *pkg) { if (pkg != NULL) { binaries_debdone(&pkg->deb); if (pkg->filekey != NULL) strlist_done(&pkg->filekeys); } free(pkg); } /* read the data from a .deb, make some checks and extract some data */ static retvalue deb_read(/*@out@*/struct debpackage **pkg, const char *filename, bool needssourceversion) { retvalue r; struct debpackage *deb; deb = zNEW(struct debpackage); if (FAILEDTOALLOC(deb)) return RET_ERROR_OOM; r = binaries_readdeb(&deb->deb, filename, needssourceversion); if (RET_IS_OK(r)) r = properpackagename(deb->deb.name); if (RET_IS_OK(r)) r = propersourcename(deb->deb.source); if (RET_IS_OK(r) && needssourceversion) r = properversion(deb->deb.sourceversion); if (RET_IS_OK(r)) r = properversion(deb->deb.version); if (RET_WAS_ERROR(r)) { deb_free(deb); return r; } *pkg = deb; return RET_OK; } static retvalue deb_preparelocation(struct debpackage *pkg, component_t forcecomponent, const struct atomlist *forcearchitectures, const char *forcesection, const char *forcepriority, packagetype_t packagetype, struct distribution *distribution, const struct overridedata **oinfo_ptr, const char *debfilename){ const struct atomlist *components; const struct overridefile *binoverride; const struct overridedata *oinfo; retvalue r; if (packagetype == pt_udeb) { binoverride = distribution->overrides.udeb; components = &distribution->udebcomponents; } else { binoverride = distribution->overrides.deb; components = &distribution->components; } oinfo = override_search(binoverride, pkg->deb.name); *oinfo_ptr = oinfo; if (forcesection == NULL) { forcesection = override_get(oinfo, SECTION_FIELDNAME); } if (forcepriority == NULL) { forcepriority = override_get(oinfo, PRIORITY_FIELDNAME); } if 
(!atom_defined(forcecomponent)) { const char *fc; fc = override_get(oinfo, "$Component"); if (fc != NULL) { forcecomponent = component_find(fc); if (!atom_defined(forcecomponent)) { fprintf(stderr, "Unparseable component '%s' in $Component override of '%s'\n", fc, pkg->deb.name); return RET_ERROR; } } } if (forcesection != NULL) { free(pkg->deb.section); pkg->deb.section = strdup(forcesection); if (FAILEDTOALLOC(pkg->deb.section)) { return RET_ERROR_OOM; } } if (forcepriority != NULL) { free(pkg->deb.priority); pkg->deb.priority = strdup(forcepriority); if (FAILEDTOALLOC(pkg->deb.priority)) { return RET_ERROR_OOM; } } if (pkg->deb.section == NULL) { fprintf(stderr, "No section given for '%s', skipping.\n", pkg->deb.name); return RET_ERROR; } if (pkg->deb.priority == NULL) { fprintf(stderr, "No priority given for '%s', skipping.\n", pkg->deb.name); return RET_ERROR; } if (strcmp(pkg->deb.section, "unknown") == 0 && verbose >= 0) { fprintf(stderr, "Warning: strange section '%s'!\n", pkg->deb.section); } /* decide where it has to go */ r = guess_component(distribution->codename, components, pkg->deb.name, pkg->deb.section, forcecomponent, &pkg->component); if (RET_WAS_ERROR(r)) return r; if (verbose > 0 && !atom_defined(forcecomponent)) { fprintf(stderr, "%s: component guessed as '%s'\n", debfilename, atoms_components[pkg->component]); } /* some sanity checks: */ if (forcearchitectures != NULL && pkg->deb.architecture != architecture_all && !atomlist_in(forcearchitectures, pkg->deb.architecture)) { fprintf(stderr, "Cannot add '%s', as it is architecture '%s' and you specified to only include ", debfilename, atoms_architectures[pkg->deb.architecture]); atomlist_fprint(stderr, at_architecture, forcearchitectures); fputs(".\n", stderr); return RET_ERROR; } else if (pkg->deb.architecture != architecture_all && !atomlist_in(&distribution->architectures, pkg->deb.architecture)) { (void)fprintf(stderr, "Error looking at '%s': '%s' is not one of the valid architectures: '", 
debfilename, atoms_architectures[pkg->deb.architecture]); (void)atomlist_fprint(stderr, at_architecture, &distribution->architectures); (void)fputs("'\n", stderr); return RET_ERROR; } if (!atomlist_in(components, pkg->component)) { fprintf(stderr, "Error looking at %s': Would be placed in unavailable component '%s'!\n", debfilename, atoms_components[pkg->component]); /* this cannot be ignored * as there is not data structure available */ return RET_ERROR; } r = binaries_calcfilekeys(pkg->component, &pkg->deb, packagetype, &pkg->filekeys); if (RET_WAS_ERROR(r)) return r; pkg->filekey = pkg->filekeys.values[0]; return RET_OK; } retvalue deb_prepare(/*@out@*/struct debpackage **deb, component_t forcecomponent, architecture_t forcearchitecture, const char *forcesection, const char *forcepriority, packagetype_t packagetype, struct distribution *distribution, const char *debfilename, const char * const givenfilekey, const struct checksums * checksums, const struct strlist *allowed_binaries, const char *expectedsourcepackage, const char *expectedsourceversion){ retvalue r; struct debpackage *pkg; const struct overridedata *oinfo; char *control; struct atomlist forcearchitectures; assert (givenfilekey != NULL); assert (checksums != NULL); assert (allowed_binaries != NULL); assert (expectedsourcepackage != NULL); assert (expectedsourceversion != NULL); /* First taking a closer look in the file: */ r = deb_read(&pkg, debfilename, true); if (RET_WAS_ERROR(r)) { return r; } if (!strlist_in(allowed_binaries, pkg->deb.name) && !IGNORING(surprisingbinary, "'%s' has packagename '%s' not listed in the .changes file!\n", debfilename, pkg->deb.name)) { deb_free(pkg); return RET_ERROR; } if (strcmp(pkg->deb.source, expectedsourcepackage) != 0) { /* this cannot be ignored easily, as it determines * the directory this file is stored into */ fprintf(stderr, "'%s' lists source package '%s', but .changes says it is '%s'!\n", debfilename, pkg->deb.source, expectedsourcepackage); 
deb_free(pkg); return RET_ERROR; } if (strcmp(pkg->deb.sourceversion, expectedsourceversion) != 0 && !IGNORING(wrongsourceversion, "'%s' lists source version '%s', but .changes says it is '%s'!\n", debfilename, pkg->deb.sourceversion, expectedsourceversion)) { deb_free(pkg); return RET_ERROR; } forcearchitectures.count = 1; forcearchitectures.size = 1; forcearchitectures.atoms = &forcearchitecture; r = deb_preparelocation(pkg, forcecomponent, &forcearchitectures, forcesection, forcepriority, packagetype, distribution, &oinfo, debfilename); if (RET_WAS_ERROR(r)) { deb_free(pkg); return r; } if (strcmp(givenfilekey, pkg->filekey) != 0) { fprintf(stderr, "Name mismatch: .changes indicates '%s', but the file itself says '%s'!\n", givenfilekey, pkg->filekey); deb_free(pkg); return RET_ERROR; } /* Prepare everything that can be prepared beforehand */ r = binaries_complete(&pkg->deb, pkg->filekey, checksums, oinfo, pkg->deb.section, pkg->deb.priority, &control); if (RET_WAS_ERROR(r)) { deb_free(pkg); return r; } free(pkg->deb.control); pkg->deb.control = control; *deb = pkg; return RET_OK; } retvalue deb_addprepared(const struct debpackage *pkg, const struct atomlist *forcearchitectures, packagetype_t packagetype, struct distribution *distribution, struct trackingdata *trackingdata) { return binaries_adddeb(&pkg->deb, forcearchitectures, packagetype, distribution, trackingdata, pkg->component, &pkg->filekeys, pkg->deb.control); } /* insert the given .deb into the mirror in in the * putting things with architecture of "all" into architectures> (and also * causing error, if it is not one of them otherwise) * if component is NULL, guessing it from the section. 
*/ retvalue deb_add(component_t forcecomponent, const struct atomlist *forcearchitectures, const char *forcesection, const char *forcepriority, packagetype_t packagetype, struct distribution *distribution, const char *debfilename, int delete, /*@null@*/trackingdb tracks) { struct debpackage *pkg; retvalue r; struct trackingdata trackingdata; const struct overridedata *oinfo; char *control; struct checksums *checksums; causingfile = debfilename; r = deb_read(&pkg, debfilename, tracks != NULL); if (RET_WAS_ERROR(r)) { return r; } r = deb_preparelocation(pkg, forcecomponent, forcearchitectures, forcesection, forcepriority, packagetype, distribution, &oinfo, debfilename); if (RET_WAS_ERROR(r)) { deb_free(pkg); return r; } r = files_preinclude(debfilename, pkg->filekey, &checksums); if (RET_WAS_ERROR(r)) { deb_free(pkg); return r; } /* Prepare everything that can be prepared beforehand */ r = binaries_complete(&pkg->deb, pkg->filekey, checksums, oinfo, pkg->deb.section, pkg->deb.priority, &control); checksums_free(checksums); if (RET_WAS_ERROR(r)) { deb_free(pkg); return r; } free(pkg->deb.control); pkg->deb.control = control; if (tracks != NULL) { assert(pkg->deb.sourceversion != NULL); r = trackingdata_summon(tracks, pkg->deb.source, pkg->deb.sourceversion, &trackingdata); if (RET_WAS_ERROR(r)) { deb_free(pkg); return r; } } r = binaries_adddeb(&pkg->deb, forcearchitectures, packagetype, distribution, (tracks!=NULL)?&trackingdata:NULL, pkg->component, &pkg->filekeys, pkg->deb.control); RET_UPDATE(distribution->status, r); deb_free(pkg); if (tracks != NULL) { retvalue r2; r2 = trackingdata_finish(tracks, &trackingdata); RET_ENDUPDATE(r, r2); } if (RET_IS_OK(r) && delete >= D_MOVE) { deletefile(debfilename); } else if (r == RET_NOTHING && delete >= D_DELETE) deletefile(debfilename); return r; } reprepro-4.13.1/pull.h0000644000175100017510000000170412152651661011515 00000000000000#ifndef REPREPRO_PULLS_H #define REPREPRO_PULLS_H #ifndef REPREPRO_ERROR_H #include 
"error.h" #warning "What's hapening here?" #endif #ifndef REPREPRO_RELEASE_H #include "release.h" #endif #ifndef REPREPRO_DISTRIBUTION_H #include "distribution.h" #endif #ifndef REPREPRO_STRLIST_H #include "strlist.h" #endif struct pull_rule; struct pull_distribution; retvalue pull_getrules(/*@out@*/struct pull_rule **); void pull_freerules(/*@only@*/struct pull_rule *p); void pull_freedistributions(/*@only@*/struct pull_distribution *p); retvalue pull_prepare(struct distribution *, struct pull_rule *, bool fast, /*@null@*/const struct atomlist */*components*/,/*@null@*/const struct atomlist */*architectures*/,/*@null@*/const struct atomlist */*packagetypes*/, /*@out@*/struct pull_distribution **); retvalue pull_update(struct pull_distribution *); retvalue pull_checkupdate(struct pull_distribution *); retvalue pull_dumpupdate(struct pull_distribution *); #endif reprepro-4.13.1/trackingt.h0000644000175100017510000000155312152651661012531 00000000000000#ifndef REPREPRO_TRACKINGT_H #define REPREPRO_TRACKINGT_H enum filetype { ft_ALL_BINARY='a', ft_ARCH_BINARY='b', ft_CHANGES = 'c', ft_LOG='l', ft_SOURCE='s', ft_XTRA_DATA='x'}; struct trackedpackage { char *sourcename; char *sourceversion; struct strlist filekeys; int *refcounts; enum filetype *filetypes; struct { bool isnew; bool deleted; } flags; }; typedef struct s_tracking *trackingdb; struct trackingdata { /*@temp@*/trackingdb tracks; struct trackedpackage *pkg; /*@null@*/ struct trackingdata_remember { /*@null@*/struct trackingdata_remember *next; char *name; char *version; } *remembered; }; struct distribution; typedef retvalue tracking_foreach_ro_action(struct distribution *, const struct trackedpackage *); retvalue tracking_foreach_ro(struct distribution *, tracking_foreach_ro_action *); #endif /*REPREPRO_TRACKINGT_H*/ reprepro-4.13.1/signature_p.h0000644000175100017510000000045212152651661013060 00000000000000#ifndef REPREPRO_SIGNATURE_P_H #define REPREPRO_SIGNATURE_P_H #ifdef HAVE_LIBGPGME #include #include 
extern gpgme_ctx_t context; #endif #include "globals.h" #include "error.h" #include "signature.h" #ifdef HAVE_LIBGPGME retvalue gpgerror(gpg_error_t err); #endif #endif reprepro-4.13.1/descriptions.h0000644000175100017510000000022412152651661013243 00000000000000#ifndef REPREPRO_DESCRIPTIONS_H #define REPREPRO_DESCRIPTIONS_H retvalue description_complete(const char *, const char *, bool, char **); #endif reprepro-4.13.1/sha1.h0000644000175100017510000000063012152651661011372 00000000000000#ifndef REPREPRO_SHA1_H #define REPREPRO_SHA1_H struct SHA1_Context { uint32_t state[5]; uint64_t count; uint8_t buffer[64]; }; #define SHA1_DIGEST_SIZE 20 void SHA1Init(/*@out@*/struct SHA1_Context *context); void SHA1Update(struct SHA1_Context *context, const uint8_t *data, const size_t len); void SHA1Final(struct SHA1_Context *context, /*@out@*/uint8_t digest[SHA1_DIGEST_SIZE]); #endif reprepro-4.13.1/files.h0000644000175100017510000000606712152651661011652 00000000000000#ifndef REPREPRO_FILES_H #define REPREPRO_FILES_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" 
#endif #ifndef REPREPRO_DATABASE_H #include "database.h" #endif #ifndef REPREPRO_NAMES_H #include "names.h" #endif struct checksums; struct checksumsarray; /* Add file's md5sum to database */ retvalue files_add_checksums(const char *, const struct checksums *); /* remove file's md5sum from database */ retvalue files_remove(const char * /*filekey*/); /* same but do not call pool_markremoved */ retvalue files_removesilent(const char * /*filekey*/); /* check for file in the database and if not found there in the pool */ retvalue files_expect(const char *, const struct checksums *, bool warnifreadded); /* same for multiple files */ retvalue files_expectfiles(const struct strlist *, struct checksums **); /* check for several files in the database and update information */ retvalue files_checkorimprove(const struct strlist *, struct checksums **); /* what to do with files */ /* file should already be there, just make sure it is in the database */ #define D_INPLACE -1 /* copy the file to the given location, return RET_NOTHING, if already in place */ #define D_COPY 0 /* move the file in place: */ #define D_MOVE 1 /* move needed and delete unneeded files: */ #define D_DELETE 2 /* Include a given file into the pool * return RET_NOTHING, if a file with the same checksums is already there * return RET_OK, if copied and added * return RET_ERROR_MISSING, if there is no file to copy. * return RET_ERROR_WRONG_MD5 if wrong md5sum. * (the original file is not deleted in that case, even if delete is positive) */ retvalue files_preinclude(const char *sourcefilename, const char *filekey, /*@null@*//*@out@*/struct checksums **); retvalue files_checkincludefile(const char *directory, const char *sourcefilename, const char *filekey, struct checksums **); typedef retvalue per_file_action(void *data, const char *filekey); /* callback for each registered file */ retvalue files_foreach(per_file_action, void *); /* check if all files are corect. 
(skip md5sum if fast is true) */ retvalue files_checkpool(bool /*fast*/); /* calculate all missing hashes */ retvalue files_collectnewchecksums(void); /* dump out all information */ retvalue files_printmd5sums(void); retvalue files_printchecksums(void); /* look for the given filekey and add it into the filesdatabase */ retvalue files_detect(const char *); retvalue files_regenerate_filelist(bool redo); /* hardlink file with known checksums and add it to database */ retvalue files_hardlinkandadd(const char * /*tempfile*/, const char * /*filekey*/, const struct checksums *); /* RET_NOTHING: file is already there * RET_OK : could be added * RET_ERROR_WRONG_MD5SUM: filekey is already there with different md5sum */ retvalue files_canadd(const char *filekey, const struct checksums *); /* make a filekey to a fullfilename. return NULL if OutOfMemory */ static inline char *files_calcfullfilename(const char *filekey) { return calc_dirconcat(global.outdir, filekey); } off_t files_getsize(const char *); #endif reprepro-4.13.1/atoms.h0000644000175100017510000000526212152651661011667 00000000000000#ifndef REPREPRO_ATOMS_H #define REPREPRO_ATOMS_H typedef int atom_t; typedef atom_t architecture_t; typedef atom_t component_t; typedef atom_t packagetype_t; typedef atom_t command_t; enum atom_type { at_architecture, at_component, at_packagetype, at_command }; #define atom_unknown ((atom_t)0) #define architecture_source ((architecture_t)1) #define architecture_all ((architecture_t)2) #define component_strange ((component_t)1) #define pt_dsc ((packagetype_t)1) #define pt_deb ((packagetype_t)2) #define pt_udeb ((packagetype_t)3) #define atom_defined(a) ((a) > (atom_t)0) extern const char **atomtypes, **atoms_architectures, **atoms_components, **atoms_packagetypes, **atoms_commands; retvalue atoms_init(int command_count); retvalue architecture_intern(const char *, /*@out@*/architecture_t *); architecture_t architecture_find(const char *); architecture_t architecture_find_l(const char *, 
size_t); retvalue component_intern(const char *, /*@out@*/component_t *); component_t component_find(const char *); component_t component_find_l(const char *, size_t); component_t components_count(void); packagetype_t packagetype_find(const char *); packagetype_t packagetype_find_l(const char *, size_t); atom_t atom_find(enum atom_type, const char *); retvalue atom_intern(enum atom_type, const char *, /*@out@*/atom_t *); #define limitation_missed(a, b) ((atom_defined(a) && (a) != (b))) #define limitations_missed(a, b) ((a) != NULL && !atomlist_in(a, b)) struct atomlist { atom_t *atoms; int count, size; }; void atomlist_init(/*@out@*/struct atomlist *); void atomlist_done(/*@special@*/struct atomlist *atomlist) /*@releases atomlist->values @*/; /* add a atom uniquely (not sorted, component guessing might not like it), * RET_NOTHING when already there */ retvalue atomlist_add_uniq(struct atomlist *, atom_t); /* always add to the end */ retvalue atomlist_add(struct atomlist *, atom_t); /* replace the contents of dest with those from orig, which get emptied */ void atomlist_move(/*@out@*/struct atomlist *, /*@special@*/struct atomlist *orig) /*@releases orig->values @*/; bool atomlist_hasexcept(const struct atomlist *, atom_t); bool atomlist_in(const struct atomlist *, atom_t); int atomlist_ofs(const struct atomlist *, atom_t); /* if missing != NULL And subset no subset of atomlist, set *missing to the first missing one */ bool atomlist_subset(const struct atomlist *, const struct atomlist * /*subset*/, /*@null@*/atom_t * /*missing*/ ); /* print a space separated list of elements */ retvalue atomlist_fprint(FILE *, enum atom_type, const struct atomlist *); retvalue atomlist_filllist(enum atom_type, /*@out@*/struct atomlist *, char * /*string*/, /*@out@*/const char ** /*missing*/); #endif reprepro-4.13.1/sha256.c0000644000175100017510000001654612152651661011556 00000000000000/* sha256 implementation, taken (with minor modification) from sha256crypt.c, which states: 
Released into the Public Domain by Ulrich Drepper . Neglegible modifications by Bernhard R. Link, also in the public domain. */ #include #include #include #include #include #include #include #include #include "sha256.h" #ifndef WORDS_BIGENDIAN # define SWAP(n) \ (((n) << 24) | (((n) & 0xff00) << 8) | (((n) >> 8) & 0xff00) | ((n) >> 24)) #else # define SWAP(n) (n) #endif /* This array contains the bytes used to pad the buffer to the next 64-byte boundary. (FIPS 180-2:5.1.1) */ static const unsigned char fillbuf[64] = { 0x80, 0 /* , 0, 0, ... */ }; /* Constants for SHA256 from FIPS 180-2:4.2.2. */ static const uint32_t K[64] = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 }; /* Process LEN bytes of BUFFER, accumulating context into CTX. It is assumed that LEN % 64 == 0. */ static void sha256_process_block (const void *buffer, size_t len, struct SHA256_Context *ctx) { const uint32_t *words = buffer; size_t nwords = len / sizeof (uint32_t); uint32_t a = ctx->H[0]; uint32_t b = ctx->H[1]; uint32_t c = ctx->H[2]; uint32_t d = ctx->H[3]; uint32_t e = ctx->H[4]; uint32_t f = ctx->H[5]; uint32_t g = ctx->H[6]; uint32_t h = ctx->H[7]; /* First increment the byte count. FIPS 180-2 specifies the possible length of the file up to 2^64 bits. 
Here we only compute the number of bytes. */ ctx->total += len; /* Process all bytes in the buffer with 64 bytes in each round of the loop. */ while (nwords > 0) { uint32_t W[64]; uint32_t a_save = a; uint32_t b_save = b; uint32_t c_save = c; uint32_t d_save = d; uint32_t e_save = e; uint32_t f_save = f; uint32_t g_save = g; uint32_t h_save = h; /* Operators defined in FIPS 180-2:4.1.2. */ #define Ch(x, y, z) ((x & y) ^ (~x & z)) #define Maj(x, y, z) ((x & y) ^ (x & z) ^ (y & z)) #define S0(x) (CYCLIC (x, 2) ^ CYCLIC (x, 13) ^ CYCLIC (x, 22)) #define S1(x) (CYCLIC (x, 6) ^ CYCLIC (x, 11) ^ CYCLIC (x, 25)) #define R0(x) (CYCLIC (x, 7) ^ CYCLIC (x, 18) ^ (x >> 3)) #define R1(x) (CYCLIC (x, 17) ^ CYCLIC (x, 19) ^ (x >> 10)) /* It is unfortunate that C does not provide an operator for cyclic rotation. Hope the C compiler is smart enough. */ #define CYCLIC(w, s) ((w >> s) | (w << (32 - s))) /* Compute the message schedule according to FIPS 180-2:6.2.2 step 2. */ for (unsigned int t = 0; t < 16; ++t) { W[t] = SWAP (*words); ++words; } for (unsigned int t = 16; t < 64; ++t) W[t] = R1 (W[t - 2]) + W[t - 7] + R0 (W[t - 15]) + W[t - 16]; /* The actual computation according to FIPS 180-2:6.2.2 step 3. */ for (unsigned int t = 0; t < 64; ++t) { uint32_t T1 = h + S1 (e) + Ch (e, f, g) + K[t] + W[t]; uint32_t T2 = S0 (a) + Maj (a, b, c); h = g; g = f; f = e; e = d + T1; d = c; c = b; b = a; a = T1 + T2; } /* Add the starting values of the context according to FIPS 180-2:6.2.2 step 4. */ a += a_save; b += b_save; c += c_save; d += d_save; e += e_save; f += f_save; g += g_save; h += h_save; /* Prepare for the next round. */ nwords -= 16; } /* Put checksum in context given as argument. */ ctx->H[0] = a; ctx->H[1] = b; ctx->H[2] = c; ctx->H[3] = d; ctx->H[4] = e; ctx->H[5] = f; ctx->H[6] = g; ctx->H[7] = h; } /* Initialize structure containing state of computation. 
(FIPS 180-2:5.3.2) */ void SHA256Init(struct SHA256_Context *ctx) { ctx->H[0] = 0x6a09e667; ctx->H[1] = 0xbb67ae85; ctx->H[2] = 0x3c6ef372; ctx->H[3] = 0xa54ff53a; ctx->H[4] = 0x510e527f; ctx->H[5] = 0x9b05688c; ctx->H[6] = 0x1f83d9ab; ctx->H[7] = 0x5be0cd19; ctx->total = 0; ctx->buflen = 0; } /* Process the remaining bytes in the internal buffer and the usual prolog according to the standard and write the result to digest. */ void SHA256Final(struct SHA256_Context *ctx, uint8_t *digest) { /* Take yet unprocessed bytes into account. */ uint32_t bytes = ctx->buflen; uint32_t bitslow, bitshigh; size_t pad; int i; /* Now count remaining bytes. */ ctx->total += bytes; pad = bytes >= 56 ? 64 + 56 - bytes : 56 - bytes; memcpy (&ctx->buffer[bytes], fillbuf, pad); /* Put the 64-bit file length in *bits* at the end of the buffer. */ bitslow = ctx->total << 3; bitshigh = ctx->total >> 29; bitslow = SWAP(bitslow); memcpy(ctx->buffer + bytes + pad + 4, &bitslow, 4); bitshigh = SWAP(bitshigh); memcpy(ctx->buffer + bytes + pad, &bitshigh, 4); /* Process last bytes. */ sha256_process_block (ctx->buffer, bytes + pad + 8, ctx); for (i = 0; i < SHA256_DIGEST_SIZE; i++) { digest[i] = (uint8_t) ((ctx->H[i>>2] >> ((3-(i & 3)) * 8) ) & 255); } } void SHA256Update(struct SHA256_Context *ctx, const uint8_t *buffer, size_t len) { /* When we already have some bits in our internal buffer concatenate both inputs first. */ if (ctx->buflen != 0) { size_t left_over = ctx->buflen; size_t add = 128 - left_over > len ? len : 128 - left_over; memcpy (&ctx->buffer[left_over], buffer, add); ctx->buflen += add; if (ctx->buflen > 64) { sha256_process_block (ctx->buffer, ctx->buflen & ~63, ctx); ctx->buflen &= 63; /* The regions in the following copy operation cannot overlap. */ memcpy (ctx->buffer, &ctx->buffer[(left_over + add) & ~63], ctx->buflen); } buffer = buffer + add; len -= add; } /* Process available complete blocks. */ if (len >= 64) { /* To check alignment gcc has an appropriate operator. 
Other compilers don't. */ #if __GNUC__ >= 2 # define UNALIGNED_P(p) (((uintptr_t) p) % __alignof__ (uint32_t) != 0) #else # define UNALIGNED_P(p) (((uintptr_t) p) % sizeof (uint32_t) != 0) #endif if (UNALIGNED_P (buffer)) while (len > 64) { sha256_process_block (memcpy (ctx->buffer, buffer, 64), 64, ctx); buffer = buffer + 64; len -= 64; } else { sha256_process_block (buffer, len & ~63, ctx); buffer = buffer + (len & ~63); len &= 63; } } /* Move remaining bytes into internal buffer. */ if (len > 0) { size_t left_over = ctx->buflen; memcpy (&ctx->buffer[left_over], buffer, len); left_over += len; if (left_over >= 64) { sha256_process_block (ctx->buffer, 64, ctx); left_over -= 64; memcpy (ctx->buffer, &ctx->buffer[64], left_over); } ctx->buflen = left_over; } } reprepro-4.13.1/tests/0000755000175100017510000000000012152655346011614 500000000000000reprepro-4.13.1/tests/uncompress.test0000644000175100017510000002653712152651661014644 00000000000000set -u . "$TESTSDIR"/test.inc # First test if finding the binaries works properly... 
testrun - --lunzip=NONE --unxz=NONE __dumpuncompressors 3< testfile dd if=/dev/zero bs=1024 count=1024 >> testfile echo "" >> testfile echo "middle" >> testfile dd if=/dev/zero bs=1024 count=1024 >> testfile echo "" >> testfile echo "end" >> testfile echo "Ohm" > smallfile echo gzip -c testfile \> testfile.gz gzip -c testfile > testfile.gz echo bzip2 -c testfile \> testfile.bz2 bzip2 -c testfile > testfile.bz2 echo lzma -c testfile \> testfile.lzma lzma -c testfile > testfile.lzma if test -x /usr/bin/xz ; then echo xz -c testfile \> testfile.xz xz -c testfile > testfile.xz fi echo gzip -c smallfile \> smallfile.gz gzip -c smallfile > smallfile.gz echo bzip2 -c smallfile \> smallfile.bz2 bzip2 -c smallfile > smallfile.bz2 echo lzma -c smallfile \> smallfile.lzma lzma -c smallfile > smallfile.lzma if test -x /usr/bin/xz ; then echo xz -c smallfile \> smallfile.xz xz -c smallfile > smallfile.xz testrun - __uncompress .xz testfile.xz testfile.xz.uncompressed 3< smallfile.lzma.uncompressed exited with errorcode 1! -v0*=There have been errors! returns 255 EOF dodo test ! -e smallfile.lzma.uncompressed # Now check for compressed parts of an .a file: cat > control < control.tar.bz2 tar -cf - ./control | bzip2 > control.tar.bz2 rm control echo tar -cf - testfile\* \| lzma \> data.tar.lzma tar -cf - testfile* | lzma > data.tar.lzma echo 2.0 > debian-binary dodo ar qcfS fake.deb debian-binary control.tar.bz2 data.tar.lzma rm *.tar.bz2 *.tar.lzma debian-binary # TODO: there could be a problem here with .deb files that have data after the # ./control file in data.tar and using an external uncompressor. # But how to test this when there is no way to trigger it in the default built? 
testrun - __extractcontrol fake.deb 3< debian/control < debian/dirs dd if=/dev/zero of=debian/zzz bs=1024 count=4096 tar -cf - debian | lzma > fake_1-1.debian.tar.lzma mkdir fake-1 mkdir fake-1.orig cp -al debian fake-1/debian cp -al debian fake-1.orig/debian sed -e 's/1/2/' fake-1/debian/dirs > fake-1/debian.dirs.new mv fake-1/debian.dirs.new fake-1/debian/dirs diff -ruN fake-1.orig fake-1 | lzma > fake_1-1.diff.lzma rm -r debian # .debian.tar and .diff usually do not happen at the same time, but easier testing... cat > fake_1-1.dsc << EOF Format: 3.0 Source: fake Binary: abinary Architecture: all Version: 17 Maintainer: Me Files: $(mdandsize fake_1-1.diff.lzma) fake_1-1.diff.lzma $(mdandsize fake_1-1.debian.tar.lzma) fake_1-1.debian.tar.lzma 00000000000000000000000000000000 0 fake_1.orig.tar.lzma EOF testrun - __extractsourcesection fake_1-1.dsc 3< fake_1-1.debian.tar.lzma tar -cf - debian | lzma > fake_1-1.debian.tar.lzma rm -r debian testrun - __extractsourcesection fake_1-1.dsc 3< fake-1/debian/control < fake-1/debian/aaaaa < fake_1-1.diff.lzma rm -r fake-1 fake-1.orig cat > fake_1-1.dsc << EOF Format: 3.0 Source: fake Binary: abinary Architecture: all Version: 17 Maintainer: Me Files: $(mdandsize fake_1-1.diff.lzma) fake_1-1.diff.lzma $(mdandsize fake_1-1.debian.tar.lzma) fake_1-1.debian.tar.lzma 00000000000000000000000000000000 0 fake_1.orig.tar.lzma EOF testrun - __extractsourcesection fake_1-1.dsc 3< fake_1-2.diff < fake_1-2.dsc << EOF Format: 3.0 Source: fake Binary: abinary Architecture: all Version: 17 Maintainer: Me Files: $(mdandsize fake_1-2.diff.gz) fake_1-2.diff.gz 00000000000000000000000000000000 0 fake_1.orig.tar.gz EOF testrun - __extractsourcesection fake_1-2.dsc 3< conf/options < conf/distributions < conf/pulls < conf/bin < conf/src < results.expected < results.expected < results.expected <conf/distributions <conf/pulls <conf/incoming <conf/options < pi.rules <>pi.rules <pull.rules <>pull.rules < conf/options < conf/distributions < importindex 
< importindex < importindex < importindex < foo_0_abacus.deb cat > importindex < pool/dog/f/foo/foo_1.dsc echo "tar-content" > pool/dog/f/foo/foo_1.tar.gz cat > importindex < references.normal grep '^s=' results | sed -e 's/^s=\(.\)=[^ ]* /\1 contains /' > references.snapshot dodiff -u references.normal references.snapshot rm references.normal references.snapshot # Remove contents from original, to make them more look alike: for n in dists/B/Release dists/B/snapshots/now/Release dists/A/Release dists/A/snapshots/now/Release ; do ed -s $n <&2 exit 1 ;; *) break ;; esac done if [ "2" -lt "$#" ] ; then echo "Syntax: test.sh [] []" >&2 exit 1 fi echo "SRCDIR is '$SRCDIR'" if [ ! -d "$SRCDIR" ] || [ ! -d "$SRCDIR/tests" ] ; then echo "Error: Could not find source directory (tried: '$SRCDIR')!" >&2 exit 1 fi TESTSDIR="$SRCDIR/tests" if [ "1" -le "$#" ] ; then TESTTOOL="$(readlink -e "$1")" else TESTTOOL=testtool fi if [ "2" -le "$#" ] ; then REPREPRO="$(readlink -e "$2")" else REPREPRO="$SRCDIR/reprepro" fi RREDTOOL="$(dirname "$REPREPRO")/rredtool" if [ -z "$TESTOPTIONS" ] ; then if [ -z "$USE_VALGRIND" ] ; then TESTOPTIONS="-e -a" elif [ -z "$VALGRIND_SUP" ] ; then # leak-check=full is better than leak-check=summary, # sadly squeeze's valgrind counts them into the error number # with full, and we want to ignore them for childs.... 
TESTOPTIONS="-e -a --debug --leak-check=${VALGRIND_LEAK} --suppressions=$TESTSDIR/valgrind.supp" else TESTOPTIONS="-e -a --debug --leak-check=${VALGRIND_LEAK} --suppressions=$VALGRIND_SUP" fi fi case "$verbosity" in -1) VERBOSITY="-s" ;; 0) VERBOSITY="" ;; 1) VERBOSITY="-v" ;; 2) VERBOSITY="-vv" ;; 3) VERBOSITY="-vvv" ;; 4) VERBOSITY="-vvvv" ;; 5) VERBOSITY="-vvvvv" ;; 6) VERBOSITY="-vvvvvv" ;; *) echo "Unsupported verbosity $verbosity" >&2 exit 1 ;; esac TESTOPTIONS="-D v=$verbosity $TESTOPTIONS" REPREPROOPTIONS="$VERBOSITY" if test -n "$VERBOSEDB" ; then TESTOPTIONS="-D x=0 -D d=1 $TESTOPTIONS" REPREPROOPTIONS="--verbosedb $REPREPROOPTIONS" else TESTOPTIONS="-D x=0 -D d=0 $TESTOPTIONS" fi TRACKINGTESTOPTIONS="-D t=0" if ! [ -x "$REPREPRO" ] ; then echo "Could not find $REPREPRO!" >&2 exit 1 fi TESTTOOLVERSION="`$TESTTOOL --version`" case $TESTTOOLVERSION in "testtool version "*) ;; *) echo "Failed to get version of testtool($TESTTOOL)" exit 1 ;; esac if test -d "$WORKDIR" && test -f "$WORKDIR/ThisDirectoryWillBeDeleted" && $deleteifmarked ; then rm -r "$WORKDIR" || exit 3 fi mkdir "$WORKDIR" || exit 1 echo "Remove this file to avoid silent removal" > "$WORKDIR"/ThisDirectoryWillBeDeleted cd "$WORKDIR" # dpkg-deb doesn't like too restrictive directories umask 022 number_tests=0 number_missing=0 number_success=0 number_skipped=0 number_failed=0 runtest() { if ! test -f "$SRCDIR/tests/$1.test" ; then echo "Cannot find $SRCDIR/tests/$1.test!" >&2 number_missing="$(( $number_missing + 1 ))" return fi number_tests="$(( $number_tests + 1 ))" echo "Running test '$1'.." TESTNAME=" $1" mkdir "dir_$1" rc=0 ( cd "dir_$1" || exit 1 export TESTNAME export SRCDIR TESTSDIR export TESTTOOL RREDTOOL REPREPRO export TRACKINGTESTOPTIONS TESTOPTIONS REPREPROOPTIONS verbosity WORKDIR="$WORKDIR/dir_$1" CALLEDFROMTESTSUITE=true dash "$SRCDIR/tests/$1.test" ) > "log_$1" 2>&1 || rc=$? 
if test "$rc" -ne 0 ; then number_failed="$(( $number_failed + 1 ))" echo "test '$1' failed (see $WORKDIR/log_$1 for details)!" >&2 elif grep -q -s '^SKIPPED: ' "log_$1" ; then number_skipped="$(( $number_skipped + 1 ))" echo "test '$1' skipped:" sed -n -e 's/^SKIPPED://p' "log_$1" rm -r "dir_$1" "log_$1" else number_success="$(( $number_success + 1 ))" rm -r "dir_$1" "log_$1" fi } if test x"$testtorun" != x"all" ; then runtest "$testtorun" else runtest descriptions runtest easyupdate runtest srcfilterlist runtest uploaders runtest wrongarch runtest flood runtest exporthooks runtest updatecorners runtest packagediff runtest includeextra runtest atoms runtest trackingcorruption runtest layeredupdate runtest layeredupdate2 runtest uncompress runtest check runtest flat runtest subcomponents runtest snapshotcopyrestore runtest various1 runtest various2 runtest various3 runtest copy runtest buildneeding runtest morgue runtest diffgeneration runtest onlysmalldeletes runtest override fi echo "$number_tests tests, $number_success succeded, $number_failed failed, $number_skipped skipped, $number_missing missing" exit 0 reprepro-4.13.1/tests/Makefile.in0000644000175100017510000002434712152655327013612 00000000000000# Makefile.in generated by automake 1.11.6 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, # 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software # Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__make_dryrun = \ { \ am__dry=no; \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ echo 'am--echo: ; @echo "AM" OK' | $(MAKE) -f - 2>/dev/null \ | grep '^AM OK$$' >/dev/null || am__dry=yes;; \ *) \ for am__flg in $$MAKEFLAGS; do \ case $$am__flg in \ *=*|--*) ;; \ *n*) am__dry=yes; break;; \ esac; \ done;; \ esac; \ test $$am__dry = yes; \ } pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : subdir = tests DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \ $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ ARCHIVECPP = @ARCHIVECPP@ ARCHIVELIBS = @ARCHIVELIBS@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CYGPATH_W = @CYGPATH_W@ DBLIBS = @DBLIBS@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = 
@INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MKDIR_P = @MKDIR_P@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_CC = @ac_ct_CC@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bindir = @bindir@ build_alias = @build_alias@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host_alias = @host_alias@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ EXTRA_DIST = \ brokenunlzma.sh \ genpackage.sh \ test.inc \ test.sh \ atoms.test \ buildneeding.test \ check.test \ copy.test \ descriptions.test \ diffgeneration.test \ easyupdate.test \ exporthooks.test \ flat.test \ flood.test \ includeextra.test \ layeredupdate.test \ layeredupdate2.test \ morgue.test \ onlysmalldeletes.test \ 
override.test \ packagediff.test \ signatures.test \ signed.test \ snapshotcopyrestore.test \ srcfilterlist.test \ subcomponents.test \ template.test \ trackingcorruption.test \ uncompress.test \ updatecorners.test \ uploaders.test \ various1.test \ various2.test \ various3.test \ verify.test \ wrongarch.test \ evil.key \ expired.key \ expiredwithsubkey.key \ expiredwithsubkey-working.key \ good.key \ revoked.key \ revoked.pkey \ withsubkeys.key \ withsubkeys-works.key MAINTAINERCLEANFILES = $(srcdir)/Makefile.in all: all-am .SUFFIXES: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu tests/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu tests/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): tags: TAGS TAGS: ctags: CTAGS CTAGS: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) clean: clean-am clean-am: clean-generic mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic distclean \ distclean-generic distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic pdf pdf-am ps ps-am uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: reprepro-4.13.1/tests/various3.test0000644000175100017510000010521312152651661014206 00000000000000set -u . "$TESTSDIR"/test.inc dodo test ! 
-e dists mkdir conf db logs lists for tracking in true false ; do if $tracking ; then echo "this is the test variant with tracking on" else echo "this is the test variant with tracking off" fi if $tracking ; then cat >> conf/distributions <> conf/distributions < conf/pulls <toa froma>toa2 froma2>toa2 Components: c1 c2 UDebComponents: u1 u2 EOF testrun - -b . --export=changed pull a b 3<> conf/distributions < conf/pulls <results.expected < results.expected <results.expected < results.expected <results.expected < results.expected < results.expected < broken.changes testrun - -b . --delete --delete include a broken.changes 3<> broken.changes testrun - -b . --delete --delete include a broken.changes 3<results.expected < results.expected << EOF pool/all/a/ab/ab_3-1.diff.gz EOF dodiff /dev/null results || dodiff results.expected results testrun - -b . deleteunreferenced 3<> conf/distributions cat >conf/updates < results.expected < results.expected < results.expected < results.expected < results.expected <> aa)" 3< results.expected < results.expected < conf/distributions < conf/distributions < fakes/1 < Architecture: coal Version: 0.0-1 Filename: pool/main/5/5dchess/5dchess_0.0-1_coal.deb MD5sum: $EMPTYMD5ONLY Size: 0 Description: the lazy fox jumps over the quick brown dog. Package: a Priority: critical Section: required Installed-Size: 1 Maintainer: test Architecture: all Version: 1 Filename: pool/main/a/a/a_1_all.deb MD5sum: $EMPTYMD5ONLY Size: 0 Description: the lazy fox jumps over the quick brown dog. Package: b Source: baa Priority: critical Section: required Installed-Size: 1 Maintainer: test Architecture: coal Version: 2 Filename: pool/main/b/baa/b_2_coal.deb MD5sum: $EMPTYMD5ONLY Size: 0 Description: the lazy fox jumps over the quick brown dog. EOF cat > fakes/2 < Architecture: all Version: 2 Filename: pool/main/a/a/a_2_all.deb MD5sum: $EMPTYMD5ONLY Size: 0 Description: the lazy fox jumps over the quick brown dog. EOF testrun - -b . 
-C main -A coal -T deb _addpackage sourcedistribution fakes/1 a 3< addchecksums.rules < conf/updates < conf/updates < conf/distributions < conf/updates.base <>conf/updates <>conf/updates < flatsource/Release testrun - -b . update 1234 3<> flatsource/Release testrun - -b . update 1234 3< flatsource/Sources.gz gzip -c < /dev/null > flatsource/Packages.gz cat > flatsource/Release < flatsource/Packages < flatsource/Release < flatsource/Packages < flatsource/Release < flatsource/Packages < flatsource/Release < flatsource/Packages < flatsource/Release < flatsource/Sources < flatsource/Release < flatsource/Sources < flatsource/Release < conf/options cat > conf/updatelog.sh <> '$WORKDIR/updatelog' exit 0 EOF cat > conf/shouldnothappen.sh <> '$WORKDIR/shouldnothappen' exit 0 EOF chmod a+x conf/updatelog.sh conf/shouldnothappen.sh cat > conf/distributions < conf/updates <=firmware/), section(<< firmware0) Name: 1 From: pre1 Suite: x Name: 2 Flat: main From: b #without this I do not get a warning, why? Architectures: abacus coal source FilterFormula: section (<= firmware0) | !section Suite: x Name: 5 From: b Name: 6 From: b Name: 7 From: b Name: 8 From: b EOF DISTRI=dummy PACKAGE=aa EPOCH="" VERSION=1 REVISION=-1000 SECTION="base" genpackage.sh -sa DISTRI=dummy PACKAGE=bb EPOCH="" VERSION=2 REVISION=-0 SECTION="firmware/base" genpackage.sh -sa DISTRI=dummy PACKAGE=cc EPOCH="" VERSION=1 REVISION=-1000 SECTION="base" genpackage.sh -sa DISTRI=dummy PACKAGE=dd EPOCH="" VERSION=2 REVISION=-0 SECTION="firmware/base" genpackage.sh -sa mkdir source1/pool source1/pool/main source1/pool/firmware mv aa* source1/pool/main mv bb* source1/pool/firmware mv cc* source2 mv dd* source2 mkdir source2/x cd source2 echo 'dpkg-scanpackages . /dev/null > x/Packages' dpkg-scanpackages . /dev/null > x/Packages cd .. cat > sourcesections < x/Sources' dpkg-scansources . ../sourcesections > x/Sources cd .. 
rm sourcesections cat > source2/x/InRelease < dists/suitename/main/source/Sources dpkg-scanpackages pool/main /dev/null > dists/suitename/main/binary-abacus/Packages dpkg-scanpackages -a coal pool/main /dev/null > dists/suitename/main/binary-coal/Packages dpkg-scansources pool/firmware /dev/null > dists/suitename/firmware/source/Sources dpkg-scanpackages pool/firmware /dev/null > dists/suitename/firmware/binary-abacus/Packages dpkg-scanpackages -a coal pool/firmware /dev/null > dists/suitename/firmware/binary-coal/Packages cd .. cat > source1/dists/suitename/InRelease < source1/dists/suitename/InRelease < source1/dists/suitename/InRelease < dists/suitename/main/source/Sources dpkg-scanpackages pool/main /dev/null > dists/suitename/main/binary-abacus/Packages dpkg-scanpackages -a coal pool/main /dev/null > dists/suitename/main/binary-coal/Packages dpkg-scansources pool/firmware /dev/null > dists/suitename/firmware/source/Sources dpkg-scanpackages pool/firmware /dev/null > dists/suitename/firmware/binary-abacus/Packages dpkg-scanpackages -a coal pool/firmware /dev/null > dists/suitename/firmware/binary-coal/Packages cd .. cat > source1/dists/suitename/InRelease < results.expected </dev/null ; then echo "SKIPPED: gpg not found!" exit 0 fi rm -rf db dists pool lists conf gpgtestdir mkdir -p gpgtestdir export GNUPGHOME="`pwd`/gpgtestdir" gpg --import $TESTSDIR/good.key $TESTSDIR/evil.key $TESTSDIR/expired.key $TESTSDIR/revoked.key mkdir -p conf cat > conf/options < conf/distributions < conf/auploaders < conf/buploaders < conf/cuploaders < conf/incoming < debian/rules <<'EOF' #!/usr/bin/make tmp = $(CURDIR)/debian/tmp binary-indep: install -m 755 -d $(tmp)/DEBIAN $(tmp)/usr/share/doc/documentation echo "I have told you so" > $(tmp)/usr/share/doc/documentation/NEWS gzip -c9 debian/changelog > $(tmp)/usr/share/doc/documentation/changelog.gz chown -R root.root $(tmp) && chmod -R go=rX $(tmp) dpkg-gencontrol -isp dpkg --build $(tmp) .. 
echo "I forgot" >> ../manifesto.txt echo "What?" >> ../history.txt dpkg-distaddfile manifesto.txt byhand - dpkg-distaddfile history.txt byhand - .PHONY: clean binary-arch binary-indep binary build build-indep buil-arch EOF chmod a+x debian/rules cat > debian/changelog < Sat, 15 Jan 2011 17:12:05 +2700 EOF cat > debian/control < Standards-Version: Aleph_17 Package: documentation Architecture: all Description: documentation documentation EOF cd .. dpkg-source -b documentation-9876AD "" cd documentation-9876AD fakeroot make -f debian/rules binary-indep > ../documentation_9876AD_coal+all.log dpkg-genchanges > ../test.changes cd .. rm -r documentation-9876AD ed -s test.changes < conf/distributions < conf/distributions <results.expected < conf/distributions <results.expected < conf/distributions <results.expected < conf/incoming <results.expected <> conf/incoming < results cat > results.expected < results cat > results.expected <> conf/distributions < conf/handle-byhand.sh <<'EOF' #!/bin/sh echo "byhand-script called with: " "'$*'" >&2 EOF cat > conf/handle-alternate.sh <<'EOF' #!/bin/sh echo "alternate-script called with: " "'$*'" >&2 EOF chmod u+x conf/handle-alternate.sh chmod u+x conf/handle-byhand.sh testrun - processincoming foo 3< results cat > results.expected < results cat > results.expected < results cat > results.expected <> conf/distributions < results cat > results.expected <> conf/distributions <package-1.0/debian/control < Standards-Version: 0.0 Package: rumsrumsrums Architecture: all Description: a package . Package: dumdidum Architecture: another Description: a package not build . Package: troettroet Architecture: abacus Description: some test-package . 
END cat >package-1.0/debian/changelog < Mon, 01 Jan 1980 01:02:02 +0000 END dpkg-source -b package-1.0 cat > conf/distributions <> conf/options < package_1.0-1_another.log echo "package_1.0-1_another.log - -" > package-1.0/debian/files cd package-1.0 dpkg-genchanges -B > ../package_1.0-1_another.changes cd .. testrun - -C main include test package_1.0-1_another.changes 3<onlyonearch-1.0/debian/control < Standards-Version: 0.0 Package: onearch Architecture: abacus Description: some test-onlyonearch . END cat >onlyonearch-1.0/debian/changelog < Mon, 01 Jan 1980 01:02:02 +0000 END dpkg-source -b onlyonearch-1.0 mkdir onlyonearch-1.0/debian/tmp mkdir onlyonearch-1.0/debian/tmp/DEBIAN mkdir -p onlyonearch-1.0/debian/tmp/usr/bin touch onlyonearch-1.0/debian/tmp/usr/bin/program cd onlyonearch-1.0 dpkg-gencontrol -ponearch dpkg --build debian/tmp .. cd .. rm -r onlyonearch-1.0 testrun - --delete includedsc test onlyonearch_1.0-1.dsc 3<onlyarchall-1.0/debian/control < Standards-Version: 0.0 Package: archall Architecture: all Description: some test-arch all package . END cat >onlyarchall-1.0/debian/changelog < Mon, 01 Jan 1980 01:02:02 +0000 END dpkg-source -b onlyarchall-1.0 mkdir onlyarchall-1.0/debian/tmp mkdir onlyarchall-1.0/debian/tmp/DEBIAN mkdir -p onlyarchall-1.0/debian/tmp/usr/bin touch onlyarchall-1.0/debian/tmp/usr/bin/program cd onlyarchall-1.0 dpkg-gencontrol -parchall dpkg --build debian/tmp .. cd .. rm -r onlyarchall-1.0 testrun - --delete includedsc test onlyarchall_1.0-1.dsc 3<allandany-1.0/debian/control < Standards-Version: 0.0 Package: allpart Architecture: all Description: some test-arch all package . Package: anypart Architecture: any Description: some test-arch any package . 
END cat >allandany-1.0/debian/changelog < Mon, 01 Jan 1980 01:02:02 +0000 END dpkg-source -b allandany-1.0 mkdir allandany-1.0/debian/tmp mkdir allandany-1.0/debian/tmp/DEBIAN mkdir -p allandany-1.0/debian/tmp/usr/bin touch allandany-1.0/debian/tmp/usr/bin/program cd allandany-1.0 dpkg-gencontrol -panypart dpkg --build debian/tmp .. cd .. rm -r allandany-1.0/debian/tmp mkdir allandany-1.0/debian/tmp mkdir allandany-1.0/debian/tmp/DEBIAN mkdir -p allandany-1.0/debian/tmp/usr/share touch allandany-1.0/debian/tmp/usr/share/data cd allandany-1.0 dpkg-gencontrol -pallpart dpkg --build debian/tmp .. cd .. echo "There was nothing to do on this architecture!" > allandany_1.0-1_another.log echo "allandany_1.0-1_another.log - -" > allandany-1.0/debian/files cd allandany-1.0 dpkg-genchanges -B > ../allandany_1.0-1_another.changes cd .. rm -r allandany-1.0 testrun - --delete includedsc test allandany_1.0-1.dsc 3<anyonly-1.0/debian/control < Standards-Version: 0.0 Package: anyonly Architecture: any Description: some test-arch any package . END cat >anyonly-1.0/debian/changelog < Mon, 01 Jan 1980 01:02:02 +0000 END dpkg-source -b anyonly-1.0 mkdir anyonly-1.0/debian/tmp mkdir anyonly-1.0/debian/tmp/DEBIAN mkdir -p anyonly-1.0/debian/tmp/usr/bin touch anyonly-1.0/debian/tmp/usr/bin/program cd anyonly-1.0 dpkg-gencontrol -panyonly dpkg --build debian/tmp .. cd .. rm -r anyonly-1.0 testrun - --delete includedsc test anyonly_1.0-1.dsc 3<linuxwildcard-1.0/debian/control < Standards-Version: 0.0 Package: linuxwildcard Architecture: linux-any Description: some test-arch any package . END cat >linuxwildcard-1.0/debian/changelog < Mon, 01 Jan 1980 01:02:02 +0000 END dpkg-source -b linuxwildcard-1.0 rm -r linuxwildcard-1.0 mkdir kfreebsdwildcard-1.0 mkdir kfreebsdwildcard-1.0/debian cat >kfreebsdwildcard-1.0/debian/control < Standards-Version: 0.0 Package: kfreebsdwildcard Architecture: kfreebsd-any Description: some test-arch any package . 
END cat >kfreebsdwildcard-1.0/debian/changelog < Mon, 01 Jan 1980 01:02:02 +0000 END dpkg-source -b kfreebsdwildcard-1.0 rm -r kfreebsdwildcard-1.0 testrun - includedsc oses linuxwildcard_1.0-1.dsc 3< conf/distributions <&1 exit 1 fi testrun - -b . repairdescriptions 4321 3< conf/distributions < conf/options < conf/distributions < conf/distributions < conf/distributions < conf/distributions < conf/distributions < results cat > results.expected < %l" | LC_ALL=C sort -f > results cat > results.expected < dog/Contents-abacus.gz END sed -e "s/^Contents: compatsymlink/Contents: allcomponents/" -i conf/distributions dodiff results.expected results testrun - -b . processincoming default 3< conf/incoming < conf/incoming < conf/incoming < conf/incoming < conf/incoming < conf/incoming < results.log.expected < results dodiff /dev/null results find i -type f > results dodiff /dev/null results cat > results.expected < results dodiff results.expected results cat > results.expected < results dodiff results.expected results cat > results.expected < results dodiff results.expected results printindexpart pool/dog/b/bird/bird_1_abacus.deb > results.expected printindexpart pool/dog/b/bird/bird-addons_1_all.deb >> results.expected dodiff results.expected dists/B/dog/binary-abacus/Packages withoutchecksums pool/dog/b/bird/bird_1.dsc > results.expected ed -s results.expected < results dodiff results.expected results echo "DebOverride: debo" >> conf/distributions echo "DscOverride: dsco" >> conf/distributions echo "bird Section cat/tasty" > conf/debo echo "bird Priority hungry" >> conf/debo echo "bird Task lunch" >> conf/debo echo "bird-addons Section cat/ugly" >> conf/debo echo "bird Section cat/nest" > conf/dsco echo "bird Priority hurry" >> conf/dsco echo "bird Homepage gopher://tree" >> conf/dsco mv i2/* i/ rmdir i2 testrun - -b . 
processincoming default 3< results dodiff /dev/null results find i -type f > results dodiff /dev/null results cat > results.expected < results dodiff results.expected results cat > results.expected < results dodiff results.expected results cat > results.expected < results dodiff results.expected results printindexpart pool/cat/b/bird/bird_1_abacus.deb > results.expected printindexpart pool/cat/b/bird/bird-addons_1_all.deb >> results.expected ed -s results.expected < results.expected ed -s results.expected < results dodiff results.expected results # now missing: checking what all can go wrong in a .changes or .dsc file... mkdir pkg mkdir pkg/a touch pkg/a/b mkdir pkg/DEBIAN cat > pkg/DEBIAN/control < i/test.changes < i/test.changes testrun - -b . processincoming default 3< i/test.changes < i/test.changes < i/test.changes < i/test.changes < i/test.changes < i/test.changes < i/test.changes testrun - -b . processincoming default 3<> i/test.changes testrun - -b . processincoming default 3<> i/test.changes testrun - -b . processincoming default 3<> i/test.changes testrun - -b . processincoming default 3<> i/test.changes testrun - -b . processincoming default 3<> i/test.changes testrun - -b . processincoming default 3<> i/test.changes testrun - -b . processincoming default 3<> i/test.changes touch "$(printf 'i/\300\257.\300\257_v_funny.deb')" testrun - -b . processincoming default 3<> i/test.changes mv "$(printf 'i/\300\257.\300\257_v_funny.deb')" "$(printf 'i/\300\257.\300\257_v_all.deb')" testrun - -b . processincoming default 3<> i/test.changes testrun - -b . processincoming default 3<> i/test.changes # TODO: this error message has to be improved: testrun - -b . processincoming default 3<> i/test.changes testrun - -b . processincoming default 3<> i/test.changes # TODO: these will hopefully change to not divulge the place of the temp dir some day... testrun - -b . 
processincoming default 3<" >> pkg/DEBIAN/control dpkg-deb -b pkg i/debfilename_debfileversion~2_all.deb DEBMD5S="$(md5sum i/debfilename_debfileversion~2_all.deb | cut -d' ' -f1) $(stat -c '%s' i/debfilename_debfileversion~2_all.deb)" printf '$d\nw\nq\n' | ed -s i/test.changes echo " $DEBMD5S - - debfilename_debfileversion~2_all.deb" >> i/test.changes testrun - -b . processincoming default 3<> pkg/DEBIAN/control echo " a package to test reprepro" >> pkg/DEBIAN/control dpkg-deb -b pkg i/debfilename_debfileversion~2_all.deb DEBMD5S="$(md5sum i/debfilename_debfileversion~2_all.deb | cut -d' ' -f1) $(stat -c '%s' i/debfilename_debfileversion~2_all.deb)" printf '$d\nw\nq\n' | ed -s i/test.changes echo " $DEBMD5S - - debfilename_debfileversion~2_all.deb" >> i/test.changes testrun - -b . processincoming default 3<> pkg/DEBIAN/control dpkg-deb -b pkg i/debfilename_debfileversion~2_all.deb DEBMD5S="$(md5sum i/debfilename_debfileversion~2_all.deb | cut -d' ' -f1) $(stat -c '%s' i/debfilename_debfileversion~2_all.deb)" printf '$d\nw\nq\n' | ed -s i/test.changes echo " $DEBMD5S - - debfilename_debfileversion~2_all.deb" >> i/test.changes testrun - -b . processincoming default 3<> i/test.changes testrun - -b . processincoming default 3<> i/test.changes testrun - -b . processincoming default 3<> pkg/DEBIAN/control dpkg-deb -b pkg i/indebname_debfileversion~2_all.deb DEBMD5S="$(md5sum i/indebname_debfileversion~2_all.deb | cut -d' ' -f1) $(stat -c '%s' i/indebname_debfileversion~2_all.deb)" printf '$d\nw\nq\n' | ed -s i/test.changes echo " $DEBMD5S - - indebname_debfileversion~2_all.deb" >> i/test.changes testrun - -b . processincoming default 3<> i/test.changes testrun - -b . 
processincoming default 3<> pkg/DEBIAN/control dpkg-deb -b pkg i/indebname_debfileversion~2_all.deb DEBMD5S="$(md5sum i/indebname_debfileversion~2_all.deb | cut -d' ' -f1) $(stat -c '%s' i/indebname_debfileversion~2_all.deb)" printf '$d\nw\nq\n' | ed -s i/test.changes echo " $DEBMD5S test - indebname_debfileversion~2_all.deb" >> i/test.changes testrun - -b . processincoming default 3<> i/test.changes checknolog logfile testrun - -b . processincoming default 3< results echo "pool/dog/s/sourceindeb/indebname_0versionindeb~1_all.deb" > results.expected dodiff results.expected results touch i/dscfilename_fileversion~.dsc DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)" cat > i/test.changes <> i/test.changes testrun - -b . processincoming default 3< i/dscfilename_fileversion~.dsc DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)" printf '$d\nw\nq\n' | ed -s i/test.changes echo " $DSCMD5S - - dscfilename_fileversion~.dsc" >> i/test.changes testrun - -b . processincoming default 3< i/dscfilename_fileversion~.dsc DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)" printf '$d\nw\nq\n' | ed -s i/test.changes echo " $DSCMD5S - - dscfilename_fileversion~.dsc" >> i/test.changes testrun - -b . processincoming default 3<> i/dscfilename_fileversion~.dsc DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)" printf '$d\nw\nq\n' | ed -s i/test.changes echo " $DSCMD5S - - dscfilename_fileversion~.dsc" >> i/test.changes testrun - -b . processincoming default 3<" >> i/dscfilename_fileversion~.dsc DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)" printf '$d\nw\nq\n' | ed -s i/test.changes echo " $DSCMD5S - - dscfilename_fileversion~.dsc" >> i/test.changes testrun - -b . 
processincoming default 3<> i/dscfilename_fileversion~.dsc DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)" printf '$d\nw\nq\n' | ed -s i/test.changes echo " $DSCMD5S - - dscfilename_fileversion~.dsc" >> i/test.changes testrun - -b . processincoming default 3<> i/dscfilename_fileversion~.dsc DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)" printf '$d\nw\nq\n' | ed -s i/test.changes echo " $DSCMD5S - - dscfilename_fileversion~.dsc" >> i/test.changes testrun - -b . processincoming default 3<> i/dscfilename_fileversion~.dsc DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)" printf '$d\nw\nq\n' | ed -s i/test.changes echo " $DSCMD5S - - dscfilename_fileversion~.dsc" >> i/test.changes testrun - -b . processincoming default 3<> i/test.changes testrun - -b . processincoming default 3<> i/test.changes testrun - -b . processincoming default 3<> i/test.changes testrun - -b . processincoming default 3<> i/test.changes checknolog logfile testrun - -b . processincoming default 3<i/strangefile <i/dscfilename_fileversion~.dsc < Standards-Version: 0 Version: 1:newversion~ Files: md5sumindsc sizeindsc strangefile EOF DSCMD5S="$(md5sum i/dscfilename_fileversion~.dsc | cut -d' ' -f1) $(stat -c '%s' i/dscfilename_fileversion~.dsc)" cat >i/test.changes <> i/test.changes # this is a stupid error message, needs to get some context testrun - -b . processincoming default 3<> i/test.changes testrun - -b . processincoming default 3<> i/test.changes testrun - -b . processincoming default 3<> i/test.changes testrun - -b . processincoming default 3<> i/test.changes testrun - -b . 
processincoming default 3<> i/dscfilename_fileversion~.dsc DSCMD5S="$(mdandsize i/dscfilename_fileversion~.dsc)" DSCSHA1S="$(sha1andsize i/dscfilename_fileversion~.dsc)" DSCSHA2S="$(sha2andsize i/dscfilename_fileversion~.dsc)" DSCFILENAMEMD5S="$DSCMD5S" DSCFILENAMESHA1S="$DSCSHA1S" DSCFILENAMESHA2S="$DSCSHA2S" printf '$-1,$d\nw\nq\n' | ed -s i/test.changes echo " $DSCMD5S dummy unneeded dscfilename_fileversion~.dsc" >> i/test.changes echo " 33a1096ff883d52f0c1f39e652d6336f 33 - - strangefile_xyz" >> i/test.changes testrun - -b . processincoming default 3< results cat > results.expected < results cat > results.expected < results withoutchecksums pool/dog/b/bird/bird_1.dsc >bird.preprocessed ed -s bird.preprocessed < results.expected < Standards-Version: 0 Version: 0versionindsc Priority: can't-live-without Section: dummy Directory: pool/dog/d/dscfilename Files: $OLDDSCFILENAMEMD5S dscfilename_0versionindsc.dsc Checksums-Sha1: $OLDDSCFILENAMESHA1S dscfilename_0versionindsc.dsc Checksums-Sha256: $OLDDSCFILENAMESHA2S dscfilename_0versionindsc.dsc EOF dodiff results.expected results testout "" -b . dumpunreferenced dodiff /dev/null results printf '$d\nw\nq\n' | ed -s i/test.changes echo " 31a1096ff883d52f0c1f39e652d6336f 33 - - strangefile_xyz" >> i/test.changes checknolog logfile testrun - -b . processincoming default 3< results cat > results.expected < results cat > results.expected < results cat bird.preprocessed - > results.expected < Standards-Version: 0 Version: 1:newversion~ Priority: unneeded Section: dummy Directory: pool/dog/d/dscfilename Files: $DSCFILENAMEMD5S dscfilename_newversion~.dsc 31a1096ff883d52f0c1f39e652d6336f 33 strangefile_xyz Checksums-Sha1: $DSCFILENAMESHA1S dscfilename_newversion~.dsc 4453da6ca46859b207c5b55af6213ff8369cd383 33 strangefile_xyz Checksums-Sha256: $DSCFILENAMESHA2S dscfilename_newversion~.dsc c40fcf711220c0ce210159d43b22f1f59274819bf3575e11cc0057ed1988a575 33 strangefile_xyz EOF dodiff results.expected results testout "" -b . 
dumpunreferenced dodiff /dev/null results REPREPRO_OUT_DIR=. "$SRCDIR"/docs/outstore.py --check rm -r conf db pool dists i pkg logs temp rm results.expected results.log.expected results bird.preprocessed testsuccess reprepro-4.13.1/tests/verify.test0000644000175100017510000003376012152651661013746 00000000000000set -u . "$TESTSDIR"/test.inc mkdir gpgtestdir chmod go-rwx gpgtestdir export GNUPGHOME="`pwd`/gpgtestdir" gpg --import $TESTSDIR/good.key $TESTSDIR/evil.key $TESTSDIR/expired.key $TESTSDIR/revoked.key $TESTSDIR/expiredwithsubkey-working.key $TESTSDIR/withsubkeys-works.key CURDATE="$(date +"%Y-%m-%d")" mkdir conf lists cat > conf/distributions < conf/updates < conf/updates < conf/updates < conf/updates < test/dists/test/Release < conf/updates < conf/updates < conf/updates < conf/updates < conf/updates < test/dists/test/InRelease <<'EOF' -----BEGIN PGP SIGNED MESSAGE----- Hash: SHA1 Codename: test Components: everything Architectures: coal -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.12 (GNU/Linux) iKIEAQECAAwFAk+6EiEFgwABUYAACgkQFU9je/YsbTv4LgP8DkaRBhBG7+JDD1N1 GANCsth4rzKDfpyMrttFjW6Ra9QegDdnHyLz09IL5Hyzmst4s8DQ69q2LyZaQt3+ 0C2OG9iQ2GjQt8xvppDufvymFpqTbqnGn/LeG6KjP542Su8XZxptFPT2DyPNCe0W Vz5f8yupwc67sAWj/qhmBEpZp9E= =025V -----END PGP SIGNATURE----- EOF testrun - -b . update Test 3< test/dists/test/InRelease < conf/options cat > conf/distributions < conf/updates < conf/updates < conf/updates < conf/distributions < conf/updates < conf/updates < conf/distributions < conf/updates < conf/updates < testsource/dists/codename1/InRelease < testsource/dists/codename2/InRelease < testsource/dists/codename1/InRelease < testsource/dists/codename2/InRelease <> testsource/dists/codename2/InRelease < results.expected if [ $verbosity -ge 0 ] ; then echo "Calculating packages to get..." 
> results.expected ; fi if [ $verbosity -ge 3 ] ; then echo " processing updates for 'codename2|bb|yyyyyyyyyy'" >>results.expected ; fi if [ $verbosity -ge 5 ] ; then echo " reading './lists/base_codename2_bb_yyyyyyyyyy_Packages'" >>results.expected echo " marking everything to be deleted" >>results.expected echo " reading './lists/base_codename1_bb_yyyyyyyyyy_Packages'" >>results.expected ; fi if [ $verbosity -ge 3 ] ; then echo " processing updates for 'codename2|bb|x'" >>results.expected ; fi if [ $verbosity -ge 5 ] ; then echo " reading './lists/base_codename2_bb_x_Packages'" >>results.expected echo " marking everything to be deleted" >>results.expected echo " reading './lists/base_codename1_bb_x_Packages'" >>results.expected ; fi if [ $verbosity -ge 3 ] ; then echo " processing updates for 'codename2|a|yyyyyyyyyy'" >>results.expected ; fi if [ $verbosity -ge 5 ] ; then echo " reading './lists/base_codename2_a_yyyyyyyyyy_Packages'" >>results.expected echo " marking everything to be deleted" >>results.expected echo " reading './lists/base_codename1_a_yyyyyyyyyy_Packages'" >>results.expected ; fi if [ $verbosity -ge 3 ] ; then echo " processing updates for 'codename2|a|x'" >>results.expected ; fi if [ $verbosity -ge 5 ] ; then echo " reading './lists/base_codename2_a_x_Packages'" >>results.expected echo " marking everything to be deleted" >>results.expected echo " reading './lists/base_codename1_a_x_Packages'" >>results.expected ; fi dodiff results.expected results mv results.expected results2.expected testout - -b . update codename1 3< results.expected if [ $verbosity -ge 0 ] ; then echo "Calculating packages to get..." 
> results.expected ; fi if [ $verbosity -ge 3 ] ; then echo " processing updates for 'codename1|bb|source'" >>results.expected ; fi if [ $verbosity -ge 5 ] ; then echo " reading './lists/base_codename1_bb_Sources'" >>results.expected echo " reading './lists/base_codename2_bb_Sources'" >>results.expected echo " marking everything to be deleted" >>results.expected echo " reading './lists/base_codename2_bb_Sources'" >>results.expected echo " reading './lists/base_codename1_bb_Sources'" >>results.expected fi if [ $verbosity -ge 3 ] ; then echo " processing updates for 'codename1|bb|yyyyyyyyyy'" >>results.expected ; fi if [ $verbosity -ge 5 ] ; then echo " reading './lists/base_codename1_bb_yyyyyyyyyy_Packages'" >>results.expected echo " reading './lists/base_codename2_bb_yyyyyyyyyy_Packages'" >>results.expected echo " marking everything to be deleted" >>results.expected echo " reading './lists/base_codename2_bb_yyyyyyyyyy_Packages'" >>results.expected echo " reading './lists/base_codename1_bb_yyyyyyyyyy_Packages'" >>results.expected fi if [ $verbosity -ge 3 ] ; then echo " processing updates for 'codename1|bb|x'" >>results.expected ; fi if [ $verbosity -ge 5 ] ; then echo " reading './lists/base_codename1_bb_x_Packages'" >>results.expected echo " reading './lists/base_codename2_bb_x_Packages'" >>results.expected echo " marking everything to be deleted" >>results.expected echo " reading './lists/base_codename2_bb_x_Packages'" >>results.expected echo " reading './lists/base_codename1_bb_x_Packages'" >>results.expected fi if [ $verbosity -ge 3 ] ; then echo " processing updates for 'codename1|a|source'" >>results.expected ; fi if [ $verbosity -ge 5 ] ; then echo " reading './lists/base_codename1_a_Sources'" >>results.expected echo " reading './lists/base_codename2_a_Sources'" >>results.expected echo " marking everything to be deleted" >>results.expected echo " reading './lists/base_codename2_a_Sources'" >>results.expected echo " reading './lists/base_codename1_a_Sources'" 
>>results.expected fi if [ $verbosity -ge 3 ] ; then echo " processing updates for 'u|codename1|a|yyyyyyyyyy'" >>results.expected ; fi if [ $verbosity -ge 5 ] ; then echo " reading './lists/base_codename1_a_yyyyyyyyyy_uPackages'" >>results.expected echo " reading './lists/base_codename2_a_yyyyyyyyyy_uPackages'" >>results.expected echo " marking everything to be deleted" >>results.expected echo " reading './lists/base_codename2_a_yyyyyyyyyy_uPackages'" >>results.expected echo " reading './lists/base_codename1_a_yyyyyyyyyy_uPackages'" >>results.expected fi if [ $verbosity -ge 3 ] ; then echo " processing updates for 'codename1|a|yyyyyyyyyy'" >>results.expected ; fi if [ $verbosity -ge 5 ] ; then echo " reading './lists/base_codename1_a_yyyyyyyyyy_Packages'" >>results.expected echo " reading './lists/base_codename2_a_yyyyyyyyyy_Packages'" >>results.expected echo " marking everything to be deleted" >>results.expected echo " reading './lists/base_codename2_a_yyyyyyyyyy_Packages'" >>results.expected echo " reading './lists/base_codename1_a_yyyyyyyyyy_Packages'" >>results.expected fi if [ $verbosity -ge 3 ] ; then echo " processing updates for 'u|codename1|a|x'" >>results.expected ; fi if [ $verbosity -ge 5 ] ; then echo " reading './lists/base_codename1_a_x_uPackages'" >>results.expected echo " reading './lists/base_codename2_a_x_uPackages'" >>results.expected echo " marking everything to be deleted" >>results.expected echo " reading './lists/base_codename2_a_x_uPackages'" >>results.expected echo " reading './lists/base_codename1_a_x_uPackages'" >>results.expected fi if [ $verbosity -ge 3 ] ; then echo " processing updates for 'codename1|a|x'" >>results.expected ; fi if [ $verbosity -ge 5 ] ; then echo " reading './lists/base_codename1_a_x_Packages'" >>results.expected echo " reading './lists/base_codename2_a_x_Packages'" >>results.expected echo " marking everything to be deleted" >>results.expected echo " reading './lists/base_codename2_a_x_Packages'" >>results.expected 
echo " reading './lists/base_codename1_a_x_Packages'" >>results.expected fi dodiff results.expected results testrun - -b . update codename2 codename1 3< resultsboth.expected || true grep '^ ' results2.expected >> resultsboth.expected || true grep '^ ' results.expected >> resultsboth.expected || true grep '^[^ C]' results.expected >> resultsboth.expected || true dodiff resultsboth.expected results sed -i -e "s/Method: file:/Method: copy:/" conf/updates dodo rm lists/_codename* testout - -b . update codename1 3< conf/distributions < conf/updates <a2 source Suite: test Method: file:${WORKDIR}/test IgnoreRelease: yes EOF mkdir test mkdir test/dists mkdir test/dists/test mkdir test/dists/test/main mkdir test/dists/test/main/binary-a mkdir test/dists/test/main/source cat > test/dists/test/main/binary-a/Packages < test/dists/test/main/source/Sources <' ='File not found' *=aptmethod error receiving 'file:${WORKDIR}/test/dists/test/main/binary-a/Packages.bz2': *=aptmethod error receiving 'file:${WORKDIR}/test/dists/test/main/source/Sources.gz': *='' *=aptmethod error receiving 'file:${WORKDIR}/test/dists/test/main/source/Sources.bz2': -v1*=aptmethod got 'file:${WORKDIR}/test/dists/test/main/binary-a/Packages' -v2*=Copy file '${WORKDIR}/test/dists/test/main/binary-a/Packages' to './lists/update_test_main_a_Packages'... -v1*=aptmethod got 'file:${WORKDIR}/test/dists/test/main/source/Sources' -v2*=Copy file '${WORKDIR}/test/dists/test/main/source/Sources' to './lists/update_test_main_Sources'... 
stdout $(odb) -v2*=Created directory "./lists" *=Updates needed for 'test|main|source': *=add 'fake1' - '0s' 'update' *=add 'fake2' - '2s' 'update' *=Updates needed for 'test|main|a2': *=add 'fake2' - '2all' 'update' EOF rm -r conf lists test db testsuccess reprepro-4.13.1/tests/brokenunlzma.sh0000755000175100017510000000045112152651661014576 00000000000000#!/bin/sh if [ $# -ne 0 ] ; then echo "brokenunlzma.sh: Wrong number of arguments: $#" >&2 exit 17 fi unlzma if test -f breakon2nd ; then rm breakon2nd exit 0; fi # Breaking an .lzma stream is hard, faking it is more reproduceable... echo "brokenunlzma.sh: claiming broken archive" >&2 exit 1 reprepro-4.13.1/tests/genpackage.sh0000755000175100017510000000261312152651661014156 00000000000000#!/bin/bash set -e #PACKAGE=bloat+-0a9z.app #EPOCH=99: #VERSION=0.9-A:Z+a:z #REVISION=-0+aA.9zZ if [ "x$OUTPUT" == "x" ] ; then OUTPUT=test.changes fi DIR="$PACKAGE-$VERSION" mkdir "$DIR" mkdir "$DIR"/debian cat >"$DIR"/debian/control < Standards-Version: 0.0 Package: $PACKAGE Architecture: abacus Description: bla blub Package: ${PACKAGE}-addons Architecture: all Description: bla blub END if test -z "$DISTRI" ; then DISTRI=test1 fi cat >"$DIR"/debian/changelog < Mon, 01 Jan 1980 01:02:02 +0000 END dpkg-source -b "$DIR" mkdir -p "$DIR"/debian/tmp/DEBIAN touch "$DIR"/debian/tmp/x mkdir "$DIR"/debian/tmp/a touch "$DIR"/debian/tmp/a/1 mkdir "$DIR"/debian/tmp/dir touch "$DIR"/debian/tmp/dir/file touch "$DIR"/debian/tmp/dir/another mkdir "$DIR"/debian/tmp/dir/subdir touch "$DIR"/debian/tmp/dir/subdir/file cd "$DIR" for pkg in `grep '^Package: ' debian/control | sed -e 's/^Package: //'` ; do if [ "x$pkg" != "x${pkg%-addons}" -a -n "$FAKEVER" ] ; then dpkg-gencontrol -p$pkg -v"$FAKEVER" else dpkg-gencontrol -p$pkg fi dpkg --build debian/tmp .. done #dpkg-genchanges > ../"${PACKAGE}_$VERSION$REVISION"_abbacus.changes dpkg-genchanges "$@" > ../"$OUTPUT" cd .. 
rm -r "$DIR" reprepro-4.13.1/tests/signed.test0000644000175100017510000000344112152651661013704 00000000000000set -u . "$TESTSDIR"/test.inc mkdir -p gpgtestdir export GNUPGHOME="`pwd`/gpgtestdir" gpg --import $TESTSDIR/good.key mkdir -p conf cat > conf/distributions < results cat > results.expected < conf/options < conf/distributions <#a|l#' conf/distributions testrun - -b . update 3<##' -e 's#component#compo|nent#' conf/distributions testrun - -b . update 3<> conf/distributions < conf/distributions < test_1.tar.gz cat > test_1.dsc < X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: 
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaa some lines to make it long enough aaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa X-Data: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa Files: $(mdandsize test_1.tar.gz) test_1.tar.gz EOF echo "Dummy file" > pre_1.tar.gz cat > pre_1.dsc < Section: pre Priority: extra Files: $(mdandsize pre_1.tar.gz) pre_1.tar.gz EOF echo "New file" > pre_2.tar.gz cat > pre_2.dsc < Section: pre Priority: extra Files: $(mdandsize pre_2.tar.gz) pre_2.tar.gz EOF echo "Even newer" > pre_3.tar.gz cat > pre_3.dsc < Section: pre Priority: extra Files: $(mdandsize pre_3.tar.gz) 
pre_3.tar.gz EOF mkdir old testrun - includedsc test test_1.dsc 3< patches cat > results.expected <> results.expected <> results.expected <> results.expected <> results.expected < results.expected << EOF 1c Package: pre Format: 1.0 Binary: pre Architecture: all Version: 3 Maintainer: Guess Who Priority: extra Section: pre Directory: pool/main/p/pre Files: $(mdandsize pre_3.dsc) pre_3.dsc $(mdandsize pre_3.tar.gz) pre_3.tar.gz Checksums-Sha1: $(sha1andsize pre_3.dsc) pre_3.dsc $(sha1andsize pre_3.tar.gz) pre_3.tar.gz Checksums-Sha256: $(sha2andsize pre_3.dsc) pre_3.dsc $(sha2andsize pre_3.tar.gz) pre_3.tar.gz Package: test . EOF dodiff results.expected 1.diff rm 1.diff cat > results.expected << EOF 17,18c $(sha2andsize pre_3.dsc) pre_3.dsc $(sha2andsize pre_3.tar.gz) pre_3.tar.gz . 14,15c $(sha1andsize pre_3.dsc) pre_3.dsc $(sha1andsize pre_3.tar.gz) pre_3.tar.gz . 11,12c $(mdandsize pre_3.dsc) pre_3.dsc $(mdandsize pre_3.tar.gz) pre_3.tar.gz . 5c Version: 3 . EOF dodiff results.expected 2.diff rm 2.diff dodiff results.expected 3.diff rm 3.diff cat > results.expected << EOF 1c Package: pre . EOF dodiff results.expected 4.diff rm 4.diff rm -r old db pool conf dists pre_*.dsc pre_*.tar.gz test_1.dsc test_1.tar.gz results.expected patches testsuccess reprepro-4.13.1/tests/flood.test0000644000175100017510000005563112152651661013546 00000000000000set -u . "$TESTSDIR"/test.inc mkdir test-1 mkdir test-1/debian cat >test-1/debian/control < Standards-Version: 0.0 Package: sibling Architecture: any Description: bla blub Package: siblingtoo Architecture: any Description: bla blub Package: mytest Architecture: all Description: bla blub END cat >test-1/debian/changelog < Mon, 01 Jan 1980 01:02:02 +0000 END mkdir -p test-1/debian/tmp/DEBIAN touch test-1/debian/tmp/best-file-in-the-root cd test-1 DEB_HOST_ARCH="another" dpkg-gencontrol -psibling -v2 DEB_HOST_ARCH="another" dpkg --build debian/tmp .. 
DEB_HOST_ARCH="another" dpkg-gencontrol -psiblingtoo -v3 DEB_HOST_ARCH="another" dpkg --build debian/tmp .. DEB_HOST_ARCH="another" dpkg-gencontrol -pmytest -v2 DEB_HOST_ARCH="another" dpkg --build debian/tmp .. DEB_HOST_ARCH="another" dpkg-genchanges -b > ../test-1.changes DEB_HOST_ARCH="somemore" dpkg-gencontrol -psiblingtoo -v3 DEB_HOST_ARCH="somemore" dpkg --build debian/tmp .. cd .. rm -r test-1 mkdir test-2 mkdir test-2/debian cat >test-2/debian/control < Standards-Version: 0.0 Package: sibling Architecture: any Description: bla blub Package: siblingalso Architecture: any Description: bla blub Package: mytest Architecture: all Description: bla blub END cat >test-2/debian/changelog < Mon, 01 Jan 1980 01:02:02 +0000 test (1-1) test; urgency=critical * new upstream release (Closes: #allofthem) -- me Mon, 01 Jan 1980 01:02:02 +0000 END mkdir -p test-2/debian/tmp/DEBIAN touch test-2/debian/tmp/best-file-in-the-root cd test-2 dpkg-gencontrol -psiblingalso -v3.1 dpkg --build debian/tmp .. dpkg-gencontrol -pmytest -v2.4 dpkg --build debian/tmp .. dpkg-gencontrol -psibling -v2.2 dpkg --build debian/tmp .. dpkg-genchanges -b > ../test-2.changes rm debian/files DEB_HOST_ARCH="another" dpkg-gencontrol -psibling -v2.2 DEB_HOST_ARCH="another" dpkg --build debian/tmp .. dpkg-genchanges -b > ../test-2a.changes cd .. rm -r test-2 for tracking in false true ; do mkdir conf cat > conf/distributions <> conf/distributions TRACKINGTESTOPTIONS="-D t=1" else TRACKINGTESTOPTIONS="-D t=0" fi cat >> conf/distributions <> conf/distributions testrun - -b . retrack test 3< results.expected < results.expected < conf/incoming << EOF Name: myrule Allow: test>two Options: limit_arch_all IncomingDir: i TempDir: tmp EOF ls *.changes mkdir i tmp cp test-1.changes sibling_2_another.deb siblingtoo_3_another.deb mytest_2_all.deb i/ testrun - -b . 
processincoming myrule 3< results.expected < results.expected < results.expected < results.expected < conf/distributions < conf/override-c-deb < conf/override-c-dsc < conf/override-d-deb < conf/override-d-dsc <Ôoµ+òR/à ‹ AðìÄXÀ;é‰ËtsGüO A´NReprepro Testsuite Key 3 (FOR USE WITHIN TESTSUITE ONLY) ˆ` H,?Ï € rñÖhZ÷¶< ùâ •"NÿkÈÒ*T›á|M‚ †ŸÅ]ÞQÿÝik³TÍ}}õ½Øreprepro-4.13.1/tests/trackingcorruption.test0000644000175100017510000000575512152651661016374 00000000000000set -u . "$TESTSDIR"/test.inc dodo test ! -d db mkdir -p conf echo "export silent-never" > conf/options cat > conf/distributions < 0, but was nowhere found. *='pool/something/a/aa/aa_1-1_abacus.deb' has refcount > 0, but was nowhere found. *='pool/something/a/aa/aa_1-1.dsc' has refcount > 0, but was nowhere found. *='pool/something/a/aa/aa_1-1.tar.gz' has refcount > 0, but was nowhere found. stdout -d1*=db: 'aa' '1-1' removed from tracking.db(breakme). -v1*=4 files lost their last reference. -v1*=(dumpunreferenced lists such files, use deleteunreferenced to delete them.) EOF testrun - retrack breakme 3< test/test/test.dsc echo "fake-gz-file" > test/test/test.tar.gz cat >test/dists/a/c/source/Sources < test/dists/a/InRelease <conf/distributions <conf/updates <>conf/updates <> conf/updates testrun - -b . update 3< conf/options < conf/distributions < conf/testhook <<'EOF' #!/bin/sh echo "testhook got $#: '$1' '$2' '$3' '$4'" if test -f "$1/$3.deprecated" ; then echo "$3.deprecated.tobedeleted" >&3 fi echo "super-compressed" > "$1/$3.super.new" echo "$3.super.new" >&3 EOF chmod a+x conf/testhook mkdir -p "dists/test2/stupid/binary-abacus" touch "dists/test2/stupid/binary-abacus/Packages.deprecated" cat > logs/fake.outlog << EOF BEGIN-DISTRIBUTION test2 dists/test2 DISTFILE dists/test2 stupid/binary-abacus/Packages.deprecated dists/test2/stupid/binary-abacus/Packages.deprecated END-DISTRIBUTION test2 dists/test2 EOF REPREPRO_OUT_DIR=. 
"$SRCDIR"/docs/outstore.py logs/fake.outlog rm logs/fake.outlog set -v checknolog logfile testrun - -b . export test1 test2 3< dists/test1/stupid/binary-abacus/Release.expected < dists/test1/ugly/binary-abacus/Release.expected < "fakesuper" FAKESUPERMD5="$(mdandsize fakesuper)" FAKESUPERSHA1="$(sha1andsize fakesuper)" FAKESUPERSHA2="$(sha2andsize fakesuper)" cat > Release.test1.expected < dists/test2/stupid/binary-abacus/Release.expected < dists/test2/ugly/binary-abacus/Release.expected < Release.test2.expected < dists/test1/Release.normalized normalizerelease dists/test2/Release > dists/test2/Release.normalized dodiff Release.test1.expected dists/test1/Release.normalized dodiff Release.test2.expected dists/test2/Release.normalized rm dists/*/Release.normalized PACKAGE=simple EPOCH="" VERSION=1 REVISION="" SECTION="stupid/base" genpackage.sh checknolog log1 testrun - -b . include test1 test.changes 3< Release.test1.normalized dodiff Release.test1.expected Release.test1.normalized rm Release.test1.normalized cat > conf/srcoverride < conf/binoverride < results cat >results.expected < results.expected << END test2|ugly|abacus: simple 1 test2|ugly|coal: simple-addons 1 test2|ugly|source: simple 1 END dodiff results.expected results testout "" -b . listfilter test2 'Source(==bloat+-0a9z.app)|(!Source,Package(==bloat+-0a9z.app))' cat > results.expected << END test2|stupid|abacus: bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ test2|stupid|coal: bloat+-0a9z.app-addons 99:0.9-A:Z+a:z-0+aA.9zZ test2|stupid|source: bloat+-0a9z.app 99:0.9-A:Z+a:z-0+aA.9zZ END dodiff results.expected results cat >conf/updates <abacus abacus source FilterFormula: Priority(==optional),Package(>=alpha),Package(<=zeta) FilterList: error list ListHook: /bin/cp END cat >conf/list <> conf/updates <> conf/updates <> conf/updates < test2 find dists/test1/ \( -name "Packages.gz" -o -name "Sources.gz" \) -print0 | xargs -0 zgrep '^Package: ' | sort > test1 dodiff test2 test1 testrun - -b . 
check test1 test2 3<results.expected <results.expected <includeerror.rules < <.changes-file> EOF testrun includeerror -b . include unknown 3< results.expected < results.expected < results.expected < results.expected < conf2/distributions testrun - -b . --confdir ./conf2 update 3<> conf2/distributions testrun - -b . --confdir ./conf2 update 3<> conf2/distributions testrun - -b . --confdir ./conf2 update 3< broken.changes testrun - -b . include test2 broken.changes 3<> broken.changes testrun - -b . include test2 broken.changes 3<> broken.changes testrun - -b . include test2 broken.changes 3<> broken.changes testrun - -b . include test2 broken.changes 3<> broken.changes testrun - -b . include test2 broken.changes 3<> broken.changes testrun - -b . include test2 broken.changes 3<> broken.changes testrun - -b . include test2 broken.changes 3<> broken.changes testrun - -b . --ignore=missingfield include test2 broken.changes 3<> broken.changes testrun - -b . --ignore=missingfield include test2 broken.changes 3<> broken.changes testrun - -b . --ignore=missingfield include test2 broken.changes 3<> conf/distributions <> broken.changes touch nowhere_0old.dsc testrun - -b . --ignore=unusedarch --ignore=surprisingarch --ignore=wrongdistribution --ignore=missingfield include test2 broken.changes 3<results.expected < broken.changes < Description: missing Changes: missing Binary: none and nothing Distribution: test2 Files: `md5sum 4test_0b.1-1.dsc| cut -d" " -f 1` `stat -c%s 4test_0b.1-1.dsc` a b differently_0another.dsc `md5sum 4test_0b.1-1_abacus.deb| cut -d" " -f 1` `stat -c%s 4test_0b.1-1_abacus.deb` a b 4test_0b.1-1_abacus.deb EOF #todo: make it work without this.. cp 4test_0b.1-1.dsc differently_0another.dsc testrun - -b . 
include test2 broken.changes 3<> broken.changes < broken.changes < Description: missing Changes: missing Binary: 4test Distribution: test2 Files: `md5sum 4test_0b.1-1.dsc| cut -d" " -f 1` `stat -c%s 4test_0b.1-1.dsc` a b 4test_0orso.dsc `md5sum 4test_0b.1-1_abacus.deb| cut -d" " -f 1` `stat -c%s 4test_0b.1-1_abacus.deb` a b 4test_0b.1-1_abacus.deb `md5sum 4test_0b.1-1.tar.gz| cut -d" " -f 1` `stat -c%s 4test_0b.1-1.tar.gz` a b 4test_0b.1-1.tar.gz EOF cp 4test_0b.1-1.dsc 4test_0orso.dsc testrun - -b . include test2 broken.changes 3< conf/distributions < conf/distributions < descr1 < conf/uploaders1 < conf/uploaders2 < 100). Built some recursion? *=included from './conf/uploaders2' line 1 -v0*=There have been errors! returns 255 EOF cat > conf/uploaders2 < descrbad < descrbad < descres < descrs <> conf/uploaders2 <>conf/uploaders1 <conf/uploaders1 <conf/uploaders1 <>conf/uploaders1 <>conf/uploaders1 <conf/uploaders1 <>conf/uploaders1 <>conf/uploaders1 <conf/uploaders1 <> descr2 sed -e 's/0000000000000000/fedcba9876542210/g' descrs >> descr3 echo "architecture coal" >> descrs check1takes descrs check1takes descr2 check2takes descr3 rm -r conf descr* testsuccess reprepro-4.13.1/tests/withsubkeys.key0000644000175100017510000000606612152651661014633 00000000000000-----BEGIN PGP PRIVATE KEY BLOCK----- Version: GnuPG v1.4.9 (GNU/Linux) lQHXBEnjCPkBBADbdIK4D+1lbjq1wzZSIfyHJFWKMpy26iwhS2KJqkBNcN1n3Ute ND9WHNuhj+n3k1saFjj7yi/18PwM7weqDPAnzp5dpSVl6OXZU0Oaf4hdk/K7hxkM AaW8sGxJc2OMssffU/ZIGde/62kgQSwhqK0S3BnDORdWE2eIShGkC7Ws4QARAQAB AAP4gjfE3ynpm1JfUzIg8RVR/9KDUOtJmHz541n8jBTzycLlznKNasZY5yGN3B9w tUZxo8weNLeTveID3mve+8uM/UDwcgOVJlMJXXCDCMGYontTR8yAdN2k9mh09Ejx ihL+KrFXY+L42YFa6CUQgzNrxvG5nG3T+NFjDKHew44LWQIA26zeTY9Qvu/+tbIa YaLYHbNHMCABAPV7zHdhAsgPKII6nO1Ic9e6OobNRRn89vFyWxopYFT3sjvV1ZEc +gqKKwIA/75SuKR+INGfY/7OZBjI5tOtWW0jBSxKHHf9LbCm1uW9KNtv2yhZ88oJ MboLNXTNeIdAgjsxUEZnTtbumTbbIwH9FxIhrIj9q1Pb7FZ8ZP2xLSkpHsNlvHYI +pEcGcNPfpl9D9KKK3tbyG633CAIrMtmTjioliQH0H1kyF2mxhsjOaWvtFJSZXBy 
ZXBybyBUZXN0c3VpdGUgS2V5IDYgKEZPUiBVU0UgV0lUSElOIFRFU1RTVUlURSBP TkxZKSA8d2l0aHN1YmtleXNAbm93aGVyZS50bGQ+iLYEEwECACAFAknjCPkCGwMG CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAVT2N79ixtO5xmA/0SkijX92Wupoyu zJbQfRBkPVG7TN8fkmQkKdUZCmu8CrVelZynDGmLhikdMncNv6wMaXP6+/0z3AX/ KwuH8X66xH3nouodM/eueTMbJY3d+b3Re1vEg4xZU7h66/2zE9iXcPoHR2j2l63d jCZOR6Q27kBXXih1j2TAALDpFXW3sp0BuwRJ4wl1EQQAzBhaIj9XtwloZZeo5ZKE dahIHXOJTt8fXvtpUs+kBEje7aKFxYEZgDSkYBBjxRXII/p/Ab7dLRCMgPcRSGrg SXdlVSlMG8mQLm1tIelgkz/7GnMErbFeYClK7ohfWZBrW79Q5quCNlrV+qetJXJR D8lstf4e5r9gQDouCNSjx3MAoJh3s4vRjiXvo1lxp75zZdGtouB3BADLrg5965Vf GySW/MKGbIK1Rm9CextGUxd01GL1uoR4Hi9oHNW7+/QZ9ixOMKA5ggqomfp7pgEe s4wvhHsFIpDpNAWOKlkxlERY33x7R5XxKsgSJS/IfQct5AlbajVDFukFgKW8CWnM A9AD+slO9VoOKgHEDevt9Mfhur1dMoWVowP/U5f7N16vJLckL8uk2DiiHXXtNq0C ucLXhykPAzmw66e9y+0LHd9sj1WzB7WKYOFCR4SGuSRhfUn8HCzaOSRZYI4XVX7z 0mq/ZvenSJm+7mBxOm5rkxCgpwEuFojz1589qFDugKhM+JfGW2H4fpevxyHmpfvK 0oyqOc/6t39Oe/EAAKCQOt2SvprOCp9vDGQrL/O6Wg+mLwnMiO0EGAECAA8CGwIF AknjCfcFCQABUgAAUkcgBBkRAgAGBQJJ4wl1AAoJEHS2LTLnv6eib9sAn1miRieV +l3MMRA0mHQlPF89CIRrAJ94g4sj4qIJQQMQ2zJwFLYmtluSuwkQFU9je/YsbTtF sgQAtaP0bMzn7wmyGAWif9LUxdV2RjfEvrA3jj4V+GeoMT0V4no0eoDKuj2o2tBZ bWWrCXdseJ8UWoftmCErCetWy4zrsr26hwtcMB5NQIXsYzlagejMIv/89AkdnbAN 3B70PEeIpuTZSYRP4598dSrGDQqoSpKWVCemMXEoYl0pMMWdAdgESeMJsQEEAOTI v3NxcTHczP6iijUVIj99QHi1VPnATlBoZRpxf7mYMALiY1CKNFzx6EXiCi9XCHoj spnfcSQqCgtB7EixxRQSCT8WR7w5Q79dtYDSlVLmqjlAll2ea8BxYAqScyDiLA5P AI7Y/ey/tAL0bM0qe57pZ64xHFWXIlP0faKUVPbbABEBAAEAA/0cYK4mop6YwbuH ph+gf/OUjnOtxUg6BllwbdKEmilumurxoKUS+2GNWdAmwufigVgi1kS0A1wkUTaX uOCXD73TCHPcKMRp7YLZzg9jy/XlDgbPn4qRl1qa7RHPvAV5a4j4upcw+EzP2B+3 z7e/zlQkFOuKdSYj/zgBidRwxRs9kQIA8HngrqvrRNC03IzQLZqYhRFV7AxlK2Xl ikFdEj0xXAy6ep4svwsBtHnUUfmXZL/cG4A6MNp1in4XK6WNuBmmyQIA842mH8nV /vmIwm+lfDTs1Y5BMoV2g9VhOixJ3WO2HQVGbYB42tBd3cw+tp5urSPESFN9rcq8 U2+XE+wPZRiugwIA0aBXyLN73fhkTUduTJhYOgtEl1yormrsEZHwYiNI7zLjc71p 3aMZsXpAZV3lcgPV83O7ESgC3z+Pbux6tA8+k6R7iQE9BBgBAgAJBQJJ4wmxAhsC 
AKgJEBVPY3v2LG07nSAEGQECAAYFAknjCbEACgkQ2KKLf9el2Ie2bQP/T8ThPu2S eq5tR8aGxJJ1w0U6szTT2UIyXu8gBO3SM8wLVcBuIXdkxOshUP2Xua758tLmns5X aoDiK8am9E22zX1BXyjVmK/74of6yjsf+VxJJtTlgpxeFH+zI1zZxIA1TlRg2TvV IHC5oD1i5v1P2xugtzby4aGHTXdi8pUMKGLMegP/U61Kg6OcqEA2C6288UhsNWZz ReY9mMOlC8z+TET6IfHnP5hd4+hZxKDWKgj0V95vbWzA/XkZIiK09wjjht6Oqw5t qq4R38D5kkFUr9yLlwTxMw+jipVTTFlbjjWhsnEoH16QZRQr0PT0FlSnn3CGy+sd Sdq+vMpQsIh1h85JVQI= =+Gnh -----END PGP PRIVATE KEY BLOCK----- reprepro-4.13.1/tests/expired.key0000644000175100017510000000171112152651661013702 00000000000000-----BEGIN PGP PRIVATE KEY BLOCK----- Version: GnuPG v1.4.6 (GNU/Linux) lQG6BEgsQCURBAC+SMbS3n9hnYIYf3YoKWHqEsrrjX6UXgb9io3VtHy4ljr2cJbF pABjIEWh0z7kLXXJVeR6bAc4lgR8cR6T2TsuRkx0lT2BtFI+iNz8oeLRjM4TQbJQ erxl4m67PXPxLXmbhBmO2HSQ/6NlQIE9AfAE5Bf5JTb630aJolrgWF4phwCguqfK EcqgotFEErPvwCF9bqv+UHsEAJCNVCJ1wyabrnSykkE+7H8cgB9wkE255ussB0pD pX3IKcquwShQFgLUjgCmlVnBqFE5K/K8dBSf+TAYI6a3zV5SzKTWUy2b3cZljMwO jUxd5CMVSK4c8IeTxPvWdcx0hzjAngeKNkGbzWaQqUes49Mr9ItxEXViVvaJLcay RhnEA/46soa9a7YI+XWJF0UQUSKSbuie5iwGzXC7KCosyNsPcu2G15dL7YelkGAo B+rV8yWMVg0+2fY68nmrkilfR32jG3rMPPS5ZPYO8vAQFv1VSJUjuIjerV0+fVsv W3udbXFDmURpw8LhZMI5bKmJtcKdGhXd1sZ/vhZSZAFs0LchDwAAl0NwAPemPbcK PBEqqXFxSe0lkSIH9bROUmVwcmVwcm8gVGVzdHN1aXRlIEtleSA0IChGT1IgVVNF IFdJVEhJTiBURVNUU1VJVEUgT05MWSkgPGV4cGlyZWRAbm93aGVyZS50bGQ+iGYE ExECACYFAkgsQCUCGwMFCQABUYAGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRCJ T6Kd0E3T1go2AJ4lg1614/jYIy5m7NCCPXUpCOhrVQCfUMuczWp32ddKY4aDqiHX t/QBoI4= =Q+dU -----END PGP PRIVATE KEY BLOCK----- reprepro-4.13.1/tests/exporthooks.test0000644000175100017510000000402712152651661015021 00000000000000set -u . "$TESTSDIR"/test.inc mkdir conf cat > conf/distributions < conf/distributions < conf/strange.sh <<'EOF' #!/bin/sh echo hook "$@" touch "$1/$3.something.new" echo "$3.something.new" >&3 touch "$1/$3.something.hidden.new" echo "$3.something.hidden.new." >&3 exit 0 EOF chmod a+x conf/strange.sh testrun - -b . 
export o 3< results cat > results.expected < results || true cat > results.expected <&2 exit 1 fi if test -z "$TESTSDIR" || ! test -d "$TESTSDIR" ; then echo "TESTSDIR='$TESTSDIR' not a valid directory!" >&2 exit 1 fi if test -z "$WORKDIR" || ! test -d "$WORKDIR" ; then echo "WORKDIR='$WORKDIR' not a valid directory!" >&2 exit 1 fi # avoid architecture dependency of the test-suite: export DEB_HOST_ARCH="abacus" export PATH="$TESTSDIR:$PATH" if ! [ -x "$REPREPRO" ] ; then echo "Could not find $REPREPRO!" >&2 exit 1 fi checknolog() { dodo test ! -f logs/"$1" } checklog() { cat > results.log.expected LOGDATE="$(date +'%Y-%m-%d %H:')" echo normalizing "$1": DATESTR is "$LOGDATE??:??" sed -i -e 's/^'"$LOGDATE"'[0-9][0-9]:[0-9][0-9] /DATESTR /g' logs/"$1" dodiff results.log.expected logs/"$1" rm logs/"$1" } md5() { md5sum "$1" | cut -d' ' -f1 } sha1() { sha1sum "$1" | cut -d' ' -f1 } sha256() { sha256sum "$1" | cut -d' ' -f1 } printindexpart() { FILENAME="$1" dpkg-deb -I "$FILENAME" control >"$FILENAME".control ed -s "$FILENAME".control << EOF H /^Description:/ kd /^Priority/ m 'd-1 /^Section/ m 'd-1 'd i Filename: $FILENAME Size: $(stat -c "%s" "$FILENAME") SHA256: $(sha256 "$FILENAME") SHA1: $(sha1 "$FILENAME") MD5sum: $(md5 "$FILENAME") . $ a . w q EOF cat "$FILENAME".control rm "$FILENAME".control } withoutchecksums() { awk 'BEGIN{inheader=0} /^Checksums-.*: / || (inheader && /^ /) {inheader = 1; next} {inheader = 0 ; print}' "$@" } mdandsize() { cat < empty.rules <Ôoµ+òR/àˆY H,LTest revoked key rñÖhZ÷Cˆž=”D÷˜|O‹EÕ_Êã0~/ž„&o°ܲQx%°½LcåÜ´NReprepro Testsuite Key 3 (FOR USE WITHIN TESTSUITE ONLY) ˆ` H,?Ï € rñÖhZ÷¶< ùâ •"NÿkÈÒ*T›á|M‚ †ŸÅ]ÞQÿÝik³TÍ}}õ½Øreprepro-4.13.1/tests/subcomponents.test0000644000175100017510000005117212152651661015336 00000000000000set -u . "$TESTSDIR"/test.inc dodo test ! -d db testrun - -b . 
_versioncompare 0 1 3< conf/distributions < results.expected < results dodiff results.expected results cat > conf/distributions < results.expected < results dodiff results.expected results # Now try with suite cat > conf/distributions < results.expected < results dodiff results.expected results testrun - -b . createsymlinks 3< 'foo/updates' because of '/'. stdout EOF cat >> conf/distributions < 'foo/updates' because of the '/' in it. -v2*=Hopefully something else will link 'bla' -> 'foo' then this is not needed. stdout -v1*=Created ./dists/bla->foo EOF # check a .dsc with nothing in it: cat > test.dsc < test.dsc < Section: section Priority: priority Files: EOF testrun - -C a -b . includedsc foo test.dsc 3< conf/options < conf/distributions < fake.dsc < Files: EOF testrun - -C main includedsc test fake.dsc 3< results cat > results.expected < results cat > results.expected < results cat > results.expected < results cat > results.expected < results cat > results.expected < results cat > results.expected < results cat > results.expected < test/a/a.dsc < Checksums-Sha1: $(sha1andsize test/a/a.tar.gz) a.tar.gz EOF cat > test/dists/name/comp/source/Sources < Directory: a Files: $(mdandsize test/a/a.dsc) a.dsc $(mdandsize test/a/a.tar.gz) a.tar.gz Checksums-Sha1: $(sha1andsize test/a/a.dsc) a.dsc $(sha1andsize test/a/a.tar.gz) a.tar.gz EOF mkdir conf cat > conf/distributions < conf/updates <everything IgnoreRelease: Yes DownloadListsAs: . 
EOF testrun - update test1 3< test/dists/name/comp/source/Sources testrun - update test2 3< conf/distributions < conf/options < fake1.deb echo "fake-deb2" > fake2.deb echo "fake-deb3" > fake3.deb fakedeb1md="$(md5 fake1.deb)" fakedeb2md="$(md5 fake2.deb)" fakedeb3md="$(md5 fake3.deb)" fakedeb1sha1="$(sha1 fake1.deb)" fakedeb2sha1="$(sha1 fake2.deb)" fakedeb3sha1="$(sha1 fake3.deb)" fakedeb1sha2="$(sha256 fake1.deb)" fakedeb2sha2="$(sha256 fake2.deb)" fakedeb3sha2="$(sha256 fake3.deb)" fakesize=10 cat > fakeindex < pool/c/p/pseudo/fake_0_all.deb testrun - -b . check 3<results.expected << EOF 'fake' -> 'Package: fake Version: 0 Source: pseudo (9999) Architecture: all Filename: pool/c/p/pseudo/fake_0_all.deb Section: base Priority: extra Size: $fakesize MD5Sum: $fakedeb1md ' EOF dodiff results.expected results cat results testrun - -b . _listchecksums 3< #include #include #include #include #include #include "error.h" #include "guesscomponent.h" /* Guess which component to use: * - if the user gave one, use that one. 
* - if the section is a componentname, use this one * - if the section starts with a componentname/, use this one * - if the section ends with a /componentname, use this one * - if the section/ is the start of a componentname, use this one * - use the first component in the list */ retvalue guess_component(const char *codename, const struct atomlist *components, const char *package, const char *section, component_t givencomponent, component_t *guess) { int i; size_t section_len; if (atom_defined(givencomponent)) { if (!atomlist_in(components, givencomponent)) { (void)fprintf(stderr, "Could not find '%s' in components of '%s': ", atoms_components[givencomponent], codename); (void)atomlist_fprint(stderr, at_component, components); (void)fputs("'\n", stderr); return RET_ERROR; } *guess = givencomponent; return RET_OK; } if (section == NULL) { fprintf(stderr, "Found no section for '%s', so I cannot guess the component to put it in!\n", package); return RET_ERROR; } if (components->count <= 0) { fprintf(stderr, "I do not find any components in '%s', so there is no chance I cannot even take one by guessing!\n", codename); return RET_ERROR; } section_len = strlen(section); for (i = 0 ; i < components->count ; i++) { const char *component = atoms_components[components->atoms[i]]; if (strcmp(section, component) == 0) { *guess = components->atoms[i]; return RET_OK; } } for (i = 0 ; i < components->count ; i++) { const char *component = atoms_components[components->atoms[i]]; size_t len = strlen(component); if (lenatoms[i]; return RET_OK; } } for (i = 0 ; i < components->count ; i++) { const char *component = atoms_components[components->atoms[i]]; size_t len = strlen(component); if (lenatoms[i]; return RET_OK; } } for (i = 0 ; i < components->count ; i++) { const char *component = atoms_components[components->atoms[i]]; if (strncmp(section, component, section_len) == 0 && component[section_len] == '/') { *guess = components->atoms[i]; return RET_OK; } } *guess = 
components->atoms[0]; return RET_OK; } reprepro-4.13.1/tracking.h0000644000175100017510000000522712152651661012347 00000000000000#ifndef REPREPRO_TRACKING_H #define REPREPRO_TRACKING_H #ifndef REPREPRO_DATABASE_H #include "database.h" #endif #ifndef REPREPRO_TRACKINGT_H #include "trackingt.h" #endif #ifndef REPREPRO_DISTRIBUTION_H #include "distribution.h" #endif retvalue tracking_parse(struct distribution *, struct configiterator *); /* high-level retrack of the whole distribution */ retvalue tracking_retrack(struct distribution *, bool /*evenifnotstale*/); retvalue tracking_initialize(/*@out@*/trackingdb *, const struct distribution *, bool readonly); retvalue tracking_done(trackingdb); retvalue tracking_listdistributions(/*@out@*/struct strlist *); retvalue tracking_drop(const char *); retvalue tracking_reset(trackingdb); retvalue tracking_rereference(struct distribution *); retvalue trackedpackage_addfilekey(trackingdb, struct trackedpackage *, enum filetype, /*@only@*/char * /*filekey*/, bool /*used*/); retvalue trackedpackage_adddupfilekeys(trackingdb, struct trackedpackage *, enum filetype, const struct strlist * /*filekeys*/, bool /*used*/); retvalue trackedpackage_removefilekeys(trackingdb, struct trackedpackage *, const struct strlist *); void trackedpackage_free(struct trackedpackage *); retvalue tracking_get(trackingdb, const char * /*sourcename*/, const char * /*version*/, /*@out@*/struct trackedpackage **); retvalue tracking_getornew(trackingdb, const char * /*name*/, const char * /*version*/, /*@out@*/struct trackedpackage **); retvalue tracking_save(trackingdb, /*@only@*/struct trackedpackage *); retvalue tracking_remove(trackingdb, const char * /*sourcename*/, const char * /*version*/); retvalue tracking_printall(trackingdb); retvalue trackingdata_summon(trackingdb, const char *, const char *, struct trackingdata *); retvalue trackingdata_new(trackingdb, struct trackingdata *); retvalue trackingdata_switch(struct trackingdata *, const char *, const 
char *); retvalue trackingdata_insert(struct trackingdata *, enum filetype, const struct strlist * /*filekeys*/, /*@null@*//*@only@*/char * /*oldsource*/, /*@null@*//*@only@*/char * /*oldversion*/, /*@null@*/const struct strlist * /*oldfilekeys*/); retvalue trackingdata_remove(struct trackingdata *, /*@only@*/char */*oldsource*/, /*@only@*/char * /*oldversion*/, const struct strlist * /*filekeys*/); void trackingdata_done(struct trackingdata *); /* like _done but actually do something */ retvalue trackingdata_finish(trackingdb, struct trackingdata *); /* look at all listed packages and remove everything not needed */ retvalue tracking_tidyall(trackingdb); retvalue tracking_removepackages(trackingdb, struct distribution *, const char * /*sourcename*/, /*@null@*/const char * /*version*/); #endif /*REPREPRO_TRACKING_H*/ reprepro-4.13.1/release.h0000644000175100017510000000472112152651661012163 00000000000000#ifndef REPREPRO_RELEASE_H #define REPREPRO_RELEASE_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" 
#endif #ifndef REPREPRO_STRLIST_H #include "strlist.h" #endif #ifndef REPREPRO_DATABASE_H #include "database.h" #endif struct release; #define ic_first ic_uncompressed enum indexcompression {ic_uncompressed=0, ic_gzip, #ifdef HAVE_LIBBZ2 ic_bzip2, #endif ic_count /* fake item to get count */ }; typedef unsigned int compressionset; /* 1 << indexcompression */ #define IC_FLAG(a) (1<<(a)) /* Initialize Release generation */ retvalue release_init(struct release **, const char * /*codename*/, /*@null@*/const char * /*suite*/, /*@null@*/const char * /*fakeprefix*/); /* same but for a snapshot */ retvalue release_initsnapshot(const char *codename, const char *name, struct release **); retvalue release_mkdir(struct release *, const char * /*relativedirectory*/); const char *release_dirofdist(struct release *); retvalue release_addnew(struct release *, /*@only@*/char *, /*@only@*/char *); retvalue release_addsilentnew(struct release *, /*@only@*/char *, /*@only@*/char *); retvalue release_adddel(struct release *, /*@only@*/char *); retvalue release_addold(struct release *, /*@only@*/char *); struct filetorelease; retvalue release_startfile(struct release *, const char * /*filename*/, compressionset, bool /*usecache*/, struct filetorelease **); retvalue release_startlinkedfile(struct release *, const char * /*filename*/, const char * /*symlinkas*/, compressionset, bool /*usecache*/, struct filetorelease **); void release_warnoldfileorlink(struct release *, const char *, compressionset); /* return true if an old file is already there */ bool release_oldexists(struct filetorelease *); /* errors will be cached for release_finishfile */ retvalue release_writedata(struct filetorelease *, const char *, size_t); #define release_writestring(file, data) release_writedata(file, data, strlen(data)) void release_abortfile(/*@only@*/struct filetorelease *); retvalue release_finishfile(struct release *, /*@only@*/struct filetorelease *); struct distribution; struct target; retvalue 
release_directorydescription(struct release *, const struct distribution *, const struct target *, const char * /*filename*/, bool /*onlyifneeded*/); void release_free(/*@only@*/struct release *); retvalue release_prepare(struct release *, struct distribution *, bool /*onlyneeded*/); retvalue release_finish(/*@only@*/struct release *, struct distribution *); #endif reprepro-4.13.1/README0000644000175100017510000000744612152651661011261 00000000000000* What it is: This project is a leightweight feature complete manager of a debian package (i.e. binary .deb and source .dsc+.tar.gz+.diff.gz) repository. Emphasis is put on having all packages in the pool/-directory, maximal checking of all sources. generation of signed Release file, Contents, ... Libraries needed are libdb{3,4.?,5.?} and libz. Libraries used if available are libgpgme, libbz2 and libarchive. * Current status: The main features work without problems. Some special use cases might not be very well tested. * Some naming conventions: basename: the name of a file without any directory information. filekey: the position relative to the mirrordir. (as found as "Filename:" in Packages.gz) full filename: the position relative to / architecture: The term like "sparc","i386","mips",... component: Things like "main" "non-free" "contrib" ... (somtimes also called sections) section: Things like "base" "interpreters" "oldlibs" (sometimes also called subsections) type: The kind of packages, currently supported: "deb", "udeb" and "dsc". target: The smallest unit packages are in. A target is specified by the codename of the distribution it is in, the architecture, component and type. When architecture is "source" exactly when the type is "dsc". identifier: an internal string to specify a target, it has the form "||source" for type dsc, "||" for type deb and "u|||" for type udeb. 
md5sum: The checksum of a file, being in the format " " * Differences to how other standard tools handle the situation: - mirroring: This makes no real mirror of the distribution, but only of it contents. Thus the Index-files will be different. (And thus no longer can be verified by the offical signatures). This means people using this mirror have to trust you to not include anything ugly, as they can only check your signature directly. (Or in other words: not useful for mirroring things to be used by strangers). - location: The directory layout under pool/ is only divided by the component and the sourcename. Ecspecially woody and updates/woody will share the same space, thus avoiding multiple instances of the same file. (Can also cause trouble in the rare cases, when both have a file of the same name with different md5sum. Using -f can help here). - 'byhand'-section This is currently just implemented as alias for '-', to make sure lack of implementation does not cause them to land in a byhand-section... - Override files: Only the ExtraOverride style of apt-ftparchive(1) is supported. (i.e. "packagename Section section\npackagename Maintainer maintainer\n") Note that other than apt-ftparchive case is most likely to be significant. (Having the wrong case in might also cause havoc in apt-ftparchive, as that changes the case of the fieldname, which might confuse other programms...) * Things that might be intresting to know: - guessing the component: If inserting a binary or source package without naming an component, this program has to guess of course. This will done the following way: It will take the first component with the name of the section, being prefix to the section, being suffix to the section or having the section as prefix or any. 
Thus having specifiend the components: "main non-free contrib non-US/main non-US/non-free non-US/contrib" should map .e.g "non-US" to "non-US/main" and "contrib/editors" to "contrib", while having only "main non-free and contrib" as components should map e.g. "non-US/contrib" to "contrib" and "non-US" to "main". NOTE: Always specify main as the first component, if you want things to end up there. NOTE: unlike in dak, non-US and non-us are different things... reprepro-4.13.1/configparser.c0000644000175100017510000011435412152651661013224 00000000000000/* This file is part of "reprepro" * Copyright (C) 2007 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include "error.h" #include "names.h" #include "atoms.h" #include "filecntl.h" #include "configparser.h" struct configiterator { FILE *f; unsigned int startline, line, column, markerline, markercolumn; const char *filename; const char *chunkname; bool eol; }; const char *config_filename(const struct configiterator *iter) { return iter->filename; } unsigned int config_line(const struct configiterator *iter) { return iter->line; } unsigned int config_column(const struct configiterator *iter) { return iter->column; } unsigned int config_firstline(const struct configiterator *iter) { return iter->startline; } unsigned int config_markerline(const struct configiterator *iter) { return iter->markerline; } unsigned int config_markercolumn(const struct configiterator *iter) { return iter->markercolumn; } void config_overline(struct configiterator *iter) { int c; while (!iter->eol) { c = fgetc(iter->f); if (c == '#') { do { c = fgetc(iter->f); } while (c != EOF && c != '\n'); } if (c == EOF || c == '\n') iter->eol = true; else iter->column++; } } bool config_nextline(struct configiterator *iter) { int c; assert (iter->eol); c = fgetc(iter->f); while (c == '#') { do { c = fgetc(iter->f); } while (c != EOF && c != '\n'); iter->line++; c = fgetc(iter->f); } if (c == EOF) return false; if (c == ' ' || c == '\t') { iter->line++; iter->column = 1; iter->eol = false; return true; } (void)ungetc(c, iter->f); return false; } retvalue linkedlistfinish(UNUSED(void *privdata), void *this, void **last, UNUSED(bool complete), UNUSED(struct configiterator *dummy3)) { *last = this; return RET_NOTHING; } static inline retvalue finishchunk(configfinishfunction finishfunc, void *privdata, struct 
configiterator *iter, const struct configfield *fields, size_t fieldcount, bool *found, void **this, void **last, bool complete) { size_t i; retvalue r; if (complete) for (i = 0 ; i < fieldcount ; i++) { if (!fields[i].required) continue; if (found[i]) continue; fprintf(stderr, "Error parsing config file %s, line %u:\n" "Required field '%s' not found in\n" "%s starting in line %u and ending in line %u.\n", iter->filename, iter->line, fields[i].name, iter->chunkname, iter->startline, iter->line-1); (void)finishfunc(privdata, *this, last, false, iter); *this = NULL; return RET_ERROR_MISSING; } r = finishfunc(privdata, *this, last, complete, iter); *this = NULL; return r; } char *configfile_expandname(const char *filename, char *fndup) { const char *fromdir; char *n; assert (fndup == NULL || fndup == filename); if (filename[0] == '/' || (filename[0] == '.' && filename[1] == '/')) return fndup?fndup:strdup(filename); if (filename[0] == '~' && filename[1] == '/') { n = calc_dirconcat(getenv("HOME"), filename + 2); free(fndup); return n; } if (filename[0] != '+' || filename[1] == '\0' || filename[2] != '/') { n = calc_dirconcat(global.confdir, filename); free(fndup); return n; } if (filename[1] == 'b') { fromdir = global.basedir; } else if (filename[1] == 'o') { fromdir = global.outdir; } else if (filename[1] == 'c') { fromdir = global.confdir; } else { fprintf(stderr, "Warning: strange filename '%s'!\n", filename); return fndup?fndup:strdup(filename); } n = calc_dirconcat(fromdir, filename + 3); free(fndup); return n; } static retvalue configfile_parse_multi(/*@only@*/char *, bool, configinitfunction, configfinishfunction, const char *, const struct configfield *, size_t, void *, int, void **, struct strlist *); static retvalue configfile_parse_single(/*@only@*/char *filename, bool ignoreunknown, configinitfunction initfunc, configfinishfunction finishfunc, const char *chunkname, const struct configfield *fields, size_t fieldcount, void *privdata, int depth, void 
**last_p, struct strlist *filenames) { bool found[fieldcount]; void *this = NULL; char key[100]; size_t keylen; int c, ret; size_t i; struct configiterator iter; retvalue result, r; bool afterinclude = false; if (strlist_in(filenames, filename)) { if (verbose >= 0) { fprintf(stderr, "Ignoring subsequent inclusion of '%s'!\n", filename); } free(filename); return RET_NOTHING; } iter.filename = filename; r = strlist_add(filenames, filename); if (RET_WAS_ERROR(r)) return r; iter.chunkname = chunkname; iter.line = 0; iter.column = 0; iter.f = fopen(iter.filename, "r"); if (iter.f == NULL) { int e = errno; fprintf(stderr, "Error opening config file '%s': %s(%d)\n", iter.filename, strerror(e), e); return RET_ERRNO(e); } result = RET_NOTHING; do { iter.line++; iter.column = 1; c = fgetc(iter.f); while (c == '#') { do { c = fgetc(iter.f); } while (c != EOF && c != '\n'); iter.line++; c = fgetc(iter.f); } if (c == '\r') { do { c = fgetc(iter.f); } while (c == '\r'); if (c != EOF && c != '\n') { fprintf(stderr, "%s:%u: error parsing configuration file: CR without following LF!\n", iter.filename, iter.line); result = RET_ERROR; break; } } if (c == EOF) break; if (c == '\n') { afterinclude = false; /* Ignore multiple emptye lines */ if (this == NULL) continue; /* finish this chunk, to get ready for the next: */ r = finishchunk(finishfunc, privdata, &iter, fields, fieldcount, found, &this, last_p, true); if (RET_WAS_ERROR(r)) { result = r; break; } continue; } if (afterinclude) { fprintf(stderr, "Warning parsing %s, line %u: no empty line after '!include'-sequence" " might cause ambiguity in the future!\n", iter.filename, iter.line); afterinclude = false; } if (c == '!') { keylen = 0; while ((c = fgetc(iter.f)) != EOF && c >= 'a' && c <= 'z') { iter.column++; key[keylen++] = c; if (keylen >= 10) break; } if (c != ':') { fprintf(stderr, "Error parsing %s, line %u: invalid !-sequence!\n", iter.filename, iter.line); result = RET_ERROR; break; } iter.column++; if (keylen == 7 && 
memcmp(key, "include", 7) == 0) { char *filetoinclude; if (this != NULL) { fprintf(stderr, "Error parsing %s, line %u: '!include' statement within unterminated %s!\n" "(perhaps you forgot to put an empty line before this)\n", iter.filename, iter.line, chunkname); result = RET_ERROR; break; } if (depth > 20) { fprintf(stderr, "Error parsing %s, line %u: too many nested '!include' statements!\n", iter.filename, iter.line); result = RET_ERROR; break; } r = config_getonlyword(&iter, "!include", NULL, &filetoinclude); if (RET_WAS_ERROR(r)) { result = r; break; } filetoinclude = configfile_expandname( filetoinclude, filetoinclude); r = configfile_parse_multi(filetoinclude, ignoreunknown, initfunc, finishfunc, chunkname, fields, fieldcount, privdata, depth + 1, last_p, filenames); if (RET_WAS_ERROR(r)) { result = r; break; } afterinclude = true; } else { key[keylen] = '\0'; fprintf(stderr, "Error parsing %s, line %u: unknown !-sequence '%s'!\n", iter.filename, iter.line, key); result = RET_ERROR; break; } /* ignore all data left of this field */ do { config_overline(&iter); } while (config_nextline(&iter)); continue; } if (c == '\0') { fprintf(stderr, "Error parsing %s, line %u: \\000 character not allowed in config files!\n", iter.filename, iter.line); result = RET_ERROR; break; } if (c == ' ' || c == '\t') { fprintf(stderr, "Error parsing %s, line %u: unexpected white space before keyword!\n", iter.filename, iter.line); result = RET_ERROR; break; } key[0] = c; keylen = 1; while ((c = fgetc(iter.f)) != EOF && c != ':' && c != '\n' && c != '#' && c != '\0') { iter.column++; if (c == ' ') { fprintf(stderr, "Error parsing %s, line %u: Unexpected space in header name!\n", iter.filename, iter.line); result = RET_ERROR; break; } if (c == '\t') { fprintf(stderr, "Error parsing %s, line %u: Unexpected tabulator character in header name!\n", iter.filename, iter.line); result = RET_ERROR; break; } key[keylen++] = c; if (keylen >= 100) break; } if (c != ':') { if (c != ' ' && c != 
'\t') /* newline or end-of-file */ fprintf(stderr, "Error parsing %s, line %u, column %u: Colon expected!\n", iter.filename, iter.line, iter.column); result = RET_ERROR; break; } if (this == NULL) { /* new chunk, initialize everything */ r = initfunc(privdata, *last_p, &this); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { result = r; break; } assert (this != NULL); iter.startline = iter.line; memset(found, 0, sizeof(found)); } for (i = 0 ; i < fieldcount ; i++) { if (keylen != fields[i].namelen) continue; if (strncasecmp(key, fields[i].name, keylen) != 0) continue; break; } if (i >= fieldcount) { key[keylen] = '\0'; if (!ignoreunknown) { fprintf(stderr, "Error parsing %s, line %u: Unknown header '%s'!\n", iter.filename, iter.line, key); result = RET_ERROR_UNKNOWNFIELD; break; } if (verbose >= 0) fprintf(stderr, "Warning parsing %s, line %u: Unknown header '%s'!\n", iter.filename, iter.line, key); } else if (found[i]) { fprintf(stderr, "Error parsing %s, line %u: Second appearance of '%s' in the same chunk!\n", iter.filename, iter.line, fields[i].name); result = RET_ERROR; break; } else found[i] = true; do { c = fgetc(iter.f); iter.column++; } while (c == ' ' || c == '\t'); (void)ungetc(c, iter.f); iter.eol = false; if (i < fieldcount) { r = fields[i].setfunc(privdata, fields[i].name, this, &iter); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; } /* ignore all data left of this field */ do { config_overline(&iter); } while (config_nextline(&iter)); } while (true); if (this != NULL) { r = finishchunk(finishfunc, privdata, &iter, fields, fieldcount, found, &this, last_p, !RET_WAS_ERROR(result)); RET_UPDATE(result, r); } if (ferror(iter.f) != 0) { int e = errno; fprintf(stderr, "Error reading config file '%s': %s(%d)\n", iter.filename, strerror(e), e); r = RET_ERRNO(e); RET_UPDATE(result, r); } ret = fclose(iter.f); if (ret != 0) { int e = errno; fprintf(stderr, "Error closing config file '%s': %s(%d)\n", iter.filename, strerror(e), e); r = RET_ERRNO(e); 
RET_UPDATE(result, r); } return result; } static retvalue configfile_parse_multi(/*@only@*/char *fullfilename, bool ignoreunknown, configinitfunction initfunc, configfinishfunction finishfunc, const char *chunkname, const struct configfield *fields, size_t fieldcount, void *privdata, int depth, void **last_p, struct strlist *filenames) { retvalue result = RET_NOTHING, r; if (isdirectory(fullfilename)) { DIR *dir; struct dirent *de; int e; char *subfilename; dir = opendir(fullfilename); if (dir == NULL) { e = errno; fprintf(stderr, "Error %d opening directory '%s': %s\n", e, fullfilename, strerror(e)); free(fullfilename); return RET_ERRNO(e); } while ((errno = 0, de = readdir(dir)) != NULL) { size_t l; if (de->d_type != DT_REG && de->d_type != DT_LNK && de->d_type != DT_UNKNOWN) continue; if (de->d_name[0] == '.') continue; l = strlen(de->d_name); if (l < 5 || strcmp(de->d_name + l - 5, ".conf") != 0) continue; subfilename = calc_dirconcat(fullfilename, de->d_name); if (FAILEDTOALLOC(subfilename)) { (void)closedir(dir); free(fullfilename); return RET_ERROR_OOM; } r = configfile_parse_single(subfilename, ignoreunknown, initfunc, finishfunc, chunkname, fields, fieldcount, privdata, depth, last_p, filenames); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) { (void)closedir(dir); free(fullfilename); return r; } } e = errno; if (e != 0) { (void)closedir(dir); fprintf(stderr, "Error %d reading directory '%s': %s\n", e, fullfilename, strerror(e)); free(fullfilename); return RET_ERRNO(e); } if (closedir(dir) != 0) { e = errno; fprintf(stderr, "Error %d closing directory '%s': %s\n", e, fullfilename, strerror(e)); free(fullfilename); return RET_ERRNO(e); } free(fullfilename); } else { r = configfile_parse_single(fullfilename, ignoreunknown, initfunc, finishfunc, chunkname, fields, fieldcount, privdata, depth, last_p, filenames); RET_UPDATE(result, r); } return result; } retvalue configfile_parse(const char *filename, bool ignoreunknown, configinitfunction initfunc, 
configfinishfunction finishfunc, const char *chunkname, const struct configfield *fields, size_t fieldcount, void *privdata) { struct strlist filenames; void *last = NULL; retvalue r; char *fullfilename; fullfilename = configfile_expandname(filename, NULL); if (fullfilename == NULL) return RET_ERROR_OOM; strlist_init(&filenames); r = configfile_parse_multi(fullfilename, ignoreunknown, initfunc, finishfunc, chunkname, fields, fieldcount, privdata, 0, &last, &filenames); /* only free filenames last, as they might still be * referenced while running */ strlist_done(&filenames); return r; } static inline int config_nextchar(struct configiterator *iter) { int c; unsigned int realcolumn; c = fgetc(iter->f); realcolumn = iter->column + 1; if (c == '#') { do { c = fgetc(iter->f); realcolumn++; } while (c != '\n' && c != EOF && c != '\r'); } if (c == '\r') { while (c == '\r') { realcolumn++; c = fgetc(iter->f); } if (c != '\n' && c != EOF) { fprintf(stderr, "Warning parsing config file '%s', line '%u', column %u: CR not followed by LF!\n", config_filename(iter), config_line(iter), realcolumn); } } if (c == EOF) { fprintf(stderr, "Warning parsing config file '%s', line '%u': File ending without final LF!\n", config_filename(iter), config_line(iter)); /* fake a proper text file: */ c = '\n'; } iter->column++; if (c == '\n') iter->eol = true; return c; } static inline int config_nextnonspace(struct configiterator *iter) { int c; do { iter->markerline = iter->line; iter->markercolumn = iter->column; if (iter->eol) { if (!config_nextline(iter)) return EOF; } c = config_nextchar(iter); } while (c == '\n' || c == ' ' || c == '\t'); return c; } int config_nextnonspaceinline(struct configiterator *iter) { int c; do { iter->markerline = iter->line; iter->markercolumn = iter->column; if (iter->eol) return EOF; c = config_nextchar(iter); if (c == '\n') return EOF; } while (c == '\r' || c == ' ' || c == '\t'); return c; } #define configparser_errorlast(iter, message, ...) 
\ fprintf(stderr, "Error parsing %s, line %u, column %u: " message "\n", \ iter->filename, iter->markerline, \ iter->markercolumn, ## __VA_ARGS__); #define configparser_error(iter, message, ...) \ fprintf(stderr, "Error parsing %s, line %u, column %u: " message "\n", \ iter->filename, iter->line, \ iter->column, ## __VA_ARGS__); retvalue config_completeword(struct configiterator *iter, char firstc, char **result_p) { size_t size = 0, len = 0; char *value = NULL, *nv; int c = firstc; iter->markerline = iter->line; iter->markercolumn = iter->column; do { if (len + 2 >= size) { nv = realloc(value, size+128); if (FAILEDTOALLOC(nv)) { free(value); return RET_ERROR_OOM; } size += 128; value = nv; } value[len] = c; len++; c = config_nextchar(iter); if (c == '\n') break; } while (c != ' ' && c != '\t'); assert (len > 0); assert (len < size); value[len] = '\0'; nv = realloc(value, len+1); if (nv == NULL) *result_p = value; else *result_p = nv; return RET_OK; } retvalue config_getwordinline(struct configiterator *iter, char **result_p) { int c; c = config_nextnonspaceinline(iter); if (c == EOF) return RET_NOTHING; return config_completeword(iter, c, result_p); } retvalue config_getword(struct configiterator *iter, char **result_p) { int c; c = config_nextnonspace(iter); if (c == EOF) return RET_NOTHING; return config_completeword(iter, c, result_p); } retvalue config_gettimespan(struct configiterator *iter, const char *header, unsigned long *time_p) { long long currentnumber, currentsum = 0; bool empty = true; int c; do { c = config_nextnonspace(iter); if (c == EOF) { if (empty) { configparser_errorlast(iter, "Unexpected end of %s header (value expected).", header); return RET_ERROR; } *time_p = currentsum; return RET_OK; } iter->markerline = iter->line; iter->markercolumn = iter->column; currentnumber = 0; if (c < '0' || c > '9') { configparser_errorlast(iter, "Unexpected character '%c' where a digit was expected in %s header.", (char)c, header); return RET_ERROR; } empty = 
false; do { if (currentnumber > 3660) { configparser_errorlast(iter, "Absurdly long time span (> 100 years) in %s header.", header); return RET_ERROR; } currentnumber *= 10; currentnumber += (c - '0'); c = config_nextchar(iter); } while (c >= '0' && c <= '9'); if (c == ' ' || c == '\t' || c == '\n') c = config_nextnonspace(iter); if (c == 'y') { if (currentnumber > 100) { configparser_errorlast(iter, "Absurdly long time span (> 100 years) in %s header.", header); return RET_ERROR; } currentnumber *= 365*24*60*60; } else if (c == 'm') { if (currentnumber > 1200) { configparser_errorlast(iter, "Absurdly long time span (> 100 years) in %s header.", header); return RET_ERROR; } currentnumber *= 31*24*60*60; } else if (c == 'd') { if (currentnumber > 36600) { configparser_errorlast(iter, "Absurdly long time span (> 100 years) in %s header.", header); return RET_ERROR; } currentnumber *= 24*60*60; } else { if (currentnumber > 36600) { configparser_errorlast(iter, "Absurdly long time span (> 100 years) in %s header.", header); return RET_ERROR; } currentnumber *= 24*60*60; if (c != EOF) { configparser_errorlast(iter, "Unexpected character '%c' where a 'd','m' or 'y' was expected in %s header.", (char)c, header); return RET_ERROR; } } currentsum += currentnumber; } while (true); } retvalue config_getonlyword(struct configiterator *iter, const char *header, checkfunc check, char **result_p) { char *value; retvalue r; r = config_getword(iter, &value); if (r == RET_NOTHING) { configparser_errorlast(iter, "Unexpected end of %s header (value expected).", header); return RET_ERROR; } if (RET_WAS_ERROR(r)) return r; if (config_nextnonspace(iter) != EOF) { configparser_error(iter, "End of %s header expected (but trailing garbage).", header); free(value); return RET_ERROR; } if (check != NULL) { const char *errormessage = check(value); if (errormessage != NULL) { configparser_errorlast(iter, "Malformed %s content '%s': %s", header, value, errormessage); free(value); 
checkerror_free(errormessage); return RET_ERROR; } } *result_p = value; return RET_OK; } retvalue config_getscript(struct configiterator *iter, const char *name, char **value_p) { char *value; retvalue r; r = config_getonlyword(iter, name, NULL, &value); if (RET_IS_OK(r)) { assert (value != NULL && value[0] != '\0'); value = configfile_expandname(value, value); if (FAILEDTOALLOC(value)) return RET_ERROR_OOM; *value_p = value; } return r; } retvalue config_geturl(struct configiterator *iter, const char *header, char **result_p) { char *value, *p; retvalue r; size_t l; r = config_getword(iter, &value); if (r == RET_NOTHING) { configparser_errorlast(iter, "Unexpected end of %s header (value expected).", header); return RET_ERROR; } if (RET_WAS_ERROR(r)) return r; // TODO: think about allowing (escaped) spaces... if (config_nextnonspace(iter) != EOF) { configparser_error(iter, "End of %s header expected (but trailing garbage).", header); free(value); return RET_ERROR; } p = value; while (*p != '\0' && (*p == '_' || *p == '-' || (*p>='a' && *p<='z') || (*p>='A' && *p<='Z') || (*p>='0' && *p<='9'))) { p++; } if (*p != ':') { configparser_errorlast(iter, "Malformed %s field: no colon (must be method:path).", header); free(value); return RET_ERROR; } if (p == value) { configparser_errorlast(iter, "Malformed %s field: transport method name expected (colon is not allowed to be the first character)!", header); free(value); return RET_ERROR; } p++; l = strlen(p); /* remove one leading slash, as we always add one and some apt-methods * are confused with //. 
(end with // if you really want it) */ if (l > 0 && p[l - 1] == '/') p[l - 1] = '\0'; *result_p = value; return RET_OK; } retvalue config_getuniqwords(struct configiterator *iter, const char *header, checkfunc check, struct strlist *result_p) { char *value; retvalue r; struct strlist data; const char *errormessage; strlist_init(&data); while ((r = config_getword(iter, &value)) != RET_NOTHING) { if (RET_WAS_ERROR(r)) { strlist_done(&data); return r; } if (strlist_in(&data, value)) { configparser_errorlast(iter, "Unexpected duplicate '%s' within %s header.", value, header); free(value); strlist_done(&data); return RET_ERROR; } else if (check != NULL && (errormessage = check(value)) != NULL) { configparser_errorlast(iter, "Malformed %s element '%s': %s", header, value, errormessage); checkerror_free(errormessage); free(value); strlist_done(&data); return RET_ERROR; } else { r = strlist_add(&data, value); if (RET_WAS_ERROR(r)) { strlist_done(&data); return r; } } } strlist_move(result_p, &data); return RET_OK; } retvalue config_getinternatomlist(struct configiterator *iter, const char *header, enum atom_type type, checkfunc check, struct atomlist *result_p) { char *value; retvalue r; struct atomlist data; const char *errormessage; atom_t atom; atomlist_init(&data); while ((r = config_getword(iter, &value)) != RET_NOTHING) { if (RET_WAS_ERROR(r)) { atomlist_done(&data); return r; } if (check != NULL && (errormessage = check(value)) != NULL) { configparser_errorlast(iter, "Malformed %s element '%s': %s", header, value, errormessage); checkerror_free(errormessage); free(value); atomlist_done(&data); return RET_ERROR; } r = atom_intern(type, value, &atom); if (RET_WAS_ERROR(r)) return r; r = atomlist_add_uniq(&data, atom); if (r == RET_NOTHING) { configparser_errorlast(iter, "Unexpected duplicate '%s' within %s header.", value, header); free(value); atomlist_done(&data); return RET_ERROR; } free(value); if (RET_WAS_ERROR(r)) { atomlist_done(&data); return r; } } 
atomlist_move(result_p, &data); return RET_OK; } retvalue config_getatom(struct configiterator *iter, const char *header, enum atom_type type, atom_t *result_p) { char *value; retvalue r; atom_t atom; r = config_getword(iter, &value); if (r == RET_NOTHING) { configparser_errorlast(iter, "Unexpected empty '%s' field.", header); r = RET_ERROR_MISSING; } if (RET_WAS_ERROR(r)) return r; atom = atom_find(type, value); if (!atom_defined(atom)) { configparser_errorlast(iter, "Not previously seen %s '%s' within '%s' field.", atomtypes[type], value, header); free(value); return RET_ERROR; } *result_p = atom; free(value); return RET_OK; } retvalue config_getatomlist(struct configiterator *iter, const char *header, enum atom_type type, struct atomlist *result_p) { char *value; retvalue r; struct atomlist data; atom_t atom; atomlist_init(&data); while ((r = config_getword(iter, &value)) != RET_NOTHING) { if (RET_WAS_ERROR(r)) { atomlist_done(&data); return r; } atom = atom_find(type, value); if (!atom_defined(atom)) { configparser_errorlast(iter, "Not previously seen %s '%s' within '%s' header.", atomtypes[type], value, header); free(value); atomlist_done(&data); return RET_ERROR; } r = atomlist_add_uniq(&data, atom); if (r == RET_NOTHING) { configparser_errorlast(iter, "Unexpected duplicate '%s' within %s header.", value, header); free(value); atomlist_done(&data); return RET_ERROR; } free(value); if (RET_WAS_ERROR(r)) { atomlist_done(&data); return r; } } atomlist_move(result_p, &data); return RET_OK; } retvalue config_getsplitatoms(struct configiterator *iter, const char *header, enum atom_type type, struct atomlist *from_p, struct atomlist *into_p) { char *value, *separator; atom_t origin, destination; retvalue r; struct atomlist data_from, data_into; atomlist_init(&data_from); atomlist_init(&data_into); while ((r = config_getword(iter, &value)) != RET_NOTHING) { if (RET_WAS_ERROR(r)) { atomlist_done(&data_from); atomlist_done(&data_into); return r; } separator = 
strchr(value, '>'); if (separator == NULL) { separator = value; destination = atom_find(type, value); origin = destination;; } else if (separator == value) { destination = atom_find(type, separator + 1); origin = destination;; } else if (separator[1] == '\0') { *separator = '\0'; separator = value; destination = atom_find(type, value); origin = destination;; } else { *separator = '\0'; separator++; origin = atom_find(type, value); destination = atom_find(type, separator); } if (!atom_defined(origin)) { configparser_errorlast(iter, "Unknown %s '%s' in %s.", atomtypes[type], value, header); free(value); atomlist_done(&data_from); atomlist_done(&data_into); return RET_ERROR; } if (!atom_defined(destination)) { configparser_errorlast(iter, "Unknown %s '%s' in %s.", atomtypes[type], separator, header); free(value); atomlist_done(&data_from); atomlist_done(&data_into); return RET_ERROR; } free(value); r = atomlist_add(&data_from, origin); if (RET_WAS_ERROR(r)) { atomlist_done(&data_from); atomlist_done(&data_into); return r; } r = atomlist_add(&data_into, destination); if (RET_WAS_ERROR(r)) { atomlist_done(&data_from); atomlist_done(&data_into); return r; } } atomlist_move(from_p, &data_from); atomlist_move(into_p, &data_into); return RET_OK; } retvalue config_getatomsublist(struct configiterator *iter, const char *header, enum atom_type type, struct atomlist *result_p, const struct atomlist *superset, const char *superset_header) { char *value; retvalue r; struct atomlist data; atom_t atom; atomlist_init(&data); while ((r = config_getword(iter, &value)) != RET_NOTHING) { if (RET_WAS_ERROR(r)) { atomlist_done(&data); return r; } atom = atom_find(type, value); if (!atom_defined(atom) || !atomlist_in(superset, atom)) { configparser_errorlast(iter, "'%s' not allowed in %s as it was not in %s.", value, header, superset_header); free(value); atomlist_done(&data); return RET_ERROR; } r = atomlist_add_uniq(&data, atom); if (r == RET_NOTHING) { configparser_errorlast(iter, 
"Unexpected duplicate '%s' within %s header.", value, header); free(value); atomlist_done(&data); return RET_ERROR; } free(value); if (RET_WAS_ERROR(r)) { atomlist_done(&data); return r; } } atomlist_move(result_p, &data); return RET_OK; } retvalue config_getwords(struct configiterator *iter, struct strlist *result_p) { char *value; retvalue r; struct strlist data; strlist_init(&data); while ((r = config_getword(iter, &value)) != RET_NOTHING) { if (RET_WAS_ERROR(r)) { strlist_done(&data); return r; } r = strlist_add(&data, value); if (RET_WAS_ERROR(r)) { strlist_done(&data); return r; } } strlist_move(result_p, &data); return RET_OK; } retvalue config_getsignwith(struct configiterator *iter, const char *name, struct strlist *result_p) { char *value; retvalue r; struct strlist data; int c; strlist_init(&data); c = config_nextnonspace(iter); if (c == EOF) { configparser_errorlast(iter, "Missing value for %s field.", name); return RET_ERROR; } /* if the first character is a '!', a script to start follows */ if (c == '!') { const char *type = "!"; iter->markerline = iter->line; iter->markercolumn = iter->column; c = config_nextchar(iter); if (c == '-') { configparser_errorlast(iter, "'!-' in signwith lines reserved for future usage!\n"); return RET_ERROR; type = "!-"; c = config_nextnonspace(iter); } else if (c == '\n' || c == ' ' || c == '\t') c = config_nextnonspace(iter); if (c == EOF) { configparser_errorlast(iter, "Missing value for %s field.", name); return RET_ERROR; } r = config_completeword(iter, c, &value); if (RET_WAS_ERROR(r)) return r; if (config_nextnonspace(iter) != EOF) { configparser_error(iter, "End of %s header expected (but trailing garbage).", name); free(value); return RET_ERROR; } assert (value != NULL && value[0] != '\0'); value = configfile_expandname(value, value); if (FAILEDTOALLOC(value)) return RET_ERROR_OOM; r = strlist_add_dup(&data, type); if (RET_WAS_ERROR(r)) { free(value); return r; } r = strlist_add(&data, value); if 
(RET_WAS_ERROR(r)) { strlist_done(&data); return r; } strlist_move(result_p, &data); return RET_OK; } /* otherwise each word is stored in the strlist */ r = config_completeword(iter, c, &value); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; r = strlist_add(&data, value); if (RET_WAS_ERROR(r)) return r; while ((r = config_getword(iter, &value)) != RET_NOTHING) { if (RET_WAS_ERROR(r)) { strlist_done(&data); return r; } r = strlist_add(&data, value); if (RET_WAS_ERROR(r)) { strlist_done(&data); return r; } } strlist_move(result_p, &data); return RET_OK; } retvalue config_getsplitwords(struct configiterator *iter, UNUSED(const char *header), struct strlist *from_p, struct strlist *into_p) { char *value, *origin, *destination, *separator; retvalue r; struct strlist data_from, data_into; strlist_init(&data_from); strlist_init(&data_into); while ((r = config_getword(iter, &value)) != RET_NOTHING) { if (RET_WAS_ERROR(r)) { strlist_done(&data_from); strlist_done(&data_into); return r; } separator = strchr(value, '>'); if (separator == NULL) { destination = strdup(value); origin = value; } else if (separator == value) { destination = strdup(separator+1); origin = strdup(separator+1); free(value); } else if (separator[1] == '\0') { *separator = '\0'; destination = strdup(value); origin = value; } else { origin = strndup(value, separator-value); destination = strdup(separator+1); free(value); } if (FAILEDTOALLOC(origin) || FAILEDTOALLOC(destination)) { free(origin); free(destination); strlist_done(&data_from); strlist_done(&data_into); return RET_ERROR_OOM; } r = strlist_add(&data_from, origin); if (RET_WAS_ERROR(r)) { free(destination); strlist_done(&data_from); strlist_done(&data_into); return r; } r = strlist_add(&data_into, destination); if (RET_WAS_ERROR(r)) { strlist_done(&data_from); strlist_done(&data_into); return r; } } strlist_move(from_p, &data_from); strlist_move(into_p, &data_into); return RET_OK; } retvalue config_getconstant(struct configiterator 
*iter, const struct constant *constants, int *result_p) { retvalue r; char *value; const struct constant *c; /* that could be done more in-situ, * but is not runtime-critical at all */ r = config_getword(iter, &value); if (r == RET_NOTHING) return r; if (RET_WAS_ERROR(r)) return r; for (c = constants ; c->name != NULL ; c++) { if (strcmp(c->name, value) == 0) { free(value); *result_p = c->value; return RET_OK; } } free(value); return RET_ERROR_UNKNOWNFIELD; } retvalue config_getflags(struct configiterator *iter, const char *header, const struct constant *constants, bool *flags, bool ignoreunknown, const char *msg) { retvalue r, result = RET_NOTHING; int option = -1; while (true) { r = config_getconstant(iter, constants, &option); if (r == RET_NOTHING) break; if (r == RET_ERROR_UNKNOWNFIELD) { // TODO: would be nice to have the wrong flag here to put it in the error message: if (ignoreunknown) { fprintf(stderr, "Warning: ignored error parsing config file %s, line %u, column %u:\n" "Unknown flag in %s header.%s\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter), header, msg); continue; } fprintf(stderr, "Error parsing config file %s, line %u, column %u:\n" "Unknown flag in %s header.%s\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter), header, msg); } if (RET_WAS_ERROR(r)) return r; assert (option >= 0); flags[option] = true; result = RET_OK; option = -1; } return result; } retvalue config_getall(struct configiterator *iter, char **result_p) { size_t size = 0, len = 0; char *value = NULL, *nv; int c; c = config_nextnonspace(iter); if (c == EOF) return RET_NOTHING; iter->markerline = iter->line; iter->markercolumn = iter->column; do { if (len + 2 >= size) { nv = realloc(value, size+128); if (FAILEDTOALLOC(nv)) { free(value); return RET_ERROR_OOM; } size += 128; value = nv; } value[len] = c; len++; if (iter->eol) { if (!config_nextline(iter)) break; } c = config_nextchar(iter); } while (true); assert (len > 0); 
assert (len < size); while (len > 0 && (value[len-1] == ' ' || value[len-1] == '\t' || value[len-1] == '\n' || value[len-1] == '\r')) len--; value[len] = '\0'; nv = realloc(value, len+1); if (nv == NULL) *result_p = value; else *result_p = nv; return RET_OK; } retvalue config_gettruth(struct configiterator *iter, const char *header, bool *result_p) { char *value = NULL; retvalue r; /* wastefull, but does not happen that often */ r = config_getword(iter, &value); if (r == RET_NOTHING) { configparser_errorlast(iter, "Unexpected empty boolean %s header (something like Yes or No expected).", header); return RET_ERROR; } if (RET_WAS_ERROR(r)) return r; // TODO: check against trailing garbage if (strcasecmp(value, "Yes") == 0) { *result_p = true; free(value); return RET_OK; } if (strcasecmp(value, "No") == 0) { *result_p = false; free(value); return RET_OK; } if (strcmp(value, "1") == 0) { *result_p = true; free(value); return RET_OK; } if (strcmp(value, "0") == 0) { *result_p = false; free(value); return RET_OK; } configparser_errorlast(iter, "Unexpected value in boolean %s header (something like Yes or No expected).", header); free(value); return RET_ERROR; } retvalue config_getnumber(struct configiterator *iter, const char *name, long long *result_p, long long minval, long long maxval) { char *word = NULL; retvalue r; long long value; char *e; r = config_getword(iter, &word); if (r == RET_NOTHING) { configparser_errorlast(iter, "Unexpected end of line (%s number expected).", name); return RET_ERROR; } if (RET_WAS_ERROR(r)) return r; value = strtoll(word, &e, 10); if (e == word) { fprintf(stderr, "Error parsing config file %s, line %u, column %u:\n" "Expected %s number but got '%s'\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter), name, word); free(word); return RET_ERROR; } if (e != NULL && *e != '\0') { unsigned char digit1, digit2, digit3; digit1 = ((unsigned char)(*e))&0x7; digit2 = (((unsigned char)(*e)) >> 3)&0x7; digit3 = (((unsigned 
char)(*e)) >> 6)&0x7; fprintf(stderr, "Error parsing config file %s, line %u, column %u:\n" "Unexpected character \\%01hhu%01hhu%01hhu in %s number '%s'\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter) + (int)(e-word), digit3, digit2, digit1, name, word); free(word); return RET_ERROR; } if (value == LLONG_MAX || value > maxval) { fprintf(stderr, "Error parsing config file %s, line %u, column %u:\n" "Too large %s number '%s'\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter), name, word); free(word); return RET_ERROR; } if (value == LLONG_MIN || value < minval) { fprintf(stderr, "Error parsing config file %s, line %u, column %u:\n" "Too small %s number '%s'\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter), name, word); free(word); return RET_ERROR; } free(word); *result_p = value; return RET_OK; } static retvalue config_getline(struct configiterator *iter, /*@out@*/char **result_p) { size_t size = 0, len = 0; char *value = NULL, *nv; int c; c = config_nextnonspace(iter); if (c == EOF) return RET_NOTHING; iter->markerline = iter->line; iter->markercolumn = iter->column; do { if (len + 2 >= size) { nv = realloc(value, size+128); if (FAILEDTOALLOC(nv)) { free(value); return RET_ERROR_OOM; } size += 128; value = nv; } value[len] = c; len++; c = config_nextchar(iter); } while (c != '\n'); assert (len > 0); assert (len < size); while (len > 0 && (value[len-1] == ' ' || value[len-1] == '\t' || value[len-1] == '\r')) len--; assert (len > 0); value[len] = '\0'; nv = realloc(value, len+1); if (nv == NULL) *result_p = value; else *result_p = nv; return RET_OK; } retvalue config_getlines(struct configiterator *iter, struct strlist *result) { char *line; struct strlist list; retvalue r; strlist_init(&list); do { r = config_getline(iter, &line); if (RET_WAS_ERROR(r)) { strlist_done(&list); return r; } if (r == RET_NOTHING) r = strlist_add_dup(&list, ""); else r = strlist_add(&list, line); if 
(RET_WAS_ERROR(r)) { strlist_done(&list); return r; } } while (config_nextline(iter)); strlist_move(result, &list); return RET_OK; } reprepro-4.13.1/uploaderslist.c0000644000175100017510000011702612152651661013433 00000000000000/* This file is part of "reprepro" * Copyright (C) 2005,2006,2007,2009,2011 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include "error.h" #include "mprintf.h" #include "strlist.h" #include "names.h" #include "atoms.h" #include "signature.h" #include "globmatch.h" #include "uploaderslist.h" #include "configparser.h" #include "ignore.h" struct upload_condition { /* linked list of all sub-nodes */ /*@null@*/struct upload_condition *next; enum upload_condition_type type; const struct upload_condition *next_if_true, *next_if_false; bool accept_if_true, accept_if_false; enum { /* none matching means false, at least one being from * the set means true */ needs_any = 0, /* one not matching means false, otherwise true */ needs_all, /* one not matching means false, * otherwise true iff there is at least one */ needs_existsall, /* having a candidate means true, otherwise false */ needs_anycandidate } needs; union { /* uc_SECTIONS, uc_BINARIES, uc_SOURCENAME, uc_BYHAND, * uc_CODENAME, */ struct strlist strings; /* uc_COMPONENTS, uc_ARCHITECTURES */ struct atomlist atoms; }; 
}; struct upload_conditions { /* condition currently tested */ const struct upload_condition *current; /* current state of top most condition */ bool matching; /* top most condition will not be true unless cleared*/ bool needscandidate; /* always use last next, then decrement */ int count; const struct upload_condition *conditions[]; }; static retvalue upload_conditions_add(struct upload_conditions **c_p, const struct upload_condition *a) { int newcount; struct upload_conditions *n; if (a->type == uc_REJECTED) { /* due to groups, there can be empty conditions. * Don't include those in this list... */ return RET_OK; } if (*c_p == NULL) newcount = 1; else newcount = (*c_p)->count + 1; n = realloc(*c_p, sizeof(struct upload_conditions) + newcount * sizeof(const struct upload_condition*)); if (FAILEDTOALLOC(n)) return RET_ERROR_OOM; n->current = NULL; n->count = newcount; n->conditions[newcount - 1] = a; *c_p = n; return RET_OK; } struct fileposition { const struct filebeingparsed { struct filebeingparsed *next, *includedby; char *filename; unsigned long lineno; FILE *f; int depth; } *file; unsigned long lineno; }; #define set_position(at, fbp) ({ \ (at).file = fbp; \ (at).lineno = fbp->lineno; \ }) #define unset_pos(fileposition) ((fileposition).lineno == 0) #define errorcol(fbp, column, format, ...) ({ \ fprintf(stderr, "%s:%lu:%u: ", (fbp)->filename, (fbp)->lineno, (column)); \ fprintf(stderr, format "\n" , ## __VA_ARGS__); \ print_include_trace((fbp)->includedby); \ }) #define errorline(fbp, format, ...) ({ \ fprintf(stderr, "%s:%lu: ", (fbp)->filename, (fbp)->lineno); \ fprintf(stderr, format "\n" , ## __VA_ARGS__); \ print_include_trace((fbp)->includedby); \ }) #define errorpos(pos, format, ...) 
({ \ fprintf(stderr, "%s:%lu: ", (pos).file->filename, (pos).lineno); \ fprintf(stderr, format "\n" , ## __VA_ARGS__); \ }) static void print_include_trace(struct filebeingparsed *includedby) { for ( ; includedby != NULL ; includedby = includedby->includedby ) { fprintf(stderr, "included from '%s' line %lu\n", includedby->filename, includedby->lineno); } } struct uploadergroup { struct uploadergroup *next; size_t len; char *name; /* NULL terminated list of pointers, or NULL for none */ const struct uploadergroup **memberof; struct upload_condition permissions; /* line numbers (if != 0) to allow some diagnostics */ struct fileposition firstmemberat, emptyat, firstusedat, unusedat; }; struct uploader { struct uploader *next; /* NULL terminated list of pointers, or NULL for none */ const struct uploadergroup **memberof; size_t len; char *reversed_fingerprint; struct upload_condition permissions; bool allow_subkeys; }; static struct uploaders { struct uploaders *next; size_t reference_count; char *filename; size_t filename_len; struct uploadergroup *groups; struct uploader *by_fingerprint; struct upload_condition anyvalidkeypermissions; struct upload_condition unsignedpermissions; struct upload_condition anybodypermissions; } *uploaderslists = NULL; static void uploadpermission_release(struct upload_condition *p) { struct upload_condition *h, *f = NULL; assert (p != NULL); do { h = p->next; switch (p->type) { case uc_BINARIES: case uc_SECTIONS: case uc_SOURCENAME: case uc_BYHAND: case uc_CODENAME: strlist_done(&p->strings); break; case uc_ARCHITECTURES: atomlist_done(&p->atoms); break; case uc_ALWAYS: case uc_REJECTED: break; } free(f); /* next one must be freed: */ f = h; /* and processed: */ p = h; } while (p != NULL); } static void uploadergroup_free(struct uploadergroup *u) { if (u == NULL) return; free(u->name); free(u->memberof); uploadpermission_release(&u->permissions); free(u); } static void uploader_free(struct uploader *u) { if (u == NULL) return; 
free(u->reversed_fingerprint); free(u->memberof); uploadpermission_release(&u->permissions); free(u); } static void uploaders_free(struct uploaders *u) { if (u == NULL) return; while (u->by_fingerprint != NULL) { struct uploader *next = u->by_fingerprint->next; uploader_free(u->by_fingerprint); u->by_fingerprint = next; } while (u->groups != NULL) { struct uploadergroup *next = u->groups->next; uploadergroup_free(u->groups); u->groups = next; } uploadpermission_release(&u->anyvalidkeypermissions); uploadpermission_release(&u->anybodypermissions); uploadpermission_release(&u->unsignedpermissions); free(u->filename); free(u); } void uploaders_unlock(struct uploaders *u) { if (u->reference_count > 1) { u->reference_count--; } else { struct uploaders **p = &uploaderslists; assert (u->reference_count == 1); /* avoid double free: */ if (u->reference_count == 0) return; while (*p != NULL && *p != u) p = &(*p)->next; assert (p != NULL && *p == u); if (*p == u) { *p = u->next; uploaders_free(u); } } } static retvalue upload_conditions_add_group(struct upload_conditions **c_p, const struct uploadergroup **groups) { const struct uploadergroup *group; retvalue r; while ((group = *(groups++)) != NULL) { r = upload_conditions_add(c_p, &group->permissions); if (!RET_WAS_ERROR(r) && group->memberof != NULL) r = upload_conditions_add_group(c_p, group->memberof); if (RET_WAS_ERROR(r)) return r; } return RET_OK; } static retvalue find_key_and_add(struct uploaders *u, struct upload_conditions **c_p, const struct signature *s) { size_t len, i, primary_len; char *reversed; const char *fingerprint, *primary_fingerprint; char *reversed_primary_key; const struct uploader *uploader; retvalue r; assert (u != NULL); fingerprint = s->keyid; assert (fingerprint != NULL); len = strlen(fingerprint); reversed = alloca(len+1); if (FAILEDTOALLOC(reversed)) return RET_ERROR_OOM; for (i = 0 ; i < len ; i++) { char c = fingerprint[len-i-1]; if (c >= 'a' && c <= 'f') c -= 'a' - 'A'; else if (c == 'x' && 
len-i-1 == 1 && fingerprint[0] == '0') break; if ((c < '0' || c > '9') && (c <'A' && c > 'F')) { fprintf(stderr, "Strange character '%c'(=%hhu) in fingerprint '%s'.\n" "Search for appropriate rules in the uploaders file might fail.\n", c, c, fingerprint); break; } reversed[i] = c; } len = i; reversed[len] = '\0'; /* hm, this only sees the key is expired when it is kind of late... */ primary_fingerprint = s->primary_keyid; primary_len = strlen(primary_fingerprint); reversed_primary_key = alloca(len+1); if (FAILEDTOALLOC(reversed_primary_key)) return RET_ERROR_OOM; for (i = 0 ; i < primary_len ; i++) { char c = primary_fingerprint[primary_len-i-1]; if (c >= 'a' && c <= 'f') c -= 'a' - 'A'; else if (c == 'x' && primary_len-i-1 == 1 && primary_fingerprint[0] == '0') break; if ((c < '0' || c > '9') && (c <'A' && c > 'F')) { fprintf(stderr, "Strange character '%c'(=%hhu) in fingerprint/key-id '%s'.\n" "Search for appropriate rules in the uploaders file might fail.\n", c, c, primary_fingerprint); break; } reversed_primary_key[i] = c; } primary_len = i; reversed_primary_key[primary_len] = '\0'; for (uploader = u->by_fingerprint ; uploader != NULL ; uploader = uploader->next) { /* TODO: allow ignoring */ if (s->state != sist_valid) continue; if (uploader->allow_subkeys) { if (uploader->len > primary_len) continue; if (memcmp(uploader->reversed_fingerprint, reversed_primary_key, uploader->len) != 0) continue; } else { if (uploader->len > len) continue; if (memcmp(uploader->reversed_fingerprint, reversed, uploader->len) != 0) continue; } r = upload_conditions_add(c_p, &uploader->permissions); if (!RET_WAS_ERROR(r) && uploader->memberof != NULL) r = upload_conditions_add_group(c_p, uploader->memberof); if (RET_WAS_ERROR(r)) return r; /* no break here, as a key might match * multiple specifications of different length */ } return RET_OK; } retvalue uploaders_permissions(struct uploaders *u, const struct signatures *signatures, struct upload_conditions **c_p) { struct 
upload_conditions *conditions = NULL; retvalue r; int j; r = upload_conditions_add(&conditions, &u->anybodypermissions); if (RET_WAS_ERROR(r)) return r; if (signatures == NULL) { /* signatures.count might be 0 meaning there is * something lile a gpg header but we could not get * keys, because of a gpg error or because of being * compiling without libgpgme */ r = upload_conditions_add(&conditions, &u->unsignedpermissions); if (RET_WAS_ERROR(r)) { free(conditions); return r; } } if (signatures != NULL && signatures->validcount > 0) { r = upload_conditions_add(&conditions, &u->anyvalidkeypermissions); if (RET_WAS_ERROR(r)) { free(conditions); return r; } } if (signatures != NULL) { for (j = 0 ; j < signatures->count ; j++) { r = find_key_and_add(u, &conditions, &signatures->signatures[j]); if (RET_WAS_ERROR(r)) { free(conditions); return r; } } } *c_p = conditions; return RET_OK; } /* uc_FAILED means rejected, uc_ACCEPTED means can go in */ enum upload_condition_type uploaders_nextcondition(struct upload_conditions *c) { if (c->current != NULL) { if (c->matching && !c->needscandidate) { if (c->current->accept_if_true) return uc_ACCEPTED; c->current = c->current->next_if_true; } else { if (c->current->accept_if_false) return uc_ACCEPTED; c->current = c->current->next_if_false; } } /* return the first non-trivial one left: */ while (true) { while (c->current != NULL) { assert (c->current->type > uc_REJECTED); if (c->current->type == uc_ALWAYS) { if (c->current->accept_if_true) return uc_ACCEPTED; c->current = c->current->next_if_true; } else { /* empty set fullfills all conditions, but not an exists condition */ switch (c->current->needs) { case needs_any: c->matching = false; c->needscandidate = false; break; case needs_all: c->matching = true; c->needscandidate = false; break; case needs_existsall: case needs_anycandidate: c->matching = true; c->needscandidate = true; break; } return c->current->type; } } if (c->count == 0) return uc_REJECTED; c->count--; c->current = 
c->conditions[c->count]; } /* not reached */ } static bool match_namecheck(const struct strlist *strings, const char *name) { int i; for (i = 0 ; i < strings->count ; i++) { if (globmatch(name, strings->values[i])) return true; } return false; } bool uploaders_verifystring(struct upload_conditions *conditions, const char *name) { const struct upload_condition *c = conditions->current; assert (c != NULL); assert (c->type == uc_BINARIES || c->type == uc_SECTIONS || c->type == uc_CODENAME || c->type == uc_SOURCENAME || c->type == uc_BYHAND); conditions->needscandidate = false; switch (conditions->current->needs) { case needs_all: case needs_existsall: /* once one condition is false, the case is settled */ if (conditions->matching && !match_namecheck(&c->strings, name)) conditions->matching = false; /* but while it is true, more info is needed */ return conditions->matching; case needs_any: /* once one condition is true, the case is settled */ if (!conditions->matching && match_namecheck(&c->strings, name)) conditions->matching = true; conditions->needscandidate = false; /* but while it is false, more info is needed */ return !conditions->matching; case needs_anycandidate: /* we are settled, no more information needed */ return false; } /* NOT REACHED */ assert (conditions->current->needs != conditions->current->needs); } bool uploaders_verifyatom(struct upload_conditions *conditions, atom_t atom) { const struct upload_condition *c = conditions->current; assert (c != NULL); assert (c->type == uc_ARCHITECTURES); conditions->needscandidate = false; switch (conditions->current->needs) { case needs_all: case needs_existsall: /* once one condition is false, the case is settled */ if (conditions->matching && !atomlist_in(&c->atoms, atom)) conditions->matching = false; /* but while it is true, more info is needed */ return conditions->matching; case needs_any: /* once one condition is true, the case is settled */ if (!conditions->matching && atomlist_in(&c->atoms, atom)) 
conditions->matching = true; /* but while it is false, more info is needed */ return !conditions->matching; case needs_anycandidate: /* we are settled, no more information needed */ return false; } /* NOT REACHED */ assert (conditions->current->needs != conditions->current->needs); } static struct uploader *addfingerprint(struct uploaders *u, const char *fingerprint, size_t len, bool allow_subkeys) { size_t i; char *reversed = malloc(len+1); struct uploader *uploader, **last; if (FAILEDTOALLOC(reversed)) return NULL; for (i = 0 ; i < len ; i++) { char c = fingerprint[len-i-1]; if (c >= 'a' && c <= 'f') c -= 'a' - 'A'; assert ((c >= '0' && c <= '9') || (c >= 'A' || c <= 'F')); reversed[i] = c; } reversed[len] = '\0'; last = &u->by_fingerprint; for (uploader = u->by_fingerprint ; uploader != NULL ; uploader = *(last = &uploader->next)) { if (uploader->len != len) continue; if (memcmp(uploader->reversed_fingerprint, reversed, len) != 0) continue; if (uploader->allow_subkeys != allow_subkeys) continue; free(reversed); return uploader; } assert (*last == NULL); uploader = zNEW(struct uploader); if (FAILEDTOALLOC(uploader)) return NULL; *last = uploader; uploader->reversed_fingerprint = reversed; uploader->len = len; uploader->allow_subkeys = allow_subkeys; return uploader; } static struct uploadergroup *addgroup(struct uploaders *u, const char *name, size_t len) { struct uploadergroup *group, **last; last = &u->groups; for (group = u->groups ; group != NULL ; group = *(last = &group->next)) { if (group->len != len) continue; if (memcmp(group->name, name, len) != 0) continue; return group; } assert (*last == NULL); group = zNEW(struct uploadergroup); if (FAILEDTOALLOC(group)) return NULL; group->name = strndup(name, len); group->len = len; if (FAILEDTOALLOC(group->name)) { free(group); return NULL; } *last = group; return group; } static inline const char *overkey(const char *p) { while ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f') || (*p >= 'A' && *p <= 'F')) { 
p++; } return p; } static retvalue parse_stringpart(/*@out@*/struct strlist *strings, const char **pp, const struct filebeingparsed *fbp, int column) { const char *p = *pp; retvalue r; strlist_init(strings); do { const char *startp, *endp; char *n; while (*p != '\0' && xisspace(*p)) p++; if (*p != '\'') { errorcol(fbp, column + (int)(p - *pp), "starting \"'\" expected!"); return RET_ERROR; } p++; startp = p; while (*p != '\0' && *p != '\'') p++; if (*p == '\0') { errorcol(fbp, column + (int)(p - *pp), "closing \"'\" expected!"); return RET_ERROR; } assert (*p == '\''); endp = p; p++; n = strndup(startp, endp - startp); if (FAILEDTOALLOC(n)) return RET_ERROR_OOM; r = strlist_adduniq(strings, n); if (RET_WAS_ERROR(r)) return r; while (*p != '\0' && xisspace(*p)) p++; column += (p - *pp); *pp = p; if (**pp == '|') { p++; } } while (**pp == '|'); *pp = p; return RET_OK; } static retvalue parse_architectures(/*@out@*/struct atomlist *atoms, const char **pp, const struct filebeingparsed *fbp, int column) { const char *p = *pp; retvalue r; atomlist_init(atoms); do { const char *startp, *endp; atom_t atom; while (*p != '\0' && xisspace(*p)) p++; if (*p != '\'') { errorcol(fbp, column + (int)(p - *pp), "starting \"'\" expected!"); return RET_ERROR; } p++; startp = p; while (*p != '\0' && *p != '\'' && *p != '*' && *p != '?') p++; if (*p == '*' || *p == '?') { errorcol(fbp, column + (int)(p - *pp), "Wildcards are not allowed in architectures!"); return RET_ERROR; } if (*p == '\0') { errorcol(fbp, column + (int)(p - *pp), "closing \"'\" expected!"); return RET_ERROR; } assert (*p == '\''); endp = p; p++; atom = architecture_find_l(startp, endp - startp); if (!atom_defined(atom)) { errorcol(fbp, column + (int)(startp-*pp), "Unknown architecture '%.*s'! 
(Did you mistype?)", (int)(endp-startp), startp); return RET_ERROR; } r = atomlist_add_uniq(atoms, atom); if (RET_WAS_ERROR(r)) return r; while (*p != '\0' && xisspace(*p)) p++; column += (p - *pp); *pp = p; if (**pp == '|') { p++; } } while (**pp == '|'); *pp = p; return RET_OK; } static retvalue parse_condition(const struct filebeingparsed *fbp, int column, const char **pp, /*@out@*/struct upload_condition *condition) { const char *p = *pp; struct upload_condition *fallback, *last, *or_scope; setzero(struct upload_condition, condition); /* allocate a new fallback-node: * (this one is used to make it easier to concatenate those decision * trees, especially it keeps open the possibility to have deny * decisions) */ fallback = zNEW(struct upload_condition); if (FAILEDTOALLOC(fallback)) return RET_ERROR_OOM; fallback->type = uc_ALWAYS; assert(!fallback->accept_if_true); /* the queue with next has all nodes, so they can be freed * (or otherwise modified) */ condition->next = fallback; last = condition; or_scope = condition; while (true) { if (strncmp(p, "not", 3) == 0 && xisspace(p[3])) { p += 3; while (*p != '\0' && xisspace(*p)) p++; /* negate means false is good and true * is bad: */ last->accept_if_false = true; last->accept_if_true = false; last->next_if_false = NULL; last->next_if_true = fallback; } else { last->accept_if_false = false; last->accept_if_true = true; last->next_if_false = fallback; last->next_if_true = NULL; } if (p[0] == '*' && xisspace(p[1])) { last->type = uc_ALWAYS; p++; } else if (strncmp(p, "architectures", 13) == 0 && strchr(" \t'", p[13]) != NULL) { retvalue r; last->type = uc_ARCHITECTURES; last->needs = needs_all; p += 13; while (*p != '\0' && xisspace(*p)) p++; if (strncmp(p, "contain", 7) == 0 && strchr(" \t'", p[7]) != NULL) { last->needs = needs_any; p += 7; } r = parse_architectures(&last->atoms, &p, fbp, column + (p-*pp)); if (RET_WAS_ERROR(r)) { uploadpermission_release(condition); return r; } } else if (strncmp(p, "binaries", 8) 
== 0 && strchr(" \t'", p[8]) != NULL) { retvalue r; last->type = uc_BINARIES; last->needs = needs_all; p += 8; while (*p != '\0' && xisspace(*p)) p++; if (strncmp(p, "contain", 7) == 0 && strchr(" \t'", p[7]) != NULL) { last->needs = needs_any; p += 7; } r = parse_stringpart(&last->strings, &p, fbp, column + (p-*pp)); if (RET_WAS_ERROR(r)) { uploadpermission_release(condition); return r; } } else if (strncmp(p, "byhand", 6) == 0 && strchr(" \t'", p[6]) != NULL) { retvalue r; last->type = uc_BYHAND; last->needs = needs_existsall; p += 8; while (*p != '\0' && xisspace(*p)) p++; if (*p != '\'') { strlist_init(&last->strings); r = RET_OK; } else r = parse_stringpart(&last->strings, &p, fbp, column + (p-*pp)); if (RET_WAS_ERROR(r)) { uploadpermission_release(condition); return r; } } else if (strncmp(p, "sections", 8) == 0 && strchr(" \t'", p[8]) != NULL) { retvalue r; last->type = uc_SECTIONS; last->needs = needs_all; p += 8; while (*p != '\0' && xisspace(*p)) p++; if (strncmp(p, "contain", 7) == 0 && strchr(" \t'", p[7]) != NULL) { last->needs = needs_any; p += 7; } r = parse_stringpart(&last->strings, &p, fbp, column + (p-*pp)); if (RET_WAS_ERROR(r)) { uploadpermission_release(condition); return r; } } else if (strncmp(p, "source", 6) == 0 && strchr(" \t'", p[6]) != NULL) { retvalue r; last->type = uc_SOURCENAME; p += 6; r = parse_stringpart(&last->strings, &p, fbp, column + (p-*pp)); if (RET_WAS_ERROR(r)) { uploadpermission_release(condition); return r; } } else if (strncmp(p, "distribution", 12) == 0 && strchr(" \t'", p[12]) != NULL) { retvalue r; last->type = uc_CODENAME; p += 12; r = parse_stringpart(&last->strings, &p, fbp, column + (p-*pp)); if (RET_WAS_ERROR(r)) { uploadpermission_release(condition); return r; } } else { errorcol(fbp, column + (int)(p - *pp), "condition expected after 'allow' keyword!"); uploadpermission_release(condition); return RET_ERROR; } while (*p != '\0' && xisspace(*p)) p++; if (strncmp(p, "and", 3) == 0 && xisspace(p[3])) { struct 
upload_condition *n, *c; p += 3; n = zNEW(struct upload_condition); if (FAILEDTOALLOC(n)) { uploadpermission_release(condition); return RET_ERROR_OOM; } /* everything that yet made it succeed makes it need * to check this condition: */ for (c = condition ; c != NULL ; c = c->next) { if (c->accept_if_true) { c->next_if_true = n; c->accept_if_true = false; } if (c->accept_if_false) { c->next_if_false = n; c->accept_if_false = false; } } /* or will only bind to this one */ or_scope = n; /* add it to queue: */ assert (last->next == fallback); n->next = fallback; last->next = n; last = n; } else if (strncmp(p, "or", 2) == 0 && xisspace(p[2])) { struct upload_condition *n, *c; p += 2; n = zNEW(struct upload_condition); if (FAILEDTOALLOC(n)) { uploadpermission_release(condition); return RET_ERROR_OOM; } /* everything in current scope that made it fail * now makes it check this: (currently that will * only be true at most for c == last, but with * parantheses this all will be needed) */ for (c = or_scope ; c != NULL ; c = c->next) { if (c->next_if_true == fallback) c->next_if_true = n; if (c->next_if_false == fallback) c->next_if_false = n; } /* add it to queue: */ assert (last->next == fallback); n->next = fallback; last->next = n; last = n; } else if (strncmp(p, "by", 2) == 0 && xisspace(p[2])) { p += 2; break; } else { errorcol(fbp, column + (int)(p - *pp), "'by','and' or 'or' keyword expected!"); uploadpermission_release(condition); setzero(struct upload_condition, condition); return RET_ERROR; } while (*p != '\0' && xisspace(*p)) p++; } *pp = p; return RET_OK; } static void condition_add(struct upload_condition *permissions, struct upload_condition *c) { if (permissions->next == NULL) { /* first condition, as no fallback yet allocated */ *permissions = *c; setzero(struct upload_condition, c); } else { struct upload_condition *last; last = permissions->next; assert (last != NULL); while (last->next != NULL) last = last->next; /* the very last is always the 
fallback-node to which all * other conditions fall back if they have no decision */ assert(last->type = uc_ALWAYS); assert(!last->accept_if_true); *last = *c; setzero(struct upload_condition, c); } } static retvalue find_group(struct uploadergroup **g, struct uploaders *u, const char **pp, const struct filebeingparsed *fbp, const char *buffer) { const char *p, *q; struct uploadergroup *group; p = *pp; q = p; while ((*q >= 'a' && *q <= 'z') || (*q >= 'A' && *q <= 'Z') || (*q >= '0' && *q <= '9') || *q == '-' || *q == '_' || *q == '.') q++; if (*p == '\0' || (q-p == 3 && memcmp(p, "add", 3) == 0) || (q-p == 5 && memcmp(p, "empty", 5) == 0) || (q-p == 6 && memcmp(p, "unused", 6) == 0) || (q-p == 8 && memcmp(p, "contains", 8) == 0)) { errorcol(fbp, (int)(1 + p - buffer), "group name expected!"); return RET_ERROR; } if (*q != '\0' && *q != ' ' && *q != '\t') { errorcol(fbp, (int)(1 +p -buffer), "invalid group name!"); return RET_ERROR; } *pp = q; group = addgroup(u, p, q-p); if (FAILEDTOALLOC(group)) return RET_ERROR_OOM; *g = group; return RET_OK; } static retvalue find_uploader(struct uploader **u_p, struct uploaders *u, const char *p, const struct filebeingparsed *fbp, const char *buffer) { struct uploader *uploader; bool allow_subkeys = false; const char *q, *qq; if (p[0] == '0' && p[1] == 'x') p += 2; q = overkey(p); if (*p == '\0' || (*q !='\0' && !xisspace(*q) && *q != '+') || q==p) { errorcol(fbp, (int)(1 + q - buffer), "key id or fingerprint expected!"); return RET_ERROR; } if (q - p > 16) { if (!IGNORABLE(longkeyid)) errorcol(fbp, (int)(1 + p - buffer), "key id most likely too long for gpgme to understand\n" "(at most 16 hex digits should be safe. 
Use --ignore=longkeyid to ignore)"); } qq = q; while (xisspace(*qq)) qq++; if (*qq == '+') { qq++; allow_subkeys = true; } while (xisspace(*qq)) qq++; if (*qq != '\0') { errorcol(fbp, (int)(1 +qq - buffer), "unexpected data after 'key ' statement!"); if (*q == ' ') fprintf(stderr, " Hint: no spaces allowed in fingerprint specification.\n"); return RET_ERROR; } uploader = addfingerprint(u, p, q-p, allow_subkeys); if (FAILEDTOALLOC(uploader)) return RET_ERROR_OOM; *u_p = uploader; return RET_OK; } static retvalue include_group(struct uploadergroup *group, const struct uploadergroup ***memberof_p, const struct filebeingparsed *fbp) { size_t n; const struct uploadergroup **memberof = *memberof_p; n = 0; if (memberof != NULL) { while (memberof[n] != NULL) { if (memberof[n] == group) { errorline(fbp, "member added to group %s a second time!", group->name); return RET_ERROR; } n++; } } if (n == 0 || (n & 15) == 15) { /* let's hope no static checker is confused here ;-> */ memberof = realloc(memberof, ((n+17)&~15) * sizeof(struct uploadergroup*)); if (FAILEDTOALLOC(memberof)) return RET_ERROR_OOM; *memberof_p = memberof; } memberof[n] = group; memberof[n+1] = NULL; if (unset_pos(group->firstmemberat)) set_position(group->firstmemberat, fbp);; if (!unset_pos(group->emptyat)) { errorline(fbp, "cannot add members to group '%s' marked empty!", group->name); errorpos(group->emptyat, "here it was marked as empty"); return RET_ERROR; } return RET_OK; } static bool is_included_in(const struct uploadergroup *needle, const struct uploadergroup *chair) { const struct uploadergroup **g; if (needle->memberof == NULL) return false; for (g = needle->memberof ; *g != NULL ; g++) { if (*g == chair) return true; if (is_included_in(*g, chair)) return true; } return false; } static inline bool trim_line(const struct filebeingparsed *fbp, char *buffer) { size_t l = strlen(buffer); if (l == 0 || buffer[l-1] != '\n') { if (l >= 1024) errorcol(fbp, 1024, "Overlong line!"); else errorcol(fbp, 
(int)l, "Unterminated line!"); return false; } do { buffer[--l] = '\0'; } while (l > 0 && xisspace(buffer[l-1])); return true; } static inline retvalue parseuploaderline(char *buffer, const struct filebeingparsed *fbp, struct uploaders *u) { retvalue r; const char *p, *q; struct upload_condition condition; p = buffer; while (*p != '\0' && xisspace(*p)) p++; if (*p == '\0' || *p == '#') return RET_NOTHING; if (strncmp(p, "group", 5) == 0 && (*p == '\0' || xisspace(p[5]))) { struct uploadergroup *group; p += 5; while (*p != '\0' && xisspace(*p)) p++; r = find_group(&group, u, &p, fbp, buffer); if (RET_WAS_ERROR(r)) return r; while (*p != '\0' && xisspace(*p)) p++; if (strncmp(p, "add", 3) == 0) { struct uploader *uploader; p += 3; while (*p != '\0' && xisspace(*p)) p++; r = find_uploader(&uploader, u, p, fbp, buffer); if (RET_WAS_ERROR(r)) return r; r = include_group(group, &uploader->memberof, fbp); if (RET_WAS_ERROR(r)) return r; return RET_OK; } else if (strncmp(p, "contains", 8) == 0) { struct uploadergroup *member; p += 8; while (*p != '\0' && xisspace(*p)) p++; q = p; r = find_group(&member, u, &q, fbp, buffer); if (RET_WAS_ERROR(r)) return r; if (group == member) { errorline(fbp, "cannot add group '%s' to itself!", member->name); return RET_ERROR; } if (is_included_in(group, member)) { /* perhaps offer a winning coupon for the first * one triggering this? 
*/ errorline(fbp, "cannot add group '%s' to group '%s' as the later is already member of the former!", member->name, group->name); return RET_ERROR; } r = include_group(group, &member->memberof, fbp); if (RET_WAS_ERROR(r)) return r; if (unset_pos(member->firstusedat)) set_position(member->firstusedat, fbp);; if (!unset_pos(member->unusedat)) { errorline(fbp, "cannot use group '%s' marked as unused!", member->name); errorpos(member->unusedat, "here it got marked as unused."); return RET_ERROR; } } else if (strncmp(p, "empty", 5) == 0) { q = p + 5; if (!unset_pos(group->emptyat)) { errorline(fbp, "group '%s' marked as empty again", group->name); errorpos(group->emptyat, "here it was marked empty the first time"); } if (!unset_pos(group->firstmemberat)) { errorline(fbp, "group '%s' cannot be marked empty as it already has members!", group->name); errorpos(group->firstmemberat, "here a member was added the first time"); return RET_ERROR; } set_position(group->emptyat, fbp);; } else if (strncmp(p, "unused", 6) == 0) { q = p + 6; if (!unset_pos(group->unusedat)) { errorline(fbp, "group '%s' marked as unused again!", group->name); errorpos(group->unusedat, "here it was already marked unused"); } if (!unset_pos(group->firstusedat)) { errorline(fbp, "group '%s' cannot be marked unused as it was already used!", group->name); errorpos(group->firstusedat, "here it was used the first time"); return RET_ERROR; } set_position(group->unusedat, fbp);; } else { errorcol(fbp, (int)(1 + p - buffer), "missing 'add', 'contains', 'unused' or 'empty' keyword."); return RET_ERROR; } while (*q != '\0' && xisspace(*q)) q++; if (*q != '\0') { errorcol(fbp, (int)(1 + p - buffer), "unexpected data at end of group statement!"); return RET_ERROR; } return RET_OK; } if (strncmp(p, "allow", 5) != 0 || !xisspace(p[5])) { errorcol(fbp, (int)(1 +p - buffer), "'allow' or 'group' keyword expected!" 
" (no other statement has yet been implemented)"); return RET_ERROR; } p+=5; while (*p != '\0' && xisspace(*p)) p++; r = parse_condition(fbp, (1+p-buffer), &p, &condition); if (RET_WAS_ERROR(r)) return r; while (*p != '\0' && xisspace(*p)) p++; if (strncmp(p, "key", 3) == 0 && (p[3] == '\0' || xisspace(p[3]))) { struct uploader *uploader; p += 3; while (*p != '\0' && xisspace(*p)) p++; r = find_uploader(&uploader, u, p, fbp, buffer); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { uploadpermission_release(&condition); return r; } condition_add(&uploader->permissions, &condition); } else if (strncmp(p, "group", 5) == 0 && (p[5] == '\0' || xisspace(p[5]))) { struct uploadergroup *group; p += 5; while (*p != '\0' && xisspace(*p)) p++; r = find_group(&group, u, &p, fbp, buffer); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { uploadpermission_release(&condition); return r; } assert (group != NULL); while (*p != '\0' && xisspace(*p)) p++; if (*p != '\0') { errorcol(fbp, (int)(1 + p - buffer), "unexpected data at end of group statement!"); uploadpermission_release(&condition); return RET_ERROR; } if (unset_pos(group->firstusedat)) set_position(group->firstusedat, fbp);; if (!unset_pos(group->unusedat)) { errorline(fbp, "cannot use group '%s' marked as unused!", group->name); errorpos(group->unusedat, "here it was marked as unused."); uploadpermission_release(&condition); return RET_ERROR; } condition_add(&group->permissions, &condition); } else if (strncmp(p, "unsigned", 8) == 0 && (p[8]=='\0' || xisspace(p[8]))) { p+=8; if (*p != '\0') { errorcol(fbp, (int)(1 + p - buffer), "unexpected data after 'unsigned' statement!"); uploadpermission_release(&condition); return RET_ERROR; } condition_add(&u->unsignedpermissions, &condition); } else if (strncmp(p, "any", 3) == 0 && xisspace(p[3])) { p+=3; while (*p != '\0' && xisspace(*p)) p++; if (strncmp(p, "key", 3) != 0 || (p[3]!='\0' && !xisspace(p[3]))) { errorcol(fbp, (int)(1 + p - buffer), "'key' keyword expected after 
'any' keyword!"); uploadpermission_release(&condition); return RET_ERROR; } p += 3; if (*p != '\0') { errorcol(fbp, (int)(1 + p - buffer), "unexpected data after 'any key' statement!"); uploadpermission_release(&condition); return RET_ERROR; } condition_add(&u->anyvalidkeypermissions, &condition); } else if (strncmp(p, "anybody", 7) == 0 && (p[7] == '\0' || xisspace(p[7]))) { p+=7; while (*p != '\0' && xisspace(*p)) p++; if (*p != '\0') { errorcol(fbp, (int)(1 + p - buffer), "unexpected data after 'anybody' statement!"); uploadpermission_release(&condition); return RET_ERROR; } condition_add(&u->anybodypermissions, &condition); } else { errorcol(fbp, (int)(1 + p - buffer), "'key', 'unsigned', 'anybody' or 'any key' expected!"); uploadpermission_release(&condition); return RET_ERROR; } return RET_OK; } static retvalue openfiletobeparsed(struct filebeingparsed *includedby, const char *filename, struct filebeingparsed **fbp_p, struct filebeingparsed **root_p) { struct filebeingparsed *fbp; if (includedby != NULL && includedby->depth > 100) { errorcol(includedby, 0, "Too deeply nested include directives (> 100). 
Built some recursion?"); return RET_ERROR; } fbp = calloc(1, sizeof(struct filebeingparsed)); if (FAILEDTOALLOC(fbp)) return RET_ERROR_OOM; fbp->filename = configfile_expandname(filename, NULL); if (FAILEDTOALLOC(fbp->filename)) { free(fbp); return RET_ERROR_OOM; } fbp->f = fopen(fbp->filename, "r"); if (fbp->f == NULL) { int e = errno; fprintf(stderr, "Error opening '%s': %s\n", fbp->filename, strerror(e)); print_include_trace(includedby); free(fbp->filename); free(fbp); return RET_ERRNO(e); } fbp->depth = (includedby != NULL)?(includedby->depth+1):0; fbp->includedby = includedby; *fbp_p = fbp; fbp->next = *root_p; *root_p = fbp; return RET_OK; } static void filebeingparsed_free(struct filebeingparsed *fbp) { while (fbp != NULL) { struct filebeingparsed *n = fbp->next; if (fbp->f != NULL) (void)fclose(fbp->f); free(fbp->filename); free(fbp); fbp = n; } } static inline retvalue close_file(struct filebeingparsed **p) { int i; struct filebeingparsed *fbp = *p; assert (p != NULL); *p = fbp->includedby; i = fclose(fbp->f); fbp->f = NULL; if (i != 0) { int e = errno; fprintf(stderr, "Error reading '%s': %s\n", fbp->filename, strerror(e)); print_include_trace(fbp->includedby); return RET_ERRNO(e); } else return RET_OK; } static inline retvalue include_file(struct filebeingparsed **fbp_p, struct filebeingparsed **root_p, const char *buffer) { const char *filename = buffer; while (*filename != '\0' && xisspace(*filename)) filename++; if (*filename == '\0') { errorcol(*fbp_p, 1+(int)(filename - buffer), "Missing filename after include directive!"); return RET_ERROR; } return openfiletobeparsed(*fbp_p, filename, fbp_p, root_p); } static retvalue uploaders_load(/*@out@*/struct uploaders **list, const char *fname) { char buffer[1025]; struct uploaders *u; struct uploadergroup *g; retvalue r; struct filebeingparsed *fbp = NULL; struct filebeingparsed *filesroot = NULL; r = openfiletobeparsed(NULL, fname, &fbp, &filesroot); if (RET_WAS_ERROR(r)) return r; u = zNEW(struct 
uploaders); if (FAILEDTOALLOC(u)) { filebeingparsed_free(filesroot); return RET_ERROR_OOM; } /* reject by default */ u->unsignedpermissions.type = uc_ALWAYS; u->anyvalidkeypermissions.type = uc_ALWAYS; u->anybodypermissions.type = uc_ALWAYS; while (fbp != NULL) { while (fgets(buffer, 1024, fbp->f) != NULL) { fbp->lineno++; if (!trim_line(fbp, buffer)) { filebeingparsed_free(filesroot); uploaders_free(u); return RET_ERROR; } if (strncmp(buffer, "include", 7) == 0) r = include_file(&fbp, &filesroot, buffer + 7); else r = parseuploaderline(buffer, fbp, u); if (RET_WAS_ERROR(r)) { filebeingparsed_free(filesroot); uploaders_free(u); return r; } } r = close_file(&fbp); if (RET_WAS_ERROR(r)) { filebeingparsed_free(filesroot); uploaders_free(u); return r; } } for (g = u->groups ; g != NULL ; g = g->next) { if ((unset_pos(g->firstmemberat) && unset_pos(g->emptyat)) && !unset_pos(g->firstusedat)) errorpos(g->firstusedat, "Warning: group '%s' gets used but never gets any members", g->name); if ((unset_pos(g->firstusedat) && unset_pos(g->unusedat)) && !unset_pos(g->firstmemberat)) // TODO: avoid this if the group is from a include? 
errorpos(g->firstmemberat, "Warning: group '%s' gets members but is not used in any rule", g->name); } assert (fbp == NULL); /* only free file information once filenames are no longer needed: */ filebeingparsed_free(filesroot); *list = u; return RET_OK; } retvalue uploaders_get(/*@out@*/struct uploaders **list, const char *filename) { retvalue r; struct uploaders *u; size_t len; assert (filename != NULL); len = strlen(filename); u = uploaderslists; while (u != NULL && (u->filename_len != len || memcmp(u->filename, filename, len) != 0)) u = u->next; if (u == NULL) { r = uploaders_load(&u, filename); if (!RET_IS_OK(r)) return r; assert (u != NULL); u->filename = strdup(filename); if (FAILEDTOALLOC(u->filename)) { uploaders_free(u); return RET_ERROR_OOM; } u->filename_len = len; u->next = uploaderslists; u->reference_count = 1; uploaderslists = u; } else u->reference_count++; *list = u; return RET_OK; } reprepro-4.13.1/aptmethod.c0000644000175100017510000010160712152651661012524 00000000000000/* This file is part of "reprepro" * Copyright (C) 2004,2005,2007,2008,2009,2012 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "mprintf.h" #include "strlist.h" #include "names.h" #include "dirs.h" #include "chunks.h" #include "checksums.h" #include "files.h" #include "uncompression.h" #include "aptmethod.h" #include "filecntl.h" #include "hooks.h" struct tobedone { /*@null@*/ struct tobedone *next; /* must be saved to know where is should be moved to: */ /*@notnull@*/ char *uri; /*@notnull@*/ char *filename; /* in case of redirection, safe to another file first */ /*@null@*/ char *redirected_filename; /* callback and its data: */ queue_callback *callback; /*@null@*/void *privdata1, *privdata2; /* there is no fallback or that was already used */ bool lasttry, ignore; /* how often this was redirected */ unsigned int redirect_count; }; struct aptmethod { /*@only@*/ /*@null@*/ struct aptmethod *next; char *name; char *baseuri; /*@null@*/char *fallbackbaseuri; /*@null@*/char *config; int mstdin, mstdout; pid_t child; enum { ams_notstarted=0, ams_waitforcapabilities, ams_ok, ams_failed } status; /*@null@*/struct tobedone *tobedone; /*@null@*//*@dependent@*/struct tobedone *lasttobedone; /*@null@*//*@dependent@*/const struct tobedone *nexttosend; /* what is currently read: */ /*@null@*/char *inputbuffer; size_t input_size, alreadyread; /* What is currently written: */ /*@null@*/char *command; size_t alreadywritten, output_length; /* old (<= squeeze) 103 behavior detected */ bool old103; /* new (>= wheezy) 103 behavior detected, no more workarounds necessary */ bool new103; }; struct aptmethodrun { struct aptmethod *methods; }; static void todo_free(/*@only@*/ struct tobedone *todo) { free(todo->filename); free(todo->redirected_filename); 
/* Free a whole queue of download items. */
static void free_todolist(/*@only@*/ struct tobedone *todo) {
	while (todo != NULL) {
		struct tobedone *h = todo->next;

		todo_free(todo);
		todo = h;
	}
}

/* Release one method descriptor and everything it owns
 * (names, buffers, pending command and its remaining work queue).
 * NULL is accepted and ignored. */
static void aptmethod_free(/*@only@*/struct aptmethod *method) {
	if (method == NULL)
		return;
	free(method->name);
	free(method->baseuri);
	free(method->config);
	free(method->fallbackbaseuri);
	free(method->inputbuffer);
	free(method->command);
	free_todolist(method->tobedone);
	free(method);
}

/* Tear down an apt-method run: free methods whose child never started
 * (or already went away), close the pipes of the remaining ones so
 * their children see EOF and exit, then reap every child (method
 * helpers and uncompressor processes alike) before freeing 'run'. */
retvalue aptmethod_shutdown(struct aptmethodrun *run) {
	retvalue result = RET_OK, r;
	struct aptmethod *method, *lastmethod, **method_ptr;

	/* first get rid of everything not running: */
	method_ptr = &run->methods;
	while (*method_ptr != NULL) {
		if ((*method_ptr)->child > 0) {
			/* child still alive: keep it in the list for reaping below */
			if (verbose > 10)
				fprintf(stderr, "Still waiting for %d\n",
						(int)(*method_ptr)->child);
			method_ptr = &(*method_ptr)->next;
			continue;
		} else {
			/* unlink and free in place; method_ptr stays put so the
			 * following node is examined next */
			/*@only@*/ struct aptmethod *h;

			h = (*method_ptr);
			*method_ptr = h->next;
			h->next = NULL;
			aptmethod_free(h);
		}
	}

	/* finally get rid of all the processes: */
	/* closing both pipe ends signals EOF to the children, which makes
	 * well-behaved methods terminate on their own */
	for (method = run->methods ; method != NULL ; method = method->next) {
		if (method->mstdin >= 0) {
			(void)close(method->mstdin);
			if (verbose > 30)
				fprintf(stderr, "Closing stdin of %d\n",
						(int)method->child);
		}
		method->mstdin = -1;
		if (method->mstdout >= 0) {
			(void)close(method->mstdout);
			if (verbose > 30)
				fprintf(stderr, "Closing stdout of %d\n",
						(int)method->child);
		}
		method->mstdout = -1;
	}
	/* reap children until no method and no uncompressor is left.
	 * NOTE(review): if wait() failed (returned -1) while run->methods
	 * is non-empty this would loop busily; presumably every listed
	 * method is guaranteed a live child here — confirm. */
	while (run->methods != NULL || uncompress_running()) {
		pid_t pid;
		int status;

		pid = wait(&status);
		lastmethod = NULL;
		method = run->methods;
		while (method != NULL) {
			if (method->child == pid) {
				struct aptmethod *next = method->next;

				if (lastmethod != NULL) {
					lastmethod->next = next;
				} else
					run->methods = next;
				aptmethod_free(method);
				/* pid = -1 marks it as handled, so the
				 * uncompressor check below is skipped */
				pid = -1;
				break;
			} else {
				lastmethod = method;
				method = method->next;
			}
		}
		if (pid > 0) {
			/* not one of ours: maybe an uncompressor child */
			r = uncompress_checkpid(pid, status);
			RET_UPDATE(result, r);
		}
	}
	free(run);
	return result;
}
structures***********************/ retvalue aptmethod_initialize_run(struct aptmethodrun **run) { struct aptmethodrun *r; r = zNEW(struct aptmethodrun); if (FAILEDTOALLOC(r)) return RET_ERROR_OOM; *run = r; return RET_OK; } retvalue aptmethod_newmethod(struct aptmethodrun *run, const char *uri, const char *fallbackuri, const struct strlist *config, struct aptmethod **m) { struct aptmethod *method; const char *p; method = zNEW(struct aptmethod); if (FAILEDTOALLOC(method)) return RET_ERROR_OOM; method->mstdin = -1; method->mstdout = -1; method->child = -1; method->status = ams_notstarted; p = uri; while (*p != '\0' && (*p == '_' || *p == '-' || (*p>='a' && *p<='z') || (*p>='A' && *p<='Z') || (*p>='0' && *p<='9'))) { p++; } if (*p == '\0') { fprintf(stderr, "No colon found in method-URI '%s'!\n", uri); free(method); return RET_ERROR; } if (*p != ':') { fprintf(stderr, "Unexpected character '%c' in method-URI '%s'!\n", *p, uri); free(method); return RET_ERROR; } if (p == uri) { fprintf(stderr, "Zero-length name in method-URI '%s'!\n", uri); free(method); return RET_ERROR; } method->name = strndup(uri, p-uri); if (FAILEDTOALLOC(method->name)) { free(method); return RET_ERROR_OOM; } method->baseuri = strdup(uri); if (FAILEDTOALLOC(method->baseuri)) { free(method->name); free(method); return RET_ERROR_OOM; } if (fallbackuri == NULL) method->fallbackbaseuri = NULL; else { method->fallbackbaseuri = strdup(fallbackuri); if (FAILEDTOALLOC(method->fallbackbaseuri)) { free(method->baseuri); free(method->name); free(method); return RET_ERROR_OOM; } } #define CONF601 "601 Configuration" #define CONFITEM "\nConfig-Item: " if (config->count == 0) method->config = strdup(CONF601 CONFITEM "Dir=/" "\n\n"); else method->config = strlist_concat(config, CONF601 CONFITEM, CONFITEM, "\n\n"); if (FAILEDTOALLOC(method->config)) { free(method->fallbackbaseuri); free(method->baseuri); free(method->name); free(method); return RET_ERROR_OOM; } method->next = run->methods; run->methods = method; 
*m = method; return RET_OK; } /**************************Fire up a method*****************************/ inline static retvalue aptmethod_startup(struct aptmethod *method) { pid_t f; int mstdin[2]; int mstdout[2]; int r; /* When there is nothing to get, there is no reason to startup * the method. */ if (method->tobedone == NULL) { return RET_NOTHING; } /* when we are already running, we are already ready...*/ if (method->child > 0) { return RET_OK; } method->status = ams_waitforcapabilities; r = pipe(mstdin); if (r < 0) { int e = errno; fprintf(stderr, "Error %d creating pipe: %s\n", e, strerror(e)); return RET_ERRNO(e); } r = pipe(mstdout); if (r < 0) { int e = errno; (void)close(mstdin[0]); (void)close(mstdin[1]); fprintf(stderr, "Error %d in pipe syscall: %s\n", e, strerror(e)); return RET_ERRNO(e); } if (interrupted()) { (void)close(mstdin[0]);(void)close(mstdin[1]); (void)close(mstdout[0]);(void)close(mstdout[1]); return RET_ERROR_INTERRUPTED; } f = fork(); if (f < 0) { int e = errno; (void)close(mstdin[0]); (void)close(mstdin[1]); (void)close(mstdout[0]); (void)close(mstdout[1]); fprintf(stderr, "Error %d forking: %s\n", e, strerror(e)); return RET_ERRNO(e); } if (f == 0) { char *methodname; int e; /* child: */ (void)close(mstdin[1]); (void)close(mstdout[0]); if (dup2(mstdin[0], 0) < 0) { e = errno; fprintf(stderr, "Error %d while setting stdin: %s\n", e, strerror(e)); exit(255); } if (dup2(mstdout[1], 1) < 0) { e = errno; fprintf(stderr, "Error %d while setting stdout: %s\n", e, strerror(e)); exit(255); } closefrom(3); methodname = calc_dirconcat(global.methoddir, method->name); if (FAILEDTOALLOC(methodname)) exit(255); /* not really useful here, unless someone write reprepro * specific modules (which I hope noone will) */ sethookenvironment(NULL, NULL, NULL, NULL); /* actually call the method without any arguments: */ (void)execl(methodname, methodname, ENDOFARGUMENTS); e = errno; fprintf(stderr, "Error %d while executing '%s': %s\n", e, methodname, 
strerror(e)); exit(255); } /* the main program continues... */ method->child = f; if (verbose > 10) fprintf(stderr, "Method '%s' started as %d\n", method->baseuri, (int)f); (void)close(mstdin[0]); (void)close(mstdout[1]); markcloseonexec(mstdin[1]); markcloseonexec(mstdout[0]); method->mstdin = mstdin[1]; method->mstdout = mstdout[0]; method->inputbuffer = NULL; method->input_size = 0; method->alreadyread = 0; method->command = NULL; method->output_length = 0; method->alreadywritten = 0; return RET_OK; } /**************************how to add files*****************************/ static inline void enqueue(struct aptmethod *method, /*@only@*/struct tobedone *todo) { todo->next = NULL; if (method->lasttobedone == NULL) method->nexttosend = method->lasttobedone = method->tobedone = todo; else { method->lasttobedone->next = todo; method->lasttobedone = todo; if (method->nexttosend == NULL) method->nexttosend = todo; } } static retvalue enqueuenew(struct aptmethod *method, /*@only@*/char *uri, /*@only@*/char *destfile, queue_callback *callback, void *privdata1, void *privdata2) { struct tobedone *todo; if (FAILEDTOALLOC(destfile)) { free(uri); return RET_ERROR_OOM; } if (FAILEDTOALLOC(uri)) { free(destfile); return RET_ERROR_OOM; } todo = NEW(struct tobedone); if (FAILEDTOALLOC(todo)) { free(uri); free(destfile); return RET_ERROR_OOM; } todo->next = NULL; todo->uri = uri; todo->filename = destfile; todo->redirected_filename = NULL; todo->callback = callback; todo->privdata1 = privdata1; todo->privdata2 = privdata2; todo->lasttry = method->fallbackbaseuri == NULL; todo->ignore = false; todo->redirect_count = 0; enqueue(method, todo); return RET_OK; } retvalue aptmethod_enqueue(struct aptmethod *method, const char *origfile, /*@only@*/char *destfile, queue_callback *callback, void *privdata1, void *privdata2) { return enqueuenew(method, calc_dirconcat(method->baseuri, origfile), destfile, callback, privdata1, privdata2); } retvalue aptmethod_enqueueindex(struct aptmethod 
*method, const char *suite, const char *origfile, const char *suffix, const char *destfile, const char *downloadsuffix, queue_callback *callback, void *privdata1, void *privdata2) { return enqueuenew(method, mprintf("%s/%s/%s%s", method->baseuri, suite, origfile, suffix), mprintf("%s%s", destfile, downloadsuffix), callback, privdata1, privdata2); } /*****************what to do with received files************************/ static retvalue requeue_or_fail(struct aptmethod *method, /*@only@*/struct tobedone *todo) { retvalue r; if (todo->ignore) return RET_NOTHING; if (todo->lasttry) { if (todo->callback == NULL) r = RET_ERROR; else r = todo->callback(qa_error, todo->privdata1, todo->privdata2, todo->uri, NULL, todo->filename, NULL, method->name); todo_free(todo); return r; } else { size_t l, old_len, new_len; char *s; assert (method->fallbackbaseuri != NULL); old_len = strlen(method->baseuri); new_len = strlen(method->fallbackbaseuri); l = strlen(todo->uri); s = malloc(l+new_len+1-old_len); if (FAILEDTOALLOC(s)) { todo_free(todo); return RET_ERROR_OOM; } memcpy(s, method->fallbackbaseuri, new_len); strcpy(s+new_len, todo->uri + old_len); free(todo->uri); todo->uri = s; todo->lasttry = true; todo->redirect_count = 0; enqueue(method, todo); return RET_OK; } } /* look which file could not be received and remove it: */ static retvalue urierror(struct aptmethod *method, const char *uri, /*@only@*/char *message) { struct tobedone *todo, *lasttodo; lasttodo = NULL; todo = method->tobedone; while (todo != NULL) { if (strcmp(todo->uri, uri) == 0) { /* remove item: */ if (lasttodo == NULL) method->tobedone = todo->next; else lasttodo->next = todo->next; if (method->nexttosend == todo) { /* just in case some method received * files before we request them ;-) */ method->nexttosend = todo->next; } if (method->lasttobedone == todo) { method->lasttobedone = todo->next; } fprintf(stderr, "aptmethod error receiving '%s':\n'%s'\n", uri, (message != NULL)?message:""); /* put message in 
failed items to show it later? */ free(message); return requeue_or_fail(method, todo); } lasttodo = todo; todo = todo->next; } /* huh? If if have not asked for it, how can there be errors? */ fprintf(stderr, "Method '%s' reported error with unrequested file '%s':\n'%s'!\n", method->name, uri, message); free(message); return RET_ERROR; } /* look which file could not be received and readd the new name... */ static retvalue uriredirect(struct aptmethod *method, const char *uri, /*@only@*/char *newuri) { struct tobedone *todo, *lasttodo; lasttodo = NULL; todo = method->tobedone; while (todo != NULL) { if (strcmp(todo->uri, uri) == 0) { /* with the old 103 behavior, the method will * change the url itself, but report the new one * when done, so rewrite it: */ if (method->old103) { free(todo->uri); todo->uri = newuri; return RET_NOTHING; } /* remove item: */ if (lasttodo == NULL) method->tobedone = todo->next; else lasttodo->next = todo->next; if (method->nexttosend == todo) { /* just in case some method received * files before we request them ;-) */ method->nexttosend = todo->next; } if (method->lasttobedone == todo) { method->lasttobedone = todo->next; } if (todo->redirected_filename == NULL && todo->redirect_count < 2) { if (verbose > 0) fprintf(stderr, "aptmethod redirects '%s' to '%s'\n", uri, newuri); /* readd with new uri */ free(todo->uri); todo->uri = newuri; if (!method->new103) { /* save to a different filename. 
This is quite * wastefull for index files, as they will be * copied another time, but otherwise an squeeze * http method might download it two times to the * same file, corrupting it */ todo->redirected_filename = mprintf("%s_redirect", todo->filename); if (FAILEDTOALLOC(todo->redirected_filename)) return RET_ERROR_OOM; } todo->redirect_count++; enqueue(method, todo); return RET_OK; } fprintf(stderr, "aptmethod redirects already redirected '%s' again to '%s'\n" "Multiple redirects currently not supported by reprepro", uri, newuri); /* put message in failed items to show it later? */ free(newuri); return requeue_or_fail(method, todo); } lasttodo = todo; todo = todo->next; } /* huh? If if have not asked for it, how can there be errors? */ fprintf(stderr, "Method '%s' reported redirect for unrequested file '%s'-> '%s'\n", method->name, uri, newuri); free(newuri); return RET_ERROR; } /* look where a received file has to go to: */ static retvalue uridone(struct aptmethod *method, const char *uri, const char *filename, /*@only@*//*@null@*/struct checksums *checksumsfromapt) { struct tobedone *todo, *lasttodo; retvalue r; lasttodo = NULL; todo = method->tobedone; while (todo != NULL) { bool expectduplicates = false; if (strcmp(todo->uri, uri) != 0) { lasttodo = todo; todo = todo->next; continue; } if (todo->ignore) { assert (method->old103); assert (todo->redirected_filename != NULL); r = RET_NOTHING; } else { /* This detection (by requesting the redirected file to * another filename and comparing here is a ugly hack * (assuming the method will process the old one before * the new one and causing an unnecessary copy with the * new behaviour...., but there is no way to destinguish * those methods).*/ if (todo->redirected_filename != NULL && strcmp(filename, todo->filename) == 0) { /* this looks like we met a method that gives 103 * but still downloads the file. 
remember this */ if (!method->old103) fprintf(stderr, "aptmethod '%s' seems to have a obsoleted redirect handling which causes\n" "reprepro to request files multiple times. Work-around activated, but better\n" "only use it for targets not redirecting (or upgrade to apt >= 0.9.4 if\n" "that is the http method from apt)!\n", method->name); method->old103 = true; method->new103 = false; expectduplicates = true; } else if (!method->old103 && todo->redirected_filename != NULL && strcmp(filename, todo->redirected_filename) == 0) { /* nothing hints for a old 103 handling, and the redirected * file was gotten before any redirected was, so assume * this is the new style */ method->new103 = true; } r = todo->callback(qa_got, todo->privdata1, todo->privdata2, todo->uri, filename, todo->filename, checksumsfromapt, method->name); } checksums_free(checksumsfromapt); if (todo->redirected_filename != NULL && strcmp(filename, todo->redirected_filename) == 0) unlink(todo->redirected_filename); if (expectduplicates) { todo->ignore = true; } else { /* remove item: */ if (lasttodo == NULL) method->tobedone = todo->next; else lasttodo->next = todo->next; if (method->nexttosend == todo) { /* just in case some method received * files before we request them ;-) */ method->nexttosend = todo->next; } if (method->lasttobedone == todo) { method->lasttobedone = todo->next; } todo_free(todo); } return r; } /* huh? 
*/ fprintf(stderr, "Method '%s' retrieved unexpected file '%s' at '%s'!\n", method->name, uri, filename); checksums_free(checksumsfromapt); return RET_ERROR; } /***************************Input and Output****************************/ static retvalue logmessage(const struct aptmethod *method, const char *chunk, const char *type) { retvalue r; char *message; r = chunk_getvalue(chunk, "Message", &message); if (RET_WAS_ERROR(r)) return r; if (RET_IS_OK(r)) { fprintf(stderr, "aptmethod '%s': '%s'\n", method->baseuri, message); free(message); return RET_OK; } r = chunk_getvalue(chunk, "URI", &message); if (RET_WAS_ERROR(r)) return r; if (RET_IS_OK(r)) { fprintf(stderr, "aptmethod %s '%s'\n", type, message); free(message); return RET_OK; } fprintf(stderr, "aptmethod '%s': '%s'\n", method->baseuri, type); return RET_OK; } static inline retvalue gotcapabilities(struct aptmethod *method, const char *chunk) { retvalue r; r = chunk_gettruth(chunk, "Single-Instance"); if (RET_WAS_ERROR(r)) return r; // TODO: what to do with this? // if (r != RET_NOTHING) { // fprintf(stderr, "WARNING: Single-instance not yet supported!\n"); // } r = chunk_gettruth(chunk, "Send-Config"); if (RET_WAS_ERROR(r)) return r; if (r != RET_NOTHING) { assert(method->command == NULL); method->alreadywritten = 0; method->command = method->config; method->config = NULL; method->output_length = strlen(method->command); if (verbose > 11) { fprintf(stderr, "Sending config: '%s'\n", method->command); } } else { free(method->config); method->config = NULL; } method->status = ams_ok; return RET_OK; } static inline retvalue goturidone(struct aptmethod *method, const char *chunk) { static const char * const method_hash_names[cs_COUNT] = { "MD5-Hash", "SHA1-Hash", "SHA256-Hash", "Size" }; retvalue result, r; char *uri, *filename; enum checksumtype type; char *hashes[cs_COUNT]; struct checksums *checksums = NULL; //TODO: is it worth the mess to make this in-situ? 
r = chunk_getvalue(chunk, "URI", &uri); if (r == RET_NOTHING) { fprintf(stderr, "Missing URI header in uridone received from '%s' method!\n", method->name); r = RET_ERROR; method->status = ams_failed; } if (RET_WAS_ERROR(r)) return r; r = chunk_getvalue(chunk, "Filename", &filename); if (r == RET_NOTHING) { char *altfilename; r = chunk_getvalue(chunk, "Alt-Filename", &altfilename); if (r == RET_NOTHING) { fprintf(stderr, "Missing Filename header in uridone received from '%s' method!\n", method->name); r = urierror(method, uri, strdup( "")); } else { r = urierror(method, uri, mprintf( "", altfilename)); free(altfilename); } free(uri); return r; } if (RET_WAS_ERROR(r)) { free(uri); return r; } if (verbose >= 1) fprintf(stderr, "aptmethod got '%s'\n", uri); result = RET_NOTHING; for (type = cs_md5sum ; type < cs_COUNT ; type++) { hashes[type] = NULL; r = chunk_getvalue(chunk, method_hash_names[type], &hashes[type]); RET_UPDATE(result, r); } if (RET_IS_OK(result) && hashes[cs_md5sum] == NULL) { /* the lenny version also has this, better ask for * in case the old MD5-Hash vanishes in the future */ r = chunk_getvalue(chunk, "MD5Sum-Hash", &hashes[cs_md5sum]); RET_UPDATE(result, r); } if (RET_WAS_ERROR(result)) { free(uri); free(filename); for (type = cs_md5sum ; type < cs_COUNT ; type++) free(hashes[type]); return result; } if (RET_IS_OK(result)) { /* ignore errors, we can recompute them from the file */ (void)checksums_init(&checksums, hashes); } r = uridone(method, uri, filename, checksums); free(uri); free(filename); return r; } static inline retvalue goturierror(struct aptmethod *method, const char *chunk) { retvalue r; char *uri, *message; r = chunk_getvalue(chunk, "URI", &uri); if (r == RET_NOTHING) { fprintf(stderr, "Missing URI header in urierror received from '%s' method!\n", method->name); r = RET_ERROR; } if (RET_WAS_ERROR(r)) return r; r = chunk_getvalue(chunk, "Message", &message); if (r == RET_NOTHING) { message = NULL; } if (RET_WAS_ERROR(r)) { free(uri); 
return r; } r = urierror(method, uri, message); free(uri); return r; } static inline retvalue gotredirect(struct aptmethod *method, const char *chunk) { char *uri, *newuri; retvalue r; r = chunk_getvalue(chunk, "URI", &uri); if (r == RET_NOTHING) { fprintf(stderr, "Missing URI header in uriredirect received from '%s' method!\n", method->name); r = RET_ERROR; } if (RET_WAS_ERROR(r)) return r; r = chunk_getvalue(chunk, "New-URI", &newuri); if (r == RET_NOTHING) { fprintf(stderr, "Missing New-URI header in uriredirect received from '%s' method!\n", method->name); r = RET_ERROR; } if (RET_WAS_ERROR(r)) { free(uri); return r; } r = uriredirect(method, uri, newuri); free(uri); return r; } static inline retvalue parsereceivedblock(struct aptmethod *method, const char *input) { const char *p; retvalue r; #define OVERLINE {while (*p != '\0' && *p != '\n') p++; if (*p == '\n') p++; } while (*input == '\n' || *input == '\r') input++; if (*input == '\0') { fprintf(stderr, "Unexpected number of newlines from '%s' method!\n", method->name); return RET_NOTHING; } p = input; switch ((*(input+1)=='0')?*input:'\0') { case '1': switch (*(input+2)) { /* 100 Capabilities */ case '0': OVERLINE; if (verbose > 14) { fprintf(stderr, "Got '%s'\n", input); } return gotcapabilities(method, input); /* 101 Log */ case '1': if (verbose > 10) { OVERLINE; return logmessage(method, p, "101"); } return RET_OK; /* 102 Status */ case '2': if (verbose > 5) { OVERLINE; return logmessage(method, p, "102"); } return RET_OK; /* 103 Redirect */ case '3': OVERLINE; return gotredirect(method, p); default: fprintf(stderr, "Error or unsupported message received: '%s'\n", input); return RET_ERROR; } case '2': switch (*(input+2)) { /* 200 URI Start */ case '0': if (verbose > 5) { OVERLINE; return logmessage(method, p, "start"); } return RET_OK; /* 201 URI Done */ case '1': OVERLINE; return goturidone(method, p); default: fprintf(stderr, "Error or unsupported message received: '%s'\n", input); return RET_ERROR; } 
case '4': switch (*(input+2)) { case '0': OVERLINE; r = goturierror(method, p); break; case '1': OVERLINE; (void)logmessage(method, p, "general error"); method->status = ams_failed; r = RET_ERROR; break; default: fprintf(stderr, "Error or unsupported message received: '%s'\n", input); r = RET_ERROR; } /* a failed download is not a error yet, as it might * be redone from another source later */ return r; default: fprintf(stderr, "Unexpected data from '%s' method: '%s'\n", method->name, input); return RET_ERROR; } } static retvalue receivedata(struct aptmethod *method) { retvalue result; ssize_t r; char *p; int consecutivenewlines; assert (method->status != ams_ok || method->tobedone != NULL); if (method->status != ams_waitforcapabilities && method->status != ams_ok) return RET_NOTHING; /* First look if we have enough room to read.. */ if (method->alreadyread + 1024 >= method->input_size) { char *newptr; if (method->input_size >= (size_t)128000) { fprintf(stderr, "Ridiculously long answer from method!\n"); method->status = ams_failed; return RET_ERROR; } newptr = realloc(method->inputbuffer, method->alreadyread+1024); if (FAILEDTOALLOC(newptr)) { return RET_ERROR_OOM; } method->inputbuffer = newptr; method->input_size = method->alreadyread + 1024; } assert (method->inputbuffer != NULL); /* then read as much as the pipe is able to fill of our buffer */ r = read(method->mstdout, method->inputbuffer + method->alreadyread, method->input_size - method->alreadyread - 1); if (r < 0) { int e = errno; fprintf(stderr, "Error %d reading pipe from aptmethod: %s\n", e, strerror(e)); method->status = ams_failed; return RET_ERRNO(e); } method->alreadyread += r; result = RET_NOTHING; while(true) { retvalue res; r = method->alreadyread; p = method->inputbuffer; consecutivenewlines = 0; while (r > 0) { if (*p == '\0') { fprintf(stderr, "Unexpected Zeroes in method output!\n"); method->status = ams_failed; return RET_ERROR; } else if (*p == '\n') { consecutivenewlines++; if 
(consecutivenewlines >= 2) break; } else if (*p != '\r') { consecutivenewlines = 0; } p++; r--; } if (r <= 0) { return result; } *p ='\0'; p++; r--; res = parsereceivedblock(method, method->inputbuffer); if (r > 0) memmove(method->inputbuffer, p, r); method->alreadyread = r; RET_UPDATE(result, res); } } static retvalue senddata(struct aptmethod *method) { size_t l; ssize_t r; if (method->status != ams_ok) return RET_NOTHING; if (method->command == NULL) { const struct tobedone *todo; /* nothing queued to send, nothing to be queued...*/ todo = method->nexttosend; if (todo == NULL) return RET_OK; if (interrupted()) return RET_ERROR_INTERRUPTED; method->alreadywritten = 0; // TODO: make sure this is already checked for earlier... assert (strchr(todo->uri, '\n') == NULL && strchr(todo->filename, '\n') == NULL); /* http-aptmethod seems to loose the last byte, * if the file is already in place, * so we better unlink the target first... * but this is done elsewhere already unlink(todo->filename); */ method->command = mprintf( "600 URI Acquire\nURI: %s\nFilename: %s\n\n", todo->uri, (todo->redirected_filename==NULL) ?todo->filename :todo->redirected_filename); if (FAILEDTOALLOC(method->command)) { return RET_ERROR_OOM; } if (verbose > 20) fprintf(stderr, "Will sent: '%s'\n", method->command); method->output_length = strlen(method->command); method->nexttosend = method->nexttosend->next; } l = method->output_length - method->alreadywritten; r = write(method->mstdin, method->command + method->alreadywritten, l); if (r < 0) { int e = errno; fprintf(stderr, "Error %d writing to pipe: %s\n", e, strerror(e)); //TODO: disable the whole method?? 
method->status = ams_failed; return RET_ERRNO(e); } else if ((size_t)r < l) { method->alreadywritten += r; return RET_OK; } free(method->command); method->command = NULL; return RET_OK; } static retvalue checkchilds(struct aptmethodrun *run) { pid_t child;int status; retvalue result = RET_OK, r; while ((child = waitpid(-1, &status, WNOHANG)) > 0) { struct aptmethod *method; for (method = run->methods ; method != NULL ; method = method->next) { if (method->child == child) break; } if (method == NULL) { /* perhaps an uncompressor terminated */ r = uncompress_checkpid(child, status); if (RET_IS_OK(r)) continue; if (RET_WAS_ERROR(r)) { result = r; continue; } else { fprintf(stderr, "Unexpected child died (maybe gpg died if signing/verifing was done): %d\n", (int)child); continue; } } /* Make sure we do not cope with this child any more */ if (method->mstdin != -1) { (void)close(method->mstdin); method->mstdin = -1; } if (method->mstdout != -1) { (void)close(method->mstdout); method->mstdout = -1; } method->child = -1; if (method->status != ams_failed) method->status = ams_notstarted; /* say something if it exited unnormal: */ if (WIFEXITED(status)) { int exitcode; exitcode = WEXITSTATUS(status); if (exitcode != 0) { fprintf(stderr, "Method %s://%s exited with non-zero exit code %d!\n", method->name, method->baseuri, exitcode); method->status = ams_notstarted; result = RET_ERROR; } } else { fprintf(stderr, "Method %s://%s exited unnormally!\n", method->name, method->baseuri); method->status = ams_notstarted; result = RET_ERROR; } } return result; } /* *workleft is always set, even when return indicated error. 
* (workleft < 0 when critical)*/ static retvalue readwrite(struct aptmethodrun *run, /*@out@*/int *workleft) { int maxfd, v; fd_set readfds, writefds; struct aptmethod *method; retvalue result, r; /* First calculate what to look at: */ FD_ZERO(&readfds); FD_ZERO(&writefds); maxfd = 0; *workleft = 0; for (method = run->methods ; method != NULL ; method = method->next) { if (method->status == ams_ok && (method->command != NULL || method->nexttosend != NULL)) { FD_SET(method->mstdin, &writefds); if (method->mstdin > maxfd) maxfd = method->mstdin; (*workleft)++; if (verbose > 19) fprintf(stderr, "want to write to '%s'\n", method->baseuri); } if (method->status == ams_waitforcapabilities || (method->status == ams_ok && method->tobedone != NULL)) { FD_SET(method->mstdout, &readfds); if (method->mstdout > maxfd) maxfd = method->mstdout; (*workleft)++; if (verbose > 19) fprintf(stderr, "want to read from '%s'\n", method->baseuri); } } if (*workleft == 0) return RET_NOTHING; // TODO: think about a timeout... v = select(maxfd + 1, &readfds, &writefds, NULL, NULL); if (v < 0) { int e = errno; //TODO: handle (e == EINTR) && interrupted() specially fprintf(stderr, "Select returned error %d: %s\n", e, strerror(e)); *workleft = -1; // TODO: what to do here? 
return RET_ERRNO(e); } result = RET_NOTHING; maxfd = 0; for (method = run->methods ; method != NULL ; method = method->next) { if (method->mstdout != -1 && FD_ISSET(method->mstdout, &readfds)) { r = receivedata(method); RET_UPDATE(result, r); } if (method->mstdin != -1 && FD_ISSET(method->mstdin, &writefds)) { r = senddata(method); RET_UPDATE(result, r); } } return result; } retvalue aptmethod_download(struct aptmethodrun *run) { struct aptmethod *method; retvalue result, r; int workleft; result = RET_NOTHING; /* fire up all methods, removing those that do not work: */ for (method = run->methods; method != NULL ; method = method->next) { r = aptmethod_startup(method); /* do not remove failed methods here any longer, * and not remove methods having nothing to do, * as this breaks when no index files are downloaded * due to all already being in place... */ RET_UPDATE(result, r); } /* waiting for them to finish: */ do { r = checkchilds(run); RET_UPDATE(result, r); r = readwrite(run, &workleft); RET_UPDATE(result, r); // TODO: check interrupted here... 
} while (workleft > 0 || uncompress_running()); return result; } reprepro-4.13.1/contents.h0000644000175100017510000000117512152651661012400 00000000000000#ifndef REPREPRO_CONTENTS_H #define REPREPRO_CONTENTS_H #ifndef REPREPRO_STRLIST_H #include "strlist.h" #endif #ifndef REPREPRO_DATABASE_H #include "database.h" #endif #ifndef REPREPRO_RELEASE_H #include "release.h" #endif struct contentsoptions { struct { bool enabled; bool udebs; bool nodebs; bool percomponent; bool allcomponents; bool compatsymlink; } flags; compressionset compressions; }; struct distribution; struct configiterator; retvalue contentsoptions_parse(struct distribution *, struct configiterator *); retvalue contents_generate(struct distribution *, struct release *, bool /*onlyneeded*/); #endif reprepro-4.13.1/dpkgversions.c0000644000175100017510000001034512152651661013253 00000000000000/* * Most contents of this file are taken from: * libdpkg - Debian packaging suite library routines * from the files * parsehelp.c - helpful routines for parsing and writing * and * vercmp.c - comparison of version numbers * * Copyright (C) 1995 Ian Jackson * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2, * or (at your option) any later version. * * This is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with dpkg; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include "error.h" #include "dpkgversions.h" #define _(a) a #define cisalpha(a) (isalpha(a)!=0) #define cisdigit(a) (isdigit(a)!=0) /* from dpkg-db.h.in: */ struct versionrevision { unsigned long epoch; const char *version; const char *revision; }; /* from parsehelp.c */ static const char *parseversion(struct versionrevision *rversion, const char *string) { char *hyphen, *colon, *eepochcolon; const char *end, *ptr; unsigned long epoch; if (!*string) return _("version string is empty"); /* trim leading and trailing space */ while (*string && (*string == ' ' || *string == '\t')) string++; /* string now points to the first non-whitespace char */ end = string; /* find either the end of the string, or a whitespace char */ while (*end && *end != ' ' && *end != '\t') end++; /* check for extra chars after trailing space */ ptr = end; while (*ptr && (*ptr == ' ' || *ptr == '\t')) ptr++; if (*ptr) return _("version string has embedded spaces"); colon= strchr(string, ':'); if (colon) { epoch= strtoul(string, &eepochcolon, 10); if (colon != eepochcolon) return _("epoch in version is not number"); if (!*++colon) return _("nothing after colon in version number"); string= colon; rversion->epoch= epoch; } else { rversion->epoch= 0; } rversion->version= strndup(string, end - string); hyphen= strrchr(rversion->version,'-'); if (hyphen) *hyphen++= 0; rversion->revision= hyphen ? hyphen : ""; return NULL; } /* from vercmp.c */ /* assume ascii; warning: evaluates x multiple times! */ #define order(x) ((x) == '~' ? -1 \ : cisdigit((x)) ? 0 \ : !(x) ? 0 \ : cisalpha((x)) ? 
(x) \ : (x) + 256) static int verrevcmp(const char *val, const char *ref) { if (!val) val= ""; if (!ref) ref= ""; while (*val || *ref) { int first_diff= 0; while ((*val && !cisdigit(*val)) || (*ref && !cisdigit(*ref))) { int vc= order(*val), rc= order(*ref); if (vc != rc) return vc - rc; val++; ref++; } while (*val == '0') val++; while (*ref == '0') ref++; while (cisdigit(*val) && cisdigit(*ref)) { if (!first_diff) first_diff= *val - *ref; val++; ref++; } if (cisdigit(*val)) return 1; if (cisdigit(*ref)) return -1; if (first_diff) return first_diff; } return 0; } static int versioncompare(const struct versionrevision *version, const struct versionrevision *refversion) { int r; if (version->epoch > refversion->epoch) return 1; if (version->epoch < refversion->epoch) return -1; r= verrevcmp(version->version,refversion->version); if (r) return r; return verrevcmp(version->revision,refversion->revision); } /* now own code */ retvalue dpkgversions_cmp(const char *first,const char *second,int *result) { struct versionrevision v1,v2; const char *m; if ((m = parseversion(&v1,first)) != NULL) { fprintf(stderr,"Error while parsing '%s' as version: %s\n",first,m); return RET_ERROR; } if ((m = parseversion(&v2,second)) != NULL) { fprintf(stderr,"Error while parsing '%s' as version: %s\n",second,m); return RET_ERROR; } *result = versioncompare(&v1,&v2); free((char*)v1.version); free((char*)v2.version); return RET_OK; } reprepro-4.13.1/aptmethod.h0000644000175100017510000000223412152651661012525 00000000000000#ifndef REPREPRO_APTMETHOD_H #define REPREPRO_APTMETHOD_H #ifndef REPREPRO_DATABASE_H #include "database.h" #endif #ifndef REPREPRO_CHECKSUMS_H #include "checksums.h" #endif struct aptmethodrun; struct aptmethod; enum queue_action { qa_abort, qa_got, qa_error }; typedef retvalue queue_callback(enum queue_action, void *, void *, const char * /*uri*/, const char * /*gotfilename*/, const char * /*wantedfilename*/, /*@null@*/const struct checksums *, const char * 
/*methodname*/); retvalue aptmethod_initialize_run(/*@out@*/struct aptmethodrun **); retvalue aptmethod_newmethod(struct aptmethodrun *, const char * /*uri*/, const char * /*fallbackuri*/, const struct strlist * /*config*/, /*@out@*/struct aptmethod **); retvalue aptmethod_enqueue(struct aptmethod *, const char * /*origfile*/, /*@only@*/char */*destfile*/, queue_callback *, void *, void *); retvalue aptmethod_enqueueindex(struct aptmethod *, const char * /*suite*/, const char * /*origfile*/, const char *, const char * /*destfile*/, const char *, queue_callback *, void *, void *); retvalue aptmethod_download(struct aptmethodrun *); retvalue aptmethod_shutdown(/*@only@*/struct aptmethodrun *); #endif reprepro-4.13.1/needbuild.c0000644000175100017510000002000312152651661012460 00000000000000/* This file is part of "reprepro" * Copyright (C) 2009,2012,2013 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include "error.h" #include "atoms.h" #include "strlist.h" #include "chunks.h" #include "trackingt.h" #include "tracking.h" #include "globmatch.h" #include "needbuild.h" /* list all source packages in a distribution that needs buildd action For each source package check: - if tracking is enabled and there is a .log or .changes file for the given arch -> SKIP - if there is a binary package for the given architecture -> SKIP - if the package's Architecture field excludes this arch -> SKIP - if the package's Binary field only lists existing ones (i.e. architecture all) -> SKIP */ static retvalue tracked_source_needs_build(architecture_t architecture, const char *sourcename, const char *sourceversion, const char *dscfilename, const struct strlist *binary, const struct trackedpackage *tp, bool printarch) { bool found_binary[binary->count]; const char *archstring = atoms_architectures[architecture]; size_t archstringlen= strlen(archstring); int i; memset(found_binary, 0, sizeof(bool)*binary->count); for (i = 0 ; i < tp->filekeys.count ; i++) { enum filetype ft = tp->filetypes[i]; const char *fk = tp->filekeys.values[i]; if (ft == ft_XTRA_DATA) continue; if (ft == ft_ALL_BINARY) { int j; if (architecture == architecture_all) { /* found an _all.deb, nothing to do */ return RET_NOTHING; } /* determine which binary files are arch all packages: */ for (j = 0 ; j < binary->count ; j++) { const char *b = binary->values[j]; size_t l = strlen(b); if (strncmp(fk, b, l) == 0 && fk[l] == '_') found_binary[j] = true; } continue; } if (ft == ft_ARCH_BINARY) { const char *a = strrchr(fk, '_'); if (a == NULL) continue; a++; if (strncmp(a, archstring, archstringlen) != 0 || a[archstringlen] != '.') 
continue; /* found an .deb with this architecture, so nothing is to be done */ return RET_NOTHING; } if (ft == ft_LOG || ft == ft_CHANGES) { const char *a = strrchr(fk, '_'); const char *e; if (a == NULL) continue; a++; while ((e = strchr(a, '+')) != NULL) { if ((size_t)(e-a) != archstringlen) { a = e+1; continue; } if (memcmp(a, archstring, archstringlen) != 0){ a = e+1; continue; } /* found something for this architecture */ return RET_NOTHING; } e = strchr(a, '.'); if (e == NULL) continue; if ((size_t)(e-a) != archstringlen) { a = e+1; continue; } if (memcmp(a, archstring, archstringlen) != 0){ a = e+1; continue; } /* found something for this architecture */ return RET_NOTHING; } } /* nothing for this architecture was found, check if is has any binary packages that are lacking: */ for (i = 0 ; i < binary->count ; i++) { if (!found_binary[i]) { if (printarch) printf("%s %s %s %s\n", sourcename, sourceversion, dscfilename, archstring); else printf("%s %s %s\n", sourcename, sourceversion, dscfilename); return RET_OK; } } /* all things listed in Binary already exists, nothing to do: */ return RET_NOTHING; } struct needbuild_data { architecture_t architecture; trackingdb tracks; /*@null@*/ const char *glob; bool printarch; }; static retvalue check_source_needs_build(struct distribution *distribution, struct target *target, const char *sourcename, const char *control, void *data) { struct needbuild_data *d = data; char *sourceversion; struct strlist binary, architectures, filekeys; const char *dscfilename = NULL; int i; retvalue r; if (d->glob != NULL && !globmatch(sourcename, d->glob)) return RET_NOTHING; r = target->getversion(control, &sourceversion); if (!RET_IS_OK(r)) return r; r = chunk_getwordlist(control, "Architecture", &architectures); if (RET_IS_OK(r)) { bool skip = true; const char *req = atoms_architectures[d->architecture]; const char *hyphen, *os; size_t osl; hyphen = strchr(req, '-'); if (hyphen == NULL) { os = "linux"; osl = 5; } else { os = req; osl 
= hyphen - req; } for (i = 0 ; i < architectures.count ; i++) { const char *a = architectures.values[i]; if (strcmp(a, req) == 0) { skip = false; break; } /* "all" is not part of "any" or "*-any" */ if (d->architecture == architecture_all) continue; if (strcmp(a, "any") == 0) { skip = false; break; } size_t al = strlen(a); if (al < 4 || memcmp(a + al - 4, "-any", 4) != 0) continue; if (al == osl + 4 && memcmp(a, os, osl) == 0) { skip = false; break; } } strlist_done(&architectures); if (skip) { free(sourceversion); return RET_NOTHING; } } r = chunk_getwordlist(control, "Binary", &binary); if (!RET_IS_OK(r)) { free(sourceversion); return r; } r = target->getfilekeys(control, &filekeys); if (!RET_IS_OK(r)) { strlist_done(&binary); free(sourceversion); return r; } for (i = 0 ; i < filekeys.count ; i++) { if (endswith(filekeys.values[i], ".dsc")) { dscfilename = filekeys.values[i]; break; } } if (dscfilename == NULL) { fprintf(stderr, "Warning: source package '%s' in '%s' without dsc file!\n", sourcename, target->identifier); free(sourceversion); strlist_done(&binary); strlist_done(&filekeys); return RET_NOTHING; } if (d->tracks != NULL) { struct trackedpackage *tp; r = tracking_get(d->tracks, sourcename, sourceversion, &tp); if (RET_WAS_ERROR(r)) { free(sourceversion); strlist_done(&binary); strlist_done(&filekeys); return r; } if (RET_IS_OK(r)) { r = tracked_source_needs_build( d->architecture, sourcename, sourceversion, dscfilename, &binary, tp, d->printarch); trackedpackage_free(tp); free(sourceversion); strlist_done(&binary); strlist_done(&filekeys); return r; } fprintf(stderr, "Warning: %s's tracking data of %s (%s) is out of date. 
Run retrack to repair!\n", distribution->codename, sourcename, sourceversion); } // TODO: implement without tracking free(sourceversion); strlist_done(&binary); strlist_done(&filekeys); return RET_NOTHING; } retvalue find_needs_build(struct distribution *distribution, architecture_t architecture, const struct atomlist *onlycomponents, const char *glob, bool printarch) { retvalue result, r; struct needbuild_data d; d.architecture = architecture; d.glob = glob; d.printarch = printarch; if (distribution->tracking == dt_NONE) { fprintf(stderr, "ERROR: '%s' has no source package Tracking enabled and\n" "build-needing is currently only implemented for distributions where\n" "this is enabled.\n" "(i.e. you need to add e.g. Tracking: minimal in conf/distribution\n" "and run retrack (and repeat running it after every update and pull.)\n", distribution->codename); return RET_ERROR; } if (distribution->tracking != dt_NONE) { r = tracking_initialize(&d.tracks, distribution, true); if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) d.tracks = NULL; } else d.tracks = NULL; result = distribution_foreach_package_c(distribution, onlycomponents, architecture_source, pt_dsc, check_source_needs_build, &d); r = tracking_done(d.tracks); RET_ENDUPDATE(result, r); return result; } reprepro-4.13.1/autogen.sh0000755000175100017510000000076712152655314012400 00000000000000#!/bin/sh set -e mkdir -p ac aclocal autoheader automake -a -c autoconf rm -rf autom4te.cache || true if [ $# -lt 1 ] ; then exit 0 fi if [ "x$1" = "x--configure" ] ; then shift repreprodir="`pwd`" if [ $# -gt 0 ] ; then mkdir -p -- "$1" cd "$1" || exit 1 shift fi "$repreprodir"/configure --enable-maintainer-mode CFLAGS="-Wall -O2 -g -Wmissing-prototypes -Wstrict-prototypes -Wshadow -Wunused-parameter -Wsign-compare" CPPFLAGS="" "$@" else echo "unsupported option $1" >&2 exit 1 fi reprepro-4.13.1/sha256.h0000644000175100017510000000105212152651661011545 00000000000000#ifndef REPREPRO_SHA256_H #define REPREPRO_SHA256_H /* 
Structure to save state of computation between the single steps. */ struct SHA256_Context { uint32_t H[8]; uint64_t total; uint32_t buflen; char buffer[128]; /* NB: always correctly aligned for uint32_t. */ }; #define SHA256_DIGEST_SIZE 32 void SHA256Init(/*@out@*/struct SHA256_Context *context); void SHA256Update(struct SHA256_Context *context, const uint8_t *data, size_t len); void SHA256Final(struct SHA256_Context *context, /*@out@*/uint8_t digest[SHA256_DIGEST_SIZE]); #endif reprepro-4.13.1/distribution.h0000644000175100017510000001432712152651661013265 00000000000000#ifndef REPREPRO_DISTRIBUTION_H #define REPREPRO_DISTRIBUTION_H struct distribution; #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" #endif #ifndef REPREPRO_DATABASE_H #include "database.h" #endif #ifndef REPREPRO_STRLIST_H #include "strlist.h" #endif #ifndef REPREPRO_TARGET_H #include "target.h" #endif #ifndef REPREPRO_EXPORTS_H #include "exports.h" #endif #ifndef REPREPRO_CONTENTS_H #include "contents.h" #endif struct overridefile; struct uploaders; struct distribution { struct distribution *next; /* the primary name to access this distribution: */ char *codename; /* for more helpfull error messages: */ const char *filename; /* only valid while parsing! */ unsigned int firstline, lastline; /* additional information for the Release-file to be * generated, may be NULL. 
only suite is sometimes used * (and only for sanity checks) */ /*@null@*/char *suite, *version; /*@null@*/char *origin, *label, *description, *notautomatic, *butautomaticupgrades; /* What architectures and components are there */ struct atomlist architectures, components; /* which update rules to use */ struct strlist updates; /* which rules to use to pull packages from other distributions */ struct strlist pulls; /* the key to sign with, may have no entries to mean unsigned: */ struct strlist signwith; /* the override file to use by default */ /*@null@*/char *deb_override, *udeb_override, *dsc_override; /* fake component prefix (and codename antisuffix) for Release files: */ /*@null@*/char *fakecomponentprefix; /* only loaded when you've done it yourself: */ struct { /*@null@*/struct overridefile *dsc, *deb, *udeb; } overrides; /* the list of components containing a debian-installer dir, * normally only "main" */ struct atomlist udebcomponents; /* what kind of index files to generate */ struct exportmode dsc, deb, udeb; /* is tracking enabled for this distribution? 
* (NONE must be 0 so it is the default) */ enum trackingtype { dt_NONE=0, dt_KEEP, dt_ALL, dt_MINIMAL } tracking; struct trackingoptions { bool includechanges; bool includebyhand; bool includelogs; bool needsources; bool keepsources; bool embargoalls; } trackingoptions; /* what content files to generate */ struct contentsoptions contents; struct atomlist contents_architectures, contents_components, contents_ucomponents; bool contents_architectures_set, contents_components_set, contents_ucomponents_set, /* not used, just here to keep things simpler: */ udebcomponents_set; /* A list of all targets contained in the distribution*/ struct target *targets; /* a filename to look for who is allowed to upload packages */ /*@null@*/char *uploaders; /* only loaded after _loaduploaders */ /*@null@*/struct uploaders *uploaderslist; /* how and where to log */ /*@null@*/struct logger *logger; /* scripts to feed byhand/raw-* files in */ /*@null@*/struct byhandhook *byhandhooks; /* a list of names beside Codename and Suite to accept .changes * files via include */ struct strlist alsoaccept; /* if != 0, number of seconds to add for Vaild-Until */ unsigned long validfor; /* RET_NOTHING: do not export with EXPORT_CHANGED, EXPORT_NEVER * RET_OK: export unless EXPORT_NEVER * RET_ERROR_*: only export with EXPORT_FORCE */ retvalue status; /* false: not looked at, do not export at all */ bool lookedat; /* false: not requested, do not handle at all */ bool selected; /* forbid all writing operations and exports if true */ bool readonly; /* tracking information might be obsolete */ bool needretrack; /* omitted because of --onlysmalldeletes */ bool omitted; }; retvalue distribution_get(struct distribution * /*all*/, const char *, bool /*lookedat*/, /*@out@*/struct distribution **); /* set lookedat, start logger, ... 
*/ retvalue distribution_prepareforwriting(struct distribution *); typedef retvalue each_target_action(struct distribution *, struct target *, void *); typedef retvalue each_package_action(struct distribution *, struct target *, const char *, const char *, void *); /* call for each package of */ retvalue distribution_foreach_package(struct distribution *, /*@null@*/const struct atomlist *, /*@null@*/const struct atomlist *, /*@null@*/const struct atomlist *, each_package_action, /*@null@*/each_target_action, void *); retvalue distribution_foreach_package_c(struct distribution *, /*@null@*/const struct atomlist *, architecture_t, packagetype_t, each_package_action, void *); /* delete every package decider returns RET_OK for */ retvalue distribution_remove_packages(struct distribution *, const struct atomlist *, const struct atomlist *, const struct atomlist *, each_package_action decider, struct trackingdata *, void *); /*@dependent@*/struct target *distribution_getpart(const struct distribution *distribution, component_t, architecture_t, packagetype_t); /* like distribtion_getpart, but returns NULL if there is no such target */ /*@null@*//*@dependent@*/struct target *distribution_gettarget(const struct distribution *distribution, component_t, architecture_t, packagetype_t); retvalue distribution_fullexport(struct distribution *); retvalue distribution_snapshot(struct distribution *, const char */*name*/); /* read the configuration from all distributions */ retvalue distribution_readall(/*@out@*/struct distribution **distributions); /* mark all dists from fitting in the filter given in */ retvalue distribution_match(struct distribution * /*alldistributions*/, int /*argc*/, const char * /*argv*/ [], bool /*lookedat*/, bool /*readonly*/); /* get a pointer to the apropiate part of the linked list */ struct distribution *distribution_find(struct distribution *, const char *); retvalue distribution_freelist(/*@only@*/struct distribution *distributions); enum exportwhen 
{EXPORT_NEVER, EXPORT_SILENT_NEVER, EXPORT_CHANGED, EXPORT_NORMAL, EXPORT_FORCE }; retvalue distribution_exportlist(enum exportwhen when, /*@only@*/struct distribution *); retvalue distribution_loadalloverrides(struct distribution *); void distribution_unloadoverrides(struct distribution *distribution); retvalue distribution_loaduploaders(struct distribution *); void distribution_unloaduploaders(struct distribution *distribution); #endif reprepro-4.13.1/log.c0000644000175100017510000007227212152651661011325 00000000000000/* This file is part of "reprepro" * Copyright (C) 2007 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "strlist.h" #include "atoms.h" #include "dirs.h" #include "target.h" #include "distribution.h" #include "configparser.h" #include "log.h" #include "filecntl.h" /*@null@*/ static /*@refcounted@*/ struct logfile { /*@null@*/struct logfile *next; char *filename; /*@refs@*/size_t refcount; int fd; } *logfile_root = NULL; static retvalue logfile_reference(/*@only@*/char *filename, /*@out@*/struct logfile **logfile) { struct logfile *l; assert (global.logdir != NULL && filename != NULL); for (l = logfile_root ; l != NULL ; l = l->next) { if (strcmp(l->filename, filename) == 0) { l->refcount++; *logfile = l; free(filename); return RET_OK; } } l = NEW(struct logfile); if (FAILEDTOALLOC(l)) { free(filename); return RET_ERROR_OOM; } if (filename[0] == '/') l->filename = filename; else { l->filename = calc_dirconcat(global.logdir, filename); free(filename); } if (FAILEDTOALLOC(l->filename)) { free(l); return RET_ERROR_OOM; } l->refcount = 1; l->fd = -1; l->next = logfile_root; logfile_root = l; *logfile = l; return RET_OK; } static void logfile_dereference(struct logfile *logfile) { assert (logfile != NULL); assert (logfile->refcount > 0); if (--logfile->refcount == 0) { if (logfile_root == logfile) logfile_root = logfile->next; else { struct logfile *previous = logfile_root; while (previous != NULL && previous->next != logfile) previous = previous->next; assert (previous != NULL); assert (previous->next == logfile); previous->next = logfile->next; } if (logfile->fd >= 0) { int ret, e; ret = close(logfile->fd); logfile->fd = -1; if (ret < 0) { e = errno; fprintf(stderr, "Error received when 
closing log file '%s': %d=%s\n", logfile->filename, e, strerror(e)); } } free(logfile->filename); free(logfile); } } static retvalue logfile_open(struct logfile *logfile) { assert (logfile != NULL); assert (logfile->fd < 0); (void)dirs_make_parent(logfile->filename); logfile->fd = open(logfile->filename, O_CREAT|O_APPEND|O_NOCTTY|O_WRONLY, 0666); if (logfile->fd < 0) { int e = errno; fprintf(stderr, "Cannot open/create logfile '%s': %d=%s\n", logfile->filename, e, strerror(e)); return RET_ERRNO(e); } return RET_OK; } static retvalue logfile_write(struct logfile *logfile, struct target *target, const char *name, /*@null@*/const char *version, /*@null@*/const char *oldversion) { int ret; time_t currenttime; struct tm t; assert (logfile->fd >= 0); currenttime = time(NULL); if (localtime_r(¤ttime, &t) == NULL) { if (version != NULL && oldversion != NULL) ret = dprintf(logfile->fd, "EEEE-EE-EE EE:EE:EE replace %s %s %s %s %s %s %s\n", target->distribution->codename, atoms_packagetypes[target->packagetype], atoms_components[target->component], atoms_architectures[target->architecture], name, version, oldversion); else if (version != NULL) ret = dprintf(logfile->fd, "EEEE-EE-EE EE:EE:EE add %s %s %s %s %s %s\n", target->distribution->codename, atoms_packagetypes[target->packagetype], atoms_components[target->component], atoms_architectures[target->architecture], name, version); else ret = dprintf(logfile->fd, "EEEE-EE-EE EE:EE:EE remove %s %s %s %s %s %s\n", target->distribution->codename, atoms_packagetypes[target->packagetype], atoms_components[target->component], atoms_architectures[target->architecture], name, oldversion); } else if (version != NULL && oldversion != NULL) ret = dprintf(logfile->fd, "%04d-%02d-%02d %02u:%02u:%02u replace %s %s %s %s %s %s %s\n", 1900+t.tm_year, t.tm_mon+1, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec, target->distribution->codename, atoms_packagetypes[target->packagetype], atoms_components[target->component], 
atoms_architectures[target->architecture], name, version, oldversion); else if (version != NULL) ret = dprintf(logfile->fd, "%04d-%02d-%02d %02u:%02u:%02u add %s %s %s %s %s %s\n", 1900+t.tm_year, t.tm_mon+1, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec, target->distribution->codename, atoms_packagetypes[target->packagetype], atoms_components[target->component], atoms_architectures[target->architecture], name, version); else ret = dprintf(logfile->fd, "%04d-%02d-%02d %02u:%02u:%02u remove %s %s %s %s %s %s\n", 1900+t.tm_year, t.tm_mon+1, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec, target->distribution->codename, atoms_packagetypes[target->packagetype], atoms_components[target->component], atoms_architectures[target->architecture], name, oldversion); if (ret < 0) { int e = errno; fprintf(stderr, "Error writing to log file '%s': %d=%s", logfile->filename, e, strerror(e)); return RET_ERRNO(e); } return RET_OK; } struct notificator { char *scriptname; /* if defined, only call if it matches the package: */ packagetype_t packagetype; component_t component; architecture_t architecture; command_t command; bool withcontrol, changesacceptrule; }; static void notificator_done(/*@special@*/struct notificator *n) /*@releases n->scriptname, n->packagename, n->component, n->architecture@*/{ free(n->scriptname); } static retvalue notificator_parse(struct notificator *n, struct configiterator *iter) { retvalue r; int c; setzero(struct notificator, n); n->architecture = atom_unknown; n->component = atom_unknown; n->packagetype = atom_unknown; n->command = atom_unknown; while ((c = config_nextnonspaceinline(iter)) != EOF) { if (c == '-') { char *word, *s, *detachedargument = NULL; const char *argument; atom_t *value_p = NULL; enum atom_type value_type; bool error = false; r = config_completeword(iter, c, &word); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; s = word + 1; while (*s != '\0' && *s != '=') s++; if (*s == '=') { argument = s+1; s[0] = '\0'; } else argument = NULL; 
switch (s-word) { case 2: if (word[1] == 'A') { value_p = &n->architecture; value_type = at_architecture; } else if (word[1] == 'C') { value_p = &n->component; value_type = at_component; } else if (word[1] == 'T') { value_p = &n->packagetype; value_type = at_packagetype; } else error = true; break; case 5: if (strcmp(word, "--via") == 0) { value_p = &n->command; value_type = at_command; } else error = true; break; case 6: if (strcmp(word, "--type") == 0) { value_p = &n->packagetype; value_type = at_packagetype; } else error = true; break; case 9: if (strcmp(word, "--changes") == 0) n->changesacceptrule = true; else error = true; break; case 11: if (strcmp(word, "--component") == 0) { value_p = &n->component; value_type = at_component; } else error = true; break; case 13: if (strcmp(word, "--withcontrol") == 0) n->withcontrol = true; else error = true; break; case 14: if (strcmp(word, "--architecture") == 0) { value_p = &n->architecture; value_type = at_architecture; } else error = true; break; default: error = true; break; } if (error) { fprintf(stderr, "Unknown Log notifier option in %s, line %u, column %u: '%s'\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter), word); free(word); return RET_ERROR; } if (value_p == NULL) { if (argument != NULL) { fprintf(stderr, "Log notifier option has = but may not, in %s, line %u, column %u: '%s'\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter), word); free(word); return RET_ERROR; } free(word); continue; } /* option expecting string value: */ if (atom_defined(*value_p)) { fprintf(stderr, "Repeated notifier option %s in %s, line %u, column %u!\n", word, config_filename(iter), config_markerline(iter), config_markercolumn(iter)); free(word); return RET_ERROR; } detachedargument = NULL; if (argument == NULL) { r = config_getwordinline(iter, &detachedargument); if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) { fprintf(stderr, "Log notifier option %s misses an argument 
in %s, line %u, column %u\n", word, config_filename(iter), config_line(iter), config_column(iter)); free(word); return RET_ERROR; } argument = detachedargument; } *value_p = atom_find(value_type, argument); if (!atom_defined(*value_p)) { fprintf(stderr, "Warning: unknown %s '%s', ignoring notificator line line %u in %s\n", atomtypes[value_type], argument, config_line(iter), config_filename(iter)); config_overline(iter); free(detachedargument); free(word); return RET_NOTHING; } free(detachedargument); free(word); } else { char *script; if (n->changesacceptrule && atom_defined(n->architecture)) { fprintf(stderr, "Error: --changes and --architecture cannot be combined! (line %u in '%s')\n", config_markerline(iter), config_filename(iter)); return RET_ERROR; } if (n->changesacceptrule && atom_defined(n->component)) { fprintf(stderr, "Error: --changes and --component cannot be combined! (line %u in %s)\n", config_markerline(iter), config_filename(iter)); return RET_ERROR; } if (n->changesacceptrule && atom_defined(n->packagetype)) { fprintf(stderr, "Error: --changes and --type cannot be combined! 
(line %u in %s)\n", config_markerline(iter), config_filename(iter)); return RET_ERROR; } r = config_completeword(iter, c, &script); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; c = config_nextnonspaceinline(iter); if (c != EOF) { fprintf(stderr, "Error parsing config file %s, line %u, column %u:\n" "Unexpected data at end of notifier after script name '%s'\n", config_filename(iter), config_line(iter), config_column(iter), script); free(script); return RET_ERROR; } n->scriptname = configfile_expandname(script, script); if (FAILEDTOALLOC(n->scriptname)) return RET_ERROR_OOM; return RET_OK; } } fprintf(stderr, "Error parsing config file %s, line %u, column %u:\n" "Unexpected end of line: name of notifier script missing!\n", config_filename(iter), config_line(iter), config_column(iter)); return RET_ERROR; } /*@null@*/ static struct notification_process { /*@null@*/struct notification_process *next; char **arguments; /*@null@*/char *causingfile; /*@null@*/char *causingrule; /*@null@*/char *suitefrom; /* data to send to the process */ size_t datalen, datasent; /*@null@*/char *data; /* process */ pid_t child; int fd; } *processes = NULL; static void notification_process_free(/*@only@*/struct notification_process *p) { char **a; if (p->fd >= 0) (void)close(p->fd); for (a = p->arguments ; *a != NULL ; a++) free(*a); free(p->causingfile); free(p->causingrule); free(p->suitefrom); free(p->arguments); free(p->data); free(p); } static int catchchildren(void) { pid_t child; int status; struct notification_process *p, **pp; int returned = 0; /* to avoid stealing aptmethods.c children, only * check for our children. (As not many run, that * is no large overhead. 
*/ pp = &processes; while ((p=*pp) != NULL) { if (p->child <= 0) { pp = &p->next; continue; } child = waitpid(p->child, &status, WNOHANG); if (child == 0) { pp = &p->next; continue; } if (child < 0) { int e = errno; fprintf(stderr, "Error calling waitpid on notification child: %d=%s\n", e, strerror(e)); /* but still handle the failed child: */ } else if (WIFSIGNALED(status)) { fprintf(stderr, "Notification process '%s' killed with signal %d!\n", p->arguments[0], WTERMSIG(status)); } else if (!WIFEXITED(status)) { fprintf(stderr, "Notification process '%s' failed!\n", p->arguments[0]); } else if (WIFEXITED(status) && WEXITSTATUS(status) != 0) { fprintf(stderr, "Notification process '%s' returned with exit code %d!\n", p->arguments[0], (int)(WEXITSTATUS(status))); } if (p->fd >= 0) { (void)close(p->fd); p->fd = -1; } p->child = 0; *pp = p->next; notification_process_free(p); returned++; } return returned; } static void feedchildren(bool dowait) { struct notification_process *p; fd_set w; int ret; int number = 0; struct timeval tv = {0, 0}; FD_ZERO(&w); for (p = processes; p!= NULL ; p = p->next) { if (p->child > 0 && p->fd >= 0 && p->datasent < p->datalen) { FD_SET(p->fd, &w); if (p->fd >= number) number = p->fd + 1; } } if (number == 0) return; ret = select(number, NULL, &w, NULL, dowait?NULL:&tv); if (ret < 0) { // TODO... 
return; } for (p = processes; p != NULL ; p = p->next) { if (p->child > 0 && p->fd >= 0 && FD_ISSET(p->fd, &w)) { size_t tosent = p->datalen - p->datasent; ssize_t sent; if (tosent > (size_t)512) tosent = 512; sent = write(p->fd, p->data+p->datasent, 512); if (sent < 0) { int e = errno; fprintf(stderr, "Error '%s' while sending data to '%s', sending SIGABRT to it!\n", strerror(e), p->arguments[0]); (void)kill(p->child, SIGABRT); } p->datasent += sent; if (p->datasent >= p->datalen) { free(p->data); p->data = NULL; } } } } static size_t runningchildren(void) { struct notification_process *p; size_t running = 0; p = processes; while (p != NULL && p->child != 0) { running ++; p = p->next; } return running; } static retvalue startchild(void) { struct notification_process *p; pid_t child; int filedes[2]; int ret; p = processes; while (p != NULL && p->child != 0) p = p->next; if (p == NULL) return RET_NOTHING; if (p->datalen > 0) { ret = pipe(filedes); if (ret < 0) { int e = errno; fprintf(stderr, "Error creating pipe: %d=%s!\n", e, strerror(e)); return RET_ERRNO(e); } p->fd = filedes[1]; } else { p->fd = -1; } child = fork(); if (child == 0) { if (p->datalen > 0) { dup2(filedes[0], 0); if (filedes[0] != 0) (void)close(filedes[0]); (void)close(filedes[1]); } /* Try to close all open fd but 0,1,2 */ closefrom(3); sethookenvironment(p->causingfile, p->causingrule, p->suitefrom, NULL); (void)execv(p->arguments[0], p->arguments); fprintf(stderr, "Error executing '%s': %s\n", p->arguments[0], strerror(errno)); _exit(255); } if (p->datalen > 0) { (void)close(filedes[0]); markcloseonexec(p->fd); } if (child < 0) { int e = errno; fprintf(stderr, "Error forking: %d=%s!\n", e, strerror(e)); if (p->fd >= 0) { (void)close(p->fd); p->fd = -1; } return RET_ERRNO(e); } p->child = child; if (p->datalen > 0) { struct pollfd polldata; ssize_t written; polldata.fd = p->fd; polldata.events = POLLOUT; while (poll(&polldata, 1, 0) > 0) { if ((polldata.revents & POLLNVAL) != 0) { p->fd = -1; 
return RET_ERROR; } if ((polldata.revents & POLLHUP) != 0) { (void)close(p->fd); p->fd = -1; return RET_OK; } if ((polldata.revents & POLLOUT) != 0) { size_t towrite = p->datalen - p->datasent; if (towrite > (size_t)512) towrite = 512; written = write(p->fd, p->data + p->datasent, towrite); if (written < 0) { int e = errno; fprintf(stderr, "Error '%s' while sending data to '%s', sending SIGABRT to it!\n", strerror(e), p->arguments[0]); (void)kill(p->child, SIGABRT); return RET_ERRNO(e); } p->datasent += written; if (p->datasent >= p->datalen) { free(p->data); p->data = NULL; ret = close(p->fd); p->fd = -1; if (ret != 0) return RET_ERRNO(errno); else return RET_OK; } continue; } /* something to write but at the same time not, * let's better stop here better */ return RET_OK; } } return RET_OK; } static retvalue notificator_enqueuechanges(struct notificator *n, const char *codename, const char *name, const char *version, const char *changeschunk, const char *safefilename, /*@null@*/const char *filekey) { size_t count, i, j; char **arguments; struct notification_process *p; catchchildren(); feedchildren(false); if (!n->changesacceptrule) return RET_NOTHING; if (limitation_missed(n->command, causingcommand)) { return RET_NOTHING; } count = 6; /* script "accepted" codename name version safename */ if (filekey != NULL) count++; arguments = nzNEW(count + 1, char*); if (FAILEDTOALLOC(arguments)) return RET_ERROR_OOM; i = 0; arguments[i++] = strdup(n->scriptname); arguments[i++] = strdup("accepted"); arguments[i++] = strdup(codename); arguments[i++] = strdup(name); arguments[i++] = strdup(version); arguments[i++] = strdup(safefilename); if (filekey != NULL) arguments[i++] = strdup(filekey); assert (i == count); arguments[i] = NULL; for (i = 0 ; i < count ; i++) if (FAILEDTOALLOC(arguments[i])) { for (j = 0 ; j < count ; j++) free(arguments[j]); free(arguments); return RET_ERROR_OOM; } if (processes == NULL) { p = NEW(struct notification_process); processes = p; } else { p = 
processes; while (p->next != NULL) p = p->next; p->next = NEW(struct notification_process); p = p->next; } if (FAILEDTOALLOC(p)) { for (j = 0 ; j < count ; j++) free(arguments[j]); free(arguments); return RET_ERROR_OOM; } if (causingfile != NULL) { p->causingfile = strdup(causingfile); if (FAILEDTOALLOC(p->causingfile)) { for (j = 0 ; j < count ; j++) free(arguments[j]); free(arguments); free(p); return RET_ERROR_OOM; } } else p->causingfile = NULL; p->causingrule = NULL; p->suitefrom = NULL; p->arguments = arguments; p->next = NULL; p->child = 0; p->fd = -1; p->datalen = 0; p->datasent = 0; p->data = NULL; // TODO: implement --withcontrol // until that changeschunk is not yet needed: changeschunk = changeschunk; if (runningchildren() < 1) startchild(); return RET_OK; } static retvalue notificator_enqueue(struct notificator *n, struct target *target, const char *name, /*@null@*/const char *version, /*@null@*/const char *oldversion, /*@null@*/const char *control, /*@null@*/const char *oldcontrol, /*@null@*/const struct strlist *filekeys, /*@null@*/const struct strlist *oldfilekeys, bool renotification, /*@null@*/const char *causingrule, /*@null@*/ const char *suitefrom) { size_t count, i; char **arguments; const char *action = NULL; struct notification_process *p; catchchildren(); feedchildren(false); if (n->changesacceptrule) return RET_NOTHING; // some day, some atom handling for those would be nice if (limitation_missed(n->architecture, target->architecture)) { if (runningchildren() < 1) startchild(); return RET_NOTHING; } if (limitation_missed(n->component, target->component)) { if (runningchildren() < 1) startchild(); return RET_NOTHING; } if (limitation_missed(n->packagetype, target->packagetype)) { if (runningchildren() < 1) startchild(); return RET_NOTHING; } if (limitation_missed(n->command, causingcommand)) { if (runningchildren() < 1) startchild(); return RET_NOTHING; } count = 7; /* script action codename type component architecture */ if (version != 
NULL) { action = "add"; count += 2; /* version and filekeylist marker */ if (filekeys != NULL) count += filekeys->count; } if (oldversion != NULL) { assert (!renotification); if (action == NULL) action = "remove"; else action = "replace"; count += 2; /* version and filekeylist marker */ if (oldfilekeys != NULL) count += oldfilekeys->count; } assert (action != NULL); if (renotification) action = "info"; arguments = nzNEW(count + 1, char*); if (FAILEDTOALLOC(arguments)) return RET_ERROR_OOM; i = 0; arguments[i++] = strdup(n->scriptname); arguments[i++] = strdup(action); arguments[i++] = strdup(target->distribution->codename); arguments[i++] = strdup(atoms_packagetypes[target->packagetype]); arguments[i++] = strdup(atoms_components[target->component]); arguments[i++] = strdup(atoms_architectures[target->architecture]); arguments[i++] = strdup(name); if (version != NULL) arguments[i++] = strdup(version); if (oldversion != NULL) arguments[i++] = strdup(oldversion); if (version != NULL) { int j; arguments[i++] = strdup("--"); if (filekeys != NULL) for (j = 0 ; j < filekeys->count ; j++) arguments[i++] = strdup(filekeys->values[j]); } if (oldversion != NULL) { int j; arguments[i++] = strdup("--"); if (oldfilekeys != NULL) for (j = 0 ; j < oldfilekeys->count ; j++) arguments[i++] = strdup(oldfilekeys->values[j]); } assert (i == count); arguments[i] = NULL; for (i = 0 ; i < count ; i++) { size_t j; if (FAILEDTOALLOC(arguments[i])) { for (j = 0 ; j < count ; j++) free(arguments[j]); free(arguments); return RET_ERROR_OOM; } } if (processes == NULL) { p = NEW(struct notification_process); processes = p; } else { p = processes; while (p->next != NULL) p = p->next; p->next = NEW(struct notification_process); p = p->next; } if (FAILEDTOALLOC(p)) { size_t j; for (j = 0 ; j < count ; j++) free(arguments[j]); free(arguments); return RET_ERROR_OOM; } if (causingfile != NULL) { size_t j; p->causingfile = strdup(causingfile); if (FAILEDTOALLOC(p->causingfile)) { for (j = 0 ; j < count 
; j++) free(arguments[j]); free(arguments); free(p); return RET_ERROR_OOM; } } else p->causingfile = NULL; if (causingrule != NULL) { size_t j; p->causingrule = strdup(causingrule); if (FAILEDTOALLOC(p->causingrule)) { for (j = 0 ; j < count ; j++) free(arguments[j]); free(arguments); free(p->causingfile); free(p); return RET_ERROR_OOM; } } else p->causingrule = NULL; if (suitefrom != NULL) { size_t j; p->suitefrom = strdup(suitefrom); if (FAILEDTOALLOC(p->suitefrom)) { for (j = 0 ; j < count ; j++) free(arguments[j]); free(arguments); free(p->causingfile); free(p->causingrule); free(p); return RET_ERROR_OOM; } } else p->suitefrom = NULL; p->arguments = arguments; p->next = NULL; p->child = 0; p->fd = -1; p->datalen = 0; p->datasent = 0; p->data = NULL; // TODO: implement --withcontrol // until that control is not yet needed: control = control; oldcontrol = oldcontrol; if (runningchildren() < 1) startchild(); return RET_OK; } void logger_wait(void) { while (processes != NULL) { catchchildren(); if (interrupted()) break; feedchildren(true); // TODO: add option to start multiple at the same time if (runningchildren() < 1) startchild(); else { struct timeval tv = { 0, 100 }; select(0, NULL, NULL, NULL, &tv); } } } void logger_warn_waiting(void) { struct notification_process *p; if (processes != NULL) { (void)fputs( "WARNING: some notificator hooks were not run!\n" "(most likely due to receiving an interruption request)\n" "You will either have to run them by hand or run rerunnotifiers if\n" "you want the information they get to not be out of sync.\n" "Missed calls are:\n", stderr); for (p = processes ; p != NULL ; p = p->next) { char **c = p->arguments; if (c == NULL) continue; while (*c != NULL) { (void)fputc('"', stderr); (void)fputs(*c, stderr); (void)fputc('"', stderr); c++; if (*c != NULL) (void)fputc(' ', stderr); } (void)fputc('\n', stderr); } } } struct logger { /*@dependent@*//*@null@*/struct logfile *logfile; size_t notificator_count; struct notificator 
*notificators; }; void logger_free(struct logger *logger) { if (logger == NULL) return; if (logger->logfile != NULL) logfile_dereference(logger->logfile); if (logger->notificators != NULL) { size_t i; for (i = 0 ; i < logger->notificator_count ; i++) notificator_done(&logger->notificators[i]); free(logger->notificators); } free(logger); } retvalue logger_init(struct configiterator *iter, struct logger **logger_p) { struct logger *n; retvalue r; char *logfilename; bool havenotificators; r = config_getfileinline(iter, &logfilename); if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) logfilename = NULL; if (config_nextnonspaceinline(iter) != EOF) { fprintf(stderr, "Error parsing %s, line %u, column %u:\n" "Unexpected second filename for logfile.\n", config_filename(iter), config_line(iter), config_column(iter)); free(logfilename); return RET_ERROR; } config_overline(iter); havenotificators = config_nextline(iter); if (!havenotificators && logfilename == NULL) { *logger_p = NULL; return RET_NOTHING; } n = NEW(struct logger); if (FAILEDTOALLOC(n)) { free(logfilename); return RET_ERROR_OOM; } if (logfilename != NULL) { assert (*logfilename != '\0'); r = logfile_reference(logfilename, &n->logfile); if (RET_WAS_ERROR(r)) { free(n); return r; } } else n->logfile = NULL; n->notificators = NULL; n->notificator_count = 0; while (havenotificators) { struct notificator *newnot; newnot = realloc(n->notificators, (n->notificator_count+1) * sizeof(struct notificator)); if (FAILEDTOALLOC(newnot)) { logger_free(n); return RET_ERROR_OOM; } n->notificators = newnot; r = notificator_parse(&n->notificators[n->notificator_count++], iter); if (RET_WAS_ERROR(r)) { /* a bit ugly: also free the just failed item here */ logger_free(n); return r; } if (r == RET_NOTHING) n->notificator_count--; // TODO assert eol here... 
havenotificators = config_nextline(iter); } *logger_p = n; return RET_OK; } retvalue logger_prepare(struct logger *logger) { retvalue r; if (logger->logfile == NULL) return RET_NOTHING; if (logger->logfile != NULL && logger->logfile->fd < 0) { r = logfile_open(logger->logfile); } else r = RET_OK; return r; } bool logger_isprepared(/*@null@*/const struct logger *logger) { if (logger == NULL) return true; if (logger->logfile != NULL && logger->logfile->fd < 0) return false; return true; } void logger_log(struct logger *log, struct target *target, const char *name, const char *version, const char *oldversion, const char *control, const char *oldcontrol, const struct strlist *filekeys, const struct strlist *oldfilekeys, const char *causingrule, const char *suitefrom) { size_t i; assert (name != NULL); assert (control != NULL || oldcontrol != NULL); assert (version != NULL || control == NULL); /* so that a replacement can be detected by existance of oldversion */ if (oldcontrol != NULL && oldversion == NULL) oldversion = "#unparseable#"; assert (version != NULL || oldversion != NULL); if (log->logfile != NULL) logfile_write(log->logfile, target, name, version, oldversion); for (i = 0 ; i < log->notificator_count ; i++) { notificator_enqueue(&log->notificators[i], target, name, version, oldversion, control, oldcontrol, filekeys, oldfilekeys, false, causingrule, suitefrom); } } void logger_logchanges(struct logger *log, const char *codename, const char *name, const char *version, const char *data, const char *safefilename, const char *changesfilekey) { size_t i; assert (name != NULL); assert (version != NULL); if (log == NULL) return; for (i = 0 ; i < log->notificator_count ; i++) { notificator_enqueuechanges(&log->notificators[i], codename, name, version, data, safefilename, changesfilekey); } } bool logger_rerun_needs_target(const struct logger *logger, const struct target *target) { size_t i; struct notificator *n; for (i = 0 ; i < logger->notificator_count ; i++) { n 
= &logger->notificators[i]; if (limitation_missed(n->architecture, target->architecture)) continue; if (limitation_missed(n->component, target->component)) continue; if (limitation_missed(n->packagetype, target->packagetype)) continue; return true; } return false; } retvalue logger_reruninfo(struct logger *logger, struct target *target, const char *name, const char *version, const char *control, /*@null@*/const struct strlist *filekeys) { retvalue result, r; size_t i; assert (name != NULL); assert (version != NULL); assert (control != NULL); result = RET_NOTHING; for (i = 0 ; i < logger->notificator_count ; i++) { r = notificator_enqueue(&logger->notificators[i], target, name, version, NULL, control, NULL, filekeys, NULL, true, NULL, NULL); RET_UPDATE(result, r); } return result; } reprepro-4.13.1/updates.h0000644000175100017510000000272012152651661012205 00000000000000#ifndef REPREPRO_UPDATES_H #define REPREPRO_UPDATES_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" 
#endif #ifndef REPREPRO_RELEASE_H #include "release.h" #endif #ifndef REPREPRO_DISTRIBUTION_H #include "distribution.h" #endif #ifndef REPREPRO_STRLIST_H #include "strlist.h" #endif #ifndef REPREPRO_FREESPACE_H #include "freespace.h" #endif struct update_pattern; struct update_origin; struct update_target; struct update_distribution; retvalue updates_getpatterns(/*@out@*/struct update_pattern **); void updates_freepatterns(/*@only@*/struct update_pattern *p); void updates_freeupdatedistributions(/*@only@*/struct update_distribution *d); retvalue updates_calcindices(struct update_pattern *, struct distribution *, const struct atomlist * /*components*/, const struct atomlist */*architectures*/, const struct atomlist */*types*/, /*@out@*/struct update_distribution **); retvalue updates_update(struct update_distribution *, bool /*nolistsdownload*/, bool /*skipold*/, enum spacecheckmode, off_t /*reserveddb*/, off_t /*reservedother*/); retvalue updates_checkupdate(struct update_distribution *, bool /*nolistsdownload*/, bool /*skipold*/); retvalue updates_dumpupdate(struct update_distribution *, bool /*nolistsdownload*/, bool /*skipold*/); retvalue updates_predelete(struct update_distribution *, bool /*nolistsdownload*/, bool /*skipold*/); retvalue updates_cleanlists(const struct distribution *, const struct update_pattern *); #endif reprepro-4.13.1/rredpatch.h0000644000175100017510000000177512152651661012525 00000000000000#ifndef REPREPRO_RREDPATCH_H #define REPREPRO_RREDPATCH_H struct rred_patch; struct modification; retvalue patch_load(const char *, off_t, /*@out@*/struct rred_patch **); retvalue patch_loadfd(const char *, int, off_t, /*@out@*/struct rred_patch **); void patch_free(/*@only@*/struct rred_patch *); /*@only@*//*@null@*/struct modification *patch_getmodifications(struct rred_patch *); /*@null@*/const struct modification *patch_getconstmodifications(struct rred_patch *); struct modification *modification_dup(const struct modification *); void 
modification_freelist(/*@only@*/struct modification *); retvalue combine_patches(/*@out@*/struct modification **, /*@only@*/struct modification *, /*@only@*/struct modification *); void modification_printaspatch(void *, const struct modification *, void (const void *, size_t, void *)); retvalue modification_addstuff(const char *source, struct modification **patch_p, /*@out@*/char **line_p); retvalue patch_file(FILE *, const char *, const struct modification *); #endif reprepro-4.13.1/sizes.h0000644000175100017510000000021512152651661011672 00000000000000#ifndef REPREPRO_SIZES_H #define REPREPRO_SIZES_H retvalue sizes_distributions(struct distribution * /*all*/, bool /* specific */); #endif reprepro-4.13.1/downloadcache.h0000644000175100017510000000273512152651661013341 00000000000000#ifndef REPREPRO_DOWNLOADLIST_H #define REPREPRO_DOWNLOADLIST_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" #endif #ifndef REPREPRO_STRLIST_H #include "strlist.h" #warning "What's hapening here?" #endif #ifndef REPREPRO_DATABASE_H #include "database.h" #endif #ifndef REPREPRO_APTMETHOD_H #include "aptmethod.h" #endif #ifndef REPREPRO_CHECKSUMS_H #include "checksums.h" #endif #ifndef REPREPRO_FREESPACE_H #include "freespace.h" #endif struct downloaditem; struct downloadcache { /*@null@*/struct downloaditem *items; /*@null@*/struct devices *devices; /* for showing what percentage was downloaded */ long long size_todo, size_done; unsigned int last_percent; }; /* Initialize a new download session */ retvalue downloadcache_initialize(enum spacecheckmode, off_t /*reserveddb*/, off_t /*reservedother*/, /*@out@*/struct downloadcache **); /* free all memory */ retvalue downloadcache_free(/*@null@*//*@only@*/struct downloadcache *); /* queue a new file to be downloaded: * results in RET_ERROR_WRONG_MD5, if someone else already asked * for the same destination with other md5sum created. 
*/ retvalue downloadcache_add(struct downloadcache *, struct aptmethod *, const char * /*orig*/, const char * /*filekey*/, const struct checksums *); /* some as above, only for more files... */ retvalue downloadcache_addfiles(struct downloadcache *, struct aptmethod *, const struct checksumsarray * /*origfiles*/, const struct strlist * /*filekeys*/); #endif reprepro-4.13.1/debfile.h0000644000175100017510000000056612152651661012140 00000000000000#ifndef REPREPRO_DEBFILE_H #define REPREPRO_DEBFILE_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" #endif /* Read the control information of a .deb file */ retvalue extractcontrol(/*@out@*/char **, const char *); /* Read a list of files from a .deb file */ retvalue getfilelist(/*@out@*/char **, /*@out@*/ size_t *, const char *); #endif reprepro-4.13.1/incoming.c0000644000175100017510000020573312152651661012347 00000000000000/* This file is part of "reprepro" * Copyright (C) 2006,2007,2008,2009,2010 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "ignore.h" #include "mprintf.h" #include "filecntl.h" #include "strlist.h" #include "dirs.h" #include "names.h" #include "checksums.h" #include "chunks.h" #include "target.h" #include "signature.h" #include "binaries.h" #include "sources.h" #include "dpkgversions.h" #include "uploaderslist.h" #include "guesscomponent.h" #include "log.h" #include "override.h" #include "tracking.h" #include "incoming.h" #include "files.h" #include "configparser.h" #include "byhandhook.h" #include "changes.h" enum permitflags { /* do not error out on unused files */ pmf_unused_files = 0, /* do not error out if there already is a newer package */ pmf_oldpackagenewer, pmf_COUNT /* must be last */ }; enum cleanupflags { /* delete everything referenced by a .changes file * when it is not accepted */ cuf_on_deny = 0, /* check owner when deleting on_deny */ cuf_on_deny_check_owner, /* delete everything referenced by a .changes on errors * after accepting that .changes file*/ cuf_on_error, /* delete unused files after sucessfully * processing the used ones */ cuf_unused_files, cuf_COUNT /* must be last */ }; enum optionsflags { /* only put _all.deb comes with those of some architecture, * only put in those architectures */ iof_limit_arch_all = 0, /* allow .changes file to specify multipe distributions */ iof_multiple_distributions, iof_COUNT /* must be last */ }; struct incoming { /* by incoming_parse: */ char *name; char *directory; char *morguedir; char *tempdir; char *logdir; struct strlist allow; struct distribution **allow_into; struct distribution *default_into; /* by incoming_prepare: */ struct strlist files; bool *processed; 
bool *delete; bool permit[pmf_COUNT]; bool cleanup[cuf_COUNT]; bool options[iof_COUNT]; /* only to ease parsing: */ const char *filename; /* only valid while parsing! */ size_t lineno; }; #define BASENAME(i, ofs) (i)->files.values[ofs] /* the changes file is always the first one listed */ #define changesfile(c) (c->files) static void incoming_free(/*@only@*/ struct incoming *i) { if (i == NULL) return; free(i->name); free(i->morguedir); free(i->tempdir); free(i->logdir); free(i->directory); strlist_done(&i->allow); free(i->allow_into); strlist_done(&i->files); free(i->processed); free(i->delete); free(i); } static retvalue incoming_prepare(struct incoming *i) { DIR *dir; struct dirent *ent; retvalue r; int ret; /* TODO: decide whether to clean this directory first ... */ r = dirs_make_recursive(i->tempdir); if (RET_WAS_ERROR(r)) return r; dir = opendir(i->directory); if (dir == NULL) { int e = errno; fprintf(stderr, "Cannot scan '%s': %s\n", i->directory, strerror(e)); return RET_ERRNO(e); } while ((ent = readdir(dir)) != NULL) { if (ent->d_name[0] == '.') continue; /* this should be impossible to hit. 
* but given utf-8 encoding filesystems and * overlong slashes, better check than be sorry */ if (strchr(ent->d_name, '/') != NULL) continue; r = strlist_add_dup(&i->files, ent->d_name) ; if (RET_WAS_ERROR(r)) { (void)closedir(dir); return r; } } ret = closedir(dir); if (ret != 0) { int e = errno; fprintf(stderr, "Error scanning '%s': %s\n", i->directory, strerror(e)); return RET_ERRNO(e); } i->processed = nzNEW(i->files.count, bool); if (FAILEDTOALLOC(i->processed)) return RET_ERROR_OOM; i->delete = nzNEW(i->files.count, bool); if (FAILEDTOALLOC(i->delete)) return RET_ERROR_OOM; return RET_OK; } struct read_incoming_data { /*@temp@*/const char *name; /*@temp@*/struct distribution *distributions; struct incoming *i; }; static retvalue translate(struct distribution *distributions, struct strlist *names, struct distribution ***r) { struct distribution **d; int j; d = nzNEW(names->count, struct distribution *); if (FAILEDTOALLOC(d)) return RET_ERROR_OOM; for (j = 0 ; j < names->count ; j++) { d[j] = distribution_find(distributions, names->values[j]); if (d[j] == NULL) { free(d); return RET_ERROR; } } *r = d; return RET_OK; } CFstartparse(incoming) { CFstartparseVAR(incoming, result_p); struct incoming *i; i = zNEW(struct incoming); if (FAILEDTOALLOC(i)) return RET_ERROR_OOM; *result_p = i; return RET_OK; } CFfinishparse(incoming) { CFfinishparseVARS(incoming, i, last, d); if (!complete || strcmp(i->name, d->name) != 0) { incoming_free(i); return RET_NOTHING; } if (d->i != NULL) { fprintf(stderr, "Multiple definitions of '%s': first started at line %u of %s, second at line %u of %s!\n", d->name, (unsigned int)d->i->lineno, d->i->filename, config_firstline(iter), config_filename(iter)); incoming_free(i); incoming_free(d->i); d->i = NULL; return RET_ERROR; } if (i->logdir != NULL && i->logdir[0] != '/') { char *n = calc_dirconcat(global.basedir, i->logdir); if (FAILEDTOALLOC(n)) { incoming_free(i); return RET_ERROR_OOM; } free(i->logdir); i->logdir = n; } if (i->morguedir 
!= NULL && i->morguedir[0] != '/') { char *n = calc_dirconcat(global.basedir, i->morguedir); if (FAILEDTOALLOC(n)) { incoming_free(i); return RET_ERROR_OOM; } free(i->morguedir); i->morguedir = n; } if (i->tempdir[0] != '/') { char *n = calc_dirconcat(global.basedir, i->tempdir); if (FAILEDTOALLOC(n)) { incoming_free(i); return RET_ERROR_OOM; } free(i->tempdir); i->tempdir = n; } if (i->directory[0] != '/') { char *n = calc_dirconcat(global.basedir, i->directory); if (FAILEDTOALLOC(n)) { incoming_free(i); return RET_ERROR_OOM; } free(i->directory); i->directory = n; } if (i->default_into == NULL && i->allow.count == 0) { fprintf(stderr, "There is neither an 'Allow' nor a 'Default' definition in rule '%s'\n" "(starting at line %u, ending at line %u of %s)!\n" "Aborting as nothing would be let in.\n", d->name, config_firstline(iter), config_line(iter), config_filename(iter)); incoming_free(i); return RET_ERROR; } if (i->morguedir != NULL && !i->cleanup[cuf_on_deny] && !i->cleanup[cuf_on_error] && !i->cleanup[cuf_unused_files]) { fprintf(stderr, "Warning: There is a 'MorgueDir' but no 'Cleanup' to act on in rule '%s'\n" "(starting at line %u, ending at line %u of %s)!\n", d->name, config_firstline(iter), config_line(iter), config_filename(iter)); } d->i = i; i->filename = config_filename(iter); i->lineno = config_firstline(iter); /* only suppreses the last unused warning: */ *last = i; return RET_OK; } CFSETPROC(incoming, default) { CFSETPROCVARS(incoming, i, d); char *default_into; retvalue r; r = config_getonlyword(iter, headername, NULL, &default_into); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; i->default_into = distribution_find(d->distributions, default_into); free(default_into); return (i->default_into == NULL)?RET_ERROR:RET_OK; } CFSETPROC(incoming, allow) { CFSETPROCVARS(incoming, i, d); struct strlist allow_into; retvalue r; r = config_getsplitwords(iter, headername, &i->allow, &allow_into); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) 
return r; assert (i->allow.count == allow_into.count); r = translate(d->distributions, &allow_into, &i->allow_into); strlist_done(&allow_into); if (RET_WAS_ERROR(r)) return r; return RET_OK; } CFSETPROC(incoming, permit) { CFSETPROCVARS(incoming, i, d); static const struct constant const permitconstants[] = { { "unused_files", pmf_unused_files}, { "older_version", pmf_oldpackagenewer}, /* not yet implemented: { "downgrade", pmf_downgrade}, */ { NULL, -1} }; if (IGNORABLE(unknownfield)) return config_getflags(iter, headername, permitconstants, i->permit, true, ""); else if (i->name == NULL) return config_getflags(iter, headername, permitconstants, i->permit, false, "\n(try put Name: before Permit: to ignore if it is from the wrong rule"); else if (strcmp(i->name, d->name) != 0) return config_getflags(iter, headername, permitconstants, i->permit, true, " (but not within the rule we are intrested in.)"); else return config_getflags(iter, headername, permitconstants, i->permit, false, " (use --ignore=unknownfield to ignore this)\n"); } CFSETPROC(incoming, cleanup) { CFSETPROCVARS(incoming, i, d); static const struct constant const cleanupconstants[] = { { "unused_files", cuf_unused_files}, { "on_deny", cuf_on_deny}, /* not yet implemented { "on_deny_check_owner", cuf_on_deny_check_owner}, */ { "on_error", cuf_on_error}, { NULL, -1} }; if (IGNORABLE(unknownfield)) return config_getflags(iter, headername, cleanupconstants, i->cleanup, true, ""); else if (i->name == NULL) return config_getflags(iter, headername, cleanupconstants, i->cleanup, false, "\n(try put Name: before Cleanup: to ignore if it is from the wrong rule"); else if (strcmp(i->name, d->name) != 0) return config_getflags(iter, headername, cleanupconstants, i->cleanup, true, " (but not within the rule we are intrested in.)"); else return config_getflags(iter, headername, cleanupconstants, i->cleanup, false, " (use --ignore=unknownfield to ignore this)\n"); } CFSETPROC(incoming, options) { 
CFSETPROCVARS(incoming, i, d); static const struct constant const optionsconstants[] = { { "limit_arch_all", iof_limit_arch_all}, { "multiple_distributions", iof_multiple_distributions}, { NULL, -1} }; if (IGNORABLE(unknownfield)) return config_getflags(iter, headername, optionsconstants, i->options, true, ""); else if (i->name == NULL) return config_getflags(iter, headername, optionsconstants, i->options, false, "\n(try put Name: before Options: to ignore if it is from the wrong rule"); else if (strcmp(i->name, d->name) != 0) return config_getflags(iter, headername, optionsconstants, i->options, true, " (but not within the rule we are intrested in.)"); else return config_getflags(iter, headername, optionsconstants, i->options, false, " (use --ignore=unknownfield to ignore this)\n"); } CFvalueSETPROC(incoming, name) CFdirSETPROC(incoming, logdir) CFdirSETPROC(incoming, tempdir) CFdirSETPROC(incoming, morguedir) CFdirSETPROC(incoming, directory) CFtruthSETPROC2(incoming, multiple, options[iof_multiple_distributions]) static const struct configfield incomingconfigfields[] = { CFr("Name", incoming, name), CFr("TempDir", incoming, tempdir), CFr("IncomingDir", incoming, directory), CF("MorgueDir", incoming, morguedir), CF("Default", incoming, default), CF("Allow", incoming, allow), CF("Multiple", incoming, multiple), CF("Options", incoming, options), CF("Cleanup", incoming, cleanup), CF("Permit", incoming, permit), CF("Logdir", incoming, logdir) }; static retvalue incoming_init(struct distribution *distributions, const char *name, /*@out@*/struct incoming **result) { retvalue r; struct read_incoming_data imports; imports.name = name; imports.distributions = distributions; imports.i = NULL; r = configfile_parse("incoming", IGNORABLE(unknownfield), startparseincoming, finishparseincoming, "incoming rule", incomingconfigfields, ARRAYCOUNT(incomingconfigfields), &imports); if (RET_WAS_ERROR(r)) return r; if (imports.i == NULL) { fprintf(stderr, "No definition for '%s' found 
in '%s/incoming'!\n", name, global.confdir); return RET_ERROR_MISSING; } r = incoming_prepare(imports.i); if (RET_WAS_ERROR(r)) { incoming_free(imports.i); return r; } *result = imports.i; return r; } struct candidate { /* from candidate_read */ int ofs; char *control; struct signatures *signatures; /* from candidate_parse */ char *source, *sourceversion, *changesversion; struct strlist distributions, architectures, binaries; bool isbinNMU; struct candidate_file { /* set by _addfileline */ struct candidate_file *next; int ofs; /* to basename in struct incoming->files */ filetype type; /* all NULL if it is the .changes itself, * otherwise the data from the .changes for this file: */ char *section; char *priority; architecture_t architecture; char *name; /* like above, but updated once files are copied */ struct checksums *checksums; /* set later */ bool used; char *tempfilename; /* distribution-unspecific contents of the packages */ /* - only for FE_BINARY types: */ struct deb_headers deb; /* - only for fe_DSC types */ struct dsc_headers dsc; /* only valid while parsing */ struct hashes h; } *files; struct candidate_perdistribution { struct candidate_perdistribution *next; struct distribution *into; bool skip; struct candidate_package { /* a package is something installing files, including * the pseudo-package for the .changes file, if that is * to be included */ struct candidate_package *next; const struct candidate_file *master; component_t component; packagetype_t packagetype; struct strlist filekeys; /* a list of pointers to the files belonging to those * filekeys, NULL if it does not need linking/copying */ const struct candidate_file **files; /* only for FE_PACKAGE: */ char *control; /* only for fe_DSC */ char *directory; /* true if skipped because already there or newer */ bool skip; } *packages; struct byhandfile { struct byhandfile *next; const struct candidate_file *file; const struct byhandhook *hook; } *byhandhookstocall; } *perdistribution; /* the 
logsubdir, and the list of files to put there, * otherwise both NULL */ char *logsubdir; int logcount; const struct candidate_file **logfiles; }; static void candidate_file_free(/*@only@*/struct candidate_file *f) { checksums_free(f->checksums); free(f->section); free(f->priority); free(f->name); if (FE_BINARY(f->type)) binaries_debdone(&f->deb); if (f->type == fe_DSC) sources_done(&f->dsc); if (f->tempfilename != NULL) { (void)unlink(f->tempfilename); free(f->tempfilename); f->tempfilename = NULL; } free(f); } static void candidate_package_free(/*@only@*/struct candidate_package *p) { free(p->control); free(p->directory); strlist_done(&p->filekeys); free(p->files); free(p); } static void candidate_free(/*@only@*/struct candidate *c) { if (c == NULL) return; free(c->control); signatures_free(c->signatures); free(c->source); free(c->sourceversion); free(c->changesversion); strlist_done(&c->distributions); strlist_done(&c->architectures); strlist_done(&c->binaries); while (c->perdistribution != NULL) { struct candidate_perdistribution *d = c->perdistribution; c->perdistribution = d->next; while (d->packages != NULL) { struct candidate_package *p = d->packages; d->packages = p->next; candidate_package_free(p); } while (d->byhandhookstocall != NULL) { struct byhandfile *h = d->byhandhookstocall; d->byhandhookstocall = h->next; free(h); } free(d); } while (c->files != NULL) { struct candidate_file *f = c->files; c->files = f->next; candidate_file_free(f); } free(c->logsubdir); free(c->logfiles); free(c); } static retvalue candidate_newdistribution(struct candidate *c, struct distribution *distribution) { struct candidate_perdistribution *n, **pp = &c->perdistribution; while (*pp != NULL) { if ((*pp)->into == distribution) return RET_NOTHING; pp = &(*pp)->next; } n = zNEW(struct candidate_perdistribution); if (FAILEDTOALLOC(n)) return RET_ERROR_OOM; n->into = distribution; *pp = n; return RET_OK; } static struct candidate_package *candidate_newpackage(struct 
candidate_perdistribution *fordistribution, const struct candidate_file *master) { struct candidate_package *n, **pp = &fordistribution->packages; while (*pp != NULL) pp = &(*pp)->next; n = zNEW(struct candidate_package); if (FAILEDTOALLOC(n)) return NULL; n->component = atom_unknown; n->packagetype = atom_unknown; n->master = master; *pp = n; return n; } static retvalue candidate_usefile(const struct incoming *i, const struct candidate *c, struct candidate_file *file); static retvalue candidate_read(struct incoming *i, int ofs, struct candidate **result, bool *broken) { struct candidate *n; retvalue r; n = zNEW(struct candidate); if (FAILEDTOALLOC(n)) return RET_ERROR_OOM; n->ofs = ofs; /* first file of any .changes file is the file itself */ n->files = zNEW(struct candidate_file); if (FAILEDTOALLOC(n->files)) { free(n); return RET_ERROR_OOM; } n->files->ofs = n->ofs; n->files->type = fe_CHANGES; r = candidate_usefile(i, n, n->files); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { candidate_free(n); return r; } assert (n->files->tempfilename != NULL); r = signature_readsignedchunk(n->files->tempfilename, BASENAME(i, ofs), &n->control, &n->signatures, broken); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { candidate_free(n); return r; } *result = n; return RET_OK; } static retvalue candidate_addfileline(struct incoming *i, struct candidate *c, const char *fileline) { struct candidate_file **p, *n; char *basefilename; retvalue r; n = zNEW(struct candidate_file); if (FAILEDTOALLOC(n)) return RET_ERROR_OOM; r = changes_parsefileline(fileline, &n->type, &basefilename, &n->h.hashes[cs_md5sum], &n->h.hashes[cs_length], &n->section, &n->priority, &n->architecture, &n->name); if (RET_WAS_ERROR(r)) { free(n); return r; } n->ofs = strlist_ofs(&i->files, basefilename); if (n->ofs < 0) { fprintf(stderr, "In '%s': file '%s' not found in the incoming dir!\n", i->files.values[c->ofs], basefilename); free(basefilename); candidate_file_free(n); return RET_ERROR_MISSING; } 
free(basefilename); p = &c->files; while (*p != NULL) p = &(*p)->next; *p = n; return RET_OK; } static retvalue candidate_addhashes(struct incoming *i, struct candidate *c, enum checksumtype cs, const struct strlist *lines) { int j; for (j = 0 ; j < lines->count ; j++) { const char *fileline = lines->values[j]; struct candidate_file *f; const char *basefilename; struct hash_data hash, size; retvalue r; r = hashline_parse(BASENAME(i, c->ofs), fileline, cs, &basefilename, &hash, &size); if (!RET_IS_OK(r)) return r; f = c->files; while (f != NULL && strcmp(BASENAME(i, f->ofs), basefilename) != 0) f = f->next; if (f == NULL) { fprintf(stderr, "Warning: Ignoring file '%s' listed in '%s' but not in '%s' of '%s'!\n", basefilename, changes_checksum_names[cs], changes_checksum_names[cs_md5sum], BASENAME(i, c->ofs)); continue; } if (f->h.hashes[cs_length].len != size.len || memcmp(f->h.hashes[cs_length].start, size.start, size.len) != 0) { fprintf(stderr, "Error: Different size of '%s' listed in '%s' and '%s' of '%s'!\n", basefilename, changes_checksum_names[cs], changes_checksum_names[cs_md5sum], BASENAME(i, c->ofs)); return RET_ERROR; } f->h.hashes[cs] = hash; } return RET_OK; } static retvalue candidate_finalizechecksums(struct candidate *c) { struct candidate_file *f; retvalue r; /* store collected hashes as checksums structs, * starting after .changes file: */ for (f = c->files->next ; f != NULL ; f = f->next) { r = checksums_initialize(&f->checksums, f->h.hashes); if (RET_WAS_ERROR(r)) return r; } return RET_OK; } static retvalue candidate_parse(struct incoming *i, struct candidate *c) { retvalue r; struct strlist filelines[cs_hashCOUNT]; enum checksumtype cs; int j; #define R if (RET_WAS_ERROR(r)) return r; #define E(err, ...) 
{ \ if (r == RET_NOTHING) { \ fprintf(stderr, "In '%s': " err "\n", \ BASENAME(i, c->ofs), ## __VA_ARGS__); \ r = RET_ERROR; \ } \ if (RET_WAS_ERROR(r)) return r; \ } r = chunk_getnameandversion(c->control, "Source", &c->source, &c->sourceversion); E("Missing 'Source' field!"); r = propersourcename(c->source); E("Malforce Source name!"); if (c->sourceversion != NULL) { r = properversion(c->sourceversion); E("Malforce Source Version number!"); } r = chunk_getwordlist(c->control, "Binary", &c->binaries); E("Missing 'Binary' field!"); r = chunk_getwordlist(c->control, "Architecture", &c->architectures); E("Missing 'Architecture' field!"); r = chunk_getvalue(c->control, "Version", &c->changesversion); E("Missing 'Version' field!"); r = properversion(c->changesversion); E("Malforce Version number!"); // TODO: logic to detect binNMUs to warn against sources? if (c->sourceversion == NULL) { c->sourceversion = strdup(c->changesversion); if (FAILEDTOALLOC(c->sourceversion)) return RET_ERROR_OOM; c->isbinNMU = false; } else { int cmp; r = dpkgversions_cmp(c->sourceversion, c->changesversion, &cmp); R; c->isbinNMU = cmp != 0; } r = chunk_getwordlist(c->control, "Distribution", &c->distributions); E("Missing 'Distribution' field!"); r = chunk_getextralinelist(c->control, changes_checksum_names[cs_md5sum], &filelines[cs_md5sum]); E("Missing '%s' field!", changes_checksum_names[cs_md5sum]); for (j = 0 ; j < filelines[cs_md5sum].count ; j++) { r = candidate_addfileline(i, c, filelines[cs_md5sum].values[j]); if (RET_WAS_ERROR(r)) { strlist_done(&filelines[cs_md5sum]); return r; } } for (cs = cs_firstEXTENDED ; cs < cs_hashCOUNT ; cs++) { r = chunk_getextralinelist(c->control, changes_checksum_names[cs], &filelines[cs]); if (RET_IS_OK(r)) r = candidate_addhashes(i, c, cs, &filelines[cs]); else strlist_init(&filelines[cs]); if (RET_WAS_ERROR(r)) { while (cs-- > cs_md5sum) strlist_done(&filelines[cs]); return r; } } r = candidate_finalizechecksums(c); for (cs = cs_md5sum ; cs < 
cs_hashCOUNT ; cs++) strlist_done(&filelines[cs]); R; if (c->files == NULL || c->files->next == NULL) { fprintf(stderr, "In '%s': Empty 'Files' section!\n", BASENAME(i, c->ofs)); return RET_ERROR; } return RET_OK; } static retvalue candidate_earlychecks(struct incoming *i, struct candidate *c) { struct candidate_file *file; retvalue r; // TODO: allow being more permissive, // that will need some more checks later, though r = propersourcename(c->source); if (RET_WAS_ERROR(r)) return r; r = properversion(c->sourceversion); if (RET_WAS_ERROR(r)) return r; for (file = c->files ; file != NULL ; file = file->next) { if (file->type != fe_CHANGES && file->type != fe_BYHAND && file->type != fe_LOG && !atom_defined(file->architecture)) { fprintf(stderr, "'%s' contains '%s' not matching an valid architecture in any distribution known!\n", BASENAME(i, c->ofs), BASENAME(i, file->ofs)); return RET_ERROR; } if (!FE_PACKAGE(file->type)) continue; assert (atom_defined(file->architecture)); if (strlist_in(&c->architectures, atoms_architectures[file->architecture])) continue; fprintf(stderr, "'%s' is not listed in the Architecture header of '%s' but file '%s' looks like it!\n", atoms_architectures[file->architecture], BASENAME(i, c->ofs), BASENAME(i, file->ofs)); return RET_ERROR; } return RET_OK; } /* Is used before any other candidate fields are set */ static retvalue candidate_usefile(const struct incoming *i, const struct candidate *c, struct candidate_file *file) { const char *basefilename; char *origfile, *tempfilename; struct checksums *readchecksums; retvalue r; bool improves; const char *p; if (file->used && file->tempfilename != NULL) return RET_OK; assert(file->tempfilename == NULL); basefilename = BASENAME(i, file->ofs); for (p = basefilename; *p != '\0' ; p++) { if ((0x80 & *(const unsigned char *)p) != 0) { fprintf(stderr, "Invalid filename '%s' listed in '%s': contains 8-bit characters\n", basefilename, BASENAME(i, c->ofs)); return RET_ERROR; } } tempfilename = 
calc_dirconcat(i->tempdir, basefilename); if (FAILEDTOALLOC(tempfilename)) return RET_ERROR_OOM; origfile = calc_dirconcat(i->directory, basefilename); if (FAILEDTOALLOC(origfile)) { free(tempfilename); return RET_ERROR_OOM; } r = checksums_copyfile(tempfilename, origfile, true, &readchecksums); free(origfile); if (RET_WAS_ERROR(r)) { free(tempfilename); return r; } if (file->checksums == NULL) { file->checksums = readchecksums; file->tempfilename = tempfilename; file->used = true; return RET_OK; } if (!checksums_check(file->checksums, readchecksums, &improves)) { fprintf(stderr, "ERROR: File '%s' does not match expectations:\n", basefilename); checksums_printdifferences(stderr, file->checksums, readchecksums); checksums_free(readchecksums); deletefile(tempfilename); free(tempfilename); return RET_ERROR_WRONG_MD5; } if (improves) { r = checksums_combine(&file->checksums, readchecksums, NULL); if (RET_WAS_ERROR(r)) { checksums_free(readchecksums); deletefile(tempfilename); free(tempfilename); return r; } } checksums_free(readchecksums); file->tempfilename = tempfilename; file->used = true; return RET_OK; } static inline retvalue getsectionprioritycomponent(const struct incoming *i, const struct candidate *c, const struct distribution *into, const struct candidate_file *file, const char *name, const struct overridedata *oinfo, /*@out@*/const char **section_p, /*@out@*/const char **priority_p, /*@out@*/component_t *component) { retvalue r; const char *section, *priority, *forcecomponent; component_t fc; section = override_get(oinfo, SECTION_FIELDNAME); if (section == NULL) { // TODO: warn about disparities here? section = file->section; } if (section == NULL || strcmp(section, "-") == 0) { fprintf(stderr, "No section found for '%s' ('%s' in '%s')!\n", name, BASENAME(i, file->ofs), BASENAME(i, c->ofs)); return RET_ERROR; } priority = override_get(oinfo, PRIORITY_FIELDNAME); if (priority == NULL) { // TODO: warn about disparities here? 
priority = file->priority; } if (priority == NULL || strcmp(priority, "-") == 0) { fprintf(stderr, "No priority found for '%s' ('%s' in '%s')!\n", name, BASENAME(i, file->ofs), BASENAME(i, c->ofs)); return RET_ERROR; } forcecomponent = override_get(oinfo, "$Component"); if (forcecomponent != NULL) { fc = component_find(forcecomponent); if (!atom_defined(fc)) { fprintf(stderr, "Unknown component '%s' (in $Component in override file for '%s'\n", forcecomponent, name); return RET_ERROR; } /* guess_component will check if that is valid for this * distribution */ } else fc = atom_unknown; r = guess_component(into->codename, &into->components, BASENAME(i, file->ofs), section, fc, component); if (RET_WAS_ERROR(r)) { return r; } *section_p = section; *priority_p = priority; return RET_OK; } static retvalue candidate_read_deb(struct incoming *i, struct candidate *c, struct candidate_file *file) { retvalue r; r = binaries_readdeb(&file->deb, file->tempfilename, true); if (RET_WAS_ERROR(r)) return r; if (strcmp(file->name, file->deb.name) != 0) { // TODO: add permissive thing to ignore this fprintf(stderr, "Name part of filename ('%s') and name within the file ('%s') do not match for '%s' in '%s'!\n", file->name, file->deb.name, BASENAME(i, file->ofs), BASENAME(i, c->ofs)); return RET_ERROR; } if (file->architecture != file->deb.architecture) { // TODO: add permissive thing to ignore this in some cases // but do not forget to look into into->architectures then fprintf(stderr, "Architecture '%s' of '%s' does not match '%s' specified in '%s'!\n", atoms_architectures[file->deb.architecture], BASENAME(i, file->ofs), atoms_architectures[file->architecture], BASENAME(i, c->ofs)); return RET_ERROR; } if (strcmp(c->source, file->deb.source) != 0) { // TODO: add permissive thing to ignore this // (beware if tracking is active) fprintf(stderr, "Source header '%s' of '%s' and source name '%s' within the file '%s' do not match!\n", c->source, BASENAME(i, c->ofs), file->deb.source, 
BASENAME(i, file->ofs)); return RET_ERROR; } if (strcmp(c->sourceversion, file->deb.sourceversion) != 0) { // TODO: add permissive thing to ignore this // (beware if tracking is active) fprintf(stderr, "Source version '%s' of '%s' and source version '%s' within the file '%s' do not match!\n", c->sourceversion, BASENAME(i, c->ofs), file->deb.sourceversion, BASENAME(i, file->ofs)); return RET_ERROR; } if (! strlist_in(&c->binaries, file->deb.name)) { fprintf(stderr, "Name '%s' of binary '%s' is not listed in Binaries header of '%s'!\n", file->deb.name, BASENAME(i, file->ofs), BASENAME(i, c->ofs)); return RET_ERROR; } r = properpackagename(file->deb.name); if (RET_IS_OK(r)) r = propersourcename(file->deb.source); if (RET_IS_OK(r)) r = properversion(file->deb.version); if (RET_WAS_ERROR(r)) return r; return RET_OK; } static retvalue candidate_read_dsc(struct incoming *i, struct candidate_file *file) { retvalue r; bool broken = false; char *p; r = sources_readdsc(&file->dsc, file->tempfilename, BASENAME(i, file->ofs), &broken); if (RET_WAS_ERROR(r)) return r; p = calc_source_basename(file->dsc.name, file->dsc.version); if (FAILEDTOALLOC(p)) return RET_ERROR_OOM; r = checksumsarray_include(&file->dsc.files, p, file->checksums); if (RET_WAS_ERROR(r)) { return r; } // TODO: take a look at "broken"... 
return RET_OK; } static retvalue candidate_read_files(struct incoming *i, struct candidate *c) { struct candidate_file *file; retvalue r; for (file = c->files ; file != NULL ; file = file->next) { if (!FE_PACKAGE(file->type)) continue; r = candidate_usefile(i, c, file); if (RET_WAS_ERROR(r)) return r; assert(file->tempfilename != NULL); if (FE_BINARY(file->type)) r = candidate_read_deb(i, c, file); else if (file->type == fe_DSC) r = candidate_read_dsc(i, file); else { r = RET_ERROR; assert (FE_BINARY(file->type) || file->type == fe_DSC); } if (RET_WAS_ERROR(r)) return r; } return RET_OK; } static retvalue candidate_preparechangesfile(const struct candidate *c, struct candidate_perdistribution *per) { retvalue r; char *basefilename, *filekey; struct candidate_package *package; struct candidate_file *file; component_t component = component_strange; assert (c->files != NULL && c->files->ofs == c->ofs); /* search for a component to use */ for (package = per->packages ; package != NULL ; package = package->next) { if (atom_defined(package->component)) { component = package->component; break; } } file = changesfile(c); /* make sure the file is already copied */ assert (file->used); assert (file->checksums != NULL); /* pseudo package containing the .changes file */ package = candidate_newpackage(per, c->files); if (FAILEDTOALLOC(package)) return RET_ERROR_OOM; basefilename = calc_changes_basename(c->source, c->changesversion, &c->architectures); if (FAILEDTOALLOC(basefilename)) return RET_ERROR_OOM; filekey = calc_filekey(component, c->source, basefilename); free(basefilename); if (FAILEDTOALLOC(filekey)) return RET_ERROR_OOM; r = strlist_init_singleton(filekey, &package->filekeys); if (RET_WAS_ERROR(r)) return r; assert (package->filekeys.count == 1); filekey = package->filekeys.values[0]; package->files = zNEW(const struct candidate_file *); if (FAILEDTOALLOC(package->files)) return RET_ERROR_OOM; r = files_canadd(filekey, file->checksums); if (RET_WAS_ERROR(r)) return 
r; if (RET_IS_OK(r)) package->files[0] = file; return RET_OK; } static retvalue prepare_deb(const struct incoming *i, const struct candidate *c, struct candidate_perdistribution *per, const struct candidate_file *file) { const char *section, *priority; const char *filekey; const struct overridedata *oinfo; struct candidate_package *package; const struct distribution *into = per->into; retvalue r; assert (FE_BINARY(file->type)); assert (file->tempfilename != NULL); assert (file->deb.name != NULL); package = candidate_newpackage(per, file); if (FAILEDTOALLOC(package)) return RET_ERROR_OOM; assert (file == package->master); if (file->type == fe_DEB) package->packagetype = pt_deb; else package->packagetype = pt_udeb; oinfo = override_search(file->type==fe_UDEB?into->overrides.udeb: into->overrides.deb, file->name); r = getsectionprioritycomponent(i, c, into, file, file->name, oinfo, §ion, &priority, &package->component); if (RET_WAS_ERROR(r)) return r; if (file->type == fe_UDEB && !atomlist_in(&into->udebcomponents, package->component)) { fprintf(stderr, "Cannot put file '%s' of '%s' into component '%s',\n" "as it is not listed in UDebComponents of '%s'!\n", BASENAME(i, file->ofs), BASENAME(i, c->ofs), atoms_components[package->component], into->codename); return RET_ERROR; } r = binaries_calcfilekeys(package->component, &file->deb, package->packagetype, &package->filekeys); if (RET_WAS_ERROR(r)) return r; assert (package->filekeys.count == 1); filekey = package->filekeys.values[0]; package->files = zNEW(const struct candidate_file *); if (FAILEDTOALLOC(package->files)) return RET_ERROR_OOM; r = files_canadd(filekey, file->checksums); if (RET_WAS_ERROR(r)) return r; if (RET_IS_OK(r)) package->files[0] = file; r = binaries_complete(&file->deb, filekey, file->checksums, oinfo, section, priority, &package->control); if (RET_WAS_ERROR(r)) return r; return RET_OK; } static retvalue prepare_source_file(const struct incoming *i, const struct candidate *c, const char *filekey, 
const char *basefilename, struct checksums **checksums_p, int package_ofs, /*@out@*/const struct candidate_file **foundfile_p){ struct candidate_file *f; const struct checksums * const checksums = *checksums_p; retvalue r; bool improves; f = c->files; while (f != NULL && (f->checksums == NULL || strcmp(BASENAME(i, f->ofs), basefilename) != 0)) f = f->next; if (f == NULL) { r = files_canadd(filekey, checksums); if (!RET_IS_OK(r)) return r; /* no file by this name and also no file with these * characteristics in the pool, look for differently-named * file with the same characteristics: */ f = c->files; while (f != NULL && (f->checksums == NULL || !checksums_check(f->checksums, checksums, NULL))) f = f->next; if (f == NULL) { fprintf(stderr, "file '%s' is needed for '%s', not yet registered in the pool and not found in '%s'\n", basefilename, BASENAME(i, package_ofs), BASENAME(i, c->ofs)); return RET_ERROR; } /* otherwise proceed with the found file: */ } if (!checksums_check(f->checksums, checksums, &improves)) { fprintf(stderr, "file '%s' has conflicting checksums listed in '%s' and '%s'!\n", basefilename, BASENAME(i, c->ofs), BASENAME(i, package_ofs)); return RET_ERROR; } if (improves) { /* put additional checksums from the .dsc to the information * found in .changes, so that a file matching those in .changes * but not in .dsc is detected */ r = checksums_combine(&f->checksums, checksums, NULL); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; } r = files_canadd(filekey, f->checksums); if (r == RET_NOTHING) { /* already in the pool, mark as used (in the sense * of "only not needed because it is already there") */ f->used = true; } else if (RET_IS_OK(r)) { /* don't have this file in the pool, make sure it is ready * here */ r = candidate_usefile(i, c, f); if (RET_WAS_ERROR(r)) return r; // TODO: update checksums to now received checksums? 
*foundfile_p = f; } if (!RET_WAS_ERROR(r) && !checksums_iscomplete(checksums)) { /* update checksums so the source index can show them */ r = checksums_combine(checksums_p, f->checksums, NULL); } return r; } static retvalue prepare_dsc(const struct incoming *i, const struct candidate *c, struct candidate_perdistribution *per, const struct candidate_file *file) { const char *section, *priority; const struct overridedata *oinfo; struct candidate_package *package; const struct distribution *into = per->into; retvalue r; int j; assert (file->type == fe_DSC); assert (file->tempfilename != NULL); assert (file->dsc.name != NULL); package = candidate_newpackage(per, file); if (FAILEDTOALLOC(package)) return RET_ERROR_OOM; assert (file == package->master); package->packagetype = pt_dsc; if (c->isbinNMU) { // TODO: add permissive thing to ignore this fprintf(stderr, "Source package ('%s') in '%s', which look like a binNMU (as '%s' and '%s' differ)!\n", BASENAME(i, file->ofs), BASENAME(i, c->ofs), c->sourceversion, c->changesversion); return RET_ERROR; } if (strcmp(file->name, file->dsc.name) != 0) { // TODO: add permissive thing to ignore this fprintf(stderr, "Name part of filename ('%s') and name within the file ('%s') do not match for '%s' in '%s'!\n", file->name, file->dsc.name, BASENAME(i, file->ofs), BASENAME(i, c->ofs)); return RET_ERROR; } if (strcmp(c->source, file->dsc.name) != 0) { // TODO: add permissive thing to ignore this // (beware if tracking is active) fprintf(stderr, "Source header '%s' of '%s' and name '%s' within the file '%s' do not match!\n", c->source, BASENAME(i, c->ofs), file->dsc.name, BASENAME(i, file->ofs)); return RET_ERROR; } if (strcmp(c->sourceversion, file->dsc.version) != 0) { // TODO: add permissive thing to ignore this // (beware if tracking is active) fprintf(stderr, "Source version '%s' of '%s' and version '%s' within the file '%s' do not match!\n", c->sourceversion, BASENAME(i, c->ofs), file->dsc.version, BASENAME(i, file->ofs)); return 
RET_ERROR; } r = propersourcename(file->dsc.name); if (RET_IS_OK(r)) r = properversion(file->dsc.version); if (RET_IS_OK(r)) r = properfilenames(&file->dsc.files.names); if (RET_WAS_ERROR(r)) return r; oinfo = override_search(into->overrides.dsc, file->dsc.name); r = getsectionprioritycomponent(i, c, into, file, file->dsc.name, oinfo, §ion, &priority, &package->component); if (RET_WAS_ERROR(r)) return r; package->directory = calc_sourcedir(package->component, file->dsc.name); if (FAILEDTOALLOC(package->directory)) return RET_ERROR_OOM; r = calc_dirconcats(package->directory, &file->dsc.files.names, &package->filekeys); if (RET_WAS_ERROR(r)) return r; package->files = nzNEW(package->filekeys.count, const struct candidate_file *); if (FAILEDTOALLOC(package->files)) return RET_ERROR_OOM; r = files_canadd(package->filekeys.values[0], file->checksums); if (RET_IS_OK(r)) package->files[0] = file; if (RET_WAS_ERROR(r)) return r; for (j = 1 ; j < package->filekeys.count ; j++) { r = prepare_source_file(i, c, package->filekeys.values[j], file->dsc.files.names.values[j], &file->dsc.files.checksums[j], file->ofs, &package->files[j]); if (RET_WAS_ERROR(r)) return r; } r = sources_complete(&file->dsc, package->directory, oinfo, section, priority, &package->control); if (RET_WAS_ERROR(r)) return r; return RET_OK; } static retvalue candidate_preparetrackbyhands(const struct incoming *i, const struct candidate *c, struct candidate_perdistribution *per) { retvalue r; char *byhanddir; struct candidate_package *package; struct candidate_file *firstbyhand = NULL, *file; component_t component = component_strange; int count = 0; for (file = c->files ; file != NULL ; file = file->next) { if (file->type == fe_BYHAND) { count++; if (firstbyhand == NULL) firstbyhand = file; } } if (count == 0) return RET_NOTHING; /* search for a component to use */ for (package = per->packages ; package != NULL ; package = package->next) { if (atom_defined(package->component)) { component = 
package->component; break; } } /* pseudo package containing byhand files */ package = candidate_newpackage(per, firstbyhand); if (FAILEDTOALLOC(package)) return RET_ERROR_OOM; r = strlist_init_n(count, &package->filekeys); if (RET_WAS_ERROR(r)) return r; package->files = nzNEW(count, const struct candidate_file *); if (FAILEDTOALLOC(package->files)) return RET_ERROR_OOM; byhanddir = calc_byhanddir(component, c->source, c->changesversion); if (FAILEDTOALLOC(byhanddir)) return RET_ERROR_OOM; for (file = c->files ; file != NULL ; file = file->next) { char *filekey; if (file->type != fe_BYHAND) continue; r = candidate_usefile(i, c, file); if (RET_WAS_ERROR(r)) { free(byhanddir); return r; } filekey = calc_dirconcat(byhanddir, BASENAME(i, file->ofs)); if (FAILEDTOALLOC(filekey)) { free(byhanddir); return RET_ERROR_OOM; } r = files_canadd(filekey, file->checksums); if (RET_WAS_ERROR(r)) { free(byhanddir); return r; } if (RET_IS_OK(r)) package->files[package->filekeys.count] = file; r = strlist_add(&package->filekeys, filekey); assert (r == RET_OK); } free(byhanddir); assert (package->filekeys.count == count); return RET_OK; } static retvalue candidate_preparelogs(const struct incoming *i, const struct candidate *c, struct candidate_perdistribution *per) { retvalue r; struct candidate_package *package; struct candidate_file *firstlog = NULL, *file; component_t component = component_strange; int count = 0; for (file = c->files ; file != NULL ; file = file->next) { if (file->type == fe_LOG) { count++; if (firstlog == NULL) firstlog = file; } } if (count == 0) return RET_NOTHING; /* search for a component to use */ for (package = per->packages ; package != NULL ; package = package->next) { if (atom_defined(package->component)) { component = package->component; break; } } /* pseudo package containing log files */ package = candidate_newpackage(per, firstlog); if (FAILEDTOALLOC(package)) return RET_ERROR_OOM; r = strlist_init_n(count, &package->filekeys); if (RET_WAS_ERROR(r)) 
return r; package->files = nzNEW(count, const struct candidate_file *); if (FAILEDTOALLOC(package->files)) return RET_ERROR_OOM; for (file = c->files ; file != NULL ; file = file->next) { char *filekey; if (file->type != fe_LOG) continue; r = candidate_usefile(i, c, file); if (RET_WAS_ERROR(r)) return r; // TODO: add same checks on the basename contents? filekey = calc_filekey(component, c->source, BASENAME(i, file->ofs)); if (FAILEDTOALLOC(filekey)) return RET_ERROR_OOM; r = files_canadd(filekey, file->checksums); if (RET_WAS_ERROR(r)) return r; if (RET_IS_OK(r)) package->files[package->filekeys.count] = file; r = strlist_add(&package->filekeys, filekey); assert (r == RET_OK); } assert (package->filekeys.count == count); return RET_OK; } static retvalue prepare_hookedbyhand(const struct incoming *i, const struct candidate *c, struct candidate_perdistribution *per, struct candidate_file *file) { const struct distribution *d = per->into; const struct byhandhook *h = NULL; struct byhandfile **b_p, *b; retvalue result = RET_NOTHING; retvalue r; b_p = &per->byhandhookstocall; while (*b_p != NULL) b_p = &(*b_p)->next; while (byhandhooks_matched(d->byhandhooks, &h, file->section, file->priority, BASENAME(i, file->ofs))) { r = candidate_usefile(i, c, file); if (RET_WAS_ERROR(r)) return r; b = zNEW(struct byhandfile); if (FAILEDTOALLOC(b)) return RET_ERROR_OOM; b->file = file; b->hook = h; *b_p = b; b_p = &b->next; result = RET_OK; } return result; } static retvalue prepare_for_distribution(const struct incoming *i, const struct candidate *c, struct candidate_perdistribution *d) { struct candidate_file *file; retvalue r; d->into->lookedat = true; for (file = c->files ; file != NULL ; file = file->next) { switch (file->type) { case fe_UDEB: case fe_DEB: r = prepare_deb(i, c, d, file); break; case fe_DSC: r = prepare_dsc(i, c, d, file); break; case fe_BYHAND: r = prepare_hookedbyhand(i, c, d, file); break; default: r = RET_NOTHING; break; } if (RET_WAS_ERROR(r)) { return r; 
} } if (d->into->tracking != dt_NONE) { if (d->into->trackingoptions.includebyhand) { r = candidate_preparetrackbyhands(i, c, d); if (RET_WAS_ERROR(r)) return r; } if (d->into->trackingoptions.includelogs) { r = candidate_preparelogs(i, c, d); if (RET_WAS_ERROR(r)) return r; } if (d->into->trackingoptions.includechanges) { r = candidate_preparechangesfile(c, d); if (RET_WAS_ERROR(r)) return r; } } //... check if something would be done ... return RET_OK; } static retvalue candidate_addfiles(struct candidate *c) { int j; struct candidate_perdistribution *d; struct candidate_package *p; retvalue r; for (d = c->perdistribution ; d != NULL ; d = d->next) { for (p = d->packages ; p != NULL ; p = p->next) { if (p->skip) continue; for (j = 0 ; j < p->filekeys.count ; j++) { const struct candidate_file *f = p->files[j]; if (f == NULL) continue; assert(f->tempfilename != NULL); r = files_hardlinkandadd(f->tempfilename, p->filekeys.values[j], f->checksums); if (RET_WAS_ERROR(r)) return r; } } } return RET_OK; } static retvalue add_dsc(struct distribution *into, struct trackingdata *trackingdata, struct candidate_package *p) { retvalue r; struct target *t = distribution_getpart(into, p->component, architecture_source, pt_dsc); assert (logger_isprepared(into->logger)); /* finally put it into the source distribution */ r = target_initpackagesdb(t, READWRITE); if (!RET_WAS_ERROR(r)) { retvalue r2; if (interrupted()) r = RET_ERROR_INTERRUPTED; else r = target_addpackage(t, into->logger, p->master->dsc.name, p->master->dsc.version, p->control, &p->filekeys, false, trackingdata, architecture_source, NULL, NULL); r2 = target_closepackagesdb(t); RET_ENDUPDATE(r, r2); } RET_UPDATE(into->status, r); return r; } static retvalue checkadd_dsc( struct distribution *into, const struct incoming *i, bool tracking, struct candidate_package *p) { retvalue r; struct target *t = distribution_getpart(into, p->component, architecture_source, pt_dsc); /* check for possible errors putting it into the 
source distribution */ r = target_initpackagesdb(t, READONLY); if (!RET_WAS_ERROR(r)) { retvalue r2; if (interrupted()) r = RET_ERROR_INTERRUPTED; else r = target_checkaddpackage(t, p->master->dsc.name, p->master->dsc.version, tracking, i->permit[pmf_oldpackagenewer]); r2 = target_closepackagesdb(t); RET_ENDUPDATE(r, r2); } return r; } static retvalue candidate_add_into(const struct incoming *i, const struct candidate *c, const struct candidate_perdistribution *d, const char **changesfilekey_p) { retvalue r; struct candidate_package *p; struct trackingdata trackingdata; struct distribution *into = d->into; trackingdb tracks; struct atomlist binary_architectures; if (interrupted()) return RET_ERROR_INTERRUPTED; into->lookedat = true; if (into->logger != NULL) { r = logger_prepare(d->into->logger); if (RET_WAS_ERROR(r)) return r; } tracks = NULL; if (into->tracking != dt_NONE) { r = tracking_initialize(&tracks, into, false); if (RET_WAS_ERROR(r)) return r; } if (tracks != NULL) { r = trackingdata_summon(tracks, c->source, c->sourceversion, &trackingdata); if (RET_WAS_ERROR(r)) { (void)tracking_done(tracks); return r; } if (into->trackingoptions.needsources) { // TODO, but better before we start adding... 
} } atomlist_init(&binary_architectures); for (p = d->packages ; p != NULL ; p = p->next) { if (FE_BINARY(p->master->type)) { architecture_t a = p->master->architecture; if (a != architecture_all) atomlist_add_uniq(&binary_architectures, a); } } r = RET_OK; for (p = d->packages ; p != NULL ; p = p->next) { if (p->skip) { if (verbose >= 0) printf( "Not putting '%s' in '%s' as already in there with equal or newer version.\n", BASENAME(i, p->master->ofs), into->codename); continue; } if (p->master->type == fe_DSC) { r = add_dsc(into, (tracks==NULL)?NULL:&trackingdata, p); } else if (FE_BINARY(p->master->type)) { architecture_t a = p->master->architecture; const struct atomlist *as, architectures = {&a, 1, 1}; if (i->options[iof_limit_arch_all] && a == architecture_all && binary_architectures.count > 0) as = &binary_architectures; else as = &architectures; r = binaries_adddeb(&p->master->deb, as, p->packagetype, into, (tracks==NULL)?NULL:&trackingdata, p->component, &p->filekeys, p->control); } else if (p->master->type == fe_CHANGES) { /* finally add the .changes to tracking, if requested */ assert (p->master->name == NULL); assert (tracks != NULL); r = trackedpackage_adddupfilekeys(trackingdata.tracks, trackingdata.pkg, ft_CHANGES, &p->filekeys, false); if (p->filekeys.count > 0) *changesfilekey_p = p->filekeys.values[0]; } else if (p->master->type == fe_BYHAND) { assert (tracks != NULL); r = trackedpackage_adddupfilekeys(trackingdata.tracks, trackingdata.pkg, ft_XTRA_DATA, &p->filekeys, false); } else if (p->master->type == fe_LOG) { assert (tracks != NULL); r = trackedpackage_adddupfilekeys(trackingdata.tracks, trackingdata.pkg, ft_LOG, &p->filekeys, false); } else r = RET_ERROR_INTERNAL; if (RET_WAS_ERROR(r)) break; } atomlist_done(&binary_architectures); if (tracks != NULL) { retvalue r2; r2 = trackingdata_finish(tracks, &trackingdata); RET_UPDATE(r, r2); r2 = tracking_done(tracks); RET_ENDUPDATE(r, r2); } return r; } static inline retvalue 
candidate_checkadd_into(const struct incoming *i, const struct candidate_perdistribution *d) { retvalue r; struct candidate_package *p; struct distribution *into = d->into; bool somethingtodo = false; for (p = d->packages ; p != NULL ; p = p->next) { if (p->master->type == fe_DSC) { r = checkadd_dsc(into, i, into->tracking != dt_NONE, p); } else if (FE_BINARY(p->master->type)) { r = binaries_checkadddeb(&p->master->deb, p->master->architecture, p->packagetype, into, into->tracking != dt_NONE, p->component, i->permit[pmf_oldpackagenewer]); } else if (p->master->type == fe_CHANGES || p->master->type == fe_BYHAND || p->master->type == fe_LOG) { continue; } else r = RET_ERROR_INTERNAL; if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) p->skip = true; else somethingtodo = true; } if (somethingtodo) return RET_OK; else return RET_NOTHING; } static inline bool isallowed(UNUSED(struct incoming *i), struct candidate *c, struct distribution *into, struct upload_conditions *conditions) { const struct candidate_file *file; do switch (uploaders_nextcondition(conditions)) { case uc_ACCEPTED: return true; case uc_REJECTED: return false; case uc_CODENAME: (void)uploaders_verifystring(conditions, into->codename); break; case uc_SOURCENAME: assert (c->source != NULL); (void)uploaders_verifystring(conditions, c->source); break; case uc_SECTIONS: for (file = c->files ; file != NULL ; file = file->next) { if (!FE_PACKAGE(file->type)) continue; if (!uploaders_verifystring(conditions, (file->section == NULL) ?"-":file->section)) break; } break; case uc_BINARIES: for (file = c->files ; file != NULL ; file = file->next) { if (!FE_BINARY(file->type)) continue; if (!uploaders_verifystring(conditions, file->name)) break; } break; case uc_ARCHITECTURES: for (file = c->files ; file != NULL ; file = file->next) { if (!FE_PACKAGE(file->type)) continue; if (!uploaders_verifyatom(conditions, file->architecture)) break; } break; case uc_BYHAND: for (file = c->files ; file != NULL ; file = 
file->next) { if (file->type != fe_BYHAND) continue; if (!uploaders_verifystring(conditions, file->section)) break; } break; } while (true); } static retvalue candidate_checkpermissions(struct incoming *i, struct candidate *c, struct distribution *into) { retvalue r; struct upload_conditions *conditions; bool allowed; /* no rules means allowed */ if (into->uploaders == NULL) return RET_OK; r = distribution_loaduploaders(into); if (RET_WAS_ERROR(r)) return r; assert(into->uploaderslist != NULL); r = uploaders_permissions(into->uploaderslist, c->signatures, &conditions); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; allowed = isallowed(i, c, into, conditions); free(conditions); if (allowed) return RET_OK; else /* reject */ return RET_NOTHING; } static retvalue check_architecture_availability(const struct incoming *i, const struct candidate *c) { struct candidate_perdistribution *d; bool check_all_availability = false; bool have_all_available = false; int j; // TODO: switch to instead ensure every architecture can be put into // one distribution at least would be nice. If implementing this do not // forget to check later to only put files in when the distribution can // cope with that. for (j = 0 ; j < c->architectures.count ; j++) { const char *architecture = c->architectures.values[j]; if (strcmp(architecture, "all") == 0) { check_all_availability = true; continue; } for (d = c->perdistribution ; d != NULL ; d = d->next) { if (atomlist_in(&d->into->architectures, architecture_find(architecture))) continue; fprintf(stderr, "'%s' lists architecture '%s' not found in distribution '%s'!\n", BASENAME(i, c->ofs), architecture, d->into->codename); return RET_ERROR; } if (strcmp(architecture, "source") != 0) have_all_available = true; } if (check_all_availability && ! 
have_all_available) { for (d = c->perdistribution ; d != NULL ; d = d->next) { if (d->into->architectures.count > 1) continue; if (d->into->architectures.count > 0 && d->into->architectures.atoms[0] != architecture_source) continue; fprintf(stderr, "'%s' lists architecture 'all' but no binary architecture found in distribution '%s'!\n", BASENAME(i, c->ofs), d->into->codename); return RET_ERROR; } } return RET_OK; } static retvalue create_uniq_logsubdir(const char *logdir, const char *name, const char *version, const struct strlist *architectures, /*@out@*/char **subdir_p) { char *dir, *p; size_t l; retvalue r; r = dirs_make_recursive(logdir); if (RET_WAS_ERROR(r)) return r; p = calc_changes_basename(name, version, architectures); if (FAILEDTOALLOC(p)) return RET_ERROR_OOM; dir = calc_dirconcat(logdir, p); free(p); if (FAILEDTOALLOC(dir)) return RET_ERROR_OOM; l = strlen(dir); assert (l > 8 && strcmp(dir + l - 8 , ".changes") == 0); memset(dir + l - 7, '0', 7); r = dirs_create(dir); while (r == RET_NOTHING) { p = dir + l - 1; while (*p == '9') { *p = '0'; p--; } if (*p < '0' || *p > '8') { fprintf(stderr, "Failed to create a new directory of the form '%s'\n" "it looks like all 10000000 such directories are already there...\n", dir); return RET_ERROR; } (*p)++; r = dirs_create(dir); } *subdir_p = dir; return RET_OK; } static retvalue candidate_prepare_logdir(struct incoming *i, struct candidate *c) { int count, j; struct candidate_file *file; retvalue r; r = create_uniq_logsubdir(i->logdir, c->source, c->changesversion, &c->architectures, &c->logsubdir); assert (RET_IS_OK(r)); if (RET_WAS_ERROR(r)) return RET_ERROR_OOM; count = 0; for (file = c->files ; file != NULL ; file = file->next) { if (file->ofs == c->ofs || file->type == fe_LOG || (file->type == fe_BYHAND && !file->used)) count++; } c->logcount = count; c->logfiles = nzNEW(count, const struct candidate_file *); if (FAILEDTOALLOC(c->logfiles)) return RET_ERROR_OOM; j = 0; for (file = c->files ; file != NULL ; 
file = file->next) { if (file->ofs == c->ofs || file->type == fe_LOG || (file->type == fe_BYHAND && !file->used)) { r = candidate_usefile(i, c, file); if (RET_WAS_ERROR(r)) return r; c->logfiles[j++] = file; } } assert (count == j); return RET_OK; } static retvalue candidate_finish_logdir(struct incoming *i, struct candidate *c) { int j; for (j = 0 ; j < c->logcount ; j++) { retvalue r; const struct candidate_file *f = c->logfiles[j]; r = checksums_hardlink(c->logsubdir, BASENAME(i, f->ofs), f->tempfilename, f->checksums); if (RET_WAS_ERROR(r)) return r; } return RET_OK; } static retvalue candidate_add_byhands(struct incoming *i, UNUSED(struct candidate *c), struct candidate_perdistribution *d) { struct byhandfile *b; retvalue r; for (b = d->byhandhookstocall ; b != NULL ; b = b->next){ const struct candidate_file *f = b->file; r = byhandhook_call(b->hook, d->into->codename, f->section, f->priority, BASENAME(i, f->ofs), f->tempfilename); if (RET_WAS_ERROR(r)) return r; } return RET_OK; } /* the actual adding of packages, * everything that can be tested earlier should be already tested now */ static retvalue candidate_really_add(struct incoming *i, struct candidate *c) { struct candidate_perdistribution *d; retvalue r; for (d = c->perdistribution ; d != NULL ; d = d->next) { if (d->byhandhookstocall == NULL) continue; r = candidate_add_byhands(i, c, d); if (RET_WAS_ERROR(r)) return r; } /* make hardlinks/copies of the files */ r = candidate_addfiles(c); if (RET_WAS_ERROR(r)) return r; if (interrupted()) return RET_ERROR_INTERRUPTED; if (i->logdir != NULL) { r = candidate_finish_logdir(i, c); if (RET_WAS_ERROR(r)) return r; } if (interrupted()) return RET_ERROR_INTERRUPTED; r = RET_OK; for (d = c->perdistribution ; d != NULL ; d = d->next) { struct distribution *into = d->into; const char *changesfilekey = NULL; /* if there are regular packages to add, * add them and call the log. 
* If all packages were skipped but a byhandhook run, * still advertise the .changes file to loggers */ if (!d->skip) { r = candidate_add_into(i, c, d, &changesfilekey); if (RET_WAS_ERROR(r)) return r; } else if (d->byhandhookstocall == NULL) continue; logger_logchanges(into->logger, into->codename, c->source, c->changesversion, c->control, changesfile(c)->tempfilename, changesfilekey); } return RET_OK; } static retvalue candidate_add(struct incoming *i, struct candidate *c) { struct candidate_perdistribution *d; struct candidate_file *file; retvalue r; bool somethingtodo; char *origfilename; assert (c->perdistribution != NULL); /* check if every distribution this is to be added to supports * all architectures we have files for */ r = check_architecture_availability(i, c); if (RET_WAS_ERROR(r)) return r; for (d = c->perdistribution ; d != NULL ; d = d->next) { r = distribution_loadalloverrides(d->into); if (RET_WAS_ERROR(r)) return r; } // TODO: once uploaderlist allows to look for package names or existing // override entries or such things, check package names here enable // checking for content name with outer name /* when we get here, the package is allowed in, now we have to * read the parts and check all stuff we only know now */ r = candidate_read_files(i, c); if (RET_WAS_ERROR(r)) return r; /* now the distribution specific part starts: */ for (d = c->perdistribution ; d != NULL ; d = d->next) { r = prepare_for_distribution(i, c, d); if (RET_WAS_ERROR(r)) return r; } if (i->logdir != NULL) { r = candidate_prepare_logdir(i, c); if (RET_WAS_ERROR(r)) return r; } for (file = c->files ; file != NULL ; file = file->next) { if (!file->used && !i->permit[pmf_unused_files]) { // TODO: find some way to mail such errors... 
fprintf(stderr, "Error: '%s' contains unused file '%s'!\n" "(Do Permit: unused_files to conf/incoming to ignore and\n" " additionally Cleanup: unused_files to delete them)\n", BASENAME(i, c->ofs), BASENAME(i, file->ofs)); if (file->type == fe_LOG || file->type == fe_BYHAND) fprintf(stderr, "Alternatively, you can also add a LogDir: for '%s' into conf/incoming\n" "then files like that will be stored there.\n", i->name); return RET_ERROR; } } /* additional test run to see if anything could go wrong, * or if there are already newer versions */ somethingtodo = false; for (d = c->perdistribution ; d != NULL ; d = d->next) { r = candidate_checkadd_into(i, d); if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) { d->skip = true; if (d->byhandhookstocall != NULL) somethingtodo = true; } else somethingtodo = true; } if (! somethingtodo) { if (verbose >= 0) { printf( "Skipping %s because all packages are skipped!\n", BASENAME(i, c->ofs)); } for (file = c->files ; file != NULL ; file = file->next) { if (file->used || i->cleanup[cuf_unused_files]) i->delete[file->ofs] = true; } return RET_NOTHING; } // TODO: make sure not two different files are supposed to be installed // as the same filekey. 
/* the actual adding of packages, make sure what can be checked was * checked by now */ origfilename = calc_dirconcat(i->directory, BASENAME(i, changesfile(c)->ofs)); causingfile = origfilename; r = candidate_really_add(i, c); causingfile = NULL; free(origfilename); if (RET_WAS_ERROR(r)) return r; /* mark files as done */ for (file = c->files ; file != NULL ; file = file->next) { if (file->used) i->processed[file->ofs] = true; if (file->used || i->cleanup[cuf_unused_files]) { i->delete[file->ofs] = true; } } return r; } static retvalue process_changes(struct incoming *i, int ofs) { struct candidate *c; retvalue r; int j, k; bool broken = false, tried = false; r = candidate_read(i, ofs, &c, &broken); if (RET_WAS_ERROR(r)) return r; assert (RET_IS_OK(r)); r = candidate_parse(i, c); if (RET_WAS_ERROR(r)) { candidate_free(c); return r; } r = candidate_earlychecks(i, c); if (RET_WAS_ERROR(r)) { if (i->cleanup[cuf_on_error]) { struct candidate_file *file; i->delete[c->ofs] = true; for (file = c->files ; file != NULL ; file = file->next) { i->delete[file->ofs] = true; } } candidate_free(c); return r; } for (k = 0 ; k < c->distributions.count ; k++) { const char *name = c->distributions.values[k]; for (j = 0 ; j < i->allow.count ; j++) { // TODO: implement "*" if (strcmp(name, i->allow.values[j]) == 0) { tried = true; r = candidate_checkpermissions(i, c, i->allow_into[j]); if (r == RET_NOTHING) continue; if (RET_IS_OK(r)) r = candidate_newdistribution(c, i->allow_into[j]); if (RET_WAS_ERROR(r)) { candidate_free(c); return r; } else break; } } if (c->perdistribution != NULL && !i->options[iof_multiple_distributions]) break; } if (c->perdistribution == NULL && i->default_into != NULL) { tried = true; r = candidate_checkpermissions(i, c, i->default_into); if (RET_WAS_ERROR(r)) { candidate_free(c); return r; } if (RET_IS_OK(r)) { r = candidate_newdistribution(c, i->default_into); } } if (c->perdistribution == NULL) { fprintf(stderr, tried?"No distribution accepting '%s'!\n": 
"No distribution found for '%s'!\n", i->files.values[ofs]); if (i->cleanup[cuf_on_deny]) { struct candidate_file *file; i->delete[c->ofs] = true; for (file = c->files ; file != NULL ; file = file->next) { // TODO: implement same-owner check if (!i->cleanup[cuf_on_deny_check_owner]) i->delete[file->ofs] = true; } } r = RET_ERROR_INCOMING_DENY; } else { if (broken) { fprintf(stderr, "'%s' is signed with only invalid signatures.\n" "If this was not corruption but willfull modification,\n" "remove the signatures and try again.\n", i->files.values[ofs]); r = RET_ERROR; } else r = candidate_add(i, c); if (RET_WAS_ERROR(r) && i->cleanup[cuf_on_error]) { struct candidate_file *file; i->delete[c->ofs] = true; for (file = c->files ; file != NULL ; file = file->next) { i->delete[file->ofs] = true; } } } logger_wait(); candidate_free(c); return r; } static inline /*@null@*/char *create_uniq_subdir(const char *basedir) { char date[16], *dir; unsigned long number = 0; retvalue r; time_t curtime; struct tm *tm; int e; r = dirs_make_recursive(basedir); if (RET_WAS_ERROR(r)) return NULL; if (time(&curtime) == (time_t)-1) tm = NULL; else tm = gmtime(&curtime); if (tm == NULL || strftime(date, 16, "%Y-%m-%d", tm) != 10) strcpy(date, "timeerror"); for (number = 0 ; number < 10000 ; number ++) { dir = mprintf("%s/%s-%lu", basedir, date, number); if (FAILEDTOALLOC(dir)) return NULL; if (mkdir(dir, 0777) == 0) return dir; e = errno; if (e != EEXIST) { fprintf(stderr, "Error %d creating directory '%s': %s\n", e, dir, strerror(e)); free(dir); return NULL; } free(dir); } fprintf(stderr, "Could not create unique subdir in '%s'!\n", basedir); return NULL; } /* tempdir should ideally be on the same partition like the pooldir */ retvalue process_incoming(struct distribution *distributions, const char *name, const char *changesfilename) { struct incoming *i; retvalue result, r; int j; char *morguedir; result = RET_NOTHING; r = incoming_init(distributions, name, &i); if (RET_WAS_ERROR(r)) return 
r; for (j = 0 ; j < i->files.count ; j ++) { const char *basefilename = i->files.values[j]; size_t l = strlen(basefilename); #define C_SUFFIX ".changes" const size_t c_len = strlen(C_SUFFIX); if (l <= c_len || memcmp(basefilename + (l - c_len), C_SUFFIX, c_len) != 0) continue; if (changesfilename != NULL && strcmp(basefilename, changesfilename) != 0) continue; /* a .changes file, check it */ r = process_changes(i, j); RET_UPDATE(result, r); } logger_wait(); if (i->morguedir == NULL) morguedir = NULL; else { morguedir = create_uniq_subdir(i->morguedir); } for (j = 0 ; j < i->files.count ; j ++) { char *fullfilename; if (!i->delete[j]) continue; fullfilename = calc_dirconcat(i->directory, i->files.values[j]); if (FAILEDTOALLOC(fullfilename)) { result = RET_ERROR_OOM; continue; } if (morguedir != NULL && !i->processed[j]) { char *newname = calc_dirconcat(morguedir, i->files.values[j]); if (newname != NULL && rename(fullfilename, newname) == 0) { free(newname); free(fullfilename); continue; } else if (FAILEDTOALLOC(newname)) { result = RET_ERROR_OOM; } else { int e = errno; fprintf(stderr, "Error %d moving '%s' to '%s': %s\n", e, i->files.values[j], morguedir, strerror(e)); RET_UPDATE(result, RET_ERRNO(e)); /* no continue, instead * delete the file as normal: */ } } if (verbose >= 3) printf("deleting '%s'...\n", fullfilename); deletefile(fullfilename); free(fullfilename); } if (morguedir != NULL) { /* in the case it is empty, remove again */ (void)rmdir(morguedir); free(morguedir); } incoming_free(i); return result; } reprepro-4.13.1/error.h0000644000175100017510000000320412152651661011667 00000000000000#ifndef REPREPRO_ERROR_H #define REPREPRO_ERROR_H #ifndef REPREPRO_GLOBALS_H #include "globals.h" #endif bool interrupted(void); /* retvalue is simply an int. 
* just named to show it follows the given semantics */ /*@numabstract@*/ enum retvalue_enum { RET_ERROR_INCOMING_DENY = -13, RET_ERROR_INTERNAL = -12, RET_ERROR_BZ2 = -11, RET_ERROR_Z = -10, RET_ERROR_INTERRUPTED = -9, RET_ERROR_UNKNOWNFIELD = -8, RET_ERROR_MISSING = -7, RET_ERROR_BADSIG = -6, RET_ERROR_GPGME = -5, RET_ERROR_EXIST = -4, RET_ERROR_OOM = -3, RET_ERROR_WRONG_MD5 = -2, RET_ERROR = -1, RET_NOTHING = 0, RET_OK = 1 }; typedef enum retvalue_enum retvalue; #define FAILEDTOALLOC(x) unlikely(x == NULL) #define RET_IS_OK(r) likely((r) == RET_OK) #define RET_WAS_NO_ERROR(r) likely((r) >= (retvalue)0) #define RET_WAS_ERROR(r) unlikely((r) < (retvalue)0) /* update a return value, so that it contains the first error-code * and otherwise is RET_OK, if anything was RET_OK */ #define RET_UPDATE(ret, update) { if ((update)!=RET_NOTHING && RET_WAS_NO_ERROR(ret)) ret=update;} /* like RET_UPDATE, but RET_ENDUPDATE(RET_NOTHING, RET_OK) keeps RET_NOTHING */ #define RET_ENDUPDATE(ret, update) {if (RET_WAS_ERROR(update) && RET_WAS_NO_ERROR(ret)) ret=update;} /* code a errno in a error */ #define RET_ERRNO(err) ((err>0)?((retvalue)-err):RET_ERROR) /* code a db-error in a error */ // TODO: to be implemented... 
#define RET_DBERR(e) RET_ERROR #define ASSERT_NOT_NOTHING(r) {assert (r != RET_NOTHING); if (r == RET_NOTHING) r = RET_ERROR_INTERNAL;} #define EXIT_RET(ret) (RET_WAS_NO_ERROR(ret)?((nothingiserror&&ret==RET_NOTHING)?EXIT_FAILURE:EXIT_SUCCESS):(int)ret) #endif reprepro-4.13.1/filecntl.h0000644000175100017510000000043112152651661012335 00000000000000#ifndef REPREPRO_FILECNTL_H #define REPREPRO_FILECNTL_H #ifndef HAVE_CLOSEFROM void closefrom(int); #endif void markcloseonexec(int); int deletefile(const char *); bool isanyfile(const char *); bool isregularfile(const char *); bool isdirectory(const char *fullfilename); #endif reprepro-4.13.1/checksums.h0000644000175100017510000001407412152651661012532 00000000000000#ifndef REPREPRO_CHECKSUMS_H #define REPREPRO_CHECKSUMS_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" #endif #ifndef REPREPRO_STRLIST_H #include "strlist.h" #endif enum checksumtype { /* must be first */ cs_md5sum, /* additionall hashes: */ #define cs_firstEXTENDED cs_sha1sum cs_sha1sum, cs_sha256sum, #define cs_hashCOUNT cs_length /* must be last but one */ cs_length, /* must be last */ cs_COUNT }; struct checksums; extern const char * const changes_checksum_names[]; extern const char * const source_checksum_names[]; extern const char * const release_checksum_names[]; extern const struct constant *hashnames; struct hashes { struct hash_data { const char *start; size_t len; } hashes[cs_COUNT]; }; void checksums_free(/*@only@*//*@null@*/struct checksums *); /* duplicate a checksum record, NULL means OOM */ /*@null@*/struct checksums *checksums_dup(const struct checksums *); retvalue checksums_setall(/*@out@*/struct checksums **checksums_p, const char *combinedchecksum, size_t len); retvalue checksums_initialize(/*@out@*/struct checksums **checksums_p, const struct hash_data *); /* hashes[*] is free'd: */ retvalue checksums_init(/*@out@*/struct checksums **, char *hashes[cs_COUNT]); retvalue checksums_parse(/*@out@*/struct 
checksums **, const char *); off_t checksums_getfilesize(const struct checksums *); /* get 0-terminated combined textual representation of the checksums, * including the size (including the trailing '\0'): */ retvalue checksums_getcombined(const struct checksums *, /*@out@*/const char **, /*@out@*/size_t *); /* get a static pointer to a specific part of a checksum (wihtout size) */ bool checksums_getpart(const struct checksums *, enum checksumtype, /*@out@*/const char **, /*@out@*/size_t *); /* extract a single checksum from the combined data: */ bool checksums_gethashpart(const struct checksums *, enum checksumtype, /*@out@*/const char **hash_p, /*@out@*/size_t *hashlen_p, /*@out@*/const char **size_p, /*@out@*/size_t *sizelen_p); /* check if a single checksum fits */ bool checksums_matches(const struct checksums *, enum checksumtype, const char *); /* Copy file to file , calculating checksums */ retvalue checksums_copyfile(const char * /*destination*/, const char * /*origin*/, bool /*deletetarget*/, /*@out@*/struct checksums **); retvalue checksums_hardlink(const char * /*directory*/, const char * /*filekey*/, const char * /*sourcefilename*/, const struct checksums *); retvalue checksums_linkorcopyfile(const char * /*destination*/, const char * /*origin*/, /*@out@*/struct checksums **); /* calculare checksums of a file: */ retvalue checksums_read(const char * /*fullfilename*/, /*@out@*/struct checksums **); /* replace the contents of a file with data and calculate the new checksums */ retvalue checksums_replace(const char * /*filename*/, const char *, size_t, /*@out@*//*@null@*/struct checksums **); /* check if the file has the given md5sum (only cheap tests like size), * RET_NOTHING means file does not exist, * RET_ERROR_WRONG_MD5 means wrong size */ retvalue checksums_cheaptest(const char * /*fullfilename*/, const struct checksums *, bool); /* check if filename has specified checksums, if not return RET_ERROR_WRONG_MD5, * if it has, and checksums_p put the 
improved checksum there * (*checksums_p should either be NULL or checksums) */ retvalue checksums_test(const char *, const struct checksums *, /*@null@*/struct checksums **); /* check if checksum of filekey in database and checksum of actual file, set improve if some new has is in the last */ bool checksums_check(const struct checksums *, const struct checksums *, /*@out@*/bool * /*improves_p*/); /* return true, iff all supported checksums are available */ bool checksums_iscomplete(const struct checksums *); /* Collect missing checksums (if all are there always RET_OK without checking). * if the file is not there, return RET_NOTHING, * if it is but not matches, return RET_ERROR_WRONG_MD5 */ retvalue checksums_complete(struct checksums **, const char * /*fullfilename*/); void checksums_printdifferences(FILE *, const struct checksums * /*expected*/, const struct checksums * /*got*/); retvalue checksums_combine(struct checksums **, const struct checksums *, /*@null@*/bool[cs_hashCOUNT]); typedef /*@only@*/ struct checksums *ownedchecksums; struct checksumsarray { struct strlist names; /*@null@*/ownedchecksums *checksums; }; void checksumsarray_move(/*@out@*/struct checksumsarray *, /*@special@*/struct checksumsarray *array)/*@requires maxSet(array->names.values) >= array->names.count /\ maxSet(array->checksums) >= array->names.count @*/ /*@releases array->checksums, array->names.values @*/; void checksumsarray_done(/*@special@*/struct checksumsarray *array) /*@requires maxSet(array->names.values) >= array->names.count /\ maxSet(array->checksums) >= array->names.count @*/ /*@releases array->checksums, array->names.values @*/; retvalue checksumsarray_parse(/*@out@*/struct checksumsarray *, const struct strlist [cs_hashCOUNT], const char * /*filenametoshow*/); retvalue checksumsarray_genfilelist(const struct checksumsarray *, /*@out@*/char **, /*@out@*/char **, /*@out@*/char **); retvalue checksumsarray_include(struct checksumsarray *, /*@only@*/char *, const struct 
checksums *); void checksumsarray_resetunsupported(const struct checksumsarray *, bool[cs_hashCOUNT]); retvalue hashline_parse(const char * /*filenametoshow*/, const char * /*line*/, enum checksumtype, /*@out@*/const char ** /*basename_p*/, /*@out@*/struct hash_data *, /*@out@*/struct hash_data *); struct configiterator; #ifdef CHECKSUMS_CONTEXT #ifndef MD5_H #include "md5.h" #endif #ifndef REPREPRO_SHA1_H #include "sha1.h" #endif #ifndef REPREPRO_SHA256_H #include "sha256.h" #endif struct checksumscontext { struct MD5Context md5; struct SHA1_Context sha1; struct SHA256_Context sha256; }; void checksumscontext_init(/*@out@*/struct checksumscontext *); void checksumscontext_update(struct checksumscontext *, const unsigned char *, size_t); retvalue checksums_from_context(/*@out@*/struct checksums **, struct checksumscontext *); #endif #endif reprepro-4.13.1/INSTALL0000644000175100017510000001757712152651661011440 00000000000000Build-Dependencies: libdb3, libdb4.x or libdb5.x libz Optional Dependencies: libgpgme >= 0.4.1 (In Debian libgpgme11-dev, NOT libgpgme-dev) libbz2 libarchive When Building from git: autoconf2.50 (autoconf 2.13 will not work) Basic Installation ================== These are generic installation instructions. The `configure' shell script attempts to guess correct values for various system-dependent variables used during compilation. It uses those values to create a `Makefile' in each directory of the package. It may also create one or more `.h' files containing system-dependent definitions. Finally, it creates a shell script `config.status' that you can run in the future to recreate the current configuration, a file `config.cache' that saves the results of its tests to speed up reconfiguring, and a file `config.log' containing compiler output (useful mainly for debugging `configure'). 
If you need to do unusual things to compile the package, please try to figure out how `configure' could check whether to do them, and mail diffs or instructions to the address given in the `README' so they can be considered for the next release. If at some point `config.cache' contains results you don't want to keep, you may remove or edit it. The file `configure.in' is used to create `configure' by a program called `autoconf'. You only need `configure.in' if you want to change it or regenerate `configure' using a newer version of `autoconf'. The simplest way to compile this package is: 1. `cd' to the directory containing the package's source code and type `./configure' to configure the package for your system. If you're using `csh' on an old version of System V, you might need to type `sh ./configure' instead to prevent `csh' from trying to execute `configure' itself. Running `configure' takes awhile. While running, it prints some messages telling which features it is checking for. 2. Type `make' to compile the package. 3. Optionally, type `make check' to run any self-tests that come with the package. 4. Type `make install' to install the programs and any data files and documentation. 5. You can remove the program binaries and object files from the source code directory by typing `make clean'. To also remove the files that `configure' created (so you can compile the package for a different kind of computer), type `make distclean'. There is also a `make maintainer-clean' target, but that is intended mainly for the package's developers. If you use it, you may have to get all sorts of other programs in order to regenerate files that came with the distribution. Compilers and Options ===================== Some systems require unusual options for compilation or linking that the `configure' script does not know about. You can give `configure' initial values for variables by setting them in the environment. 
Using a Bourne-compatible shell, you can do that on the command line like this: CC=c89 CFLAGS=-O2 LIBS=-lposix ./configure Or on systems that have the `env' program, you can do it like this: env CPPFLAGS=-I/usr/local/include LDFLAGS=-s ./configure Compiling For Multiple Architectures ==================================== You can compile the package for more than one kind of computer at the same time, by placing the object files for each architecture in their own directory. To do this, you must use a version of `make' that supports the `VPATH' variable, such as GNU `make'. `cd' to the directory where you want the object files and executables to go and run the `configure' script. `configure' automatically checks for the source code in the directory that `configure' is in and in `..'. If you have to use a `make' that does not supports the `VPATH' variable, you have to compile the package for one architecture at a time in the source code directory. After you have installed the package for one architecture, use `make distclean' before reconfiguring for another architecture. Installation Names ================== By default, `make install' will install the package's files in `/usr/local/bin', `/usr/local/man', etc. You can specify an installation prefix other than `/usr/local' by giving `configure' the option `--prefix=PATH'. You can specify separate installation prefixes for architecture-specific files and architecture-independent files. If you give `configure' the option `--exec-prefix=PATH', the package will use PATH as the prefix for installing programs and libraries. Documentation and other data files will still use the regular prefix. In addition, if you use an unusual directory layout you can give options like `--bindir=PATH' to specify different values for particular kinds of files. Run `configure --help' for a list of the directories you can set and what kinds of files go in them. 
If the package supports it, you can cause programs to be installed with an extra prefix or suffix on their names by giving `configure' the option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'. Optional Features ================= Some packages pay attention to `--enable-FEATURE' options to `configure', where FEATURE indicates an optional part of the package. They may also pay attention to `--with-PACKAGE' options, where PACKAGE is something like `gnu-as' or `x' (for the X Window System). The `README' should mention any `--enable-' and `--with-' options that the package recognizes. For packages that use the X Window System, `configure' can usually find the X include and library files automatically, but if it doesn't, you can use the `configure' options `--x-includes=DIR' and `--x-libraries=DIR' to specify their locations. Specifying the System Type ========================== There may be some features `configure' can not figure out automatically, but needs to determine by the type of host the package will run on. Usually `configure' can figure that out, but if it prints a message saying it can not guess the host type, give it the `--host=TYPE' option. TYPE can either be a short name for the system type, such as `sun4', or a canonical name with three fields: CPU-COMPANY-SYSTEM See the file `config.sub' for the possible values of each field. If `config.sub' isn't included in this package, then this package doesn't need to know the host type. If you are building compiler tools for cross-compiling, you can also use the `--target=TYPE' option to select the type of system they will produce code for and the `--build=TYPE' option to select the type of system on which you are compiling the package. Sharing Defaults ================ If you want to set default values for `configure' scripts to share, you can create a site shell script called `config.site' that gives default values for variables like `CC', `cache_file', and `prefix'. 
`configure' looks for `PREFIX/share/config.site' if it exists, then `PREFIX/etc/config.site' if it exists. Or, you can set the `CONFIG_SITE' environment variable to the location of the site script. A warning: not all `configure' scripts look for a site script. Operation Controls ================== `configure' recognizes the following options to control how it operates. `--cache-file=FILE' Use and save the results of the tests in FILE instead of `./config.cache'. Set FILE to `/dev/null' to disable caching, for debugging `configure'. `--help' Print a summary of the options to `configure', and exit. `--quiet' `--silent' `-q' Do not print messages saying which checks are being made. To suppress all normal output, redirect it to `/dev/null' (any error messages will still be shown). `--srcdir=DIR' Look for the package's source code in directory DIR. Usually `configure' can determine that directory automatically. `--version' Print the version of Autoconf used to generate the `configure' script, and exit. `configure' also accepts some other, not widely useful, options. reprepro-4.13.1/override.h0000644000175100017510000000231012152651661012352 00000000000000#ifndef REPREPRO_OVERRIDE_H #define REPREPRO_OVERRIDE_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" 
#endif #ifndef REPREPRO_STRLIST_H #include "strlist.h" #endif #ifndef REPREPRO_CHUNKS_H #include "chunks.h" #endif struct overridefile; struct overridedata; /* to avoid typos */ #define PRIORITY_FIELDNAME "Priority" #define SECTION_FIELDNAME "Section" void override_free(/*@only@*//*@null@*/struct overridefile *); retvalue override_read(const char *filename, /*@out@*/struct overridefile **, bool /*source*/); /*@null@*//*@dependent@*/const struct overridedata *override_search(/*@null@*/const struct overridefile *, const char * /*package*/); /*@null@*//*@dependent@*/const char *override_get(/*@null@*/const struct overridedata *, const char * /*field*/); /* add new fields to otherreplaces, but not "Section", or "Priority". * incorporates otherreplaces, or frees them on error */ /*@null@*/struct fieldtoadd *override_addreplacefields(const struct overridedata *, /*@only@*/struct fieldtoadd *); /* as above, but all fields. and may return NULL if there are no overrides */ retvalue override_allreplacefields(const struct overridedata *, /*@out@*/struct fieldtoadd **); #endif reprepro-4.13.1/mprintf.c0000644000175100017510000000234112152651661012211 00000000000000#include #include #include #include #include #include #include "mprintf.h" // TODO: check for asprintf in configure and // write a replacement for such situations. char * mprintf(const char *fmt, ...) 
{ char *p; int r; va_list va; va_start(va, fmt); r = vasprintf(&p, fmt, va); va_end(va); /* return NULL both when r is < 0 and when NULL was returned */ if (r < 0) return NULL; else return p; } char * vmprintf(const char *fmt, va_list va) { char *p; int r; r = vasprintf(&p, fmt, va); /* return NULL both when r is < 0 and when NULL was returned */ if (r < 0) return NULL; else return p; } #ifndef HAVE_DPRINTF int dprintf(int fd, const char *format, ...){ char *buffer; int ret; va_list va; va_start(va, format); buffer = vmprintf(format, va); va_end(va); if (buffer == NULL) return -1; ret = write(fd, buffer, strlen(buffer)); free(buffer); return ret; } #endif #ifndef HAVE_STRNDUP /* That's not the best possible strndup implementation, but it suffices for what * it is used here */ char *strndup(const char *str, size_t n) { char *r = malloc(n+1); if (r == NULL) return r; memcpy(r, str, n); r[n] = '\0'; return r; } #endif reprepro-4.13.1/checkindsc.c0000644000175100017510000002757112152651661012644 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003,2004,2005,2006,2007,2008,2012 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "filecntl.h" #include "strlist.h" #include "checksums.h" #include "names.h" #include "checksums.h" #include "dirs.h" #include "checkindsc.h" #include "reference.h" #include "sources.h" #include "files.h" #include "guesscomponent.h" #include "tracking.h" #include "ignore.h" #include "override.h" #include "log.h" #include "sourceextraction.h" /* This file includes the code to include sources, i.e. to create the chunk for the Sources.gz-file and to put it in the various databases. things to do with .dsc's checkin by hand: (by comparison with apt-ftparchive) * Get all from .dsc (search the chunk with the Source:-field. end the chunk artifical before the pgp-end-block.(in case someone missed the newline there)) * check to have source, version, maintainer, standards-version, files. And also look at binary, architecture and build*, as described in policy 5.4 * Get overwrite information, ecspecially the priority(if there is a binaries field, check the one with the highest) and the section (...what else...?) * Rename Source-Field to Package-Field * add dsc to files-list. 
(check other files md5sum and size) * add Directory-field * Add Priority and Status * apply possible maintainer-updates from the overwrite-file or arbitrary tag changes from the extra-overwrite-file * keep rest (perhaps sort alphabetical) */ struct dscpackage { /* things to be set by dsc_read: */ struct dsc_headers dsc; /* things that will still be NULL then: */ component_t component; /* Things that may be calculated by dsc_calclocations: */ struct strlist filekeys; }; static void dsc_free(/*@only@*/struct dscpackage *pkg) { if (pkg != NULL) { sources_done(&pkg->dsc); strlist_done(&pkg->filekeys); free(pkg); } } static retvalue dsc_read(/*@out@*/struct dscpackage **pkg, const char *filename) { retvalue r; struct dscpackage *dsc; bool broken; dsc = zNEW(struct dscpackage); if (FAILEDTOALLOC(dsc)) return RET_ERROR_OOM; r = sources_readdsc(&dsc->dsc, filename, filename, &broken); if (RET_IS_OK(r) && broken && !IGNORING(brokensignatures, "'%s' contains only broken signatures.\n" "This most likely means the file was damaged or edited improperly\n", filename)) r = RET_ERROR; if (RET_IS_OK(r)) r = propersourcename(dsc->dsc.name); if (RET_IS_OK(r)) r = properversion(dsc->dsc.version); if (RET_IS_OK(r)) r = properfilenames(&dsc->dsc.files.names); if (RET_WAS_ERROR(r)) { dsc_free(dsc); return r; } dsc->component = atom_unknown; *pkg = dsc; return RET_OK; } retvalue dsc_addprepared(const struct dsc_headers *dsc, component_t component, const struct strlist *filekeys, struct distribution *distribution, struct trackingdata *trackingdata){ retvalue r; struct target *t = distribution_getpart(distribution, component, architecture_source, pt_dsc); assert (logger_isprepared(distribution->logger)); /* finally put it into the source distribution */ r = target_initpackagesdb(t, READWRITE); if (!RET_WAS_ERROR(r)) { retvalue r2; if (interrupted()) r = RET_ERROR_INTERRUPTED; else r = target_addpackage(t, distribution->logger, dsc->name, dsc->version, dsc->control, filekeys, false, 
trackingdata, architecture_source, NULL, NULL); r2 = target_closepackagesdb(t); RET_ENDUPDATE(r, r2); } RET_UPDATE(distribution->status, r); return r; } /* insert the given .dsc into the mirror in in the * if component is NULL, guessing it from the section. * If basename, filekey and directory are != NULL, then they are used instead * of being newly calculated. * (And all files are expected to already be in the pool). */ retvalue dsc_add(component_t forcecomponent, const char *forcesection, const char *forcepriority, struct distribution *distribution, const char *dscfilename, int delete, trackingdb tracks){ retvalue r; struct dscpackage *pkg; struct trackingdata trackingdata; char *destdirectory, *origdirectory; const struct overridedata *oinfo; char *control; int i; causingfile = dscfilename; /* First make sure this distribution has a source section at all, * for which it has to be listed in the "Architectures:"-field ;-) */ if (!atomlist_in(&distribution->architectures, architecture_source)) { fprintf(stderr, "Cannot put a source package into Distribution '%s' not having 'source' in its 'Architectures:'-field!\n", distribution->codename); /* nota bene: this cannot be forced or ignored, as no target has been created for this. 
*/ return RET_ERROR; } r = dsc_read(&pkg, dscfilename); if (RET_WAS_ERROR(r)) { return r; } oinfo = override_search(distribution->overrides.dsc, pkg->dsc.name); if (forcesection == NULL) { forcesection = override_get(oinfo, SECTION_FIELDNAME); } if (forcepriority == NULL) { forcepriority = override_get(oinfo, PRIORITY_FIELDNAME); } if (forcesection != NULL) { free(pkg->dsc.section); pkg->dsc.section = strdup(forcesection); if (FAILEDTOALLOC(pkg->dsc.section)) { dsc_free(pkg); return RET_ERROR_OOM; } } if (forcepriority != NULL) { free(pkg->dsc.priority); pkg->dsc.priority = strdup(forcepriority); if (FAILEDTOALLOC(pkg->dsc.priority)) { dsc_free(pkg); return RET_ERROR_OOM; } } r = dirs_getdirectory(dscfilename, &origdirectory); if (RET_WAS_ERROR(r)) { dsc_free(pkg); return r; } if (pkg->dsc.section == NULL || pkg->dsc.priority == NULL) { struct sourceextraction *extraction; extraction = sourceextraction_init( (pkg->dsc.section == NULL)?&pkg->dsc.section:NULL, (pkg->dsc.priority == NULL)?&pkg->dsc.priority:NULL); if (FAILEDTOALLOC(extraction)) { free(origdirectory); dsc_free(pkg); return RET_ERROR_OOM; } for (i = 0 ; i < pkg->dsc.files.names.count ; i ++) sourceextraction_setpart(extraction, i, pkg->dsc.files.names.values[i]); while (sourceextraction_needs(extraction, &i)) { char *fullfilename = calc_dirconcat(origdirectory, pkg->dsc.files.names.values[i]); if (FAILEDTOALLOC(fullfilename)) { free(origdirectory); dsc_free(pkg); return RET_ERROR_OOM; } /* while it would nice to try at the pool if we * do not have the file here, to know its location * in the pool we need to know the component. And * for the component we might need the section first */ // TODO: but if forcecomponent is set it might be possible. 
r = sourceextraction_analyse(extraction, fullfilename); free(fullfilename); if (RET_WAS_ERROR(r)) { free(origdirectory); dsc_free(pkg); sourceextraction_abort(extraction); return r; } } r = sourceextraction_finish(extraction); if (RET_WAS_ERROR(r)) { free(origdirectory); dsc_free(pkg); return r; } } if (pkg->dsc.section == NULL && pkg->dsc.priority == NULL) { fprintf(stderr, "No section and no priority for '%s', skipping.\n", pkg->dsc.name); free(origdirectory); dsc_free(pkg); return RET_ERROR; } if (pkg->dsc.section == NULL) { fprintf(stderr, "No section for '%s', skipping.\n", pkg->dsc.name); free(origdirectory); dsc_free(pkg); return RET_ERROR; } if (pkg->dsc.priority == NULL) { fprintf(stderr, "No priority for '%s', skipping.\n", pkg->dsc.name); free(origdirectory); dsc_free(pkg); return RET_ERROR; } if (strcmp(pkg->dsc.section, "unknown") == 0 && verbose >= 0) { fprintf(stderr, "Warning: strange section '%s'!\n", pkg->dsc.section); } if (!atom_defined(forcecomponent)) { const char *fc; fc = override_get(oinfo, "$Component"); if (fc != NULL) { forcecomponent = component_find(fc); if (!atom_defined(forcecomponent)) { fprintf(stderr, "Unparseable component '%s' in $Component override of '%s'\n", fc, pkg->dsc.name); return RET_ERROR; } } } /* decide where it has to go */ r = guess_component(distribution->codename, &distribution->components, pkg->dsc.name, pkg->dsc.section, forcecomponent, &pkg->component); if (RET_WAS_ERROR(r)) { free(origdirectory); dsc_free(pkg); return r; } if (verbose > 0 && !atom_defined(forcecomponent)) { fprintf(stderr, "%s: component guessed as '%s'\n", dscfilename, atoms_components[pkg->component]); } { char *dscbasename, *dscfilekey; struct checksums *dscchecksums; dscbasename = calc_source_basename(pkg->dsc.name, pkg->dsc.version); destdirectory = calc_sourcedir(pkg->component, pkg->dsc.name); /* Calculate the filekeys: */ if (destdirectory != NULL) r = calc_dirconcats(destdirectory, &pkg->dsc.files.names, &pkg->filekeys); if 
(dscbasename == NULL || destdirectory == NULL || RET_WAS_ERROR(r)) { free(dscbasename); free(destdirectory); free(origdirectory); dsc_free(pkg); return r; } dscfilekey = calc_dirconcat(destdirectory, dscbasename); dscchecksums = NULL; if (FAILEDTOALLOC(dscfilename)) r = RET_ERROR_OOM; else /* then look if we already have this, or copy it in */ r = files_preinclude( dscfilename, dscfilekey, &dscchecksums); if (!RET_WAS_ERROR(r)) { /* Add the dsc-file to basenames, filekeys and md5sums, * so that it will be listed in the Sources.gz */ r = checksumsarray_include(&pkg->dsc.files, dscbasename, dscchecksums); if (RET_IS_OK(r)) r = strlist_include(&pkg->filekeys, dscfilekey); else free(dscfilekey); } else { free(dscfilekey); free(dscbasename); } checksums_free(dscchecksums); } assert (pkg->dsc.files.names.count == pkg->filekeys.count); for (i = 1 ; i < pkg->dsc.files.names.count ; i ++) { if (!RET_WAS_ERROR(r)) { r = files_checkincludefile(origdirectory, pkg->dsc.files.names.values[i], pkg->filekeys.values[i], &pkg->dsc.files.checksums[i]); } } /* Calculate the chunk to include: */ if (!RET_WAS_ERROR(r)) r = sources_complete(&pkg->dsc, destdirectory, oinfo, pkg->dsc.section, pkg->dsc.priority, &control); free(destdirectory); if (RET_IS_OK(r)) { free(pkg->dsc.control); pkg->dsc.control = control; } else { free(origdirectory); dsc_free(pkg); return r; } if (interrupted()) { dsc_free(pkg); free(origdirectory); return RET_ERROR_INTERRUPTED; } if (tracks != NULL) { r = trackingdata_summon(tracks, pkg->dsc.name, pkg->dsc.version, &trackingdata); if (RET_WAS_ERROR(r)) { free(origdirectory); dsc_free(pkg); return r; } } r = dsc_addprepared(&pkg->dsc, pkg->component, &pkg->filekeys, distribution, (tracks!=NULL)?&trackingdata:NULL); /* delete source files, if they are to be */ if ((RET_IS_OK(r) && delete >= D_MOVE) || (r == RET_NOTHING && delete >= D_DELETE)) { char *fullfilename; for (i = 0 ; i < pkg->dsc.files.names.count ; i++) { fullfilename = calc_dirconcat(origdirectory, 
pkg->dsc.files.names.values[i]); if (FAILEDTOALLOC(fullfilename)) { r = RET_ERROR_OOM; break; } if (isregularfile(fullfilename)) deletefile(fullfilename); free(fullfilename); } } free(origdirectory); dsc_free(pkg); if (tracks != NULL) { retvalue r2; r2 = trackingdata_finish(tracks, &trackingdata); RET_ENDUPDATE(r, r2); } return r; } reprepro-4.13.1/pull.c0000644000175100017510000007372512152651661011524 00000000000000/* This file is part of "reprepro" * Copyright (C) 2006,2007 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include "error.h" #include "ignore.h" #include "mprintf.h" #include "strlist.h" #include "names.h" #include "pull.h" #include "upgradelist.h" #include "distribution.h" #include "tracking.h" #include "termdecide.h" #include "filterlist.h" #include "log.h" #include "configparser.h" /*************************************************************************** * step one: * * parse CONFDIR/pull to get pull information saved in * * pull_rule structs * **************************************************************************/ /* the data for some upstream part to get pull from, some * some fields can be NULL or empty */ struct pull_rule { struct pull_rule *next; //e.g. "Name: woody" char *name; //e.g. "From: woody" char *from; //e.g. 
"Architectures: i386 sparc mips" (not set means all) struct atomlist architectures_from; struct atomlist architectures_into; bool architectures_set; //e.g. "Components: main contrib" (not set means all) struct atomlist components; bool components_set; //e.g. "UDebComponents: main" // (not set means all) struct atomlist udebcomponents; bool udebcomponents_set; // NULL means no condition /*@null@*/term *includecondition; struct filterlist filterlist; struct filterlist filtersrclist; /*----only set after _addsourcedistribution----*/ /*@NULL@*/ struct distribution *distribution; bool used; }; static void pull_rule_free(/*@only@*/struct pull_rule *pull) { if (pull == NULL) return; free(pull->name); free(pull->from); atomlist_done(&pull->architectures_from); atomlist_done(&pull->architectures_into); atomlist_done(&pull->components); atomlist_done(&pull->udebcomponents); term_free(pull->includecondition); filterlist_release(&pull->filterlist); filterlist_release(&pull->filtersrclist); free(pull); } void pull_freerules(struct pull_rule *p) { while (p != NULL) { struct pull_rule *rule; rule = p; p = rule->next; pull_rule_free(rule); } } CFlinkedlistinit(pull_rule) CFvalueSETPROC(pull_rule, name) CFvalueSETPROC(pull_rule, from) CFatomlistSETPROC(pull_rule, components, at_component) CFatomlistSETPROC(pull_rule, udebcomponents, at_component) CFfilterlistSETPROC(pull_rule, filterlist) CFfilterlistSETPROC(pull_rule, filtersrclist) CFtermSETPROC(pull_rule, includecondition) CFUSETPROC(pull_rule, architectures) { CFSETPROCVAR(pull_rule, this); retvalue r; this->architectures_set = true; r = config_getsplitatoms(iter, "Architectures", at_architecture, &this->architectures_from, &this->architectures_into); if (r == RET_NOTHING) { fprintf(stderr, "Warning parsing %s, line %u: an empty Architectures field\n" "causes the whole rule to do nothing.\n", config_filename(iter), config_markerline(iter)); } return r; } static const struct configfield pullconfigfields[] = { CFr("Name", 
pull_rule, name), CFr("From", pull_rule, from), CF("Architectures", pull_rule, architectures), CF("Components", pull_rule, components), CF("UDebComponents", pull_rule, udebcomponents), CF("FilterFormula", pull_rule, includecondition), CF("FilterSrcList", pull_rule, filtersrclist), CF("FilterList", pull_rule, filterlist) }; retvalue pull_getrules(struct pull_rule **rules) { struct pull_rule *pull = NULL; retvalue r; r = configfile_parse("pulls", IGNORABLE(unknownfield), configparser_pull_rule_init, linkedlistfinish, "pull rule", pullconfigfields, ARRAYCOUNT(pullconfigfields), &pull); if (RET_IS_OK(r)) *rules = pull; else if (r == RET_NOTHING) { assert (pull == NULL); *rules = NULL; r = RET_OK; } else { // TODO special handle unknownfield pull_freerules(pull); } return r; } /*************************************************************************** * step two: * * create pull_distribution structs to hold all additional information for * * a distribution * **************************************************************************/ struct pull_target; static void pull_freetargets(struct pull_target *targets); struct pull_distribution { struct pull_distribution *next; /*@dependant@*/struct distribution *distribution; struct pull_target *targets; /*@dependant@*/struct pull_rule *rules[]; }; void pull_freedistributions(struct pull_distribution *d) { while (d != NULL) { struct pull_distribution *next; next = d->next; pull_freetargets(d->targets); free(d); d = next; } } static retvalue pull_initdistribution(struct pull_distribution **pp, struct distribution *distribution, struct pull_rule *rules) { struct pull_distribution *p; int i; assert(distribution != NULL); if (distribution->pulls.count == 0) return RET_NOTHING; p = malloc(sizeof(struct pull_distribution)+ sizeof(struct pull_rules *)*distribution->pulls.count); if (FAILEDTOALLOC(p)) return RET_ERROR_OOM; p->next = NULL; p->distribution = distribution; p->targets = NULL; for (i = 0 ; i < distribution->pulls.count ; 
i++) { const char *name = distribution->pulls.values[i]; if (strcmp(name, "-") == 0) { p->rules[i] = NULL; } else { struct pull_rule *rule = rules; while (rule && strcmp(rule->name, name) != 0) rule = rule->next; if (rule == NULL) { fprintf(stderr, "Error: Unknown pull rule '%s' in distribution '%s'!\n", name, distribution->codename); free(p); return RET_ERROR_MISSING; } p->rules[i] = rule; rule->used = true; } } *pp = p; return RET_OK; } static retvalue pull_init(struct pull_distribution **pulls, struct pull_rule *rules, struct distribution *distributions) { struct pull_distribution *p = NULL, **pp = &p; struct distribution *d; retvalue r; for (d = distributions ; d != NULL ; d = d->next) { if (!d->selected) continue; r = pull_initdistribution(pp, d, rules); if (RET_WAS_ERROR(r)) { pull_freedistributions(p); return r; } if (RET_IS_OK(r)) { assert (*pp != NULL); pp = &(*pp)->next; } } *pulls = p; return RET_OK; } /*************************************************************************** * step three: * * load the config of the distributions mentioned in the rules * **************************************************************************/ static retvalue pull_loadsourcedistributions(struct distribution *alldistributions, struct pull_rule *rules) { struct pull_rule *rule; struct distribution *d; for (rule = rules ; rule != NULL ; rule = rule->next) { if (rule->used && rule->distribution == NULL) { for (d = alldistributions ; d != NULL ; d = d->next) { if (strcmp(d->codename, rule->from) == 0) { rule->distribution = d; break; } } if (d == NULL) { fprintf(stderr, "Error: Unknown distribution '%s' referenced in pull rule '%s'\n", rule->from, rule->name); return RET_ERROR_MISSING; } } } return RET_OK; } /*************************************************************************** * step four: * * create pull_targets and pull_sources * **************************************************************************/ struct pull_source { struct pull_source *next; /* NULL, if 
this is a delete rule */ struct target *source; struct pull_rule *rule; }; struct pull_target { /*@null@*/struct pull_target *next; /*@null@*/struct pull_source *sources; /*@dependent@*/struct target *target; /*@null@*/struct upgradelist *upgradelist; }; static void pull_freetargets(struct pull_target *targets) { while (targets != NULL) { struct pull_target *target = targets; targets = target->next; while (target->sources != NULL) { struct pull_source *source = target->sources; target->sources = source->next; free(source); } free(target); } } static retvalue pull_createsource(struct pull_rule *rule, struct target *target, struct pull_source ***s) { const struct atomlist *c; const struct atomlist *a_from, *a_into; int ai; assert (rule != NULL); assert (rule->distribution != NULL); if (rule->architectures_set) { a_from = &rule->architectures_from; a_into = &rule->architectures_into; } else { a_from = &rule->distribution->architectures; a_into = &rule->distribution->architectures; } if (target->packagetype == pt_udeb) { if (rule->udebcomponents_set) c = &rule->udebcomponents; else c = &rule->distribution->udebcomponents; } else { if (rule->components_set) c = &rule->components; else c = &rule->distribution->components; } if (!atomlist_in(c, target->component)) return RET_NOTHING; for (ai = 0 ; ai < a_into->count ; ai++) { struct pull_source *source; if (a_into->atoms[ai] != target->architecture) continue; source = NEW(struct pull_source); if (FAILEDTOALLOC(source)) return RET_ERROR_OOM; source->next = NULL; source->rule = rule; source->source = distribution_getpart(rule->distribution, target->component, a_from->atoms[ai], target->packagetype); **s = source; *s = &source->next; } return RET_OK; } static retvalue pull_createdelete(struct pull_source ***s) { struct pull_source *source; source = NEW(struct pull_source); if (FAILEDTOALLOC(source)) return RET_ERROR_OOM; source->next = NULL; source->rule = NULL; source->source = NULL; **s = source; *s = &source->next; return 
RET_OK; } static retvalue generatepulltarget(struct pull_distribution *pd, struct target *target) { struct pull_source **s; struct pull_target *pt; retvalue r; int i; pt = NEW(struct pull_target); if (FAILEDTOALLOC(pt)) return RET_ERROR_OOM; pt->target = target; pt->next = pd->targets; pt->upgradelist = NULL; pt->sources = NULL; s = &pt->sources; pd->targets = pt; for (i = 0 ; i < pd->distribution->pulls.count ; i++) { struct pull_rule *rule = pd->rules[i]; if (rule == NULL) r = pull_createdelete(&s); else r = pull_createsource(rule, target, &s); if (RET_WAS_ERROR(r)) return r; } return RET_OK; } static retvalue pull_generatetargets(struct pull_distribution *pull_distributions, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *packagetypes) { struct pull_distribution *pd; struct target *target; retvalue r; for (pd = pull_distributions ; pd != NULL ; pd = pd->next) { for (target = pd->distribution->targets ; target != NULL ; target = target->next) { if (!target_matches(target, components, architectures, packagetypes)) continue; r = generatepulltarget(pd, target); if (RET_WAS_ERROR(r)) return r; } } return RET_OK; } /*************************************************************************** * Some checking to be able to warn against typos * **************************************************************************/ static bool *preparefoundlist(const struct atomlist *list) { bool *found; int i, j; found = nzNEW(list->count, bool); if (FAILEDTOALLOC(found)) return found; for (i = 0 ; i < list->count ; i++) { if (found[i]) continue; for (j = i + 1 ; j < list->count ; j++) if (list->atoms[i] == list->atoms[j]) found[j] = true; } return found; } static inline void markasused(const struct strlist *pulls, const char *rulename, const struct atomlist *needed, const struct atomlist *have, bool *found) { int i, j, o; for (i = 0 ; i < pulls->count ; i++) { if (strcmp(pulls->values[i], rulename) != 0) continue; for (j = 0 ; j < 
have->count ; j++) { o = atomlist_ofs(needed, have->atoms[j]); if (o >= 0) found[o] = true; } } } static void checkifarchitectureisused(const struct atomlist *architectures, const struct distribution *alldistributions, const struct pull_rule *rule, const char *action) { bool *found; const struct distribution *d; int i; assert (rule != NULL); if (architectures->count == 0) return; found = preparefoundlist(architectures); if (found == NULL) return; for (d = alldistributions ; d != NULL ; d = d->next) { markasused(&d->pulls, rule->name, architectures, &d->architectures, found); } for (i = 0 ; i < architectures->count ; i++) { if (found[i]) continue; fprintf(stderr, "Warning: pull rule '%s' wants to %s architecture '%s',\n" "but no distribution using this has such an architecture.\n" "(This will simply be ignored and is not even checked when using --fast).\n", rule->name, action, atoms_architectures[architectures->atoms[i]]); } free(found); return; } static void checkifcomponentisused(const struct atomlist *components, const struct distribution *alldistributions, const struct pull_rule *rule, const char *action) { bool *found; const struct distribution *d; int i; assert (rule != NULL); if (components->count == 0) return; found = preparefoundlist(components); if (found == NULL) return; for (d = alldistributions ; d != NULL ; d = d->next) { markasused(&d->pulls, rule->name, components, &d->components, found); } for (i = 0 ; i < components->count ; i++) { if (found[i]) continue; fprintf(stderr, "Warning: pull rule '%s' wants to %s component '%s',\n" "but no distribution using this has such an component.\n" "(This will simply be ignored and is not even checked when using --fast).\n", rule->name, action, atoms_components[components->atoms[i]]); } free(found); return; } static void checkifudebcomponentisused(const struct atomlist *udebcomponents, const struct distribution *alldistributions, const struct pull_rule *rule, const char *action) { bool *found; const struct 
distribution *d; int i; assert (rule != NULL); if (udebcomponents->count == 0) return; found = preparefoundlist(udebcomponents); if (found == NULL) return; for (d = alldistributions ; d != NULL ; d = d->next) { markasused(&d->pulls, rule->name, udebcomponents, &d->udebcomponents, found); } for (i = 0 ; i < udebcomponents->count ; i++) { if (found[i]) continue; fprintf(stderr, "Warning: pull rule '%s' wants to %s udeb component '%s',\n" "but no distribution using this has such an udeb component.\n" "(This will simply be ignored and is not even checked when using --fast).\n", rule->name, action, atoms_components[udebcomponents->atoms[i]]); } free(found); return; } static void checksubset(const struct atomlist *needed, const struct atomlist *have, const char *rulename, const char *from, const char *what, const char **atoms) { int i, j; for (i = 0 ; i < needed->count ; i++) { atom_t value = needed->atoms[i]; for (j = 0 ; j < i ; j++) { if (value == needed->atoms[j]) break; } if (j < i) continue; if (!atomlist_in(have, value)) { fprintf(stderr, "Warning: pull rule '%s' wants to get something from %s '%s',\n" "but there is no such %s in distribution '%s'.\n" "(This will simply be ignored and is not even checked when using --fast).\n", rulename, what, atoms[value], what, from); } } } static void searchunused(const struct distribution *alldistributions, const struct pull_rule *rule) { if (rule->distribution != NULL) { // TODO: move this part of the checks into parsing? 
checksubset(&rule->architectures_from, &rule->distribution->architectures, rule->name, rule->from, "architecture", atoms_architectures); checksubset(&rule->components, &rule->distribution->components, rule->name, rule->from, "component", atoms_components); checksubset(&rule->udebcomponents, &rule->distribution->udebcomponents, rule->name, rule->from, "udeb component", atoms_components); } if (rule->distribution == NULL) { assert (strcmp(rule->from, "*") == 0); checkifarchitectureisused(&rule->architectures_from, alldistributions, rule, "get something from"); /* no need to check component and udebcomponent, as those * are the same with the others */ } checkifarchitectureisused(&rule->architectures_into, alldistributions, rule, "put something into"); checkifcomponentisused(&rule->components, alldistributions, rule, "put something into"); checkifudebcomponentisused(&rule->udebcomponents, alldistributions, rule, "put something into"); } static void pull_searchunused(const struct distribution *alldistributions, struct pull_rule *pull_rules) { struct pull_rule *rule; for (rule = pull_rules ; rule != NULL ; rule = rule->next) { if (!rule->used) continue; searchunused(alldistributions, rule); } } /*************************************************************************** * combination of the steps two, three and four * **************************************************************************/ retvalue pull_prepare(struct distribution *alldistributions, struct pull_rule *rules, bool fast, const struct atomlist *components, const struct atomlist *architectures, const struct atomlist *types, struct pull_distribution **pd) { struct pull_distribution *pulls; retvalue r; r = pull_init(&pulls, rules, alldistributions); if (RET_WAS_ERROR(r)) return r; r = pull_loadsourcedistributions(alldistributions, rules); if (RET_WAS_ERROR(r)) { pull_freedistributions(pulls); return r; } if (!fast) pull_searchunused(alldistributions, rules); r = pull_generatetargets(pulls, components, 
architectures, types); if (RET_WAS_ERROR(r)) { pull_freedistributions(pulls); return r; } *pd = pulls; return RET_OK; } /*************************************************************************** * step five: * * decide what gets pulled * **************************************************************************/ static upgrade_decision ud_decide_by_rule(void *privdata, const struct target *target, const char *package, const char *sourcename, /*@null@*/const char *old_version, const char *new_version, const char *sourceversion, const char *newcontrolchunk) { struct pull_rule *rule = privdata; upgrade_decision decision = UD_UPGRADE; retvalue r; struct filterlist *fl; const char *n, *v; bool cmdline_still_undecided; if (target->packagetype == pt_dsc) { assert (strcmp(package, sourcename) == 0); assert (strcmp(new_version, sourceversion) == 0); if (rule->filtersrclist.set) fl = &rule->filtersrclist; else fl = &rule->filterlist; n = package; v = new_version; } else { if (rule->filterlist.set) { fl = &rule->filterlist; n = package; v = new_version; } else { fl = &rule->filtersrclist; n = sourcename; v = sourceversion; } } switch (filterlist_find(n, v, fl)) { case flt_deinstall: case flt_purge: return UD_NO; case flt_warning: return UD_LOUDNO; case flt_supersede: decision = UD_SUPERSEDE; break; case flt_hold: decision = UD_HOLD; break; case flt_error: /* cannot yet be handled! 
*/ fprintf(stderr, "Package name marked to be unexpected('error'): '%s'!\n", package); return UD_ERROR; case flt_upgradeonly: if (old_version == NULL) return UD_NO; break; case flt_install: break; case flt_unchanged: case flt_auto_hold: assert (false); break; } cmdline_still_undecided = false; switch (filterlist_find(sourcename, sourceversion, &cmdline_src_filter)) { case flt_deinstall: case flt_purge: return UD_NO; case flt_warning: return UD_LOUDNO; case flt_auto_hold: cmdline_still_undecided = true; decision = UD_HOLD; break; case flt_hold: decision = UD_HOLD; break; case flt_supersede: decision = UD_SUPERSEDE; break; case flt_error: /* cannot yet be handled! */ fprintf(stderr, "Package name marked to be unexpected('error'): '%s'!\n", package); return UD_ERROR; case flt_upgradeonly: if (old_version == NULL) return UD_NO; break; case flt_install: decision = UD_UPGRADE; break; case flt_unchanged: cmdline_still_undecided = true; break; } if (target->packagetype != pt_dsc) { switch (filterlist_find(package, new_version, &cmdline_bin_filter)) { case flt_deinstall: case flt_purge: return UD_NO; case flt_warning: return UD_LOUDNO; case flt_hold: decision = UD_HOLD; break; case flt_supersede: decision = UD_SUPERSEDE; break; case flt_error: /* cannot yet be handled! 
*/ fprintf(stderr, "Package name marked to be unexpected('error'): '%s'!\n", package); return UD_ERROR; case flt_upgradeonly: if (old_version == NULL) return UD_NO; break; case flt_install: decision = UD_UPGRADE; break; case flt_unchanged: break; case flt_auto_hold: /* hold only if it was not in the src-filter */ if (cmdline_still_undecided) decision = UD_HOLD; break; } } else if (cmdline_bin_filter.defaulttype == flt_auto_hold) { if (cmdline_still_undecided) decision = UD_HOLD; } /* formula tested last as it is the most expensive */ if (rule->includecondition != NULL) { r = term_decidechunktarget(rule->includecondition, newcontrolchunk, target); if (RET_WAS_ERROR(r)) return UD_ERROR; if (r == RET_NOTHING) { return UD_NO; } } return decision; } static inline retvalue pull_searchformissing(/*@null@*/FILE *out, struct pull_target *p) { struct pull_source *source; retvalue result, r; if (verbose > 2 && out != NULL) fprintf(out, " pulling into '%s'\n", p->target->identifier); assert(p->upgradelist == NULL); r = upgradelist_initialize(&p->upgradelist, p->target); if (RET_WAS_ERROR(r)) return r; result = RET_NOTHING; for (source=p->sources ; source != NULL ; source=source->next) { if (source->rule == NULL) { if (verbose > 4 && out != NULL) fprintf(out, " marking everything to be deleted\n"); r = upgradelist_deleteall(p->upgradelist); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) return result; continue; } if (verbose > 4 && out != NULL) fprintf(out, " looking what to get from '%s'\n", source->source->identifier); r = upgradelist_pull(p->upgradelist, source->source, ud_decide_by_rule, source->rule, source); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) return result; } return result; } static retvalue pull_search(/*@null@*/FILE *out, struct pull_distribution *d) { retvalue result, r; struct pull_target *u; result = RET_NOTHING; for (u=d->targets ; u != NULL ; u=u->next) { r = pull_searchformissing(out, u); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; } return result; 
} static bool pull_isbigdelete(struct pull_distribution *d) { struct pull_target *u, *v; for (u = d->targets ; u != NULL ; u=u->next) { if (upgradelist_isbigdelete(u->upgradelist)) { d->distribution->omitted = true; for (v = d->targets ; v != NULL ; v = v->next) { upgradelist_free(v->upgradelist); v->upgradelist = NULL; } return true; } } return false; } static void pull_from_callback(void *privdata, const char **rule_p, const char **from_p) { struct pull_source *source = privdata; *rule_p = source->rule->name; *from_p = source->rule->from; } static retvalue pull_install(struct pull_distribution *distribution) { retvalue result, r; struct pull_target *u; struct distribution *d = distribution->distribution; assert (logger_isprepared(d->logger)); result = RET_NOTHING; for (u=distribution->targets ; u != NULL ; u=u->next) { r = upgradelist_install(u->upgradelist, d->logger, false, pull_from_callback); RET_UPDATE(d->status, r); RET_UPDATE(result, r); upgradelist_free(u->upgradelist); u->upgradelist = NULL; if (RET_WAS_ERROR(r)) break; } if (RET_IS_OK(result) && d->tracking != dt_NONE) { r = tracking_retrack(d, false); RET_ENDUPDATE(result, r); } return result; } static void pull_dumppackage(const char *packagename, /*@null@*/const char *oldversion, /*@null@*/const char *newversion, /*@null@*/const char *bestcandidate, /*@null@*/const struct strlist *newfilekeys, /*@null@*/const char *newcontrol, void *privdata) { struct pull_source *source = privdata; if (newversion == NULL) { if (oldversion != NULL && bestcandidate != NULL) { printf("'%s': '%s' will be deleted" " (best new: '%s')\n", packagename, oldversion, bestcandidate); } else if (oldversion != NULL) { printf("'%s': '%s' will be deleted" " (no longer available or superseded)\n", packagename, oldversion); } else { printf("'%s': will NOT be added as '%s'\n", packagename, bestcandidate); } } else if (newversion == oldversion) { if (bestcandidate != NULL) { if (verbose > 1) printf("'%s': '%s' will be kept" " (best 
new: '%s')\n", packagename, oldversion, bestcandidate); } else { if (verbose > 0) printf("'%s': '%s' will be kept" " (unavailable for reload)\n", packagename, oldversion); } } else { const char *via = source->rule->name; assert (newfilekeys != NULL); assert (newcontrol != NULL); if (oldversion != NULL) (void)printf("'%s': '%s' will be upgraded" " to '%s' (from '%s'):\n files needed: ", packagename, oldversion, newversion, via); else (void)printf("'%s': newly installed" " as '%s' (from '%s'):\n files needed: ", packagename, newversion, via); (void)strlist_fprint(stdout, newfilekeys); if (verbose > 2) (void)printf("\n installing as: '%s'\n", newcontrol); else (void)putchar('\n'); } } static void pull_dump(struct pull_distribution *distribution) { struct pull_target *u; for (u=distribution->targets ; u != NULL ; u=u->next) { if (u->upgradelist == NULL) continue; printf("Updates needed for '%s':\n", u->target->identifier); upgradelist_dump(u->upgradelist, pull_dumppackage); upgradelist_free(u->upgradelist); u->upgradelist = NULL; } } static void pull_dumplistpackage(const char *packagename, /*@null@*/const char *oldversion, /*@null@*/const char *newversion, /*@null@*/const char *bestcandidate, /*@null@*/const struct strlist *newfilekeys, /*@null@*/const char *newcontrol, void *privdata) { struct pull_source *source = privdata; if (newversion == NULL) { if (oldversion == NULL) return; printf("delete '%s' '%s'\n", packagename, oldversion); } else if (newversion == oldversion) { if (bestcandidate != NULL) printf("keep '%s' '%s' '%s'\n", packagename, oldversion, bestcandidate); else printf("keep '%s' '%s' unavailable\n", packagename, oldversion); } else { const char *via = source->rule->name; assert (newfilekeys != NULL); assert (newcontrol != NULL); if (oldversion != NULL) (void)printf("update '%s' '%s' '%s' '%s'\n", packagename, oldversion, newversion, via); else (void)printf("add '%s' - '%s' '%s'\n", packagename, newversion, via); } } static void pull_dumplist(struct 
pull_distribution *distribution) { struct pull_target *u; for (u=distribution->targets ; u != NULL ; u=u->next) { if (u->upgradelist == NULL) continue; printf("Updates needed for '%s':\n", u->target->identifier); upgradelist_dump(u->upgradelist, pull_dumplistpackage); upgradelist_free(u->upgradelist); u->upgradelist = NULL; } } retvalue pull_update(struct pull_distribution *distributions) { retvalue result, r; struct pull_distribution *d; for (d=distributions ; d != NULL ; d=d->next) { r = distribution_prepareforwriting(d->distribution); if (RET_WAS_ERROR(r)) return r; r = distribution_loadalloverrides(d->distribution); if (RET_WAS_ERROR(r)) return r; } if (verbose >= 0) printf("Calculating packages to pull...\n"); result = RET_NOTHING; for (d=distributions ; d != NULL ; d=d->next) { r = pull_search(stdout, d); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; // TODO: make already here sure the files are ready? } if (RET_WAS_ERROR(result)) { for (d=distributions ; d != NULL ; d=d->next) { struct pull_target *u; for (u=d->targets ; u != NULL ; u=u->next) { upgradelist_free(u->upgradelist); u->upgradelist = NULL; } } return result; } if (verbose >= 0) printf("Installing (and possibly deleting) packages...\n"); for (d=distributions ; d != NULL ; d=d->next) { if (global.onlysmalldeletes) { if (pull_isbigdelete(d)) { fprintf(stderr, "Not processing '%s' because of --onlysmalldeletes\n", d->distribution->codename); continue; } } r = pull_install(d); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; } logger_wait(); return result; } retvalue pull_checkupdate(struct pull_distribution *distributions) { struct pull_distribution *d; retvalue result, r; for (d=distributions ; d != NULL ; d=d->next) { r = distribution_loadalloverrides(d->distribution); if (RET_WAS_ERROR(r)) return r; } if (verbose >= 0) fprintf(stderr, "Calculating packages to get...\n"); result = RET_NOTHING; for (d=distributions ; d != NULL ; d=d->next) { r = pull_search(stderr, d); RET_UPDATE(result, 
r); if (RET_WAS_ERROR(r)) break; pull_dump(d); } return result; } retvalue pull_dumpupdate(struct pull_distribution *distributions) { struct pull_distribution *d; retvalue result, r; for (d=distributions ; d != NULL ; d=d->next) { r = distribution_loadalloverrides(d->distribution); if (RET_WAS_ERROR(r)) return r; } result = RET_NOTHING; for (d=distributions ; d != NULL ; d=d->next) { r = pull_search(NULL, d); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; pull_dumplist(d); } return result; } reprepro-4.13.1/exports.h0000644000175100017510000000161312152651661012244 00000000000000#ifndef REPREPRO_EXPORTS_H #define REPREPRO_EXPORTS_H #ifndef REPREPRO_RELEASE_H #include "release.h" #endif struct exportmode { /* "Packages", "Sources" or something like that */ char *filename; /* create uncompressed, create .gz, */ compressionset compressions; /* Generate a Release file next to the Indexfile , if non-null*/ /*@null@*/ char *release; /* programms to start after all are generated */ struct strlist hooks; }; retvalue exportmode_init(/*@out@*/struct exportmode *, bool /*uncompressed*/, /*@null@*/const char * /*release*/, const char * /*indexfile*/); struct configiterator; retvalue exportmode_set(struct exportmode *, struct configiterator *); void exportmode_done(struct exportmode *); retvalue export_target(const char * /*relativedir*/, struct target *, const struct exportmode *, struct release *, bool /*onlyifmissing*/, bool /*snapshot*/); #endif reprepro-4.13.1/database_p.h0000644000175100017510000000056412152651661012627 00000000000000#ifndef REPREPRO_DATABASE_P_H #define REPREPRO_DATABASE_P_H #ifndef REPREPRO_DATABASE_H #include "database.h" #endif extern /*@null@*/ struct table *rdb_checksums, *rdb_contents; extern /*@null@*/ struct table *rdb_references; retvalue database_listsubtables(const char *, /*@out@*/struct strlist *); retvalue database_dropsubtable(const char *, const char *); #endif reprepro-4.13.1/ar.c0000644000175100017510000001610012152651661011132 
00000000000000/* This file is part of "reprepro" * Copyright (C) 2005,2006 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "uncompression.h" #include "ar.h" /* Arr, me matees, Arr */ #define BLOCKSIZE 10240 #define AR_MAGIC "!\n" #define AR_HEADERMAGIC "`\n" struct ar_archive { char *filename; int fd; struct ar_header { char ah_filename[16]; char ah_date[12]; char ah_uid[6]; char ah_gid[6]; char ah_mode[8]; char ah_size[10]; char ah_magictrailer[2]; } currentheader; off_t member_size, next_position; void *readbuffer; /*@null@*/struct compressedfile *member; enum compression compression; }; static ssize_t readwait(int fd, /*@out@*/void *buf, size_t count) { ssize_t totalread; totalread = 0; while (count > 0) { ssize_t s; s = read(fd, buf, count); if (s < 0) return s; if (interrupted()) { errno = EINTR; return -1; } if ((size_t)s > count) { errno = EINVAL; return -1; } if (s == 0) break; totalread += s; buf += s; count -= s; } return totalread; } retvalue ar_open(/*@out@*/struct ar_archive **n, const char *filename) { struct ar_archive *ar; char buffer[sizeof(AR_MAGIC)]; ssize_t bytesread; if (interrupted()) return RET_ERROR_INTERRUPTED; ar = zNEW(struct ar_archive); if (FAILEDTOALLOC(ar)) return RET_ERROR_OOM; ar->fd = open(filename, 
O_NOCTTY|O_RDONLY); if (ar->fd < 0) { int e = errno; fprintf(stderr, "Error %d opening %s: %s\n", e, filename, strerror(e)); free(ar); return RET_ERRNO(e); } bytesread = readwait(ar->fd, buffer, sizeof(AR_MAGIC) - 1); if (bytesread != sizeof(AR_MAGIC)-1) { int e = errno; (void)close(ar->fd); free(ar); if (bytesread < 0) { fprintf(stderr, "Error %d reading from %s: %s\n", e, filename, strerror(e)); return RET_ERRNO(e); } else { fprintf(stderr, "Premature end of reading from %s\n", filename); return RET_ERROR; } } if (memcmp(buffer, AR_MAGIC, sizeof(AR_MAGIC)-1) != 0) { (void)close(ar->fd); free(ar); fprintf(stderr, "Missing ar header '!' at the beginning of %s\n", filename); return RET_ERROR; } ar->filename = strdup(filename); if (FAILEDTOALLOC(ar->filename)) { close(ar->fd); free(ar); return RET_ERROR_OOM; } ar->next_position = sizeof(AR_MAGIC) - 1; *n = ar; return RET_OK; } void ar_close(/*@only@*/struct ar_archive *ar) { if (ar != NULL) { if (ar->fd >= 0) (void)close(ar->fd); free(ar->filename); free(ar); } } /* RET_OK = next is there, RET_NOTHING = eof, < 0 = error */ retvalue ar_nextmember(struct ar_archive *ar, /*@out@*/char **filename) { ssize_t bytesread; char *p; off_t s; assert(ar->readbuffer == NULL); assert(ar->fd >= 0); /* seek over what is left from the last part: */ s = lseek(ar->fd, ar->next_position, SEEK_SET); if (s == (off_t)-1) { int e = errno; fprintf(stderr, "Error %d seeking to next member in ar file %s: %s\n", e, ar->filename, strerror(e)); return RET_ERRNO(e); } /* read the next header from the file */ if (interrupted()) return RET_ERROR_INTERRUPTED; bytesread = readwait(ar->fd, &ar->currentheader, sizeof(ar->currentheader)); ar->next_position += sizeof(ar->currentheader); if (bytesread == 0) return RET_NOTHING; if (bytesread != sizeof(ar->currentheader)){ int e = errno; if (bytesread < 0) { fprintf(stderr, "Error %d reading from ar file %s: %s\n", e, ar->filename, strerror(e)); return RET_ERRNO(e); } else { fprintf(stderr, "Premature end of 
ar file %s\n", ar->filename); return RET_ERROR; } } if (memcmp(ar->currentheader.ah_magictrailer, AR_HEADERMAGIC, 2) != 0) { fprintf(stderr, "Corrupt ar file %s\n", ar->filename); return RET_ERROR; } /* calculate the length and mark possible fillers being needed */ /* make ah_size null-terminated by overwriting the following field */ assert (&ar->currentheader.ah_magictrailer[0] == ar->currentheader.ah_size + 10); ar->currentheader.ah_magictrailer[0] = '\0'; ar->member_size = strtoul(ar->currentheader.ah_size, &p, 10); if (*p != '\0' && *p != ' ') { fprintf(stderr, "Error calculating length field in ar file %s\n", ar->filename); return RET_ERROR; } ar->next_position += ar->member_size; if ((ar->member_size & 1) != 0) ar->next_position ++; /* get the name of the file */ if (false) { /* handle long filenames */ // TODO! } else { /* normal filenames */ int i = sizeof(ar->currentheader.ah_filename); while (i > 0 && ar->currentheader.ah_filename[i-1] == ' ') i--; /* hop over GNU style filenames, though they should not * be in a .deb file... 
*/ if (i > 0 && ar->currentheader.ah_filename[i-1] == '/') i--; *filename = strndup(ar->currentheader.ah_filename, i); } ar->compression = c_none; return RET_OK; } void ar_archivemember_setcompression(struct ar_archive *ar, enum compression compression) { ar->compression = compression; } ssize_t ar_archivemember_read(struct archive *a, void *d, const void **p) { struct ar_archive *ar = d; ssize_t bytesread; assert (ar->readbuffer != NULL); if (ar->member == NULL) return 0; *p = ar->readbuffer; bytesread = uncompress_read(ar->member, ar->readbuffer, BLOCKSIZE); if (bytesread < 0) { const char *msg; int e; (void)uncompress_fdclose(ar->member, &e, &msg); ar->member = NULL; archive_set_error(a, e, "%s", msg); return -1; } return bytesread; } int ar_archivemember_open(struct archive *a, void *d) { struct ar_archive *ar = d; retvalue r; const char *msg; int e; assert (uncompression_supported(ar->compression)); assert (ar->readbuffer == NULL); ar->readbuffer = malloc(BLOCKSIZE); if (FAILEDTOALLOC(ar->readbuffer)) { archive_set_error(a, ENOMEM, "Out of memory"); return ARCHIVE_FATAL; } r = uncompress_fdopen(&ar->member, ar->fd, ar->member_size, ar->compression, &e, &msg); if (RET_IS_OK(r)) return ARCHIVE_OK; archive_set_error(a, e, "%s", msg); return ARCHIVE_FATAL; } int ar_archivemember_close(UNUSED(struct archive *a), void *d) { struct ar_archive *ar = d; retvalue r; const char *msg; int e; free(ar->readbuffer); ar->readbuffer = NULL; if (ar->member == NULL) return ARCHIVE_OK; r = uncompress_fdclose(ar->member, &e, &msg); ar->member = NULL; if (RET_IS_OK(r)) return ARCHIVE_OK; archive_set_error(a, e, "%s", msg); return ARCHIVE_FATAL; } reprepro-4.13.1/sources.h0000644000175100017510000000323512152651661012225 00000000000000#ifndef REPREPRO_SOURCES_H #define REPREPRO_SOURCES_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" 
#endif #ifndef REPREPRO_TARGET_H #include "target.h" #endif /* Functions for the target.h-stuff: */ get_version sources_getversion; get_installdata sources_getinstalldata; get_architecture sources_getarchitecture; get_filekeys sources_getfilekeys; get_checksums sources_getchecksums; do_reoverride sources_doreoverride; do_retrack sources_retrack; get_sourceandversion sources_getsourceandversion; complete_checksums sources_complete_checksums; /* Functions for checkindsc.c and incoming.c: */ struct dsc_headers { char *name, *version; char *control; struct checksumsarray files; /* normaly not in a .dsc file: */ /*@null@*/ char *section, *priority; }; /* read contents of filename into sources_readdsc. * - broken is like signature_readsignedchunk * - does not follow retvalue conventions, some fields may be set even when * error returned * - no checks for sanity of values, left to the caller */ retvalue sources_readdsc(struct dsc_headers *, const char *filename, const char *filenametoshow, bool *broken); void sources_done(struct dsc_headers *); struct overridedata; retvalue sources_complete(const struct dsc_headers *, const char *directory, const struct overridedata *override, const char *section, const char *priority, char **newcontrol); char *calc_source_basename(const char *name, const char *version); char *calc_sourcedir(component_t, const char *sourcename); char *calc_filekey(component_t, const char *sourcename, const char *filename); char *calc_byhanddir(component_t, const char *sourcename, const char *version); #endif reprepro-4.13.1/binaries.c0000644000175100017510000005263012152651661012334 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003,2004,2005,2006,2007,2009,2010 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include "error.h" #include "mprintf.h" #include "strlist.h" #include "names.h" #include "chunks.h" #include "sources.h" #include "binaries.h" #include "names.h" #include "dpkgversions.h" #include "log.h" #include "override.h" #include "tracking.h" #include "debfile.h" static const char * const deb_checksum_headers[cs_COUNT] = { "MD5sum", "SHA1", "SHA256", "Size"}; static char *calc_binary_basename(const char *name, const char *version, architecture_t arch, packagetype_t packagetype) { const char *v; assert (name != NULL); assert (version != NULL); assert (atom_defined(arch)); assert (atom_defined(packagetype)); v = strchr(version, ':'); if (v != NULL) v++; else v = version; return mprintf("%s_%s_%s.%s", name, v, atoms_architectures[arch], atoms_packagetypes[packagetype]); } /* get checksums out of a "Packages"-chunk. 
*/
/* Collect all checksum fields (MD5sum/SHA1/SHA256/Size) from a binary
 * control chunk and build a checksums object from them.
 * Errors if no hash at all or no Size field was present. */
static retvalue binaries_parse_checksums(const char *chunk, /*@out@*/struct checksums **checksums_p) {
	retvalue result = RET_NOTHING, r;
	char *fields[cs_COUNT];
	enum checksumtype t;
	bool have_hash = false;

	/* fetch every known checksum header; absent ones stay NULL */
	for (t = 0 ; t < cs_COUNT ; t++) {
		fields[t] = NULL;
		r = chunk_getvalue(chunk, deb_checksum_headers[t], &fields[t]);
		if (RET_IS_OK(r) && t != cs_length)
			have_hash = true;
		RET_UPDATE(result, r);
	}
	if (!have_hash) {
		fprintf(stderr,
"No checksums found in binary control chunk:\n '%s'\n",
				chunk);
		RET_UPDATE(result, RET_ERROR_MISSING);
	}
	if (fields[cs_length] == NULL) {
		fprintf(stderr,
"Missing 'Size' line in binary control chunk:\n '%s'\n",
				chunk);
		RET_UPDATE(result, RET_ERROR_MISSING);
	}
	if (RET_WAS_ERROR(result)) {
		/* roll back everything collected so far */
		for (t = 0 ; t < cs_COUNT ; t++)
			free(fields[t]);
		return result;
	}
	return checksums_init(checksums_p, fields);
}

/* Extract the Architecture: field of a binary control chunk and look it
 * up in the interned architecture atoms. */
retvalue binaries_getarchitecture(const char *chunk, architecture_t *architecture_p) {
	retvalue r;
	char *value;

	r = chunk_getvalue(chunk, "Architecture", &value);
	if (r == RET_NOTHING) {
		fprintf(stderr,
"Internal Error: Missing Architecture: header in '%s'!\n",
				chunk);
		return RET_ERROR;
	}
	if (RET_WAS_ERROR(r))
		return r;
	*architecture_p = architecture_find(value);
	free(value);
	if (!atom_defined(*architecture_p)) {
		fprintf(stderr,
"Internal Error: Unexpected Architecture: header in '%s'!\n",
				chunk);
		return RET_ERROR;
	}
	return RET_OK;
}

/* get somefields out of a "Packages.gz"-chunk.
* returns RET_OK on success, RET_NOTHING if incomplete, error otherwise */ static retvalue binaries_parse_chunk(const char *chunk, const char *packagename, packagetype_t packagetype, architecture_t package_architecture, const char *version, /*@out@*/char **sourcename_p, /*@out@*/char **basename_p) { retvalue r; char *mysourcename, *mybasename; assert(packagename!=NULL); /* get the sourcename */ r = chunk_getname(chunk, "Source", &mysourcename, true); if (r == RET_NOTHING) { mysourcename = strdup(packagename); if (FAILEDTOALLOC(mysourcename)) r = RET_ERROR_OOM; } if (RET_WAS_ERROR(r)) { return r; } r = properpackagename(packagename); if (!RET_WAS_ERROR(r)) r = properversion(version); if (RET_WAS_ERROR(r)) { free(mysourcename); return r; } mybasename = calc_binary_basename(packagename, version, package_architecture, packagetype); if (FAILEDTOALLOC(mybasename)) { free(mysourcename); return RET_ERROR_OOM; } *basename_p = mybasename; *sourcename_p = mysourcename; return RET_OK; } /* get files out of a "Packages.gz"-chunk. 
*/ retvalue binaries_getfilekeys(const char *chunk, struct strlist *files) { retvalue r; char *filename; /* Read the filename given there */ r = chunk_getvalue(chunk, "Filename", &filename); if (!RET_IS_OK(r)) { if (r == RET_NOTHING) { fprintf(stderr, "Data does not look like binary control: '%s'\n", chunk); r = RET_ERROR; } return r; } r = strlist_init_singleton(filename, files); return r; } static retvalue calcfilekeys(component_t component, const char *sourcename, const char *basefilename, struct strlist *filekeys) { char *filekey; retvalue r; r = propersourcename(sourcename); if (RET_WAS_ERROR(r)) { return r; } filekey = calc_filekey(component, sourcename, basefilename); if (FAILEDTOALLOC(filekey)) return RET_ERROR_OOM; r = strlist_init_singleton(filekey, filekeys); return r; } static inline retvalue calcnewcontrol(const char *chunk, const char *packagename, const char *sourcename, const char *basefilename, component_t component, struct strlist *filekeys, char **newchunk) { retvalue r; char *n; n = chunk_normalize(chunk, "Package", packagename); if (FAILEDTOALLOC(n)) return RET_ERROR_OOM; r = calcfilekeys(component, sourcename, basefilename, filekeys); if (RET_WAS_ERROR(r)) { free(n); return r; } assert (filekeys->count == 1); *newchunk = chunk_replacefield(n, "Filename", filekeys->values[0], false); free(n); if (FAILEDTOALLOC(*newchunk)) { strlist_done(filekeys); return RET_ERROR_OOM; } return RET_OK; } retvalue binaries_getversion(const char *control, char **version) { retvalue r; r = chunk_getvalue(control, "Version", version); if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) { fprintf(stderr, "Missing 'Version' field in chunk:'%s'\n", control); return RET_ERROR; } return r; } retvalue binaries_getinstalldata(const struct target *t, const char *packagename, const char *version, architecture_t package_architecture, const char *chunk, char **control, struct strlist *filekeys, struct checksumsarray *origfiles) { char *sourcename, *basefilename; struct 
checksumsarray origfilekeys; retvalue r; r = binaries_parse_chunk(chunk, packagename, t->packagetype, package_architecture, version, &sourcename, &basefilename); if (RET_WAS_ERROR(r)) { return r; } else if (r == RET_NOTHING) { fprintf(stderr, "Does not look like a binary package: '%s'!\n", chunk); return RET_ERROR; } r = binaries_getchecksums(chunk, &origfilekeys); if (RET_WAS_ERROR(r)) { free(sourcename); free(basefilename); return r; } r = calcnewcontrol(chunk, packagename, sourcename, basefilename, t->component, filekeys, control); if (RET_WAS_ERROR(r)) { checksumsarray_done(&origfilekeys); } else { assert (r != RET_NOTHING); checksumsarray_move(origfiles, &origfilekeys); } free(sourcename); free(basefilename); return r; } retvalue binaries_getchecksums(const char *chunk, struct checksumsarray *filekeys) { retvalue r; struct checksumsarray a; r = binaries_getfilekeys(chunk, &a.names); if (RET_WAS_ERROR(r)) return r; assert (a.names.count == 1); a.checksums = NEW(struct checksums *); if (FAILEDTOALLOC(a.checksums)) { strlist_done(&a.names); return RET_ERROR_OOM; } r = binaries_parse_checksums(chunk, a.checksums); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { free(a.checksums); strlist_done(&a.names); return r; } checksumsarray_move(filekeys, &a); return RET_OK; } retvalue binaries_doreoverride(const struct target *target, const char *packagename, const char *controlchunk, /*@out@*/char **newcontrolchunk) { const struct overridedata *o; struct fieldtoadd *fields; char *newchunk; retvalue r; if (interrupted()) return RET_ERROR_INTERRUPTED; o = override_search(target->distribution->overrides.deb, packagename); if (o == NULL) return RET_NOTHING; r = override_allreplacefields(o, &fields); if (!RET_IS_OK(r)) return r; newchunk = chunk_replacefields(controlchunk, fields, "Filename", false); addfield_free(fields); if (FAILEDTOALLOC(newchunk)) return RET_ERROR_OOM; *newcontrolchunk = newchunk; return RET_OK; } retvalue ubinaries_doreoverride(const struct target 
*target, const char *packagename, const char *controlchunk, /*@out@*/char **newcontrolchunk) { const struct overridedata *o; struct fieldtoadd *fields; char *newchunk; if (interrupted()) return RET_ERROR_INTERRUPTED; o = override_search(target->distribution->overrides.udeb, packagename); if (o == NULL) return RET_NOTHING; fields = override_addreplacefields(o, NULL); if (FAILEDTOALLOC(fields)) return RET_ERROR_OOM; newchunk = chunk_replacefields(controlchunk, fields, "Description", true); addfield_free(fields); if (FAILEDTOALLOC(newchunk)) return RET_ERROR_OOM; *newcontrolchunk = newchunk; return RET_OK; } retvalue binaries_retrack(const char *packagename, const char *chunk, trackingdb tracks) { retvalue r; const char *sourcename; char *fsourcename, *sourceversion, *arch, *filekey; enum filetype filetype; struct trackedpackage *pkg; //TODO: elliminate duplicate code! assert(packagename!=NULL); if (interrupted()) return RET_ERROR_INTERRUPTED; /* is there a sourcename */ r = chunk_getnameandversion(chunk, "Source", &fsourcename, &sourceversion); if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) { sourceversion = NULL; sourcename = packagename; fsourcename = NULL; } else { sourcename = fsourcename; } if (sourceversion == NULL) { // Think about binNMUs, can something be done here? 
r = chunk_getvalue(chunk, "Version", &sourceversion); if (RET_WAS_ERROR(r)) { free(fsourcename); return r; } if (r == RET_NOTHING) { free(fsourcename); fprintf(stderr, "Missing 'Version' field in chunk:'%s'\n", chunk); return RET_ERROR; } } r = chunk_getvalue(chunk, "Architecture", &arch); if (r == RET_NOTHING) { fprintf(stderr, "No Architecture field in chunk:'%s'\n", chunk); r = RET_ERROR; } if (RET_WAS_ERROR(r)) { free(sourceversion); free(fsourcename); return r; } if (strcmp(arch, "all") == 0) { filetype = ft_ALL_BINARY; } else { filetype = ft_ARCH_BINARY; } free(arch); r = chunk_getvalue(chunk, "Filename", &filekey); if (!RET_IS_OK(r)) { if (r == RET_NOTHING) { fprintf(stderr, "No Filename field in chunk: '%s'\n", chunk); r = RET_ERROR; } free(sourceversion); free(fsourcename); return r; } r = tracking_getornew(tracks, sourcename, sourceversion, &pkg); free(fsourcename); free(sourceversion); if (RET_WAS_ERROR(r)) { free(filekey); return r; } assert (r != RET_NOTHING); r = trackedpackage_addfilekey(tracks, pkg, filetype, filekey, true); if (RET_WAS_ERROR(r)) { trackedpackage_free(pkg); return r; } return tracking_save(tracks, pkg); } retvalue binaries_getsourceandversion(const char *chunk, const char *packagename, char **source, char **version) { retvalue r; char *sourcename, *sourceversion; //TODO: elliminate duplicate code! 
assert(packagename!=NULL); /* is there a sourcename */ r = chunk_getnameandversion(chunk, "Source", &sourcename, &sourceversion); if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) { sourceversion = NULL; sourcename = strdup(packagename); if (FAILEDTOALLOC(sourcename)) return RET_ERROR_OOM; } if (sourceversion == NULL) { r = chunk_getvalue(chunk, "Version", &sourceversion); if (RET_WAS_ERROR(r)) { free(sourcename); return r; } if (r == RET_NOTHING) { free(sourcename); fprintf(stderr, "No Version field in chunk:'%s'\n", chunk); return RET_ERROR; } } *source = sourcename; *version = sourceversion; return RET_OK; } static inline retvalue getvalue(const char *filename, const char *chunk, const char *field, char **value) { retvalue r; r = chunk_getvalue(chunk, field, value); if (r == RET_NOTHING) { fprintf(stderr, "No %s field in %s's control file!\n", field, filename); r = RET_ERROR; } return r; } static inline retvalue checkvalue(const char *filename, const char *chunk, const char *field) { retvalue r; r = chunk_checkfield(chunk, field); if (r == RET_NOTHING) { fprintf(stderr, "No %s field in %s's control file!\n", field, filename); r = RET_ERROR; } return r; } static inline retvalue getvalue_n(const char *chunk, const char *field, char **value) { retvalue r; r = chunk_getvalue(chunk, field, value); if (r == RET_NOTHING) { *value = NULL; } return r; } void binaries_debdone(struct deb_headers *pkg) { free(pkg->name);free(pkg->version); free(pkg->source);free(pkg->sourceversion); free(pkg->control); free(pkg->section); free(pkg->priority); } retvalue binaries_readdeb(struct deb_headers *deb, const char *filename, bool needssourceversion) { retvalue r; char *architecture; r = extractcontrol(&deb->control, filename); if (RET_WAS_ERROR(r)) return r; /* first look for fields that should be there */ r = chunk_getname(deb->control, "Package", &deb->name, false); if (r == RET_NOTHING) { fprintf(stderr, "Missing 'Package' field in %s!\n", filename); r = RET_ERROR; } if 
(RET_WAS_ERROR(r)) return r; r = checkvalue(filename, deb->control, "Maintainer"); if (RET_WAS_ERROR(r)) return r; r = checkvalue(filename, deb->control, "Description"); if (RET_WAS_ERROR(r)) return r; r = getvalue(filename, deb->control, "Version", &deb->version); if (RET_WAS_ERROR(r)) return r; r = getvalue(filename, deb->control, "Architecture", &architecture); if (RET_WAS_ERROR(r)) return r; r = properfilenamepart(architecture); if (RET_WAS_ERROR(r)) { free(architecture); return r; } r = architecture_intern(architecture, &deb->architecture); free(architecture); if (RET_WAS_ERROR(r)) return r; /* can be there, otherwise we also know what it is */ if (needssourceversion) r = chunk_getnameandversion(deb->control, "Source", &deb->source, &deb->sourceversion); else r = chunk_getname(deb->control, "Source", &deb->source, true); if (r == RET_NOTHING) { deb->source = strdup(deb->name); if (FAILEDTOALLOC(deb->source)) r = RET_ERROR_OOM; } if (RET_WAS_ERROR(r)) return r; if (needssourceversion && deb->sourceversion == NULL) { deb->sourceversion = strdup(deb->version); if (FAILEDTOALLOC(deb->sourceversion)) return RET_ERROR_OOM; } /* normaly there, but optional: */ r = getvalue_n(deb->control, PRIORITY_FIELDNAME, &deb->priority); if (RET_WAS_ERROR(r)) return r; r = getvalue_n(deb->control, SECTION_FIELDNAME, &deb->section); if (RET_WAS_ERROR(r)) return r; return RET_OK; } /* do overwrites, add Filename and Checksums to the control-item */ retvalue binaries_complete(const struct deb_headers *pkg, const char *filekey, const struct checksums *checksums, const struct overridedata *override, const char *section, const char *priority, char **newcontrol) { struct fieldtoadd *replace; char *normalchunk, *newchunk; enum checksumtype type; assert (section != NULL && priority != NULL); assert (filekey != NULL && checksums != NULL); replace = NULL; for (type = 0 ; type < cs_COUNT ; type++) { const char *start; size_t len; if (checksums_getpart(checksums, type, &start, &len)) { 
replace = addfield_newn(deb_checksum_headers[type], start, len, replace); if (FAILEDTOALLOC(replace)) return RET_ERROR_OOM; } } replace = addfield_new("Filename", filekey, replace); if (FAILEDTOALLOC(replace)) return RET_ERROR_OOM; replace = addfield_new(SECTION_FIELDNAME, section, replace); if (FAILEDTOALLOC(replace)) return RET_ERROR_OOM; replace = addfield_new(PRIORITY_FIELDNAME, priority, replace); if (FAILEDTOALLOC(replace)) return RET_ERROR_OOM; replace = override_addreplacefields(override, replace); if (FAILEDTOALLOC(replace)) return RET_ERROR_OOM; normalchunk = chunk_normalize(pkg->control, "Package", pkg->name); if (FAILEDTOALLOC(normalchunk)) newchunk = NULL; else newchunk = chunk_replacefields(normalchunk, replace, "Description", true); free(normalchunk); addfield_free(replace); if (FAILEDTOALLOC(newchunk)) { return RET_ERROR_OOM; } *newcontrol = newchunk; return RET_OK; } /* update Checksums */ retvalue binaries_complete_checksums(const char *chunk, const struct strlist *filekeys, struct checksums **c, char **out) { struct fieldtoadd *replace; char *newchunk; enum checksumtype type; const struct checksums *checksums; assert (filekeys->count == 1); checksums = c[0]; replace = NULL; for (type = 0 ; type < cs_COUNT ; type++) { const char *start; size_t len; if (checksums_getpart(checksums, type, &start, &len)) { replace = addfield_newn(deb_checksum_headers[type], start, len, replace); if (FAILEDTOALLOC(replace)) return RET_ERROR_OOM; } } newchunk = chunk_replacefields(chunk, replace, "Description", true); addfield_free(replace); if (FAILEDTOALLOC(newchunk)) return RET_ERROR_OOM; *out = newchunk; return RET_OK; } retvalue binaries_adddeb(const struct deb_headers *deb, const struct atomlist *forcearchitectures, packagetype_t packagetype, struct distribution *distribution, struct trackingdata *trackingdata, component_t component, const struct strlist *filekeys, const char *control) { retvalue r, result; int i; assert (logger_isprepared(distribution->logger)); 
/* finally put it into one or more architectures of the distribution */ result = RET_NOTHING; if (deb->architecture != architecture_all) { struct target *t = distribution_getpart(distribution, component, deb->architecture, packagetype); r = target_initpackagesdb(t, READWRITE); if (!RET_WAS_ERROR(r)) { retvalue r2; if (interrupted()) r = RET_ERROR_INTERRUPTED; else r = target_addpackage(t, distribution->logger, deb->name, deb->version, control, filekeys, false, trackingdata, deb->architecture, NULL, NULL); r2 = target_closepackagesdb(t); RET_ENDUPDATE(r, r2); } RET_UPDATE(result, r); RET_UPDATE(distribution->status, result); return result; } /* It's an architecture all package */ /* if -A includes all, it is added everywhere, as if no -A was * given. (as it behaved this way when there was only one -A possible, * and to allow incoming to force it into architecture 'all' ) * */ if (forcearchitectures != NULL && atomlist_in(forcearchitectures, architecture_all)) forcearchitectures = NULL; for (i = 0 ; i < distribution->architectures.count ; i++) { /*@dependent@*/struct target *t; architecture_t a = distribution->architectures.atoms[i]; if (a == architecture_source) continue; if (forcearchitectures != NULL && !atomlist_in(forcearchitectures, a)) continue; t = distribution_getpart(distribution, component, a, packagetype); r = target_initpackagesdb(t, READWRITE); if (!RET_WAS_ERROR(r)) { retvalue r2; if (interrupted()) r = RET_ERROR_INTERRUPTED; else r = target_addpackage(t, distribution->logger, deb->name, deb->version, control, filekeys, false, trackingdata, deb->architecture, NULL, NULL); r2 = target_closepackagesdb(t); RET_ENDUPDATE(r, r2); } RET_UPDATE(result, r); } RET_UPDATE(distribution->status, result); return result; } static inline retvalue checkadddeb(struct distribution *distribution, component_t component, architecture_t architecture, packagetype_t packagetype, bool tracking, const struct deb_headers *deb, bool permitnewerold) { retvalue r; struct target *t; 
t = distribution_getpart(distribution, component, architecture, packagetype); assert (t != NULL); r = target_initpackagesdb(t, READONLY); if (!RET_WAS_ERROR(r)) { retvalue r2; if (interrupted()) r = RET_ERROR_INTERRUPTED; else r = target_checkaddpackage(t, deb->name, deb->version, tracking, permitnewerold); r2 = target_closepackagesdb(t); RET_ENDUPDATE(r, r2); } return r; } retvalue binaries_checkadddeb(const struct deb_headers *deb, architecture_t forcearchitecture, packagetype_t packagetype, struct distribution *distribution, bool tracking, component_t component, bool permitnewerold) { retvalue r, result; int i; /* finally put it into one or more architectures of the distribution */ result = RET_NOTHING; if (deb->architecture != architecture_all) { r = checkadddeb(distribution, component, deb->architecture, packagetype, tracking, deb, permitnewerold); RET_UPDATE(result, r); } else if (atom_defined(forcearchitecture) && forcearchitecture != architecture_all) { r = checkadddeb(distribution, component, forcearchitecture, packagetype, tracking, deb, permitnewerold); RET_UPDATE(result, r); } else for (i = 0 ; i < distribution->architectures.count ; i++) { architecture_t a = distribution->architectures.atoms[i]; if (a == architecture_source) continue; r = checkadddeb(distribution, component, a, packagetype, tracking, deb, permitnewerold); RET_UPDATE(result, r); } return result; } retvalue binaries_calcfilekeys(component_t component, const struct deb_headers *deb, packagetype_t packagetype, struct strlist *filekeys) { retvalue r; char *basefilename; basefilename = calc_binary_basename(deb->name, deb->version, deb->architecture, packagetype); if (FAILEDTOALLOC(basefilename)) return RET_ERROR_OOM; r = calcfilekeys(component, deb->source, basefilename, filekeys); free(basefilename); return r; } reprepro-4.13.1/upgradelist.c0000644000175100017510000005304012152651661013057 00000000000000/* This file is part of "reprepro" * Copyright (C) 2004,2005,2006,2007,2008 Bernhard R. 
Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include "error.h" #include "ignore.h" #include "strlist.h" #include "indexfile.h" #include "dpkgversions.h" #include "target.h" #include "files.h" #include "upgradelist.h" struct package_data { struct package_data *next; /* the name of the package: */ char *name; /* the version in our repository: * NULL means not yet in the archive */ char *version_in_use; /* the most recent version we found * (either is version_in_use or version_new)*/ /*@dependent@*/const char *version; /* if this is != 0, package will be deleted afterwards, * (or new version simply ignored if it is not yet in the * archive) */ bool deleted; /* The most recent version we found upstream: * NULL means nothing found. */ char *new_version; /* where the recent version comes from: */ /*@dependent@*/void *privdata; /* the new control-chunk for the package to go in * non-NULL if new_version && newversion == version_in_use */ char *new_control; /* the list of files that will belong to this: * same validity */ struct strlist new_filekeys; struct checksumsarray new_origfiles; /* to destinguish arch all from not arch all */ architecture_t architecture; }; struct upgradelist { /*@dependent@*/struct target *target; struct package_data *list; /* package the next package will most probably be after. 
* (NULL=before start of list) */ /*@null@*//*@dependent@*/struct package_data *last; /* internal...*/ }; static void package_data_free(/*@only@*/struct package_data *data){ if (data == NULL) return; free(data->name); free(data->version_in_use); free(data->new_version); //free(data->new_from); free(data->new_control); strlist_done(&data->new_filekeys); checksumsarray_done(&data->new_origfiles); free(data); } /* This is called before any package lists are read for any package we already * have in this target. upgrade->list points to the first in the sorted list, * upgrade->last to the last one inserted */ static retvalue save_package_version(struct upgradelist *upgrade, const char *packagename, const char *chunk) { char *version; retvalue r; struct package_data *package; r = upgrade->target->getversion(chunk, &version); if (RET_WAS_ERROR(r)) return r; package = zNEW(struct package_data); if (FAILEDTOALLOC(package)) { free(version); return RET_ERROR_OOM; } package->privdata = NULL; package->name = strdup(packagename); if (FAILEDTOALLOC(package->name)) { free(package); free(version); return RET_ERROR_OOM; } package->version_in_use = version; version = NULL; // just to be sure... 
package->version = package->version_in_use; if (upgrade->list == NULL) { /* first chunk to add: */ upgrade->list = package; upgrade->last = package; } else { if (strcmp(packagename, upgrade->last->name) > 0) { upgrade->last->next = package; upgrade->last = package; } else { /* this should only happen if the underlying * database-method get changed, so just throwing * out here */ fprintf(stderr, "Package database is not sorted!!!\n"); assert(false); exit(EXIT_FAILURE); } } return RET_OK; } retvalue upgradelist_initialize(struct upgradelist **ul, struct target *t) { struct upgradelist *upgrade; retvalue r, r2; const char *packagename, *controlchunk; struct target_cursor iterator; upgrade = zNEW(struct upgradelist); if (FAILEDTOALLOC(upgrade)) return RET_ERROR_OOM; upgrade->target = t; /* Beginn with the packages currently in the archive */ r = target_openiterator(t, READONLY, &iterator); if (RET_WAS_ERROR(r)) { upgradelist_free(upgrade); return r; } while (target_nextpackage(&iterator, &packagename, &controlchunk)) { r2 = save_package_version(upgrade, packagename, controlchunk); RET_UPDATE(r, r2); if (RET_WAS_ERROR(r2)) break; } r2 = target_closeiterator(&iterator); RET_UPDATE(r, r2); if (RET_WAS_ERROR(r)) { upgradelist_free(upgrade); return r; } upgrade->last = NULL; *ul = upgrade; return RET_OK; } void upgradelist_free(struct upgradelist *upgrade) { struct package_data *l; if (upgrade == NULL) return; l = upgrade->list; while (l != NULL) { struct package_data *n = l->next; package_data_free(l); l = n; } free(upgrade); return; } static retvalue upgradelist_trypackage(struct upgradelist *upgrade, void *privdata, upgrade_decide_function *predecide, void *predecide_data, const char *packagename_const, /*@null@*//*@only@*/char *packagename, const char *sourcename, /*@only@*/char *version, const char *sourceversion, architecture_t architecture, const char *chunk) { retvalue r; upgrade_decision decision; struct package_data *current, *insertafter; if (architecture == 
architecture_all) { if (upgrade->target->packagetype == pt_dsc) { fputs("Internal error: trying to put binary ('all')" " package into source architecture!\n", stderr); return RET_ERROR_INTERNAL; } } /* insertafter = NULL will mean insert before list */ insertafter = upgrade->last; /* the next one to test, current = NULL will mean not found */ if (insertafter != NULL) current = insertafter->next; else current = upgrade->list; /* the algorithm assumes almost all packages are feed in * alphabetically. So the next package will likely be quite * after the last one. Otherwise we walk down the long list * again and again... and again... and even some more...*/ while (true) { int cmp; assert (insertafter == NULL || insertafter->next == current); assert (insertafter != NULL || current == upgrade->list); if (current == NULL) cmp = -1; /* every package is before the end of list */ else cmp = strcmp(packagename_const, current->name); if (cmp == 0) break; if (cmp < 0) { int precmp; if (insertafter == NULL) { /* if we are before the first * package, add us there...*/ current = NULL; break; } // I only hope noone creates indices anti-sorted: precmp = strcmp(packagename_const, insertafter->name); if (precmp == 0) { current = insertafter; break; } else if (precmp < 0) { /* restart at the beginning: */ current = upgrade->list; insertafter = NULL; if (verbose > 10) { fprintf(stderr, "restarting search..."); } continue; } else { // precmp > 0 /* insert after insertafter: */ current = NULL; break; } assert ("This is not reached" == NULL); } /* cmp > 0 : may come later... 
*/ assert (current != NULL); insertafter = current; current = current->next; if (current == NULL) { /* add behind insertafter at end of list */ break; } /* otherwise repeat until place found */ } if (current == NULL) { /* adding a package not yet known */ struct package_data *new; char *newcontrol; decision = predecide(predecide_data, upgrade->target, packagename_const, sourcename, NULL, version, sourceversion, chunk); if (decision != UD_UPGRADE) { upgrade->last = insertafter; if (decision == UD_LOUDNO) fprintf(stderr, "Loudly rejecting '%s' '%s' to enter '%s'!\n", packagename, version, upgrade->target->identifier); free(packagename); free(version); return (decision==UD_ERROR)?RET_ERROR:RET_NOTHING; } new = zNEW(struct package_data); if (FAILEDTOALLOC(new)) { free(packagename); free(version); return RET_ERROR_OOM; } new->deleted = false; //to be sure... new->privdata = privdata; if (packagename == NULL) { new->name = strdup(packagename_const); if (FAILEDTOALLOC(new->name)) { free(packagename); free(version); free(new); return RET_ERROR_OOM; } } else new->name = packagename; packagename = NULL; //to be sure... new->new_version = version; new->version = version; new->architecture = architecture; version = NULL; //to be sure... 
r = upgrade->target->getinstalldata(upgrade->target, new->name, new->new_version, new->architecture, chunk, &new->new_control, &new->new_filekeys, &new->new_origfiles); if (RET_WAS_ERROR(r)) { package_data_free(new); return r; } /* apply override data */ r = upgrade->target->doreoverride(upgrade->target, new->name, new->new_control, &newcontrol); if (RET_WAS_ERROR(r)) { package_data_free(new); return r; } if (RET_IS_OK(r)) { free(new->new_control); new->new_control = newcontrol; } if (insertafter != NULL) { new->next = insertafter->next; insertafter->next = new; } else { new->next = upgrade->list; upgrade->list = new; } upgrade->last = new; } else { /* The package already exists: */ char *control, *newcontrol; struct strlist files; struct checksumsarray origfiles; int versioncmp; upgrade->last = current; r = dpkgversions_cmp(version, current->version, &versioncmp); if (RET_WAS_ERROR(r)) { free(packagename); free(version); return r; } if (versioncmp <= 0 && !current->deleted) { /* there already is a newer version, so * doing nothing but perhaps updating what * versions are around, when we are newer * than yet known candidates... 
*/ int c = 0; if (current->new_version == current->version) c =versioncmp; else if (current->new_version == NULL) c = 1; else (void)dpkgversions_cmp(version, current->new_version, &c); if (c > 0) { free(current->new_version); current->new_version = version; } else free(version); free(packagename); return RET_NOTHING; } if (versioncmp > 0 && verbose > 30) fprintf(stderr, "'%s' from '%s' is newer than '%s' currently\n", version, packagename_const, current->version); decision = predecide(predecide_data, upgrade->target, current->name, sourcename, current->version, version, sourceversion, chunk); if (decision != UD_UPGRADE) { if (decision == UD_LOUDNO) fprintf(stderr, "Loudly rejecting '%s' '%s' to enter '%s'!\n", packagename, version, upgrade->target->identifier); /* Even if we do not install it, setting it on hold * will keep it or even install from a mirror before * the delete was applied */ if (decision == UD_HOLD) current->deleted = false; free(version); free(packagename); /* while supersede will remove the current package */ if (decision == UD_SUPERSEDE) { current->deleted = true; return RET_OK; } return (decision==UD_ERROR)?RET_ERROR:RET_NOTHING; } if (versioncmp == 0) { /* we are replacing a package with the same version, * so we keep the old one for sake of speed. */ if (current->deleted && current->version != current->new_version) { /* remember the version for checkupdate/pull */ free(current->new_version); current->new_version = version; } else free(version); current->deleted = false; free(packagename); return RET_NOTHING; } if (versioncmp != 0 && current->version == current->new_version && current->version_in_use != NULL) { /* The version to include is not the newest after the * last deletion round), but maybe older, maybe newer. * So we get to the question: it is also not the same * like the version we already have? 
*/ int vcmp = 1; (void)dpkgversions_cmp(version, current->version_in_use, &vcmp); if (vcmp == 0) { current->version = current->version_in_use; if (current->deleted) { free(current->new_version); current->new_version = version; } else free(version); current->deleted = false; free(packagename); return RET_NOTHING; } } // TODO: the following case might be worth considering, but sadly new_version // might have changed without the proper data set. // if (versioncmp >= 0 && current->version == current->version_in_use // && current->new_version != NULL) { current->architecture = architecture; r = upgrade->target->getinstalldata(upgrade->target, packagename_const, version, architecture, chunk, &control, &files, &origfiles); free(packagename); if (RET_WAS_ERROR(r)) { free(version); return r; } /* apply override data */ r = upgrade->target->doreoverride(upgrade->target, packagename_const, control, &newcontrol); if (RET_WAS_ERROR(r)) { free(version); free(control); strlist_done(&files); checksumsarray_done(&origfiles); return r; } if (RET_IS_OK(r)) { free(control); control = newcontrol; } current->deleted = false; free(current->new_version); current->new_version = version; current->version = version; current->privdata = privdata; strlist_move(¤t->new_filekeys, &files); checksumsarray_move(¤t->new_origfiles, &origfiles); free(current->new_control); current->new_control = control; } return RET_OK; } retvalue upgradelist_update(struct upgradelist *upgrade, void *privdata, const char *filename, upgrade_decide_function *decide, void *decide_data, bool ignorewrongarchitecture) { struct indexfile *i; char *packagename, *version, *sourcename, *sourceversion; const char *control; retvalue result, r; architecture_t package_architecture; r = indexfile_open(&i, filename, c_none); if (!RET_IS_OK(r)) return r; result = RET_NOTHING; upgrade->last = NULL; while (indexfile_getnext(i, &packagename, &version, &control, &package_architecture, upgrade->target, ignorewrongarchitecture)) { r = 
upgrade->target->getsourceandversion(control, packagename, &sourcename, &sourceversion); if (RET_IS_OK(r)) { r = upgradelist_trypackage(upgrade, privdata, decide, decide_data, packagename, packagename, sourcename, version, sourceversion, package_architecture, control); RET_UPDATE(result, r); free(sourcename); free(sourceversion); } if (RET_WAS_ERROR(r)) { if (verbose > 0) fprintf(stderr, "Stop reading further chunks from '%s' due to previous errors.\n", filename); break; } if (interrupted()) { result = RET_ERROR_INTERRUPTED; break; } } r = indexfile_close(i); RET_ENDUPDATE(result, r); return result; } retvalue upgradelist_pull(struct upgradelist *upgrade, struct target *source, upgrade_decide_function *predecide, void *decide_data, void *privdata) { retvalue result, r; const char *package, *control; struct target_cursor iterator; upgrade->last = NULL; r = target_openiterator(source, READONLY, &iterator); if (RET_WAS_ERROR(r)) return r; result = RET_NOTHING; while (target_nextpackage(&iterator, &package, &control)) { char *version; architecture_t package_architecture; char *sourcename, *sourceversion; assert (source->packagetype == upgrade->target->packagetype); r = source->getversion(control, &version); assert (r != RET_NOTHING); if (!RET_IS_OK(r)) { RET_UPDATE(result, r); break; } r = source->getarchitecture(control, &package_architecture); if (!RET_IS_OK(r)) { RET_UPDATE(result, r); break; } if (package_architecture != upgrade->target->architecture && package_architecture != architecture_all) { free(version); continue; if (source->architecture == upgrade->target->architecture && !ignore[IGN_wrongarchitecture]) { fprintf(stderr, "WARNING: architecture '%s' package '%s' in '%s'!\n", atoms_architectures[ package_architecture], package, source->identifier); if (ignored[IGN_wrongarchitecture] == 0) { fprintf(stderr, "(expected 'all' or '%s', so ignoring this package, but\n" "your database seems to be in a bad state. 
(Try running 'reprepro check')!)\n", atoms_architectures[ source->architecture]); } ignored[IGN_wrongarchitecture]++; } free(version); continue; } r = upgrade->target->getsourceandversion(control, package, &sourcename, &sourceversion); if (RET_IS_OK(r)) { r = upgradelist_trypackage(upgrade, privdata, predecide, decide_data, package, NULL, sourcename, version, sourceversion, package_architecture, control); RET_UPDATE(result, r); free(sourcename); free(sourceversion); } if (RET_WAS_ERROR(r)) break; if (interrupted()) { result = RET_ERROR_INTERRUPTED; break; } } r = target_closeiterator(&iterator); RET_ENDUPDATE(result, r); return result; } /* mark all packages as deleted, so they will vanis unless readded or reholded */ retvalue upgradelist_deleteall(struct upgradelist *upgrade) { struct package_data *pkg; for (pkg = upgrade->list ; pkg != NULL ; pkg = pkg->next) { pkg->deleted = true; } return RET_OK; } /* request all wanted files in the downloadlists given before */ retvalue upgradelist_enqueue(struct upgradelist *upgrade, enqueueaction *action, void *calldata) { struct package_data *pkg; retvalue result, r; result = RET_NOTHING; assert(upgrade != NULL); for (pkg = upgrade->list ; pkg != NULL ; pkg = pkg->next) { if (pkg->version == pkg->new_version && !pkg->deleted) { r = action(calldata, &pkg->new_origfiles, &pkg->new_filekeys, pkg->privdata); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; } } return result; } /* delete all packages that will not be kept (i.e. 
either deleted or upgraded) */ retvalue upgradelist_predelete(struct upgradelist *upgrade, struct logger *logger) { struct package_data *pkg; retvalue result, r; result = RET_NOTHING; assert(upgrade != NULL); result = target_initpackagesdb(upgrade->target, READWRITE); if (RET_WAS_ERROR(result)) return result; for (pkg = upgrade->list ; pkg != NULL ; pkg = pkg->next) { if (pkg->version_in_use != NULL && (pkg->version == pkg->new_version || pkg->deleted)) { if (interrupted()) r = RET_ERROR_INTERRUPTED; else r = target_removepackage(upgrade->target, logger, pkg->name, NULL); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; } } r = target_closepackagesdb(upgrade->target); RET_ENDUPDATE(result, r); return result; } bool upgradelist_isbigdelete(const struct upgradelist *upgrade) { struct package_data *pkg; long long deleted = 0, all = 0; if (upgrade->list == NULL) return false; for (pkg = upgrade->list ; pkg != NULL ; pkg = pkg->next) { if (pkg->version_in_use == NULL) continue; all++; if (pkg->deleted) deleted++; } return deleted >= 10 && all/deleted < 5; } bool upgradelist_woulddelete(const struct upgradelist *upgrade) { struct package_data *pkg; if (upgrade->list == NULL) return false; for (pkg = upgrade->list ; pkg != NULL ; pkg = pkg->next) { if (pkg->version_in_use == NULL) continue; if (pkg->deleted) return true; } return false; } retvalue upgradelist_install(struct upgradelist *upgrade, struct logger *logger, bool ignoredelete, void (*callback)(void *, const char **, const char **)){ struct package_data *pkg; retvalue result, r; if (upgrade->list == NULL) return RET_NOTHING; result = target_initpackagesdb(upgrade->target, READWRITE); if (RET_WAS_ERROR(result)) return result; result = RET_NOTHING; for (pkg = upgrade->list ; pkg != NULL ; pkg = pkg->next) { if (pkg->version == pkg->new_version && !pkg->deleted) { char *newcontrol; assert ((pkg->architecture == architecture_all && upgrade->target->packagetype != pt_dsc) || pkg->architecture == 
upgrade->target->architecture); r = files_checkorimprove(&pkg->new_filekeys, pkg->new_origfiles.checksums); if (! RET_WAS_ERROR(r)) { r = upgrade->target->completechecksums( pkg->new_control, &pkg->new_filekeys, pkg->new_origfiles.checksums, &newcontrol); } if (! RET_WAS_ERROR(r)) { /* upgrade (or possibly downgrade) */ const char *causingrule = NULL, *suitefrom = NULL; free(pkg->new_control); pkg->new_control = newcontrol; callback(pkg->privdata, &causingrule, &suitefrom); // TODO: trackingdata? if (interrupted()) r = RET_ERROR_INTERRUPTED; else r = target_addpackage(upgrade->target, logger, pkg->name, pkg->new_version, pkg->new_control, &pkg->new_filekeys, true, NULL, pkg->architecture, causingrule, suitefrom); } RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; } if (pkg->deleted && pkg->version_in_use != NULL && !ignoredelete) { if (interrupted()) r = RET_ERROR_INTERRUPTED; else r = target_removepackage(upgrade->target, logger, pkg->name, NULL); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; } } r = target_closepackagesdb(upgrade->target); RET_ENDUPDATE(result, r); return result; } void upgradelist_dump(struct upgradelist *upgrade, dumpaction action){ struct package_data *pkg; assert(upgrade != NULL); for (pkg = upgrade->list ; pkg != NULL ; pkg = pkg->next) { if (interrupted()) return; if (pkg->deleted) action(pkg->name, pkg->version_in_use, NULL, pkg->new_version, NULL, NULL, pkg->privdata); else if (pkg->version == pkg->version_in_use) action(pkg->name, pkg->version_in_use, pkg->version_in_use, pkg->new_version, NULL, NULL, pkg->privdata); else action(pkg->name, pkg->version_in_use, pkg->new_version, NULL, &pkg->new_filekeys, pkg->new_control, pkg->privdata); } } reprepro-4.13.1/archallflood.c0000644000175100017510000004466312152651661013201 00000000000000/* This file is part of "reprepro" * Copyright (C) 2009 Bernhard R. 
Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include "error.h" #include "strlist.h" #include "indexfile.h" #include "dpkgversions.h" #include "target.h" #include "distribution.h" #include "tracking.h" #include "files.h" #include "archallflood.h" struct aa_source_package { /*@null@*/struct aa_source_package *parent; /*@null@*/struct aa_source_package *left_child; /*@null@*/struct aa_source_package *right_child; /*@null@*/struct aa_source_package *nextversion; char *name; char *version; /* if true, it was already verified that there is no * binary package of the same source version already there, * so new architecture 'all' can be added without danger */ bool has_no_sibling; /* if true, then there is a binary package of this source * package, so replacing an architecture all is only allowed * if there is already a binary for the new one */ bool has_sibling; int refcount; }; struct aa_package_data { struct aa_package_data *next; /* the name of the architecture all package: */ char *name; /* NULL if does not exists/not yet known */ /*@null@*/char *old_version; /*@null@*/struct aa_source_package *old_source; /*@null@*/char *new_version; /*@null@*/struct aa_source_package *new_source; bool new_has_sibling; struct checksumsarray new_origfiles; struct strlist new_filekeys; char *new_control; }; struct floodlist { 
/*@dependent@*/struct target *target; struct aa_source_package *sources; struct aa_package_data *list; /* package the next package will most probably be after. * (NULL=before start of list) */ /*@null@*//*@dependent@*/struct aa_package_data *last; }; static void aa_package_data_free(/*@only@*/struct aa_package_data *data){ if (data == NULL) return; free(data->name); free(data->old_version); free(data->new_version); free(data->new_control); strlist_done(&data->new_filekeys); checksumsarray_done(&data->new_origfiles); free(data); } static void floodlist_free(struct floodlist *list) { struct aa_source_package *s; struct aa_package_data *l; if (list == NULL) return; l = list->list; while (l != NULL) { struct aa_package_data *n = l->next; aa_package_data_free(l); l = n; } s = list->sources; while (s != NULL) { struct aa_source_package *n; while (s->left_child != NULL || s->right_child != NULL) { if (s->left_child != NULL) { n = s->left_child; s->left_child = NULL; s = n; } else { n = s->right_child; s->right_child = NULL; s = n; } } while (s->nextversion != NULL) { n = s->nextversion->nextversion; /* do not free name, it is not malloced */ free(s->nextversion->version); free(s->nextversion); s->nextversion = n; } n = s->parent; free(s->name); free(s->version); free(s); s = n; } free(list); return; } static retvalue find_or_add_source(struct floodlist *list, /*@only@*/char *source, /*@only@*/char *sourceversion, /*@out@*/struct aa_source_package **src_p) { struct aa_source_package *parent, **p, *n; int c; parent = NULL; p = &list->sources; /* if this gets too slow, make it a balanced tree, * but it seems fast enough even as simple tree */ while (*p != NULL) { c = strcmp(source, (*p)->name); if (c == 0) break; parent = *p; if (c > 0) p = &parent->right_child; else p = &parent->left_child; } if (*p == NULL) { /* there is not even something with this name */ n = zNEW(struct aa_source_package); if (FAILEDTOALLOC(n)) { free(source); free(sourceversion); return RET_ERROR_OOM; 
} n->name = source; n->version = sourceversion; n->parent = parent; *p = n; *src_p = n; return RET_OK; } free(source); source = (*p)->name; /* source name found, now look for version: */ c = strcmp(sourceversion, (*p)->version); if (c == 0) { free(sourceversion); *src_p = *p; return RET_OK; } if (c < 0) { /* before first item, do some swapping as this is * part of the name linked list */ n = zNEW(struct aa_source_package); if (FAILEDTOALLOC(n)) { free(sourceversion); return RET_ERROR_OOM; } memcpy(n, *p, sizeof(struct aa_source_package)); setzero(struct aa_source_package, *p); (*p)->name = source; (*p)->version = sourceversion; (*p)->left_child = n->left_child; (*p)->right_child = n->right_child; (*p)->parent = n->parent; n->left_child = NULL; n->right_child = NULL; n->parent = NULL; (*p)->nextversion = n; *src_p = *p; return RET_OK; } do { p = &(*p)->nextversion; if (*p == NULL) break; c = strcmp(sourceversion, (*p)->version); } while (c > 0); if (c == 0) { assert (*p != NULL); free(sourceversion); *src_p = *p; return RET_OK; } n = zNEW(struct aa_source_package); if (FAILEDTOALLOC(n)) { free(sourceversion); return RET_ERROR_OOM; } n->name = source; n->version = sourceversion; n->nextversion = *p; *p = n; *src_p = n; return RET_OK; } static struct aa_source_package *find_source(struct floodlist *list, const char *source, const char *sourceversion) { struct aa_source_package *p; int c = -1; p = list->sources; while (p != NULL) { c = strcmp(source, p->name); if (c == 0) break; if (c > 0) p = p->right_child; else p = p->left_child; } if (p == NULL) return NULL; while (p != NULL && (c = strcmp(sourceversion, p->version)) > 0) p = p->nextversion; if (c < 0) return NULL; else return p; } /* Before anything else is done the current state of one target is read into * the list: list->list points to the first in the sorted list, * list->last to the last one inserted */ static retvalue save_package_version(struct floodlist *list, const char *packagename, const char *chunk) { 
char *version, *source, *sourceversion; architecture_t architecture; struct aa_source_package *src; retvalue r; struct aa_package_data *package; r = list->target->getarchitecture(chunk, &architecture); if (RET_WAS_ERROR(r)) return r; r = list->target->getsourceandversion(chunk, packagename, &source, &sourceversion); if (RET_WAS_ERROR(r)) return r; r = find_or_add_source(list, source, sourceversion, &src); source = NULL; sourceversion = NULL; // just to be sure if (RET_WAS_ERROR(r)) return r; r = list->target->getversion(chunk, &version); if (RET_WAS_ERROR(r)) return r; if (architecture != architecture_all) { free(version); src->has_sibling = true; return RET_NOTHING; } package = zNEW(struct aa_package_data); if (FAILEDTOALLOC(package)) { free(version); return RET_ERROR_OOM; } package->name = strdup(packagename); if (FAILEDTOALLOC(package->name)) { free(package); free(version); return RET_ERROR_OOM; } package->old_version = version; version = NULL; // just to be sure... package->old_source = src; if (list->list == NULL) { /* first chunk to add: */ list->list = package; list->last = package; } else { if (strcmp(packagename, list->last->name) > 0) { list->last->next = package; list->last = package; } else { /* this should only happen if the underlying * database-method get changed, so just throwing * out here */ fprintf(stderr, "INTERNAL ERROR: Package database is not sorted!!!\n"); assert(false); exit(EXIT_FAILURE); } } return RET_OK; } static retvalue floodlist_initialize(struct floodlist **fl, struct target *t) { struct floodlist *list; retvalue r, r2; const char *packagename, *controlchunk; struct target_cursor iterator; list = zNEW(struct floodlist); if (FAILEDTOALLOC(list)) return RET_ERROR_OOM; list->target = t; /* Begin with the packages currently in the archive */ r = target_openiterator(t, READONLY, &iterator); if (RET_WAS_ERROR(r)) { floodlist_free(list); return r; } while (target_nextpackage(&iterator, &packagename, &controlchunk)) { r2 = 
save_package_version(list, packagename, controlchunk); RET_UPDATE(r, r2); if (RET_WAS_ERROR(r2)) break; } r2 = target_closeiterator(&iterator); RET_UPDATE(r, r2); if (RET_WAS_ERROR(r)) { floodlist_free(list); return r; } list->last = NULL; *fl = list; return RET_OK; } static retvalue floodlist_trypackage(struct floodlist *list, const char *packagename_const, /*@only@*/char *version, const char *chunk) { retvalue r; struct aa_package_data *current, *insertafter; /* insertafter = NULL will mean insert before list */ insertafter = list->last; /* the next one to test, current = NULL will mean not found */ if (insertafter != NULL) current = insertafter->next; else current = list->list; /* the algorithm assumes almost all packages are feed in * alphabetically. */ while (true) { int cmp; assert (insertafter == NULL || insertafter->next == current); assert (insertafter != NULL || current == list->list); if (current == NULL) cmp = -1; /* every package is before the end of list */ else cmp = strcmp(packagename_const, current->name); if (cmp == 0) break; if (cmp < 0) { int precmp; if (insertafter == NULL) { /* if we are before the first * package, add us there...*/ current = NULL; break; } precmp = strcmp(packagename_const, insertafter->name); if (precmp == 0) { current = insertafter; break; } else if (precmp < 0) { /* restart at the beginning: */ current = list->list; insertafter = NULL; continue; } else { // precmp > 0 /* insert after insertafter: */ current = NULL; break; } assert ("This is not reached" == NULL); } /* cmp > 0 : may come later... 
*/ assert (current != NULL); insertafter = current; current = current->next; if (current == NULL) { /* add behind insertafter at end of list */ break; } /* otherwise repeat until place found */ } if (current == NULL) { /* adding a package not yet known */ struct aa_package_data *new; char *source, *sourceversion; struct aa_source_package *src; r = list->target->getsourceandversion(chunk, packagename_const, &source, &sourceversion); if (! RET_IS_OK(r)) { free(version); return r; } src = find_source(list, source, sourceversion); free(source); free(sourceversion); new = zNEW(struct aa_package_data); if (FAILEDTOALLOC(new)) { free(version); return RET_ERROR_OOM; } new->new_source = src; new->new_version = version; version = NULL; new->name = strdup(packagename_const); if (FAILEDTOALLOC(new->name)) { aa_package_data_free(new); return RET_ERROR_OOM; } r = list->target->getinstalldata(list->target, new->name, new->new_version, architecture_all, chunk, &new->new_control, &new->new_filekeys, &new->new_origfiles); if (RET_WAS_ERROR(r)) { aa_package_data_free(new); return r; } if (insertafter != NULL) { new->next = insertafter->next; insertafter->next = new; } else { new->next = list->list; list->list = new; } list->last = new; } else { /* The package already exists: */ char *control; struct strlist files; struct checksumsarray origfiles; char *source, *sourceversion; struct aa_source_package *src; int versioncmp; list->last = current; if (current->new_has_sibling) { /* it has a new and that has a binary sibling, * which means this becomes the new version * exactly when it is newer than the old newest */ r = dpkgversions_cmp(version, current->new_version, &versioncmp); if (RET_WAS_ERROR(r)) { free(version); return r; } if (versioncmp <= 0) { free(version); return RET_NOTHING; } } else if (current->old_version != NULL) { /* if it is older than the old one, we will * always discard it */ r = dpkgversions_cmp(version, current->old_version, &versioncmp); if (RET_WAS_ERROR(r)) { 
free(version); return r; } if (versioncmp <= 0) { free(version); return RET_NOTHING; } } /* we need to get the source to know more */ r = list->target->getsourceandversion(chunk, packagename_const, &source, &sourceversion); if (! RET_IS_OK(r)) { free(version); return r; } src = find_source(list, source, sourceversion); free(source); free(sourceversion); if (src == NULL || !src->has_sibling) { /* the new one has no sibling, only allowed * to override those that have: */ if (current->new_version == NULL) { if (current->old_source->has_sibling) { free(version); return RET_NOTHING; } } else if (current->new_has_sibling) { free(version); return RET_NOTHING; } else { /* the new one has no sibling and the old one * has not too, take the newer one: */ r = dpkgversions_cmp(version, current->new_version, &versioncmp); if (RET_WAS_ERROR(r)) { free(version); return r; } if (versioncmp <= 0) { free(version); return RET_NOTHING; } } } r = list->target->getinstalldata(list->target, packagename_const, version, architecture_all, chunk, &control, &files, &origfiles); if (RET_WAS_ERROR(r)) { free(version); return r; } free(current->new_version); current->new_version = version; current->new_source = src; current->new_has_sibling = src != NULL && src->has_sibling; strlist_done(¤t->new_filekeys); strlist_move(¤t->new_filekeys, &files); checksumsarray_done(¤t->new_origfiles); checksumsarray_move(¤t->new_origfiles, &origfiles); free(current->new_control); current->new_control = control; } return RET_OK; } static retvalue floodlist_pull(struct floodlist *list, struct target *source) { retvalue result, r; const char *package, *control; struct target_cursor iterator; list->last = NULL; r = target_openiterator(source, READONLY, &iterator); if (RET_WAS_ERROR(r)) return r; result = RET_NOTHING; while (target_nextpackage(&iterator, &package, &control)) { char *version; architecture_t package_architecture; r = list->target->getarchitecture(control, &package_architecture); if (r == RET_NOTHING) 
continue; if (!RET_IS_OK(r)) { RET_UPDATE(result, r); break; } if (package_architecture != architecture_all) continue; r = list->target->getversion(control, &version); if (r == RET_NOTHING) continue; if (!RET_IS_OK(r)) { RET_UPDATE(result, r); break; } r = floodlist_trypackage(list, package, version, control); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; if (interrupted()) { result = RET_ERROR_INTERRUPTED; break; } } r = target_closeiterator(&iterator); RET_ENDUPDATE(result, r); return result; } static retvalue floodlist_install(struct floodlist *list, struct logger *logger, /*@NULL@*/struct trackingdata *td) { struct aa_package_data *pkg; retvalue result, r; if (list->list == NULL) return RET_NOTHING; result = target_initpackagesdb(list->target, READWRITE); if (RET_WAS_ERROR(result)) return result; result = RET_NOTHING; for (pkg = list->list ; pkg != NULL ; pkg = pkg->next) { if (pkg->new_version != NULL) { r = files_expectfiles(&pkg->new_filekeys, pkg->new_origfiles.checksums); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) continue; if (interrupted()) { r = RET_ERROR_INTERRUPTED; break; } if (td != NULL) { if (pkg->new_source != NULL) { r = trackingdata_switch(td, pkg->new_source->name, pkg->new_source->version); } else { char *source, *sourceversion; r = list->target->getsourceandversion( pkg->new_control, pkg->name, &source, &sourceversion); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { RET_UPDATE(result, r); break; } r = trackingdata_switch(td, source, sourceversion); free(source); free(sourceversion); } if (RET_WAS_ERROR(r)) { RET_UPDATE(result, r); break; } } r = target_addpackage(list->target, logger, pkg->name, pkg->new_version, pkg->new_control, &pkg->new_filekeys, false, td, architecture_all, NULL, NULL); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; } } r = target_closepackagesdb(list->target); RET_ENDUPDATE(result, r); return result; } retvalue flood(struct distribution *d, const struct atomlist *components, const struct atomlist 
*architectures, const struct atomlist *packagetypes, architecture_t architecture, trackingdb tracks) { struct target *t, *s; retvalue result = RET_NOTHING, r; struct trackingdata trackingdata; if (tracks != NULL) { r = trackingdata_new(tracks, &trackingdata); if (RET_WAS_ERROR(r)) return r; } for (t = d->targets ; t != NULL ; t = t->next) { struct floodlist *fl = NULL; if (atom_defined(architecture)) { if (architecture != t->architecture) continue; } else if (limitations_missed(architectures, t->architecture)) continue; if (limitations_missed(components, t->component)) continue; if (limitations_missed(packagetypes, t->packagetype)) continue; if (t->packagetype != pt_deb && t->packagetype != pt_udeb) continue; r = floodlist_initialize(&fl, t); if (RET_WAS_ERROR(r)) { if (tracks != NULL) trackingdata_done(&trackingdata); return r; } for (s = d->targets ; s != NULL ; s = s->next) { if (s->component != t->component) continue; if (s->packagetype != t->packagetype) continue; /* no need to copy things from myself: */ if (s->architecture == t->architecture) continue; if (limitations_missed(architectures, s->architecture)) continue; r = floodlist_pull(fl, s); RET_UPDATE(d->status, r); if (RET_WAS_ERROR(r)) { if (tracks != NULL) trackingdata_done(&trackingdata); floodlist_free(fl); return r; } } r = floodlist_install(fl, d->logger, (tracks != NULL)?&trackingdata:NULL); RET_UPDATE(result, r); floodlist_free(fl); if (RET_WAS_ERROR(r)) { if (tracks != NULL) trackingdata_done(&trackingdata); return r; } } if (tracks != NULL) { r = trackingdata_finish(tracks, &trackingdata); RET_ENDUPDATE(result, r); } return result; } reprepro-4.13.1/md5.c0000644000175100017510000001726012152651661011225 00000000000000/* * This code implements the MD5 message-digest algorithm. * The algorithm is due to Ron Rivest. This code was * written by Colin Plumb in 1993, no copyright is claimed. * This code is in the public domain; do with it what you wish. 
* * Equivalent code is available from RSA Data Security, Inc. * This code has been tested against that, and is equivalent, * except that you don't need to include two pages of legalese * with every copy. * * To compute the message digest of a chunk of bytes, declare an * MD5Context structure, pass it to MD5Init, call MD5Update as * needed on buffers full of bytes, and then call MD5Final, which * will fill a supplied 16-byte array with the digest. * * Changed so as no longer to depend on Colin Plumb's `usual.h' header * definitions; now uses stuff from dpkg's config.h. * - Ian Jackson . * Still in the public domain. * * Changed to no longer need things from dpkg, * and made MD5Transfor static... * - Bernhard R. Link * Still in public domain. */ #include #include /* for memcpy() */ #include /* for stupid systems */ #include /* for ntohl() */ #include "md5.h" static void MD5Transform(UWORD32 buf[4], UWORD32 const in[16]); #ifdef WORDS_BIGENDIAN static void byteSwap(UWORD32 *buf, unsigned words) { md5byte *p = (md5byte *)buf; do { *buf++ = (UWORD32)((unsigned)p[3] << 8 | p[2]) << 16 | ((unsigned)p[1] << 8 | p[0]); p += 4; } while (--words); } #else /* I'm assuming there is only big and little endian, PDP_ENDIAN users * will have bad luck... */ #define byteSwap(buf,words) #endif /* * Start MD5 accumulation. Set bit count to 0 and buffer to mysterious * initialization constants. */ void MD5Init(struct MD5Context *ctx) { ctx->buf[0] = 0x67452301U; ctx->buf[1] = 0xefcdab89U; ctx->buf[2] = 0x98badcfeU; ctx->buf[3] = 0x10325476U; ctx->bytes[0] = 0; ctx->bytes[1] = 0; } /* * Update context to reflect the concatenation of another buffer full * of bytes. 
*/ void MD5Update(struct MD5Context *ctx, md5byte const *buf, unsigned int len) { UWORD32 t; /* Update byte count */ t = ctx->bytes[0]; if ((ctx->bytes[0] = t + len) < t) ctx->bytes[1]++; /* Carry from low to high */ t = 64 - (t & 0x3f); /* Space available in ctx->in (at least 1) */ if (t > len) { memcpy((md5byte *)ctx->in + 64 - t, buf, len); return; } /* First chunk is an odd size */ memcpy((md5byte *)ctx->in + 64 - t, buf, t); byteSwap(ctx->in, 16); MD5Transform(ctx->buf, ctx->in); buf += t; len -= t; /* Process data in 64-byte chunks */ while (len >= 64) { memcpy(ctx->in, buf, 64); byteSwap(ctx->in, 16); MD5Transform(ctx->buf, ctx->in); buf += 64; len -= 64; } /* Handle any remaining bytes of data. */ memcpy(ctx->in, buf, len); } /* * Final wrapup - pad to 64-byte boundary with the bit pattern * 1 0* (64-bit count of bits processed, MSB-first) */ void MD5Final(md5byte digest[16], struct MD5Context *ctx) { int count = ctx->bytes[0] & 0x3f; /* Number of bytes in ctx->in */ md5byte *p = (md5byte *)ctx->in + count; /* Set the first char of padding to 0x80. There is always room. */ *p++ = 0x80; /* Bytes of padding needed to make 56 bytes (-8..55) */ count = 56 - 1 - count; if (count < 0) { /* Padding forces an extra block */ memset(p, 0, count + 8); byteSwap(ctx->in, 16); MD5Transform(ctx->buf, ctx->in); p = (md5byte *)ctx->in; count = 56; } memset(p, 0, count); byteSwap(ctx->in, 14); /* Append length in bits and transform */ ctx->in[14] = ctx->bytes[0] << 3; ctx->in[15] = ctx->bytes[1] << 3 | ctx->bytes[0] >> 29; MD5Transform(ctx->buf, ctx->in); byteSwap(ctx->buf, 4); memcpy(digest, ctx->buf, 16); memset(ctx, 0, sizeof(ctx)); /* In case it's sensitive */ } #ifndef ASM_MD5 /* The four core functions - F1 is optimized somewhat */ /* #define F1(x, y, z) (x & y | ~x & z) */ #define F1(x, y, z) (z ^ (x & (y ^ z))) #define F2(x, y, z) F1(z, x, y) #define F3(x, y, z) (x ^ y ^ z) #define F4(x, y, z) (y ^ (x | ~z)) /* This is the central step in the MD5 algorithm. 
*/ #define MD5STEP(f,w,x,y,z,in,s) \ (w += f(x,y,z) + in, w = (w<>(32-s)) + x) /* * The core of the MD5 algorithm, this alters an existing MD5 hash to * reflect the addition of 16 longwords of new data. MD5Update blocks * the data and converts bytes into longwords for this routine. */ static void MD5Transform(UWORD32 buf[4], UWORD32 const in[16]) { register UWORD32 a, b, c, d; a = buf[0]; b = buf[1]; c = buf[2]; d = buf[3]; MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7); MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12); MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17); MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22); MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7); MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12); MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17); MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22); MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7); MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12); MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17); MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22); MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7); MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12); MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17); MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22); MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5); MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9); MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14); MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20); MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5); MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9); MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14); MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20); MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5); MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9); MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14); MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20); MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5); MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9); MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14); MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20); 
MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4); MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11); MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16); MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23); MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4); MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11); MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16); MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23); MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4); MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11); MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16); MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23); MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4); MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11); MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16); MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23); MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6); MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10); MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15); MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21); MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6); MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10); MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15); MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21); MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6); MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10); MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15); MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21); MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6); MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10); MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15); MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21); buf[0] += a; buf[1] += b; buf[2] += c; buf[3] += d; } #endif reprepro-4.13.1/filelist.h0000644000175100017510000000202212152651661012346 00000000000000#ifndef REPREPRO_FILELIST_H #define REPREPRO_FILELIST_H #ifndef REPREPRO_RELEASE_H #include "release.h" #endif struct filelist_list; retvalue filelist_init(struct filelist_list **list); retvalue filelist_addpackage(struct filelist_list *, const char *package, const char 
*section, const char *filekey); retvalue filelist_write(struct filelist_list *list, struct filetorelease *file); void filelist_free(/*@only@*/struct filelist_list *); retvalue fakefilelist(const char *filekey); retvalue filelists_translate(struct table *, struct table *); /* for use in routines reading the data: */ struct filelistcompressor { unsigned int offsets[256]; size_t size, len; unsigned int dirdepth; char *filelist; }; retvalue filelistcompressor_setup(/*@out@*/struct filelistcompressor *); retvalue filelistcompressor_add(struct filelistcompressor *, const char *, size_t); retvalue filelistcompressor_finish(struct filelistcompressor *, /*@out@*/char **, /*@out@*/size_t *); void filelistcompressor_cancel(struct filelistcompressor *); #endif reprepro-4.13.1/pool.c0000644000175100017510000005231112152651661011505 00000000000000/* This file is part of "reprepro" * Copyright (C) 2008,2009,2012 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "ignore.h" #include "mprintf.h" #include "atoms.h" #include "strlist.h" #include "dirs.h" #include "pool.h" #include "reference.h" #include "files.h" #include "sources.h" #include "outhook.h" /* for now save them only in memory. 
In later times some way to store * them on disk would be nice */ static component_t reserved_components = 0; static void **file_changes_per_component = NULL; static void *legacy_file_changes = NULL; bool pool_havedereferenced = false; bool pool_havedeleted = false; #define pl_ADDED 1 #define pl_UNREFERENCED 2 #define pl_DELETED 4 static int legacy_compare(const void *a, const void *b) { const char *v1 = a, *v2 = b; v1++; v2++; return strcmp(v1, v2); } struct source_node { void *file_changes; char sourcename[]; }; static int source_node_compare(const void *a, const void *b) { const struct source_node *v1 = a, *v2 = b; return strcmp(v1->sourcename, v2->sourcename); } static retvalue split_filekey(const char *filekey, /*@out@*/component_t *component_p, /*@out@*/struct source_node **node_p, /*@out@*/const char **basename_p) { const char *p, *lastp, *source; struct source_node *node; component_t c; if (unlikely(memcmp(filekey, "pool/", 5) != 0)) return RET_NOTHING; lastp = filekey + 4; filekey = lastp + 1; /* components can include slashes, so look for the first valid component * followed by something looking like a proper directory. 
* This might missdetect the component, but as it only is used for * the current run it will hopefully always detect the same place * (and all that is important is that it is the same place) */ while (true) { p = strchr(lastp + 1, '/'); if (unlikely(p == NULL)) return RET_NOTHING; lastp = p; c = component_find_l(filekey, (size_t)(p - filekey)); if (unlikely(!atom_defined(c))) continue; p++; if (p[0] != '\0' && p[1] == '/' && p[0] != '/' && p[2] == p[0]) { p += 2; if (unlikely(p[0] == 'l' && p[1] == 'i' && p[2] == 'b')) continue; source = p; break; } else if (p[0] == 'l' && p[1] == 'i' && p[2] == 'b' && p[3] != '\0' && p[4] == '/' && p[5] == 'l' && p[6] == 'i' && p[7] == 'b' && p[3] != '/' && p[8] == p[3]) { source = p + 5; break; } else continue; } p = strchr(source, '/'); if (unlikely(p == NULL)) return RET_NOTHING; node = malloc(sizeof(struct source_node) + (p - source) + 1); if (FAILEDTOALLOC(node)) return RET_ERROR_OOM; node->file_changes = NULL; memcpy(node->sourcename, source, p - source); node->sourcename[p - source] = '\0'; p++; *basename_p = p; *node_p = node; *component_p = c; return RET_OK; } /* name can be either basename (in a source directory) or a full * filekey (in legacy fallback mode) */ static retvalue remember_name(void **root_p, const char *name, char mode, char mode_and) { char **p; p = tsearch(name - 1, root_p, legacy_compare); if (FAILEDTOALLOC(p)) return RET_ERROR_OOM; if (*p == name - 1) { size_t l = strlen(name); *p = malloc(l + 2); if (FAILEDTOALLOC(*p)) return RET_ERROR_OOM; **p = mode; memcpy((*p) + 1, name, l + 1); } else { **p &= mode_and; **p |= mode; } return RET_OK; } static retvalue remember_filekey(const char *filekey, char mode, char mode_and) { retvalue r; component_t c; struct source_node *node, **found; const char *basefilename; r = split_filekey(filekey, &c, &node, &basefilename); if (RET_WAS_ERROR(r)) return r; if (r == RET_OK) { assert (atom_defined(c)); if (c > reserved_components) { void ** h; assert (c <= 
components_count()); h = realloc(file_changes_per_component, sizeof(void*) * (c + 1)); if (FAILEDTOALLOC(h)) return RET_ERROR_OOM; file_changes_per_component = h; while (reserved_components < c) { h[++reserved_components] = NULL; } } assert (file_changes_per_component != NULL); found = tsearch(node, &file_changes_per_component[c], source_node_compare); if (FAILEDTOALLOC(found)) return RET_ERROR_OOM; if (*found != node) { free(node); node = *found; } return remember_name(&node->file_changes, basefilename, mode, mode_and); } fprintf(stderr, "Warning: strange filekey '%s'!\n", filekey); return remember_name(&legacy_file_changes, filekey, mode, mode_and); } retvalue pool_dereferenced(const char *filekey) { pool_havedereferenced = true; return remember_filekey(filekey, pl_UNREFERENCED, 0xFF); }; retvalue pool_markadded(const char *filekey) { return remember_filekey(filekey, pl_ADDED, ~pl_DELETED); }; /* so much code, just for the case the morguedir is on an other partition than * the pool dir... 
*/ static inline retvalue copyfile(const char *source, const char *destination, int outfd, off_t length) { int infd, err; ssize_t readbytes; void *buffer; size_t bufsize = 1024*1024; buffer = malloc(bufsize); if (FAILEDTOALLOC(buffer)) { (void)close(outfd); (void)unlink(destination); bufsize = 16*1024; buffer = malloc(bufsize); if (FAILEDTOALLOC(buffer)) return RET_ERROR_OOM; } infd = open(source, O_RDONLY|O_NOCTTY); if (infd < 0) { int en = errno; fprintf(stderr, "error %d opening file %s to be copied into the morgue: %s\n", en, source, strerror(en)); free(buffer); (void)close(outfd); (void)unlink(destination); return RET_ERRNO(en); } while ((readbytes = read(infd, buffer, bufsize)) > 0) { const char *start = buffer; if ((off_t)readbytes > length) { fprintf(stderr, "Mismatch of sizes of '%s': files is larger than expected!\n", destination); break; } while (readbytes > 0) { ssize_t written; written = write(outfd, start, readbytes); if (written > 0) { assert (written <= readbytes); readbytes -= written; start += written; } else if (written < 0) { int en = errno; (void)close(infd); (void)close(outfd); (void)unlink(destination); free(buffer); fprintf(stderr, "error %d writing to morgue file %s: %s\n", en, destination, strerror(en)); return RET_ERRNO(en); } } } free(buffer); if (readbytes == 0) { err = close(infd); if (err != 0) readbytes = -1; infd = -1; } if (readbytes < 0) { int en = errno; fprintf(stderr, "error %d reading file %s to be copied into the morgue: %s\n", en, source, strerror(en)); if (infd >= 0) (void)close(infd); (void)close(outfd); (void)unlink(destination); return RET_ERRNO(en); } if (infd >= 0) (void)close(infd); err = close(outfd); if (err != 0) { int en = errno; fprintf(stderr, "error %d writing to morgue file %s: %s\n", en, destination, strerror(en)); (void)unlink(destination); return RET_ERRNO(en); } return RET_OK; } static inline retvalue morgue_name(const char *filekey, char **name_p, int *fd_p) { const char *name = dirs_basename(filekey); 
char *firsttry = calc_dirconcat(global.morguedir, name); int fd, en, number; retvalue r; if (FAILEDTOALLOC(firsttry)) return RET_ERROR_OOM; fd = open(firsttry, O_WRONLY|O_CREAT|O_EXCL|O_NOCTTY, 0666); if (fd >= 0) { assert (fd > 2); *name_p = firsttry; *fd_p = fd; return RET_OK; } en = errno; if (en == ENOENT) { r = dirs_make_recursive(global.morguedir); if (RET_WAS_ERROR(r)) { free(firsttry); return r; } /* try again */ fd = open(firsttry, O_WRONLY|O_CREAT|O_EXCL|O_NOCTTY, 0666); if (fd >= 0) { assert (fd > 2); *name_p = firsttry; *fd_p = fd; return RET_OK; } en = errno; } if (en != EEXIST) { fprintf(stderr, "error %d creating morgue-file %s: %s\n", en, firsttry, strerror(en)); free(firsttry); return RET_ERRNO(en); } /* file exists, try names with -number appended: */ for (number = 1 ; number < 1000 ; number++) { char *try = mprintf("%s-%d", firsttry, number); if (FAILEDTOALLOC(try)) { free(firsttry); return RET_ERROR_OOM; } fd = open(try, O_WRONLY|O_CREAT|O_EXCL|O_NOCTTY, 0666); if (fd >= 0) { assert (fd > 2); free(firsttry); *name_p = try; *fd_p = fd; return RET_OK; } free(try); } free(firsttry); fprintf(stderr, "Could not create a new file '%s' in morguedir '%s'!\n", name, global.morguedir); return RET_ERROR; } /* if file not there, return RET_NOTHING */ static inline retvalue movefiletomorgue(const char *filekey, const char *filename, bool new) { char *morguefilename = NULL; int err; retvalue r; if (!new && global.morguedir != NULL) { int morguefd = -1; struct stat s; r = morgue_name(filekey, &morguefilename, &morguefd); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; err = lstat(filename, &s); if (err != 0) { int en = errno; if (errno == ENOENT) { (void)close(morguefd); (void)unlink(morguefilename); free(morguefilename); return RET_NOTHING; } fprintf(stderr, "error %d looking at file %s to be moved into the morgue: %s\n", en, filename, strerror(en)); (void)close(morguefd); (void)unlink(morguefilename); free(morguefilename); return RET_ERRNO(en); } 
if (S_ISLNK(s.st_mode)) { /* no need to copy a symbolic link: */ (void)close(morguefd); (void)unlink(morguefilename); free(morguefilename); morguefilename = NULL; } else if (S_ISREG(s.st_mode)) { err = rename(filename, morguefilename); if (err == 0) { (void)close(morguefd); free(morguefilename); return RET_OK; } r = copyfile(filename, morguefilename, morguefd, s.st_size); if (RET_WAS_ERROR(r)) { free(morguefilename); return r; } } else { fprintf(stderr, "Strange (non-regular) file '%s' in the pool.\nPlease delete manually!\n", filename); (void)close(morguefd); (void)unlink(morguefilename); free(morguefilename); morguefilename = NULL; return RET_ERROR; } } err = unlink(filename); if (err != 0) { int en = errno; if (errno == ENOENT) return RET_NOTHING; fprintf(stderr, "error %d while unlinking %s: %s\n", en, filename, strerror(en)); if (morguefilename != NULL) { (void)unlink(morguefilename); free(morguefilename); } return RET_ERRNO(en); } else { free(morguefilename); return RET_OK; } } /* delete the file and possible parent directories, * if not new and morguedir set, first move/copy there */ static retvalue deletepoolfile(const char *filekey, bool new) { char *filename; retvalue r; if (interrupted()) return RET_ERROR_INTERRUPTED; if (!new) outhook_send("POOLDELETE", filekey, NULL, NULL); filename = files_calcfullfilename(filekey); if (FAILEDTOALLOC(filename)) return RET_ERROR_OOM; /* move to morgue or simply delete: */ r = movefiletomorgue(filekey, filename, new); if (r == RET_NOTHING) { fprintf(stderr, "%s not found, forgetting anyway\n", filename); } if (!RET_IS_OK(r)) { free(filename); return r; } if (!global.keepdirectories) { /* try to delete parent directories, until one gives * errors (hopefully because it still contains files) */ size_t fixedpartlen = strlen(global.outdir); char *p; int err, en; while ((p = strrchr(filename, '/')) != NULL) { /* do not try to remove parts of the mirrordir */ if ((size_t)(p-filename) <= fixedpartlen+1) break; *p ='\0'; /* try 
to rmdir the directory, this will * fail if there are still other files or directories * in it: */ err = rmdir(filename); if (err == 0) { if (verbose > 1) { printf( "removed now empty directory %s\n", filename); } } else { en = errno; if (en != ENOTEMPTY) { //TODO: check here if only some //other error was first and it //is not empty so we do not have //to remove it anyway... fprintf(stderr, "ignoring error %d trying to rmdir %s: %s\n", en, filename, strerror(en)); } /* parent directories will contain this one * thus not be empty, in other words: * everything's done */ break; } } } free(filename); return RET_OK; } retvalue pool_delete(const char *filekey) { retvalue r; if (verbose >= 1) printf("deleting and forgetting %s\n", filekey); r = deletepoolfile(filekey, false); if (RET_WAS_ERROR(r)) return r; return files_remove(filekey); } /* called from files_remove: */ retvalue pool_markdeleted(const char *filekey) { pool_havedeleted = true; return remember_filekey(filekey, pl_DELETED, ~pl_UNREFERENCED); }; /* libc's twalk misses a callback_data pointer, so we need some temporary * global variables: */ static retvalue result; static bool first, onlycount; static long woulddelete_count; static component_t current_component; static const char *sourcename = NULL; static void removeifunreferenced(const void *nodep, const VISIT which, UNUSED(const int depth)) { char *node; const char *filekey; retvalue r; if (which != leaf && which != postorder) return; if (interrupted()) return; node = *(char **)nodep; filekey = node + 1; if ((*node & pl_UNREFERENCED) == 0) return; r = references_isused(filekey); if (r != RET_NOTHING) return; if (onlycount) { woulddelete_count++; return; } if (verbose >= 0 && first) { printf("Deleting files no longer referenced...\n"); first = false; } if (verbose >= 1) printf("deleting and forgetting %s\n", filekey); r = deletepoolfile(filekey, (*node & pl_ADDED) != 0); RET_UPDATE(result, r); if (!RET_WAS_ERROR(r)) { r = files_removesilent(filekey); 
RET_UPDATE(result, r); if (!RET_WAS_ERROR(r)) *node &= ~pl_UNREFERENCED; if (RET_IS_OK(r)) *node |= pl_DELETED; } } static void removeifunreferenced2(const void *nodep, const VISIT which, UNUSED(const int depth)) { char *node; char *filekey; retvalue r; if (which != leaf && which != postorder) return; if (interrupted()) return; node = *(char **)nodep; if ((*node & pl_UNREFERENCED) == 0) return; filekey = calc_filekey(current_component, sourcename, node + 1); r = references_isused(filekey); if (r != RET_NOTHING) { free(filekey); return; } if (onlycount) { woulddelete_count++; free(filekey); return; } if (verbose >= 0 && first) { printf("Deleting files no longer referenced...\n"); first = false; } if (verbose >= 1) printf("deleting and forgetting %s\n", filekey); r = deletepoolfile(filekey, (*node & pl_ADDED) != 0); RET_UPDATE(result, r); if (!RET_WAS_ERROR(r)) { r = files_removesilent(filekey); RET_UPDATE(result, r); if (!RET_WAS_ERROR(r)) *node &= ~pl_UNREFERENCED; if (RET_IS_OK(r)) *node |= pl_DELETED; } RET_UPDATE(result, r); free(filekey); } static void removeunreferenced_from_component(const void *nodep, const VISIT which, UNUSED(const int depth)) { struct source_node *node; if (which != leaf && which != postorder) return; if (interrupted()) return; node = *(struct source_node **)nodep; sourcename = node->sourcename; twalk(node->file_changes, removeifunreferenced2); } retvalue pool_removeunreferenced(bool delete) { component_t c; if (!delete && verbose <= 0) return RET_NOTHING; result = RET_NOTHING; first = true; onlycount = !delete; woulddelete_count = 0; for (c = 1 ; c <= reserved_components ; c++) { assert (file_changes_per_component != NULL); current_component = c; twalk(file_changes_per_component[c], removeunreferenced_from_component); } twalk(legacy_file_changes, removeifunreferenced); if (interrupted()) result = RET_ERROR_INTERRUPTED; if (!delete && woulddelete_count > 0) { printf( "%lu files lost their last reference.\n" "(dumpunreferenced lists such 
files, use deleteunreferenced to delete them.)\n", woulddelete_count); } return result; } static void removeunusednew(const void *nodep, const VISIT which, UNUSED(const int depth)) { char *node; const char *filekey; retvalue r; if (which != leaf && which != postorder) return; if (interrupted()) return; node = *(char **)nodep; filekey = node + 1; /* only look at newly added and not already deleted */ if ((*node & (pl_ADDED|pl_DELETED)) != pl_ADDED) return; r = references_isused(filekey); if (r != RET_NOTHING) return; if (onlycount) { woulddelete_count++; return; } if (verbose >= 0 && first) { printf( "Deleting files just added to the pool but not used.\n" "(to avoid use --keepunusednewfiles next time)\n"); first = false; } if (verbose >= 1) printf("deleting and forgetting %s\n", filekey); r = deletepoolfile(filekey, true); RET_UPDATE(result, r); if (!RET_WAS_ERROR(r)) { r = files_removesilent(filekey); RET_UPDATE(result, r); /* don't remove pl_ADDED here, otherwise the hook * script will be told to remove something not added */ if (!RET_WAS_ERROR(r)) *node &= ~pl_UNREFERENCED; if (RET_IS_OK(r)) *node |= pl_DELETED; } } static void removeunusednew2(const void *nodep, const VISIT which, UNUSED(const int depth)) { char *node; char *filekey; retvalue r; if (which != leaf && which != postorder) return; if (interrupted()) return; node = *(char **)nodep; /* only look at newly added and not already deleted */ if ((*node & (pl_ADDED|pl_DELETED)) != pl_ADDED) return; filekey = calc_filekey(current_component, sourcename, node + 1); r = references_isused(filekey); if (r != RET_NOTHING) { free(filekey); return; } if (onlycount) { woulddelete_count++; free(filekey); return; } if (verbose >= 0 && first) { printf( "Deleting files just added to the pool but not used.\n" "(to avoid use --keepunusednewfiles next time)\n"); first = false; } if (verbose >= 1) printf("deleting and forgetting %s\n", filekey); r = deletepoolfile(filekey, true); RET_UPDATE(result, r); if (!RET_WAS_ERROR(r)) 
{ r = files_removesilent(filekey); RET_UPDATE(result, r); /* don't remove pl_ADDED here, otherwise the hook * script will be told to remove something not added */ if (!RET_WAS_ERROR(r)) *node &= ~pl_UNREFERENCED; if (RET_IS_OK(r)) *node |= pl_DELETED; } RET_UPDATE(result, r); free(filekey); } static void removeunusednew_from_component(const void *nodep, const VISIT which, UNUSED(const int depth)) { struct source_node *node; if (which != leaf && which != postorder) return; if (interrupted()) return; node = *(struct source_node **)nodep; sourcename = node->sourcename; twalk(node->file_changes, removeunusednew2); } void pool_tidyadded(bool delete) { component_t c; if (!delete && verbose < 0) return; result = RET_NOTHING; first = true; onlycount = !delete; woulddelete_count = 0; for (c = 1 ; c <= reserved_components ; c++) { assert (file_changes_per_component != NULL); current_component = c; twalk(file_changes_per_component[c], removeunusednew_from_component); } // this should not really happen at all, but better safe then sorry: twalk(legacy_file_changes, removeunusednew); if (!delete && woulddelete_count > 0) { printf( "%lu files were added but not used.\n" "The next deleteunreferenced call will delete them.\n", woulddelete_count); } return; } static void reportnewlegacyfiles(const void *nodep, const VISIT which, UNUSED(const int depth)) { char *node; if (which != leaf && which != postorder) return; node = *(char **)nodep; /* only look at newly added and not already deleted */ if ((*node & (pl_ADDED|pl_DELETED)) != pl_ADDED) return; outhook_sendpool(atom_unknown, NULL, node + 1); } static void reportnewproperfiles(const void *nodep, const VISIT which, UNUSED(const int depth)) { char *node; if (which != leaf && which != postorder) return; node = *(char **)nodep; /* only look at newly added and not already deleted */ if ((*node & (pl_ADDED|pl_DELETED)) != pl_ADDED) return; outhook_sendpool(current_component, sourcename, node + 1); } static void reportnewfiles(const 
void *nodep, const VISIT which, UNUSED(const int depth)) { struct source_node *node; if (which != leaf && which != postorder) return; node = *(struct source_node **)nodep; sourcename = node->sourcename; twalk(node->file_changes, reportnewproperfiles); } void pool_sendnewfiles(void) { component_t c; for (c = 1 ; c <= reserved_components ; c++) { assert (file_changes_per_component != NULL); current_component = c; twalk(file_changes_per_component[c], reportnewfiles); } twalk(legacy_file_changes, reportnewlegacyfiles); return; } #ifdef HAVE_TDESTROY static void sourcename_free(void *n) { struct source_node *node = n; tdestroy(node->file_changes, free); free(node); } #endif void pool_free(void) { #ifdef HAVE_TDESTROY component_t c; for (c = 1 ; c <= reserved_components ; c++) { tdestroy(file_changes_per_component[c], sourcename_free); } reserved_components = 0; free(file_changes_per_component); file_changes_per_component = NULL; tdestroy(legacy_file_changes, free); legacy_file_changes = NULL; #endif } reprepro-4.13.1/archallflood.h0000644000175100017510000000044712152651661013176 00000000000000#ifndef REPREPRO_ARCHALLFLOOD_H #define REPREPRO_ARCHALLFLOOD_H retvalue flood(struct distribution *, /*@null@*/const struct atomlist * /*components*/, /*@NULL@*/const struct atomlist * /*architectures*/, /*@NULL@*/const struct atomlist * /*packagetypes*/, architecture_t, trackingdb); #endif reprepro-4.13.1/remoterepository.c0000644000175100017510000016473612152651661014206 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003,2004,2005,2007,2008,2009,2012 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include "globals.h" #include "error.h" #include "ignore.h" #include "filecntl.h" #include "checksums.h" #include "mprintf.h" #include "dirs.h" #include "chunks.h" #include "names.h" #include "aptmethod.h" #include "signature.h" #include "readtextfile.h" #include "uncompression.h" #include "diffindex.h" #include "rredpatch.h" #include "remoterepository.h" /* This is code to handle lists from remote repositories. Those are stored in the lists/ (or --listdir) directory and needs some maintaince: - cleaning (unneeded) lists from that directory - deciding what to download from a remote repository (needs knowledge what is there and what is there) - in the future: implement ed to use remote .diffs */ struct remote_repository { struct remote_repository *next, *prev; /* repository is determined by pattern name currently. * That might change if there is some safe way to combine * some. (note that method options might make equally looking * repositories different ones, so that is hard to decide). * * This is possible as pattern is not modifyable in options * or method by the using distribution. */ const char *name; const char *method; const char *fallback; const struct strlist *config; struct aptmethod *download; struct remote_distribution *distributions; }; static struct remote_repository *repositories = NULL; struct remote_distribution { struct remote_distribution *next; /* repository and suite uniquely identify it, as the only thing the distribution can change is the suite. Currently most of the other fields would also fit in the remote_repository structure, but I plan to add new patters allowing this by distribution... 
*/ struct remote_repository *repository; char *suite; /* flat repository */ bool flat; bool flatnonflatwarned; char *suite_base_dir; /* if true, do not download or check Release file */ bool ignorerelease; /* hashes to ignore */ bool ignorehashes[cs_hashCOUNT]; /* linked list of key descriptions to check against, each must match */ struct signature_requirement *verify; /* local copy of InRelease, Release and Release.gpg file, * only set if available */ char *inreleasefile; char *releasefile; char *releasegpgfile; const char *usedreleasefile; /* filenames and checksums from the Release file */ struct checksumsarray remotefiles; /* the index files we need */ struct remote_index *indices; /* InRelease failed or requested not to be used */ bool noinrelease; }; struct remote_index { /* next index in remote distribution */ struct remote_index *next; struct remote_distribution *from; /* what to download? .gz better than .bz2? and so on */ struct encoding_preferences downloadas; /* remote filename as to be found in Release file*/ char *filename_in_release; /* the name without suffix in the lists/ dir */ char *cachefilename; /* the basename of the above */ const char *cachebasename; /* index in checksums for the different types, -1 = not avail */ int ofs[c_COUNT], diff_ofs; /* index in requested download methods so we can continue later */ int lasttriedencoding; /* the compression to be tried currently */ enum compression compression; /* the old uncompressed file, so that it is only deleted * when needed, to avoid losing it for a patch run */ /*@dependant@*/struct cachedlistfile *olduncompressed; struct checksums *oldchecksums; /* if using pdiffs, the content of the Packages.diff/Index: */ struct diffindex *diffindex; /* the last patch queued to be applied */ char *patchfilename; /*@dependant@*/const struct diffindex_patch *selectedpatch; bool deletecompressedpatch; bool queued; bool needed; bool got; }; #define MAXPARTS 5 struct cachedlistfile { struct cachedlistfile 
*next; const char *basefilename; unsigned int partcount; const char *parts[MAXPARTS]; /* might be used by some rule */ bool needed, deleted; char fullfilename[]; }; static void remote_index_free(/*@only@*/struct remote_index *i) { if (i == NULL) return; free(i->cachefilename); free(i->patchfilename); free(i->filename_in_release); diffindex_free(i->diffindex); checksums_free(i->oldchecksums); free(i); } static void remote_distribution_free(/*@only@*/struct remote_distribution *d) { if (d == NULL) return; free(d->suite); signature_requirements_free(d->verify); free(d->inreleasefile); free(d->releasefile); free(d->releasegpgfile); free(d->suite_base_dir); checksumsarray_done(&d->remotefiles); while (d->indices != NULL) { struct remote_index *h = d->indices; d->indices = h->next; remote_index_free(h); } free(d); } void remote_repository_free(struct remote_repository *remote) { if (remote == NULL) return; while (remote->distributions != NULL) { struct remote_distribution *h = remote->distributions; remote->distributions = h->next; remote_distribution_free(h); } if (remote->next != NULL) remote->next->prev = remote->prev; if (remote->prev != NULL) remote->prev->next = remote->next; free(remote); return; } void cachedlistfile_freelist(struct cachedlistfile *c) { while (c != NULL) { struct cachedlistfile *n = c->next; free(c); c = n; } } void cachedlistfile_deleteunneeded(const struct cachedlistfile *c) { for (; c != NULL ; c = c->next) { if (c->needed) continue; if (verbose >= 0) printf("deleting %s\n", c->fullfilename); deletefile(c->fullfilename); } } static /*@null@*/ struct cachedlistfile *cachedlistfile_new(const char *basefilename, size_t len, size_t listdirlen) { struct cachedlistfile *c; size_t l; char *p; char ch; c = malloc(sizeof(struct cachedlistfile) + listdirlen + 2*len + 3); if (FAILEDTOALLOC(c)) return NULL; c->next = NULL; c->needed = false; c->deleted = false; p = c->fullfilename; assert ((size_t)(p - (char*)c) <= sizeof(struct cachedlistfile)); 
memcpy(p, global.listdir, listdirlen); p += listdirlen; *(p++) = '/'; assert ((size_t)(p - c->fullfilename) == listdirlen + 1); c->basefilename = p; memcpy(p, basefilename, len); p += len; *(p++) = '\0'; assert ((size_t)(p - c->fullfilename) == listdirlen + len + 2); c->parts[0] = p; c->partcount = 1; l = len; while (l-- > 0 && (ch = *(basefilename++)) != '\0') { if (ch == '_') { *(p++) = '\0'; if (c->partcount < MAXPARTS) c->parts[c->partcount] = p; c->partcount++; } else if (ch == '%') { char first, second; if (len <= 1) { c->partcount = 0; return c; } first = *(basefilename++); second = *(basefilename++); if (first >= '0' && first <= '9') *p = (first - '0') << 4; else if (first >= 'a' && first <= 'f') *p = (first - 'a' + 10) << 4; else { c->partcount = 0; return c; } if (second >= '0' && second <= '9') *p |= (second - '0'); else if (second >= 'a' && second <= 'f') *p |= (second - 'a' + 10); else { c->partcount = 0; return c; } p++; } else *(p++) = ch; } *(p++) = '\0'; assert ((size_t)(p - c->fullfilename) <= listdirlen + 2*len + 3); return c; } retvalue cachedlists_scandir(/*@out@*/struct cachedlistfile **cachedfiles_p) { struct cachedlistfile *cachedfiles = NULL, **next_p; struct dirent *r; size_t listdirlen = strlen(global.listdir); DIR *dir; // TODO: check if it is always created before... dir = opendir(global.listdir); if (dir == NULL) { int e = errno; fprintf(stderr, "Error %d opening directory '%s': %s!\n", e, global.listdir, strerror(e)); return RET_ERRNO(e); } next_p = &cachedfiles; while (true) { size_t namelen; int e; errno = 0; r = readdir(dir); if (r == NULL) { e = errno; if (e == 0) break; /* this should not happen... */ e = errno; fprintf(stderr, "Error %d reading dir '%s': %s!\n", e, global.listdir, strerror(e)); (void)closedir(dir); cachedlistfile_freelist(cachedfiles); return RET_ERRNO(e); } namelen = _D_EXACT_NAMLEN(r); if (namelen == 1 && r->d_name[0] == '.') continue; if (namelen == 2 && r->d_name[0] == '.' 
&& r->d_name[1] == '.') continue; *next_p = cachedlistfile_new(r->d_name, namelen, listdirlen); if (FAILEDTOALLOC(*next_p)) { (void)closedir(dir); cachedlistfile_freelist(cachedfiles); return RET_ERROR_OOM; } next_p = &(*next_p)->next; } if (closedir(dir) != 0) { int e = errno; fprintf(stderr, "Error %d closing directory '%s': %s!\n", e, global.listdir, strerror(e)); cachedlistfile_freelist(cachedfiles); return RET_ERRNO(e); } *cachedfiles_p = cachedfiles; return RET_OK; } static retvalue cachedlistfile_delete(struct cachedlistfile *old) { int e; if (old->deleted) return RET_OK; e = deletefile(old->fullfilename); if (e != 0) return RET_ERRNO(e); old->deleted = true; return RET_OK; } struct remote_repository *remote_repository_prepare(const char *name, const char *method, const char *fallback, const struct strlist *config) { struct remote_repository *n; /* calling code ensures no two with the same name are created, * so just create it... */ n = zNEW(struct remote_repository); if (FAILEDTOALLOC(n)) return NULL; n->name = name; n->method = method; n->fallback = fallback; n->config = config; n->next = repositories; if (n->next != NULL) n->next->prev = n; repositories = n; return n; } /* This escaping is quite harsh, but so nothing bad can happen... 
 */
/* Number of bytes escapedcopy() will emit for p: a leading '-' and every
 * character outside [A-Za-z0-9-] cost three bytes (%XX), the rest one. */
static inline size_t escapedlen(const char *p) {
	size_t l = 0;

	if (*p == '-') {
		l = 3;
		p++;
	}
	while (*p != '\0') {
		if ((*p < 'A' || *p > 'Z') && (*p < 'a' || *p > 'z')
				&& (*p < '0' || *p > '9') && *p != '-')
			l += 3;
		else
			l++;
		p++;
	}
	return l;
}

/* Copy orig to dest, %XX-escaping a leading '-' and everything outside
 * [A-Za-z0-9-].  Returns the advanced dest pointer; does NOT terminate
 * the output (callers append separators/terminators themselves). */
static inline char *escapedcopy(char *dest, const char *orig) {
	static char hex[16] = "0123456789ABCDEF";

	if (*orig == '-') {
		/* escape a leading '-' so generated names never look
		 * like options */
		orig++;
		*dest = '%'; dest++;
		*dest = '2'; dest++;
		*dest = 'D'; dest++;
	}
	while (*orig != '\0') {
		if ((*orig < 'A' || *orig > 'Z') && (*orig < 'a' || *orig > 'z')
				&& (*orig < '0' || *orig > '9')
				&& *orig != '-') {
			*dest = '%'; dest++;
			*dest = hex[(*orig >> 4) & 0xF]; dest++;
			*dest = hex[*orig & 0xF]; dest++;
		} else {
			*dest = *orig;
			dest++;
		}
		orig++;
	}
	return dest;
}

/* Build "<listdir>/<escaped field>_<escaped field>_..._<type>" from 'count'
 * varargs fields (NULL-sentinel terminated).  With type == NULL the trailing
 * '_' is replaced by the terminator instead.  Returns a malloc'd string or
 * NULL on allocation failure; caller frees. */
char *genlistsfilename(const char *type, unsigned int count, ...) {
	const char *fields[count];
	unsigned int i;
	size_t listdir_len, type_len, len;
	char *result, *p;
	va_list ap;

	len = 0;
	va_start(ap, count);
	for (i = 0 ; i < count ; i++) {
		fields[i] = va_arg(ap, const char*);
		assert (fields[i] != NULL);
		len += escapedlen(fields[i]) + 1;
	}
	/* check sentinel */
	assert (va_arg(ap, const char*) == NULL);
	va_end(ap);
	listdir_len = strlen(global.listdir);
	if (type != NULL)
		type_len = strlen(type);
	else
		type_len = 0;

	result = malloc(listdir_len + type_len + len + 2);
	if (FAILEDTOALLOC(result))
		return NULL;
	memcpy(result, global.listdir, listdir_len);
	p = result + listdir_len;
	*(p++) = '/';
	for (i = 0 ; i < count ; i++) {
		p = escapedcopy(p, fields[i]);
		*(p++) = '_';
	}
	assert ((size_t)(p - result) == listdir_len + len + 1);
	if (type != NULL)
		memcpy(p, type, type_len + 1);
	else
		*(--p) = '\0'; /* drop the last '_' when there is no type */
	return result;
}

/* Mark all cached list files whose parts match the 'count' vararg fields
 * followed by 'type' as still needed (body follows). */
void cachedlistfile_need(struct cachedlistfile *list, const char *type, unsigned int count, ...)
{ struct cachedlistfile *file; const char *fields[count]; unsigned int i; va_list ap; va_start(ap, count); for (i = 0 ; i < count ; i++) { fields[i] = va_arg(ap, const char*); assert (fields[i] != NULL); } /* check sentinel */ assert (va_arg(ap, const char*) == NULL); va_end(ap); for (file = list ; file != NULL ; file = file->next) { if (file->partcount != count + 1) continue; i = 0; while (i < count && strcmp(file->parts[i], fields[i]) == 0) i++; if (i < count) continue; if (strcmp(type, file->parts[i]) != 0) continue; file->needed = true; } } retvalue remote_distribution_prepare(struct remote_repository *repository, const char *suite, bool ignorerelease, bool getinrelease, const char *verifyrelease, bool flat, bool *ignorehashes, struct remote_distribution **out_p) { struct remote_distribution *n, **last; enum checksumtype cs; for (last = &repository->distributions ; (n = *last) != NULL ; last = &n->next) { if (strcmp(n->suite, suite) != 0) continue; if (n->flat != flat) { if (verbose >= 0 && !n->flatnonflatwarned && !IGNORABLE(flatandnonflat)) fprintf(stderr, "Warning: From the same remote repository '%s', distribution '%s'\n" "is requested both flat and non-flat. While this is possible\n" "(having %s/dists/%s and %s/%s), it is unlikely.\n" "To no longer see this message, use --ignore=flatandnonflat.\n", repository->method, suite, repository->method, suite, repository->method, suite); n->flatnonflatwarned = true; continue; } break; } if (*last != NULL) { n = *last; assert (n->flat == flat); if ((n->ignorerelease && !ignorerelease) || (!n->ignorerelease && ignorerelease)) { // TODO a hint which two are at fault would be nice, // but how to get the information... if (verbose >= 0) fprintf(stderr, "Warning: I was told to both ignore Release files for Suite '%s'\n" "from remote repository '%s' and to not ignore it. 
Going to not ignore!\n", suite, repository->name); n->ignorerelease = false; } if ((n->noinrelease && getinrelease) || (!n->noinrelease && !getinrelease)) { if (verbose >= 0) fprintf(stderr, "Warning: Conflicting GetInRelease values for Suite '%s'\n" "from remote repository '%s'. Resolving to get InRelease files!\n", suite, repository->name); n->noinrelease = false; } for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) { if ((n->ignorehashes[cs] && !ignorehashes[cs]) || (!n->ignorehashes[cs] && ignorehashes[cs])) { // TODO dito if (verbose >= 0) fprintf(stderr, "Warning: I was told to both ignore '%s' for Suite '%s'\n" "from remote repository '%s' and to not ignore it. Going to not ignore!\n", suite, release_checksum_names[cs], repository->name); n->ignorehashes[cs] = false; } } if (verifyrelease != NULL) { retvalue r; r = signature_requirement_add(&n->verify, verifyrelease); if (RET_WAS_ERROR(r)) return r; } *out_p = n; return RET_OK; } n = zNEW(struct remote_distribution); if (FAILEDTOALLOC(n)) return RET_ERROR_OOM; n->repository = repository; n->suite = strdup(suite); n->ignorerelease = ignorerelease; n->noinrelease = !getinrelease; if (verifyrelease != NULL) { retvalue r; r = signature_requirement_add(&n->verify, verifyrelease); if (RET_WAS_ERROR(r)) { remote_distribution_free(n); return r; } } memcpy(n->ignorehashes, ignorehashes, sizeof(bool [cs_hashCOUNT])); n->flat = flat; if (flat) n->suite_base_dir = strdup(suite); else n->suite_base_dir = calc_dirconcat("dists", suite); if (FAILEDTOALLOC(n->suite) || FAILEDTOALLOC(n->suite_base_dir)) { remote_distribution_free(n); return RET_ERROR_OOM; } /* ignorerelease can be unset later, so always calculate the filename */ if (flat) n->inreleasefile = genlistsfilename("InRelease", 3, repository->name, suite, "flat", ENDOFARGUMENTS); else n->inreleasefile = genlistsfilename("InRelease", 2, repository->name, suite, ENDOFARGUMENTS); if (FAILEDTOALLOC(n->inreleasefile)) { remote_distribution_free(n); return RET_ERROR_OOM; } 
if (flat) n->releasefile = genlistsfilename("Release", 3, repository->name, suite, "flat", ENDOFARGUMENTS); else n->releasefile = genlistsfilename("Release", 2, repository->name, suite, ENDOFARGUMENTS); if (FAILEDTOALLOC(n->releasefile)) { remote_distribution_free(n); return RET_ERROR_OOM; } n->releasegpgfile = calc_addsuffix(n->releasefile, "gpg"); if (FAILEDTOALLOC(n->releasefile)) { remote_distribution_free(n); return RET_ERROR_OOM; } *last = n; *out_p = n; return RET_OK; } static retvalue copytoplace(const char *gotfilename, const char *wantedfilename, const char *method, struct checksums **checksums_p) { retvalue r; struct checksums *checksums = NULL; /* if the file is somewhere else, copy it: */ if (strcmp(gotfilename, wantedfilename) != 0) { /* never link index files, but copy them */ if (verbose > 1) fprintf(stderr, "Copy file '%s' to '%s'...\n", gotfilename, wantedfilename); r = checksums_copyfile(wantedfilename, gotfilename, false, &checksums); if (r == RET_ERROR_EXIST) { fprintf(stderr, "Unexpected error: '%s' exists while it should not!\n", wantedfilename); } if (r == RET_NOTHING) { fprintf(stderr, "Cannot open '%s', obtained from '%s' method.\n", gotfilename, method); r = RET_ERROR_MISSING; } if (RET_WAS_ERROR(r)) { return r; } } if (checksums_p == NULL) checksums_free(checksums); else *checksums_p = checksums; return RET_OK; } static retvalue enqueue_old_release_files(struct remote_distribution *d); /* handle a downloaded Release or Release.gpg file: * no checksums to test, nothing to trigger, as they have to be all * read at once to decide what is new and what actually needs downloading */ static retvalue release_callback(enum queue_action action, void *privdata, void *privdata2, UNUSED(const char *uri), const char *gotfilename, const char *wantedfilename, UNUSED(/*@null@*/const struct checksums *checksums), const char *methodname) { struct remote_distribution *d = privdata; retvalue r; /* if the InRelease file cannot be got, * try Release (and 
Release.gpg if checking) instead */
	if (action == qa_error && privdata2 == d->inreleasefile) {
		assert (!d->noinrelease);
		return enqueue_old_release_files(d);
	}
	if (action != qa_got)
		return RET_ERROR;
	r = copytoplace(gotfilename, wantedfilename, methodname, NULL);
	if (RET_WAS_ERROR(r))
		return r;
	return r;
}

/* Fall back from InRelease to the separate Release (and Release.gpg when
 * signature verification is configured) downloads. */
static retvalue enqueue_old_release_files(struct remote_distribution *d) {
	retvalue r;

	d->noinrelease = true;
	r = aptmethod_enqueueindex(d->repository->download,
			d->suite_base_dir, "Release", "",
			d->releasefile, "", release_callback, d, NULL);
	if (RET_WAS_ERROR(r))
		return r;
	if (d->verify != NULL) {
		r = aptmethod_enqueueindex(d->repository->download,
				d->suite_base_dir, "Release", ".gpg",
				d->releasegpgfile, "",
				release_callback, d, NULL);
		if (RET_WAS_ERROR(r))
			return r;
	}
	return RET_OK;
}

/* Queue download of the meta lists (InRelease, or Release[.gpg]) for one
 * distribution, removing stale cached copies first.
 * Returns RET_NOTHING when Release files are ignored. */
static retvalue remote_distribution_enqueuemetalists(struct remote_distribution *d) {
	struct remote_repository *repository = d->repository;

	assert (repository->download != NULL);

	if (d->ignorerelease)
		return RET_NOTHING;

	/* remove old copies so a failed download cannot leave stale data */
	(void)unlink(d->inreleasefile);
	(void)unlink(d->releasefile);
	if (d->verify != NULL) {
		(void)unlink(d->releasegpgfile);
	}

	if (d->noinrelease)
		return enqueue_old_release_files(d);
	else
		return aptmethod_enqueueindex(repository->download,
				d->suite_base_dir, "InRelease", "",
				d->inreleasefile, "",
				release_callback, d, d->inreleasefile);
}

/* Start one apt method per registered remote repository. */
retvalue remote_startup(struct aptmethodrun *run) {
	struct remote_repository *rr;
	retvalue r;

	if (interrupted())
		return RET_ERROR_INTERRUPTED;

	for (rr = repositories ; rr != NULL ; rr = rr->next) {
		assert (rr->download == NULL);
		r = aptmethod_newmethod(run,
				rr->method, rr->fallback,
				rr->config, &rr->download);
		if (RET_WAS_ERROR(r))
			return r;
	}
	return RET_OK;
}

/* Locate ri's index file in the Release file's checksum list: record the
 * offsets of the plain file, each compressed variant and the .diff/Index. */
static void find_index(const struct strlist *files, struct remote_index *ri) {
	const char *filename = ri->filename_in_release;
	size_t len = strlen(filename);
	int i;
	enum compression c;

	for (i = 0 ; i < files->count ; i++) {
		const char *value = files->values[i];

		if (strncmp(value, filename, len) != 0)
			continue;
		value += len;
		if (*value == '\0') {
			/* exact match: the uncompressed index */
			ri->ofs[c_none] = i;
			continue;
		}
		if (*value != '.')
			continue;
		if (strcmp(value, ".diff/Index") == 0) {
			ri->diff_ofs = i;
			continue;
		}
		for (c = 0 ; c < c_COUNT ; c++)
			if (strcmp(value, uncompression_suffix[c]) == 0) {
				ri->ofs[c] = i;
				break;
			}
	}
}

/* get a strlist with the md5sums of a Release-file */
static inline retvalue release_getchecksums(const char *releasefile, const char *chunk, const bool ignorehash[cs_hashCOUNT], struct checksumsarray *out) {
	retvalue r;
	struct strlist files[cs_hashCOUNT];
	enum checksumtype cs;
	bool foundanything = false;

	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
		if (ignorehash[cs]) {
			strlist_init(&files[cs]);
			continue;
		}
		assert (release_checksum_names[cs] != NULL);
		r = chunk_getextralinelist(chunk,
				release_checksum_names[cs], &files[cs]);
		if (RET_WAS_ERROR(r)) {
			/* free the lists collected so far */
			while (cs-- > cs_md5sum) {
				strlist_done(&files[cs]);
			}
			return r;
		} else if (r == RET_NOTHING)
			strlist_init(&files[cs]);
		else
			foundanything = true;
	}
	if (!foundanything) {
		fprintf(stderr, "Missing checksums in Release file '%s'!\n",
				releasefile);
		return RET_ERROR;
	}
	r = checksumsarray_parse(out, files, releasefile);
	for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) {
		strlist_done(&files[cs]);
	}
	return r;
}

/* Verify and parse the downloaded (In)Release file of one distribution,
 * then look up each wanted index within its checksum lists. */
static retvalue process_remoterelease(struct remote_distribution *rd) {
	struct remote_repository *rr = rd->repository;
	struct remote_index *ri;
	retvalue r;
	char *releasedata;
	size_t releaselen;

	if (!rd->noinrelease) {
		/* clearsigned InRelease: verify and extract in one step */
		r = signature_check_inline(rd->verify, rd->inreleasefile,
				&releasedata);
		assert (r != RET_NOTHING);
		if (r == RET_NOTHING)
			r = RET_ERROR_BADSIG;
		if (r == RET_ERROR_BADSIG) {
			fprintf(stderr,
"Error: Not enough signatures found for remote repository %s (%s %s)!\n",
					rr->name, rr->method, rd->suite);
			r = RET_ERROR_BADSIG;
		}
		if (RET_WAS_ERROR(r))
			return r;
		rd->usedreleasefile = rd->inreleasefile;
	} else {
		r = readtextfile(rd->releasefile, rd->releasefile,
				&releasedata, &releaselen);
		assert (r != RET_NOTHING);
		if (RET_WAS_ERROR(r))
			return r;
		rd->usedreleasefile = rd->releasefile;
		if (rd->verify != NULL) {
			/* detached signature in Release.gpg */
			r = signature_check(rd->verify,
					rd->releasegpgfile,
					rd->releasefile,
					releasedata, releaselen);
			assert (r != RET_NOTHING);
			if (r == RET_NOTHING)
				r = RET_ERROR_BADSIG;
			if (r == RET_ERROR_BADSIG) {
				fprintf(stderr,
"Error: Not enough signatures found for remote repository %s (%s %s)!\n",
						rr->name, rr->method,
						rd->suite);
				r = RET_ERROR_BADSIG;
			}
			if (RET_WAS_ERROR(r)) {
				free(releasedata);
				return r;
			}
		}
	}
	r = release_getchecksums(rd->usedreleasefile, releasedata,
			rd->ignorehashes, &rd->remotefiles);
	free(releasedata);
	if (RET_WAS_ERROR(r))
		return r;
	/* Check for our files in there */
	for (ri = rd->indices ; ri != NULL ; ri = ri->next) {
		find_index(&rd->remotefiles.names, ri);
	}
	// TODO: move checking if not exists at all to here?
	return RET_OK;
}

/* Download (unless nodownload) and then process the meta lists of every
 * distribution of every registered repository. */
retvalue remote_preparemetalists(struct aptmethodrun *run, bool nodownload) {
	struct remote_repository *rr;
	struct remote_distribution *rd;
	retvalue r;

	if (!nodownload) {
		for (rr = repositories ; rr != NULL ; rr = rr->next) {
			for (rd = rr->distributions ; rd != NULL ;
					rd = rd->next) {
				r = remote_distribution_enqueuemetalists(rd);
				if (RET_WAS_ERROR(r))
					return r;
			}
		}
		r = aptmethod_download(run);
		if (RET_WAS_ERROR(r))
			return r;
	}

	for (rr = repositories ; rr != NULL ; rr = rr->next) {
		for (rd = rr->distributions ; rd != NULL ; rd = rd->next) {
			if (!rd->ignorerelease) {
				/* with --nolistsdownload fall back to an
				 * existing Release file when no InRelease
				 * was cached */
				if (nodownload)
					if (!isregularfile(rd->inreleasefile))
						rd->noinrelease = true;
				r = process_remoterelease(rd);
				if (RET_WAS_ERROR(r))
					return r;
			}
		}
	}
	return RET_OK;
}

/* Decide whether a cached index differs from the last "done" record,
 * i.e. whether its contents need reprocessing. */
bool remote_index_isnew(/*@null@*/const struct remote_index *ri, struct donefile *done) {
	const char *basefilename;
	struct checksums *checksums;
	bool hashes_missing, improves;

	/* files without uncompressed checksum cannot be tested */
	if (ri->ofs[c_none] < 0)
		return true;
	/* if not there or the wrong files comes next, then something
	 * has changed and we better reload everything */
	if (!donefile_nextindex(done, &basefilename, &checksums))
		return true;
	if (strcmp(basefilename, ri->cachebasename) != 0) {
		checksums_free(checksums);
		return true;
	}
	/* otherwise check if the file checksums match */
	if (!checksums_check(checksums,
			ri->from->remotefiles.checksums[ri->ofs[c_none]],
			&hashes_missing)) {
		checksums_free(checksums);
		return true;
	}
	if (hashes_missing) {
		/* if Release has checksums we do not yet know about,
		 * process it to make sure those match as well */
		checksums_free(checksums);
		return true;
	}
	if (!checksums_check(ri->from->remotefiles.checksums[ri->ofs[c_none]],
			checksums, &improves)) {
		/* this should not happen, but ... */
		checksums_free(checksums);
		return true;
	}
	if (improves) {
		/* assume this is our file and add the other hashes so they
		 * will show up in the file again the next time.
		 * This is a bit unelegant in mixing stuff, but otherwise this
		 * will cause redownloading when remote adds more hashes.
		 * The only downside of mixing can reject files that have the
		 * same recorded hashes as a previously processed files.
		 * But that is quite inlikely unless on attack, so getting some
		 * hint in that case cannot harm.*/
		(void)checksums_combine(&ri->from->remotefiles.checksums[
				ri->ofs[c_none]], checksums, NULL);
	}
	checksums_free(checksums);
	return false;
}

/* Collect the still-present cached variants of ri's index into old[]
 * (one slot per compression), and delete leftover .diffindex/.diff-* files. */
static inline void remote_index_oldfiles(struct remote_index *ri, /*@null@*/struct cachedlistfile *oldfiles, /*@out@*/struct cachedlistfile *old[c_COUNT]) {
	struct cachedlistfile *o;
	size_t l;
	enum compression c;

	for (c = 0 ; c < c_COUNT ; c++)
		old[c] = NULL;

	l = strlen(ri->cachebasename);
	for (o = oldfiles ; o != NULL ; o = o->next) {
		if (o->deleted)
			continue;
		if (strncmp(o->basefilename, ri->cachebasename, l) != 0)
			continue;
		for (c = 0 ; c < c_COUNT ; c++)
			if (strcmp(o->basefilename + l,
					uncompression_suffix[c]) == 0) {
				old[c] = o;
				o->needed = true;
				break;
			}
		/* stale pdiff helpers are always discarded */
		if (strcmp(o->basefilename + l, ".diffindex") == 0)
			(void)cachedlistfile_delete(o);
		if (strncmp(o->basefilename + l, ".diff-", 6) == 0)
			(void)cachedlistfile_delete(o);
	}
}

/* Delete every cached file belonging to ri (any compression suffix). */
static inline void remote_index_delete_oldfiles(struct remote_index *ri, /*@null@*/struct cachedlistfile *oldfiles) {
	struct cachedlistfile *o;
	size_t l;

	l = strlen(ri->cachebasename);
	for (o = oldfiles ; o != NULL ; o = o->next) {
		if (o->deleted)
			continue;
		if (strncmp(o->basefilename, ri->cachebasename, l) != 0)
			continue;
		(void)cachedlistfile_delete(o);
	}
}

static queue_callback index_callback;
static queue_callback diff_callback;

/* Without a Release file there is no file list to consult: blindly try the
 * requested (or default) encodings in order, one per call, starting after
 * ri->lasttriedencoding. */
static retvalue queue_next_without_release(struct remote_distribution *rd, struct remote_index *ri) {
	const struct encoding_preferences *downloadas;
	static const struct encoding_preferences defaultdownloadas = {
		.count = 5,
		.requested = {
			{ .diff = false, .force = false,
				.compression = c_gzip },
			{ .diff = false, .force = false,
				.compression = c_bzip2 },
			{ .diff = false, .force = false,
				.compression = c_none },
			{ .diff = false, .force = false,
				.compression = c_lzma },
			{ .diff = false, .force = false,
				.compression = c_xz }
		}
	};
	int e;

	if (ri->downloadas.count == 0)
		downloadas = &defaultdownloadas;
	else
		downloadas = &ri->downloadas;

	for (e = ri->lasttriedencoding + 1 ; e < downloadas->count ; e++) {
		enum compression c = downloadas->requested[e].compression;

		if (downloadas->requested[e].diff)
			continue;
		if (uncompression_supported(c)) {
			ri->lasttriedencoding = e;
			ri->compression = c;
			return aptmethod_enqueueindex(
					rd->repository->download,
					rd->suite_base_dir,
					ri->filename_in_release,
					uncompression_suffix[c],
					ri->cachefilename,
					uncompression_suffix[c],
					index_callback, ri, NULL);
		}
	}
	if (ri->lasttriedencoding < 0)
		fprintf(stderr,
"ERROR: no supported compressions in DownloadListsAs for '%s' by '%s'!\n",
				rd->suite, rd->repository->method);
	ri->lasttriedencoding = e;
	return RET_ERROR;
}

/* Pick the next encoding to try for ri, honouring an explicit DownloadListsAs
 * list when present, otherwise preferring the newest supported compression
 * listed in the Release file.  Sets ri->compression (c_COUNT means "use
 * .diff/Index") and advances ri->lasttriedencoding. */
static inline retvalue find_requested_encoding(struct remote_index *ri, const char *releasefile) {
	int e;
	enum compression c, stopat,
		/* the most-preferred requested but unsupported */
		unsupported = c_COUNT,
		/* the best unrequested but supported */
		unrequested = c_COUNT;

	if (ri->downloadas.count > 0) {
		bool found = false;

		for (e = ri->lasttriedencoding + 1 ;
				e < ri->downloadas.count ; e++) {
			struct compression_preference req;

			req = ri->downloadas.requested[e];

			if (req.diff) {
				/* pdiff only works against an existing
				 * uncompressed index */
				if (ri->olduncompressed == NULL)
					continue;
				assert (ri->ofs[c_none] >= 0);
				if (!req.force && ri->diff_ofs < 0)
					continue;
				ri->compression = c_COUNT;
				ri->lasttriedencoding = e;
				return RET_OK;
			}
			if (ri->ofs[req.compression] < 0 &&
					(!req.force || ri->ofs[c_none] < 0))
				continue;
			if (uncompression_supported(req.compression)) {
				ri->compression = req.compression;
				ri->lasttriedencoding = e;
				return RET_OK;
			} else if (unsupported == c_COUNT)
				unsupported = req.compression;
		}
		if (ri->lasttriedencoding > -1) {
			/* we already tried something, and nothing else
			 * is available, so give up */
			ri->lasttriedencoding = e;
			return RET_ERROR;
		}
		/* nothing that is both requested by the user and supported
		 * and listed in the Release file found, check what is there
		 * to get a meaningfull error message */
		for (c = 0 ; c < c_COUNT ; c++) {
			if (ri->ofs[c] < 0)
				continue;
			found = true;
			if (uncompression_supported(c))
				unrequested = c;
		}
		if (!found) {
			// TODO: might be nice to check for not-yet-even
			// known about compressions and say they are not
			// yet know yet instead then here...
			fprintf(stderr, "Could not find '%s' within '%s'\n",
					ri->filename_in_release, releasefile);
			return RET_ERROR_WRONG_MD5;
		}
		if (unsupported != c_COUNT && unrequested != c_COUNT) {
			fprintf(stderr,
"Error: '%s' only lists unusable or unrequested compressions of '%s'.\n"
"Try e.g the '%s' option (or check what it is set to) to make more useable.\n"
"Or change your DownloadListsAs to request e.g. '%s'.\n",
					releasefile, ri->filename_in_release,
					uncompression_option[unsupported],
					uncompression_config[unrequested]);
			return RET_ERROR;
		}
		if (unsupported != c_COUNT) {
			fprintf(stderr,
"Error: '%s' only lists unusable compressions of '%s'.\n"
"Try e.g the '%s' option (or check what it is set to) to make more useable.\n",
					releasefile, ri->filename_in_release,
					uncompression_option[unsupported]);
			return RET_ERROR;
		}
		if (unrequested != c_COUNT) {
			fprintf(stderr,
"Error: '%s' only lists unrequested compressions of '%s'.\n"
"Try changing your DownloadListsAs to request e.g. '%s'.\n",
					releasefile, ri->filename_in_release,
					uncompression_config[unrequested]);
			return RET_ERROR;
		}
		fprintf(stderr,
"Error: '%s' lists no requested and usable compressions of '%s'.\n",
				releasefile, ri->filename_in_release);
		return RET_ERROR;
	}
	/* When nothing specified, use the newest compression.
	 * This might make it slow on older computers (and perhaps
	 * on relatively new ones, too), but usually bandwidth costs
	 * and your time not.
	 * And you can always configure it to prefer a faster one... */

	/* ri->lasttriedencoding -1 means nothing tried,
	 * 0 means Package.diff was tried,
	 * 1 means nothing c_COUNT - 1 was already tried,
	 * 2 means nothing c_COUNT - 2 was already tried,
	 * and so on...*/

	if (ri->lasttriedencoding < 0) {
		if (ri->olduncompressed != NULL && ri->diff_ofs >= 0) {
			/* first try: prefer the pdiff path when possible */
			ri->compression = c_COUNT;
			ri->lasttriedencoding = 0;
			return RET_OK;
		}
		stopat = c_COUNT;
	} else
		stopat = c_COUNT - ri->lasttriedencoding;

	ri->compression = c_COUNT;
	for (c = 0 ; c < stopat ; c++) {
		if (ri->ofs[c] < 0)
			continue;
		if (uncompression_supported(c))
			ri->compression = c;
		else
			unsupported = c;
	}
	if (ri->compression == c_COUNT) {
		if (ri->lasttriedencoding > -1) {
			/* not the first try, no error message needed */
			ri->lasttriedencoding = c_COUNT;
			return RET_ERROR;
		}
		if (unsupported != c_COUNT) {
			fprintf(stderr,
"Error: '%s' only lists unusable compressions of '%s'.\n"
"Try e.g the '%s' option (or check what it is set to) to enable more.\n",
					releasefile, ri->filename_in_release,
					uncompression_option[unsupported]);
			return RET_ERROR;
		}
		fprintf(stderr, "Could not find '%s' within '%s'\n",
				ri->filename_in_release, releasefile);
		return RET_ERROR_WRONG_MD5;
	}
	ri->lasttriedencoding = c_COUNT - ri->compression;
	return RET_OK;
}

/* Delete the remembered old uncompressed index file, if any. */
static inline retvalue remove_old_uncompressed(struct remote_index *ri) {
	retvalue r;

	if (ri->olduncompressed != NULL) {
		r = cachedlistfile_delete(ri->olduncompressed);
		ri->olduncompressed = NULL;
		return r;
	} else
		return RET_NOTHING;
}

static retvalue queue_next_encoding(struct remote_distribution *rd, struct remote_index *ri);

// TODO: check if this still makes sense.
// (might be left over to support switching from older versions
// of reprepro that also put compressed files there)
/* Uncompress a still-valid old compressed index into the cache and verify
 * the result against the Release file's uncompressed checksums. */
static inline retvalue reuse_old_compressed_index(struct remote_distribution *rd, struct remote_index *ri, enum compression c, const char *oldfullfilename) {
	retvalue r;

	r = uncompress_file(oldfullfilename, ri->cachefilename, c);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r))
		return r;
	if (ri->ofs[c_none] >= 0) {
		r = checksums_test(ri->cachefilename,
				rd->remotefiles.checksums[ri->ofs[c_none]],
				&rd->remotefiles.checksums[ri->ofs[c_none]]);
		if (r == RET_ERROR_WRONG_MD5) {
			fprintf(stderr,
"Error: File '%s' looked correct according to '%s',\n"
"but after unpacking '%s' looks wrong.\n"
"Something is seriously broken!\n",
					oldfullfilename,
					rd->usedreleasefile,
					ri->cachefilename);
		}
		if (r == RET_NOTHING) {
			fprintf(stderr,
					"File '%s' mysteriously vanished!\n",
					ri->cachefilename);
			r = RET_ERROR_MISSING;
		}
		if (RET_WAS_ERROR(r))
			return r;
	}
	/* already there, nothing to do to get it... */
	ri->queued = true;
	ri->got = true;
	return RET_OK;
}

/* Decide how to obtain one index: reuse a cached copy when its checksums
 * still match the Release file, otherwise delete stale copies and queue a
 * download of the best encoding. */
static inline retvalue queueindex(struct remote_distribution *rd, struct remote_index *ri, bool nodownload, /*@null@*/struct cachedlistfile *oldfiles) {
	enum compression c;
	retvalue r;
	struct cachedlistfile *old[c_COUNT];

	if (rd->ignorerelease) {
		ri->queued = true;
		if (nodownload) {
			ri->got = true;
			return RET_OK;
		}
		/* as there is no way to know which are current,
		 * just delete everything */
		remote_index_delete_oldfiles(ri, oldfiles);
		return queue_next_without_release(rd, ri);
	}

	/* check if this file is still available from an earlier download */
	remote_index_oldfiles(ri, oldfiles, old);

	ri->olduncompressed = NULL;
	ri->oldchecksums = NULL;

	if (ri->ofs[c_none] < 0 && old[c_none] != NULL) {
		/* if we know not what it should be,
		 * we canot use the old... */
		r = cachedlistfile_delete(old[c_none]);
		if (RET_WAS_ERROR(r))
			return r;
		old[c_none] = NULL;
	} else if (old[c_none] != NULL) {
		bool improves;
		int uo = ri->ofs[c_none];
		struct checksums **wanted_p = &rd->remotefiles.checksums[uo];

		r = checksums_read(old[c_none]->fullfilename,
				&ri->oldchecksums);
		if (r == RET_NOTHING) {
			fprintf(stderr,
					"File '%s' mysteriously vanished!\n",
					old[c_none]->fullfilename);
			r = RET_ERROR_MISSING;
		}
		if (RET_WAS_ERROR(r))
			return r;
		if (checksums_check(*wanted_p, ri->oldchecksums, &improves)) {
			/* already there, nothing to do to get it... */
			ri->queued = true;
			ri->got = true;
			if (improves)
				r = checksums_combine(wanted_p,
						ri->oldchecksums, NULL);
			else
				r = RET_OK;
			checksums_free(ri->oldchecksums);
			ri->oldchecksums = NULL;
			return r;
		}
		/* remember the mismatching old file: it may still serve
		 * as the base for a pdiff update */
		ri->olduncompressed = old[c_none];
		old[c_none] = NULL;
	}
	assert (old[c_none] == NULL);

	/* make sure everything old is deleted or check if it can be used */
	for (c = 0 ; c < c_COUNT ; c++) {
		if (old[c] == NULL)
			continue;
		if (c != c_none && ri->ofs[c] >= 0) {
			/* check if it can be used */
			r = checksums_test(old[c]->fullfilename,
					rd->remotefiles.checksums[ri->ofs[c]],
					&rd->remotefiles.checksums[
						ri->ofs[c]]);
			if (r == RET_ERROR_WRONG_MD5)
				r = RET_NOTHING;
			if (RET_WAS_ERROR(r))
				return r;
			if (RET_IS_OK(r)) {
				r = remove_old_uncompressed(ri);
				if (RET_WAS_ERROR(r))
					return r;
				assert (old[c_none] == NULL);
				return reuse_old_compressed_index(rd, ri, c,
						old[c]->fullfilename);
			}
		}
		r = cachedlistfile_delete(old[c]);
		if (RET_WAS_ERROR(r))
			return r;
		old[c] = NULL;
	}

	/* nothing found, we'll have to download: */
	if (nodownload) {
		if (ri->olduncompressed != NULL)
			fprintf(stderr,
"Error: '%s' does not match Release file, try without --nolistsdownload to download new one!\n",
					ri->cachefilename);
		else
			fprintf(stderr,
"Error: Missing '%s', try without --nolistsdownload to download it!\n",
					ri->cachefilename);
		return RET_ERROR_MISSING;
	}

	return queue_next_encoding(rd, ri);
}

/* Queue the download of the next encoding chosen by
 * find_requested_encoding(): either the .diff/Index (pdiff) path or a
 * (possibly compressed) full index. */
static retvalue queue_next_encoding(struct remote_distribution *rd, struct remote_index *ri) {
	struct remote_repository *rr = rd->repository;
	retvalue r;

	if (rd->ignorerelease)
		return queue_next_without_release(rd, ri);

	r = find_requested_encoding(ri, rd->usedreleasefile);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r))
		return r;

	assert (ri->compression <= c_COUNT);

	/* check if downloading a .diff/Index (aka .pdiff) is requested */
	if (ri->compression == c_COUNT) {
		assert (ri->olduncompressed != NULL);
		assert (ri->oldchecksums != NULL);
		ri->queued = true;
		return aptmethod_enqueueindex(rr->download,
				rd->suite_base_dir,
				ri->filename_in_release, ".diff/Index",
				ri->cachefilename, ".diffindex",
				diff_callback, ri, NULL);
	}

	assert (ri->compression < c_COUNT);
	assert (uncompression_supported(ri->compression));

	if (ri->compression == c_none) {
		r = remove_old_uncompressed(ri);
		if (RET_WAS_ERROR(r))
			return r;
	}
	/* as those checksums might be overwritten with completed data,
	 * this assumes that the uncompressed checksums for one index is never
	 * the compressed checksum for another... */
	ri->queued = true;
	return aptmethod_enqueueindex(rr->download,
			rd->suite_base_dir,
			ri->filename_in_release,
			uncompression_suffix[ri->compression],
			ri->cachefilename,
			uncompression_suffix[ri->compression],
			index_callback, ri, NULL);
}

/* Queue (or reuse from cache) every needed index of one distribution. */
static retvalue remote_distribution_enqueuelists(struct remote_distribution *rd, bool nodownload, struct cachedlistfile *oldfiles) {
	struct remote_index *ri;
	retvalue r;

	/* check what to get for the requested indicies */
	for (ri = rd->indices ; ri != NULL ; ri = ri->next) {
		if (ri->queued)
			continue;
		if (!ri->needed) {
			/* if we do not know anything about it,
			 * it cannot have got marked as old
			 * or otherwise as unneeded */
			assert (!rd->ignorerelease);
			continue;
		}
		r = queueindex(rd, ri, nodownload, oldfiles);
		if (RET_WAS_ERROR(r))
			return r;
	}
	return RET_OK;
}

/* Scan the lists cache, queue all needed index downloads for all
 * repositories and run the apt methods. */
retvalue remote_preparelists(struct aptmethodrun *run, bool nodownload) {
	struct remote_repository *rr;
	struct remote_distribution *rd;
	retvalue r;
	struct cachedlistfile *oldfiles;

	r = cachedlists_scandir(&oldfiles);
	if (RET_WAS_ERROR(r))
		return r;
	if (r == RET_NOTHING)
		oldfiles = NULL;

	for (rr = repositories ; rr != NULL ; rr = rr->next) {
		for (rd = rr->distributions ; rd != NULL ; rd = rd->next) {
			r = remote_distribution_enqueuelists(rd, nodownload,
					oldfiles);
			if (RET_WAS_ERROR(r)) {
				cachedlistfile_freelist(oldfiles);
				return r;
			}
		}
	}
	r = aptmethod_download(run);
	if (RET_WAS_ERROR(r)) {
		cachedlistfile_freelist(oldfiles);
		return r;
	}
	cachedlistfile_freelist(oldfiles);
	return RET_OK;
}

/* Find or create the remote_index for the given cache/release filenames.
 * Takes ownership of both strings (freed on reuse or failure). */
static struct remote_index *addindex(struct remote_distribution *rd, /*@only@*/char *cachefilename, /*@only@*/char *filename, /*@null@*/const struct encoding_preferences *downloadas) {
	struct remote_index *ri, **last;
	enum compression c;
	const char *cachebasename;

	if (FAILEDTOALLOC(cachefilename) || FAILEDTOALLOC(filename))
		return NULL;

	cachebasename = dirs_basename(cachefilename);
	last = &rd->indices;
	while (*last != NULL && strcmp((*last)->cachebasename,
				cachebasename) != 0)
		last = &(*last)->next;
	if (*last != NULL) {
		ri = *last;
		// TODO: perhaps try to calculate some form of intersections
		// instead of just using the shorter one...
		if (downloadas != NULL &&
				(ri->downloadas.count == 0 ||
				 ri->downloadas.count > downloadas->count))
			ri->downloadas = *downloadas;
		free(cachefilename);
		free(filename);
		return ri;
	}
	ri = zNEW(struct remote_index);
	if (FAILEDTOALLOC(ri)) {
		free(cachefilename);
		free(filename);
		return NULL;
	}

	*last = ri;
	ri->from = rd;
	ri->cachefilename = cachefilename;
	ri->cachebasename = cachebasename;
	ri->filename_in_release = filename;
	if (downloadas != NULL)
		ri->downloadas = *downloadas;
	/* -1 offsets mean "not listed in the Release file (yet)" */
	for (c = 0 ; c < c_COUNT ; c++)
		ri->ofs[c] = -1;
	ri->diff_ofs = -1;
	ri->lasttriedencoding = -1;
	return ri;
}

/* Register the Packages/Sources index of a non-flat distribution for the
 * given component/architecture/packagetype. */
struct remote_index *remote_index(struct remote_distribution *rd, const char *architecture, const char *component, packagetype_t packagetype, /*@null@*/const struct encoding_preferences *downloadas) {
	char *cachefilename, *filename_in_release;

	assert (!rd->flat);
	if (packagetype == pt_deb) {
		filename_in_release = mprintf("%s/binary-%s/Packages",
				component, architecture);
		cachefilename = genlistsfilename("Packages", 4,
				rd->repository->name, rd->suite,
				component, architecture, ENDOFARGUMENTS);
	} else if (packagetype == pt_udeb) {
		filename_in_release = mprintf(
				"%s/debian-installer/binary-%s/Packages",
				component, architecture);
		cachefilename = genlistsfilename("uPackages", 4,
				rd->repository->name, rd->suite, component,
				architecture, ENDOFARGUMENTS);
	} else if (packagetype == pt_dsc) {
		filename_in_release = mprintf("%s/source/Sources",
				component);
		cachefilename = genlistsfilename("Sources", 3,
				rd->repository->name, rd->suite, component,
				ENDOFARGUMENTS);
	} else {
		assert ("Unexpected package type" == NULL);
	}
	return addindex(rd, cachefilename, filename_in_release, downloadas);
}

/* Mark the cached index files for this repository/suite/component/
 * architecture/packagetype combination as still needed. */
void cachedlistfile_need_index(struct cachedlistfile *list, const char *repository, const char *suite, const char *architecture, const char *component, packagetype_t packagetype) {
	if
(packagetype == pt_deb) { cachedlistfile_need(list, "Packages", 4, repository, suite, component, architecture, ENDOFARGUMENTS); } else if (packagetype == pt_udeb) { cachedlistfile_need(list, "uPackages", 4, repository, suite, component, architecture, ENDOFARGUMENTS); } else if (packagetype == pt_dsc) { cachedlistfile_need(list, "Sources", 3, repository, suite, component, ENDOFARGUMENTS); } } struct remote_index *remote_flat_index(struct remote_distribution *rd, packagetype_t packagetype, /*@null@*/const struct encoding_preferences *downloadas) { char *cachefilename, *filename_in_release; assert (rd->flat); if (packagetype == pt_deb) { filename_in_release = strdup("Packages"); cachefilename = genlistsfilename("Packages", 2, rd->repository->name, rd->suite, ENDOFARGUMENTS); } else if (packagetype == pt_dsc) { filename_in_release = strdup("Sources"); cachefilename = genlistsfilename("Sources", 2, rd->repository->name, rd->suite, ENDOFARGUMENTS); } else { assert ("Unexpected package type" == NULL); } return addindex(rd, cachefilename, filename_in_release, downloadas); } void cachedlistfile_need_flat_index(struct cachedlistfile *list, const char *repository, const char *suite, packagetype_t packagetype) { if (packagetype == pt_deb) { cachedlistfile_need(list, "Packages", 2, repository, suite, ENDOFARGUMENTS); } else if (packagetype == pt_dsc) { cachedlistfile_need(list, "Sources", 1, repository, suite, ENDOFARGUMENTS); } } const char *remote_index_file(const struct remote_index *ri) { assert (ri->needed && ri->queued && ri->got); return ri->cachefilename; } const char *remote_index_basefile(const struct remote_index *ri) { assert (ri->needed && ri->queued); return ri->cachebasename; } struct aptmethod *remote_aptmethod(const struct remote_distribution *rd) { return rd->repository->download; } void remote_index_markdone(const struct remote_index *ri, struct markdonefile *done) { if (ri->ofs[c_none] < 0) return; markdone_index(done, ri->cachebasename, 
ri->from->remotefiles.checksums[ri->ofs[c_none]]); } void remote_index_needed(struct remote_index *ri) { ri->needed = true; } static retvalue indexfile_mark_got(struct remote_distribution *rd, struct remote_index *ri, /*@null@*/const struct checksums *gotchecksums) { struct checksums **checksums_p; if (!rd->ignorerelease && ri->ofs[c_none] >= 0) { checksums_p = &rd->remotefiles.checksums[ri->ofs[c_none]]; bool matches, improves; // TODO: this no longer calculates all the checksums if // the Release does not contain more and the apt method // returned not all (but all that are in Release). // This will then cause the done file not containing all // checksums. (but if the Release not contain them, this // does not harm, does it?) if (gotchecksums != NULL) { matches = checksums_check(*checksums_p, gotchecksums, &improves); /* that should have been tested earlier */ assert (matches); if (! matches) return RET_ERROR_WRONG_MD5; if (improves) { retvalue r; r = checksums_combine(checksums_p, gotchecksums, NULL); if (RET_WAS_ERROR(r)) return r; } } } ri->got = true; return RET_OK; } static retvalue indexfile_unpacked(void *privdata, const char *compressed, bool failed) { struct remote_index *ri = privdata; struct remote_distribution *rd = ri->from; retvalue r; struct checksums *readchecksums = NULL; if (failed) { // TODO: check if alternative can be used... 
return RET_ERROR; } /* file got uncompressed, check if it has the correct checksum */ /* even with a Release file, an old-style one might * not list the checksums for the uncompressed indices */ if (!rd->ignorerelease && ri->ofs[c_none] >= 0) { int ofs = ri->ofs[c_none]; const struct checksums *wantedchecksums = rd->remotefiles.checksums[ofs]; bool matches, missing = false; r = checksums_read(ri->cachefilename, &readchecksums); if (r == RET_NOTHING) { fprintf(stderr, "Cannot open '%s', though it should just have been unpacked from '%s'!\n", ri->cachefilename, compressed); r = RET_ERROR_MISSING; } if (RET_WAS_ERROR(r)) return r; missing = false; matches = checksums_check(readchecksums, wantedchecksums, &missing); assert (!missing); if (!matches) { fprintf(stderr, "Wrong checksum of uncompressed content of '%s':\n", compressed); checksums_printdifferences(stderr, wantedchecksums, readchecksums); checksums_free(readchecksums); return RET_ERROR_WRONG_MD5; } /* if the compressed file was downloaded or copied, delete it. * This is only done if we know the uncompressed checksum, so * that less downloading is needed (though as apt no longer * supports such archieves, they are unlikely anyway). 
*/ if (strncmp(ri->cachefilename, compressed, strlen(ri->cachefilename)) == 0) { (void)unlink(compressed); } } r = indexfile_mark_got(rd, ri, readchecksums); checksums_free(readchecksums); if (RET_WAS_ERROR(r)) return r; return RET_OK; } /* *checksums_p must be either NULL or gotchecksums list all known checksums */ static inline retvalue check_checksums(const char *methodname, const char *uri, const char *gotfilename, const struct checksums *wantedchecksums, /*@null@*/const struct checksums *gotchecksums, struct checksums **checksums_p) { bool matches, missing = false; struct checksums *readchecksums = NULL; retvalue r; if (gotchecksums == NULL) { matches = true; missing = true; } else matches = checksums_check(gotchecksums, wantedchecksums, &missing); /* if the apt method did not generate all checksums * we want to check, we'll have to do so: */ if (matches && missing) { /* we assume that everything we know how to * extract from a Release file is something * we know how to calculate out of a file */ assert (checksums_p == NULL || *checksums_p == NULL); r = checksums_read(gotfilename, &readchecksums); if (r == RET_NOTHING) { fprintf(stderr, "Cannot open '%s', though apt-method '%s' claims it is there!\n", gotfilename, methodname); r = RET_ERROR_MISSING; } if (RET_WAS_ERROR(r)) return r; gotchecksums = readchecksums; missing = false; matches = checksums_check(gotchecksums, wantedchecksums, &missing); assert (!missing); } if (!matches) { fprintf(stderr, "Wrong checksum during receive of '%s':\n", uri); checksums_printdifferences(stderr, wantedchecksums, gotchecksums); checksums_free(readchecksums); return RET_ERROR_WRONG_MD5; } if (checksums_p == NULL) checksums_free(readchecksums); else if (readchecksums != NULL) *checksums_p = readchecksums; return RET_OK; } static retvalue index_callback(enum queue_action action, void *privdata, UNUSED(void *privdata2), const char *uri, const char *gotfilename, const char *wantedfilename, /*@null@*/const struct checksums 
*gotchecksums, const char *methodname) { struct remote_index *ri = privdata; struct remote_distribution *rd = ri->from; struct checksums *readchecksums = NULL; retvalue r; if (action == qa_error) return queue_next_encoding(rd, ri); if (action != qa_got) return RET_ERROR; if (ri->compression == c_none) { assert (strcmp(wantedfilename, ri->cachefilename) == 0); r = copytoplace(gotfilename, wantedfilename, methodname, &readchecksums); if (RET_WAS_ERROR(r)) return r; gotfilename = wantedfilename; if (readchecksums != NULL) gotchecksums = readchecksums; } if (!rd->ignorerelease && ri->ofs[ri->compression] >= 0) { int ofs = ri->ofs[ri->compression]; const struct checksums *wantedchecksums = rd->remotefiles.checksums[ofs]; r = check_checksums(methodname, uri, gotfilename, wantedchecksums, gotchecksums, &readchecksums); if (RET_WAS_ERROR(r)) { checksums_free(readchecksums); return r; } if (readchecksums != NULL) gotchecksums = readchecksums; } if (ri->compression == c_none) { assert (strcmp(gotfilename, wantedfilename) == 0); r = indexfile_mark_got(rd, ri, gotchecksums); checksums_free(readchecksums); if (RET_WAS_ERROR(r)) return r; return RET_OK; } else { checksums_free(readchecksums); r = remove_old_uncompressed(ri); if (RET_WAS_ERROR(r)) return r; r = uncompress_queue_file(gotfilename, ri->cachefilename, ri->compression, indexfile_unpacked, privdata); if (RET_WAS_ERROR(r)) return r; return RET_OK; } } static queue_callback diff_got_callback; static retvalue queue_next_diff(struct remote_index *ri) { struct remote_distribution *rd = ri->from; struct remote_repository *rr = rd->repository; int i; retvalue r; for (i = 0 ; i < ri->diffindex->patchcount ; i++) { bool improves; struct diffindex_patch *p = &ri->diffindex->patches[i]; char *patchsuffix, *c; if (p->done || p->frompackages == NULL) continue; if (!checksums_check(ri->oldchecksums, p->frompackages, &improves)) continue; /* p->frompackages should only have sha1 and oldchecksums * should definitly list a sha1 hash */ 
assert (!improves); p->done = true; free(ri->patchfilename); ri->patchfilename = mprintf("%s.diff-%s", ri->cachefilename, p->name); if (FAILEDTOALLOC(ri->patchfilename)) return RET_ERROR_OOM; c = ri->patchfilename + strlen(ri->cachefilename); while (*c != '\0') { if ((*c < '0' || *c > '9') && (*c < 'A' || *c > 'Z') && (*c < 'a' || *c > 'z') && *c != '.' && *c != '-') *c = '_'; c++; } ri->selectedpatch = p; patchsuffix = mprintf(".diff/%s.gz", p->name); if (FAILEDTOALLOC(patchsuffix)) return RET_ERROR_OOM; /* found a matching patch, tell the downloader we want it */ r = aptmethod_enqueueindex(rr->download, rd->suite_base_dir, ri->filename_in_release, patchsuffix, ri->patchfilename, ".gz", diff_got_callback, ri, p); free(patchsuffix); return r; } /* no patch matches, try next possibility... */ fprintf(stderr, "Error: available '%s' not listed in '%s.diffindex'.\n", ri->cachefilename, ri->cachefilename); return queue_next_encoding(rd, ri); } static retvalue diff_uncompressed(void *privdata, const char *compressed, bool failed) { struct remote_index *ri = privdata; struct remote_distribution *rd = ri->from; const struct diffindex_patch *p = ri->selectedpatch; char *tempfilename; struct rred_patch *rp; FILE *f; int i; retvalue r; bool dummy; if (ri->deletecompressedpatch) (void)unlink(compressed); if (failed) return RET_ERROR; r = checksums_test(ri->patchfilename, p->checksums, NULL); if (r == RET_NOTHING) { fprintf(stderr, "Mysteriously vanished file '%s'!\n", ri->patchfilename); r = RET_ERROR_MISSING; } if (r == RET_ERROR_WRONG_MD5) fprintf(stderr, "Corrupted package diff '%s'!\n", ri->patchfilename); if (RET_WAS_ERROR(r)) return r; r = patch_load(ri->patchfilename, checksums_getfilesize(p->checksums), &rp); ASSERT_NOT_NOTHING(r); if (RET_WAS_ERROR(r)) return r; tempfilename = calc_addsuffix(ri->cachefilename, "tmp"); if (FAILEDTOALLOC(tempfilename)) { patch_free(rp); return RET_ERROR_OOM; } (void)unlink(tempfilename); i = rename(ri->cachefilename, tempfilename); if 
(i != 0) { int e = errno; fprintf(stderr, "Error %d moving '%s' to '%s': %s\n", e, ri->cachefilename, tempfilename, strerror(e)); free(tempfilename); patch_free(rp); return RET_ERRNO(e); } f = fopen(ri->cachefilename, "w"); if (f == NULL) { int e = errno; fprintf(stderr, "Error %d creating '%s': %s\n", e, ri->cachefilename, strerror(e)); (void)unlink(tempfilename); ri->olduncompressed->deleted = true; ri->olduncompressed = NULL; free(tempfilename); patch_free(rp); return RET_ERRNO(e); } r = patch_file(f, tempfilename, patch_getconstmodifications(rp)); (void)unlink(tempfilename); (void)unlink(ri->patchfilename); free(ri->patchfilename); ri->patchfilename = NULL; free(tempfilename); patch_free(rp); if (RET_WAS_ERROR(r)) { (void)fclose(f); remove_old_uncompressed(ri); // TODO: fall back to downloading at once? return r; } i = ferror(f); if (i != 0) { int e = errno; (void)fclose(f); fprintf(stderr, "Error %d writing to '%s': %s\n", e, ri->cachefilename, strerror(e)); remove_old_uncompressed(ri); return RET_ERRNO(e); } i = fclose(f); if (i != 0) { int e = errno; fprintf(stderr, "Error %d writing to '%s': %s\n", e, ri->cachefilename, strerror(e)); remove_old_uncompressed(ri); return RET_ERRNO(e); } checksums_free(ri->oldchecksums); ri->oldchecksums = NULL; r = checksums_read(ri->cachefilename, &ri->oldchecksums); if (r == RET_NOTHING) { fprintf(stderr, "Myteriously vanished file '%s'!\n", ri->cachefilename); r = RET_ERROR; } if (RET_WAS_ERROR(r)) return r; if (checksums_check(ri->oldchecksums, rd->remotefiles.checksums[ri->ofs[c_none]], &dummy)) { ri->olduncompressed->deleted = true; ri->olduncompressed = NULL; /* we have a winner */ return indexfile_mark_got(rd, ri, ri->oldchecksums); } /* let's see what patch we need next */ return queue_next_diff(ri); } static retvalue diff_got_callback(enum queue_action action, void *privdata, UNUSED(void *privdata2), UNUSED(const char *uri), const char *gotfilename, const char *wantedfilename, UNUSED(/*@null@*/const struct checksums 
*gotchecksums), UNUSED(const char *methodname)) { struct remote_index *ri = privdata; retvalue r; if (action == qa_error) return queue_next_encoding(ri->from, ri); if (action != qa_got) return RET_ERROR; ri->deletecompressedpatch = strcmp(gotfilename, wantedfilename) == 0; r = uncompress_queue_file(gotfilename, ri->patchfilename, c_gzip, diff_uncompressed, ri); if (RET_WAS_ERROR(r)) (void)unlink(gotfilename); return r; } static retvalue diff_callback(enum queue_action action, void *privdata, UNUSED(void *privdata2), const char *uri, const char *gotfilename, const char *wantedfilename, /*@null@*/const struct checksums *gotchecksums, const char *methodname) { struct remote_index *ri = privdata; struct remote_distribution *rd = ri->from; struct checksums *readchecksums = NULL; int ofs; retvalue r; if (action == qa_error) return queue_next_encoding(rd, ri); if (action != qa_got) return RET_ERROR; r = copytoplace(gotfilename, wantedfilename, methodname, &readchecksums); if (RET_WAS_ERROR(r)) return r; if (readchecksums != NULL) gotchecksums = readchecksums; ofs = ri->diff_ofs; if (ofs >= 0) { const struct checksums *wantedchecksums = rd->remotefiles.checksums[ofs]; bool matches, missing = false; if (gotchecksums == NULL) { matches = true; missing = true; } else matches = checksums_check(gotchecksums, wantedchecksums, &missing); /* if the apt method did not generate all checksums * we want to check, we'll have to do so: */ if (matches && missing) { /* we assume that everything we know how to * extract from a Release file is something * we know how to calculate out of a file */ assert (readchecksums == NULL); r = checksums_read(gotfilename, &readchecksums); if (r == RET_NOTHING) { fprintf(stderr, "Cannot open '%s', though apt-method '%s' claims it is there!\n", gotfilename, methodname); r = RET_ERROR_MISSING; } if (RET_WAS_ERROR(r)) return r; gotchecksums = readchecksums; missing = false; matches = checksums_check(gotchecksums, wantedchecksums, &missing); assert 
(!missing); } if (!matches) { fprintf(stderr, "Wrong checksum during receive of '%s':\n", uri); checksums_printdifferences(stderr, wantedchecksums, gotchecksums); checksums_free(readchecksums); return RET_ERROR_WRONG_MD5; } } checksums_free(readchecksums); r = diffindex_read(wantedfilename, &ri->diffindex); ASSERT_NOT_NOTHING(r); if (RET_WAS_ERROR(r)) return queue_next_encoding(rd, ri); if (ri->ofs[c_none] >= 0) { bool dummy; if (!checksums_check(rd->remotefiles.checksums[ ri->ofs[c_none]], ri->diffindex->destination, &dummy)) { fprintf(stderr, "'%s' does not match file requested in '%s'. Aborting diff processing...\n", gotfilename, rd->usedreleasefile); /* as this is claimed to be a common error * (outdated .diff/Index file), proceed with * other requested way to retrieve index file */ return queue_next_encoding(rd, ri); } } return queue_next_diff(ri); } reprepro-4.13.1/remoterepository.h0000644000175100017510000000604012152651661014172 00000000000000#ifndef REPREPRO_REMOTEREPOSITORY_H #define REPREPRO_REMOTEREPOSITORY_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" 
#endif #ifndef REPREPRO_APTMETHOD_H #include "aptmethod.h" #endif #ifndef REPREPRO_DONEFILE_H #include "donefile.h" #endif #ifndef REPREPRO_ATOMS_H #include "atoms.h" #endif struct remote_repository; struct remote_distribution; struct remote_index; /* register repository, strings as stored by reference */ struct remote_repository *remote_repository_prepare(const char * /*name*/, const char * /*method*/, const char * /*fallback*/, const struct strlist * /*config*/); /* register remote distribution of the given repository */ retvalue remote_distribution_prepare(struct remote_repository *, const char * /*suite*/, bool /*ignorerelease*/, bool /*getinrelease*/, const char * /*verifyrelease*/, bool /*flat*/, bool * /*ignorehashes*/, /*@out@*/struct remote_distribution **); void remote_repository_free(/*@only@*/struct remote_repository *); /* create aptmethods for all of yet created repositories */ retvalue remote_startup(struct aptmethodrun *); retvalue remote_preparemetalists(struct aptmethodrun *, bool /*nodownload*/); retvalue remote_preparelists(struct aptmethodrun *, bool /*nodownload*/); struct encoding_preferences { /* number of preferences, 0 means use default */ unsigned short count; /* a list of compressions to use */ struct compression_preference { bool diff; bool force; enum compression compression; } requested[3*c_COUNT]; }; struct remote_index *remote_index(struct remote_distribution *, const char * /*architecture*/, const char * /*component*/, packagetype_t, const struct encoding_preferences *); struct remote_index *remote_flat_index(struct remote_distribution *, packagetype_t, const struct encoding_preferences *); /* returns the name of the prepared uncompressed file */ /*@observer@*/const char *remote_index_file(const struct remote_index *); /*@observer@*/const char *remote_index_basefile(const struct remote_index *); /*@observer@*/struct aptmethod *remote_aptmethod(const struct remote_distribution *); bool remote_index_isnew(const struct remote_index *, 
struct donefile *); void remote_index_needed(struct remote_index *); void remote_index_markdone(const struct remote_index *, struct markdonefile *); char *genlistsfilename(/*@null@*/const char * /*type*/, unsigned int /*count*/, ...) __attribute__((sentinel)); struct cachedlistfile; retvalue cachedlists_scandir(/*@out@*/struct cachedlistfile **); void cachedlistfile_need_index(struct cachedlistfile *, const char * /*repository*/, const char * /*suite*/, const char * /*architecture*/, const char * /*component*/, packagetype_t); void cachedlistfile_need_flat_index(struct cachedlistfile *, const char * /*repository*/, const char * /*suite*/, packagetype_t); void cachedlistfile_need(struct cachedlistfile *, const char * /*type*/, unsigned int /*count*/, ...) __attribute__((sentinel)); void cachedlistfile_freelist(/*@only@*/struct cachedlistfile *); void cachedlistfile_deleteunneeded(const struct cachedlistfile *); #endif reprepro-4.13.1/needbuild.h0000644000175100017510000000057012152651661012474 00000000000000#ifndef REPREPRO_NEEDBUILD_H #define REPREPRO_NEEDBUILD_H #ifndef REPREPRO_ATOMS_H #include "atoms.h" #endif #ifndef REPREPRO_DATABASE_H #include "database.h" #endif #ifndef REPREPRO_DISTRIBUTION_H #include "distribution.h" #endif retvalue find_needs_build(struct distribution *, architecture_t, const struct atomlist *, /*@null@*/const char *glob, bool printarch); #endif reprepro-4.13.1/uploaderslist.h0000644000175100017510000000165012152651661013433 00000000000000#ifndef REPREPRO_UPLOADERSLIST_H #define REPREPRO_UPLOADERSLIST_H struct upload_conditions; struct uploaders; enum upload_condition_type { uc_REJECTED = 0, uc_ALWAYS, /* uc_COMPONENT, */ uc_ARCHITECTURES, uc_CODENAME, uc_SOURCENAME, uc_SECTIONS, uc_BINARIES, uc_BYHAND }; #define uc_ACCEPTED uc_ALWAYS retvalue uploaders_get(/*@out@*/struct uploaders **list, const char *filename); void uploaders_unlock(/*@only@*//*@null@*/struct uploaders *); struct signatures; retvalue uploaders_permissions(struct 
uploaders *, const struct signatures *, /*@out@*/struct upload_conditions **); /* uc_FAILED means rejected, uc_ACCEPTED means can go in */ enum upload_condition_type uploaders_nextcondition(struct upload_conditions *); /* true means, give more if more to check, false means enough */ bool uploaders_verifystring(struct upload_conditions *, const char *); bool uploaders_verifyatom(struct upload_conditions *, atom_t); #endif reprepro-4.13.1/debfilecontents.c0000644000175100017510000001145412152651661013707 00000000000000/* This file is part of "reprepro" * Copyright (C) 2006,2007 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "uncompression.h" #include "ar.h" #include "filelist.h" #include "debfile.h" #ifndef HAVE_LIBARCHIVE #error Why did this file got compiled? 
#endif static retvalue read_data_tar(/*@out@*/char **list, /*@out@*/size_t *size, const char *debfile, struct ar_archive *ar, struct archive *tar) { struct archive_entry *entry; struct filelistcompressor c; retvalue r; int a, e; r = filelistcompressor_setup(&c); if (RET_WAS_ERROR(r)) return r; archive_read_support_format_tar(tar); archive_read_support_format_gnutar(tar); a = archive_read_open(tar, ar, ar_archivemember_open, ar_archivemember_read, ar_archivemember_close); if (a != ARCHIVE_OK) { filelistcompressor_cancel(&c); e = archive_errno(tar); if (e == -EINVAL) /* special code to say there is none */ fprintf(stderr, "open data.tar within '%s' failed: %s\n", debfile, archive_error_string(tar)); else fprintf(stderr, "open data.tar within '%s' failed: %d:%d:%s\n", debfile, a, e, archive_error_string(tar)); return RET_ERROR; } while ((a=archive_read_next_header(tar, &entry)) == ARCHIVE_OK) { const char *name = archive_entry_pathname(entry); mode_t mode; if (name[0] == '.') name++; if (name[0] == '/') name++; if (name[0] == '\0') continue; mode = archive_entry_mode(entry); if (!S_ISDIR(mode)) { r = filelistcompressor_add(&c, name, strlen(name)); if (RET_WAS_ERROR(r)) { filelistcompressor_cancel(&c); return r; } } if (interrupted()) { filelistcompressor_cancel(&c); return RET_ERROR_INTERRUPTED; } a = archive_read_data_skip(tar); if (a != ARCHIVE_OK) { e = archive_errno(tar); if (e == -EINVAL) { r = RET_ERROR; fprintf(stderr, "Error skipping %s within data.tar from %s: %s\n", archive_entry_pathname(entry), debfile, archive_error_string(tar)); } else { fprintf(stderr, "Error %d skipping %s within data.tar from %s: %s\n", e, archive_entry_pathname(entry), debfile, archive_error_string(tar)); if (e != 0) r = RET_ERRNO(e); else r = RET_ERROR; } filelistcompressor_cancel(&c); return r; } } if (a != ARCHIVE_EOF) { e = archive_errno(tar); if (e == -EINVAL) { r = RET_ERROR; fprintf(stderr, "Error reading data.tar from %s: %s\n", debfile, archive_error_string(tar)); } else { 
fprintf(stderr, "Error %d reading data.tar from %s: %s\n", e, debfile, archive_error_string(tar)); if (e != 0) r = RET_ERRNO(e); else r = RET_ERROR; } filelistcompressor_cancel(&c); return r; } return filelistcompressor_finish(&c, list, size); } retvalue getfilelist(/*@out@*/char **filelist, size_t *size, const char *debfile) { struct ar_archive *ar; retvalue r; bool hadcandidate = false; r = ar_open(&ar, debfile); if (RET_WAS_ERROR(r)) return r; assert (r != RET_NOTHING); do { char *filename; enum compression c; r = ar_nextmember(ar, &filename); if (RET_IS_OK(r)) { if (strncmp(filename, "data.tar", 8) != 0) { free(filename); continue; } hadcandidate = true; for (c = 0 ; c < c_COUNT ; c++) { if (strcmp(filename + 8, uncompression_suffix[c]) == 0) break; } if (c >= c_COUNT) { free(filename); continue; } ar_archivemember_setcompression(ar, c); if (uncompression_supported(c)) { struct archive *tar; tar = archive_read_new(); r = read_data_tar(filelist, size, debfile, ar, tar); // TODO: check how to get an error message here.. archive_read_finish(tar); if (r != RET_NOTHING) { ar_close(ar); free(filename); return r; } } free(filename); } } while (RET_IS_OK(r)); ar_close(ar); if (hadcandidate) fprintf(stderr, "Could not find a suitable data.tar file within '%s'!\n", debfile); else fprintf(stderr, "Could not find a data.tar file within '%s'!\n", debfile); return RET_ERROR_MISSING; } reprepro-4.13.1/sourceextraction.c0000644000175100017510000004162312152651661014141 00000000000000/* This file is part of "reprepro" * Copyright (C) 2008 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #ifdef HAVE_LIBARCHIVE #include #include #endif #include "error.h" #include "filecntl.h" #include "chunks.h" #include "uncompression.h" #include "sourceextraction.h" struct sourceextraction { bool failed, completed; int difffile, tarfile, debiantarfile; enum compression diffcompression, tarcompression, debiancompression; /*@null@*/ char **section_p, **priority_p; }; struct sourceextraction *sourceextraction_init(char **section_p, char **priority_p) { struct sourceextraction *n; n = zNEW(struct sourceextraction); if (FAILEDTOALLOC(n)) return n; n->difffile = -1; n->tarfile = -1; n->debiantarfile = -1; n->section_p = section_p; n->priority_p = priority_p; return n; } void sourceextraction_abort(struct sourceextraction *e) { free(e); } /* with must be a string constant, no pointer! 
*/ #define endswith(name, len, with) (len >= sizeof(with) && memcmp(name+(len+1-sizeof(with)), with, sizeof(with)-1) == 0) /* register a file part of this source */ void sourceextraction_setpart(struct sourceextraction *e, int i, const char *basefilename) { size_t bl = strlen(basefilename); enum compression c; if (e->failed) return; c = compression_by_suffix(basefilename, &bl); if (endswith(basefilename, bl, ".dsc")) return; else if (endswith(basefilename, bl, ".diff")) { e->difffile = i; e->diffcompression = c; return; } else if (endswith(basefilename, bl, ".debian.tar")) { e->debiantarfile = i; e->debiancompression = c; return; } else if (endswith(basefilename, bl, ".tar")) { e->tarfile = i; e->tarcompression = c; return; } else { // TODO: errormessage e->failed = true; } } /* return the next needed file */ bool sourceextraction_needs(struct sourceextraction *e, int *ofs_p) { if (e->failed || e->completed) return false; if (e->difffile >= 0) { if (!uncompression_supported(e->diffcompression)) // TODO: errormessage return false; *ofs_p = e->difffile; return true; } else if (e->debiantarfile >= 0) { #ifdef HAVE_LIBARCHIVE if (!uncompression_supported(e->debiancompression)) return false; *ofs_p = e->debiantarfile; return true; #else return false; #endif } else if (e->tarfile >= 0) { #ifdef HAVE_LIBARCHIVE if (!uncompression_supported(e->tarcompression)) return false; *ofs_p = e->tarfile; return true; #else return false; #endif } else return false; } static retvalue parsediff(struct compressedfile *f, /*@null@*/char **section_p, /*@null@*/char **priority_p, bool *found_p) { size_t destlength, lines_in, lines_out; const char *p, *s; char *garbage; #define BUFSIZE 4096 char buffer[BUFSIZE]; int bytes_read, used = 0, filled = 0; auto inline bool u_getline(void); inline bool u_getline(void) { do { if (filled - used > 0) { char *n; p = buffer + used; n = memchr(p, '\n', filled - used); if (n != NULL) { used += 1 + (n - p); *n = '\0'; while (--n >= p && *n == '\r') *n = 
'\0'; return true; } } else { assert (filled == used); filled = 0; used = 0; } if (filled == BUFSIZE) { if (used == 0) /* overlong line */ return false; memmove(buffer, buffer + used, filled - used); filled -= used; used = 0; } bytes_read = uncompress_read(f, buffer + filled, BUFSIZE - filled); if (bytes_read <= 0) return false; filled += bytes_read; } while (true); } auto inline char u_overlinegetchar(void); inline char u_overlinegetchar(void) { const char *n; char ch; if (filled - used > 0) { ch = buffer[used]; } else { assert (filled == used); used = 0; bytes_read = uncompress_read(f, buffer, BUFSIZE); if (bytes_read <= 0) { filled = 0; return '\0'; } filled = bytes_read; ch = buffer[0]; } if (ch == '\n') return '\0'; /* over rest of the line */ n = memchr(buffer + used, '\n', filled - used); if (n != NULL) { used = 1 + (n - buffer); return ch; } used = 0; filled = 0; /* need to read more to get to the end of the line */ do { /* these lines can be long */ bytes_read = uncompress_read(f, buffer, BUFSIZE); if (bytes_read <= 0) return false; n = memchr(buffer, '\n', bytes_read); } while (n == NULL); used = 1 + (n - buffer); filled = bytes_read; return ch; } /* we are assuming the exact format dpkg-source generates here... */ if (!u_getline()) { /* empty or strange file */ *found_p = false; return RET_OK; } if (memcmp(p, "diff ", 4) == 0) { /* one exception is allowing diff lines, * as diff -ru adds them ... */ if (!u_getline()) { /* strange file */ *found_p = false; return RET_OK; } } if (unlikely(memcmp(p, "--- ", 4) != 0)) return RET_NOTHING; if (!u_getline()) /* so short a file? */ return RET_NOTHING; if (unlikely(memcmp(p, "+++ ", 4) != 0)) return RET_NOTHING; p += 4; s = strchr(p, '/'); if (unlikely(s == NULL)) return RET_NOTHING; s++; /* another exception to allow diff output directly: * +++ lines might have garbage after a tab... 
*/ garbage = strchr(s, '\t'); if (garbage != NULL) *garbage = '\0'; destlength = s - p; /* ignore all files that are not x/debian/control */ while (strcmp(s, "debian/control") != 0) { if (unlikely(interrupted())) return RET_ERROR_INTERRUPTED; if (!u_getline()) return RET_NOTHING; while (memcmp(p, "@@ -", 4) == 0) { if (unlikely(interrupted())) return RET_ERROR_INTERRUPTED; p += 4; while (*p != ',' && *p != ' ') { if (unlikely(*p == '\0')) return RET_NOTHING; p++; } if (*p == ' ') lines_in = 1; else { p++; lines_in = 0; while (*p >= '0' && *p <= '9') { lines_in = 10*lines_in + (*p-'0'); p++; } } while (*p == ' ') p++; if (unlikely(*(p++) != '+')) return RET_NOTHING; while (*p >= '0' && *p <= '9') p++; if (*p == ',') { p++; lines_out = 0; while (*p >= '0' && *p <= '9') { lines_out = 10*lines_out + (*p-'0'); p++; } } else if (*p == ' ') lines_out = 1; else return RET_NOTHING; while (*p == ' ') p++; if (unlikely(*p != '@')) return RET_NOTHING; while (lines_in > 0 || lines_out > 0) { char ch; ch = u_overlinegetchar(); switch (ch) { case '+': if (unlikely(lines_out == 0)) return RET_NOTHING; lines_out--; break; case ' ': if (unlikely(lines_out == 0)) return RET_NOTHING; lines_out--; /* no break */ case '-': if (unlikely(lines_in == 0)) return RET_NOTHING; lines_in--; break; default: return RET_NOTHING; } } if (!u_getline()) { *found_p = false; /* nothing found successfully */ return RET_OK; } } if (memcmp(p, "\\ No newline at end of file", 27) == 0) { if (!u_getline()) { /* nothing found successfully */ *found_p = false; return RET_OK; } } if (memcmp(p, "diff ", 4) == 0) { if (!u_getline()) { /* strange file, but nothing explicitly wrong */ *found_p = false; return RET_OK; } } if (unlikely(memcmp(p, "--- ", 4) != 0)) return RET_NOTHING; if (!u_getline()) return RET_NOTHING; if (unlikely(memcmp(p, "+++ ", 4) != 0)) return RET_NOTHING; p += 4; s = strchr(p, '/'); if (unlikely(s == NULL)) return RET_NOTHING; /* another exception to allow diff output directly: * +++ lines 
might have garbage after a tab... */ garbage = strchr(s, '\t'); if (garbage != NULL) *garbage = '\0'; /* if it does not always have the same directory, then * we cannot be sure it has no debian/control, so we * have to fail... */ s++; if (s != p + destlength) return RET_NOTHING; } /* found debian/control */ if (!u_getline()) return RET_NOTHING; if (unlikely(memcmp(p, "@@ -", 4) != 0)) return RET_NOTHING; p += 4; p++; while (*p != ',' && *p != ' ') { if (unlikely(*p == '\0')) return RET_NOTHING; p++; } if (*p == ',') { p++; while (*p >= '0' && *p <= '9') p++; } while (*p == ' ') p++; if (unlikely(*(p++) != '+')) return RET_NOTHING; if (*(p++) != '1' || *(p++) != ',') { /* a diff not starting at the first line (or not being * more than one line) is not yet supported */ return RET_NOTHING; } lines_out = 0; while (*p >= '0' && *p <= '9') { lines_out = 10*lines_out + (*p-'0'); p++; } while (*p == ' ') p++; if (unlikely(*p != '@')) return RET_NOTHING; while (lines_out > 0) { if (unlikely(interrupted())) return RET_ERROR_INTERRUPTED; if (!u_getline()) return RET_NOTHING; switch (*(p++)) { case '-': break; default: return RET_NOTHING; case ' ': case '+': if (unlikely(lines_out == 0)) return RET_NOTHING; lines_out--; if (section_p != NULL && strncasecmp(p, "Section:", 8) == 0) { p += 8; while (*p == ' ' || *p == '\t') p++; s = p; while (*s != ' ' && *s != '\t' && *s != '\0' && *s != '\r') s++; if (s == p) return RET_NOTHING; *section_p = strndup(p, s-p); if (FAILEDTOALLOC(*section_p)) return RET_ERROR_OOM; while (*s == ' ' || *s == '\t' || *s == '\r') s++; if (*s != '\0') return RET_NOTHING; continue; } if (priority_p != NULL && strncasecmp(p, "Priority:", 9) == 0) { p += 9; while (*p == ' ' || *p == '\t') p++; s = p; while (*s != ' ' && *s != '\t' && *s != '\0' && *s != '\r') s++; if (s == p) return RET_NOTHING; *priority_p = strndup(p, s-p); if (FAILEDTOALLOC(*priority_p)) return RET_ERROR_OOM; while (*s == ' ' || *s == '\t' || *s == '\r') s++; if (*s != '\0') return 
RET_NOTHING; continue; } if (*p == '\0') { /* end of control data, we are * finished */ *found_p = true; return RET_OK; } break; } } /* cannot yet handle a .diff not containing the full control */ return RET_NOTHING; } #ifdef HAVE_LIBARCHIVE static retvalue read_source_control_file(struct sourceextraction *e, struct archive *tar, struct archive_entry *entry) { // TODO: implement... size_t size, len, controllen; ssize_t got; char *buffer; const char *aftercontrol; size = archive_entry_size(entry); if (size <= 0) return RET_NOTHING; if (size > 10*1024*1024) return RET_NOTHING; buffer = malloc(size+2); if (FAILEDTOALLOC(buffer)) return RET_ERROR_OOM; len = 0; while ((got = archive_read_data(tar, buffer+len, ((size_t)size+1)-len)) > 0 && !interrupted()) { len += got; if (len > size) { free(buffer); return RET_NOTHING; } } if (unlikely(interrupted())) { free(buffer); return RET_ERROR_INTERRUPTED; } if (got < 0) { free(buffer); return RET_NOTHING; } buffer[len] = '\0'; // TODO: allow a saved .diff for this file applied here controllen = chunk_extract(buffer, buffer, len, true, &aftercontrol); if (controllen == 0) { free(buffer); return RET_NOTHING; } if (e->section_p != NULL) (void)chunk_getvalue(buffer, "Section", e->section_p); if (e->priority_p != NULL) (void)chunk_getvalue(buffer, "Priority", e->priority_p); free(buffer); return RET_OK; } static int compressedfile_open(UNUSED(struct archive *a), UNUSED(void *v)) { return ARCHIVE_OK; } static int compressedfile_close(UNUSED(struct archive *a), UNUSED(void *v)) { return ARCHIVE_OK; } static ssize_t compressedfile_read(UNUSED(struct archive *a), void *d, const void **buffer_p) { struct compressedfile *f = d; // TODO malloc buffer instead static char mybuffer[4096]; *buffer_p = mybuffer; return uncompress_read(f, mybuffer, 4096); } static retvalue parse_tarfile(struct sourceextraction *e, const char *filename, enum compression c, /*@out@*/bool *found_p) { struct archive *tar; struct archive_entry *entry; struct 
compressedfile *file; int a; retvalue r, r2; /* While an .tar, especially an .orig.tar can be very ugly * (they should be pristine upstream tars, so dpkg-source works around * a lot of ugliness), * we are looking for debian/control. This is unlikely to be in an ugly * upstream tar verbatimly. */ if (!isregularfile(filename)) return RET_NOTHING; tar = archive_read_new(); if (FAILEDTOALLOC(tar)) return RET_ERROR_OOM; archive_read_support_format_tar(tar); archive_read_support_format_gnutar(tar); r = uncompress_open(&file, filename, c); if (!RET_IS_OK(r)) { archive_read_finish(tar); return r; } a = archive_read_open(tar, file, compressedfile_open, compressedfile_read, compressedfile_close); if (a != ARCHIVE_OK) { int err = archive_errno(tar); if (err != -EINVAL && err != 0) fprintf(stderr, "Error %d trying to extract control information from %s:\n" "%s\n", err, filename, archive_error_string(tar)); else fprintf(stderr, "Error trying to extract control information from %s:\n" "%s\n", filename, archive_error_string(tar)); archive_read_finish(tar); uncompress_abort(file); return RET_ERROR; } while ((a=archive_read_next_header(tar, &entry)) == ARCHIVE_OK) { const char *name = archive_entry_pathname(entry); const char *s; bool iscontrol; if (name[0] == '.' && name[1] == '/') name += 2; s = strchr(name, '/'); if (s == NULL) // TODO: is this already enough to give up totally? 
iscontrol = false; else iscontrol = strcmp(s+1, "debian/control") == 0 || strcmp(name, "debian/control") == 0; if (iscontrol) { r = read_source_control_file(e, tar, entry); archive_read_finish(tar); r2 = uncompress_error(file); RET_UPDATE(r, r2); uncompress_abort(file); *found_p = true; return r; } a = archive_read_data_skip(tar); if (a != ARCHIVE_OK) { int err = archive_errno(tar); printf("Error %d skipping %s within %s: %s\n", err, name, filename, archive_error_string(tar)); archive_read_finish(tar); if (err == 0 || err == -EINVAL) r = RET_ERROR; else r = RET_ERRNO(err); r2 = uncompress_error(file); RET_UPDATE(r, r2); uncompress_abort(file); return r; } if (interrupted()) return RET_ERROR_INTERRUPTED; } if (a != ARCHIVE_EOF) { int err = archive_errno(tar); fprintf(stderr, "Error %d reading %s: %s\n", err, filename, archive_error_string(tar)); archive_read_finish(tar); if (err == 0 || err == -EINVAL) r = RET_ERROR; else r = RET_ERRNO(err); r2 = uncompress_error(file); RET_UPDATE(r, r2); uncompress_abort(file); return r; } archive_read_finish(tar); *found_p = false; return uncompress_close(file); } #endif /* full file name of requested files ready to analyse */ retvalue sourceextraction_analyse(struct sourceextraction *e, const char *fullfilename) { retvalue r; bool found; #ifndef HAVE_LIBARCHIVE assert (e->difffile >= 0); #endif if (e->difffile >= 0) { struct compressedfile *f; assert (uncompression_supported(e->diffcompression)); e->difffile = -1; r = uncompress_open(&f, fullfilename, e->diffcompression); if (!RET_IS_OK(r)) { e->failed = true; /* being unable to read a file is no hard error... 
*/ return RET_NOTHING; } r = parsediff(f, e->section_p, e->priority_p, &found); if (RET_IS_OK(r)) { if (!found) r = uncompress_close(f); else { r = uncompress_error(f); uncompress_abort(f); } } else { uncompress_abort(f); } if (!RET_IS_OK(r)) e->failed = true; else if (found) /* do not look in the tar, we found debian/control */ e->completed = true; return r; } #ifdef HAVE_LIBARCHIVE if (e->debiantarfile >= 0) { e->debiantarfile = -1; r = parse_tarfile(e, fullfilename, e->debiancompression, &found); if (!RET_IS_OK(r)) e->failed = true; else if (found) /* do not look in the tar, we found debian/control */ e->completed = true; return r; } #endif /* if it's not the diff nor the .debian.tar, look into the .tar file: */ assert (e->tarfile >= 0); e->tarfile = -1; #ifdef HAVE_LIBARCHIVE r = parse_tarfile(e, fullfilename, e->tarcompression, &found); if (!RET_IS_OK(r)) e->failed = true; else if (found) /* do not look in the tar, we found debian/control */ e->completed = true; return r; #else return RET_NOTHING; #endif } retvalue sourceextraction_finish(struct sourceextraction *e) { if (e->completed) { free(e); return RET_OK; } free(e); return RET_NOTHING; } reprepro-4.13.1/outhook.h0000644000175100017510000000050112152651661012223 00000000000000#ifndef REPREPRO_OUTHOOK_H #define REPREPRO_OUTHOOK_H #ifndef REPREPRO_ATOMS_H #include "atoms.h" #endif retvalue outhook_start(void); void outhook_send(const char *, const char *, const char *, const char *); void outhook_sendpool(component_t, const char *, const char *); retvalue outhook_call(const char *); #endif reprepro-4.13.1/database.c0000644000175100017510000016607612152651661012316 00000000000000/* This file is part of "reprepro" * Copyright (C) 2007,2008 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "globals.h" #include "error.h" #include "ignore.h" #include "strlist.h" #include "names.h" #include "database.h" #include "dirs.h" #include "filecntl.h" #include "files.h" #include "filelist.h" #include "reference.h" #include "tracking.h" #include "dpkgversions.h" #include "distribution.h" #include "database_p.h" #define STRINGIFY(x) #x #define TOSTRING(x) STRINGIFY(x) #define LIBDB_VERSION_STRING "bdb" TOSTRING(DB_VERSION_MAJOR) "." TOSTRING(DB_VERSION_MINOR) "." 
TOSTRING(DB_VERSION_PATCH) #define CLEARDBT(dbt) { memset(&dbt, 0, sizeof(dbt)); } #define SETDBT(dbt, datastr) {const char *my = datastr; memset(&dbt, 0, sizeof(dbt)); dbt.data = (void *)my; dbt.size = strlen(my) + 1;} #define SETDBTl(dbt, datastr, datasize) {const char *my = datastr; memset(&dbt, 0, sizeof(dbt)); dbt.data = (void *)my; dbt.size = datasize;} static bool rdb_initialized, rdb_used, rdb_locked, rdb_verbose; static int rdb_dircreationdepth; static bool rdb_nopackages, rdb_readonly; static bool rdb_packagesdatabaseopen; static bool rdb_trackingdatabaseopen; static /*@null@*/ char *rdb_version, *rdb_lastsupportedversion, *rdb_dbversion, *rdb_lastsupporteddbversion; struct table *rdb_checksums, *rdb_contents; struct table *rdb_references; static struct { bool createnewtables; } rdb_capabilities; static void database_free(void) { if (!rdb_initialized) return; free(rdb_version); rdb_version = NULL; free(rdb_lastsupportedversion); rdb_lastsupportedversion = NULL; free(rdb_dbversion); rdb_dbversion = NULL; free(rdb_lastsupporteddbversion); rdb_lastsupporteddbversion = NULL; rdb_initialized = false; } static inline char *dbfilename(const char *filename) { return calc_dirconcat(global.dbdir, filename); } /**********************/ /* lock file handling */ /**********************/ static retvalue database_lock(size_t waitforlock) { char *lockfile; int fd; retvalue r; size_t tries = 0; assert (!rdb_locked); rdb_dircreationdepth = 0; r = dir_create_needed(global.dbdir, &rdb_dircreationdepth); if (RET_WAS_ERROR(r)) return r; lockfile = dbfilename("lockfile"); if (FAILEDTOALLOC(lockfile)) return RET_ERROR_OOM; fd = open(lockfile, O_WRONLY|O_CREAT|O_EXCL|O_NOFOLLOW|O_NOCTTY, S_IRUSR|S_IWUSR); while (fd < 0) { int e = errno; if (e == EEXIST) { if (tries < waitforlock && ! 
interrupted()) { unsigned int timetosleep = 10; if (verbose >= 0) printf( "Could not aquire lock: %s already exists!\nWaiting 10 seconds before trying again.\n", lockfile); while (timetosleep > 0) timetosleep = sleep(timetosleep); tries++; fd = open(lockfile, O_WRONLY|O_CREAT|O_EXCL |O_NOFOLLOW|O_NOCTTY, S_IRUSR|S_IWUSR); continue; } fprintf(stderr, "The lock file '%s' already exists. There might be another instance with the\n" "same database dir running. To avoid locking overhead, only one process\n" "can access the database at the same time. Do not delete the lock file unless\n" "you are sure no other version is still running!\n", lockfile); } else fprintf(stderr, "Error %d creating lock file '%s': %s!\n", e, lockfile, strerror(e)); free(lockfile); return RET_ERRNO(e); } // TODO: do some more locking of this file to avoid problems // with the non-atomity of O_EXCL with nfs-filesystems... if (close(fd) != 0) { int e = errno; fprintf(stderr, "(Late) Error %d creating lock file '%s': %s!\n", e, lockfile, strerror(e)); (void)unlink(lockfile); free(lockfile); return RET_ERRNO(e); } free(lockfile); rdb_locked = true; return RET_OK; } static void releaselock(void) { char *lockfile; assert (rdb_locked); lockfile = dbfilename("lockfile"); if (lockfile == NULL) return; if (unlink(lockfile) != 0) { int e = errno; fprintf(stderr, "Error %d deleting lock file '%s': %s!\n", e, lockfile, strerror(e)); (void)unlink(lockfile); } free(lockfile); dir_remove_new(global.dbdir, rdb_dircreationdepth); rdb_locked = false; } static retvalue writeversionfile(void); retvalue database_close(void) { retvalue result = RET_OK, r; if (rdb_references != NULL) { r = table_close(rdb_references); RET_UPDATE(result, r); rdb_references = NULL; } if (rdb_checksums != NULL) { r = table_close(rdb_checksums); RET_UPDATE(result, r); rdb_checksums = NULL; } if (rdb_contents != NULL) { r = table_close(rdb_contents); RET_UPDATE(result, r); rdb_contents = NULL; } r = writeversionfile(); RET_UPDATE(result, r); 
if (rdb_locked) releaselock(); database_free(); return result; } static retvalue database_hasdatabasefile(const char *filename, /*@out@*/bool *exists_p) { char *fullfilename; fullfilename = dbfilename(filename); if (FAILEDTOALLOC(fullfilename)) return RET_ERROR_OOM; *exists_p = isregularfile(fullfilename); free(fullfilename); return RET_OK; } enum database_type { dbt_QUERY, dbt_BTREE, dbt_BTREEDUP, dbt_BTREEPAIRS, dbt_HASH, dbt_COUNT /* must be last */ }; static const uint32_t types[dbt_COUNT] = { DB_UNKNOWN, DB_BTREE, DB_BTREE, DB_BTREE, DB_HASH }; static int paireddatacompare(UNUSED(DB *db), const DBT *a, const DBT *b); static retvalue database_opentable(const char *filename, /*@null@*/const char *subtable, enum database_type type, uint32_t flags, /*@out@*/DB **result) { char *fullfilename; DB *table; int dbret; fullfilename = dbfilename(filename); if (FAILEDTOALLOC(fullfilename)) return RET_ERROR_OOM; dbret = db_create(&table, NULL, 0); if (dbret != 0) { fprintf(stderr, "db_create: %s\n", db_strerror(dbret)); free(fullfilename); return RET_DBERR(dbret); } if (type == dbt_BTREEDUP || type == dbt_BTREEPAIRS) { dbret = table->set_flags(table, DB_DUPSORT); if (dbret != 0) { table->err(table, dbret, "db_set_flags(DB_DUPSORT):"); (void)table->close(table, 0); free(fullfilename); return RET_DBERR(dbret); } } if (type == dbt_BTREEPAIRS) { dbret = table->set_dup_compare(table, paireddatacompare); if (dbret != 0) { table->err(table, dbret, "db_set_dup_compare:"); (void)table->close(table, 0); free(fullfilename); return RET_DBERR(dbret); } } #if DB_VERSION_MAJOR == 5 #define DB_OPEN(database, filename, name, type, flags) \ database->open(database, NULL, filename, name, type, flags, 0664) #else #if DB_VERSION_MAJOR == 4 #define DB_OPEN(database, filename, name, type, flags) \ database->open(database, NULL, filename, name, type, flags, 0664) #else #if DB_VERSION_MAJOR == 3 #define DB_OPEN(database, filename, name, type, flags) \ database->open(database, filename, name, type, 
flags, 0664) #else #error Unexpected DB_VERSION_MAJOR! #endif #endif #endif dbret = DB_OPEN(table, fullfilename, subtable, types[type], flags); if (dbret == ENOENT && !ISSET(flags, DB_CREATE)) { (void)table->close(table, 0); free(fullfilename); return RET_NOTHING; } if (dbret != 0) { if (subtable != NULL) table->err(table, dbret, "db_open(%s:%s)[%d]", fullfilename, subtable, dbret); else table->err(table, dbret, "db_open(%s)[%d]", fullfilename, dbret); (void)table->close(table, 0); free(fullfilename); return RET_DBERR(dbret); } free(fullfilename); *result = table; return RET_OK; } retvalue database_listsubtables(const char *filename, struct strlist *result) { DB *table; DBC *cursor; DBT key, data; int dbret; retvalue ret, r; struct strlist ids; r = database_opentable(filename, NULL, dbt_QUERY, DB_RDONLY, &table); if (!RET_IS_OK(r)) return r; cursor = NULL; if ((dbret = table->cursor(table, NULL, &cursor, 0)) != 0) { table->err(table, dbret, "cursor(%s):", filename); (void)table->close(table, 0); return RET_ERROR; } CLEARDBT(key); CLEARDBT(data); strlist_init(&ids); ret = RET_NOTHING; while ((dbret=cursor->c_get(cursor, &key, &data, DB_NEXT)) == 0) { char *identifier = strndup(key.data, key.size); if (FAILEDTOALLOC(identifier)) { (void)table->close(table, 0); strlist_done(&ids); return RET_ERROR_OOM; } r = strlist_add(&ids, identifier); if (RET_WAS_ERROR(r)) { (void)table->close(table, 0); strlist_done(&ids); return r; } ret = RET_OK; CLEARDBT(key); CLEARDBT(data); } if (dbret != 0 && dbret != DB_NOTFOUND) { table->err(table, dbret, "c_get(%s):", filename); (void)table->close(table, 0); strlist_done(&ids); return RET_DBERR(dbret); } if ((dbret = cursor->c_close(cursor)) != 0) { table->err(table, dbret, "c_close(%s):", filename); (void)table->close(table, 0); strlist_done(&ids); return RET_DBERR(dbret); } dbret = table->close(table, 0); if (dbret != 0) { table->err(table, dbret, "close(%s):", filename); strlist_done(&ids); return RET_DBERR(dbret); } else { 
strlist_move(result, &ids); return ret; } } retvalue database_dropsubtable(const char *table, const char *subtable) { char *filename; DB *db; int dbret; filename = dbfilename(table); if (FAILEDTOALLOC(filename)) return RET_ERROR_OOM; if ((dbret = db_create(&db, NULL, 0)) != 0) { fprintf(stderr, "db_create: %s %s\n", filename, db_strerror(dbret)); free(filename); return RET_DBERR(dbret); } dbret = db->remove(db, filename, subtable, 0); if (dbret == ENOENT) { free(filename); return RET_NOTHING; } if (dbret != 0) { fprintf(stderr, "Error removing '%s' from %s!\n", subtable, filename); free(filename); return RET_DBERR(dbret); } free(filename); return RET_OK; } static inline bool targetisdefined(const char *identifier, struct distribution *distributions) { struct distribution *d; struct target *t; for (d = distributions ; d != NULL ; d = d->next) { for (t = d->targets; t != NULL ; t = t->next) { if (strcmp(t->identifier, identifier) == 0) { t->existed = true; return true; } } } return false; } static retvalue warnidentifers(const struct strlist *identifiers, struct distribution *distributions, bool readonly) { struct distribution *d; struct target *t; const char *identifier; retvalue r; int i; for (i = 0; i < identifiers->count ; i++) { identifier = identifiers->values[i]; if (targetisdefined(identifier, distributions)) continue; fprintf(stderr, "Error: packages database contains unused '%s' database.\n", identifier); if (ignored[IGN_undefinedtarget] == 0) { (void)fputs( "This usually means you removed some component, architecture or even\n" "a whole distribution from conf/distributions.\n" "In that case you most likely want to call reprepro clearvanished to get rid\n" "of the databases belonging to those removed parts.\n" "(Another reason to get this error is using conf/ and db/ directories\n" " belonging to different reprepro repositories).\n", stderr); } if (IGNORABLE(undefinedtarget)) { (void)fputs( "Ignoring as --ignore=undefinedtarget given.\n", stderr); 
ignored[IGN_undefinedtarget]++; continue; } (void)fputs( "To ignore use --ignore=undefinedtarget.\n", stderr); return RET_ERROR; } if (readonly) return RET_OK; for (d = distributions ; d != NULL ; d = d->next) { bool architecture_existed[d->architectures.count]; bool have_old = false; /* check for new architectures */ memset(architecture_existed, 0, sizeof(architecture_existed)); for (t = d->targets; t != NULL ; t = t->next) { int o; if (!t->existed) continue; o = atomlist_ofs(&d->architectures, t->architecture); assert (o >= 0); if (o >= 0) { architecture_existed[o] = true; /* only warn about new ones if there * is at least one old one, otherwise * it's just a new distribution */ have_old = true; } } for (i = 0 ; have_old && i < d->architectures.count ; i++) { architecture_t a; if (architecture_existed[i]) continue; a = d->architectures.atoms[i]; fprintf(stderr, "New architecture '%s' in '%s'. Perhaps you want to call\n" "reprepro flood '%s' '%s'\n" "to populate it with architecture 'all' packages from other architectures.\n", atoms_architectures[a], d->codename, d->codename, atoms_architectures[a]); } /* create databases, so we know next time what is new */ for (t = d->targets; t != NULL ; t = t->next) { if (t->existed) continue; /* create database now, to test it can be created * early, and to know when new architectures * arrive in the future. 
*/ r = target_initpackagesdb(t, READWRITE); if (RET_WAS_ERROR(r)) return r; r = target_closepackagesdb(t); if (RET_WAS_ERROR(r)) return r; } } return RET_OK; } static retvalue warnunusedtracking(const struct strlist *codenames, const struct distribution *distributions) { const char *codename; const struct distribution *d; int i; for (i = 0; i < codenames->count ; i++) { codename = codenames->values[i]; d = distributions; while (d != NULL && strcmp(d->codename, codename) != 0) d = d->next; if (d != NULL && d->tracking != dt_NONE) continue; fprintf(stderr, "Error: tracking database contains unused '%s' database.\n", codename); if (ignored[IGN_undefinedtracking] == 0) { if (d == NULL) (void)fputs( "This either means you removed a distribution from the distributions config\n" "file without calling clearvanished (or at least removealltracks), you\n" "experienced a bug in retrack in versions < 3.0.0, you found a new bug or your\n" "config does not belong to this database.\n", stderr); else (void)fputs( "This either means you removed the Tracking: options from this distribution without\n" "calling removealltracks for it, or your config does not belong to this database.\n", stderr); } if (IGNORABLE(undefinedtracking)) { (void)fputs( "Ignoring as --ignore=undefinedtracking given.\n", stderr); ignored[IGN_undefinedtracking]++; continue; } (void)fputs("To ignore use --ignore=undefinedtracking.\n", stderr); return RET_ERROR; } return RET_OK; } static retvalue readline(/*@out@*/char **result, FILE *f, const char *versionfilename) { char buffer[21]; size_t l; if (fgets(buffer, 20, f) == NULL) { int e = errno; if (e == 0) { fprintf(stderr, "Error reading '%s': unexpected empty file\n", versionfilename); return RET_ERROR; } else { fprintf(stderr, "Error reading '%s': %s(errno is %d)\n", versionfilename, strerror(e), e); return RET_ERRNO(e); } } l = strlen(buffer); while (l > 0 && (buffer[l-1] == '\r' || buffer[l-1] == '\n')) { buffer[--l] = '\0'; } if (l == 0) { fprintf(stderr, 
"Error reading '%s': unexpcted empty line.\n", versionfilename); return RET_ERROR; } *result = strdup(buffer); if (FAILEDTOALLOC(*result)) return RET_ERROR_OOM; return RET_OK; } static retvalue readversionfile(bool nopackagesyet) { char *versionfilename; FILE *f; retvalue r; int c; versionfilename = dbfilename("version"); if (FAILEDTOALLOC(versionfilename)) return RET_ERROR_OOM; f = fopen(versionfilename, "r"); if (f == NULL) { int e = errno; if (e != ENOENT) { fprintf(stderr, "Error opening '%s': %s(errno is %d)\n", versionfilename, strerror(e), e); free(versionfilename); return RET_ERRNO(e); } free(versionfilename); if (nopackagesyet) { /* set to default for new packages.db files: */ rdb_version = strdup(VERSION); if (FAILEDTOALLOC(rdb_version)) return RET_ERROR_OOM; rdb_capabilities.createnewtables = true; } else rdb_version = NULL; rdb_lastsupportedversion = NULL; rdb_dbversion = NULL; rdb_lastsupporteddbversion = NULL; return RET_NOTHING; } /* first line is the version creating this database */ r = readline(&rdb_version, f, versionfilename); if (RET_WAS_ERROR(r)) { (void)fclose(f); free(versionfilename); return r; } /* second line says which versions of reprepro will be able to cope * with this database */ r = readline(&rdb_lastsupportedversion, f, versionfilename); if (RET_WAS_ERROR(r)) { (void)fclose(f); free(versionfilename); return r; } /* next line is the version of the underlying database library */ r = readline(&rdb_dbversion, f, versionfilename); if (RET_WAS_ERROR(r)) { (void)fclose(f); free(versionfilename); return r; } /* and then the minimum version of this library needed. 
*/ r = readline(&rdb_lastsupporteddbversion, f, versionfilename); if (RET_WAS_ERROR(r)) { (void)fclose(f); free(versionfilename); return r; } (void)fclose(f); free(versionfilename); /* check for enabled capabilities in the version */ r = dpkgversions_cmp(rdb_version, "3", &c); if (RET_WAS_ERROR(r)) return r; if (c >= 0) rdb_capabilities.createnewtables = true; /* ensure we can understand it */ r = dpkgversions_cmp(VERSION, rdb_lastsupportedversion, &c); if (RET_WAS_ERROR(r)) return r; if (c < 0) { fprintf(stderr, "According to %s/version this database was created with a future version\n" "and uses features this version cannot understand. Aborting...\n", global.dbdir); return RET_ERROR; } /* ensure it's a libdb database: */ if (strncmp(rdb_dbversion, "bdb", 3) != 0) { fprintf(stderr, "According to %s/version this database was created with a yet unsupported\n" "database library. Aborting...\n", global.dbdir); return RET_ERROR; } if (strncmp(rdb_lastsupporteddbversion, "bdb", 3) != 0) { fprintf(stderr, "According to %s/version this database was created with a yet unsupported\n" "database library. Aborting...\n", global.dbdir); return RET_ERROR; } r = dpkgversions_cmp(LIBDB_VERSION_STRING, rdb_lastsupporteddbversion, &c); if (RET_WAS_ERROR(r)) return r; if (c < 0) { fprintf(stderr, "According to %s/version this database was created with a future version\n" "%s of libdb. The libdb version this binary is linked against cannot yet\n" "handle this format. 
Aborting...\n", global.dbdir, rdb_dbversion + 3); return RET_ERROR; } return RET_OK; } static retvalue writeversionfile(void) { char *versionfilename, *finalversionfilename; FILE *f; int i, e; versionfilename = dbfilename("version.new"); if (FAILEDTOALLOC(versionfilename)) return RET_ERROR_OOM; f = fopen(versionfilename, "w"); if (f == NULL) { e = errno; fprintf(stderr, "Error creating '%s': %s(errno is %d)\n", versionfilename, strerror(e), e); free(versionfilename); return RET_ERRNO(e); } if (rdb_version == NULL) (void)fputs("0\n", f); else { (void)fputs(rdb_version, f); (void)fputc('\n', f); } if (rdb_lastsupportedversion == NULL) { (void)fputs("3.3.0\n", f); } else { int c; retvalue r; r = dpkgversions_cmp(rdb_lastsupportedversion, "3.3.0", &c); if (!RET_IS_OK(r) || c < 0) (void)fputs("3.3.0\n", f); else { (void)fputs(rdb_lastsupportedversion, f); (void)fputc('\n', f); } } if (rdb_dbversion == NULL) fprintf(f, "bdb%d.%d.%d\n", DB_VERSION_MAJOR, DB_VERSION_MINOR, DB_VERSION_PATCH); else { (void)fputs(rdb_dbversion, f); (void)fputc('\n', f); } if (rdb_lastsupporteddbversion == NULL) fprintf(f, "bdb%d.%d.0\n", DB_VERSION_MAJOR, DB_VERSION_MINOR); else { (void)fputs(rdb_lastsupporteddbversion, f); (void)fputc('\n', f); } e = ferror(f); if (e != 0) { fprintf(stderr, "Error writing '%s': %s(errno is %d)\n", versionfilename, strerror(e), e); (void)fclose(f); unlink(versionfilename); free(versionfilename); return RET_ERRNO(e); } if (fclose(f) != 0) { e = errno; fprintf(stderr, "Error writing '%s': %s(errno is %d)\n", versionfilename, strerror(e), e); unlink(versionfilename); free(versionfilename); return RET_ERRNO(e); } finalversionfilename = dbfilename("version"); if (FAILEDTOALLOC(finalversionfilename)) { unlink(versionfilename); free(versionfilename); return RET_ERROR_OOM; } i = rename(versionfilename, finalversionfilename); if (i != 0) { e = errno; fprintf(stderr, "Error %d moving '%s' to '%s': %s\n", e, versionfilename, finalversionfilename, strerror(e)); 
(void)unlink(versionfilename); free(versionfilename); free(finalversionfilename); return RET_ERRNO(e); } free(finalversionfilename); free(versionfilename); return RET_OK; } static retvalue createnewdatabase(struct distribution *distributions) { struct distribution *d; struct target *t; retvalue result = RET_NOTHING, r; for (d = distributions ; d != NULL ; d = d->next) { for (t = d->targets ; t != NULL ; t = t->next) { r = target_initpackagesdb(t, READWRITE); RET_UPDATE(result, r); if (RET_IS_OK(r)) { r = target_closepackagesdb(t); RET_UPDATE(result, r); } } } r = writeversionfile(); RET_UPDATE(result, r); return result; } /* Initialize a database. * - if not fast, make all kind of checks for consistency (TO BE IMPLEMENTED), * - if readonly, do not create but return with RET_NOTHING * - lock database, waiting a given amount of time if already locked */ retvalue database_create(struct distribution *alldistributions, bool fast, bool nopackages, bool allowunused, bool readonly, size_t waitforlock, bool verbosedb) { retvalue r; bool packagesfileexists, trackingfileexists, nopackagesyet; if (rdb_initialized || rdb_used) { fputs("Internal Error: database initialized a 2nd time!\n", stderr); return RET_ERROR_INTERNAL; } if (readonly && !isdir(global.dbdir)) { if (verbose >= 0) fprintf(stderr, "Exiting without doing anything, as there is no database yet that could result in other actions.\n"); return RET_NOTHING; } rdb_initialized = true; rdb_used = true; r = database_lock(waitforlock); assert (r != RET_NOTHING); if (!RET_IS_OK(r)) { database_free(); return r; } rdb_readonly = readonly; rdb_verbose = verbosedb; r = database_hasdatabasefile("packages.db", &packagesfileexists); if (RET_WAS_ERROR(r)) { releaselock(); database_free(); return r; } r = database_hasdatabasefile("tracking.db", &trackingfileexists); if (RET_WAS_ERROR(r)) { releaselock(); database_free(); return r; } nopackagesyet = !packagesfileexists && !trackingfileexists; r = readversionfile(nopackagesyet); if 
(RET_WAS_ERROR(r)) { releaselock(); database_free(); return r; } if (nopackages) { rdb_nopackages = true; return RET_OK; } if (nopackagesyet) { // TODO: handle readonly, but only once packages files may no // longer be generated when it is active... r = createnewdatabase(alldistributions); if (RET_WAS_ERROR(r)) { database_close(); return r; } } /* after this point we should call database_close, * as other stuff was handled, * so writing the version file cannot harm (and not doing so could) */ if (!allowunused && !fast && packagesfileexists) { struct strlist identifiers; r = database_listpackages(&identifiers); if (RET_WAS_ERROR(r)) { database_close(); return r; } if (r == RET_NOTHING) strlist_init(&identifiers); r = warnidentifers(&identifiers, alldistributions, readonly); if (RET_WAS_ERROR(r)) { strlist_done(&identifiers); database_close(); return r; } strlist_done(&identifiers); } if (!allowunused && !fast && trackingfileexists) { struct strlist codenames; r = tracking_listdistributions(&codenames); if (RET_WAS_ERROR(r)) { database_close(); return r; } if (RET_IS_OK(r)) { r = warnunusedtracking(&codenames, alldistributions); if (RET_WAS_ERROR(r)) { strlist_done(&codenames); database_close(); return r; } strlist_done(&codenames); } } return RET_OK; } /**************************************************************************** * Stuff string parts * ****************************************************************************/ static const char databaseerror[] = "Internal error of the underlying BerkeleyDB database:\n"; /**************************************************************************** * Stuff to handle data in tables * **************************************************************************** There is nothing that connot be solved by another layer of indirection, except too many levels of indirection. 
(Source forgotten) */ struct table { char *name, *subname; DB *berkeleydb; bool *flagreset; bool readonly, verbose; }; static void table_printerror(struct table *table, int dbret, const char *action) { if (table->subname != NULL) table->berkeleydb->err(table->berkeleydb, dbret, "%sWithin %s subtable %s at %s", databaseerror, table->name, table->subname, action); else table->berkeleydb->err(table->berkeleydb, dbret, "%sWithin %s at %s", databaseerror, table->name, action); } retvalue table_close(struct table *table) { int dbret; retvalue result; if (table == NULL) return RET_NOTHING; if (table->flagreset != NULL) *table->flagreset = false; if (table->berkeleydb == NULL) { assert (table->readonly); dbret = 0; } else dbret = table->berkeleydb->close(table->berkeleydb, 0); if (dbret != 0) { fprintf(stderr, "db_close(%s, %s): %s\n", table->name, table->subname, db_strerror(dbret)); result = RET_DBERR(dbret); } else result = RET_OK; free(table->name); free(table->subname); free(table); return result; } retvalue table_getrecord(struct table *table, const char *key, char **data_p) { int dbret; DBT Key, Data; assert (table != NULL); if (table->berkeleydb == NULL) { assert (table->readonly); return RET_NOTHING; } SETDBT(Key, key); CLEARDBT(Data); Data.flags = DB_DBT_MALLOC; dbret = table->berkeleydb->get(table->berkeleydb, NULL, &Key, &Data, 0); // TODO: find out what error code means out of memory... 
if (dbret == DB_NOTFOUND) return RET_NOTHING; if (dbret != 0) { table_printerror(table, dbret, "get"); return RET_DBERR(dbret); } if (FAILEDTOALLOC(Data.data)) return RET_ERROR_OOM; if (Data.size <= 0 || ((const char*)Data.data)[Data.size-1] != '\0') { if (table->subname != NULL) fprintf(stderr, "Database %s(%s) returned corrupted (not null-terminated) data!", table->name, table->subname); else fprintf(stderr, "Database %s returned corrupted (not null-terminated) data!", table->name); free(Data.data); return RET_ERROR; } *data_p = Data.data; return RET_OK; } retvalue table_getpair(struct table *table, const char *key, const char *value, /*@out@*/const char **data_p, /*@out@*/size_t *datalen_p) { int dbret; DBT Key, Data; size_t valuelen = strlen(value); assert (table != NULL); if (table->berkeleydb == NULL) { assert (table->readonly); return RET_NOTHING; } SETDBT(Key, key); SETDBTl(Data, value, valuelen + 1); dbret = table->berkeleydb->get(table->berkeleydb, NULL, &Key, &Data, DB_GET_BOTH); if (dbret == DB_NOTFOUND || dbret == DB_KEYEMPTY) return RET_NOTHING; if (dbret != 0) { table_printerror(table, dbret, "get(BOTH)"); return RET_DBERR(dbret); } if (FAILEDTOALLOC(Data.data)) return RET_ERROR_OOM; if (Data.size < valuelen + 2 || ((const char*)Data.data)[Data.size-1] != '\0') { if (table->subname != NULL) fprintf(stderr, "Database %s(%s) returned corrupted (not paired) data!", table->name, table->subname); else fprintf(stderr, "Database %s returned corrupted (not paired) data!", table->name); return RET_ERROR; } *data_p = ((const char*)Data.data) + valuelen + 1; *datalen_p = Data.size - valuelen - 2; return RET_OK; } retvalue table_gettemprecord(struct table *table, const char *key, const char **data_p, size_t *datalen_p) { int dbret; DBT Key, Data; assert (table != NULL); if (table->berkeleydb == NULL) { assert (table->readonly); return RET_NOTHING; } SETDBT(Key, key); CLEARDBT(Data); dbret = table->berkeleydb->get(table->berkeleydb, NULL, &Key, &Data, 0); // 
TODO: find out what error code means out of memory... if (dbret == DB_NOTFOUND) return RET_NOTHING; if (dbret != 0) { table_printerror(table, dbret, "get"); return RET_DBERR(dbret); } if (FAILEDTOALLOC(Data.data)) return RET_ERROR_OOM; if (data_p == NULL) { assert (datalen_p == NULL); return RET_OK; } if (Data.size <= 0 || ((const char*)Data.data)[Data.size-1] != '\0') { if (table->subname != NULL) fprintf(stderr, "Database %s(%s) returned corrupted (not null-terminated) data!", table->name, table->subname); else fprintf(stderr, "Database %s returned corrupted (not null-terminated) data!", table->name); return RET_ERROR; } *data_p = Data.data; if (datalen_p != NULL) *datalen_p = Data.size - 1; return RET_OK; } retvalue table_checkrecord(struct table *table, const char *key, const char *data) { int dbret; DBT Key, Data; DBC *cursor; retvalue r; SETDBT(Key, key); SETDBT(Data, data); dbret = table->berkeleydb->cursor(table->berkeleydb, NULL, &cursor, 0); if (dbret != 0) { table_printerror(table, dbret, "cursor"); return RET_DBERR(dbret); } dbret=cursor->c_get(cursor, &Key, &Data, DB_GET_BOTH); if (dbret == 0) { r = RET_OK; } else if (dbret == DB_NOTFOUND) { r = RET_NOTHING; } else { table_printerror(table, dbret, "c_get"); (void)cursor->c_close(cursor); return RET_DBERR(dbret); } dbret = cursor->c_close(cursor); if (dbret != 0) { table_printerror(table, dbret, "c_close"); return RET_DBERR(dbret); } return r; } retvalue table_removerecord(struct table *table, const char *key, const char *data) { int dbret; DBT Key, Data; DBC *cursor; retvalue r; SETDBT(Key, key); SETDBT(Data, data); dbret = table->berkeleydb->cursor(table->berkeleydb, NULL, &cursor, 0); if (dbret != 0) { table_printerror(table, dbret, "cursor"); return RET_DBERR(dbret); } dbret=cursor->c_get(cursor, &Key, &Data, DB_GET_BOTH); if (dbret == 0) dbret = cursor->c_del(cursor, 0); if (dbret == 0) { r = RET_OK; } else if (dbret == DB_NOTFOUND) { r = RET_NOTHING; } else { table_printerror(table, dbret, 
"c_get"); (void)cursor->c_close(cursor); return RET_DBERR(dbret); } dbret = cursor->c_close(cursor); if (dbret != 0) { table_printerror(table, dbret, "c_close"); return RET_DBERR(dbret); } return r; } bool table_recordexists(struct table *table, const char *key) { retvalue r; r = table_gettemprecord(table, key, NULL, NULL); return RET_IS_OK(r); } retvalue table_addrecord(struct table *table, const char *key, const char *data, size_t datalen, bool ignoredups) { int dbret; DBT Key, Data; assert (table != NULL); assert (!table->readonly && table->berkeleydb != NULL); SETDBT(Key, key); SETDBTl(Data, data, datalen + 1); dbret = table->berkeleydb->put(table->berkeleydb, NULL, &Key, &Data, DB_NODUPDATA); if (dbret != 0 && !(ignoredups && dbret == DB_KEYEXIST)) { table_printerror(table, dbret, "put"); return RET_DBERR(dbret); } if (table->verbose) { if (table->subname != NULL) printf("db: '%s' added to %s(%s).\n", key, table->name, table->subname); else printf("db: '%s' added to %s.\n", key, table->name); } return RET_OK; } retvalue table_adduniqsizedrecord(struct table *table, const char *key, const char *data, size_t data_size, bool allowoverwrite, bool nooverwrite) { int dbret; DBT Key, Data; assert (table != NULL); assert (!table->readonly && table->berkeleydb != NULL); assert (data_size > 0 && data[data_size-1] == '\0'); SETDBT(Key, key); SETDBTl(Data, data, data_size); dbret = table->berkeleydb->put(table->berkeleydb, NULL, &Key, &Data, allowoverwrite?0:DB_NOOVERWRITE); if (nooverwrite && dbret == DB_KEYEXIST) { /* if nooverwrite is set, do nothing and ignore: */ return RET_NOTHING; } if (dbret != 0) { table_printerror(table, dbret, "put(uniq)"); return RET_DBERR(dbret); } if (table->verbose) { if (table->subname != NULL) printf("db: '%s' added to %s(%s).\n", key, table->name, table->subname); else printf("db: '%s' added to %s.\n", key, table->name); } return RET_OK; } retvalue table_adduniqrecord(struct table *table, const char *key, const char *data) { return 
table_adduniqsizedrecord(table, key, data, strlen(data)+1, false, false); } retvalue table_deleterecord(struct table *table, const char *key, bool ignoremissing) { int dbret; DBT Key; assert (table != NULL); assert (!table->readonly && table->berkeleydb != NULL); SETDBT(Key, key); dbret = table->berkeleydb->del(table->berkeleydb, NULL, &Key, 0); if (dbret != 0) { if (dbret == DB_NOTFOUND && ignoremissing) return RET_NOTHING; table_printerror(table, dbret, "del"); if (dbret == DB_NOTFOUND) return RET_ERROR_MISSING; else return RET_DBERR(dbret); } if (table->verbose) { if (table->subname != NULL) printf("db: '%s' removed from %s(%s).\n", key, table->name, table->subname); else printf("db: '%s' removed from %s.\n", key, table->name); } return RET_OK; } retvalue table_replacerecord(struct table *table, const char *key, const char *data) { retvalue r; r = table_deleterecord(table, key, false); if (r != RET_ERROR_MISSING && RET_WAS_ERROR(r)) return r; return table_adduniqrecord(table, key, data); } struct cursor { DBC *cursor; uint32_t flags; retvalue r; }; retvalue table_newglobalcursor(struct table *table, struct cursor **cursor_p) { struct cursor *cursor; int dbret; if (table->berkeleydb == NULL) { assert (table->readonly); *cursor_p = NULL; return RET_OK; } cursor = zNEW(struct cursor); if (FAILEDTOALLOC(cursor)) return RET_ERROR_OOM; cursor->cursor = NULL; cursor->flags = DB_NEXT; cursor->r = RET_OK; dbret = table->berkeleydb->cursor(table->berkeleydb, NULL, &cursor->cursor, 0); if (dbret != 0) { table_printerror(table, dbret, "cursor"); free(cursor); return RET_DBERR(dbret); } *cursor_p = cursor; return RET_OK; } static inline retvalue parse_pair(struct table *table, DBT Key, DBT Data, /*@null@*//*@out@*/const char **key_p, /*@out@*/const char **value_p, /*@out@*/const char **data_p, /*@out@*/size_t *datalen_p) { /*@dependant@*/ const char *separator; if (Key.size == 0 || Data.size == 0 || ((const char*)Key.data)[Key.size-1] != '\0' || ((const 
char*)Data.data)[Data.size-1] != '\0') { if (table->subname != NULL) fprintf(stderr, "Database %s(%s) returned corrupted (not null-terminated) data!", table->name, table->subname); else fprintf(stderr, "Database %s returned corrupted (not null-terminated) data!", table->name); return RET_ERROR; } separator = memchr(Data.data, '\0', Data.size-1); if (separator == NULL) { if (table->subname != NULL) fprintf(stderr, "Database %s(%s) returned corrupted data!", table->name, table->subname); else fprintf(stderr, "Database %s returned corrupted data!", table->name); return RET_ERROR; } if (key_p != NULL) *key_p = Key.data; *value_p = Data.data; *data_p = separator + 1; *datalen_p = Data.size - (separator - (const char*)Data.data) - 2; return RET_OK; } retvalue table_newduplicatecursor(struct table *table, const char *key, struct cursor **cursor_p, const char **value_p, const char **data_p, size_t *datalen_p) { struct cursor *cursor; int dbret; DBT Key, Data; retvalue r; if (table->berkeleydb == NULL) { assert (table->readonly); *cursor_p = NULL; return RET_NOTHING; } cursor = zNEW(struct cursor); if (FAILEDTOALLOC(cursor)) return RET_ERROR_OOM; cursor->cursor = NULL; cursor->flags = DB_NEXT_DUP; cursor->r = RET_OK; dbret = table->berkeleydb->cursor(table->berkeleydb, NULL, &cursor->cursor, 0); if (dbret != 0) { table_printerror(table, dbret, "cursor"); free(cursor); return RET_DBERR(dbret); } SETDBT(Key, key); CLEARDBT(Data); dbret = cursor->cursor->c_get(cursor->cursor, &Key, &Data, DB_SET); if (dbret == DB_NOTFOUND || dbret == DB_KEYEMPTY) { (void)cursor->cursor->c_close(cursor->cursor); free(cursor); return RET_NOTHING; } if (dbret != 0) { table_printerror(table, dbret, "c_get(DB_SET)"); (void)cursor->cursor->c_close(cursor->cursor); free(cursor); return RET_DBERR(dbret); } r = parse_pair(table, Key, Data, NULL, value_p, data_p, datalen_p); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { (void)cursor->cursor->c_close(cursor->cursor); free(cursor); return r; } 
*cursor_p = cursor; return RET_OK; } retvalue table_newpairedcursor(struct table *table, const char *key, const char *value, struct cursor **cursor_p, const char **data_p, size_t *datalen_p) { struct cursor *cursor; int dbret; DBT Key, Data; retvalue r; size_t valuelen = strlen(value); if (table->berkeleydb == NULL) { assert (table->readonly); *cursor_p = NULL; return RET_NOTHING; } cursor = zNEW(struct cursor); if (FAILEDTOALLOC(cursor)) return RET_ERROR_OOM; cursor->cursor = NULL; /* cursor_next is not allowed with this type: */ cursor->flags = DB_GET_BOTH; cursor->r = RET_OK; dbret = table->berkeleydb->cursor(table->berkeleydb, NULL, &cursor->cursor, 0); if (dbret != 0) { table_printerror(table, dbret, "cursor"); free(cursor); return RET_DBERR(dbret); } SETDBT(Key, key); SETDBTl(Data, value, valuelen + 1); dbret = cursor->cursor->c_get(cursor->cursor, &Key, &Data, DB_GET_BOTH); if (dbret != 0) { if (dbret == DB_NOTFOUND || dbret == DB_KEYEMPTY) { table_printerror(table, dbret, "c_get(DB_GET_BOTH)"); r = RET_DBERR(dbret); } else r = RET_NOTHING; (void)cursor->cursor->c_close(cursor->cursor); free(cursor); return r; } if (Data.size < valuelen + 2 || ((const char*)Data.data)[Data.size-1] != '\0') { if (table->subname != NULL) fprintf(stderr, "Database %s(%s) returned corrupted (not paired) data!", table->name, table->subname); else fprintf(stderr, "Database %s returned corrupted (not paired) data!", table->name); (void)cursor->cursor->c_close(cursor->cursor); free(cursor); return RET_ERROR; } if (data_p != NULL) *data_p = ((const char*)Data.data) + valuelen + 1; if (datalen_p != NULL) *datalen_p = Data.size - valuelen - 2; *cursor_p = cursor; return RET_OK; } retvalue cursor_close(struct table *table, struct cursor *cursor) { int dbret; retvalue r; if (cursor == NULL) return RET_OK; r = cursor->r; dbret = cursor->cursor->c_close(cursor->cursor); cursor->cursor = NULL; free(cursor); if (dbret != 0) { table_printerror(table, dbret, "c_close"); RET_UPDATE(r, 
RET_DBERR(dbret)); } return r; } bool cursor_nexttemp(struct table *table, struct cursor *cursor, const char **key, const char **data) { DBT Key, Data; int dbret; if (cursor == NULL) return false; CLEARDBT(Key); CLEARDBT(Data); dbret = cursor->cursor->c_get(cursor->cursor, &Key, &Data, DB_NEXT); if (dbret == DB_NOTFOUND) return false; if (dbret != 0) { table_printerror(table, dbret, "c_get(DB_NEXT)"); cursor->r = RET_DBERR(dbret); return false; } if (Key.size <= 0 || Data.size <= 0 || ((const char*)Key.data)[Key.size-1] != '\0' || ((const char*)Data.data)[Data.size-1] != '\0') { if (table->subname != NULL) fprintf(stderr, "Database %s(%s) returned corrupted (not null-terminated) data!", table->name, table->subname); else fprintf(stderr, "Database %s returned corrupted (not null-terminated) data!", table->name); cursor->r = RET_ERROR; return false; } *key = Key.data; *data = Data.data; return true; } bool cursor_nexttempdata(struct table *table, struct cursor *cursor, const char **key, const char **data, size_t *len_p) { DBT Key, Data; int dbret; if (cursor == NULL) return false; CLEARDBT(Key); CLEARDBT(Data); dbret = cursor->cursor->c_get(cursor->cursor, &Key, &Data, DB_NEXT); if (dbret == DB_NOTFOUND) return false; if (dbret != 0) { table_printerror(table, dbret, "c_get(DB_NEXT)"); cursor->r = RET_DBERR(dbret); return false; } if (Key.size <= 0 || Data.size <= 0 || ((const char*)Key.data)[Key.size-1] != '\0' || ((const char*)Data.data)[Data.size-1] != '\0') { if (table->subname != NULL) fprintf(stderr, "Database %s(%s) returned corrupted (not null-terminated) data!", table->name, table->subname); else fprintf(stderr, "Database %s returned corrupted (not null-terminated) data!", table->name); cursor->r = RET_ERROR; return false; } if (key != NULL) *key = Key.data; *data = Data.data; *len_p = Data.size - 1; return true; } bool cursor_nextpair(struct table *table, struct cursor *cursor, /*@null@*/const char **key_p, const char **value_p, const char **data_p, size_t 
*datalen_p) { DBT Key, Data; int dbret; retvalue r; if (cursor == NULL) return false; CLEARDBT(Key); CLEARDBT(Data); dbret = cursor->cursor->c_get(cursor->cursor, &Key, &Data, cursor->flags); if (dbret == DB_NOTFOUND) return false; if (dbret != 0) { table_printerror(table, dbret, (cursor->flags==DB_NEXT) ? "c_get(DB_NEXT)" : (cursor->flags==DB_NEXT_DUP) ? "c_get(DB_NEXT_DUP)" : "c_get(DB_???NEXT)"); cursor->r = RET_DBERR(dbret); return false; } r = parse_pair(table, Key, Data, key_p, value_p, data_p, datalen_p); if (RET_WAS_ERROR(r)) { cursor->r = r; return false; } return true; } retvalue cursor_replace(struct table *table, struct cursor *cursor, const char *data, size_t datalen) { DBT Key, Data; int dbret; assert (cursor != NULL); assert (!table->readonly); CLEARDBT(Key); SETDBTl(Data, data, datalen + 1); dbret = cursor->cursor->c_put(cursor->cursor, &Key, &Data, DB_CURRENT); if (dbret != 0) { table_printerror(table, dbret, "c_put(DB_CURRENT)"); return RET_DBERR(dbret); } return RET_OK; } retvalue cursor_delete(struct table *table, struct cursor *cursor, const char *key, const char *value) { int dbret; assert (cursor != NULL); assert (!table->readonly); dbret = cursor->cursor->c_del(cursor->cursor, 0); if (dbret != 0) { table_printerror(table, dbret, "c_del"); return RET_DBERR(dbret); } if (table->verbose) { if (value != NULL) if (table->subname != NULL) printf("db: '%s' '%s' removed from %s(%s).\n", key, value, table->name, table->subname); else printf("db: '%s' '%s' removed from %s.\n", key, value, table->name); else if (table->subname != NULL) printf("db: '%s' removed from %s(%s).\n", key, table->name, table->subname); else printf("db: '%s' removed from %s.\n", key, table->name); } return RET_OK; } bool table_isempty(struct table *table) { DBC *cursor; DBT Key, Data; int dbret; dbret = table->berkeleydb->cursor(table->berkeleydb, NULL, &cursor, 0); if (dbret != 0) { table_printerror(table, dbret, "cursor"); return true; } CLEARDBT(Key); CLEARDBT(Data); dbret = 
cursor->c_get(cursor, &Key, &Data, DB_NEXT); if (dbret == DB_NOTFOUND) { (void)cursor->c_close(cursor); return true; } if (dbret != 0) { table_printerror(table, dbret, "c_get(DB_NEXT)"); (void)cursor->c_close(cursor); return true; } dbret = cursor->c_close(cursor); if (dbret != 0) table_printerror(table, dbret, "c_close"); return false; } /**************************************************************************** * Open the different types of tables with their needed flags: * ****************************************************************************/ static retvalue database_table(const char *filename, const char *subtable, enum database_type type, uint32_t flags, /*@out@*/struct table **table_p) { struct table *table; retvalue r; table = zNEW(struct table); if (FAILEDTOALLOC(table)) return RET_ERROR_OOM; /* TODO: is filename always an static constant? then we could drop the dup */ table->name = strdup(filename); if (FAILEDTOALLOC(table->name)) { free(table); return RET_ERROR_OOM; } if (subtable != NULL) { table->subname = strdup(subtable); if (FAILEDTOALLOC(table->subname)) { free(table->name); free(table); return RET_ERROR_OOM; } } else table->subname = NULL; table->readonly = ISSET(flags, DB_RDONLY); table->verbose = rdb_verbose; r = database_opentable(filename, subtable, type, flags, &table->berkeleydb); if (RET_WAS_ERROR(r)) { free(table->subname); free(table->name); free(table); return r; } if (r == RET_NOTHING) { if (ISSET(flags, DB_RDONLY)) { /* sometimes we don't want a return here, when? 
*/ table->berkeleydb = NULL; r = RET_OK; } else { free(table->subname); free(table->name); free(table); return r; } } *table_p = table; return r; } retvalue database_openreferences(void) { retvalue r; assert (rdb_references == NULL); r = database_table("references.db", "references", dbt_BTREEDUP, DB_CREATE, &rdb_references); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { rdb_references = NULL; return r; } else rdb_references->verbose = false; return RET_OK; } /* only compare the first 0-terminated part of the data */ static int paireddatacompare(UNUSED(DB *db), const DBT *a, const DBT *b) { if (a->size < b->size) return strncmp(a->data, b->data, a->size); else return strncmp(a->data, b->data, b->size); } retvalue database_opentracking(const char *codename, bool readonly, struct table **table_p) { struct table *table; retvalue r; if (rdb_nopackages) { (void)fputs( "Internal Error: Accessing packages database while that was not prepared!\n", stderr); return RET_ERROR; } if (rdb_trackingdatabaseopen) { (void)fputs( "Internal Error: Trying to open multiple tracking databases at the same time.\nThis should normaly not happen (to avoid triggering bugs in the underlying BerkeleyDB)\n", stderr); return RET_ERROR; } r = database_table("tracking.db", codename, dbt_BTREEPAIRS, readonly?DB_RDONLY:DB_CREATE, &table); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; table->flagreset = &rdb_trackingdatabaseopen; rdb_trackingdatabaseopen = true; *table_p = table; return RET_OK; } retvalue database_openpackages(const char *identifier, bool readonly, struct table **table_p) { struct table *table; retvalue r; if (rdb_nopackages) { (void)fputs( "Internal Error: Accessing packages database while that was not prepared!\n", stderr); return RET_ERROR; } if (rdb_packagesdatabaseopen) { (void)fputs( "Internal Error: Trying to open multiple packages databases at the same time.\n" "This should normaly not happen (to avoid triggering bugs in the underlying BerkeleyDB)\n", 
stderr); return RET_ERROR; } r = database_table("packages.db", identifier, dbt_BTREE, readonly?DB_RDONLY:DB_CREATE, &table); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; table->flagreset = &rdb_packagesdatabaseopen; rdb_packagesdatabaseopen = true; *table_p = table; return RET_OK; } /* Get a list of all identifiers having a package list */ retvalue database_listpackages(struct strlist *identifiers) { return database_listsubtables("packages.db", identifiers); } /* drop a database */ retvalue database_droppackages(const char *identifier) { return database_dropsubtable("packages.db", identifier); } retvalue database_openfiles(void) { retvalue r; struct strlist identifiers; bool checksumsexisted, oldfiles; assert (rdb_checksums == NULL); assert (rdb_contents == NULL); r = database_listsubtables("contents.cache.db", &identifiers); if (RET_IS_OK(r)) { if (strlist_in(&identifiers, "filelists")) { fprintf(stderr, "Your %s/contents.cache.db file still contains a table of cached file lists\n" "in the old (pre 3.0.0) format. You have to either delete that file (and lose\n" "all caches of file lists) or run reprepro with argument translatefilelists\n" "to translate the old caches into the new format.\n", global.dbdir); strlist_done(&identifiers); return RET_ERROR; } strlist_done(&identifiers); } r = database_hasdatabasefile("checksums.db", &checksumsexisted); r = database_table("checksums.db", "pool", dbt_BTREE, DB_CREATE, &rdb_checksums); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { rdb_checksums = NULL; return r; } r = database_hasdatabasefile("files.db", &oldfiles); if (RET_WAS_ERROR(r)) { (void)table_close(rdb_checksums); rdb_checksums = NULL; return r; } if (oldfiles) { fprintf(stderr, "Error: database uses deprecated format.\n" "Please run translatelegacychecksums to update to the new format first.\n"); return RET_ERROR; } // TODO: only create this file once it is actually needed... 
r = database_table("contents.cache.db", "compressedfilelists", dbt_BTREE, DB_CREATE, &rdb_contents); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { (void)table_close(rdb_checksums); rdb_checksums = NULL; rdb_contents = NULL; } return r; } retvalue database_openreleasecache(const char *codename, struct table **cachedb_p) { retvalue r; char *oldcachefilename; /* Since 3.1.0 it's release.caches.db, before release.cache.db. * The new file also contains the sha1 checksums and is extensible * for more in the future. Thus if there is only the old variant, * rename to the new. (So no old version by accident uses it and * puts the additional sha1 data into the md5sum fields.) * If both files are there, just delete both, as neither will * be very current then. * */ oldcachefilename = dbfilename("release.cache.db"); if (FAILEDTOALLOC(oldcachefilename)) return RET_ERROR_OOM; if (isregularfile(oldcachefilename)) { char *newcachefilename; newcachefilename = dbfilename("release.caches.db"); if (FAILEDTOALLOC(newcachefilename)) { free(oldcachefilename); return RET_ERROR_OOM; } if (isregularfile(newcachefilename) || rename(oldcachefilename, newcachefilename) != 0) { fprintf(stderr, "Deleting old-style export cache file %s!\n" "This means that all index files (even unchanged) will be rewritten the\n" "next time parts of their distribution are changed. 
This should only\n" "happen once while migration from pre-3.1.0 to later versions.\n", oldcachefilename); if (unlink(oldcachefilename) != 0) { int e = errno; fprintf(stderr, "Cannot delete '%s': %s!", oldcachefilename, strerror(e)); free(oldcachefilename); free(newcachefilename); return RET_ERRNO(e); } (void)unlink(oldcachefilename); } free(newcachefilename); } free(oldcachefilename); r = database_table("release.caches.db", codename, dbt_HASH, DB_CREATE, cachedb_p); if (RET_IS_OK(r)) (*cachedb_p)->verbose = false; return r; } static retvalue table_copy(struct table *oldtable, struct table *newtable) { retvalue r; struct cursor *cursor; const char *filekey, *data; size_t data_len; r = table_newglobalcursor(oldtable, &cursor); if (!RET_IS_OK(r)) return r; while (cursor_nexttempdata(oldtable, cursor, &filekey, &data, &data_len)) { r = table_adduniqsizedrecord(newtable, filekey, data, data_len+1, false, true); if (RET_WAS_ERROR(r)) return r; } return RET_OK; } retvalue database_translate_filelists(void) { char *dbname, *tmpdbname; struct table *oldtable, *newtable; struct strlist identifiers; int ret; retvalue r, r2; r = database_listsubtables("contents.cache.db", &identifiers); if (RET_IS_OK(r)) { if (!strlist_in(&identifiers, "filelists")) { fprintf(stderr, "Your %s/contents.cache.db file does not contain an old style database!\n", global.dbdir); strlist_done(&identifiers); return RET_NOTHING; } strlist_done(&identifiers); } dbname = dbfilename("contents.cache.db"); if (FAILEDTOALLOC(dbname)) return RET_ERROR_OOM; tmpdbname = dbfilename("old.contents.cache.db"); if (FAILEDTOALLOC(tmpdbname)) { free(dbname); return RET_ERROR_OOM; } ret = rename(dbname, tmpdbname); if (ret != 0) { int e = errno; fprintf(stderr, "Could not rename '%s' into '%s': %s(%d)\n", dbname, tmpdbname, strerror(e), e); free(dbname); free(tmpdbname); return RET_ERRNO(e); } newtable = NULL; r = database_table("contents.cache.db", "compressedfilelists", dbt_BTREE, DB_CREATE, &newtable); assert (r != 
RET_NOTHING); oldtable = NULL; if (RET_IS_OK(r)) { r = database_table("old.contents.cache.db", "filelists", dbt_BTREE, DB_RDONLY, &oldtable); if (r == RET_NOTHING) { fprintf(stderr, "Could not find old-style database!\n"); r = RET_ERROR; } } if (RET_IS_OK(r)) { r = filelists_translate(oldtable, newtable); if (r == RET_NOTHING) r = RET_OK; } r2 = table_close(oldtable); RET_ENDUPDATE(r, r2); oldtable = NULL; if (RET_IS_OK(r)) { /* copy the new-style database, */ r = database_table("old.contents.cache.db", "compressedfilelists", dbt_BTREE, DB_RDONLY, &oldtable); if (RET_IS_OK(r)) { /* if there is one... */ r = table_copy(oldtable, newtable); r2 = table_close(oldtable); RET_ENDUPDATE(r, r2); } if (r == RET_NOTHING) { r = RET_OK; } } r2 = table_close(newtable); RET_ENDUPDATE(r, r2); if (RET_IS_OK(r)) (void)unlink(tmpdbname); if (RET_WAS_ERROR(r)) { ret = rename(tmpdbname, dbname); if (ret != 0) { int e = errno; fprintf(stderr, "Could not rename '%s' back into '%s': %s(%d)\n", dbname, tmpdbname, strerror(e), e); free(tmpdbname); free(dbname); return RET_ERRNO(e); } free(tmpdbname); free(dbname); return r; } free(tmpdbname); free(dbname); return RET_OK; } /* This is already implemented as standalone functions duplicating a bit * of database_create and from files.c, * because database_create is planed to error out if * there is still an old * files.db and files.c is supposed to lose all support for it in the next * major version */ static inline retvalue translate(struct table *oldmd5sums, struct table *newchecksums) { long numold = 0, numnew = 0, numreplace = 0, numretro = 0; struct cursor *cursor, *newcursor; const char *filekey, *md5sum, *all; size_t alllen; retvalue r; /* first add all md5sums to checksums if not there yet */ r = table_newglobalcursor(oldmd5sums, &cursor); if (RET_WAS_ERROR(r)) return r; while (cursor_nexttemp(oldmd5sums, cursor, &filekey, &md5sum)) { struct checksums *n = NULL; const char *combined; size_t combinedlen; r = 
table_gettemprecord(newchecksums, filekey, &all, &alllen); if (RET_IS_OK(r)) r = checksums_setall(&n, all, alllen); if (RET_IS_OK(r)) { if (checksums_matches(n, cs_md5sum, md5sum)) { /* already there, nothing to do */ checksums_free(n); numnew++; continue; } /* new item does not match */ if (verbose > 0) printf( "Overwriting stale new-checksums entry '%s'!\n", filekey); numreplace++; checksums_free(n); n = NULL; } if (RET_WAS_ERROR(r)) { (void)cursor_close(oldmd5sums, cursor); return r; } /* parse and recreate, to only have sanitized strings * in the database */ r = checksums_parse(&n, md5sum); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { (void)cursor_close(oldmd5sums, cursor); return r; } r = checksums_getcombined(n, &combined, &combinedlen); assert (r != RET_NOTHING); if (!RET_IS_OK(r)) { (void)cursor_close(oldmd5sums, cursor); return r; } numold++; r = table_adduniqsizedrecord(newchecksums, filekey, combined, combinedlen + 1, true, false); assert (r != RET_NOTHING); if (!RET_IS_OK(r)) { (void)cursor_close(oldmd5sums, cursor); return r; } } r = cursor_close(oldmd5sums, cursor); if (RET_WAS_ERROR(r)) return r; /* then delete everything from checksums that is not in md5sums */ r = table_newglobalcursor(oldmd5sums, &cursor); if (RET_WAS_ERROR(r)) return r; r = table_newglobalcursor(newchecksums, &newcursor); if (RET_WAS_ERROR(r)) { cursor_close(oldmd5sums, cursor); return r; } while (cursor_nexttemp(oldmd5sums, cursor, &filekey, &md5sum)) { bool more; int cmp; const char *newfilekey, *dummy; do { more = cursor_nexttemp(newchecksums, newcursor, &newfilekey, &dummy); /* should have been added in the last step */ assert (more); cmp = strcmp(filekey, newfilekey); /* should have been added in the last step */ assert (cmp >= 0); more = cmp > 0; if (more) { numretro++; if (verbose > 0) printf( "Deleting stale new-checksums entry '%s'!\n", newfilekey); r = cursor_delete(newchecksums, newcursor, newfilekey, dummy); if (RET_WAS_ERROR(r)) { cursor_close(oldmd5sums, 
cursor); cursor_close(newchecksums, newcursor); return r; } } } while (more); } r = cursor_close(oldmd5sums, cursor); if (RET_WAS_ERROR(r)) return r; r = cursor_close(newchecksums, newcursor); if (RET_WAS_ERROR(r)) return r; if (verbose >= 0) { printf("%ld packages were already in the new checksums.db\n", numnew); printf("%ld packages were added to the new checksums.db\n", numold - numreplace); if (numretro != 0) printf( "%ld were only in checksums.db and not in files.db\n" "This should only have happened if you added them with a newer version\n" "and then deleted them with an older version of reprepro.\n", numretro); if (numreplace != 0) printf( "%ld were different checksums.db and not in files.db\n" "This should only have happened if you added them with a newer version\n" "and then deleted them with an older version of reprepro and\n" "then readded them with a old version.\n", numreplace); if (numretro != 0 || numreplace != 0) printf( "If you never run a old version after a new version,\n" "you might want to check with check and checkpool if something went wrong.\n"); } return RET_OK; } retvalue database_translate_legacy_checksums(bool verbosedb) { struct table *newchecksums, *oldmd5sums; char *fullfilename; retvalue r; int e; if (rdb_initialized || rdb_used) { fputs("Internal Error: database initialized a 2nd time!\n", stderr); return RET_ERROR_INTERNAL; } if (!isdir(global.dbdir)) { fprintf(stderr, "Cannot find directory '%s'!\n", global.dbdir); return RET_ERROR; } rdb_initialized = true; rdb_used = true; r = database_lock(0); assert (r != RET_NOTHING); if (!RET_IS_OK(r)) { database_free(); return r; } rdb_readonly = READWRITE; rdb_verbose = verbosedb; r = readversionfile(false); if (RET_WAS_ERROR(r)) { releaselock(); database_free(); return r; } r = database_table("files.db", "md5sums", dbt_BTREE, 0, &oldmd5sums); if (r == RET_NOTHING) { fprintf(stderr, "There is no old files.db in %s. 
Nothing to translate!\n", global.dbdir); releaselock(); database_free(); return RET_NOTHING; } else if (RET_WAS_ERROR(r)) { releaselock(); database_free(); return r; } r = database_table("checksums.db", "pool", dbt_BTREE, DB_CREATE, &newchecksums); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) { (void)table_close(oldmd5sums); releaselock(); database_free(); return r; } r = translate(oldmd5sums, newchecksums); if (RET_WAS_ERROR(r)) { (void)table_close(oldmd5sums); (void)table_close(newchecksums); releaselock(); database_free(); return r; } (void)table_close(oldmd5sums); r = table_close(newchecksums); if (RET_WAS_ERROR(r)) { releaselock(); database_free(); return r; } fullfilename = dbfilename("files.db"); if (FAILEDTOALLOC(fullfilename)) { releaselock(); database_free(); return RET_ERROR_OOM; } e = deletefile(fullfilename); if (e != 0) { fprintf(stderr, "Could not delete '%s'!\n" "It can now savely be deleted and it all that is left to be done!\n", fullfilename); database_free(); return RET_ERRNO(e); } r = writeversionfile(); releaselock(); database_free(); return r; } bool database_allcreated(void) { return rdb_capabilities.createnewtables; } reprepro-4.13.1/incoming.h0000644000175100017510000000042112152651661012337 00000000000000#ifndef REPREPRO_INCOMING_H #define REPREPRO_INCOMING_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" 
#endif

/* Process uploads waiting in the incoming queue called <name> into the
 * given distributions; if onlychangesfilename is non-NULL only that
 * .changes file is processed. */
retvalue process_incoming(struct distribution *distributions, const char *name, /*@null@*/const char *onlychangesfilename);
#endif
reprepro-4.13.1/names.h0000644000175100017510000000260512152651661011645 00000000000000
#ifndef REPREPRO_NAMES_H
#define REPREPRO_NAMES_H

#ifndef REPREPRO_STRLIST_H
#include "strlist.h"
#endif

/* string builders; each returns a newly allocated string
 * (NULL on allocation failure) */
char *calc_addsuffix(const char *, const char *);
char *calc_dirconcat(const char *, const char *);
char *calc_dirconcat3(const char *, const char *, const char *);
char *calc_changes_basename(const char *, const char *, const struct strlist *);
char *calc_trackreferee(const char *, const char *, const char *);
#define calc_snapshotbasedir(codename, name) mprintf("%s/%s/snapshots/%s", global.distdir, codename, name)

/* Create a strlist consisting out of calc_dirconcat'ed entries of the old */
retvalue calc_dirconcats(const char *, const struct strlist *, /*@out@*/struct strlist *);
retvalue calc_inplacedirconcats(const char *, struct strlist *);

/* move over a version number,
 * if epochsuppressed is true, colons may happen even without epoch there */
void names_overversion(const char **, bool /*epochsuppressed*/);

/* check for forbidden characters */
retvalue propersourcename(const char *);
retvalue properfilenamepart(const char *);
retvalue properfilename(const char *);
retvalue properfilenames(const struct strlist *);
retvalue properpackagename(const char *);
retvalue properversion(const char *);

/* true iff name ends with suffix AND is strictly longer than it;
 * note: returns false when name equals suffix exactly (deliberate) */
static inline bool endswith(const char *name, const char *suffix) { size_t ln = strlen(name), ls = strlen(suffix); return ln > ls && strcmp(name + (ln - ls), suffix) == 0; }
#endif
reprepro-4.13.1/extractcontrol.c0000644000175100017510000002573712152651661013613 00000000000000
/* This file is part of "reprepro"
 * Copyright (C) 2003,2007 Bernhard R. Link
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "filecntl.h" #include "readtextfile.h" #include "debfile.h" #include "chunks.h" #ifdef HAVE_LIBARCHIVE #error Why did this file got compiled instead of debfile.c? #endif // ********************************************************************** // * This is a very simple implementation calling ar and tar, which // * is only used with --without-libarchive or when no libarchive was // * found. // ********************************************************************** static retvalue try_extractcontrol(char **control, const char *debfile, bool brokentar) { int pipe_1[2]; int pipe_2[2]; int ret; pid_t ar, tar, pid; int status; char *controlchunk; retvalue result, r; result = RET_OK; ret = pipe(pipe_1); if (ret < 0) { int e = errno; fprintf(stderr, "Error %d creating pipe: %s\n", e, strerror(e)); return RET_ERRNO(e); } ret = pipe(pipe_2); if (ret < 0) { int e = errno; close(pipe_1[0]); close(pipe_1[1]); fprintf(stderr, "Error %d creating pipe: %s\n", e, strerror(e)); return RET_ERRNO(e); } ar = fork(); if (ar < 0) { int e = errno; fprintf(stderr, "Error %d forking: %s\n", e, strerror(e)); result = RET_ERRNO(e); close(pipe_1[0]); close(pipe_1[1]); close(pipe_2[0]); close(pipe_2[1]); return result; } if (ar == 0) { int e; /* calling ar */ if (dup2(pipe_1[1], 1) < 0) exit(255); close(pipe_1[0]); close(pipe_1[1]); close(pipe_2[0]); close(pipe_2[1]); //TODO without explicit path ret = execl("/usr/bin/ar", "ar", "p", 
debfile, "control.tar.gz", ENDOFARGUMENTS); e = errno; fprintf(stderr, "ar call failed with error %d: %s\n", e, strerror(e)); exit(254); } tar = fork(); if (tar < 0) { int e = errno; result = RET_ERRNO(e); fprintf(stderr, "Error %d forking: %s\n", e, strerror(e)); close(pipe_1[0]); close(pipe_1[1]); close(pipe_2[0]); close(pipe_2[1]); tar = -1; } else if (tar == 0) { int e; /* calling tar */ if (dup2(pipe_1[0], 0) < 0) exit(255); if (dup2(pipe_2[1], 1) < 0) exit(255); close(pipe_1[0]); close(pipe_1[1]); close(pipe_2[0]); close(pipe_2[1]); //TODO without explicit path execl("/bin/tar", "tar", "-xOzf", "-", brokentar?"control":"./control", ENDOFARGUMENTS); e = errno; fprintf(stderr, "tar call failed with error %d: %s\n", e, strerror(e)); exit(254); } close(pipe_1[0]); close(pipe_1[1]); markcloseonexec(pipe_2[0]); close(pipe_2[1]); controlchunk = NULL; /* read data: */ if (RET_IS_OK(result)) { size_t len, controllen; const char *afterchanges; r = readtextfilefd(pipe_2[0], brokentar? "output from ar p control.tar.gz | tar -xOzf - control": "output from ar p control.tar.gz | tar -xOzf - ./control", &controlchunk, &controllen); if (RET_IS_OK(r)) { len = chunk_extract(controlchunk, controlchunk, controllen, false, &afterchanges); if (len == 0) r = RET_NOTHING; if (*afterchanges != '\0') { fprintf(stderr, "Unexpected emtpy line in control information within '%s'\n" "(obtained via 'ar p %s control.tar.gz | tar -XOzf - %scontrol')\n", debfile, debfile, brokentar?"":"./"); free(controlchunk); controlchunk = NULL; r = RET_ERROR; } } if (r == RET_NOTHING) { free(controlchunk); controlchunk = NULL; fprintf(stderr, "No control information found in .deb!\n"); /* only report error now, * if we haven't try everything yet */ if (brokentar) r = RET_ERROR_MISSING; } RET_UPDATE(result, r); } while (ar != -1 || tar != -1) { pid=wait(&status); if (pid < 0) { if (errno != EINTR) RET_UPDATE(result, RET_ERRNO(errno)); } else { if (pid == ar) { ar = -1; if (!WIFEXITED(status)) { 
fprintf(stderr, "Ar exited unnaturally!\n"); result = RET_ERROR; } else if (WEXITSTATUS(status) != 0) { fprintf(stderr, "Error from ar for '%s': %d\n", debfile, WEXITSTATUS(status)); result = RET_ERROR; } } else if (pid == tar) { tar = -1; if (!WIFEXITED(status)) { fprintf(stderr, "Tar exited unnaturally!\n"); result = RET_ERROR; } else if (!brokentar && WEXITSTATUS(status) == 2) { if (RET_IS_OK(result)) result = RET_NOTHING; } else if (WEXITSTATUS(status) != 0) { fprintf(stderr, "Error from tar for control.tar.gz within '%s': %d\n", debfile, WEXITSTATUS(status)); result = RET_ERROR; } } else { // WTH? fprintf(stderr, "Who is %d, and why does this bother me?\n", (int)pid); } } } if (RET_IS_OK(result)) { if (controlchunk == NULL) /* we got not data but tar gave not error.. */ return RET_ERROR_MISSING; else *control = controlchunk; } else free(controlchunk); return result; } retvalue extractcontrol(char **control, const char *debfile) { retvalue r; r = try_extractcontrol(control, debfile, false); if (r != RET_NOTHING) return r; /* perhaps the control.tar.gz is packaged by hand wrongly, * try again: */ r = try_extractcontrol(control, debfile, true); if (RET_IS_OK(r)) { fprintf(stderr, "WARNING: '%s' contains a broken/unusual control.tar.gz.\n" "reprepro was able to work around this but other tools or versions might not.\n", debfile); } assert (r != RET_NOTHING); return r; } retvalue getfilelist(/*@out@*/char **filelist, /*@out@*/size_t *size, const char *debfile) { fprintf(stderr, "Extraction of file list without libarchive currently not implemented.\n"); return RET_ERROR; #if 0 int pipe_1[2]; int pipe_2[2]; int ret; pid_t ar, tar, pid; int status; struct filelistcompressor c; size_t last = 0; retvalue result; #error this still needs to be reimplemented... 
result = filelistcompressor_setup(&c); if (RET_WAS_ERROR(result)) return result; result = RET_OK; ret = pipe(pipe_1); if (ret < 0) { int e = errno; fprintf(stderr, "Error %d creating pipe: %s\n", e, strerror(e)); filelistcompressor_cancel(&c); return RET_ERRNO(e); } ret = pipe(pipe_2); if (ret < 0) { int e = errno; fprintf(stderr, "Error %d creating pipe: %s\n", e, strerror(e)); close(pipe_1[0]); close(pipe_1[1]); filelistcompressor_cancel(&c); return RET_ERRNO(e); } ar = fork(); if (ar < 0) { int e = errno; fprintf(stderr, "Error %d forking: %s\n", e, strerror(e)); result = RET_ERRNO(e); close(pipe_1[0]); close(pipe_1[1]); close(pipe_2[0]); close(pipe_2[1]); filelistcompressor_cancel(&c); return result; } if (ar == 0) { int e; /* calling ar */ if (dup2(pipe_1[1], 1) < 0) exit(255); close(pipe_1[0]); close(pipe_1[1]); close(pipe_2[0]); close(pipe_2[1]); //TODO without explicit path ret = execl("/usr/bin/ar", "ar", "p", debfile, "data.tar.gz", ENDOFARGUMENTS); e = errno; fprintf(stderr, "ar call failed with error %d: %s\n", e, strerror(e)); exit(254); } tar = fork(); if (tar < 0) { int e = errno; result = RET_ERRNO(e); fprintf(stderr, "Error %d forking: %s\n", e, strerror(e)); close(pipe_1[0]); close(pipe_1[1]); close(pipe_2[0]); close(pipe_2[1]); tar = -1; } else if (tar == 0) { int e; /* calling tar */ if (dup2(pipe_1[0], 0) < 0) exit(255); if (dup2(pipe_2[1], 1) < 0) exit(255); close(pipe_1[0]); close(pipe_1[1]); close(pipe_2[0]); close(pipe_2[1]); //TODO without explicit path execl("/bin/tar", "tar", "-tzf", "-", ENDOFARGUMENTS); e = errno; fprintf(stderr, "tar call failed with error %d: %s\n", e, strerror(e)); exit(254); } close(pipe_1[0]); close(pipe_1[1]); close(pipe_2[1]); /* read data: */ if (RET_IS_OK(result)) do { ssize_t bytes_read; size_t ignore; if (listsize <= len + 512) { char *n; listsize = len + 1024; n = realloc(list, listsize); if (FAILEDTOALLOC(n)) { result = RET_ERROR_OOM; break; } list = n; } ignore = 0; bytes_read = read(pipe_2[0], list+len, 
listsize-len-1); if (bytes_read < 0) { int e = errno; fprintf(stderr, "Error %d reading from pipe: %s\n", e, strerror(e)); result = RET_ERRNO(e); break; } else if (bytes_read == 0) break; else while (bytes_read > 0) { if (list[len] == '\0') { fprintf(stderr, "Unexpected NUL character from tar while getting file list from %s!\n", debfile); result = RET_ERROR; break; } else if (list[len] == '\n') { if (len > last+ignore && list[len-1] != '/') { list[len] = '\0'; len++; bytes_read--; memmove(list+last, list+last+ignore, 1+len-last-ignore); last = len-ignore; } else { len++; bytes_read--; ignore = len-last; } } else if (list[len] == '.' && len == last+ignore) { len++; ignore++; bytes_read--; } else if (list[len] == '/' && len == last+ignore) { len++; ignore++; bytes_read--; } else { len++; bytes_read--; } } if (ignore > 0) { if (len <= last+ignore) len = last; else { memmove(list+last, list+last+ignore, 1+len-last-ignore); len -= ignore; } } } while (true); if (len != last) { fprintf(stderr, "WARNING: unterminated output from tar pipe while extracting file list of %s\n", debfile); list[len] = '\0'; fprintf(stderr, "The item '%s' might got lost.\n", list+last); result = RET_ERROR; } else { char *n = realloc(list, len+1); if (FAILEDTOALLOC(n)) result = RET_ERROR_OOM; else { list = n; list[len] = '\0'; } } close(pipe_2[0]); while (ar != -1 || tar != -1) { pid=wait(&status); if (pid < 0) { if (errno != EINTR) RET_UPDATE(result, RET_ERRNO(errno)); } else { if (pid == ar) { ar = -1; if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { fprintf(stderr, "Error from ar for '%s': %d\n", debfile, WEXITSTATUS(status)); result = RET_ERROR; } } else if (pid == tar) { tar = -1; if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { fprintf(stderr, "Error from tar for data.tar.gz within '%s': %d\n", debfile, WEXITSTATUS(status)); result = RET_ERROR; } } else { // WTH? 
fprintf(stderr, "Who is %d, and why does this bother me?\n", pid); } } } if (RET_IS_OK(result)) return filelistcompressor_finish(&c, filelist); else filelistcompressor_cancel(&c); return result; #endif } reprepro-4.13.1/exports.c0000644000175100017510000003300212152651661012234 00000000000000/* This file is part of "reprepro" * Copyright (C) 2005,2007,2008,2009 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "mprintf.h" #include "strlist.h" #include "names.h" #include "dirs.h" #include "database.h" #include "target.h" #include "exports.h" #include "configparser.h" #include "filecntl.h" #include "hooks.h" static const char *exportdescription(const struct exportmode *mode, char *buffer, size_t buffersize) { char *result = buffer; enum indexcompression ic; static const char* compression_names[ic_count] = { "uncompressed" ,"gzipped" #ifdef HAVE_LIBBZ2 ,"bzip2ed" #endif }; bool needcomma = false, needellipsis = false; assert (buffersize > 50); *buffer++ = ' '; buffersize--; *buffer++ = '('; buffersize--; for (ic = ic_first ; ic < ic_count ; ic++) { if ((mode->compressions & IC_FLAG(ic)) != 0) { size_t l = strlen(compression_names[ic]); assert (buffersize > l+3); if (needcomma) { *buffer++ = ','; buffersize--; } memcpy(buffer, compression_names[ic], 
				/* (continuation of exportdescription(): finishing the
				 * memcpy of the compression name begun on the
				 * previous line) */
				l); buffer += l; buffersize -= l;
			needcomma = true;
		}
	}
	/* should be long enough for the previous things in all cases */
	assert (buffersize > 10);
	if (mode->hooks.count > 0) {
		int i;

		if (needcomma) {
			*buffer++ = ','; buffersize--;
		}
		strcpy(buffer, "script: ");
		buffer += 8; buffersize -= 8;
		needcomma = false;
		for (i = 0 ; i < mode->hooks.count ; i++) {
			const char *hook = dirs_basename(mode->hooks.values[i]);
			size_t l = strlen(hook);

			/* not even room for "...)" — stop listing hooks */
			if (buffersize < 6) {
				needellipsis = true;
				break;
			}
			if (needcomma) {
				*buffer++ = ','; buffersize--;
			}
			if (l > buffersize - 5) {
				/* hook name does not fit: copy a prefix and
				 * mark that an ellipsis is needed */
				memcpy(buffer, hook, buffersize-5);
				buffer += (buffersize-5);
				buffersize -= (buffersize-5);
				needellipsis = true;
				break;
			} else {
				memcpy(buffer, hook, l);
				buffer += l;
				buffersize -= l;
				assert (buffersize >= 2);
			}
			needcomma = true;
		}
	}
	if (needellipsis) {
		/* moving backward here is easier than checking above */
		if (buffersize < 5) {
			buffer -= (5 - buffersize);
			buffersize = 5;
		}
		*buffer++ = '.'; buffersize--;
		*buffer++ = '.'; buffersize--;
		*buffer++ = '.'; buffersize--;
	}
	assert (buffersize >= 2);
	*buffer++ = ')'; buffersize--;
	*buffer = '\0';
	return result;
}

/* Initialize *mode: index files written as <indexfile>, gzipped (plus
 * uncompressed when requested), optional Release name copied from
 * <release>, no export hooks.  Returns RET_ERROR_OOM on allocation
 * failure (mode->filename may already be set in that case; caller is
 * expected to clean up via exportmode_done). */
retvalue exportmode_init(/*@out@*/struct exportmode *mode, bool uncompressed, /*@null@*/const char *release, const char *indexfile) {
	strlist_init(&mode->hooks);
	mode->compressions = IC_FLAG(ic_gzip) | (uncompressed ? IC_FLAG(ic_uncompressed) : 0);
	mode->filename = strdup(indexfile);
	if (FAILEDTOALLOC(mode->filename))
		return RET_ERROR_OOM;
	if (release == NULL)
		mode->release = NULL;
	else {
		mode->release = strdup(release);
		if (FAILEDTOALLOC(mode->release))
			return RET_ERROR_OOM;
	}
	return RET_OK;
}
// TODO: check for scripts in confdir early... 
retvalue exportmode_set(struct exportmode *mode, struct configiterator *iter) { retvalue r; char *word; r = config_getword(iter, &word); if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) { fprintf(stderr, "Error parsing %s, line %u, column %u: Unexpected end of field!\n" "Filename to use for index files (Packages, Sources, ...) missing.\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter)); return RET_ERROR_MISSING; } assert (word[0] != '\0'); if (word[0] == '.') { free(word); fprintf(stderr, "Error parsing %s, line %u, column %u: filename for index files expected!\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter)); return RET_ERROR; } free(mode->filename); mode->filename = word; r = config_getword(iter, &word); if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) word = NULL; if (r != RET_NOTHING && word[0] != '.') { assert (word[0] != '\0'); free(mode->release); mode->release = word; r = config_getword(iter, &word); if (RET_WAS_ERROR(r)) return r; } if (r == RET_NOTHING) { fprintf(stderr, "Error parsing %s, line %u, column %u: Unexpected end of field!\n" "Compression identifiers ('.', '.gz' or '.bz2') missing.\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter)); return RET_ERROR; } if (word[0] != '.') { fprintf(stderr, "Error parsing %s, line %u, column %u:\n" "Compression extension ('.', '.gz' or '.bz2') expected.\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter)); free(word); return RET_ERROR; } mode->compressions = 0; while (r != RET_NOTHING && word[0] == '.') { if (word[1] == '\0') mode->compressions |= IC_FLAG(ic_uncompressed); else if (word[1] == 'g' && word[2] == 'z' && word[3] == '\0') mode->compressions |= IC_FLAG(ic_gzip); #ifdef HAVE_LIBBZ2 else if (word[1] == 'b' && word[2] == 'z' && word[3] == '2' && word[4] == '\0') mode->compressions |= IC_FLAG(ic_bzip2); #endif else { fprintf(stderr, "Error parsing %s, line %u, column %u:\n" 
"Unsupported compression extension '%s'!\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter), word); free(word); return RET_ERROR; } free(word); r = config_getword(iter, &word); if (RET_WAS_ERROR(r)) return r; } while (r != RET_NOTHING) { if (word[0] == '.') { fprintf(stderr, "Error parsing %s, line %u, column %u:\n" "Scripts starting with dot are forbidden to avoid ambiguity ('%s')!\n" "Try to put all compressions first and then all scripts to avoid this.\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter), word); free(word); return RET_ERROR; } else { char *fullfilename = configfile_expandname(word, word); if (FAILEDTOALLOC(fullfilename)) return RET_ERROR_OOM; r = strlist_add(&mode->hooks, fullfilename); if (RET_WAS_ERROR(r)) return r; } r = config_getword(iter, &word); if (RET_WAS_ERROR(r)) return r; } return RET_OK; } static retvalue gotfilename(const char *relname, size_t l, struct release *release) { if (l > 12 && memcmp(relname+l-12, ".tobedeleted", 12) == 0) { char *filename; filename = strndup(relname, l - 12); if (FAILEDTOALLOC(filename)) return RET_ERROR_OOM; return release_adddel(release, filename); } else if (l > 4 && memcmp(relname+(l-4), ".new", 4) == 0) { char *filename, *tmpfilename; filename = strndup(relname, l - 4); if (FAILEDTOALLOC(filename)) return RET_ERROR_OOM; tmpfilename = strndup(relname, l); if (FAILEDTOALLOC(tmpfilename)) { free(filename); return RET_ERROR_OOM; } return release_addnew(release, tmpfilename, filename); } else if (l > 5 && memcmp(relname + (l-5), ".new.", 5) == 0) { char *filename, *tmpfilename; filename = strndup(relname, l-5); if (FAILEDTOALLOC(filename)) return RET_ERROR_OOM; tmpfilename = strndup(relname, l-1); if (FAILEDTOALLOC(tmpfilename)) { free(filename); return RET_ERROR_OOM; } return release_addsilentnew(release, tmpfilename, filename); } else if (l > 5 && memcmp(relname + (l-5), ".keep", 5) == 0) { return RET_OK; } else { char *filename; filename = 
strndup(relname, l); if (FAILEDTOALLOC(filename)) return RET_ERROR_OOM; return release_addold(release, filename); } } static retvalue callexporthook(/*@null@*/const char *hook, const char *relfilename, const char *mode, struct release *release) { pid_t f, c; int status; int io[2]; char buffer[1000]; int already = 0; if (hook == NULL) return RET_NOTHING; status = pipe(io); if (status < 0) { int e = errno; fprintf(stderr, "Error %d creating pipe: %s!\n", e, strerror(e)); return RET_ERRNO(e); } f = fork(); if (f < 0) { int e = errno; (void)close(io[0]); (void)close(io[1]); fprintf(stderr, "Error %d while forking for exporthook: %s\n", e, strerror(e)); return RET_ERRNO(e); } if (f == 0) { char *reltmpfilename; int e; if (dup2(io[1], 3) < 0) { e = errno; fprintf(stderr, "Error %d dup2'ing fd %d to 3: %s\n", e, io[1], strerror(e)); exit(255); } /* "Doppelt haelt besser": */ if (io[0] != 3) (void)close(io[0]); if (io[1] != 3) (void)close(io[1]); closefrom(4); /* backward compatibilty */ reltmpfilename = calc_addsuffix(relfilename, "new"); if (reltmpfilename == NULL) { exit(255); } sethookenvironment(causingfile, NULL, NULL, NULL); (void)execl(hook, hook, release_dirofdist(release), reltmpfilename, relfilename, mode, ENDOFARGUMENTS); e = errno; fprintf(stderr, "Error %d while executing '%s': %s\n", e, hook, strerror(e)); exit(255); } close(io[1]); markcloseonexec(io[0]); if (verbose > 6) printf("Called %s '%s' '%s.new' '%s' '%s'\n", hook, release_dirofdist(release), relfilename, relfilename, mode); /* read what comes from the client */ while (true) { ssize_t r; int last, j; r = read(io[0], buffer + already, 999 - already); if (r < 0) { int e = errno; fprintf(stderr, "Error %d reading from exporthook: %s!\n", e, strerror(e)); break; } already += r; if (r == 0) { buffer[already] = '\0'; already++; } last = 0; for (j = 0 ; j < already ; j++) { if (buffer[j] == '\n' || buffer[j] == '\0') { int next = j+1; int e = (j>0)?(j-1):j; retvalue ret; while (last < j && 
xisspace(buffer[last])) last++; if (last >= j) { last = next; continue; } while (xisspace(buffer[e])) { e--; assert (e >= last); } ret = gotfilename(buffer + last, e - last + 1, release); if (RET_WAS_ERROR(ret)) { (void)close(io[0]); return ret; } last = next; } } if (last > 0) { if (already > last) memmove(buffer, buffer + last, already - last); already -= last; } if (r == 0) break; } (void)close(io[0]); do { c = waitpid(f, &status, WUNTRACED); if (c < 0) { int e = errno; fprintf(stderr, "Error %d while waiting for hook '%s' to finish: %s\n", e, hook, strerror(e)); return RET_ERRNO(e); } } while (c != f); if (WIFEXITED(status)) { if (WEXITSTATUS(status) == 0) { if (verbose > 6) printf("Exporthook successfully returned!\n"); return RET_OK; } else { fprintf(stderr, "Exporthook failed with exitcode %d!\n", (int)WEXITSTATUS(status)); return RET_ERROR; } } else if (WIFSIGNALED(status)) { fprintf(stderr, "Exporthook killed by signal %d!\n", (int)(WTERMSIG(status))); return RET_ERROR; } else { fprintf(stderr, "Exporthook terminated abnormally. 
(status is %x)!\n", status); return RET_ERROR; } } retvalue export_target(const char *relativedir, struct target *target, const struct exportmode *exportmode, struct release *release, bool onlyifmissing, bool snapshot) { retvalue r; struct filetorelease *file; const char *status; char *relfilename; char buffer[100]; const char *chunk; size_t chunk_len; struct target_cursor iterator; relfilename = calc_dirconcat(relativedir, exportmode->filename); if (FAILEDTOALLOC(relfilename)) return RET_ERROR_OOM; r = release_startfile(release, relfilename, exportmode->compressions, onlyifmissing, &file); if (RET_WAS_ERROR(r)) { free(relfilename); return r; } if (RET_IS_OK(r)) { if (release_oldexists(file)) { if (verbose > 5) printf(" replacing '%s/%s'%s\n", release_dirofdist(release), relfilename, exportdescription(exportmode, buffer, 100)); status = "change"; } else { if (verbose > 5) printf(" creating '%s/%s'%s\n", release_dirofdist(release), relfilename, exportdescription(exportmode, buffer, 100)); status = "new"; } r = target_openiterator(target, READONLY, &iterator); if (RET_WAS_ERROR(r)) { release_abortfile(file); free(relfilename); return r; } while (target_nextpackage_len(&iterator, NULL, &chunk, &chunk_len)) { if (chunk_len == 0) continue; (void)release_writedata(file, chunk, chunk_len); (void)release_writestring(file, "\n"); if (chunk[chunk_len-1] != '\n') (void)release_writestring(file, "\n"); } r = target_closeiterator(&iterator); if (RET_WAS_ERROR(r)) { release_abortfile(file); free(relfilename); return r; } r = release_finishfile(release, file); if (RET_WAS_ERROR(r)) { free(relfilename); return r; } } else { if (verbose > 9) printf(" keeping old '%s/%s'%s\n", release_dirofdist(release), relfilename, exportdescription(exportmode, buffer, 100)); status = "old"; } if (!snapshot) { int i; for (i = 0 ; i < exportmode->hooks.count ; i++) { const char *hook = exportmode->hooks.values[i]; r = callexporthook(hook, relfilename, status, release); if (RET_WAS_ERROR(r)) { 
free(relfilename); return r; } } } free(relfilename); return RET_OK; } void exportmode_done(struct exportmode *mode) { assert (mode != NULL); free(mode->filename); strlist_done(&mode->hooks); free(mode->release); } reprepro-4.13.1/filelist.c0000644000175100017510000004250712152651661012355 00000000000000/* This file is part of "reprepro" * Copyright (C) 2006,2007 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include "error.h" #include "database_p.h" #include "files.h" #include "debfile.h" #include "filelist.h" struct filelist_package { struct filelist_package *next; char name[]; }; struct dirlist; struct filelist { struct filelist *nextl; struct filelist *nextr; int balance; char *name; size_t count; const char *packages[]; }; struct dirlist { struct dirlist *nextl; struct dirlist *nextr; int balance; /*@dependant@*/ struct dirlist *parent; struct dirlist *subdirs; struct filelist *files; /*@dependant@*/struct filelist *lastfile; size_t len; char name[]; }; struct filelist_list { struct dirlist *root; struct filelist_package *packages; }; retvalue filelist_init(struct filelist_list **list) { struct filelist_list *filelist; filelist = zNEW(struct filelist_list); if (FAILEDTOALLOC(filelist)) return RET_ERROR_OOM; filelist->root = zNEW(struct dirlist); if (FAILEDTOALLOC(filelist->root)) { free(filelist); return RET_ERROR_OOM; 
} *list = filelist; return RET_OK; }; static void files_free(/*@only@*/struct filelist *list) { if (list == NULL) return; files_free(list->nextl); files_free(list->nextr); free(list->name); free(list); } static void dirlist_free(/*@only@*/struct dirlist *list) { if (list == NULL) return; files_free(list->files); dirlist_free(list->subdirs); dirlist_free(list->nextl); dirlist_free(list->nextr); free(list); } void filelist_free(struct filelist_list *list) { if (list == NULL) return; dirlist_free(list->root); while (list->packages != NULL) { struct filelist_package *package = list->packages; list->packages = package->next; free(package); } free(list); }; static retvalue filelist_newpackage(struct filelist_list *filelist, const char *name, const char *section, const struct filelist_package **pkg) { struct filelist_package *p; size_t name_len = strlen(name); size_t section_len = strlen(section); p = malloc(sizeof(struct filelist_package)+name_len+section_len+2); if (FAILEDTOALLOC(p)) return RET_ERROR_OOM; p->next = filelist->packages; memcpy(p->name, section, section_len); p->name[section_len] = '/'; memcpy(p->name+section_len+1, name, name_len+1); filelist->packages = p; *pkg = p; return RET_OK; }; static bool findfile(struct dirlist *parent, const char *packagename, const char *basefilename, size_t namelen) { struct filelist *file, *n, *last; struct filelist **stack[128]; int stackpointer = 0; stack[stackpointer++] = &parent->files; file = parent->files; while (file != NULL) { int c = strncmp(basefilename, file->name, namelen); if (c == 0 && file->name[namelen] == '\0') { n = realloc(file, sizeof(struct filelist)+ (file->count+1)*sizeof(const char*)); if (n == NULL) return false; n->packages[n->count++] = packagename; *(stack[--stackpointer]) = n; return true; } else if (c > 0) { stack[stackpointer++] = &file->nextr; file = file->nextr; } else { stack[stackpointer++] = &file->nextl; file = file->nextl; } } n = malloc(sizeof(struct filelist)+sizeof(const char*)); if 
(FAILEDTOALLOC(n)) return false; n->name = strndup(basefilename, namelen); n->nextl = NULL; n->nextr = NULL; n->balance = 0; n->count = 1; n->packages[0] = packagename; if (FAILEDTOALLOC(n->name)) { free(n); return false; } *(stack[--stackpointer]) = n; while (stackpointer > 0) { file = *(stack[--stackpointer]); if (file->nextl == n) { file->balance--; if (file->balance > -1) break; if (file->balance == -1) { n = file; continue; } if (n->balance == -1) { file->nextl = n->nextr; file->balance = 0; n->nextr = file; n->balance = 0; *(stack[stackpointer]) = n; break; } else { last = n->nextr; file->nextl = last->nextr; *(stack[stackpointer]) = last; last->nextr = file; n->nextr = last->nextl; last->nextl = n; if (last->balance == 0) { file->balance = 0; n->balance = 0; } else if (last->balance < 0) { file->balance = 1; n->balance = 0; } else { file->balance = 0; n->balance = -1; } last->balance = 0; break; } } else { file->balance++; if (file->balance < 1) break; if (file->balance == 1) { n = file; continue; } if (n->balance == 1) { file->nextr = n->nextl; file->balance = 0; n->nextl = file; n->balance = 0; *(stack[stackpointer]) = n; break; } else { last = n->nextl; file->nextr = last->nextl; *(stack[stackpointer]) = last; last->nextl = file; n->nextl = last->nextr; last->nextr = n; if (last->balance == 0) { file->balance = 0; n->balance = 0; } else if (last->balance > 0) { file->balance = -1; n->balance = 0; } else { file->balance = 0; n->balance = 1; } last->balance = 0; break; } } } return true; } typedef const unsigned char cuchar; static struct dirlist *finddir(struct dirlist *dir, cuchar *name, size_t namelen) { struct dirlist *d, *this, *parent, *h; struct dirlist **stack[128]; int stackpointer = 0; stack[stackpointer++] = &dir->subdirs; d = dir->subdirs; while (d != NULL) { int c; if (namelen < d->len) { c = memcmp(name, d->name, namelen); if (c <= 0) { stack[stackpointer++] = &d->nextl; d = d->nextl; } else { stack[stackpointer++] = &d->nextr; d = d->nextr; } 
} else { c = memcmp(name, d->name, d->len); if (c == 0 && d->len == namelen) { return d; } else if (c >= 0) { stack[stackpointer++] = &d->nextr; d = d->nextr; } else { stack[stackpointer++] = &d->nextl; d = d->nextl; } } } /* not found, create it and rebalance */ d = malloc(sizeof(struct dirlist) + namelen); if (FAILEDTOALLOC(d)) return d; d->subdirs = NULL; d->nextl = NULL; d->nextr = NULL; d->balance = 0; d->parent = dir; d->files = NULL; d->len = namelen; memcpy(d->name, name, namelen); *(stack[--stackpointer]) = d; this = d; while (stackpointer > 0) { parent = *(stack[--stackpointer]); if (parent->nextl == this) { parent->balance--; if (parent->balance > -1) break; if (parent->balance == -1) { this = parent; continue; } if (this->balance == -1) { parent->nextl = this->nextr; parent->balance = 0; this->nextr = parent; this->balance = 0; *(stack[stackpointer]) = this; break; } else { h = this->nextr; parent->nextl = h->nextr; *(stack[stackpointer]) = h; h->nextr = parent; this->nextr = h->nextl; h->nextl = this; if (h->balance == 0) { parent->balance = 0; this->balance = 0; } else if (h->balance < 0) { parent->balance = 1; this->balance = 0; } else { parent->balance = 0; this->balance = -1; } h->balance = 0; break; } } else { parent->balance++; if (parent->balance < 1) break; if (parent->balance == 1) { this = parent; continue; } if (this->balance == 1) { parent->nextr = this->nextl; parent->balance = 0; this->nextl = parent; this->balance = 0; *(stack[stackpointer]) = this; break; } else { h = this->nextl; parent->nextr = h->nextl; *(stack[stackpointer]) = h; h->nextl = parent; this->nextl = h->nextr; h->nextr = this; if (h->balance == 0) { parent->balance = 0; this->balance = 0; } else if (h->balance > 0) { parent->balance = -1; this->balance = 0; } else { parent->balance = 0; this->balance = 1; } h->balance = 0; break; } } } return d; } static retvalue filelist_addfiles(struct filelist_list *list, const struct filelist_package *package, const char *filekey, 
const char *datastart, size_t size) {
	/* Decode one package's compressed file list (the format produced
	 * by filelistcompressor_add below) and merge it into the in-memory
	 * directory/file trees of <list>.
	 * Byte stream format as this decoder reads it:
	 *   0x01 <len> <name...>  — file <name> in the current directory
	 *   0x02 <len> <name...>  — descend into subdirectory <name>
	 *   k>2                   — ascend (k-2) directory levels
	 *   0x00                  — end of data
	 * where <len> is a run of 0xff bytes (255 each) plus one final
	 * nonzero byte.
	 * Returns RET_OK, RET_ERROR on corrupt data, RET_ERROR_OOM. */
	struct dirlist *curdir = list->root;
	const unsigned char *data = (const unsigned char *)datastart;

	while (*data != '\0') {
		int d;
		if ((size_t)(data - (const unsigned char *)datastart) >= size-1) {
			/* This might not catch everything, but we are only
			 * accessing it readonly */
			fprintf(stderr, "Corrupted file list data for %s\n", filekey);
			return RET_ERROR;
		}
		d = *(data++);
		if (d == 1) {
			/* file entry: decode length, register under curdir */
			size_t len = 0;
			while (*data == 255) {
				data++;
				len += 255;
			}
			if (*data == 0) {
				/* a length byte of 0 is never emitted */
				fprintf(stderr, "Corrupted file list data for %s\n", filekey);
				return RET_ERROR;
			}
			len += *(data++);
			if (!findfile(curdir, package->name, (const char*)data, len))
				return RET_ERROR_OOM;
			data += len;
		} else if (d == 2) {
			/* subdirectory entry: decode length, descend */
			size_t len = 0;
			while (*data == 255) {
				data++;
				len += 255;
			}
			if (*data == 0) {
				fprintf(stderr, "Corrupted file list data for %s\n", filekey);
				return RET_ERROR;
			}
			len += *(data++);
			curdir = finddir(curdir, data, len);
			if (FAILEDTOALLOC(curdir))
				return RET_ERROR_OOM;
			data += len;
		} else {
			/* d > 2: go up (d-2) levels, clamped at the root */
			d -= 2;
			while (d-- > 0 && curdir->parent != NULL)
				curdir = curdir->parent;
		}
	}
	/* the terminating NUL must be exactly the last byte of the record */
	if ((size_t)(data - (const unsigned char *)datastart) != size-1) {
		fprintf(stderr, "Corrupted file list data for %s (format suggest %llu, is %llu)\n", filekey, (unsigned long long)(data - (const unsigned char *)datastart), (unsigned long long)(size-1));
		return RET_ERROR;
	}
	return RET_OK;
}

/* Register the file list of the package in <filekey> with <list>:
 * use the cached list from the contents database if present, otherwise
 * read it from the .deb in the pool (and cache it).
 * (continues on the following chunk) */
retvalue filelist_addpackage(struct filelist_list *list, const char *packagename, const char *section, const char *filekey) {
	const struct filelist_package *package;
	char *debfilename, *contents = NULL;
	retvalue r;
	const char *c;
	size_t len;

	r = filelist_newpackage(list, packagename, section, &package);
	assert (r != RET_NOTHING);
	if (RET_WAS_ERROR(r))
		return r;
	/* first try the cached, pre-compressed list */
	r = table_gettemprecord(rdb_contents, filekey, &c, &len);
	if (r == RET_NOTHING) {
		if (verbose > 3)
			printf("Reading filelist for %s\n", filekey);
		debfilename = files_calcfullfilename(filekey);
		if (FAILEDTOALLOC(debfilename))
			return RET_ERROR_OOM;
		r =
getfilelist(&contents, &len, debfilename); len--; free(debfilename); c = contents; } if (RET_IS_OK(r)) { r = filelist_addfiles(list, package, filekey, c, len + 1); if (contents != NULL) r = table_adduniqsizedrecord(rdb_contents, filekey, contents, len + 1, true, false); } free(contents); return r; } retvalue fakefilelist(const char *filekey) { return table_adduniqsizedrecord(rdb_contents, filekey, "", 1, true, false); } static const char header[] = "FILE LOCATION\n"; static const char separator_chars[] = "\t "; static void filelist_writefiles(char *dir, size_t len, struct filelist *files, struct filetorelease *file) { unsigned int i; bool first; if (files == NULL) return; filelist_writefiles(dir, len, files->nextl, file); (void)release_writedata(file, dir, len); (void)release_writestring(file, files->name); (void)release_writedata(file, separator_chars, sizeof(separator_chars) - 1); first = true; for (i = 0 ; i < files->count ; i ++) { if (!first) (void)release_writestring(file, ","); first = false; (void)release_writestring(file, files->packages[i]); } (void)release_writestring(file, "\n"); filelist_writefiles(dir, len, files->nextr, file); } static retvalue filelist_writedirs(char **buffer_p, size_t *size_p, size_t ofs, struct dirlist *dir, struct filetorelease *file) { if (dir->nextl != NULL) { retvalue r; r = filelist_writedirs(buffer_p, size_p, ofs, dir->nextl, file); if (RET_WAS_ERROR(r)) return r; } { size_t len = dir->len; register retvalue r; if (ofs+len+2 >= *size_p) { char *n; *size_p += 1024*(1+(len/1024)); n = realloc(*buffer_p, *size_p); if (FAILEDTOALLOC(n)) { free(*buffer_p); *buffer_p = NULL; return RET_ERROR_OOM; } *buffer_p = n; } memcpy((*buffer_p) + ofs, dir->name, len); (*buffer_p)[ofs + len] = '/'; // TODO: output files and directories sorted together instead filelist_writefiles(*buffer_p, ofs+len+1, dir->files, file); if (dir->subdirs == NULL) r = RET_OK; else r = filelist_writedirs(buffer_p, size_p, ofs+len+1, dir->subdirs, file); if 
(dir->nextr == NULL)
		return r;
	if (RET_WAS_ERROR(r))
		return r;
	}
	/* tail of filelist_writedirs(): recurse into the right sibling */
	return filelist_writedirs(buffer_p, size_p, ofs, dir->nextr, file);
}

/* Write the whole Contents index: the "FILE LOCATION" header, then the
 * files directly in the root, then every subdirectory tree.
 * Returns RET_OK or RET_ERROR_OOM. */
retvalue filelist_write(struct filelist_list *list, struct filetorelease *file) {
	/* scratch buffer for assembling directory path prefixes;
	 * filelist_writedirs() grows it as needed */
	size_t size = 1024;
	char *buffer = malloc(size);
	retvalue r;

	if (FAILEDTOALLOC(buffer))
		return RET_ERROR_OOM;

	(void)release_writedata(file, header, sizeof(header) - 1);
	buffer[0] = '\0';
	filelist_writefiles(buffer, 0, list->root->files, file);
	if (list->root->subdirs != NULL)
		r = filelist_writedirs(&buffer, &size, 0, list->root->subdirs, file);
	else
		r = RET_OK;
	free(buffer);
	return r;
}

/* helpers for filelist generators to get the preprocessed form */

/* Initialise a compressor state with an empty 2000-byte buffer.
 * Returns RET_OK or RET_ERROR_OOM. */
retvalue filelistcompressor_setup(/*@out@*/struct filelistcompressor *c) {
	c->size = 2000;
	c->len = 0;
	c->filelist = malloc(c->size);
	if (FAILEDTOALLOC(c->filelist))
		return RET_ERROR_OOM;
	c->dirdepth = 0;
	return RET_OK;
}

/* Make sure at least <len>+2 more bytes fit into c->filelist, growing
 * the buffer if needed; refuses lists beyond 1GiB.  Returns false on
 * allocation failure or oversized list. */
static inline bool filelistcompressor_space(struct filelistcompressor *c, size_t len) {
	if (c->len + len + 2 >= c->size) {
		char *n;

		if (c->size > 1024*1024*1024) {
			fprintf(stderr, "Ridiculously long file list!\n");
			return false;
		}
		c->size = c->len + len + 2048;
		n = realloc(c->filelist, c->size);
		if (FAILEDTOALLOC(n))
			return false;
		c->filelist = n;
	}
	return true;
}

/* Append one file path to the compressed list, emitting directory
 * change records relative to the directory stack left by the previous
 * call (the encoding is decoded by filelist_addfiles above).
 * Returns RET_OK, RET_NOTHING for paths it deliberately skips
 * (components >= 255 chars, nesting > 252 deep), RET_ERROR_OOM.
 * (continues on the following chunk) */
retvalue filelistcompressor_add(struct filelistcompressor *c, const char *name, size_t name_len) {
	unsigned int depth;
	const char *separator;

	/* strip a leading "." and any leading slashes */
	if (name_len > 0 && *name == '.') {
		name++;
		name_len--;
	}
	while (name_len > 0 && *name == '/') {
		name++;
		name_len--;
	}
	/* check if it is already in the current dir or a subdir of that: */
	for (depth = 0; depth < c->dirdepth ; depth++) {
		const unsigned char *u =(unsigned char *)c->filelist + c->offsets[depth];
		size_t dir_len = 0;

		/* decode the stored length of this directory component */
		while (*u == 255) {
			dir_len += 255;
			u++;
		}
		dir_len += *(u++);
		if (dir_len >= name_len)
			break;
		if (memcmp(u, name, dir_len) != 0 || name[dir_len] != '/')
			break;
		name += dir_len + 1;
		name_len -= dir_len + 1;
	}
	if (depth < c->dirdepth) {
		if
(!filelistcompressor_space(c, 1))
			return RET_ERROR_OOM;
		/* emit an "ascend (dirdepth-depth) levels" record;
		 * the decoder subtracts 2 again */
		c->filelist[c->len++] = (unsigned char)2 + c->dirdepth - depth;
		c->dirdepth = depth;
	}
	/* emit a "descend" record for every remaining path component */
	while ((separator = memchr(name, '/', name_len)) != NULL) {
		size_t dirlen = separator - name;

		/* ignore files within directories with more than 255 chars */
		if (dirlen >= 255)
			return RET_NOTHING;
		/* ignore too deep paths */
		if (c->dirdepth > 252)
			return RET_NOTHING;
		/* add directory */
		if (!filelistcompressor_space(c, 2 + dirlen))
			return RET_ERROR_OOM;
		c->filelist[c->len++] = 2;
		/* remember where this component's length byte lives so the
		 * next call can compare against the open directory stack */
		c->offsets[c->dirdepth++] = c->len;
		c->filelist[c->len++] = dirlen;
		memcpy(c->filelist + c->len, name, dirlen);
		c->len += dirlen;
		name += dirlen+1;
		name_len -= dirlen+1;
		while (name_len > 0 && *name == '/') {
			name++;
			name_len--;
		}
	}
	if (name_len >= 255)
		return RET_NOTHING;
	/* all directories created, now only the file is left */
	if (!filelistcompressor_space(c, 2 + name_len))
		return RET_ERROR_OOM;
	c->filelist[c->len++] = 1;
	c->filelist[c->len++] = name_len;
	memcpy(c->filelist + c->len, name, name_len);
	c->len += name_len;
	return RET_OK;
}

/* Shrink the buffer to its final size, NUL-terminate it and hand
 * ownership to the caller via *list/*size.  On RET_ERROR_OOM the
 * compressor state is already freed. */
retvalue filelistcompressor_finish(struct filelistcompressor *c, /*@out@*/char **list, /*@out@*/size_t *size) {
	char *l;

	l = realloc(c->filelist, c->len+1);
	if (FAILEDTOALLOC(l)) {
		free(c->filelist);
		return RET_ERROR_OOM;
	}
	l[c->len] = '\0';
	*list = l;
	*size = c->len+1;
	return RET_OK;
}

/* Discard a compressor state without producing a list. */
void filelistcompressor_cancel(struct filelistcompressor *c) {
	free(c->filelist);
}

/* Convert every record of <oldtable> (NUL-separated plain path lists)
 * into the compressed format and store it in <newtable>.
 * (continues on the following chunk) */
retvalue filelists_translate(struct table *oldtable, struct table *newtable) {
	retvalue r;
	struct cursor *cursor;
	const char *filekey, *olddata;
	size_t olddata_len, newdata_size;
	char *newdata;

	r = table_newglobalcursor(oldtable, &cursor);
	if (!RET_IS_OK(r))
		return r;
	while (cursor_nexttempdata(oldtable, cursor, &filekey, &olddata, &olddata_len)) {
		const char *p;
		size_t l;
		struct filelistcompressor c;

		r = filelistcompressor_setup(&c);
		if (RET_WAS_ERROR(r))
			break;
		/* olddata is a sequence of NUL-terminated paths ended by
		 * an empty string */
		for (p = olddata ; (l = strlen(p)) != 0 ; p += l + 1) {
			r = filelistcompressor_add(&c, p,
l); if (RET_WAS_ERROR(r)) break; } if (RET_WAS_ERROR(r)) { filelistcompressor_cancel(&c); break; } r = filelistcompressor_finish(&c, &newdata, &newdata_size); if (!RET_IS_OK(r)) break; r = table_adduniqsizedrecord(newtable, filekey, newdata, newdata_size, false, false); free(newdata); if (RET_WAS_ERROR(r)) break; } if (RET_WAS_ERROR(r)) { (void)cursor_close(oldtable, cursor); return r; } r = cursor_close(oldtable, cursor); if (RET_WAS_ERROR(r)) return r; return RET_OK; } reprepro-4.13.1/database.h0000644000175100017510000000636512152651661012315 00000000000000#ifndef REPREPRO_DATABASE_H #define REPREPRO_DATABASE_H #ifndef REPREPRO_GLOBALS_H #include "globals.h" #endif #ifndef REPREPRO_ERROR_H #include "error.h" #endif #ifndef REPREPRO_STRLIST_H #include "strlist.h" #endif struct distribution; struct table; struct cursor; retvalue database_create(struct distribution *, bool fast, bool /*nopackages*/, bool /*allowunused*/, bool /*readonly*/, size_t /*waitforlock*/, bool /*verbosedb*/); retvalue database_close(void); retvalue database_openfiles(void); retvalue database_openreferences(void); retvalue database_listpackages(/*@out@*/struct strlist *); retvalue database_droppackages(const char *); retvalue database_openpackages(const char *, bool /*readonly*/, /*@out@*/struct table **); retvalue database_openreleasecache(const char *, /*@out@*/struct table **); retvalue database_opentracking(const char *, bool /*readonly*/, /*@out@*/struct table **); retvalue database_translate_filelists(void); retvalue database_translate_legacy_checksums(bool /*verbosedb*/); bool database_allcreated(void); retvalue table_close(/*@only@*/struct table *); bool table_isempty(struct table *); bool table_recordexists(struct table *, const char *); /* retrieve a record from the database, return RET_NOTHING if there is none: */ retvalue table_getrecord(struct table *, const char *, /*@out@*/char **); retvalue table_gettemprecord(struct table *, const char *, /*@out@*//*@null@*/const char **, 
/*@out@*//*@null@*/size_t *); retvalue table_getpair(struct table *, const char *, const char *, /*@out@*/const char **, /*@out@*/size_t *); retvalue table_adduniqsizedrecord(struct table *, const char * /*key*/, const char * /*data*/, size_t /*data_size*/, bool /*allowoverwrote*/, bool /*nooverwrite*/); retvalue table_adduniqrecord(struct table *, const char * /*key*/, const char * /*data*/); retvalue table_addrecord(struct table *, const char * /*key*/, const char * /*data*/, size_t /*len*/, bool /*ignoredups*/); retvalue table_replacerecord(struct table *, const char *key, const char *data); retvalue table_deleterecord(struct table *, const char *key, bool ignoremissing); retvalue table_checkrecord(struct table *, const char *key, const char *data); retvalue table_removerecord(struct table *, const char *key, const char *data); retvalue table_newglobalcursor(struct table *, /*@out@*/struct cursor **); retvalue table_newduplicatecursor(struct table *, const char *, /*@out@*/struct cursor **, /*@out@*/const char **, /*@out@*/const char **, /*@out@*/size_t *); retvalue table_newpairedcursor(struct table *, const char *, const char *, /*@out@*/struct cursor **, /*@out@*//*@null@*/const char **, /*@out@*//*@null@*/size_t *); bool cursor_nexttemp(struct table *, struct cursor *, /*@out@*/const char **, /*@out@*/const char **); bool cursor_nexttempdata(struct table *, struct cursor *, /*@out@*/const char **, /*@out@*/const char **, /*@out@*/size_t *); bool cursor_nextpair(struct table *, struct cursor *, /*@null@*//*@out@*/const char **, /*@out@*/const char **, /*@out@*/const char **, /*@out@*/size_t *); retvalue cursor_replace(struct table *, struct cursor *, const char *, size_t); retvalue cursor_delete(struct table *, struct cursor *, const char *, /*@null@*/const char *); retvalue cursor_close(struct table *, /*@only@*/struct cursor *); #endif reprepro-4.13.1/hooks.c0000644000175100017510000000417712152651661011666 00000000000000/* This file is part of "reprepro" * 
Copyright (C) 2007,2012 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ /* general helpers infrastructure for all hooks: */ #include #include #include #include #include "error.h" #include "hooks.h" void sethookenvironment(const char *causing_file, const char *causing_rule, const char *suite_from, const char *exitcode) { if (exitcode != NULL) setenv("REPREPRO_EXIT_CODE", exitcode, true); else unsetenv("REPREPRO_EXIT_CODE"); if (causing_file != NULL) setenv("REPREPRO_CAUSING_FILE", causing_file, true); else unsetenv("REPREPRO_CAUSING_FILE"); if (causing_rule != NULL) setenv("REPREPRO_CAUSING_RULE", causing_rule, true); else unsetenv("REPREPRO_CAUSING_RULE"); if (suite_from != NULL) setenv("REPREPRO_FROM", suite_from, true); else unsetenv("REPREPRO_FROM"); if (atom_defined(causingcommand)) setenv("REPREPRO_CAUSING_COMMAND", atoms_commands[causingcommand], true); else unsetenv("REPREPRO_CAUSING_COMMAND"); setenv("REPREPRO_BASE_DIR", global.basedir, true); setenv("REPREPRO_OUT_DIR", global.outdir, true); setenv("REPREPRO_CONF_DIR", global.confdir, true); setenv("REPREPRO_CONFIG_DIR", global.confdir, true); setenv("REPREPRO_DIST_DIR", global.distdir, true); setenv("REPREPRO_LOG_DIR", global.logdir, true); } /* global variables to denote current state */ const char *causingfile = NULL; /* only valid while being called */ command_t causingcommand = atom_unknown; /* valid till 
end of program */ reprepro-4.13.1/checks.h0000644000175100017510000000103112152651661011772 00000000000000#ifndef REPREPRO_CHECKS_H #define REPREPRO_CHECKS_H /* return NULL if no problem, statically allocated string otherwise */ typedef const char *checkfunc(const char *); const char *checkfordirectoryandidentifier(const char *); #define checkforcomponent checkfordirectoryandidentifier #define checkforcodename checkfordirectoryandidentifier const char *checkforidentifierpart(const char *); #define checkforarchitecture checkforidentifierpart /* not yet used */ static inline void checkerror_free(UNUSED(const char *dummy)) {}; #endif reprepro-4.13.1/files.c0000644000175100017510000005412712152651661011645 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003,2004,2005,2006,2007,2008 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "strlist.h" #include "filecntl.h" #include "names.h" #include "checksums.h" #include "dirs.h" #include "names.h" #include "files.h" #include "ignore.h" #include "filelist.h" #include "debfile.h" #include "pool.h" #include "database_p.h" static retvalue files_get_checksums(const char *filekey, /*@out@*/struct checksums **checksums_p) { const char *checksums; size_t checksumslen; retvalue r; r = table_gettemprecord(rdb_checksums, filekey, &checksums, &checksumslen); if (!RET_IS_OK(r)) return r; return checksums_setall(checksums_p, checksums, checksumslen); } retvalue files_add_checksums(const char *filekey, const struct checksums *checksums) { retvalue r; const char *combined; size_t combinedlen; assert (rdb_checksums != NULL); r = checksums_getcombined(checksums, &combined, &combinedlen); if (!RET_IS_OK(r)) return r; r = table_adduniqsizedrecord(rdb_checksums, filekey, combined, combinedlen + 1, true, false); if (!RET_IS_OK(r)) return r; return pool_markadded(filekey); } static retvalue files_replace_checksums(const char *filekey, const struct checksums *checksums) { retvalue r; const char *combined; size_t combinedlen; assert (rdb_checksums != NULL); r = checksums_getcombined(checksums, &combined, &combinedlen); if (!RET_IS_OK(r)) return r; return table_adduniqsizedrecord(rdb_checksums, filekey, combined, combinedlen + 1, true, false); } /* remove file's md5sum from database */ retvalue files_removesilent(const char *filekey) { retvalue r; if (rdb_contents != NULL) (void)table_deleterecord(rdb_contents, filekey, true); r = table_deleterecord(rdb_checksums, filekey, true); if (r == RET_NOTHING) { fprintf(stderr, "Unable to forget unknown 
filekey '%s'.\n", filekey); return RET_ERROR_MISSING; } return r; } retvalue files_remove(const char *filekey) { retvalue r; r = files_removesilent(filekey); if (RET_IS_OK(r)) return pool_markdeleted(filekey); return r; } /* hardlink file with known checksums and add it to database */ retvalue files_hardlinkandadd(const char *tempfile, const char *filekey, const struct checksums *checksums) { retvalue r; /* an additional check to make sure nothing tricks us into * overwriting it by another file */ r = files_canadd(filekey, checksums); if (!RET_IS_OK(r)) return r; r = checksums_hardlink(global.outdir, filekey, tempfile, checksums); if (RET_WAS_ERROR(r)) return r; return files_add_checksums(filekey, checksums); } /* check if file is already there (RET_NOTHING) or could be added (RET_OK) * or RET_ERROR_WRONG_MD5SUM if filekey already has different md5sum */ retvalue files_canadd(const char *filekey, const struct checksums *checksums) { retvalue r; struct checksums *indatabase; bool improves; r = files_get_checksums(filekey, &indatabase); if (r == RET_NOTHING) return RET_OK; if (RET_WAS_ERROR(r)) return r; if (!checksums_check(indatabase, checksums, &improves)) { fprintf(stderr, "File \"%s\" is already registered with different checksums!\n", filekey); checksums_printdifferences(stderr, indatabase, checksums); checksums_free(indatabase); return RET_ERROR_WRONG_MD5; } // TODO: sometimes the caller might want to have additional // checksums from the database already, think about ways to // make them available... 
checksums_free(indatabase);
	/* tail of files_canadd(): file already registered and matching */
	return RET_NOTHING;
}

/* check for file in the database and if not found there, if it can be detected */
/* Returns RET_OK when the file is (now) known with these checksums;
 * RET_NOTHING when it is neither in the database nor in the pool;
 * an error when a conflicting file is found or deletion fails.
 * (continues on the following chunk) */
retvalue files_expect(const char *filekey, const struct checksums *checksums, bool warnifadded) {
	retvalue r;
	char *filename;
	struct checksums *improvedchecksums = NULL;

	r = files_canadd(filekey, checksums);
	if (r == RET_NOTHING)
		/* already registered with matching checksums */
		return RET_OK;
	if (RET_WAS_ERROR(r))
		return r;
	/* ready to add means missing, so have to look for the file itself: */
	filename = files_calcfullfilename(filekey);
	if (FAILEDTOALLOC(filename))
		return RET_ERROR_OOM;

	/* first check if a possible manually put (or left over from previous
	 * download attempts) file is there and is correct */
	r = checksums_test(filename, checksums, &improvedchecksums);
	if (r == RET_ERROR_WRONG_MD5) {
		fprintf(stderr, "Deleting unexpected file '%s'!\n" "(not in database and wrong in pool)\n ", filename);
		/* a successful unlink turns the mismatch into "missing" */
		if (unlink(filename) == 0)
			r = RET_NOTHING;
		else {
			int e = errno;
			fprintf(stderr, "Error %d deleting '%s': %s!\n", e, filename, strerror(e));
		}
	}
	free(filename);
	if (!RET_IS_OK(r))
		return r;
	if (warnifadded)
		fprintf(stderr, "Warning: readded existing file '%s' mysteriously missing from the checksum database.\n", filekey);
	// TODO: some callers might want the updated checksum when
	// improves is true, how to get them there?
/* add found file to database */ if (improvedchecksums != NULL) { r = files_add_checksums(filekey, improvedchecksums); checksums_free(improvedchecksums); } else r = files_add_checksums(filekey, checksums); assert (r != RET_NOTHING); return r; } /* check for several files in the database and in the pool if missing */ retvalue files_expectfiles(const struct strlist *filekeys, struct checksums *checksumsarray[]) { int i; retvalue r; for (i = 0 ; i < filekeys->count ; i++) { const char *filekey = filekeys->values[i]; const struct checksums *checksums = checksumsarray[i]; r = files_expect(filekey, checksums, verbose >= 0); if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) { /* File missing */ fprintf(stderr, "Missing file %s\n", filekey); return RET_ERROR_MISSING; } } return RET_OK; } static inline retvalue checkorimprove(const char *filekey, struct checksums **checksums_p) { const struct checksums *checksums = *checksums_p; struct checksums *indatabase; bool improves; retvalue r; r = files_get_checksums(filekey, &indatabase); if (r == RET_NOTHING) { fprintf(stderr, "Missing file %s\n", filekey); return RET_ERROR_MISSING; } if (RET_WAS_ERROR(r)) return r; if (!checksums_check(checksums, indatabase, &improves)) { fprintf(stderr, "File \"%s\" is already registered with different checksums!\n", filekey); checksums_printdifferences(stderr, indatabase, checksums); r = RET_ERROR_WRONG_MD5; } else if (improves) { r = checksums_combine(checksums_p, indatabase, NULL); } else r = RET_NOTHING; checksums_free(indatabase); return r; } /* check for several files in the database and update information, * return RET_NOTHING if everything is OK and nothing needs improving */ retvalue files_checkorimprove(const struct strlist *filekeys, struct checksums *checksumsarray[]) { int i; retvalue result, r; result = RET_NOTHING; for (i = 0 ; i < filekeys->count ; i++) { r = checkorimprove(filekeys->values[i], &checksumsarray[i]); if (RET_WAS_ERROR(r)) return r; if (RET_IS_OK(r)) result = 
RET_OK; } return result; } /* dump out all information */ retvalue files_printmd5sums(void) { retvalue result, r; struct cursor *cursor; const char *filekey, *checksum; r = table_newglobalcursor(rdb_checksums, &cursor); if (!RET_IS_OK(r)) return r; result = RET_NOTHING; while (cursor_nexttemp(rdb_checksums, cursor, &filekey, &checksum)) { result = RET_OK; (void)fputs(filekey, stdout); (void)putchar(' '); while (*checksum == ':') { while (*checksum != ' ' && *checksum != '\0') checksum++; if (*checksum == ' ') checksum++; } (void)fputs(checksum, stdout); (void)putchar('\n'); } r = cursor_close(rdb_checksums, cursor); RET_ENDUPDATE(result, r); return result; } retvalue files_printchecksums(void) { retvalue result, r; struct cursor *cursor; const char *filekey, *checksum; r = table_newglobalcursor(rdb_checksums, &cursor); if (!RET_IS_OK(r)) return r; result = RET_NOTHING; while (cursor_nexttemp(rdb_checksums, cursor, &filekey, &checksum)) { result = RET_OK; (void)fputs(filekey, stdout); (void)putchar(' '); (void)fputs(checksum, stdout); (void)putchar('\n'); if (interrupted()) { result = RET_ERROR_INTERRUPTED; break; } } r = cursor_close(rdb_checksums, cursor); RET_ENDUPDATE(result, r); return result; } /* callback for each registered file */ retvalue files_foreach(per_file_action action, void *privdata) { retvalue result, r; struct cursor *cursor; const char *filekey, *checksum; r = table_newglobalcursor(rdb_checksums, &cursor); if (!RET_IS_OK(r)) return r; result = RET_NOTHING; while (cursor_nexttemp(rdb_checksums, cursor, &filekey, &checksum)) { if (interrupted()) { RET_UPDATE(result, RET_ERROR_INTERRUPTED); break; } r = action(privdata, filekey); RET_UPDATE(result, r); } r = cursor_close(rdb_checksums, cursor); RET_ENDUPDATE(result, r); return result; } static retvalue checkpoolfile(const char *fullfilename, const struct checksums *expected, bool *improveable) { struct checksums *actual; retvalue r; bool improves; r = checksums_read(fullfilename, &actual); if 
(RET_IS_OK(r)) { if (!checksums_check(expected, actual, &improves)) { fprintf(stderr, "WRONG CHECKSUMS of '%s':\n", fullfilename); checksums_printdifferences(stderr, expected, actual); r = RET_ERROR_WRONG_MD5; } else if (improves) *improveable = true; checksums_free(actual); } return r; } retvalue files_checkpool(bool fast) { retvalue result, r; struct cursor *cursor; const char *filekey, *combined; size_t combinedlen; struct checksums *expected; char *fullfilename; bool improveable = false; result = RET_NOTHING; r = table_newglobalcursor(rdb_checksums, &cursor); if (!RET_IS_OK(r)) return r; while (cursor_nexttempdata(rdb_checksums, cursor, &filekey, &combined, &combinedlen)) { r = checksums_setall(&expected, combined, combinedlen); if (RET_WAS_ERROR(r)) { RET_UPDATE(result, r); continue; } fullfilename = files_calcfullfilename(filekey); if (FAILEDTOALLOC(fullfilename)) { result = RET_ERROR_OOM; checksums_free(expected); break; } if (fast) r = checksums_cheaptest(fullfilename, expected, true); else r = checkpoolfile(fullfilename, expected, &improveable); if (r == RET_NOTHING) { fprintf(stderr, "Missing file '%s'!\n", fullfilename); r = RET_ERROR_MISSING; } free(fullfilename); checksums_free(expected); RET_UPDATE(result, r); } r = cursor_close(rdb_checksums, cursor); RET_ENDUPDATE(result, r); if (improveable && verbose >= 0) printf( "There were files with only some of the checksums this version of reprepro\n" "can compute recorded. 
To add those run reprepro collectnewchecksums.\n"); return result; } retvalue files_collectnewchecksums(void) { retvalue result, r; struct cursor *cursor; const char *filekey, *all; size_t alllen; struct checksums *expected; char *fullfilename; result = RET_NOTHING; r = table_newglobalcursor(rdb_checksums, &cursor); if (!RET_IS_OK(r)) return r; while (cursor_nexttempdata(rdb_checksums, cursor, &filekey, &all, &alllen)) { r = checksums_setall(&expected, all, alllen); if (!RET_IS_OK(r)) { RET_UPDATE(result, r); continue; } if (checksums_iscomplete(expected)) { checksums_free(expected); continue; } fullfilename = files_calcfullfilename(filekey); if (FAILEDTOALLOC(fullfilename)) { result = RET_ERROR_OOM; checksums_free(expected); break; } r = checksums_complete(&expected, fullfilename); if (r == RET_NOTHING) { fprintf(stderr, "Missing file '%s'!\n", fullfilename); r = RET_ERROR_MISSING; } if (r == RET_ERROR_WRONG_MD5) { fprintf(stderr, "ERROR: Cannot collect missing checksums for '%s'\n" "as the file in the pool does not match the already recorded checksums\n", filekey); } free(fullfilename); if (RET_IS_OK(r)) r = files_replace_checksums(filekey, expected); checksums_free(expected); RET_UPDATE(result, r); } r = cursor_close(rdb_checksums, cursor); RET_ENDUPDATE(result, r); return result; } retvalue files_detect(const char *filekey) { struct checksums *checksums; char *fullfilename; retvalue r; fullfilename = files_calcfullfilename(filekey); if (FAILEDTOALLOC(fullfilename)) return RET_ERROR_OOM; r = checksums_read(fullfilename, &checksums); if (r == RET_NOTHING) { fprintf(stderr, "Error opening '%s'!\n", fullfilename); r = RET_ERROR_MISSING; } if (RET_WAS_ERROR(r)) { free(fullfilename); return r; } free(fullfilename); r = files_add_checksums(filekey, checksums); checksums_free(checksums); return r; } struct rfd { bool reread; }; static retvalue regenerate_filelist(void *data, const char *filekey) { bool reread = ((struct rfd*)data)->reread; size_t l = strlen(filekey); 
char *debfilename; char *filelist; size_t fls; retvalue r; if (l <= 4 || memcmp(filekey+l-4, ".deb", 4) != 0) return RET_NOTHING; if (!reread && !table_recordexists(rdb_contents, filekey)) return RET_NOTHING; debfilename = files_calcfullfilename(filekey); if (FAILEDTOALLOC(debfilename)) return RET_ERROR_OOM; r = getfilelist(&filelist, &fls, debfilename); free(debfilename); if (RET_IS_OK(r)) { if (verbose > 0) (void)puts(filekey); if (verbose > 6) { const char *p = filelist; while (*p != '\0') { (void)putchar(' '); (void)puts(p); p += strlen(p)+1; } } r = table_adduniqsizedrecord(rdb_contents, filekey, filelist, fls, true, true); free(filelist); } return r; } retvalue files_regenerate_filelist(bool reread) { struct rfd d; d.reread = reread; return files_foreach(regenerate_filelist, &d); } /* Include a yet unknown file into the pool */ retvalue files_preinclude(const char *sourcefilename, const char *filekey, struct checksums **checksums_p) { retvalue r; struct checksums *checksums, *realchecksums; bool improves; char *fullfilename; r = files_get_checksums(filekey, &checksums); if (RET_WAS_ERROR(r)) return r; if (RET_IS_OK(r)) { r = checksums_read(sourcefilename, &realchecksums); if (r == RET_NOTHING) r = RET_ERROR_MISSING; if (RET_WAS_ERROR(r)) { checksums_free(checksums); return r; } if (!checksums_check(checksums, realchecksums, &improves)) { fprintf(stderr, "ERROR: '%s' cannot be included as '%s'.\n" "Already existing files can only be included again, if they are the same, but:\n", sourcefilename, filekey); checksums_printdifferences(stderr, checksums, realchecksums); checksums_free(checksums); checksums_free(realchecksums); return RET_ERROR_WRONG_MD5; } if (improves) { r = checksums_combine(&checksums, realchecksums, NULL); if (RET_WAS_ERROR(r)) { checksums_free(realchecksums); checksums_free(checksums); return r; } r = files_replace_checksums(filekey, checksums); if (RET_WAS_ERROR(r)) { checksums_free(realchecksums); checksums_free(checksums); return r; } } 
checksums_free(realchecksums); // args, this breaks retvalue semantics! if (checksums_p != NULL) *checksums_p = checksums; else checksums_free(checksums); return RET_NOTHING; } assert (sourcefilename != NULL); fullfilename = files_calcfullfilename(filekey); if (FAILEDTOALLOC(fullfilename)) return RET_ERROR_OOM; (void)dirs_make_parent(fullfilename); r = checksums_copyfile(fullfilename, sourcefilename, true, &checksums); if (r == RET_ERROR_EXIST) { // TODO: deal with already existing files! fprintf(stderr, "File '%s' does already exist!\n", fullfilename); } if (r == RET_NOTHING) { fprintf(stderr, "Could not open '%s'!\n", sourcefilename); r = RET_ERROR_MISSING; } if (RET_WAS_ERROR(r)) { free(fullfilename); return r; } free(fullfilename); r = files_add_checksums(filekey, checksums); if (RET_WAS_ERROR(r)) { checksums_free(checksums); return r; } if (checksums_p != NULL) *checksums_p = checksums; else checksums_free(checksums); return RET_OK; } static retvalue checkimproveorinclude(const char *sourcedir, const char *basefilename, const char *filekey, struct checksums **checksums_p, bool *improving) { retvalue r; struct checksums *checksums = NULL; bool improves, copied = false; char *fullfilename = files_calcfullfilename(filekey); if (FAILEDTOALLOC(fullfilename)) return RET_ERROR_OOM; if (checksums_iscomplete(*checksums_p)) { r = checksums_cheaptest(fullfilename, *checksums_p, true); if (r != RET_NOTHING) { free(fullfilename); return r; } } else { r = checksums_read(fullfilename, &checksums); if (RET_WAS_ERROR(r)) { free(fullfilename); return r; } } if (r == RET_NOTHING) { char *sourcefilename = calc_dirconcat(sourcedir, basefilename); if (FAILEDTOALLOC(sourcefilename)) { free(fullfilename); return RET_ERROR_OOM; } fprintf(stderr, "WARNING: file %s was lost!\n" "(i.e. 
found in the database, but not in the pool)\n" "trying to compensate...\n", filekey); (void)dirs_make_parent(fullfilename); r = checksums_copyfile(fullfilename, sourcefilename, false, &checksums); if (r == RET_ERROR_EXIST) { fprintf(stderr, "File '%s' seems to be missing and existing at the same time!\n" "To confused to continue...\n", fullfilename); } if (r == RET_NOTHING) { fprintf(stderr, "Could not open '%s'!\n", sourcefilename); r = RET_ERROR_MISSING; } free(sourcefilename); if (RET_WAS_ERROR(r)) { free(fullfilename); return r; } copied = true; } assert (checksums != NULL); if (!checksums_check(*checksums_p, checksums, &improves)) { if (copied) { deletefile(fullfilename); fprintf(stderr, "ERROR: Unexpected content of file '%s/%s'!\n", sourcedir, basefilename); } else // TODO: if the database only listed some of the currently supported checksums, // and the caller of checkincludefile supplied some (which none yet does), but // not all (which needs at least three checksums, i.e. not applicaple before // sha256 get added), then this might also be called if the file in the pool // just has the same checksums as previously recorded (e.g. a md5sum collision) // but the new file was listed with another secondary hash than the original. // In that situation it might be a bit misleading... fprintf(stderr, "ERROR: file %s is damaged!\n" "(i.e. 
found in the database, but with different checksums in the pool)\n", filekey); checksums_printdifferences(stderr, *checksums_p, checksums); r = RET_ERROR_WRONG_MD5; } if (improves) { r = checksums_combine(checksums_p, checksums, NULL); if (RET_IS_OK(r)) *improving = true; } checksums_free(checksums); free(fullfilename); return r; } retvalue files_checkincludefile(const char *sourcedir, const char *basefilename, const char *filekey, struct checksums **checksums_p) { char *sourcefilename, *fullfilename; struct checksums *checksums; retvalue r; bool improves; assert (*checksums_p != NULL); r = files_get_checksums(filekey, &checksums); if (RET_WAS_ERROR(r)) return r; if (RET_IS_OK(r)) { /* there are three sources now: * - the checksums from the database (may have some we * do not even know about, and may miss some we can * generate) * - the checksums provided (typically only md5sum, * as this comes from a .changes or .dsc) * - the checksums of the file * * to make things more complicated, the file should only * be read if needed, as this needs time. * And it can happen the file got lost in the pool, then * this is the best place to replace it. 
*/ if (!checksums_check(checksums, *checksums_p, &improves)) { fprintf(stderr, "ERROR: '%s/%s' cannot be included as '%s'.\n" "Already existing files can only be included again, if they are the same, but:\n", sourcedir, basefilename, filekey); checksums_printdifferences(stderr, checksums, *checksums_p); checksums_free(checksums); return RET_ERROR_WRONG_MD5; } r = RET_NOTHING; if (improves) r = checksums_combine(&checksums, *checksums_p, NULL); if (!RET_WAS_ERROR(r)) r = checkimproveorinclude(sourcedir, basefilename, filekey, &checksums, &improves); if (!RET_WAS_ERROR(r) && improves) r = files_replace_checksums(filekey, checksums); if (RET_IS_OK(r)) r = RET_NOTHING; /* return the combined checksum */ checksums_free(*checksums_p); *checksums_p = checksums; return r; } assert (sourcedir != NULL); sourcefilename = calc_dirconcat(sourcedir, basefilename); if (FAILEDTOALLOC(sourcefilename)) return RET_ERROR_OOM; fullfilename = files_calcfullfilename(filekey); if (FAILEDTOALLOC(fullfilename)) { free(sourcefilename); return RET_ERROR_OOM; } (void)dirs_make_parent(fullfilename); r = checksums_copyfile(fullfilename, sourcefilename, true, &checksums); if (r == RET_NOTHING) { fprintf(stderr, "Could not open '%s'!\n", sourcefilename); r = RET_ERROR_MISSING; } if (RET_WAS_ERROR(r)) { free(fullfilename); free(sourcefilename); return r; } if (!checksums_check(*checksums_p, checksums, &improves)) { deletefile(fullfilename); fprintf(stderr, "ERROR: Unexpected content of file '%s'!\n", sourcefilename); checksums_printdifferences(stderr, *checksums_p, checksums); r = RET_ERROR_WRONG_MD5; } free(sourcefilename); free(fullfilename); if (RET_WAS_ERROR(r)) { return r; } if (improves) { r = checksums_combine(checksums_p, checksums, NULL); checksums_free(checksums); if (RET_WAS_ERROR(r)) return r; } else checksums_free(checksums); return files_add_checksums(filekey, *checksums_p); } off_t files_getsize(const char *filekey) { retvalue r; off_t s; struct checksums *checksums; r = 
files_get_checksums(filekey, &checksums); if (!RET_IS_OK(r)) return -1; s = checksums_getfilesize(checksums); checksums_free(checksums); return s; } reprepro-4.13.1/chunks.h0000644000175100017510000000610112152651661012030 00000000000000#ifndef REPREPRO_CHUNKS_H #define REPREPRO_CHUNKS_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" #endif #ifndef REPREPRO_STRLIST_H #include "strlist.h" #endif /* look for name in chunk. returns RET_NOTHING if not found */ retvalue chunk_getvalue(const char *, const char *, /*@out@*/char **); retvalue chunk_getextralinelist(const char *, const char *, /*@out@*/struct strlist *); retvalue chunk_getwordlist(const char *, const char *, /*@out@*/struct strlist *); retvalue chunk_getuniqwordlist(const char *, const char *, /*@out@*/struct strlist *); retvalue chunk_getwholedata(const char *, const char *, /*@out@*/char **value); /* Parse a package/source-field: ' *value( ?\(version\))? *' */ retvalue chunk_getname(const char *, const char *, /*@out@*/char **, bool /*allowversion*/); retvalue chunk_getnameandversion(const char *, const char *, /*@out@*/char **, /*@out@*/char **); /* return RET_OK, if field is found, RET_NOTHING, if not (or value indicates false in future variants) */ retvalue chunk_gettruth(const char *, const char *); /* return RET_OK, if field is found, RET_NOTHING, if not */ retvalue chunk_checkfield(const char *, const char *); /* modifications of a chunk: */ struct fieldtoadd { /*@null@*/struct fieldtoadd *next; /* The name of the field: */ /*@dependent@*/const char *field; /* The data to include: (if NULL, delete this field) */ /*@null@*//*@dependent@*/const char *data; /* how many chars in them (the *exact* len to use * , no \0 allowed within!), */ size_t len_field, len_data; }; // TODO make this return retvalue.. /* Add this the to before field, * replacing older fields of this name, if they are already there. 
*/ /*@null@*/ char *chunk_replacefields(const char *, const struct fieldtoadd *, const char * /*beforethis*/, bool /*maybemissing*/); /*@null@*/struct fieldtoadd *deletefield_new(/*@dependent@*/const char *, /*@only@*//*@null@*/struct fieldtoadd *); /*@null@*/struct fieldtoadd *aodfield_new(/*@dependent@*/const char *, /*@dependent@*//*@null@*/const char *, /*@only@*/struct fieldtoadd *); /*@null@*/struct fieldtoadd *addfield_new(/*@dependent@*/const char *, /*@dependent@*//*@null@*/const char *, /*@only@*/struct fieldtoadd *); /*@null@*/struct fieldtoadd *addfield_newn(/*@dependent@*/const char *, /*@dependent@*//*@null@*/const char *, size_t, /*@only@*/struct fieldtoadd *); void addfield_free(/*@only@*//*@null@*/struct fieldtoadd *); /* that is chunk_replacefields(chunk,{fieldname,strlen,data,strlen},fieldname); */ /*@null@*/char *chunk_replacefield(const char *, const char *, const char *, bool /*maybemissing*/); /* make sure a given field is first and remove any later occurences */ /*@null@*/char *chunk_normalize(const char *, const char *, const char *); /* reformat control data, removing leading spaces and CRs */ size_t chunk_extract(char * /*buffer*/, const char */*start*/, size_t, bool, /*@out@*/const char ** /*next*/); const char *chunk_getstart(const char *, size_t, bool /*commentsallowed*/); const char *chunk_over(const char *); #endif reprepro-4.13.1/dirs.h0000644000175100017510000000154712152651661011507 00000000000000#ifndef REPREPRO_DIRS_H #define REPREPRO_DIRS_H #ifndef REPREPRO_ERROR_H #warning "What is happening here?" #include "error.h" #endif #ifndef REPREPRO_STRLIST_H #warning "What is happening here?" 
#include "strlist.h" #endif /* create a directory, return RET_NOTHING if already existing */ retvalue dirs_create(const char *); /* create recursively all parent directories before the last '/' */ retvalue dirs_make_parent(const char *); /* create dirname and any '/'-separated part of it */ retvalue dirs_make_recursive(const char *); /* create directory and parents as needed, and save count to remove them later */ retvalue dir_create_needed(const char *, int *); void dir_remove_new(const char *, int); /* Behave like dirname(3) */ retvalue dirs_getdirectory(const char *, /*@out@*/char **); const char *dirs_basename(const char *); bool isdir(const char *); #endif reprepro-4.13.1/termdecide.h0000644000175100017510000000075212152651661012650 00000000000000#ifndef REPREPRO_TERMDECIDE_H #define REPREPRO_TERMDECIDE_H #ifndef REPREPRO_TERMS_H #include "terms.h" #endif #ifndef REPREPRO_TARGET_H #include "target.h" #endif /* decide based on a chunk, (warning: string comparisons even for version!)*/ retvalue term_decidechunk(const term *, const char *, /*@null@*/const void *); retvalue term_compilefortargetdecision(/*@out@*/term **, const char *); retvalue term_decidechunktarget(const term *, const char *, const struct target *); #endif reprepro-4.13.1/sha1.c0000644000175100017510000001451712152651661011376 00000000000000/* SHA-1 in C By Steve Reid 100% Public Domain ----------------- Modified 7/98 By James H. Brown Still 100% Public Domain [changes omitted as reverted] ----------------- Modified 8/98 By Steve Reid Still 100% public domain 1- Removed #include and used return() instead of exit() 2- Fixed overwriting of finalcount in SHA1Final() (discovered by Chris Hall) 3- Changed email address from steve@edmweb.com to sreid@sea-to-sky.net ----------------- Modified 4/01 By Saul Kravitz Still 100% PD Modified to run on Compaq Alpha hardware. 
----------------- Modified 07/2002 By Ralph Giles Still 100% public domain modified for use with stdint types, autoconf code cleanup, removed attribution comments switched SHA1Final() argument order for consistency use SHA1_ prefix for public api move public api to sha1.h ------------------------ Modified 11/2007 by Bernhard R. Link Still 100% public domain: Removed everything not related to hash itself, removed wiping of temp data (as not needed for public data) multiple modifications to make it more what I consider readable. using endian.h now. multiple more modifications... Modified 06/2008 by Bernhard R. Link Still 100% public domain: use WORDS_BIGENDIAN instead of endian.h */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include #include #include #include #include "sha1.h" static void SHA1_Transform(uint32_t state[5], const uint8_t buffer[64]); #define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits)))) #define blk(i) (block[i&15] = rol(block[(i+13)&15]^block[(i+8)&15] \ ^block[(i+2)&15]^block[i&15],1)) /* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */ #define R0(v,w,x,y,z,i) z+=((w&(x^y))^y)+block[i]+0x5A827999+rol(v,5);w=rol(w,30); #define R1(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk(i)+0x5A827999+rol(v,5);w=rol(w,30); #define R2(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0x6ED9EBA1+rol(v,5);w=rol(w,30); #define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5);w=rol(w,30); #define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30); /* Hash a single 512-bit block. This is the core of the algorithm. 
*/ void SHA1_Transform(uint32_t state[5], const uint8_t buffer[64]) { uint32_t a, b, c, d, e; uint32_t block[16]; #ifndef WORDS_BIGENDIAN int i; #endif assert (sizeof(block) == 64*sizeof(uint8_t)); #ifdef WORDS_BIGENDIAN memcpy(block, buffer, sizeof(block)); #else for (i = 0 ; i < 16 ; i++) { block[i] = (buffer[4*i]<<24) | (buffer[4*i+1]<<16) | (buffer[4*i+2]<<8) | buffer[4*i+3]; } #endif /* Copy context->state[] to working vars */ a = state[0]; b = state[1]; c = state[2]; d = state[3]; e = state[4]; /* 4 rounds of 20 operations each. Loop unrolled. */ R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3); R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7); R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9); R0(a,b,c,d,e,10); R0(e,a,b,c,d,11); R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14); R0(a,b,c,d,e,15); R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19); R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23); R2(b,c,d,e,a,24); R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27); R2(c,d,e,a,b,28); R2(b,c,d,e,a,29); R2(a,b,c,d,e,30); R2(e,a,b,c,d,31); R2(d,e,a,b,c,32); R2(c,d,e,a,b,33); R2(b,c,d,e,a,34); R2(a,b,c,d,e,35); R2(e,a,b,c,d,36); R2(d,e,a,b,c,37); R2(c,d,e,a,b,38); R2(b,c,d,e,a,39); R3(a,b,c,d,e,40); R3(e,a,b,c,d,41); R3(d,e,a,b,c,42); R3(c,d,e,a,b,43); R3(b,c,d,e,a,44); R3(a,b,c,d,e,45); R3(e,a,b,c,d,46); R3(d,e,a,b,c,47); R3(c,d,e,a,b,48); R3(b,c,d,e,a,49); R3(a,b,c,d,e,50); R3(e,a,b,c,d,51); R3(d,e,a,b,c,52); R3(c,d,e,a,b,53); R3(b,c,d,e,a,54); R3(a,b,c,d,e,55); R3(e,a,b,c,d,56); R3(d,e,a,b,c,57); R3(c,d,e,a,b,58); R3(b,c,d,e,a,59); R4(a,b,c,d,e,60); R4(e,a,b,c,d,61); R4(d,e,a,b,c,62); R4(c,d,e,a,b,63); R4(b,c,d,e,a,64); R4(a,b,c,d,e,65); R4(e,a,b,c,d,66); R4(d,e,a,b,c,67); R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71); R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75); R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79); 
/* Add the working vars back into context.state[] */ state[0] += a; state[1] += b; state[2] += c; state[3] += d; state[4] += e; } /* SHA1Init - Initialize new context */ void SHA1Init(struct SHA1_Context *context) { /* SHA1 initialization constants */ context->state[0] = 0x67452301; context->state[1] = 0xEFCDAB89; context->state[2] = 0x98BADCFE; context->state[3] = 0x10325476; context->state[4] = 0xC3D2E1F0; context->count = 0; } /* Run your data through this. */ void SHA1Update(struct SHA1_Context *context, const uint8_t* data, const size_t len) { size_t i, j; j = context->count & 63; context->count += len; if (j == 0) { for (i = 0 ; len >= i + 64 ; i += 64) { SHA1_Transform(context->state, data + i); } j = 0; } else if ((j + len) >= 64) { memcpy(&context->buffer[j], data, (i = 64-j)); SHA1_Transform(context->state, context->buffer); for (; len >= i + 64 ; i += 64) { SHA1_Transform(context->state, data + i); } j = 0; } else i = 0; memcpy(&context->buffer[j], &data[i], len - i); } /* Add padding and return the message digest. */ void SHA1Final(struct SHA1_Context *context, uint8_t digest[SHA1_DIGEST_SIZE]) { unsigned char i; int j; uint64_t bitcount; bitcount = context->count << 3; i = context->count & 63; context->buffer[i] = '\200'; i++; if (i > 56) { if (i < 64) memset(context->buffer + i, 0, 64-i); SHA1_Transform(context->state, context->buffer); i = 0; } if (i < 56) { memset(context->buffer + i, 0, 56-i); } for (j = 7; j >= 0; j--) { context->buffer[56 + j] = bitcount & 0xFF; bitcount >>= 8; } SHA1_Transform(context->state, context->buffer); for (i = 0; i < SHA1_DIGEST_SIZE; i++) { digest[i] = (uint8_t) ((context->state[i>>2] >> ((3-(i & 3)) * 8) ) & 255); } } reprepro-4.13.1/dirs.c0000644000175100017510000001237512152651661011503 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003 Bernhard R. 
Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include "error.h" #include "strlist.h" #include "dirs.h" #include "names.h" /* create directory dirname. */ retvalue dirs_create(const char *dirname) { int ret, e; ret = mkdir(dirname, 0775); if (ret == 0) { if (verbose > 1) printf("Created directory \"%s\"\n", dirname); return RET_OK; } else if (ret < 0 && (e = errno) != EEXIST) { fprintf(stderr, "Error %d creating directory \"%s\": %s\n", e, dirname, strerror(e)); return RET_ERROR; } return RET_NOTHING; } /* create recursively all parent directories before the last '/' */ retvalue dirs_make_parent(const char *filename) { const char *p; char *h; int i; retvalue r; for (p = filename+1, i = 1 ; *p != '\0' ; p++, i++) { if (*p == '/') { h = strndup(filename, i); if (FAILEDTOALLOC(h)) return RET_ERROR_OOM; r = dirs_create(h); if (RET_WAS_ERROR(r)) { free(h); return r; } free(h); } } return RET_OK; } /* create dirname and any '/'-separated part of it */ retvalue dirs_make_recursive(const char *directory) { retvalue r, result; if (interrupted()) { return RET_ERROR_INTERRUPTED; } r = dirs_make_parent(directory); result = dirs_create(directory); RET_UPDATE(result, r); return result; } /* create directory and return the number of created directoried */ retvalue dir_create_needed(const char *directory, int 
*createddepth) { retvalue r; int ret; size_t len = strlen(directory); int check, depth = 0; char *this; int e; if (interrupted()) { return RET_ERROR_INTERRUPTED; } while (len > 0 && directory[len-1] == '/') len--; while (len > 0) { this = strndup(directory, len); if (FAILEDTOALLOC(this)) return RET_ERROR_OOM; ret = mkdir(this, 0777); e = errno; if (ret == 0) { if (verbose > 1) printf("Created directory \"%s\"\n", this); } else if (e == EEXIST) { free(this); break; /* normaly ENOENT should be the only problem, * but check the others to be nice to annoying filesystems */ } else if (e != ENOENT && e != EACCES && e != EPERM) { fprintf(stderr, "Cannot create directory \"%s\": %s(%d)\n", this, strerror(e), e); free(this); return RET_ERRNO(e); } free(this); depth++; while (len > 0 && directory[len-1] != '/') len--; while (len > 0 && directory[len-1] == '/') len--; } check = depth; while (directory[len] == '/') len++; while (directory[len] != '\0') { while (directory[len] != '\0' && directory[len] != '/') len++; this = strndup(directory, len); if (FAILEDTOALLOC(this)) return RET_ERROR_OOM; r = dirs_create(this); free(this); if (RET_WAS_ERROR(r)) return r; // TODO: if we get RET_NOTHING here, reduce depth? 
check--; while (directory[len] == '/') len++; } assert(check == 0); *createddepth = depth; return RET_OK; } void dir_remove_new(const char *directory, int created) { size_t len = strlen(directory); char *this; int ret; while (len > 0 && directory[len-1] == '/') len--; while (created > 0 && len > 0) { this = strndup(directory, len); if (FAILEDTOALLOC(this)) return; ret = rmdir(this); if (ret == 0) { if (verbose > 1) printf( "Removed empty directory \"%s\"\n", this); } else { int e = errno; if (e != ENOTEMPTY) { fprintf(stderr, "Error removing directory \"%s\": %s(%d)\n", this, strerror(e), e); } free(this); return; } free(this); created--; while (len > 0 && directory[len-1] != '/') len--; while (len > 0 && directory[len-1] == '/') len--; } return; } retvalue dirs_getdirectory(const char *filename, char **directory) { size_t len; assert (filename != NULL && *filename != '\0'); len = strlen(filename); while (len > 1 && filename[len-1] == '/') { len--; } while (len > 0 && filename[len-1] != '/') { len--; } if (len == 0) { *directory = strdup("."); } else { if (len == 1) *directory = strdup("/"); else *directory = strndup(filename, len-1); } if (FAILEDTOALLOC(*directory)) return RET_ERROR_OOM; else return RET_OK; } const char *dirs_basename(const char *filename) { const char *bn; bn = strrchr(filename, '/'); if (bn == NULL) return filename; // not really suited for the basename of directories, // things like /bla/blub/ will give emtpy string... return bn+1; } bool isdir(const char *fullfilename) { struct stat s; int i; assert(fullfilename != NULL); i = stat(fullfilename, &s); return i == 0 && S_ISDIR(s.st_mode); } reprepro-4.13.1/readtextfile.c0000644000175100017510000000703012152651661013212 00000000000000/* This file is part of "reprepro" * Copyright (C) 2007 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "names.h" #include "chunks.h" #include "readtextfile.h" /* This file supplies code to read a text file (.changes, .dsc, Release, ...) * into a chunk, warning if it is too long or if it contains binary data */ static bool isbinarydata(const char *buffer, size_t len, const char *source) { size_t i; unsigned char c; for (i = 0 ; i < len ; i++) { c = (unsigned char)buffer[i]; if (c < ' ' && c != '\t' && c != '\n' && c != '\r') { fprintf(stderr, "Unexpected binary character \\%03hho in %s\n", c, source); return true; } } return false; } retvalue readtextfilefd(int fd, const char *source, char **data, size_t *len) { size_t buffersize = 102400, readdata = 0; ssize_t readbytes; char *buffer, *h; buffer = malloc(buffersize); if (FAILEDTOALLOC(buffer)) return RET_ERROR_OOM; errno = 0; while ((readbytes = read(fd, buffer + readdata, buffersize-readdata)) > 0) { /* text files are normaly small, so it does not hurt to check * the whole of them always */ if (isbinarydata(buffer + readdata, (size_t)readbytes, source)) { free(buffer); return RET_ERROR; } readdata += readbytes; assert (readdata <= buffersize); if (readdata + 1024 >= buffersize) { if (buffersize >= 10*1024*1024) { fprintf(stderr, "Ridiculously large %s\n", source); free(buffer); return RET_ERROR; } buffersize += 51200; h = realloc(buffer, buffersize); if (FAILEDTOALLOC(h)) { free(buffer); return RET_ERROR_OOM; } buffer = h; } } if (readbytes < 0) { int e = 
errno; free(buffer); fprintf(stderr, "Error reading %s: %s\n", source, strerror(e)); return RET_ERRNO(e); } h = realloc(buffer, readdata + 1); if (h == NULL) { #ifdef SPLINT h = NULL; #endif if (readdata >= buffersize) { free(buffer); return RET_ERROR_OOM; } } else buffer = h; buffer[readdata] = '\0'; *data = buffer; if (len != NULL) *len = readdata; return RET_OK; } retvalue readtextfile(const char *source, const char *sourcetoshow, char **data, size_t *len) { int fd; char *buffer; size_t bufferlen; retvalue r; int ret; fd = open(source, O_RDONLY|O_NOCTTY); if (fd < 0) { int e = errno; fprintf(stderr, "Error opening '%s': %s\n", sourcetoshow, strerror(e)); return RET_ERRNO(e); } r = readtextfilefd(fd, sourcetoshow, &buffer, &bufferlen); if (!RET_IS_OK(r)) { (void)close(fd); return r; } ret = close(fd); if (ret != 0) { int e = errno; free(buffer); fprintf(stderr, "Error reading %s: %s\n", sourcetoshow, strerror(e)); return RET_ERRNO(e); } *data = buffer; if (len != NULL) *len = bufferlen; return RET_OK; } reprepro-4.13.1/indexfile.h0000644000175100017510000000104712152651661012510 00000000000000#ifndef REPREPRO_INDEXFILE_H #define REPREPRO_INDEXFILE_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" #endif #ifndef REPREPRO_TARGET_H #include "target.h" #endif struct indexfile; retvalue indexfile_open(/*@out@*/struct indexfile **, const char *, enum compression); retvalue indexfile_close(/*@only@*/struct indexfile *); bool indexfile_getnext(struct indexfile *, /*@out@*/char **, /*@out@*/char **, /*@out@*/const char **, /*@out@*/ architecture_t *, const struct target *, bool allowwrongarchitecture); #endif reprepro-4.13.1/byhandhook.c0000644000175100017510000001430312152651661012661 00000000000000/* This file is part of "reprepro" * Copyright (C) 2010 Bernhard R. 
Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "filecntl.h" #include "names.h" #include "configparser.h" #include "globmatch.h" #include "hooks.h" #include "byhandhook.h" struct byhandhook { /*@null@*/struct byhandhook *next; char *sectionglob; char *priorityglob; char *filenameglob; char *script; }; void byhandhooks_free(struct byhandhook *l) { while (l != NULL) { /*@null@*/struct byhandhook *n = l->next; free(l->sectionglob); free(l->priorityglob); free(l->filenameglob); free(l->script); free(l); l = n; } } retvalue byhandhooks_parse(struct configiterator *iter, struct byhandhook **hooks_p) { retvalue r; char *v; struct byhandhook *h, *hooks = NULL, **nexthook_p = &hooks; r = config_getwordinline(iter, &v); if (RET_IS_OK(r)) { fprintf(stderr, "Error parsing %s, line %u, column %u: unexpected input '%s'" " (each hook must be in its own line)!\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter), v); free(v); r = RET_ERROR; } if (RET_WAS_ERROR(r)) return r; while (config_nextline(iter)) { r = config_getwordinline(iter, &v); if (r == RET_NOTHING) continue; if (RET_WAS_ERROR(r)) break; h = zNEW(struct byhandhook); if (FAILEDTOALLOC(h)) { r = RET_ERROR_OOM; break; } *nexthook_p = h; nexthook_p = &h->next; h->sectionglob = v; r = 
config_getwordinline(iter, &v); if (r == RET_NOTHING) { fprintf(stderr, "Error parsing %s, line %u, column %u: each byhand hooks needs 4 arguments, found only 1!\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter)); r = RET_ERROR; } if (RET_WAS_ERROR(r)) break; h->priorityglob = v; r = config_getwordinline(iter, &v); if (r == RET_NOTHING) { fprintf(stderr, "Error parsing %s, line %u, column %u: each byhand hooks needs 4 arguments, found only 2!\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter)); r = RET_ERROR; } if (RET_WAS_ERROR(r)) break; h->filenameglob = v; r = config_getwordinline(iter, &v); if (r == RET_NOTHING) { fprintf(stderr, "Error parsing %s, line %u, column %u: each byhand hooks needs 4 arguments, found only 2!\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter)); r = RET_ERROR; } if (RET_WAS_ERROR(r)) break; assert (v != NULL && v[0] != '\0'); \ h->script = configfile_expandname(v, v); if (FAILEDTOALLOC(h->script)) { r = RET_ERROR_OOM; break; } r = config_getwordinline(iter, &v); if (RET_IS_OK(r)) { fprintf(stderr, "Error parsing %s, line %u, column %u: each byhand hooks needs exactly 4 arguments, but there are more (first unexpected: '%s'!\n", config_filename(iter), config_markerline(iter), config_markercolumn(iter), v); free(v); r = RET_ERROR; } if (RET_WAS_ERROR(r)) break; } if (RET_WAS_ERROR(r)) { byhandhooks_free(hooks); return r; } *hooks_p = hooks; return RET_OK; } bool byhandhooks_matched(const struct byhandhook *list, const struct byhandhook **touse, const char *section, const char *priority, const char *filename) { const struct byhandhook *h; /* for each file the first matching hook is called * it might later be extended to allow multiple with some keywords */ if (*touse != NULL) /* if ((*touse)->nonexclusive) list = (*touse)->next ; else */ return false; for (h = list ; h != NULL ; h = h->next) { if (!globmatch(section, h->sectionglob)) continue; if 
(!globmatch(priority, h->priorityglob)) continue; if (!globmatch(filename, h->filenameglob)) continue; *touse = h; return true; } return false; } retvalue byhandhook_call(const struct byhandhook *h, const char *codename, const char *section, const char *priority, const char *name, const char *fullfilename) { pid_t child; child = fork(); if (child == 0) { /* Try to close all open fd but 0,1,2 */ closefrom(3); sethookenvironment(causingfile, NULL, NULL, NULL); (void)execl(h->script, h->script, codename, section, priority, name, fullfilename, (char*)NULL); { int e = errno; fprintf(stderr, "Error %d executing '%s': %s\n", e, h->script, strerror(e)); } _exit(255); } if (child < 0) { int e = errno; fprintf(stderr, "Error %d forking: %s!\n", e, strerror(e)); return RET_ERRNO(e); } while (true) { int status; pid_t pid; pid = waitpid(child, &status, 0); if (pid == child) { if (WIFEXITED(status)) { if (WEXITSTATUS(status) == 0) { return RET_OK; } fprintf(stderr, "Byhandhook '%s' '%s' '%s' '%s' '%s' '%s' failed with exit code %d!\n", h->script, codename, section, priority, name, fullfilename, (int)(WEXITSTATUS(status))); } else if (WIFSIGNALED(status)) { fprintf(stderr, "Byhandhook '%s' '%s' '%s' '%s' '%s' '%s' killed by signal %d!\n", h->script, codename, section, priority, name, fullfilename, (int)(WTERMSIG(status))); } else { fprintf(stderr, "Byhandhook '%s' '%s' '%s' '%s' '%s' '%s' failed!\n", h->script, codename, section, priority, name, fullfilename); } return RET_ERROR; } else if (pid == (pid_t)-1) { int e = errno; if (e == EINTR) continue; fprintf(stderr, "Error %d calling waitpid on byhandhook child: %s\n", e, strerror(e)); return RET_ERRNO(e); } } /* NOT REACHED */ } reprepro-4.13.1/ChangeLog0000644000175100017510000021426512152651661012152 000000000000002013-06-02 Bernhard R. Link * as gcc got better, remove conditional workarounds for most uninitialized-false-positives and make the remaining cases unconditonal (but marked with SETBUTNOTUSED). 
2013-05-30 Bernhard R. Link * fix bug in restore to only act if the last package looked at is restored. 2013-05-04 Bernhard R. Link * build-needing properly handles sources with architecture wildcards (linux-any) in them. 2013-04-12 Bernhard R. Link * fix percomponent udeb Contents filenames 2013-02-17 Bernhard R. Link * add outsftphook.py example 2012-12-31 Bernhard R. Link * add --outhook 2012-12-20 Bernhard R. Link * fix inconsistent spacing of ls command, * fix --nothingiserror ls not treating no result as error * add lsbycomponent command (as ls, but grouped by component) 2012-12-15 Bernhard R. Link * move around some of the code related to moving (In)Release(.gpg) to its final place. Side effect is that those files are removed if they are no longer requested. 2012-12-09 Bernhard R. Link * unify export handling (moving it out of the action specific code) 2012-12-02 Bernhard R. Link * keep around relative release filenames always 2012-11-24 Bernhard R. Link * make setting of environment variables for hooks more uniform (and with less code duplication). 2012-11-17 Bernhard R. Link * '~/' or '+{b,o,c}/' or './' now also special in ByHandHooks and ListHook. * add support for signing hooks (SignWith: !...) 2012-11-11 Bernhard R. Link * add --endhook to start a script when terminating 2012-11-04 Bernhard R. Link * add repairdescriptions command to readd missing long descriptions (which you might get as reprepro cannot yet get Translations files and get them from there) from the .deb files. 2012-10-30 Bernhard R. Link * add ${$basename}, ${$filekey} and ${$fullfilename} to --listformat * fix some bitrot in the non-libarchive code paths 2012-10-21 Bernhard R. Link * reject absurd large values in ValidFor header * fix wrong include type in termdecide.h 2012-09-03 * fix overlong VerifyRelease example in manual.html 2012-07-12 * add 'deleteifunreferenced' command to safely delete and forget the given files in a repository with keepunreferencedfiles set.
2012-07-11 * fix bug in checking old unchanged {Packages/Sources}.bz2 files for existance. (Triggering even an assertion when only .bz2 index files are requested). * ignore diff comments about unterminated lines when parsing .diff files 2012-06-24 * support http-method's extended 103 redirect status * actually set REPREPRO_CONFIG_DIR in hooks as documented in manpage. * document more environment variables in manpage 2012-06-07 * fix bash and zsh completion to work with conf/distributions and conf/incoming directories. * fix allocation error with more than 16 group members in allocation files. 2012-05-30 * add support for -A, -C, -T to *update and *pull. 2012-05-22 * try to get InRelease from remote repositories instead of Release (with fall-back of the old behaviour) * new GetInRelease: to conf/updates, defaults to yes 2012-05-21 * fix some errors when compiled without libgpgme 2012-05-20 * normalize included package control information to always start with the Package: field (as some clients assume that). * don't require md5sum to download binary or source packages in the remote index files (any known hash suffices) 2012-05-19 * avoid some problem with gcc-4.7 2012-04-24 * change Contents-* files generation default from "allcompontents" to "percomponent compatsymlink". (i.e. best for >= wheezy, only first component visible for <= squeeze) 2012-04-04 * 'include' now only warns about section "unknown" instead of rejecting it. add warnings to 'includedsc' and 'includedeb', too. 2012-03-26 * allow absolute filenames in !include directives, and expand filenames starting with "~/" "+b/" "+c/" in those and export hooks, filter lists, log scripts, override filenames, and uploaders filenames. * conf/distributions, conf/updates, conf/pulls and conf/incoming or files included by those can be directories with all *.conf files read instead. 2012-03-25 * changelogs.example can now also place changelogs in places where apt-get changelog looks for "third party site" changelogs. 
* add 'supersede' as FilterList keyword to remove the old package if the the new would be installed otherwise. * fix broken test against leading whitespace in config file field names * add support for !include directive in conf/distributions, conf/updates, conf/pulls and conf/incoming. 2012-01-23 * reject "any" as Architecture part of a distribution 2012-01-21 * build-needing now can list missing architecture 'all' packages. (Will not list .dsc files producing both architecture dependent and architecture indepentent ('all') packages unless they are built with dpkg-dev >= 1.16.1, though). 2012-01-19 * build-needing takes 'any' instead of a architecture, too. * uploader files can 'include' other files. 2012-01-17 * improve config file parser error messages about missing fields 2010-12-18 * rredtool: produce .diff/Index files that reprepro can understand. * warn if uploader files contains key ids too long to handle * don't warn against .git files as unknown extension 2010-12-09 * if failing to parse .diff/Index, proceed with other ways to retrieve Packages/Sources. 2010-10-30 * don't give spurious warnings about "strange filekey"s if components contain slashes. 2010-10-10 * fix NULL-reference segfault if patch in a Packages.diff does not have a history attached to it (or if it is listed two times) 2010-10-03 * when using nocompatsymlink in Contents warn about old file/symlink still present. 2010-09-28 * fix archive_set_error calls (don't give error messages as format strings) * remove undocumented Contents: options with leading +/- * add compatsymlink nocompatsymlink Contents: options (and document that the default will change in the future) 2010-08-22 * add 'redochecksums' command to complete the checksum information in package indices. 2010-08-19 * add percomponent and allcomponents to Contents: flags to switch between the format of Contents file to generate. Currently the default is allcomponents but that will switch later. 
* fix bug that would delete files only to be deleted after an successful export also when aborting an export 2010-07-07 * don't give downgrading message if not downgrading but replacing with same version 2010-06-02 * fix bug not deleting packages if none added in update 2010-05-05 * ignore leading comments in control files 2010-04-18 * add --restrict and --restrict-bin to restrict update and pull operations to specific packages. * add --restrict-file and --restrict-file-bin. 2010-04-17 * add --export=silent-never like never but silenting all warnings (mostly useful for testsuite). * avoid 'Data seems not to be signed trying to use directly' message if data start like unsigned file should start. 2010-04-16 * add 'FilterSrcList'. 2010-04-15 * Many clean-ups and coding style fixes. 2010-03-30 * Support specifying a version in FilterList 2010-02-29 * support compiling with libdb5 * fix memory bug in filelist generation (as realloc usually not moves stuff when reducing the size that is no real issue, but newer valgrind detects it and warns). 2010-02-28 * 'check' also checks if architectures match * fix 'sourcemissing', 'unusedsources' and 'reportcruft' on distributions without tracking. * fix 'pull' copying packages with wrong architecture 2010-02-21 * support reading of Release files without MD5Sum * add all missing Checksums-* when importing from remote repositories * allow md5 in IgnoreHashes 2010-02-16 * make 'sourcemissing', 'unusedsources' and 'reportcruft' work on distributions without tracking. 2010-02-14 * add 'reportcruft' command * ignore source checking in distributions without 'source' architecture 2010-01-30 * add 'sizes' command. * add "distribution 'codename'" support to uploaders files. * some fixes for __checkuploaders 2010-01-27 * SignWith can take multiple arguments to denote multiple keys to sign a repository with. 
2010-01-22 * add removesrcs command (like removesrc but can get multiple source package names) 2010-01-03 * add groups to Uploaders:-lists. * add __checkuploaders command so uploaders lists can be tested from the test-suite 2010-12-23 * fix some minor memory/resource leaks found by cppcheck 2010-10-16 * support "ButAutomaticUpgrades" field to be copied to the generated Release files (Thanks to Modestas Vainius) 2010-10-15 * add support for lzip compressed files (Thanks to Daniel Baumann for the patch). 2010-09-10 * add special '$Delete' override field to delete fields 2010-09-09 * fix reoverride problem with packages only having a $Component special-override-field. 2010-08-12 * fix missing #ifdef breaking --without-libbz2 compiles * include sys/stat.h in filecntl.c, thanks to Jeroen van Meeuwen 2010-08-04 * add unusedsources and sourcemissing commands. 2010-07-10 * create InRelease files when signing... 2010-07-05 * special $Component in override files will force placing packages in the specified component upon inclusion (unless -C is given). 2010-07-04 * consult override files when importing packages with 'update' or 'pull'. 2010-07-01 * fix inconsistency in changelog.example. Thanks to Christoph Mathys. 2010-06-30 * allow patterns in override files 2010-06-29 * do not stop with error if a downloaded Packages file contains unexpected wrong Architecture lines but only print a warning. Add --ignore=wrongarchitecture to not print that warning. 2010-06-26 * store override data in a tree instead of an list and some preparations for patterns in override files. 2010-06-25 * Ignore overrides for fields starting with '$' and warn about unknown fields to allow later introduction of special values. * disallow overrides of core fields (Package, Version, Filename, ...) 2010-05-07 * add --onlysmalldeletes option that cancels pulls and updates that delete more than 20% of some target (but at least 10 packages). 
The change also causes update no longer claiming to get packages if there are not any packages to get... 2010-04-30 * change parsing of .changes lines to cope with N_V.orig-X.tar.C files where V.orig-X does not survive a proper version check (underscores most prominently). 2010-04-23 * Fix typo causing --changes Log-notifiers not being called with processincoming in many cases. 2010-04-07 * add '${$source}' and '${$sourceversion}' to --list-format 2010-03-31 * describe byhand file in the manpage's "nomenclature". 2010-03-19 * add "dumbremove" to changestool. 2010-02-10 * fix failure if trying to extract exactly one of section or priority from a tar file. 2010-01-24 * add ByHandHooks to conf/distributions for hooks called by processincoming (and in the future perhaps by include) 2010-01-18 * properly handle relative LogDir in conf/incoming 2009-12-08 * add byhand statement to uploaders files 2009-11-22 * fix build with --without-libgpgme (thanks to Reto Gantenbein for reporting) 2009-11-16 * include where *int*_t is used 2009-11-13 * 'include' now errors out early if the .changes includes source files but no .dsc file. 2009-11-12 * add mode to rredtool to act as reprepro index hook and generate and update a *.diff/Index file. 2009-11-06 * when 'include'ing a .changes file, do not insist on section information of non-.dsc source files. 2009-10-27 * Do not warn about a missing VerifyRelease if there is a IgnoreRelease. * Handle apt transport methods returning missing files as success with alternate filename suggestion more gracefully. * when getting packages from an other architecture while updating, ignore all packages with architecture not fitting into the target. 
(Fixes a regression introduced in 3.8.0) 2009-10-21 * reduce number of places where new compressions must be added * improve checking for proper filenames in changestool's verify * allow .build as synonym for .log as suffix in changes files 2009-10-20 * reduce number of places where new compressions must be added 2009-10-17 * support xz compressed files if unxz is installed. 2009-10-02 * make 'check' (and some other commands) warn if a file expected is not in the checksums database but found correctly in the pool. 2009-09-23 * Method: and Fallback: in conf/updates now strip the last '/' from the URI given. (Some apt methods get confused if they get "//"). 2009-09-15 * fix exit-code of 'list' with --nothingiserror 2009-09-10 * call gpgme_check_version so that libgpgme 1.2.0 does not fail to initialize. 2009-08-24 * remove all files.db code (except translatelegacyfilelists). * remove --oldfilesdb option. * remove --overridedir 2009-08-23 * warn if old legacy files.db is still used and add new translatelegacyfilelists command for easier migration. 2009-08-21 * new --showpercent option to show percent and total download size when downloading packages. * do not output the new warning about a new architecture when all architectures are new (i.e. new distribution) 2009-08-20 * new 'Options: limit_arch_all' in conf/incoming causes processincoming to only put architecture all packages into the architectures uploaded with them to allow usage together with 'flood'. 2009-08-18 * speed up 'flood' by using an tree instead of a list for source package lookups. 2009-08-17 * add new 'flood' command to distribute architecture all packages within one architecture. 2009-08-15 * -A, -T and -C can now have multiple arguments separated by '|'. 2009-08-13 * FakeComponentPrefix now does not add the prefix to components already having it and removes it from the relative directory where it is put into (so no duplication on the whole path, either). 
2009-08-06 * command line (and conf/options) options to specify a directory now treat arguments starting with '+b/', '+c/' or '+o/' as relative to the basedir, confdir or outdir. * warn if directories do not start with '/', './' or '+x/'. 2009-08-05 * if a package is not accepted by processincoming because no distribution is found for it or no distribution allows it, the existcode is now 243 2009-08-03 * add a MorgueDir option to conf/incoming where cleaned up files are moved to. * if a .changes has improper name, version or architectures, trigger the 'Cleanup: on_error' case. 2009-08-01 * improve deleteunreferenced's error message with keepunreferencedfiles 2009-07-25 * add $Version, $Source, $SourceVersion, $Architecture, $Component, $PackageType as special fields in formulas. 2009-07-21 * fix build-needing to look at the correct Architecture field in .dsc files. 2009-07-20 * add an --morguedir where files removed from the pool are stored. 2009-07-15 * add --create-with-all-fields to changestool that is like --create but also creates Urgency and Changes fields. 2009-07-11 * make predelete also call retrack when needed, silence false warning of stale tracking by removesrc 2009-07-10 * warn if a distribution with tracking is modified in a form tracking data might get out of data. update and pull automatically cause a retrack on distributions with tracking enabled. 2009-07-09 * some more improvements to the build-needing command 2009-07-07 * fix bug in processincoming not accepting Suite or AlsoAcceptFor because of counting it two times and erroring out. (Thanks to Wookey for finding this bug). 2009-06-16 * add listmatched, removematched, copymatched and restorematched. (For those who think listfilter 'Package (% glob)' is too hard to write, to remember or too slow). * add build-needing command 2009-06-05 * add glob-matching in formulas via '(% pattern)' * uploaders list conditions that supported stars now use the generic globmatch (thus more stars and ? and []). 
2009-06-03 * new --list-max and --list-skip 2009-06-02 * new 'architectures' condition for uploader lists and other conditions support 'contains' now. 2009-05-31 * add --list-format 2009-05-29 * add _listdbidentifiers and _listconfidentifiers * add condition "source 'something'" for uploader lists, to limit a uploader to packages with the specified source. 2009-05-22 * allow subkey matching in uploader lists, 'unsigned' now only means unsigned while the new 'anybody' means everybody. Preparations for more conditions. 2009-05-12 * copy and copysrc give warnings about not found packages unless verbosity is reduced by --silent. (To help people catch their typos). 2009-04-13 * rewrite Release.gpg verification code: - to allow usage of expired or revoced keys, the key-id in VerifyRelease has to be appended with '!' and the corresponding new ignore option given. - subkeys are accepted if the key-id is appended with '+'. - keys are requested from libgpgme before anything is downloaded (helps catching c&p errors and makes subkey checks possible). - if verification fails, the status of all found signatures is printed. 2009-04-07 * bugfix: ListHook was not used in rules including the rule with it in "From:" * add "ListShellHook", that is like ListHook but with arguments and the files in stdin and stdout. 2009-04-03 * fix bug (catched by assertion) that inverts the logic of downloading .diff files when there is no DownLoadListsAs line. 2009-03-18 * support new suffix ".new." for export hooks. (Which moves filename + ".new" to filename on success, but unlike ".new" does not mention the file in Release) * new suffix ".keep" for export hooks tha just ignores that line, for compatibility with future changes. * warn if an (Deb|UDeb|Dsc)Indices line contains no filename. (warn against everything starting with a dot to avoid a user putting forgetting it and putting a compression identifier there). 
2009-03-14 * fix mishandling of libz return code causing "Zlib error 1"..."stream end" error messages. This defect seems to be only triggered with at least lenny's libz. (And only when extracting Section and Priority from a dsc). 2009-03-05 * Implement force. as DownLoadListAs item to download an index not found in the Release file. * warn if database is in old format 2009-03-04 * also continue downloading index files after failure to get the prefered one in the IgnoreRelease case. 2009-03-03 * regression fix: when updating with IgnoreRelease, old index files were no longer deleted in 3.8 before telling the apt-methods to download new ones, which can trigger buggy behaviour in those. * if one index file fails to be downloaded, try the next one (except for updates with IgnoreRelease, yet) 2009-03-02 * fix bug not taking all DownloadListAs into account when multiple update rules requests the same index file to be downloaded. * if a .diff/Index file does not list the available Packages file or if not for targeted file, proceed with other ways to retrieve it. * add .diff processing as first default when there is no DownloadListsAs. 2009-03-01 * support using Packages.diff when updating. (Fallback to other methods not yet supported, so not yet enabled in the default DownloadlistsAs) 2009-02-28 * fix some bugs in --nothingiserror handling 2009-02-27 * move handling of downloaded files from aptmethod.c to the code queuing the files. (refactorisation in preparation of later changes) 2009-02-24 * fix race condition causing external uncompressors sometimes to catch a sigpipe if their output is closed before they receive the signal to kill them. * changestool now supports looking into lzma files (and bz2 files even when not compiled against libbz2), if external uncompressors are available. * fix bug extracting the Section and Priority from .diff files if control was not the first file in it. * fix bug .diff parsing's exception to also allow diff generated files. 
2009-02-23 * log notifiers get variables REPREPRO_CAUSING_RULE and REPREPRO_FROM set when adding packages via update/pull. The later also in copy* and restore* commands. * delete unexpected (i.e. not registered in the database) files in pool when trying to replace with new ones. 2009-02-21 * add --keeptemporaries and without it delete all .new files when exporting fails (and not only Release) and with it keep all (including Release). Also fix gpg error message to not suggest trying it with a file that later will be deleted. 2009-02-20 * add 'warning' flag for FilterList files 2009-02-13 * add ReadOnly option for conf/distributions 2009-02-08 * processincoming support includebyhand and includelogs tracking options * new LogDir for processincoming, that gets the .changes files, .log files and unused byhand (or raw-*) files. 2009-02-06 * ignore byhand and logfiles in 'include' unless tracking with includebyhand or includelogs is activated, then store them into the pool. 2009-01-22 * fix typo causing copyfilter to fail * add --gnupghome option to set GNUPGHOME environment variable * fix importing of source packages from flat repositories without a Directory field in Sources index. 2009-01-17 * fix erroneous "strange filekey" warning for lib files in 3.8.0~alpha 2009-01-16 * make Date: more like official Release files by replacing the old "+0000" with "UTC". 2009-01-15 * add support to generate Valid-Until in Release 2009-01-09 * handle 'raw-*' sections like 'byhand' sections (i.e. mostly not handle them, but give better error messages). 2009-01-06 * add DownloadListsAs: option for conf/updates to specify which index files (.gz, .bz2, .lzma, ...) to download when available. 2009-01-04 * add support for libdb4.7 (yet with some warnings to note I have not tested it much yet) * bugfix in checkpool with old files.db 2009-01-02 * FilterList/FilterFormula can be inherited with From: in update rules. * bugfix: if FilterList return hold, FilterFormula was not asked. 
Now it is only hold if FilterFormula also includes this package. (pull/update) * if a distribution is both flat and non-flat, do not raise an assert, but emit a warning and proceed (new flatandnonflat ignore class to ignore that warning). 2008-12-06 * add 'upgradeonly' value for FilterList, that only takes a package into account if it already exists. 2008-12-02 * implement cleanlists command 2008-11-24 * fix bug in sha256 calculation over very large files 2008-11-13 * add dumpupdate and dumppull actions that are like checkupdate and checkpull but with less information but that more easily parseable. 2008-11-04 * fix parsing error of contents of very big .deb files. Thanks to Aramian Wasielak and Alexander Perlis. 2008-11-03 * rework handling of files added to the pool not used by anything. (for example because the package was not added due to error). New --keepunusednewfiles option to not delete such files. 2008-11-01 * print number of newly unreferenced file on --keepunreferenced and commands not deleting their references. 2008-10-30 * add support for flat repositories with Sources files without Directory lines (Thanks to Cody A.W. Somerville for noting). 2008-10-12 * some rework on unreferenced files bookkeeping. Should make no difference yet but only make the "Deleting files not longer referenced" only show up if something is deleted... 2008-10-05 * Internally atomize components architectures and packagetypes. Causes multiple checks for unknown identifiers to be earlier or more strict. (And fields in conf/distributions have more restrictions w.r.t their order). * fix bug in (tracking enabled) removesrc that caused malformed tracking data when a source package's track record contains a file no longer found in any distribution.
[2009-01-16: I previously believed this nearly impossible to trigger, but a simply outdated tracking data already suffices to trigger it] 2008-10-01 * warn if an update rule references local components or architectures that were not seen in conf/distributions (old behaviour was to check if any distribution that references this rule had this architecture, but that was too complex with the new rule-can-reference-rule possibilities). 2008-09-18 * update rules can include other rules with From: allowing leaner conf/updates file and avoiding duplicate downloading of upstream indices. * do not process distributions without Updates: field upon update/checkupdate/predelete... 2008-09-09 * also support external uncompression programs for .orig.tar/.debian.tar/.tar uncompression, i.e.: - support Section/Priority extraction from lzma compressed dsc packages - libarchive no longer needs to be linked against zlib/libbz2 * fix some corner cases in .diff parsing 2008-09-07 * add support for external uncompression programs - speeding up updating, as downloading and uncompressing can happen at the same time - support lzma compressed .deb and .diff (when unlzma is available) - supporting .bz2 compressed files even when compiled without libbz2 (but needing runtime bunzip2 then) * make --nooldfilesdb the default 2008-08-24 * unify reading of compressed files, adding support for: - extracting section and priority from a .diff.bz2 - restoring from a snapshot with only .bz2 indices 2008-08-23 * massive refactorisation of the update code to retrieve remote index files. Most important modifications: - when the same remote distribution is needed by multiple updates, then the index files are only downloaded once. (still needs futher changes to allow better detection of the same source). - ListHooks are called once per use (should mostly only make a difference for flat sources or with settings where this is needed). 
- --nolistsdownload now only not downloads lists and has no other effects (checksums still checked, --noskipold no longer implied). - deleting of old no longer needed lists (the default --nokeepunneeded) no longer exists. - index files are stored uncompressed in lists/ and the way files are named there is less strange... - many other changes are possible now and will hopefully be implemented soon. * support downloading .bz2 indices * add --via to Log-notifiers to only call notification scripts when the action was triggered by a specific command. 2008-08-22 * some internal cleanup preparing for future changes... 2008-08-16 * allow multiple export hooks 2008-08-12 * check for Ctrl-C in file_foreach (dumpunreferenced, ...) 2008-08-08 * fix handling of libbz2 return codes 2008-08-07 * make reoverride work again... (and not ignore section and priority) 2008-08-03 * remove iteratedupdate 2008-07-30 * fix double-free whith --export=never 2008-07-27 * buffered read of index files upon "update". 2008-07-26 * add support to retrieve packages from flat repositories. 2008-07-25 * refactor indexfile parsing. (Needed for future changes, perhaps speeding some things up a tiny littly bit). * fix logic error causing restorefilter aborting 2008-07-23 * Do not claim --noskipold makes a difference in the update output for targets not having any upstream to pull from. 2008-07-22 * better cope with a file needed multiple times when updating 2008-07-12 * make list package argument optional, listing all packages if not there. * fix bug causing assert() instead of proper error message if list gets too many arguments. 2008-07-03 * add IgnoreHashes directive for conf/updates 2008-06-26 Bernhard R. Link * add FakeComponentPrefix, that adds a prefix to components in the Release file and removes them from Codename and Suite in the central Release file. This way it looks more like security /updates and thus apt is not confused. 2008-06-25 Bernhard R. 
Link * avoid creating symlinks that cannot work because of a '/' in the link to create. 2008-06-23 Bernhard R. Link * fix bug in optionsfilename calculating introduced in last revision. 2008-06-22 Bernhard R. Link * move some directoy variables to global variables, some related cleanup in the code * set REPREPRO_BASE_DIR, REPREPRO_OUT_DIR, REPREPRO_DIST_DIR, REPREPRO_CONF_DIR and REPREPRO_LOG_DIR when calling log notifiers, apt methods, update hooks or export hooks. 2008-06-07 Bernhard R. Link * remove some checks that fail for version 2 or 3 debian source packages. (in reprepro include and changestool verify) * extract missing Section and Priority also from a .debian.tar.{gz,bz2} file. 2008-06-06 Bernhard R. Link * switch to 'new' AC_INIT and AM_INIT_AUTOMAKE syntax, move automaitcally included autoconf to ac/ subdir * fix typo causing internal error when removesrc is called for a distribution with tracking for an unknown source name. 2008-05-17 Bernhard R. Link * Add support for sha256. * changestool puts Files: last, makes it easier to use some versions of dupload. 2008-05-16 Bernhard R. Link * When include'ing a .changes file with Checksums header and limiting to some files with -A or -T, do not errounously complain about not expecting the skipped files in Checksums-* headers * Look at suite names when no distribution with the requested codename exists. 2008-05-15 Bernhard R. Link * Print warning when not including when not including a package because of unknown key/expire/revocation. (In addition to the warning with -v about those problems with a signature and in addition to the message of not including a package at all if that was the only chance to get it in) 2008-04-17 Bernhard R. Link * fix free of uninitialized pointer when calling log notifiers while removing (this time for real) 2008-04-12 Bernhard R. Link * move assertion to not abort() on wrong md5sums in include command, but cleanly error out. 
* do not close random fd when starting client without control data. * fix free of uninitialized pointer when calling log notifiers while removing 2008-04-05 Bernhard R. Link * add restore restoresrc restorefilter and _addpackage 2008-04-04 Bernhard R. Link * add copysrc and copyfilter * reimplement copy command (should no longer invalidate tracking information) * warn against impossible -T values and impossible -A -T combinations (source is dsc and dsc is source) 2008-03-31 Bernhard R. Link * bugfix: no longer confuse -S and -P (introduced in 3.0.1) 2008-03-25 Bernhard R. Link * put a fake Suite: field in Release files generated by gensnapshot to avoid apt warning about the distribution name not matching. 2008-03-17 Bernhard R. Link * Log:-scripts are starting with environment-variable REPREPRO_CAUSING_FILE set to the main file causing this change. (.changes for include/processincoming, .dsc for includedsc, .deb for includedeb); 2008-03-14 Bernhard R. Link * read Checksums-Sha1 in .changes file in processincoming 2008-03-13 Bernhard R. Link * changestool can write Checksums-Sha1 headers now * read Checksums-Sha1 in .changes file in the include command 2008-03-12 Bernhard R. Link * Bugfix: When replacing fields only those matching with the same case were replaced. 2008-03-10 Bernhard R. Link * write Checksums-Sha1 to Sources.gz when available and remove Checksums-Sha256 to avoid problems with not yet being able to add the .dsc file. * Do not warn about missing Standards-Version as newer dpkg-source no longer include them. 2008-03-09 Bernhard R. Link * read Checksums-Sha1 in .dsc files 2008-03-08 Bernhard R. Link * When missing section or priority reprepro's includedsc and changestool's add[dsc] look into the .diff and the .tar file. * changestool's add* commands look for files in the current directory first, adddsc for files referenced in the directory of the dsc file. 2008-03-06 Bernhard R. 
Link * fix/improve some messages, based upon many suggestions by Marc Haber. 2008-03-02 Bernhard R. Link * fix double free error in checksums upgrade case of includedeb 2008-03-01 Bernhard R. Link * cleaning: port changestool to new checksums code, finally removing the old md5sum code. 2008-02-29 Bernhard R. Link * improve documentation of listfilter command 2008-02-21 Bernhard R. Link * make --without-libarchive compile again, thanks to Jesus Roncero for noticing. 2008-02-19 Bernhard R. Link * Try harder not to leave any newly added files to the pool in the case of an error. 2008-02-15 Bernhard R. Link * Also ignore missing Changes and Description lines in .changes files with "include". 2008-02-12 Bernhard R. Link * Add --outdir directive to set the directory the pool hierarchy is put under (and the dists hierarchy unless --distdir puts it somewhere else). 2008-02-11 Bernhard R. Link * fix --waitforlock parsing on 64 bit size_t architectures. (Thanks to Arno Renevier for reporting the bug) 2008-02-01 Bernhard R. Link * new --nooldfilesdb switch to only use new-style checksum database * improve db/version generation, set minimum required reprepro version to 3.3.0 when only using checksums.db 2008-01-13 Bernhard R. Link * improve collecting of not yet known checksums and using already recorded checksums in the database 2008-01-06 Bernhard R. Link * implement collectnewchecksums 2008-01-04 Bernhard R. Link * add checksums.db to store all checksums (as opposed to only md5sums in files.db). The old files.db persists for compatibility, but when checksums.db is up to date (when repository is generated with new reprepro or to be implemented collectnewchecksums was run) the old files.db can be deleted and only checksums.db is used then. (Of course you should not run an older reprepro with that repository then, ever). 2008-01-03 Bernhard R. Link * tracking.c uses database.c instead of libdb directly 2007-12-14 - 2007-12-23 Bernhard R. 
Link * collect and advertise more checksums, though not yet stored 2007-12-10 Bernhard R. Link * support lzma compressed source packages 2007-12-01 Bernhard R. Link * beautify control data read from .deb or .dsc/.changes files: remove all CR and make sure leading or trailing newlines do not hurt. 2007-11-27 Bernhard R. Link * rewrite support for reading text files containing a single chunk. (Release, .dsc, .changes). Unsigned .dsc and .changes files are no longer routed through libgpgme. 2007-11-24 Bernhard R. Link * references.c uses database.c instead of accessing libdb directly 2007-11-19 Bernhard R. Link * mark more filedescriptors closeonexec, support closefrom and F_CLOSEM when available. 2007-11-18 Bernhard R. Link * add sha1 hash calculation code * add sha1 hashes of index files into Release files. release.cache.db renmamed to release.caches.db due to modified syntax. 2007-10-31 Bernhard R. Link * translatefilelists now can be run when both old and new style filelists are there (this can happen when it was translated and an old version of reprepro was run over this database. You should not do this, but when it happens, translatefilelists can be used now instead of having to reextract the lists). 2007-10-29 Bernhard R. Link * If exporting a distribution fails, warn if something is left in a state that needs manual exporting. 2007-10-26 Bernhard R. Link * change --export default from "normal" (now also available under the name "lookedat") to "changed". 2007-10-21 Bernhard R. Link * warn against -A,-C,-T,-S or -P given to an action not using it, with new --ignore=unusedoption to ignore this. 2007-10-07 Bernhard R. Link * change db/version file to final format, abort if version or libdb version specified there cannot be fulfilled. 2007-09-27 Bernhard R. Link * allow comments starting within lines in config files * also allow tab as first character for continued lines as manpage already says. 2007-09-23 Bernhard R. 
Link * save another 2 seconds while sorting filelists for Contents files 2007-09-22 Bernhard R. Link * make empty Architectures and Components fields in conf/distributions an error. * Contents: fields no longer has a rate value, ContentsComponents/Architectures/UComponents triggers or disables contents generation if non-/empty. * empty Architecturs/Components/UdebComponents in conf/updates and conf/pulls now mean nothing instead of all. * minimal additional speedup when sorting filelists 2007-09-21 Bernhard R. Link * save cached filelists of packages for Contents files in a preprocessed form, needing only about half the disk space and only half the time when generating the Contents file. * new translatefilelists command to translate old to new format * filelists reading no longer available without libarchive 2007-09-19 Bernhard R. Link * files.c uses database.c instead of accessing libdb directly * release.c uses database.c instead of accessing libdb directly 2007-09-16 Bernhard R. Link * add removesrc and removefilter action 2007-09-15 Bernhard R. Link * move package database handling from packages.c to database.c 2007-09-14 Bernhard R. Link * rereference now also refreshes references by tracking data. 2007-09-13 Bernhard R. Link * retrack no longer create track records for distributions with tracking disabled, dumptracks no longer generated empty databases. * removealltracks now also works on distributions no longer listed in conf/distributions, no longer supports being used on all distributions listed there (i.e. without argumnts) * tidytracks not remove all tracking data from a distribution without tracking activated. * clearvanished removes tracking data from vanished distributions. * in default --nofast mode, check for unexpected tracking data and do not run, unless --ignore=undefinedtracking is defined * retrack refreshes tracking information instead of destroying and starting new. 
* make update's ListHook relative to confdir * low level part of the includelogs options added 2007-09-11 Bernhard R. Link * reject spaces and tabs in key-names (i.e. before :) in config files, instead of bubbling about unknown fields. 2007-09-10 Bernhard R. Link * improve parsing of update's Config lines 2007-09-09 Bernhard R. Link * never hardlink index files, but copy them always into the lists directory. (Should not make a difference yet, but feels safer). * warn if update rules list components or architectures are always ignored 2007-09-08 Bernhard R. Link * warn if pull rules list components or architectures are always ignored 2007-09-07 Bernhard R. Link * create db/version * always create all packages.db subtables, so future versions can detect new architectures/components. 2007-09-06 Bernhard R. Link * read all distribution definitions before starting any action. 2007-09-04 Bernhard R. Link * test number of arguments earlier. 2007-09-03 Bernhard R. Link * remove the dbdirs and all its parents created at startup that are still empty at shutdown. (Does not make much difference yet, as most commands create an empty file database in there.) * obsolete --overridedir, overrides belong to conf dir like all the other config files now. 2007-09-02 Bernhard R. Link * fix uninitialized use of errno in listclean. (might cause update to report error opening dir: file exists) * new config file parser * remove --ignore from changestool, --ignore=shortkeyid from reprepro * move to C99's bool, false and true 2007-08-21 Bernhard R. Link * ignore SIGPIPE, so that libgpgme cannot tear us apart so easily. 2007-08-20 Bernhard R. Link * Print ignored signatures in Release.gpg files when verbosity > 10 2007-08-18 Bernhard R. Link * stop dumpreferences output when Ctrl-c is received. 2007-08-03 Bernhard R. Link * add --without-libgpgme to compile without gpgme support (checking and signing are then not available, yet). 2007-08-19 Bernhard R. 
Link * [SECURITY] fix bug causing a Release.gpg with only unknown signatures considered as properly signed. 2007-07-28 Bernhard R. Link * fix segfault in changestool's verify if md5sum of .orig.tar.gz is wrong and not listed in the .changes file. * changestool's verify knows about epochs not showing up in filenames now. 2007-07-26 Bernhard R. Link * add support for .changes file having the source version in the Sources: header (like binNMUs) to the include and processincoming commands. 2007-07-22 Bernhard R. Link * include[u]deb allows multiple files to include now 2007-06-25 Bernhard R. Link * don't complain if suite name and component name are the same in createsymlinks 2007-06-24 Bernhard R. Link * processincoming allows an optional second argument to limit processing to a specific file for better integration with inoticoming. 2007-06-16 Bernhard R. Link * when checking a file to have the expected checksum, first check if the file size matches before calculating its md5sum. 2007-06-11 Bernhard R. Link * detect "false" and "no" as false in boolean headers. (Until now only existance was tested and considered as true, which broke apt-methods telling "Send-Config: false") 2007-06-10 Bernhard R. Link * don't waste filedescriptors by not closing .done-files 2007-06-09 Bernhard R. Link * set GPG_TTY when unset and stdin is a terminal. (and new option --noguessgpgtty to suppress this) 2007-06-03 Bernhard R. Link * fix segfault when running processincoming without notificators (Thanks to Julien Valroff for finding this) 2007-06-02 Bernhard R. Link * rename --checkspace to --spacecheck, as manpage and error messages hint to that. * fix 64bit problem in errormessages for Log: 2007-05-29 Bernhard R. Link * adapt name include uses for .changes files to that of processincoming. 2007-05-25 Bernhard R. Link * some fixed and improvements of the free space calculation ( add --spacecheck, --safetymargin, --dbsafetymargin ) 2007-05-24 Bernhard R. 
Link * error/warn if trying to include a package via processincoming which is already there newer * do not notify a .changes when no package included (when using Log: --changes) * add Permit: unused_files older_version and Cleanup: unused_files on_deny on_error for conf/incoming * add --waitforlock option 2007-05-23 Bernhard R. Link * fix remove action not tidy tracked packages. (Thanks to Dan Pascu for finding this, too) * rename cleartracks in removealltracks * new tidytracks command 2007-05-22 Bernhard R. Link * Add per distribution notification scripts for accepted changes files. 2007-05-21 Bernhard R. Link * fix problem of not waiting for notificators in some commands (Thanks to Dan Pascu for finding this) 2007-05-07 Bernhard R. Link * move some code from release.c to signature.c in preperation of later changes 2007-05-06 Bernhard R. Link * changestool: add adddsc command * changestool: add --create option * changestool: add add command * changestool: add setdistribution command 2007-05-03 Bernhard R. Link * changestool: add addrawfile command 2007-04-03 Bernhard R. Link * first code for checking for enough free space 2007-03-29 Bernhard R. Link * add rerunnotifiers command 2007-03-28 Bernhard R. Link * add support logging to external notificators (including example to create changelog/ hierachy) 2007-03-26 Bernhard R. Link * fix bug in term parsing not accepting '<<' 2007-03-23 Bernhard R. Link * first part of logging code 2007-03-16 Bernhard R. Link * fix bug not recognizing already existing .bz2 files when exporting only changes. * more changes in verbose output 2007-03-15 Bernhard R. Link * more output to stdout instead of stderr 2007-03-14 Bernhard R. Link * processincoming only exports distributions looked at with --export=always (the default) and not every distribution. (other commands should not have changed) * changed output of many status messages to stdout instead of stderr * changed verbosity level needed to see some messages 2007-03-12 Bernhard R. 
Link * add --silent option * change some status output to stdout instead of stderr. 2007-02-26 Bernhard R. Link * add gensnapshot command 2007-02-23 Bernhard R. Link * rename import to processincoming * describe in manpage * update bash completion example 2007-02-11 Bernhard R. Link * fix bug in non-libarchive filelist extraction with long filelists 2007-01-25 Bernhard R. Link * import allow .changes files with multiple distributions 2007-01-21 Bernhard R. Link * add trackingsupport to "import" command 2007-01-17 Bernhard R. Link * fail cleanly when getting a .dsc without Format header 2007-01-16 Bernhard R. Link * improve error message of missing Files: line in .dsc files 2007-01-12 Bernhard R. Link * add AlsoAcceptFor for distributions 2007-01-06 Bernhard R. Link * incoming fixups and more testcases * omit some warnings about versions not starting with a digit 2007-01-05 Bernhard R. Link * better cope with double entries in some lists. (Like Architectures or Components) * incoming fixups and more testcases 2007-01-04 Bernhard R. Link * more fixups of incoming handling 2007-01-03 Bernhard R. Link * factor some checkindeb code into binaries.c * incoming.c uses now only binaries.c and not checkindeb.c in preperation of different semantics to come. 2007-01-02 Bernhard R. Link * factor some checkindsc code into source.c * add dsc support for import from incoming 2007-01-01 Bernhard R. Link * move uploaderslist load into distribution struct * fix bug in manpage: uploaders list keyword is allow and not accept * some more code for incoming processing 2006-12-31 Bernhard R. Link * first code for importing from an incoming dir, not yet useable (supports no source, no overrides, no ... yet) * move loaded overrides into distribution struct. 2006-12-17 Bernhard R. Link * tell about the filename in the non-libarchive case of failure to extract control or filelist from a .deb * add _fakeemptyfilelist action to omit a file when generting Content files. 2006-11-28 Bernhard R. 
Link * mostly rewrote "adddeb" 2006-11-27 Bernhard R. Link * add "adddeb" option to changestool 2006-10-31 Bernhard R. Link * fix spelling mistakes in manpage (thanks to A. Costa) fixed the same errors in the code and its messages 2006-10-29 Bernhard R. Link * fix updatechecksums for .changes files not listing entries from the .dsc 2006-10-11 Bernhard R. Link * add Uploaders: rule to conf/distributions to limit include to .changes files signed with specific keys. 2006-10-07 Bernhard R. Link * only show control information of to be added packages in checkpull/checkupdate with -V * fixed a missed refcount increasing in yesterdays code * give hints where to look when gpgme reports no error on failure 2006-10-06 Bernhard R. Link * FilterList in update and pull rules now is a space separated list of files. 2006-10-03 Bernhard R. Link * fix typos and spelling errors in manpage (Thanks to Bruce Sass) * fix type-mismatch to silence compiler-warning * work around signing problems in gpgme11, fix some memory holes 2006-10-01 Bernhard R. Link * new includeallsources command for changestool to change a .changes as if it was created with -sa 2006-09-30 Bernhard R. Link * new updatechecksums command for changestool 2006-09-24 Bernhard R. Link * ported to libgpgme11 * removed --onlyacceptsigned 2006-09-20 Bernhard R. Link * make strlist_init void 2006-09-19 Bernhard R. Link * rename modifychanges to changestool 2006-09-17 Bernhard R. Link * fix return of fingerprints in new signature handling code * move endswith from main.c to names.h * add modifychanges helper program (yet only validating some stuff) 2006-09-12 Bernhard R. Link * reject .changes with binaries not listed, unless --ignore=surprisingbinary * reject .changes with .dsc or .deb with wrong source version unless --ignore=wrongversion or --ignore=wrongsourceversion * earlier and better error message if source name differs from the one given in the .changes file. 2006-09-11 Bernhard R. 
Link * new strlist_add_dup * more fine tuned signature checking (one valid signature suffices) * fix a little memory hole in tracking code 2006-09-07 Bernhard R. Link * fix some typos (thanks to Jordi Mallach for noting) 2006-09-04 Bernhard R. Link * support .orig.tar.bz2 .tar.bz2 and .diff.bz2 in source packages * fix bug, causing Contents-* files containing only the first file of a package when this is the first time this package is accessed 2006-08-22 Bernhard R. Link * fix db3 mention in reprepro.1 2006-08-05 Bernhard R. Link * some error/status/debug messages improved a little 2006-08-03 Bernhard R. Link * improve messages when missing files (.tar.gz most likely) 2006-07-28 Bernhard R. Link * remove unreferenced files when doing removetracks * fix bug omitting an uncompressed Sources entry in Release files when only exporting changed values and the source part changed not. (Thanks to Alexander Kuehn for finding this one). * fix tiny memory in clearvanished 2006-07-26 Bernhard R. Link * do not error out if one file gets unreferenced by two different reasons at the same time. * implement "minimal" and "all" tracking support for packages losing files because of getting replaced by newer ones... 2006-07-23 Bernhard R. Link * rewrite some parts of tracking support, implement "minimal" and "all" methods... 2006-07-18 Bernhard R. Link * fix segfault in non-libarchive control extraction code introduced with the last change 2006-07-16 Bernhard R. Link * cope with control.tar.gz files without leading ./ when not using libarchive. 2006-07-15 Bernhard R. Link * cope with GNU style ar files when using libarchive (i.e. with .deb files not generated by dpkg-deb) 2006-07-08 Bernhard R. Link * add clearvanished command 2006-06-21 Bernhard R. Link * add copy command to pull only a specific package without having to add FilterFormulas to conf/pulls (and also a bit faster) 2006-06-19 Bernhard R. 
Link * add predelete action to remove packages from a distribution that would be deleted or replaced by a command. 2006-06-18 Bernhard R. Link * check for file conflicts and missing files when including .changes files before copying/moving files into the pool (Files missing in .dsc and files having the wrong md5sum are still only noticed after/while moving them in the pool) * delete files from the pool when checks after including the files but before including the packages failed. 2006-06-16 Bernhard R. Link * manpage mentions includeudeb now. (Thanks to Jordi Mallach for noting) * changed manpage to make clear options are before the command (dito) * catch TERM, ABRT, INT and QUIT and do not start any new stuff after that. * remove force option (rarely worked and caused ugly bugs otherwise) 2006-06-12 Bernhard R. Link * some prework for predelete action 2006-06-01 Bernhard R. Link * better usage description in tiffany.example * fix the fix for the export preprocessor 2006-05-30 Bernhard R. Link * fix bug in communication with Index file preprocessor (so the .diff directories tiffany.example creates are properly advertised so that apt-get can use them) 2006-05-15 Bernhard R. Link * warn against dobuled fields in config files. (ignorable with --ignore=doublefield) * better error message when trying to forget filekey not existing 2006-05-14 Bernhard R. Link * add support for libdb4.3 and libdb4.4, default is libdb4.4 now. 2006-05-13 Bernhard R. Link * add support for contents file when compiled without libarchive. 2006-05-12 Bernhard R. Link * add content file generation 2006-05-07 Bernhard R. Link * add support for extracting filelists from Debian packages for future usage and a __extractfilelist action. (only available when compiled with libarchive) 2006-05-06 Bernhard R. Link * add support for using libarchive to get the control file out of a .deb instead of calling ar and tar. 2006-05-03 Bernhard R. 
Link * add new pull and checkpull actions * repair checkupdate statistics of newest available version of checkupdate when using delete rules. (Showed 'unavailable for reload'). * fix segfault and memory leak in checkupdate * fix including a changes file with source and restricting to some binary distribution or to binary package type. * add some warnings against impossible combinations of -T and -A 2006-04-29 Bernhard R. Link * fix some minor memory leaks 2006-04-28 Bernhard R. Link * rewrite decision for exporting distributions a bit: export all distributions that did not have errors by default (it did not export anything when an error occured) added new --export option with possible values never, changed, normal and forced. 2006-04-25 Bernhard R. Link * do not export indices if all upgrades were skipped 2006-04-23 Bernhard R. Link * unbreak new skipold for delete rules 2006-04-22 Bernhard R. Link * explicitly save which files are already processed and to be skipped by --skipold. 2006-04-11 Bernhard R. Link * tell the user running gpg manually sometimes resolves problems while calling it through libgpgme does not help. * add a WORKAROUND part to the manpage 2006-04-09 Bernhard R. Link * remove the woody reference in signature.c 2006-03-30 Bernhard R. Link * warn about architectures called 'all' 2006-02-25 Bernhard R. Link * add --ignore=missingfile to look for .orig.tar.gz files of broken .changes (no -sa though needed) files in the directory of the .changes file. 2006-02-20 Bernhard R. Link * add optional "NotAutomatic" field for the distribution specification. 2006-02-10 Bernhard R. Link * add new --ignore=extension, without which it refuses to 'include' files not ending in '.changes', to 'include[u]deb' files not ending in '.[u]deb' or to 'includedsc' files not ending '.dsc'. 2006-01-21 Bernhard R. Link * fix typesetting error in ratpoison.1 and add an example for update's Config option. * fix segfault of FD_ISSET(-1,&...) when method is not used (i.e. 
--nolistsdownload and only need to get from other sources) * fix minor memory leak of --skipold 2005-12-24 Bernhard R. Link * add cache database to store md5sums of released files in there. 2005-12-23 Bernhard R. Link * Implement native .bz2 compression (only when libbz2.so was available at build time) 2005-12-22 Bernhard R. Link * fix some spelling errors (thanks to Guilherme de S. Pastore for notifying me) * make index exportion code more low level, allowing in-place md5sum calculation without needing to reread the generated files. * fix problem of bzip2.example script 2005-12-20 Bernhard R. Link * refactor index exporting/release generation so that is always puts the uncompressed checksums in the Release file. * reverting the changes from 2005-12-15 (i.e. again not writing uncompressed Sources by default, as the checksum now shows up in the Release file anyway, as apt needs it) * {Dsc,Deb,UDeb}Indices' external programs are now only called with the uncompressed files. 2005-12-19 Bernhard R. Link * fix segfault introduced into interatedupdate by --skipold. 2005-12-18 Bernhard R. Link * split Release reading from release.c to readrelease.c 2005-12-15 Bernhard R. Link * Generate uncompressed source/Sources by default. 2005-12-11 Bernhard R. Link * Unless the new --noskipold is used, only targets with newly downloaded index files are updated. 2005-12-10 Bernhard R. Link * remove pool-directories gotten empty (thanks to Julien Valroff for suggesting this) * new --keepdirectories option to not try this 2005-10-27 Bernhard R. Link * add colons in description within bzip.example (thanks to Steve Kemp for finding this) 2005-10-05 Bernhard R. Link * add --ignore=missingfield,brokenold,brokenversioncmp, unusedarch,surpisingarch 2005-10-03 Bernhard R. Link * replace readdir_r by readdir to be sure errno is set properly. 2005-10-02 Bernhard R. Link * some cleanups (strict truthvalue-typing and some integer signednesses...) 2005-09-28 Bernhard R. 
Link * Fix segfault when update file is empty. (Thanks to Gianluigi Tiesi for noticing this.) 2005-09-26 Bernhard R. Link * Document override files' format in manpage * Fix integer size in tracking data handling 2005-09-25 Bernhard R. Link * Documenting --ignore in manpage * some clarifications in manpage 2005-09-24 Bernhard R. Link * putting a .changes in the wrong distribution is an error now without --ignore=wrongdistribution * puttin new address in GPL notices, redownload COPYING (fixing some typos and addresses) 2005-09-22 Bernhard R. Link * add --unignore (with alias --noignore) to allow overwriting ignore in config. 2005-09-06 Bernhard R. Link * fix error in parsing FilterList default action (thanks to Sergio Talens-Oliag for finding that) 2005-08-28 Bernhard R. Link * add REPREPRO_CONFIG_DIR 2005-08-26 Bernhard R. Link * read conf/options for default command line options, use REPREPRO_BASE_DIR for default -b value, add --no options to disable previously enabled options again. * add a createsymlinks command to create suite->codename symlinks 2005-08-05 Bernhard R. Link * do not set execute bit of signed files 2005-08-02 Bernhard R. Link * allow ~ in versions listed within .changes * changed spacing in dpkgversions.c to make comparing to originals in dpkg easier. 2005-07-20 Bernhard R. Link * read SignWith:-argument and give it to libgpgme to decide which key to use. 2005-07-05 Bernhard R. Link * Document tracking 2005-07-03 Bernhard R. Link * add quick&dirty --ask-passphrase option 2005-06-18 Bernhard R. Link * add tracking.c and some starting functionality * therefor refactored .deb and .dsc inclusion so that .changes includsion can check those better before doing anything. * some little tidy ups (freeing more memory, fixing bad english 2005-06-02 Bernhard R. Link * Change default basedir to "." 2005-05-31 Bernhard R. Link * Fix bogus free causing segfaults * No longer silently ignore additional arguments with include* 2005-05-13 Bernhard R. 
Link * add Fallback option to update-methods. 2005-04-16 Bernhard R. Link * fix broken fix in signature.c from 2005-04-10 * fix bug when after a delete rule the second origin has the version already in an archive 2005-04-12 Bernhard R. Link * fix same more warnings 2005-04-10 Bernhard R. Link * apply some clean ups: - distinguish between boolean and non-boolean values - split globals from error.h in globals.h * fix bug in signature.c to not treat config error like valid key. 2005-04-07 Bernhard R. Link * fix wrong handling of bugs in update specifications * adopt short-howto to present * fix typo in manpage 2005-04-05 Bernhard R. Link * create files without executeable bit set when copying files. 2005-03-29 Bernhard R. Link * iteratedupdate directly exports indices instead of all at the end... 2005-03-28 Bernhard R. Link * Implement "interatedupdate" command, which iterates the distributions and targets within them, instead of first downloading all lists, then processing all lists, then downloading all packages and then installing them all. (This can be a bit slower, but needs less memory) * Two --force are needed to ignore wrong Release.gpg 2005-03-27 Bernhard R. Link * Implement ".tobedeleted" feature for export skripts. 2005-03-22 Bernhard R. Link * Repeat that there were errors at the end of reprepro. 2005-03-11 Bernhard R. Link * Do not accept multiple -A,-C,-T,-S or -Ps. 2005-03-02 Bernhard R. Link * Change Override/SrcOverride to DebOverride/UDebOverride/DscOverride * add new command reoverride to reapply overrides to all packages. 2005-02-20 Bernhard R. Link * add docs/tiffany.example, which generates apt-qupdate'able .diff directories. * Many small changes to make splint more happy. (Mostly annotations, some clearance and some fixes of memory holes or possible segfaults if running out of memory) 2005-02-19 Bernhard R. 
Link * Refactor Index Exporting and Release generation to reduce the time Release files and Package indices are out of sync (Everything is written to files ending in .new now, only when everything is ready all are moved to their final place) and to prepare DebIndices UDebIndices and DscIndices Options. * add another test-case * FIX the overflow bug in chunks_replacefield * add DebIndices UDebIndices and DscIndices options for conf/distributions. This allows to change which Indices to generate for this type, or calls hook to even generate additional ones. (See docs/bzip.example). 2005-02-14 Bernhard R. Link * Some little changes to make splint and valgrind happier. 2005-02-13 Bernhard R. Link * Remove some code duplication in main.c (and renamed _md5sums to _listmd5sums) * change -b to not overwrite prior given --listdir --distdir ... 2005-02-12 Bernhard R. Link * Some clean up of the code and added some paranoia checks. 2005-02-10 Bernhard R. Link * No longer shutdown aptmethods when nothing is to do. (This caused problems when index files are already in place but still packages to be downloaded). * Do not warn about deleting _changed files from listdir. 2005-02-08 Bernhard R. Link * Do some more checks reading signed sources. * Release 0.1.1 2005-02-07 Bernhard R. Link * Fix --onlyacceptsigned to safely handle unknown keys or multiple keys of different state. 2005-02-06 Bernhard R. Link * Release 0.1 2005-02-05 Bernhard R. Link * Add --onlyacceptsigned to make include and includedsc only accept signed files. * Check Codename, Components and Architectures fields of conf/distributions for sane values * fix checks for strange characters 2005-02-03 Bernhard R. Link * When updating delete files lists/_ for all updated distributions, which will not be needed any more. 2005-02-01 Bernhard R. 
Link * Add some missing files in Makefile.am so they end up in dist * Add some #includes so that it also compiles without warnings on sarge/i386 * --ignore= allows multiple options separated by commas. * Tell about -b if conf/distributions cannot be found * Tell which release.gpg file is missing the signature. * Some tidy up to reduce number of warnings with -W * Allow multiple keys specified in update's ReleaseCheck 2005-01-29 Bernhard R. Link * Be more descriptive with missing signatures. 2005-01-28 Bernhard R. Link * readd _detect command * write recovery HOWTO how to deal with database corruptions 2005-01-27(at least GMT) Bernhard R. Link * add a lockfile 2005-01-26 Bernhard R. Link * change FilterList to need a defaultaction given * tidy up upgradelist.c and report errors properly * ListHook is also called when --nolistsdownload is given * update/checkupdate only download lists not already here 2005-01-25 Bernhard R. Link * Add ListHook keyword for external processing of the downloaded index file before updating. * Add FilterList keyword for a list in the format of dpkg --get-selections 2005-01-24 Bernhard R. Link * Make includedeb work again. * Fix bugs in override file parsing * add a listfilter command * fix bug in term evaluation with non-existing fields * fix another parsing bug when too few spaces where around * implement T_NEGATED flag of parsing * document listfilter command * check conf/distributions conf/updates for unknown fields (to rule out typos, lines with # are ignored) 2005-01-22 Bernhard R. Link * Make -T work everywhere -A works. * rename variables from suffix to packagetype * allow colons in .changes filenames. (epoch with colon is stripped, but colons after that are allowed) * Add tests/test.sh to test for basic things to work... * fix bug that prevented Release regeneration when a index-file is changed to zero entries. 2005-01-19 Bernhard R. 
Link * now also include, includedeb, includedsc and update will remove files which are no longer needed due to newer versions available, except when --keepunreferencedfiles is given. * change some verbosities of files and refereces 2005-01-17 Bernhard R. Link * remove short options -e -N -l -r -M -d -D -c -p -o to make it more guessable (and reserving short options for important and likely often called functions). * add --keepunreferencedfile option (if you think this is long, remember GNU getopt_long will accept --keep, too) 2005-01-15 Bernhard R. Link * Seperate parsing and looking for allowed values a bit more. Some more things can be ignored with --ignore now. * includedsc and includedeb only export files that changed. * remove now deletes files of removed packages not referenced by any other package. 2005-01-10 Bernhard R. Link * Made updates using --force with failing parts more graceful * Make aptmethods less verbose 2005-01-07 Bernhard R. Link * Changed the meaning of the "Architectures:" field in conf/distributions. Now a distribution will have sources exactly when a "source" is in this line. 2005-01-05 Bernhard R. Link * Only generate Release (and Release.gpg) files when something changed. * Add a --nolistsdownload option to avoid update and checkupdate downloading all those lists again. 2005-01-04 Bernhard R. Link * Several code clean-ups, should not change anything.... 2004-12-30 Bernhard R. Link * Tidy up (introduce bool_t and replace dpkgversion_isNewer) * add a magic rule minus ("-") to mark all packages to be deleted. * add a checkupdate command to show what would be done. 2004-12-24 Bernhard R. Link * Fixed a boolean inversion in the check if | is allowed in formulas. * added FilterFormula to docs/reprepro.1 2004-12-19 Bernhard R. Link * change parsing of conf/distributions, the fields only copied to Release files can be omitted now. Additional it warns if required fields are missing intead of silently ignoring this block... 2004-12-18 Bernhard R. 
Link * remove now tells which packages were removed (with -v) and which could not be deleted. Indicies will only be exported when something was deleted. 2004-12-18 Bernhard R. Link * Modify remove to allow -T to specify the type (deb,dsc,udeb) to delete from. reprepro-4.13.1/byhandhook.h0000644000175100017510000000150212152651661012663 00000000000000#ifndef REPREPRO_BYHANDHOOK_H #define REPREPRO_BYHANDHOOK_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" #endif #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" #endif struct byhandhook; retvalue byhandhooks_parse(struct configiterator *, /*@out@*/struct byhandhook **); /* 2nd argument starts as NULL, returns true as long as there are hooks */ bool byhandhooks_matched(const struct byhandhook *, const struct byhandhook **, const char * /*section*/, const char * /*priority*/, const char * /*name*/); retvalue byhandhook_call(const struct byhandhook *, const char * /*codename*/, const char * /*section*/, const char * /*priority*/, const char * /*basename*/, const char * /*fullfilename*/); void byhandhooks_free(/*@null@*//*@only@*/struct byhandhook *); #endif reprepro-4.13.1/freespace.c0000644000175100017510000001464412152651661012500 00000000000000/* This file is part of "reprepro" * Copyright (C) 2007 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "database.h" #include "checksums.h" #include "freespace.h" struct device { /*@null@*/struct device *next; /* stat(2)'s st_dev number identifying this device */ dev_t id; /* some directory in this filesystem */ char *somepath; /* size of one block on this device according to statvfs(2) */ unsigned long blocksize; /* blocks available for us */ fsblkcnt_t available; /* blocks already known to be needed on that device */ fsblkcnt_t needed; /* calculated block to keep free */ fsblkcnt_t reserved; }; struct devices { /*@null@*/struct device *root; off_t reserved; }; void space_free(struct devices *devices) { struct device *d; if (devices == NULL) return; while ((d = devices->root) != NULL) { devices->root = d->next; free(d->somepath); free(d); } free(devices); } static retvalue device_find_or_create(struct devices *devices, dev_t id, const char *dirname, /*@out@*/struct device **result) { struct device *d; struct statvfs s; int ret; d = devices->root; while (d != NULL && d->id != id) d = d->next; if (d != NULL) { *result = d; return RET_OK; } ret = statvfs(dirname, &s); if (ret != 0) { int e = errno; fprintf(stderr, "Error judging free space for the fileystem '%s' belongs to: %d=%s\n" "(Take a look at --spacecheck in the manpage on how to modify checking.)\n", dirname, e, strerror(e)); return RET_ERRNO(e); } d = NEW(struct device); if (FAILEDTOALLOC(d)) return RET_ERROR_OOM; d->next = devices->root; d->id = id; d->somepath = strdup(dirname); if (FAILEDTOALLOC(d->somepath)) { free(d); return RET_ERROR_OOM; } d->blocksize = s.f_bsize; /* use bfree when being root? but why run as root? 
*/ d->available = s.f_bavail; d->needed = 0; /* always keep at least one megabyte spare */ d->reserved = devices->reserved/d->blocksize+1; devices->root = d; *result = d; return RET_OK; } retvalue space_prepare(struct devices **devices, enum spacecheckmode mode, off_t reservedfordb, off_t reservedforothers) { struct devices *n; struct device *d; struct stat s; int ret; retvalue r; if (mode == scm_NONE) { *devices = NULL; return RET_OK; } assert (mode == scm_FULL); n = NEW(struct devices); if (FAILEDTOALLOC(n)) return RET_ERROR_OOM; n->root = NULL; n->reserved = reservedforothers; ret = stat(global.dbdir, &s); if (ret != 0) { int e = errno; fprintf(stderr, "Error stat'ing %s: %d=%s\n", global.dbdir, e, strerror(e)); free(n); return RET_ERRNO(e); } r = device_find_or_create(n, s.st_dev, global.dbdir, &d); if (RET_WAS_ERROR(r)) { space_free(n); return r; } d->reserved += reservedfordb/d->blocksize+1; *devices = n; return RET_OK; } retvalue space_needed(struct devices *devices, const char *filename, const struct checksums *checksums) { size_t l = strlen(filename); char buffer[l+1]; struct stat s; struct device *device; int ret; retvalue r; fsblkcnt_t blocks; off_t filesize; if (devices == NULL) return RET_NOTHING; while (l > 0 && filename[l-1] != '/') l--; assert (l > 0); memcpy(buffer, filename, l); buffer[l] = '\0'; ret = stat(buffer, &s); if (ret != 0) { int e = errno; fprintf(stderr, "Error stat'ing %s: %d=%s\n", filename, e, strerror(e)); return RET_ERRNO(e); } r = device_find_or_create(devices, s.st_dev, buffer, &device); if (RET_WAS_ERROR(r)) return r; filesize = checksums_getfilesize(checksums); blocks = (filesize + device->blocksize - 1) / device->blocksize; device->needed += 1 + blocks; return RET_OK; } retvalue space_check(struct devices *devices) { struct device *device; struct statvfs s; int ret; retvalue result = RET_OK; if (devices == NULL) return RET_NOTHING; for (device = devices->root ; device != NULL ; device = device->next) { /* recalculate free 
space, as created directories * and other stuff might have changed it */ ret = statvfs(device->somepath, &s); if (ret != 0) { int e = errno; fprintf(stderr, "Error judging free space for the fileystem '%s' belongs to: %d=%s\n" "(As this worked before in this run, something must have changed strangely)\n", device->somepath, e, strerror(e)); return RET_ERRNO(e); } if (device->blocksize != s.f_bsize) { fprintf(stderr, "The block size of the filesystem belonging to '%s' has changed.\n" "Either something was mounted or unmounted while reprepro was running,\n" "or some symlinks were changed. Aborting as utterly confused.\n", device->somepath); } device->available = s.f_bavail; if (device->needed >= device->available) { fprintf(stderr, "NOT ENOUGH FREE SPACE on filesystem 0x%lx (the filesystem '%s' is on)\n" "available blocks %llu, needed blocks %llu, block size is %llu.\n", (unsigned long)device->id, device->somepath, (unsigned long long)device->available, (unsigned long long)device->needed, (unsigned long long)device->blocksize); result = RET_ERROR; } else if (device->reserved >= device->available || device->needed >= device->available - device->reserved) { fprintf(stderr, "NOT ENOUGH FREE SPACE on filesystem 0x%lx (the filesystem '%s' is on)\n" "available blocks %llu, needed blocks %llu (+%llu safety margin), block size is %llu.\n" "(Take a look at --spacecheck in the manpage for more information.)\n", (unsigned long)device->id, device->somepath, (unsigned long long)device->available, (unsigned long long)device->needed, (unsigned long long)device->reserved, (unsigned long long)device->blocksize); result = RET_ERROR; } } return result; } reprepro-4.13.1/freespace.h0000644000175100017510000000107112152651661012473 00000000000000#ifndef REPREPRO_FREESPACE_H #define REPREPRO_FREESPACE_H #ifndef REPREPRO_DATABASE_H #include "database.h" #endif struct devices; enum spacecheckmode { scm_NONE, /* scm_ASSUMESINGLEFS, */ scm_FULL }; retvalue space_prepare(/*@out@*/struct devices 
**, enum spacecheckmode, off_t /*reservedfordb*/, off_t /*reservedforothers*/); struct checksums; retvalue space_needed(/*@null@*/struct devices *, const char * /*filename*/, const struct checksums *); retvalue space_check(/*@null@*/struct devices *); void space_free(/*@only@*//*@null@*/struct devices *); #endif reprepro-4.13.1/acinclude.m40000644000175100017510000000275112152651661012564 00000000000000dnl CHECK_ENUM and GET_DEFINE autoconf macros are dnl Copyright 2004,2006 Bernhard R. Link dnl and hereby in the public domain # Check for an enum, which seem to be forgotten in autoconf, # as this can neighter be checked with cpp, nor is it a symbol m4_define([CHECK_ENUM], [AS_VAR_PUSHDEF([check_Enum], [rr_cv_check_enum_$1])dnl AC_CACHE_CHECK([for $1 in $2], check_Enum, [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([AC_INCLUDES_DEFAULT([$5]) @%:@include <$2>], [ if( $1 == 0 ) return 0; ])], [AS_VAR_SET(check_Enum, yes)], [AS_VAR_SET(check_Enum, no)])]) AS_IF([test AS_VAR_GET(check_Enum) = yes], [$3], [$4])[]dnl AS_VAR_POPDEF([check_Enum])dnl ])dnl # extract the value of a #define from a header m4_define([GET_DEFINE], [AC_LANG_PREPROC_REQUIRE()dnl AS_VAR_PUSHDEF(get_Define, [rr_cv_get_define_$1])dnl AC_CACHE_CHECK([for $1], get_Define, [dnl m4_ifvaln([$2],[dnl echo "#include <$2>" > conftest.$ac_ext echo "$1" >> conftest.$ac_ext ],[dnl echo "$1" > conftest.$ac_ext ]) if _AC_EVAL_STDERR([$ac_cpp conftest.$ac_ext >conftest.out]) >/dev/null; then if test -s conftest.err; then AS_VAR_SET(get_Define, $1) else AS_VAR_SET(get_Define, "$(tail -1 conftest.out)") fi else AS_VAR_SET(get_Define, $1) fi rm -f conftest.err conftest.out conftest.$ac_ext ]) TMP_GET_DEFINE=AS_VAR_GET(get_Define) TMP_GET_DEFINE=${TMP_GET_DEFINE% } TMP_GET_DEFINE=${TMP_GET_DEFINE% } AS_IF([test "$TMP_GET_DEFINE" = $1], [$3], [$1="$TMP_GET_DEFINE"])[]dnl AS_VAR_POPDEF([get_Define])dnl ])dnl GET_DEFINE reprepro-4.13.1/pool.h0000644000175100017510000000160012152651661011505 00000000000000#ifndef REPREPRO_POOL_H 
#define REPREPRO_POOL_H #ifndef REPREPRO_ERROR_H #include "error.h" #endif #ifndef REPREPRO_DATABASE_H #include "database.h" #endif extern bool pool_havedereferenced; /* called from references.c to note the file lost a reference */ retvalue pool_dereferenced(const char *); /* called from files.c to note the file was added or forgotten */ retvalue pool_markadded(const char *); retvalue pool_markdeleted(const char *); /* Remove all files that lost their last reference, or only count them */ retvalue pool_removeunreferenced(bool /*delete*/); /* Delete all added files that are not used, or only count them */ void pool_tidyadded(bool deletenew); /* delete and forget a single file */ retvalue pool_delete(const char *); /* notify outhook of new files */ void pool_sendnewfiles(void); /* free all memory, to make valgrind happier */ void pool_free(void); #endif reprepro-4.13.1/checkin.h0000644000175100017510000000164712152651661012153 00000000000000#ifndef REPREPRO_CHECKIN_H #define REPREPRO_CHECKIN_H #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" #endif #ifndef REPREPRO_DATABASE_H #include "database.h" #endif #ifndef REPREPRO_DISTRIBUTION_H #include "distribution.h" #endif #ifndef REPREPRO_ATOMS_H #include "atoms.h" #endif /* insert the given .changes into the mirror in the * if forcecomponent, forcesection or forcepriority is NULL * get it from the files or try to guess it. * if dereferencedfilekeys is != NULL, add filekeys that lost reference, * if tracks != NULL, update/add tracking information there... 
*/ retvalue changes_add(/*@null@*/trackingdb, const struct atomlist * /*packagetypes*/, component_t, const struct atomlist * /*forcearchitecture*/, /*@null@*/const char * /*forcesection*/, /*@null@*/const char * /*forcepriority*/, struct distribution *, const char * /*changesfilename*/, int /*delete*/); #endif reprepro-4.13.1/configure.ac0000644000175100017510000001153012152655314012653 00000000000000dnl dnl Process this file with autoconf to produce a configure script dnl AC_INIT(reprepro, 4.13.1, brlink@debian.org) AC_CONFIG_SRCDIR(main.c) AC_CONFIG_AUX_DIR(ac) AM_INIT_AUTOMAKE([-Wall -Werror -Wno-portability]) AM_CONFIG_HEADER(config.h) if test "${CFLAGS+set}" != set ; then CFLAGS="-Wall -O2 -g -Wmissing-prototypes -Wstrict-prototypes -Wshadow" fi AM_MAINTAINER_MODE AC_GNU_SOURCE AC_PROG_CC_C99 AC_PROG_INSTALL AC_SYS_LARGEFILE AC_C_BIGENDIAN() AC_HEADER_STDBOOL AC_CHECK_FUNCS([closefrom strndup dprintf tdestroy]) found_mktemp=no AC_CHECK_FUNCS([mkostemp mkstemp],[found_mktemp=yes ; break],) if test "$found_mktemp" = "no" ; then AC_MSG_ERROR([Missing mkstemp or mkostemp]) fi AC_CHECK_FUNC([vasprintf],,[AC_MSG_ERROR([Could not find vasprintf implementation!])]) DBLIBS="" # the only way to find out which is compileable is to look into db.h: AC_CHECK_HEADER(db.h,,[AC_MSG_ERROR(["no db.h found"])]) AC_CHECK_LIB(db, db_create, [DBLIBS="-ldb $DBLIBS" ],[AC_MSG_ERROR(["no libdb found"])],[$DBLIBS]) AC_SUBST([DBLIBS]) AC_CHECK_LIB(z,gzopen,,[AC_MSG_ERROR(["no zlib found"])],) AC_ARG_WITH(libgpgme, [ --with-libgpgme=path|yes|no Give path to prefix libgpgme was installed with],[dnl case "$withval" in no) ;; yes) AC_CHECK_HEADER(gpgme.h,,[AC_MSG_ERROR(["no gpgme.h found"])]) AC_CHECK_LIB(gpg-error,gpg_strsource,,[AC_MSG_ERROR(["no libgpg-error found"])],) AC_CHECK_LIB(gpgme,gpgme_get_protocol_name,,[AC_MSG_ERROR(["no libgpgme found (need at least 0.4.1)"])],) ;; *) CPPFLAGS="$CPPFLAGS -I$withval/include" LIBS="$LIBS -L$withval/lib" 
AC_CHECK_HEADER(gpgme.h,,[AC_MSG_ERROR(["no gpgme.h found"])]) AC_CHECK_LIB(gpg-error,gpg_strsource,,[AC_MSG_ERROR(["no libgpg-error found"])],) AC_CHECK_LIB(gpgme,gpgme_get_protocol_name,,[AC_MSG_ERROR(["no libgpgme found (need at least 0.4.1)"])],) ;; esac ],[dnl default is to behave like yes (for libgpgme only) AC_CHECK_HEADER(gpgme.h,,[AC_MSG_ERROR(["no gpgme.h found (to disable run with --without-libgpgme)"])]) AC_CHECK_LIB(gpg-error,gpg_strsource,,[AC_MSG_ERROR(["no libgpg-error found (to disable run with --without-libgpgme)"])],) AC_CHECK_LIB(gpgme,gpgme_get_protocol_name,,[AC_MSG_ERROR(["did not find libgpgme versoion 0.4.1 or later (to disable run with --without-libgpgme)"])],) ]) AC_ARG_WITH(libbz2, [ --with-libbz2=path|yes|no Give path to prefix libbz2 was installed with],[dnl case "$withval" in no) ;; yes) AC_CHECK_LIB(bz2,BZ2_bzCompressInit,,[AC_MSG_ERROR(["no libbz2 found, despite being told to use it"])],) ;; *) AC_CHECK_LIB(bz2,BZ2_bzCompressInit,[dnl AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_LIBBZ2)) LIBS="$LIBS -L$withval/lib -lbz2" CPPFLAGS="$CPPFLAGS -I$withval/include" ],[AC_MSG_ERROR(["no libbz2 found, despite being told to use it"])],[-L$withval/lib]) ;; esac ],[dnl without --with-libbz2 we look for it but not finding it is no error: AC_CHECK_LIB(bz2,BZ2_bzCompressInit,,[AC_MSG_WARN(["no libbz2 found, compiling without"])],) ]) ARCHIVELIBS="" ARCHIVECPP="" AH_TEMPLATE([HAVE_LIBARCHIVE],[Defined if libarchive is available]) AC_ARG_WITH(libarchive, [ --with-libarchive=path|yes|no Give path to prefix libarchive was installed with],[dnl case "$withval" in no) ;; yes) AC_CHECK_LIB(archive,archive_read_new,[dnl AC_CHECK_HEADER(archive.h,[dnl AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_LIBARCHIVE),1) ARCHIVELIBS="-larchive" ],[AC_MSG_ERROR([Could not find archive.h])]) ],[AC_MSG_ERROR([Could not find libarchive])]) ;; *) AC_CHECK_LIB(archive,archive_read_new,[dnl mysave_CPPFLAGS="$CPPFLAGS" CPPFLAGS="-I$withval/include $CPPFLAGS" AC_CHECK_HEADER(archive.h,[dnl 
AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_LIBARCHIVE),1) ARCHIVELIBS="-L$withval/lib -larchive" ARCHIVECPP="-I$withval/include" ],[AC_MSG_ERROR([Could not find archive.h])]) CPPFLAGS="$mysave_CPPFLAGS" ],[AC_MSG_ERROR([Could not find libarchive])],[-L$withval/lib]) ;; esac ],[dnl without --with-libarchive we look for it but not finding it is no error: AC_CHECK_LIB(archive,archive_read_new,[dnl AC_CHECK_HEADER(archive.h,[dnl AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_LIBARCHIVE),1) ARCHIVELIBS="-larchive" ],) ],) ]) AC_ARG_WITH(static-libarchive, [ --with-static-libarchive=.a-file static libarchive library to be linked against], [ case "$withval" in no|yes) AC_MSG_ERROR([--with-static-libarchive needs an .a file as parameter]) ;; *) AC_CHECK_LIB(c,archive_read_new,[dnl mysave_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$ARCHIVECPP $CPPFLAGS" AC_CHECK_HEADER(archive.h,[dnl AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_LIBARCHIVE)) ARCHIVELIBS="$withval" ],[AC_MSG_ERROR([Could not find archive.h])]) CPPFLAGS="$mysave_CPPFLAGS" ],[AC_MSG_ERROR([Error linking against $withval])],[$withval]) esac ]) AM_CONDITIONAL([HAVE_LIBARCHIVE],[test -n "$ARCHIVELIBS"]) AC_SUBST([ARCHIVELIBS]) AC_SUBST([ARCHIVECPP]) dnl dnl Create makefiles dnl AC_CONFIG_FILES([Makefile docs/Makefile tests/Makefile]) AC_OUTPUT reprepro-4.13.1/sourcecheck.c0000644000175100017510000003005312152651661013031 00000000000000/* This file is part of "reprepro" * Copyright (C) 2010,2011 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include "error.h" #include "distribution.h" #include "trackingt.h" #include "sourcecheck.h" /* This is / will be the implementation of the * unusedsources * withoutsource * reportcruft * removecruft (to be implemented) * commands. * * Currently those only work with tracking enabled, but * are in this file as the implementation without tracking * will need similar infrastructure */ /* TODO: some tree might be more efficient, check how bad the comparisons are here */ struct info_source { struct info_source *next; char *name; struct info_source_version { struct info_source_version *next; char *version; bool used; } version; }; static void free_source_info(struct info_source *s) { while (s != NULL) { struct info_source *h = s; s = s->next; while (h->version.next != NULL) { struct info_source_version *v = h->version.next; h->version.next = v->next; free(v->version); free(v); } free(h->version.version); free(h->name); free(h); } } static retvalue collect_source_versions(struct distribution *d, struct info_source **out) { struct info_source *root = NULL, *last = NULL; struct target *t; struct target_cursor target_cursor = TARGET_CURSOR_ZERO; const char *name, *chunk; retvalue result = RET_NOTHING, r; for (t = d->targets ; t != NULL ; t = t->next) { if (t->architecture != architecture_source) continue; r = target_openiterator(t, true, &target_cursor); if (RET_WAS_ERROR(r)) { RET_UPDATE(result, r); break; } while (target_nextpackage(&target_cursor, &name, &chunk)) { char *version; struct info_source **into = NULL; struct info_source_version *v; r = t->getversion(chunk, &version); if (!RET_IS_OK(r)) { RET_UPDATE(result, r); continue; } if (last != NULL) { int c; c = strcmp(name, last->name); if (c < 0) { /* start 
at the beginning */ last = NULL; } else while (c > 0) { into = &last->next; if (last->next == NULL) break; last = last->next; c = strcmp(name, last->name); if (c == 0) { into = NULL; break; } } } /* if into != NULL, place there, * if last != NULL, already found */ if (last == NULL) { into = &root; while ((last = *into) != NULL) { int c; c = strcmp(name, last->name); if (c == 0) { into = NULL; break; } if (c < 0) break; into = &last->next; } } if (into != NULL) { last = zNEW(struct info_source); if (FAILEDTOALLOC(last)) { free(version); result = RET_ERROR_OOM; break; } last->name = strdup(name); if (FAILEDTOALLOC(last->name)) { free(version); free(last); result = RET_ERROR_OOM; break; } last->version.version = version; last->next = *into; *into = last; RET_UPDATE(result, RET_OK); continue; } assert (last != NULL); assert (strcmp(name, last->name)==0); v = &last->version; while (strcmp(v->version, version) != 0) { if (v->next == NULL) { v->next = zNEW(struct info_source_version); if (FAILEDTOALLOC(v->next)) { free(version); result = RET_ERROR_OOM; break; } v = v->next; v->version = version; version = NULL; RET_UPDATE(result, RET_OK); break; } v = v->next; } free(version); } r = target_closeiterator(&target_cursor); if (RET_WAS_ERROR(r)) { RET_UPDATE(result, r); break; } } if (RET_IS_OK(result)) *out = root; else { assert (result != RET_NOTHING || root == NULL); free_source_info(root); } return result; } static retvalue process_binaries(struct distribution *d, struct info_source *sources, retvalue (*action)(struct distribution *, struct target *, const char *, const char *, const char *, const char *, void *), void *privdata) { struct target *t; struct target_cursor target_cursor = TARGET_CURSOR_ZERO; const char *name, *chunk; retvalue result = RET_NOTHING, r; for (t = d->targets ; t != NULL ; t = t->next) { if (t->architecture == architecture_source) continue; r = target_openiterator(t, true, &target_cursor); if (RET_WAS_ERROR(r)) { RET_UPDATE(result, r); break; } 
while (target_nextpackage(&target_cursor, &name, &chunk)) { char *source, *version; struct info_source *s; struct info_source_version *v; r = t->getsourceandversion(chunk, name, &source, &version); if (!RET_IS_OK(r)) { RET_UPDATE(result, r); continue; } s = sources; while (s != NULL && strcmp(s->name, source) < 0) { s = s->next; } if (s != NULL && strcmp(source, s->name) == 0) { v = &s->version; while (v != NULL && strcmp(version, v->version) != 0) v = v->next; } else v = NULL; if (v != NULL) { v->used = true; } else if (action != NULL) { r = action(d, t, name, source, version, chunk, privdata); RET_UPDATE(result, r); } free(source); free(version); } r = target_closeiterator(&target_cursor); if (RET_WAS_ERROR(r)) { RET_UPDATE(result, r); break; } } return result; } static retvalue listunusedsources(struct distribution *d, const struct trackedpackage *pkg) { bool hasbinary = false, hassource = false; int i; for (i = 0 ; i < pkg->filekeys.count ; i++) { if (pkg->refcounts[i] == 0) continue; if (pkg->filetypes[i] == 's') hassource = true; if (pkg->filetypes[i] == 'b') hasbinary = true; if (pkg->filetypes[i] == 'a') hasbinary = true; } if (hassource && ! 
hasbinary) { printf("%s %s %s\n", d->codename, pkg->sourcename, pkg->sourceversion); return RET_OK; } return RET_NOTHING; } retvalue unusedsources(struct distribution *alldistributions) { struct distribution *d; retvalue result = RET_NOTHING, r; for (d = alldistributions ; d != NULL ; d = d->next) { if (!d->selected) continue; if (!atomlist_in(&d->architectures, architecture_source)) continue; if (d->tracking != dt_NONE) { r = tracking_foreach_ro(d, listunusedsources); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) return r; continue; } struct info_source *sources = NULL; const struct info_source *s; const struct info_source_version *v; r = collect_source_versions(d, &sources); if (!RET_IS_OK(r)) continue; r = process_binaries(d, sources, NULL, NULL); RET_UPDATE(result, r); for (s = sources ; s != NULL ; s = s->next) { for (v = &s->version ; v != NULL ; v = v->next) { if (v->used) continue; printf("%s %s %s\n", d->codename, s->name, v->version); } } free_source_info(sources); } return result; } static retvalue listsourcemissing(struct distribution *d, const struct trackedpackage *pkg) { bool hasbinary = false, hassource = false; int i; for (i = 0 ; i < pkg->filekeys.count ; i++) { if (pkg->refcounts[i] == 0) continue; if (pkg->filetypes[i] == 's') hassource = true; if (pkg->filetypes[i] == 'b') hasbinary = true; if (pkg->filetypes[i] == 'a') hasbinary = true; } if (hasbinary && ! 
hassource) { for (i = 0 ; i < pkg->filekeys.count ; i++) { if (pkg->refcounts[i] == 0) continue; if (pkg->filetypes[i] != 'b' && pkg->filetypes[i] != 'a') continue; printf("%s %s %s %s\n", d->codename, pkg->sourcename, pkg->sourceversion, pkg->filekeys.values[i]); } return RET_OK; } return RET_NOTHING; } static retvalue listmissing(struct distribution *d, struct target *t, UNUSED(const char *name), const char *source, const char *version, const char *chunk, UNUSED(void*data)) { retvalue r; struct strlist list; r = t->getfilekeys(chunk, &list); if (!RET_IS_OK(r)) return r; assert (list.count == 1); printf("%s %s %s %s\n", d->codename, source, version, list.values[0]); strlist_done(&list); return RET_OK; } retvalue sourcemissing(struct distribution *alldistributions) { struct distribution *d; retvalue result = RET_NOTHING, r; for (d = alldistributions ; d != NULL ; d = d->next) { if (!d->selected) continue; if (!atomlist_in(&d->architectures, architecture_source)) { if (verbose >= 0) fprintf(stderr, "Not processing distribution '%s', as it has no source packages.\n", d->codename); continue; } if (d->tracking != dt_NONE) { r = tracking_foreach_ro(d, listsourcemissing); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) return r; } else { struct info_source *sources = NULL; r = collect_source_versions(d, &sources); if (!RET_IS_OK(r)) continue; r = process_binaries(d, sources, listmissing, NULL); RET_UPDATE(result, r); free_source_info(sources); } } return result; } static retvalue listcruft(struct distribution *d, const struct trackedpackage *pkg) { bool hasbinary = false, hassource = false; int i; for (i = 0 ; i < pkg->filekeys.count ; i++) { if (pkg->refcounts[i] == 0) continue; if (pkg->filetypes[i] == 's') hassource = true; if (pkg->filetypes[i] == 'b') hasbinary = true; if (pkg->filetypes[i] == 'a') hasbinary = true; } if (hasbinary && ! 
hassource) { printf("binaries-without-source %s %s %s\n", d->codename, pkg->sourcename, pkg->sourceversion); return RET_OK; } else if (hassource && ! hasbinary) { printf("source-without-binaries %s %s %s\n", d->codename, pkg->sourcename, pkg->sourceversion); return RET_OK; } return RET_NOTHING; } static retvalue listmissingonce(struct distribution *d, UNUSED(struct target *t), UNUSED(const char *name), const char *source, const char *version, UNUSED(const char *chunk), void *data) { struct info_source **already = data; struct info_source *s; for (s = *already ; s != NULL ; s = s->next) { if (strcmp(s->name, source) != 0) continue; if (strcmp(s->version.version, version) != 0) continue; return RET_NOTHING; } s = zNEW(struct info_source); if (FAILEDTOALLOC(s)) return RET_ERROR_OOM; s->name = strdup(source); s->version.version = strdup(version); if (FAILEDTOALLOC(s->name) || FAILEDTOALLOC(s->version.version)) { free(s->name); free(s->version.version); free(s); return RET_ERROR_OOM; } s->next = *already; *already = s; printf("binaries-without-source %s %s %s\n", d->codename, source, version); return RET_OK; } retvalue reportcruft(struct distribution *alldistributions) { struct distribution *d; retvalue result = RET_NOTHING, r; for (d = alldistributions ; d != NULL ; d = d->next) { if (!d->selected) continue; if (!atomlist_in(&d->architectures, architecture_source)) { if (verbose >= 0) fprintf(stderr, "Not processing distribution '%s', as it has no source packages.\n", d->codename); continue; } if (d->tracking != dt_NONE) { r = tracking_foreach_ro(d, listcruft); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) return r; continue; } struct info_source *sources = NULL; struct info_source *list = NULL; const struct info_source *s; const struct info_source_version *v; r = collect_source_versions( d, &sources); if (!RET_IS_OK(r)) continue; r = process_binaries( d, sources, listmissingonce, &list); RET_UPDATE(result, r); for (s = sources ; s != NULL ; s = s->next) { for (v = 
&s->version ; v != NULL ; v = v->next) { if (v->used) continue; printf("source-without-binaries %s %s %s\n", d->codename, s->name, v->version); } } free_source_info(list); free_source_info(sources); } return result; } reprepro-4.13.1/mprintf.h0000644000175100017510000000047112152651661012220 00000000000000#ifndef REPREPRO_MPRINTF #define REPREPRO_MPRINTF #include /* This is just a asprintf-wrapper to be more easily used * and alwasy returns NULL on error */ /*@null@*/char * mprintf(const char *, ...) __attribute__ ((format (printf, 1, 2))); /*@null@*/char * vmprintf(const char *, va_list); #endif reprepro-4.13.1/sources.c0000644000175100017510000004602612152651661012225 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003,2004,2005,2006,2007,2008,2009,2010 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include "error.h" #include "mprintf.h" #include "strlist.h" #include "chunks.h" #include "sources.h" #include "names.h" #include "dirs.h" #include "dpkgversions.h" #include "override.h" #include "tracking.h" #include "signature.h" /* split a " " into md5sum and filename */ static retvalue calc_parsefileline(const char *fileline, /*@out@*/char **filename) { const char *p, *fn, *fnend; char *filen; assert (fileline != NULL); if (*fileline == '\0') return RET_NOTHING; /* the md5sums begins after the (perhaps) heading spaces ... */ p = fileline; while (*p != '\0' && (*p == ' ' || *p == '\t')) p++; if (*p == '\0') return RET_NOTHING; /* ... and ends with the following spaces. */ while (*p != '\0' && !(*p == ' ' || *p == '\t')) p++; if (*p == '\0') { fprintf(stderr, "Expecting more data after md5sum!\n"); return RET_ERROR; } /* Then the size of the file is expected: */ while ((*p == ' ' || *p == '\t')) p++; while (*p !='\0' && !(*p == ' ' || *p == '\t')) p++; if (*p == '\0') { fprintf(stderr, "Expecting more data after size!\n"); return RET_ERROR; } /* Then the filename */ fn = p; while ((*fn == ' ' || *fn == '\t')) fn++; fnend = fn; while (*fnend != '\0' && !(*fnend == ' ' || *fnend == '\t')) fnend++; filen = strndup(fn, fnend-fn); if (FAILEDTOALLOC(filen)) return RET_ERROR_OOM; *filename = filen; return RET_OK; } static retvalue getBasenames(const struct strlist *filelines, /*@out@*/struct strlist *basenames) { int i; retvalue r; assert (filelines != NULL && basenames != NULL); r = strlist_init_n(filelines->count, basenames); if (RET_WAS_ERROR(r)) return r; r = RET_NOTHING; for (i = 0 ; i < filelines->count ; i++) { char *basefilename; const char *fileline = filelines->values[i]; r 
= calc_parsefileline(fileline, &basefilename); if (r == RET_NOTHING) { fprintf(stderr, "Malformed Files: line '%s'!\n", fileline); r = RET_ERROR; } if (RET_WAS_ERROR(r)) break; r = strlist_add(basenames, basefilename); if (RET_WAS_ERROR(r)) { break; } r = RET_OK; } if (RET_WAS_ERROR(r)) { strlist_done(basenames); } else { assert (filelines->count == basenames->count); } return r; } retvalue sources_getversion(const char *control, char **version) { retvalue r; r = chunk_getvalue(control, "Version", version); if (RET_WAS_ERROR(r)) return r; if (r == RET_NOTHING) { fprintf(stderr, "Missing 'Version' field in chunk:'%s'\n", control); return RET_ERROR; } return r; } retvalue sources_getarchitecture(UNUSED(const char *chunk), architecture_t *architecture_p) { *architecture_p = architecture_source; return RET_OK; } retvalue sources_getinstalldata(const struct target *t, const char *packagename, UNUSED(const char *version), architecture_t architecture, const char *chunk, char **control, struct strlist *filekeys, struct checksumsarray *origfiles) { retvalue r; char *origdirectory, *directory, *mychunk; struct strlist myfilekeys; struct strlist filelines[cs_hashCOUNT]; struct checksumsarray files; enum checksumtype cs; bool gothash = false; assert (architecture == architecture_source); for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) { assert (source_checksum_names[cs] != NULL); r = chunk_getextralinelist(chunk, source_checksum_names[cs], &filelines[cs]); if (r == RET_NOTHING) strlist_init(&filelines[cs]); else if (RET_WAS_ERROR(r)) { while (cs-- > cs_md5sum) { strlist_done(&filelines[cs]); } return r; } else gothash = true; } if (!gothash) { fprintf(stderr, "Missing 'Files' (or 'SHA1' or ...) 
entry in '%s'!\n", chunk); for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) strlist_done(&filelines[cs]); return RET_ERROR; } r = checksumsarray_parse(&files, filelines, packagename); for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) { strlist_done(&filelines[cs]); } if (RET_WAS_ERROR(r)) return r; r = chunk_getvalue(chunk, "Directory", &origdirectory); if (r == RET_NOTHING) { /* Flat repositories can come without this, TODO: add warnings in other cases fprintf(stderr, "Missing 'Directory' entry in '%s'!\n", chunk); r = RET_ERROR; */ origdirectory = strdup("."); if (FAILEDTOALLOC(origdirectory)) r = RET_ERROR_OOM; } if (RET_WAS_ERROR(r)) { checksumsarray_done(&files); return r; } r = propersourcename(packagename); assert (r != RET_NOTHING); if (RET_IS_OK(r)) r = properfilenames(&files.names); if (RET_WAS_ERROR(r)) { fprintf(stderr, "Forbidden characters in source package '%s'!\n", packagename); free(origdirectory); checksumsarray_done(&files); return r; } directory = calc_sourcedir(t->component, packagename); if (FAILEDTOALLOC(directory)) r = RET_ERROR_OOM; else r = calc_dirconcats(directory, &files.names, &myfilekeys); if (RET_WAS_ERROR(r)) { free(directory); free(origdirectory); checksumsarray_done(&files); return r; } r = calc_inplacedirconcats(origdirectory, &files.names); free(origdirectory); if (!RET_WAS_ERROR(r)) { char *n; n = chunk_normalize(chunk, "Package", packagename); if (FAILEDTOALLOC(n)) mychunk = NULL; else mychunk = chunk_replacefield(n, "Directory", directory, true); free(n); if (FAILEDTOALLOC(mychunk)) r = RET_ERROR_OOM; } free(directory); if (RET_WAS_ERROR(r)) { strlist_done(&myfilekeys); checksumsarray_done(&files); return r; } *control = mychunk; strlist_move(filekeys, &myfilekeys); checksumsarray_move(origfiles, &files); return RET_OK; } retvalue sources_getfilekeys(const char *chunk, struct strlist *filekeys) { char *origdirectory; struct strlist basenames; retvalue r; struct strlist filelines; /* Read the directory given there */ r = 
chunk_getvalue(chunk, "Directory", &origdirectory); if (r == RET_NOTHING) { //TODO: check if it is even text and do not print //of looking binary?? fprintf(stderr, "Does not look like source control: '%s'\n", chunk); return RET_ERROR; } if (RET_WAS_ERROR(r)) return r; r = chunk_getextralinelist(chunk, "Files", &filelines); if (r == RET_NOTHING) { //TODO: check if it is even text and do not print //of looking binary?? fprintf(stderr, "Does not look like source control: '%s'\n", chunk); r = RET_ERROR; } if (RET_WAS_ERROR(r)) { free(origdirectory); return r; } r = getBasenames(&filelines, &basenames); strlist_done(&filelines); if (RET_WAS_ERROR(r)) { free(origdirectory); return r; } r = calc_dirconcats(origdirectory, &basenames, filekeys); free(origdirectory); strlist_done(&basenames); return r; } retvalue sources_getchecksums(const char *chunk, struct checksumsarray *out) { char *origdirectory; struct checksumsarray a; retvalue r; struct strlist filelines[cs_hashCOUNT]; enum checksumtype cs; /* Read the directory given there */ r = chunk_getvalue(chunk, "Directory", &origdirectory); if (!RET_IS_OK(r)) return r; for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) { assert (source_checksum_names[cs] != NULL); r = chunk_getextralinelist(chunk, source_checksum_names[cs], &filelines[cs]); if (r == RET_NOTHING) { if (cs == cs_md5sum) { fprintf(stderr, "Missing 'Files' entry in '%s'!\n", chunk); r = RET_ERROR; } else strlist_init(&filelines[cs]); } if (RET_WAS_ERROR(r)) { while (cs-- > cs_md5sum) { strlist_done(&filelines[cs]); } free(origdirectory); return r; } } r = checksumsarray_parse(&a, filelines, "source chunk"); for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) { strlist_done(&filelines[cs]); } if (RET_WAS_ERROR(r)) { free(origdirectory); return r; } r = calc_inplacedirconcats(origdirectory, &a.names); free(origdirectory); if (RET_WAS_ERROR(r)) { checksumsarray_done(&a); return r; } checksumsarray_move(out, &a); return RET_OK; } retvalue sources_doreoverride(const struct 
target *target, const char *packagename, const char *controlchunk, /*@out@*/char **newcontrolchunk) { const struct overridedata *o; struct fieldtoadd *fields; char *newchunk; retvalue r; if (interrupted()) return RET_ERROR_INTERRUPTED; o = override_search(target->distribution->overrides.dsc, packagename); if (o == NULL) return RET_NOTHING; r = override_allreplacefields(o, &fields); if (!RET_IS_OK(r)) return r; newchunk = chunk_replacefields(controlchunk, fields, "Directory", true); addfield_free(fields); if (FAILEDTOALLOC(newchunk)) return RET_ERROR_OOM; *newcontrolchunk = newchunk; return RET_OK; } retvalue sources_retrack(const char *sourcename, const char *chunk, trackingdb tracks) { retvalue r; char *sourceversion; struct trackedpackage *pkg; struct strlist filekeys; int i; //TODO: elliminate duplicate code! assert(sourcename!=NULL); if (interrupted()) return RET_ERROR_INTERRUPTED; r = chunk_getvalue(chunk, "Version", &sourceversion); if (r == RET_NOTHING) { fprintf(stderr, "Missing 'Version' field in chunk:'%s'\n", chunk); r = RET_ERROR; } if (RET_WAS_ERROR(r)) { return r; } r = sources_getfilekeys(chunk, &filekeys); if (r == RET_NOTHING) { fprintf(stderr, "Malformed source control:'%s'\n", chunk); r = RET_ERROR; } if (RET_WAS_ERROR(r)) { free(sourceversion); return r; } r = tracking_getornew(tracks, sourcename, sourceversion, &pkg); free(sourceversion); if (RET_WAS_ERROR(r)) { strlist_done(&filekeys); return r; } // TODO: error handling is suboptimal here. // is there a way to again remove old additions (esp. references) // where something fails? 
for (i = 0 ; i < filekeys.count ; i++) { r = trackedpackage_addfilekey(tracks, pkg, ft_SOURCE, filekeys.values[i], true); filekeys.values[i] = NULL; if (RET_WAS_ERROR(r)) { strlist_done(&filekeys); trackedpackage_free(pkg); return r; } } strlist_done(&filekeys); return tracking_save(tracks, pkg); } retvalue sources_getsourceandversion(const char *chunk, const char *packagename, char **source, char **version) { retvalue r; char *sourceversion; char *sourcename; //TODO: elliminate duplicate code! assert(packagename!=NULL); r = chunk_getvalue(chunk, "Version", &sourceversion); if (r == RET_NOTHING) { fprintf(stderr, "Missing 'Version' field in chunk:'%s'\n", chunk); r = RET_ERROR; } if (RET_WAS_ERROR(r)) { return r; } sourcename = strdup(packagename); if (FAILEDTOALLOC(sourcename)) { free(sourceversion); return RET_ERROR_OOM; } *source = sourcename; *version = sourceversion; return RET_OK; } /****************************************************************/ static inline retvalue getvalue(const char *filename, const char *chunk, const char *field, char **value) { retvalue r; r = chunk_getvalue(chunk, field, value); if (r == RET_NOTHING) { fprintf(stderr, "Missing '%s' field in %s!\n", field, filename); r = RET_ERROR; } return r; } static inline retvalue checkvalue(const char *filename, const char *chunk, const char *field) { retvalue r; r = chunk_checkfield(chunk, field); if (r == RET_NOTHING) { fprintf(stderr, "Cannot find '%s' field in %s!\n", field, filename); r = RET_ERROR; } return r; } static inline retvalue getvalue_n(const char *chunk, const char *field, char **value) { retvalue r; r = chunk_getvalue(chunk, field, value); if (r == RET_NOTHING) { *value = NULL; } return r; } retvalue sources_readdsc(struct dsc_headers *dsc, const char *filename, const char *filenametoshow, bool *broken) { retvalue r; struct strlist filelines[cs_hashCOUNT]; enum checksumtype cs; r = signature_readsignedchunk(filename, filenametoshow, &dsc->control, NULL, broken); if 
(RET_WAS_ERROR(r)) { return r; } if (verbose > 100) { fprintf(stderr, "Extracted control chunk from '%s': '%s'\n", filenametoshow, dsc->control); } /* first look for fields that should be there */ r = chunk_getname(dsc->control, "Source", &dsc->name, false); if (r == RET_NOTHING) { fprintf(stderr, "Missing 'Source' field in %s!\n", filenametoshow); return RET_ERROR; } if (RET_WAS_ERROR(r)) return r; /* This is needed and cannot be ignored unless * sources_complete is changed to not need it */ r = checkvalue(filenametoshow, dsc->control, "Format"); if (RET_WAS_ERROR(r)) return r; r = checkvalue(filenametoshow, dsc->control, "Maintainer"); if (RET_WAS_ERROR(r)) return r; r = getvalue(filenametoshow, dsc->control, "Version", &dsc->version); if (RET_WAS_ERROR(r)) return r; r = getvalue_n(dsc->control, SECTION_FIELDNAME, &dsc->section); if (RET_WAS_ERROR(r)) return r; r = getvalue_n(dsc->control, PRIORITY_FIELDNAME, &dsc->priority); if (RET_WAS_ERROR(r)) return r; for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) { assert (source_checksum_names[cs] != NULL); r = chunk_getextralinelist(dsc->control, source_checksum_names[cs], &filelines[cs]); if (r == RET_NOTHING) { if (cs == cs_md5sum) { fprintf(stderr, "Missing 'Files' field in '%s'!\n", filenametoshow); r = RET_ERROR; } else strlist_init(&filelines[cs]); } if (RET_WAS_ERROR(r)) { while (cs-- > cs_md5sum) { strlist_done(&filelines[cs]); } return r; } } r = checksumsarray_parse(&dsc->files, filelines, filenametoshow); for (cs = cs_md5sum ; cs < cs_hashCOUNT ; cs++) { strlist_done(&filelines[cs]); } return r; } void sources_done(struct dsc_headers *dsc) { free(dsc->name); free(dsc->version); free(dsc->control); checksumsarray_done(&dsc->files); free(dsc->section); free(dsc->priority); } retvalue sources_complete(const struct dsc_headers *dsc, const char *directory, const struct overridedata *override, const char *section, const char *priority, char **newcontrol) { retvalue r; struct fieldtoadd *replace; char *newchunk, 
*newchunk2; char *newfilelines, *newsha1lines, *newsha256lines; assert(section != NULL && priority != NULL); newchunk2 = chunk_normalize(dsc->control, "Package", dsc->name); if (FAILEDTOALLOC(newchunk2)) return RET_ERROR_OOM; r = checksumsarray_genfilelist(&dsc->files, &newfilelines, &newsha1lines, &newsha256lines); if (RET_WAS_ERROR(r)) { free(newchunk2); return r; } assert (newfilelines != NULL); replace = aodfield_new("Checksums-Sha256", newsha256lines, NULL); if (!FAILEDTOALLOC(replace)) replace = aodfield_new("Checksums-Sha1", newsha1lines, replace); if (!FAILEDTOALLOC(replace)) replace = deletefield_new("Source", replace); if (!FAILEDTOALLOC(replace)) replace = addfield_new("Files", newfilelines, replace); if (!FAILEDTOALLOC(replace)) replace = addfield_new("Directory", directory, replace); if (!FAILEDTOALLOC(replace)) replace = deletefield_new("Status", replace); if (!FAILEDTOALLOC(replace)) replace = addfield_new(SECTION_FIELDNAME, section, replace); if (!FAILEDTOALLOC(replace)) replace = addfield_new(PRIORITY_FIELDNAME, priority, replace); if (!FAILEDTOALLOC(replace)) replace = override_addreplacefields(override, replace); if (FAILEDTOALLOC(replace)) { free(newsha256lines); free(newsha1lines); free(newfilelines); free(newchunk2); return RET_ERROR_OOM; } newchunk = chunk_replacefields(newchunk2, replace, "Files", true); free(newsha256lines); free(newsha1lines); free(newfilelines); free(newchunk2); addfield_free(replace); if (FAILEDTOALLOC(newchunk)) { return RET_ERROR_OOM; } *newcontrol = newchunk; return RET_OK; } /* update Checksums */ retvalue sources_complete_checksums(const char *chunk, const struct strlist *filekeys, struct checksums **c, char **out) { struct fieldtoadd *replace; char *newchunk; char *newfilelines, *newsha1lines, *newsha256lines; struct checksumsarray checksums; retvalue r; int i; /* fake a checksumarray... 
*/ checksums.checksums = c; checksums.names.count = filekeys->count; checksums.names.values = nzNEW(filekeys->count, char *); if (FAILEDTOALLOC(checksums.names.values)) return RET_ERROR_OOM; for (i = 0 ; i < filekeys->count ; i++) { checksums.names.values[i] = (char*) dirs_basename(filekeys->values[i]); } r = checksumsarray_genfilelist(&checksums, &newfilelines, &newsha1lines, &newsha256lines); free(checksums.names.values); if (RET_WAS_ERROR(r)) return r; assert (newfilelines != NULL); replace = aodfield_new("Checksums-Sha256", newsha256lines, NULL); if (!FAILEDTOALLOC(replace)) replace = aodfield_new("Checksums-Sha1", newsha1lines, replace); if (!FAILEDTOALLOC(replace)) replace = addfield_new("Files", newfilelines, replace); if (FAILEDTOALLOC(replace)) { free(newsha256lines); free(newsha1lines); free(newfilelines); return RET_ERROR_OOM; } newchunk = chunk_replacefields(chunk, replace, "Files", true); free(newsha256lines); free(newsha1lines); free(newfilelines); addfield_free(replace); if (FAILEDTOALLOC(newchunk)) return RET_ERROR_OOM; *out = newchunk; return RET_OK; } char *calc_source_basename(const char *name, const char *version) { const char *v = strchr(version, ':'); if (v != NULL) v++; else v = version; return mprintf("%s_%s.dsc", name, v); } char *calc_sourcedir(component_t component, const char *sourcename) { assert (*sourcename != '\0'); if (sourcename[0] == 'l' && sourcename[1] == 'i' && sourcename[2] == 'b' && sourcename[3] != '\0') return mprintf("pool/%s/lib%c/%s", atoms_components[component], sourcename[3], sourcename); else if (*sourcename != '\0') return mprintf("pool/%s/%c/%s", atoms_components[component], sourcename[0], sourcename); else return NULL; } char *calc_filekey(component_t component, const char *sourcename, const char *filename) { if (sourcename[0] == 'l' && sourcename[1] == 'i' && sourcename[2] == 'b' && sourcename[3] != '\0') return mprintf("pool/%s/lib%c/%s/%s", atoms_components[component], sourcename[3], sourcename, filename); else 
/* Return the pool directory holding byhand files of a source package:
 * <sourcedir>/<name>_<version>_byhand, with the same lib* special-casing
 * as calc_sourcedir. */
char *calc_byhanddir(component_t component, const char *sourcename, const char *version) {
	if (strncmp(sourcename, "lib", 3) == 0 && sourcename[3] != '\0')
		return mprintf("pool/%s/lib%c/%s/%s_%s_byhand",
				atoms_components[component],
				sourcename[3], sourcename,
				sourcename, version);
	else if (*sourcename != '\0')
		return mprintf("pool/%s/%c/%s/%s_%s_byhand",
				atoms_components[component],
				sourcename[0], sourcename,
				sourcename, version);
	else
		return NULL;
}
IGNORING__(false, what, __VA_ARGS__) #define IGNORABLE(what) ignore[IGN_ ## what] retvalue set_ignore(const char *, bool, enum config_option_owner); #endif reprepro-4.13.1/signature_check.c0000644000175100017510000006320212152655314013672 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003,2004,2005,2006,2007,2009,2012 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include "signature_p.h" #include "ignore.h" #include "chunks.h" #include "readtextfile.h" #ifdef HAVE_LIBGPGME static retvalue parse_condition_part(bool *allow_subkeys_p, bool *allow_bad_p, const char *full_condition, const char **condition_p, /*@out@*/ char **next_key_p) { const char *key = *condition_p, *p; char *next_key, *q; size_t kl; *allow_bad_p = false; *allow_subkeys_p = false; while (*key != '\0' && xisspace(*key)) key++; if (*key == '\0') { fprintf(stderr, "Error: unexpected end of VerifyRelease condition '%s'!\n", full_condition); return RET_ERROR; } p = key; while ((*p >= 'A' && *p <= 'F') || (*p >= 'a' && *p <= 'f') || (*p >= '0' && *p <= '9')) p++; if (*p != '\0' && !xisspace(*p) && *p != '|' && *p != '!' 
&& *p != '+') { fprintf(stderr, "Error: Unexpected character 0x%02hhx='%c' in VerifyRelease condition '%s'!\n", *p, *p, full_condition); return RET_ERROR; } kl = p - key; if (kl < 8) { fprintf(stderr, "Error: Too short key id '%.*s' in VerifyRelease condition '%s'!\n", (int)kl, key, full_condition); return RET_ERROR; } next_key = strndup(key, kl); if (FAILEDTOALLOC(next_key)) return RET_ERROR_OOM; key = p; for (q = next_key ; *q != '\0' ; q++) { if (*q >= 'a' && *q <= 'f') *q -= 'a' - 'A'; } while (*key != '\0' && xisspace(*key)) key++; if (*key == '!') { *allow_bad_p = true; key++; } while (*key != '\0' && xisspace(*key)) key++; if (*key == '+') { *allow_subkeys_p = true; key++; } while (*key != '\0' && xisspace(*key)) key++; if ((*key >= 'A' && *key <= 'F') || (*key >= 'a' && *key <= 'f') || (*key >= '0' && *key <= '9')) { free(next_key); fprintf(stderr, "Error: Space separated key-ids in VerifyRelease condition '%s'!\n" "(Alternate keys can be separated with '|'. Do not put spaces in key-ids.)\n", full_condition); return RET_ERROR; } if (*key != '\0' && *key != '|') { free(next_key); fprintf(stderr, "Error: Unexpected character 0x%02hhx='%c' in VerifyRelease condition '%s'!\n", *key, *key, full_condition); return RET_ERROR; } if (*key == '|') key++; *next_key_p = next_key; *condition_p = key; return RET_OK; } static struct known_key { struct known_key *next; /* subkeys, first is primary key */ int count; struct known_subkey { /* full fingerprint or keyid */ char *name; unsigned int name_len; /* true if revoked */ bool revoked; /* true if expired */ bool expired; /* false if invalid or cannot sign */ bool cansign; } subkeys[]; } *known_keys = NULL; struct requested_key { /* pointer to the key in question */ const struct known_key *key; /* which of those keys are requested, -1 for any (i.e. 
allow subkeys) */ int subkey; /* allow some problems, if requested by the user */ bool allow_bad; }; static retvalue found_key(struct known_key *k, int i, bool allow_subkeys, bool allow_bad, const char *full_condition, const struct known_key **key_found, int *index_found) { if (!allow_bad && k->subkeys[i].revoked) { fprintf(stderr, "VerifyRelease condition '%s' lists revoked key '%s'.\n" "(To use it anyway, append it with a '!' to force usage).\n", full_condition, k->subkeys[i].name); return RET_ERROR; } if (!allow_bad && k->subkeys[i].expired) { fprintf(stderr, "VerifyRelease condition '%s' lists expired key '%s'.\n" "(To use it anyway, append it with a '!' to force usage).\n", full_condition, k->subkeys[i].name); return RET_ERROR; } if (!allow_bad && !k->subkeys[i].cansign) { fprintf(stderr, "VerifyRelease condition '%s' lists non-signing key '%s'.\n" "(To use it anyway, append it with a '!' to force usage).\n", full_condition, k->subkeys[i].name); return RET_ERROR; } if (allow_subkeys) { if (i != 0) { fprintf(stderr, "VerifyRelease condition '%s' lists non-primary key '%s' with '+'.\n", full_condition, k->subkeys[i].name); return RET_ERROR; } *index_found = -1; } else *index_found = i; *key_found = k; return RET_OK; } /* name must already be upper-case */ static retvalue load_key(const char *name, bool allow_subkeys, bool allow_bad, const char *full_condition, const struct known_key **key_found, int *index_found) { gpg_error_t err; gpgme_key_t gpgme_key = NULL; gpgme_subkey_t subkey; int found = -1; struct known_key *k; int i; size_t l = strlen(name); /* first look if this key was already retrieved: */ for (k = known_keys ; k != NULL ; k = k->next) { for(i = 0 ; i < k->count ; i++) { struct known_subkey *s = &k->subkeys[i]; if (s->name_len < l) continue; if (memcmp(name, s->name + (s->name_len - l), l) != 0) continue; return found_key(k, i, allow_subkeys, allow_bad, full_condition, key_found, index_found); } } /* If not yet found, request it: */ err = 
gpgme_get_key(context, name, &gpgme_key, 0); if ((gpg_err_code(err) == GPG_ERR_EOF) && gpgme_key == NULL) { fprintf(stderr, "Error: unknown key '%s'!\n", name); return RET_ERROR_MISSING; } if (err != 0) { fprintf(stderr, "gpgme error %s:%d retrieving key '%s': %s\n", gpg_strsource(err), (int)gpg_err_code(err), name, gpg_strerror(err)); if (gpg_err_code(err) == GPG_ERR_ENOMEM) return RET_ERROR_OOM; else return RET_ERROR_GPGME; } i = 0; subkey = gpgme_key->subkeys; while (subkey != NULL) { subkey = subkey->next; i++; } k = calloc(1, sizeof(struct known_key) + i * sizeof(struct known_subkey)); if (FAILEDTOALLOC(k)) { gpgme_key_unref(gpgme_key); return RET_ERROR_OOM; } k->count = i; k->next = known_keys; known_keys = k; subkey = gpgme_key->subkeys; for (i = 0 ; i < k->count ; i++ , subkey = subkey->next) { struct known_subkey *s = &k->subkeys[i]; assert (subkey != NULL); s->revoked = subkey->revoked; s->expired = subkey->expired; s->cansign = subkey->can_sign && !subkey->invalid; s->name = strdup(subkey->keyid); if (FAILEDTOALLOC(s->name)) { gpgme_key_unref(gpgme_key); return RET_ERROR_OOM; } for (char *p = s->name ; *p != '\0' ; p++) { if (*p >= 'a' && *p <= 'z') *p -= 'a'-'A'; } s->name_len = strlen(s->name); if (memcmp(name, s->name + (s->name_len - l), l) == 0) found = i; } assert (subkey == NULL); gpgme_key_unref(gpgme_key); if (found < 0) { fprintf(stderr, "Error: not a valid key id '%s'!\n" "Use hex-igits from the end of the key as identifier\n", name); return RET_ERROR; } return found_key(k, found, allow_subkeys, allow_bad, full_condition, key_found, index_found); } static void free_known_key(/*@only@*/struct known_key *k) { int i; for (i = 0 ; i < k->count ; i++) { free(k->subkeys[i].name); } free(k); } void free_known_keys(void) { while (known_keys != NULL) { struct known_key *k = known_keys; known_keys = k->next; free_known_key(k); } known_keys = NULL; } /* This checks a Release.gpg/Release file pair. requirements is a list of * requirements. 
(as this Release file can be requested by multiple update * rules, there can be multiple requirements for one file) */ struct signature_requirement { /* next condition */ struct signature_requirement *next; /* the original desription for error messages */ char *condition; /* an array of or-connected conditions */ size_t num_keys; struct requested_key keys[]; }; #define sizeof_requirement(n) (sizeof(struct signature_requirement) + (n) * sizeof(struct requested_key)) void signature_requirements_free(struct signature_requirement *list) { while (list != NULL) { struct signature_requirement *p = list; list = p->next; free(p->condition); free(p); } } static bool key_good(const struct requested_key *req, const gpgme_signature_t signatures) { const struct known_key *k = req->key; gpgme_signature_t sig; for (sig = signatures ; sig != NULL ; sig = sig->next) { const char *fpr = sig->fpr; size_t l = strlen(sig->fpr); int i; /* while gpg reports the subkey of an key that is expired to be expired to, it does not tell this in the signature, so we use this here... */ bool key_expired = false; if (req->subkey < 0) { /* any subkey is allowed */ for(i = 0 ; i < k->count ; i++) { const struct known_subkey *s = &k->subkeys[i]; if (s->name_len > l) continue; if (memcmp(s->name, fpr + (l - s->name_len), s->name_len) != 0) continue; key_expired = k->subkeys[i].expired; break; } if (i >= k->count) continue; } else { const struct known_subkey *s; assert (req->subkey < k->count); s = &k->subkeys[req->subkey]; if (memcmp(s->name, fpr + (l - s->name_len), s->name_len) != 0) continue; key_expired = k->subkeys[req->subkey].expired; } /* only accept perfectly good signatures and silently ignore everything else. 
Those are warned about or even accepted in the run with key_good_enough */ if (gpg_err_code(sig->status) == GPG_ERR_NO_ERROR && !key_expired) return true; /* we have to continue otherwise, as another subkey might still follow */ continue; } /* no valid signature with this key found */ return false; } static bool key_good_enough(const struct requested_key *req, const gpgme_signature_t signatures, const char *releasegpg, const char *release) { const struct known_key *k = req->key; gpgme_signature_t sig; for (sig = signatures ; sig != NULL ; sig = sig->next) { const char *fpr = sig->fpr; size_t l = strlen(sig->fpr); int i; bool key_expired = false; /* dito */ if (req->subkey < 0) { /* any subkey is allowed */ for(i = 0 ; i < k->count ; i++) { const struct known_subkey *s = &k->subkeys[i]; if (s->name_len > l) continue; if (memcmp(s->name, fpr + (l - s->name_len), s->name_len) != 0) continue; key_expired = k->subkeys[i].expired; break; } if (i >= k->count) continue; } else { const struct known_subkey *s; assert (req->subkey < k->count); s = &k->subkeys[req->subkey]; if (memcmp(s->name, fpr + (l - s->name_len), s->name_len) != 0) continue; key_expired = k->subkeys[req->subkey].expired; } /* this key we look for. if it is acceptable, we are finished. if it is not acceptable, we still have to look at the other signatures, as a signature with another subkey is following */ switch (gpg_err_code(sig->status)) { case GPG_ERR_NO_ERROR: if (! key_expired) return true; if (req->allow_bad && IGNORABLE(expiredkey)) { if (verbose >= 0) fprintf(stderr, "WARNING: valid signature in '%s' with parent-expired '%s' is accepted as requested!\n", releasegpg, fpr); return true; } fprintf(stderr, "Not accepting valid signature in '%s' with parent-EXPIRED '%s'\n", releasegpg, fpr); if (verbose >= 0) fprintf(stderr, "(To ignore it append a ! 
to the key and run reprepro with --ignore=expiredkey)\n"); /* not accepted */ continue; case GPG_ERR_KEY_EXPIRED: if (req->allow_bad && IGNORABLE(expiredkey)) { if (verbose >= 0) fprintf(stderr, "WARNING: valid signature in '%s' with expired '%s' is accepted as requested!\n", releasegpg, fpr); return true; } fprintf(stderr, "Not accepting valid signature in '%s' with EXPIRED '%s'\n", releasegpg, fpr); if (verbose >= 0) fprintf(stderr, "(To ignore it append a ! to the key and run reprepro with --ignore=expiredkey)\n"); /* not accepted */ continue; case GPG_ERR_CERT_REVOKED: if (req->allow_bad && IGNORABLE(revokedkey)) { if (verbose >= 0) fprintf(stderr, "WARNING: valid signature in '%s' with revoked '%s' is accepted as requested!\n", releasegpg, fpr); return RET_OK; } fprintf(stderr, "Not accepting valid signature in '%s' with REVOKED '%s'\n", releasegpg, fpr); if (verbose >= 0) fprintf(stderr, "(To ignore it append a ! to the key and run reprepro with --ignore=revokedkey)\n"); /* not accepted */ continue; case GPG_ERR_SIG_EXPIRED: if (req->allow_bad && IGNORABLE(expiredsignature)) { if (verbose >= 0) fprintf(stderr, "WARNING: valid but expired signature in '%s' with '%s' is accepted as requested!\n", releasegpg, fpr); return RET_OK; } fprintf(stderr, "Not accepting valid but EXPIRED signature in '%s' with '%s'\n", releasegpg, fpr); if (verbose >= 0) fprintf(stderr, "(To ignore it append a ! 
to the key and run reprepro with --ignore=expiredsignature)\n"); /* not accepted */ continue; case GPG_ERR_BAD_SIGNATURE: case GPG_ERR_NO_PUBKEY: /* not accepted */ continue; case GPG_ERR_GENERAL: if (release == NULL) fprintf(stderr, "gpgme returned an general error verifing signature with '%s' in '%s'!\n" "Try running gpg --verify '%s' manually for hints what is happening.\n" "If this does not print any errors, retry the command causing this message.\n", fpr, releasegpg, releasegpg); else fprintf(stderr, "gpgme returned an general error verifing signature with '%s' in '%s'!\n" "Try running gpg --verify '%s' '%s' manually for hints what is happening.\n" "If this does not print any errors, retry the command causing this message.\n", fpr, releasegpg, releasegpg, release); continue; /* there sadly no more is a way to make sure we have * all possible ones handled */ default: break; } fprintf(stderr, "Error checking signature (gpgme returned unexpected value %d)!\n" "Please file a bug report, so reprepro can handle this in the future.\n", gpg_err_code(sig->status)); return false; } return false; } retvalue signature_requirement_add(struct signature_requirement **list_p, const char *condition) { struct signature_requirement *req; const char *full_condition = condition; retvalue r; r = signature_init(false); if (RET_WAS_ERROR(r)) return r; if (condition == NULL || strcmp(condition, "blindtrust") == 0) return RET_NOTHING; /* no need to add the same condition multiple times */ for (req = *list_p ; req != NULL ; req = req->next) { if (strcmp(req->condition, condition) == 0) return RET_NOTHING; } req = malloc(sizeof_requirement(1)); if (FAILEDTOALLOC(req)) return RET_ERROR_OOM; req->next = NULL; req->condition = strdup(condition); if (FAILEDTOALLOC(req->condition)) { free(req); return RET_ERROR_OOM; } req->num_keys = 0; do { bool allow_subkeys, allow_bad; char *next_key; r = parse_condition_part(&allow_subkeys, &allow_bad, full_condition, &condition, &next_key); 
ASSERT_NOT_NOTHING(r); if (RET_WAS_ERROR(r)) { signature_requirements_free(req); return r; } req->keys[req->num_keys].allow_bad = allow_bad; r = load_key(next_key, allow_subkeys, allow_bad, full_condition, &req->keys[req->num_keys].key, &req->keys[req->num_keys].subkey); free(next_key); if (RET_WAS_ERROR(r)) { signature_requirements_free(req); return r; } req->num_keys++; if (*condition != '\0') { struct signature_requirement *h; h = realloc(req, sizeof_requirement(req->num_keys+1)); if (FAILEDTOALLOC(h)) { signature_requirements_free(req); return r; } req = h; } else break; } while (true); req->next = *list_p; *list_p = req; return RET_OK; } static void print_signatures(FILE *f, gpgme_signature_t s, const char *releasegpg) { char timebuffer[20]; struct tm *tm; time_t t; if (s == NULL) { fprintf(f, "gpgme reported no signatures in '%s':\n" "Either there are really none or something else is strange.\n" "One known reason for this effect is forgeting -b when signing.\n", releasegpg); return; } fprintf(f, "Signatures in '%s':\n", releasegpg); for (; s != NULL ; s = s->next) { t = s->timestamp; tm = localtime(&t); strftime(timebuffer, 19, "%Y-%m-%d", tm); fprintf(f, "'%s' (signed %s): ", s->fpr, timebuffer); switch (gpg_err_code(s->status)) { case GPG_ERR_NO_ERROR: fprintf(f, "valid\n"); continue; case GPG_ERR_KEY_EXPIRED: fprintf(f, "expired key\n"); continue; case GPG_ERR_CERT_REVOKED: fprintf(f, "key revoced\n"); continue; case GPG_ERR_SIG_EXPIRED: t = s->exp_timestamp; tm = localtime(&t); strftime(timebuffer, 19, "%Y-%m-%d", tm); fprintf(f, "expired signature (since %s)\n", timebuffer); continue; case GPG_ERR_BAD_SIGNATURE: fprintf(f, "bad signature\n"); continue; case GPG_ERR_NO_PUBKEY: fprintf(f, "missing pubkey\n"); continue; default: fprintf(f, "unknown\n"); continue; } } } static inline retvalue verify_signature(const struct signature_requirement *requirements, const char *releasegpg, const char *releasename) { gpgme_verify_result_t result; int i; const struct 
signature_requirement *req; result = gpgme_op_verify_result(context); if (result == NULL) { fprintf(stderr, "Internal error communicating with libgpgme: no result record!\n\n"); return RET_ERROR_GPGME; } for (req = requirements ; req != NULL ; req = req->next) { bool fullfilled = false; /* check first for good signatures, and then for good enough signatures, to not pester the user with warnings of one of the alternate keys, if the last one is good enough */ for (i = 0 ; (size_t)i < req->num_keys ; i++) { if (key_good(&req->keys[i], result->signatures)) { fullfilled = true; break; } } for (i = 0 ; !fullfilled && (size_t)i < req->num_keys ; i++) { if (key_good_enough(&req->keys[i], result->signatures, releasegpg, releasename)) { fullfilled = true; break; } } if (!fullfilled) { fprintf(stderr, "ERROR: Condition '%s' not fullfilled for '%s'.\n", req->condition, releasegpg); print_signatures(stderr, result->signatures, releasegpg); return RET_ERROR_BADSIG; } if (verbose > 10) { fprintf(stdout, "Condition '%s' fullfilled for '%s'.\n", req->condition, releasegpg); } } if (verbose > 20) print_signatures(stdout, result->signatures, releasegpg); return RET_OK; } retvalue signature_check(const struct signature_requirement *requirements, const char *releasegpg, const char *releasename, const char *releasedata, size_t releaselen) { gpg_error_t err; int gpgfd; gpgme_data_t dh, dh_gpg; assert (requirements != NULL); if (FAILEDTOALLOC(releasedata) || FAILEDTOALLOC(releasegpg)) return RET_ERROR_OOM; assert (context != NULL); /* Read the file and its signature into memory: */ gpgfd = open(releasegpg, O_RDONLY|O_NOCTTY); if (gpgfd < 0) { int e = errno; fprintf(stderr, "Error opening '%s': %s\n", releasegpg, strerror(e)); return RET_ERRNO(e); } err = gpgme_data_new_from_fd(&dh_gpg, gpgfd); if (err != 0) { (void)close(gpgfd); fprintf(stderr, "Error reading '%s':\n", releasegpg); return gpgerror(err); } err = gpgme_data_new_from_mem(&dh, releasedata, releaselen, 0); if (err != 0) { 
gpgme_data_release(dh_gpg); return gpgerror(err); } /* Verify the signature */ err = gpgme_op_verify(context, dh_gpg, dh, NULL); gpgme_data_release(dh_gpg); gpgme_data_release(dh); close(gpgfd); if (err != 0) { fprintf(stderr, "Error verifying '%s':\n", releasegpg); return gpgerror(err); } return verify_signature(requirements, releasegpg, releasename); } retvalue signature_check_inline(const struct signature_requirement *requirements, const char *filename, char **chunk_p) { gpg_error_t err; gpgme_data_t dh, dh_gpg; int fd; fd = open(filename, O_RDONLY|O_NOCTTY); if (fd < 0) { int e = errno; fprintf(stderr, "Error opening '%s': %s\n", filename, strerror(e)); return RET_ERRNO(e); } err = gpgme_data_new_from_fd(&dh_gpg, fd); if (err != 0) { (void)close(fd); return gpgerror(err); } err = gpgme_data_new(&dh); if (err != 0) { (void)close(fd); gpgme_data_release(dh_gpg); return gpgerror(err); } err = gpgme_op_verify(context, dh_gpg, NULL, dh); (void)close(fd); if (gpg_err_code(err) == GPG_ERR_NO_DATA) { char *chunk; const char *n; size_t len; retvalue r; gpgme_data_release(dh); gpgme_data_release(dh_gpg); r = readtextfile(filename, filename, &chunk, &len); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; assert (chunk[len] == '\0'); len = chunk_extract(chunk, chunk, len, false, &n); if (chunk[0] == '-' || *n != '\0') { fprintf(stderr, "Cannot parse '%s': found no signature but does not looks safe to be assumed unsigned, either.\n", filename); free(chunk); return RET_ERROR; } if (requirements != NULL) { free(chunk); return RET_ERROR_BADSIG; } fprintf(stderr, "WARNING: No signature found in %s, assuming it is unsigned!\n", filename); assert (chunk[len] == '\0'); *chunk_p = realloc(chunk, len+1); if (FAILEDTOALLOC(*chunk_p)) *chunk_p = chunk; return RET_OK; } else { char *plain_data, *chunk; const char *n; size_t plain_len, len; retvalue r; if (err != 0) { gpgme_data_release(dh_gpg); gpgme_data_release(dh); return gpgerror(err); } gpgme_data_release(dh_gpg); 
plain_data = gpgme_data_release_and_get_mem(dh, &plain_len); if (plain_data == NULL) { fprintf(stderr, "Error: libgpgme failed to extract the plain data out of\n" "'%s'.\n" "While it did so in a way indicating running out of memory, experience says\n" "this also happens when gpg returns a error code it does not understand.\n" "To check this please try running gpg --verify '%s' manually.\n" "Continuing extracting it ignoring all signatures...", filename, filename); return RET_ERROR; } chunk = malloc(plain_len+1); if (FAILEDTOALLOC(chunk)) return RET_ERROR_OOM; len = chunk_extract(chunk, plain_data, plain_len, false, &n); #ifdef HAVE_GPGPME_FREE gpgme_free(plain_data); #else free(plain_data); #endif assert (len <= plain_len); if (plain_len != (size_t)(n - plain_data)) { fprintf(stderr, "Cannot parse '%s': extraced signed data looks malformed.\n", filename); r = RET_ERROR; } else r = verify_signature(requirements, filename, NULL); if (RET_IS_OK(r)) { *chunk_p = realloc(chunk, len+1); if (FAILEDTOALLOC(*chunk_p)) *chunk_p = chunk; } else free(chunk); return r; } } #else /* HAVE_LIBGPGME */ retvalue signature_check(const struct signature_requirement *requirements, const char *releasegpg, const char *releasename, const char *releasedata, size_t releaselen) { assert (requirements != NULL); if (FAILEDTOALLOC(releasedata) || FAILEDTOALLOC(releasegpg)) return RET_ERROR_OOM; fprintf(stderr, "ERROR: Cannot check signatures as this reprepro binary is compiled with support\n" "for libgpgme.\n"); // TODO: "Only running external programs is supported.\n" return RET_ERROR_GPGME; } retvalue signature_check_inline(const struct signature_requirement *requirements, const char *filename, char **chunk_p) { retvalue r; char *chunk; size_t len; const char *n; if (requirements != NULL) { fprintf(stderr, "ERROR: Cannot check signatures as this reprepro binary is compiled with support\n" "for libgpgme.\n"); return RET_ERROR_GPGME; } r = readtextfile(filename, filename, &chunk, &len); assert 
(r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; assert (chunk[len] == '\0'); len = chunk_extract(chunk, chunk, len, false, &n); if (len == 0) { fprintf(stderr, "Could not find any data within '%s'!\n", filename); free(chunk); return RET_ERROR; } if (chunk[0] == '-') { const char *endmarker; if (len < 10 || memcmp(chunk, "-----BEGIN", 10) != 0) { fprintf(stderr, "Strange content of '%s': First non-space character is '-',\n" "but it does not begin with '-----BEGIN'.\n", filename); free(chunk); return RET_ERROR; } len = chunk_extract(chunk, n, strlen(n), false, &n); endmarker = strstr(chunk, "\n-----"); if (endmarker != NULL) { endmarker++; assert ((size_t)(endmarker-chunk) < len); len = endmarker-chunk; chunk[len] = '\0'; } else if (*n == '\0') { fprintf(stderr, "ERROR: Could not find end marker of signed data within '%s'.\n" "Cannot determine what is data and what is not!\n", filename); free(chunk); return RET_ERROR; } else if (strncmp(n, "-----", 5) != 0) { fprintf(stderr, "ERROR: Spurious empty line within '%s'.\n" "Cannot determine what is data and what is not!\n", filename); free(chunk); return RET_ERROR; } } else { if (*n != '\0') { fprintf(stderr, "Cannot parse '%s': found no signature but does not looks safe to be assumed unsigned, either.\n", filename); return RET_ERROR; } fprintf(stderr, "WARNING: No signature found in %s, assuming it is unsigned!\n", filename); } assert (chunk[len] == '\0'); *chunk_p = realloc(chunk, len+1); if (FAILEDTOALLOC(*chunk_p)) *chunk_p = chunk; return RET_OK; } void signature_requirements_free(/*@only@*/struct signature_requirement *p) { free(p); } retvalue signature_requirement_add(UNUSED(struct signature_requirement **x), const char *condition) { if (condition == NULL || strcmp(condition, "blindtrust") == 0) return RET_NOTHING; fprintf(stderr, "ERROR: Cannot check signatures as this reprepro binary is compiled with support\n" "for libgpgme.\n"); // TODO: "Only running external programs is supported.\n" return 
RET_ERROR_GPGME; } void free_known_keys(void) { } #endif /* HAVE_LIBGPGME */ reprepro-4.13.1/checks.c0000644000175100017510000002506612152651661012003 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003,2004,2005,2006,2007 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include "error.h" #include "ignore.h" #include "strlist.h" #include "mprintf.h" #include "names.h" #include "checks.h" typedef unsigned char uchar; /* check if the character starting where points * at is a overlong one */ static inline bool overlongUTF8(const char *character) { /* This checks for overlong utf-8 characters. * (as they might mask '.' '\0' or '/' chars). * we assume no filesystem/ar/gpg code will parse * invalid utf8, as we would only be able to rule * this out if we knew it is utf8 we are coping * with. (Well, you should not use --ignore=validchars * anyway). 
*/ uchar c = *character; if ((c & (uchar)0xC2 /*11000010*/) == (uchar)0xC0 /*11000000*/) { uchar nextc = *(character+1); if ((nextc & (uchar)0xC0 /*11000000*/) != (uchar)0x80 /*10000000*/) return false; if ((c & (uchar)0x3E /* 00111110 */) == (uchar)0) return true; if (c == (uchar)0xE0 /*11100000*/ && (nextc & (uchar)0x20 /*00100000*/) == (uchar)0) return true; if (c == (uchar)0xF0 /*11110000*/ && (nextc & (uchar)0x30 /*00110000*/) == (uchar)0) return true; if (c == (uchar)0xF8 /*11111000*/ && (nextc & (uchar)0x38 /*00111000*/) == (uchar)0) return true; if (c == (uchar)0xFC /*11111100*/ && (nextc & (uchar)0x3C /*00111100*/) == (uchar)0) return true; } return false; } #define REJECTLOWCHARS(s, str, descr) \ if ((uchar)*s < (uchar)' ') { \ fprintf(stderr, \ "Character 0x%02hhx not allowed within %s '%s'!\n", \ *s, descr, str); \ return RET_ERROR; \ } #define REJECTCHARIF(c, s, str, descr) \ if (c) { \ fprintf(stderr, \ "Character '%c' not allowed within %s '%s'!\n", \ *s, descr, string); \ return RET_ERROR; \ } /* check if this is something that can be used as directory safely */ retvalue propersourcename(const char *string) { const char *s; bool firstcharacter = true; if (string[0] == '\0') { /* This is not really ignoreable, as this will lead * to paths not normalized, so all checks go wrong */ fprintf(stderr, "Source name is not allowed to be emtpy!\n"); return RET_ERROR; } if (string[0] == '.') { /* A dot is not only hard to see, it would cause the directory * to become /./.bla, which is quite dangerous. 
*/ fprintf(stderr, "Source names are not allowed to start with a dot!\n"); return RET_ERROR; } s = string; while (*s != '\0') { if ((*s > 'z' || *s < 'a') && (*s > '9' || *s < '0') && (firstcharacter || (*s != '+' && *s != '-' && *s != '.'))) { REJECTLOWCHARS(s, string, "sourcename"); REJECTCHARIF (*s == '/', s, string, "sourcename"); if (overlongUTF8(s)) { fprintf(stderr, "This could contain an overlong UTF8 sequence, rejecting source name '%s'!\n", string); return RET_ERROR; } if (!IGNORING_(forbiddenchar, "Character 0x%02hhx not allowed in sourcename: '%s'!\n", *s, string)) { return RET_ERROR; } if (ISSET(*s, 0x80)) { if (!IGNORING_(8bit, "8bit character in source name: '%s'!\n", string)) { return RET_ERROR; } } } s++; firstcharacter = false; } return RET_OK; } /* check if this is something that can be used as directory safely */ retvalue properfilename(const char *string) { const char *s; if (string[0] == '\0') { fprintf(stderr, "Error: empty filename!\n"); return RET_ERROR; } if ((string[0] == '.' && string[1] == '\0') || (string[0] == '.' && string[1] == '.' && string[2] == '\0')) { fprintf(stderr, "File name not allowed: '%s'!\n", string); return RET_ERROR; } for (s = string ; *s != '\0' ; s++) { REJECTLOWCHARS(s, string, "filename"); REJECTCHARIF (*s == '/' , s, string, "filename"); if (ISSET(*s, 0x80)) { if (overlongUTF8(s)) { fprintf(stderr, "This could contain an overlong UTF8 sequence, rejecting file name '%s'!\n", string); return RET_ERROR; } if (!IGNORING_(8bit, "8bit character in file name: '%s'!\n", string)) { return RET_ERROR; } } } return RET_OK; } static const char *formaterror(const char *format, ...) 
{ va_list ap; static char *data = NULL; if (data != NULL) free(data); va_start(ap, format); data = vmprintf(format, ap); va_end(ap); if (data == NULL) return "Out of memory"; return data; } /* check if this is something that can be used as directory *and* identifer safely */ const char *checkfordirectoryandidentifier(const char *string) { const char *s; assert (string != NULL && string[0] != '\0'); if ((string[0] == '.' && (string[1] == '\0'||string[1]=='/'))) return "'.' is not allowed as directory part"; if ((string[0] == '.' && string[1] == '.' && (string[2] == '\0'||string[2] =='/'))) return "'..' is not allowed as directory part"; for (s = string; *s != '\0'; s++) { if (*s == '|') return "'|' is not allowed"; if ((uchar)*s < (uchar)' ') return formaterror("Character 0x%02hhx not allowed", *s); if (*s == '/' && s[1] == '.' && (s[2] == '\0' || s[2] == '/')) return "'.' is not allowed as directory part"; if (*s == '/' && s[1] == '.' && s[2] == '.' && (s[3] == '\0' || s[3] =='/')) return "'..' 
is not allowed as directory part"; if (*s == '/' && s[1] == '/') return "\"//\" is not allowed"; if (ISSET(*s, 0x80)) { if (overlongUTF8(s)) return "Contains overlong UTF-8 sequence if treated as UTF-8"; if (!IGNORABLE(8bit)) return "Contains 8bit character (use --ignore=8bit to ignore)"; } } return NULL; } /* check if this can be used as part of identifier (and as part of a filename) */ const char *checkforidentifierpart(const char *string) { const char *s; assert (string != NULL && string[0] != '\0'); for (s = string; *s != '\0' ; s++) { if (*s == '|') return "'|' is not allowed"; if (*s == '/') return "'/' is not allowed"; if ((uchar)*s < (uchar)' ') return formaterror("Character 0x%02hhx not allowed", *s); if (ISSET(*s, 0x80)) { if (overlongUTF8(s)) return "Contains overlong UTF-8 sequence if treated as UTF-8"; if (!IGNORABLE(8bit)) return "Contains 8bit character (use --ignore=8bit to ignore)"; } } return NULL; } retvalue properfilenamepart(const char *string) { const char *s; for (s = string ; *s != '\0' ; s++) { REJECTLOWCHARS(s, string, "filenamepart"); REJECTCHARIF (*s == '/' , s, string, "filenamepart"); if (ISSET(*s, 0x80)) { if (overlongUTF8(s)) { fprintf(stderr, "This could contain an overlong UTF8 sequence, rejecting part of file name '%s'!\n", string); return RET_ERROR; } if (!IGNORING_(8bit, "8bit character in part of file name: '%s'!\n", string)) return RET_ERROR; } } return RET_OK; } retvalue properversion(const char *string) { const char *s = string; bool hadepoch = false; bool first = true; bool yetonlydigits = true; if (string[0] == '\0' && !IGNORING(emptyfilenamepart, "A version string is empty!\n")) { return RET_ERROR; } if ((*s < '0' || *s > '9') && ((*s >= 'a' && *s <= 'z') || (*s >='A' && *s <= 'Z'))) { /* As there are official packages violating the rule * of policy 5.6.11 to start with a digit, disabling * this test, and only omitting a warning. 
*/ if (verbose >= 0) fprintf(stderr, "Warning: Package version '%s' does not start with a digit, violating 'should'-directive in policy 5.6.11\n", string); } for (; *s != '\0' ; s++, first=false) { if ((*s <= '9' || *s >= '0')) { continue; } if (!first && yetonlydigits && *s == ':') { hadepoch = true; continue; } yetonlydigits = false; if ((*s >= 'A' && *s <= 'Z') || (*s >= 'a' || *s <= 'z')) { yetonlydigits = false; continue; } if (first || (*s != '+' && *s != '-' && *s != '.' && *s != '~' && (!hadepoch || *s != ':'))) { REJECTLOWCHARS(s, string, "version"); REJECTCHARIF (*s == '/' , s, string, "version"); if (overlongUTF8(s)) { fprintf(stderr, "This could contain an overlong UTF8 sequence, rejecting version '%s'!\n", string); return RET_ERROR; } if (!IGNORING_(forbiddenchar, "Character '%c' not allowed in version: '%s'!\n", *s, string)) return RET_ERROR; if (ISSET(*s, 0x80)) { if (!IGNORING_(8bit, "8bit character in version: '%s'!\n", string)) return RET_ERROR; } } } return RET_OK; } retvalue properfilenames(const struct strlist *names) { int i; for (i = 0 ; i < names->count ; i ++) { retvalue r = properfilename(names->values[i]); assert (r != RET_NOTHING); if (RET_WAS_ERROR(r)) return r; } return RET_OK; } retvalue properpackagename(const char *string) { const char *s; bool firstcharacter = true; /* To be able to avoid multiple warnings, * this should always be a subset of propersourcename */ if (string[0] == '\0') { /* This is not really ignoreable, as this is a primary * key for our database */ fprintf(stderr, "Package name is not allowed to be emtpy!\n"); return RET_ERROR; } s = string; while (*s != '\0') { /* DAK also allowed upper case letters last I looked, policy * does not, so they are not allowed without --ignore=forbiddenchar */ // perhaps some extra ignore-rule for upper case? 
if ((*s > 'z' || *s < 'a') && (*s > '9' || *s < '0') && (firstcharacter || (*s != '+' && *s != '-' && *s != '.'))) { REJECTLOWCHARS(s, string, "package name"); REJECTCHARIF (*s == '/' , s, string, "package name"); if (overlongUTF8(s)) { fprintf(stderr, "This could contain an overlong UTF8 sequence, rejecting package name '%s'!\n", string); return RET_ERROR; } if (!IGNORING(forbiddenchar, "Character 0x%02hhx not allowed in package name: '%s'!\n", *s, string)) { return RET_ERROR; } if (ISSET(*s, 0x80)) { if (!IGNORING_(8bit, "8bit character in package name: '%s'!\n", string)) { return RET_ERROR; } } } s++; firstcharacter = false; } return RET_OK; } reprepro-4.13.1/ac/0000755000175100017510000000000012152655345011034 500000000000000reprepro-4.13.1/ac/depcomp0000755000175100017510000005064312152653272012336 00000000000000#! /bin/sh # depcomp - compile a program generating dependencies as side-effects scriptversion=2012-03-27.16; # UTC # Copyright (C) 1999, 2000, 2003, 2004, 2005, 2006, 2007, 2009, 2010, # 2011, 2012 Free Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # Originally written by Alexandre Oliva . case $1 in '') echo "$0: No command. 
Try '$0 --help' for more information." 1>&2 exit 1; ;; -h | --h*) cat <<\EOF Usage: depcomp [--help] [--version] PROGRAM [ARGS] Run PROGRAMS ARGS to compile a file, generating dependencies as side-effects. Environment variables: depmode Dependency tracking mode. source Source file read by 'PROGRAMS ARGS'. object Object file output by 'PROGRAMS ARGS'. DEPDIR directory where to store dependencies. depfile Dependency file to output. tmpdepfile Temporary file to use when outputting dependencies. libtool Whether libtool is used (yes/no). Report bugs to . EOF exit $? ;; -v | --v*) echo "depcomp $scriptversion" exit $? ;; esac # A tabulation character. tab=' ' # A newline character. nl=' ' if test -z "$depmode" || test -z "$source" || test -z "$object"; then echo "depcomp: Variables source, object and depmode must be set" 1>&2 exit 1 fi # Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po. depfile=${depfile-`echo "$object" | sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`} tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`} rm -f "$tmpdepfile" # Some modes work just like other modes, but use different flags. We # parameterize here, but still list the modes in the big case below, # to make depend.m4 easier to write. Note that we *cannot* use a case # here, because this file can only contain one case statement. if test "$depmode" = hp; then # HP compiler uses -M and no extra arg. gccflag=-M depmode=gcc fi if test "$depmode" = dashXmstdout; then # This is just like dashmstdout with a different argument. dashmflag=-xM depmode=dashmstdout fi cygpath_u="cygpath -u -f -" if test "$depmode" = msvcmsys; then # This is just like msvisualcpp but w/o cygpath translation. # Just convert the backslash-escaped backslashes to single forward # slashes to satisfy depend.m4 cygpath_u='sed s,\\\\,/,g' depmode=msvisualcpp fi if test "$depmode" = msvc7msys; then # This is just like msvc7 but w/o cygpath translation. 
# Just convert the backslash-escaped backslashes to single forward # slashes to satisfy depend.m4 cygpath_u='sed s,\\\\,/,g' depmode=msvc7 fi if test "$depmode" = xlc; then # IBM C/C++ Compilers xlc/xlC can output gcc-like dependency informations. gccflag=-qmakedep=gcc,-MF depmode=gcc fi case "$depmode" in gcc3) ## gcc 3 implements dependency tracking that does exactly what ## we want. Yay! Note: for some reason libtool 1.4 doesn't like ## it if -MD -MP comes after the -MF stuff. Hmm. ## Unfortunately, FreeBSD c89 acceptance of flags depends upon ## the command line argument order; so add the flags where they ## appear in depend2.am. Note that the slowdown incurred here ## affects only configure: in makefiles, %FASTDEP% shortcuts this. for arg do case $arg in -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;; *) set fnord "$@" "$arg" ;; esac shift # fnord shift # $arg done "$@" stat=$? if test $stat -eq 0; then : else rm -f "$tmpdepfile" exit $stat fi mv "$tmpdepfile" "$depfile" ;; gcc) ## There are various ways to get dependency output from gcc. Here's ## why we pick this rather obscure method: ## - Don't want to use -MD because we'd like the dependencies to end ## up in a subdir. Having to rename by hand is ugly. ## (We might end up doing this anyway to support other compilers.) ## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like ## -MM, not -M (despite what the docs say). ## - Using -M directly means running the compiler twice (even worse ## than renaming). if test -z "$gccflag"; then gccflag=-MD, fi "$@" -Wp,"$gccflag$tmpdepfile" stat=$? if test $stat -eq 0; then : else rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" echo "$object : \\" > "$depfile" alpha=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz ## The second -e expression handles DOS-style file names with drive letters. 
sed -e 's/^[^:]*: / /' \ -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile" ## This next piece of magic avoids the "deleted header file" problem. ## The problem is that when a header file which appears in a .P file ## is deleted, the dependency causes make to die (because there is ## typically no way to rebuild the header). We avoid this by adding ## dummy dependencies for each header file. Too bad gcc doesn't do ## this for us directly. tr ' ' "$nl" < "$tmpdepfile" | ## Some versions of gcc put a space before the ':'. On the theory ## that the space means something, we add a space to the output as ## well. hp depmode also adds that space, but also prefixes the VPATH ## to the object. Take care to not repeat it in the output. ## Some versions of the HPUX 10.20 sed can't process this invocation ## correctly. Breaking it into two sed invocations is a workaround. sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; hp) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; sgi) if test "$libtool" = yes; then "$@" "-Wp,-MDupdate,$tmpdepfile" else "$@" -MDupdate "$tmpdepfile" fi stat=$? if test $stat -eq 0; then : else rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files echo "$object : \\" > "$depfile" # Clip off the initial element (the dependent). Don't try to be # clever and replace this with sed code, as IRIX sed won't handle # lines with more than a fixed number of characters (4096 in # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines; # the IRIX cc adds comments like '#:fec' to the end of the # dependency line. 
tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' | \ tr "$nl" ' ' >> "$depfile" echo >> "$depfile" # The second pass generates a dummy entry for each header file. tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \ >> "$depfile" else # The sourcefile does not contain any dependencies, so just # store a dummy comment line, to avoid errors with the Makefile # "include basename.Plo" scheme. echo "#dummy" > "$depfile" fi rm -f "$tmpdepfile" ;; xlc) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; aix) # The C for AIX Compiler uses -M and outputs the dependencies # in a .u file. In older versions, this file always lives in the # current directory. Also, the AIX compiler puts '$object:' at the # start of each line; $object doesn't have directory information. # Version 6 uses the directory in both cases. dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` test "x$dir" = "x$object" && dir= base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` if test "$libtool" = yes; then tmpdepfile1=$dir$base.u tmpdepfile2=$base.u tmpdepfile3=$dir.libs/$base.u "$@" -Wc,-M else tmpdepfile1=$dir$base.u tmpdepfile2=$dir$base.u tmpdepfile3=$dir$base.u "$@" -M fi stat=$? if test $stat -eq 0; then : else rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" do test -f "$tmpdepfile" && break done if test -f "$tmpdepfile"; then # Each line is of the form 'foo.o: dependent.h'. # Do two passes, one to just change these to # '$object: dependent.h' and one to simply 'dependent.h:'. 
sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile" sed -e 's,^.*\.[a-z]*:['"$tab"' ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile" else # The sourcefile does not contain any dependencies, so just # store a dummy comment line, to avoid errors with the Makefile # "include basename.Plo" scheme. echo "#dummy" > "$depfile" fi rm -f "$tmpdepfile" ;; icc) # Intel's C compiler anf tcc (Tiny C Compiler) understand '-MD -MF file'. # However on # $CC -MD -MF foo.d -c -o sub/foo.o sub/foo.c # ICC 7.0 will fill foo.d with something like # foo.o: sub/foo.c # foo.o: sub/foo.h # which is wrong. We want # sub/foo.o: sub/foo.c # sub/foo.o: sub/foo.h # sub/foo.c: # sub/foo.h: # ICC 7.1 will output # foo.o: sub/foo.c sub/foo.h # and will wrap long lines using '\': # foo.o: sub/foo.c ... \ # sub/foo.h ... \ # ... # tcc 0.9.26 (FIXME still under development at the moment of writing) # will emit a similar output, but also prepend the continuation lines # with horizontal tabulation characters. "$@" -MD -MF "$tmpdepfile" stat=$? if test $stat -eq 0; then : else rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" # Each line is of the form 'foo.o: dependent.h', # or 'foo.o: dep1.h dep2.h \', or ' dep3.h dep4.h \'. # Do two passes, one to just change these to # '$object: dependent.h' and one to simply 'dependent.h:'. sed -e "s/^[ $tab][ $tab]*/ /" -e "s,^[^:]*:,$object :," \ < "$tmpdepfile" > "$depfile" sed ' s/[ '"$tab"'][ '"$tab"']*/ /g s/^ *// s/ *\\*$// s/^[^:]*: *// /^$/d /:$/d s/$/ :/ ' < "$tmpdepfile" >> "$depfile" rm -f "$tmpdepfile" ;; hp2) # The "hp" stanza above does not work with aCC (C++) and HP's ia64 # compilers, which have integrated preprocessors. The correct option # to use with these is +Maked; it writes dependencies to a file named # 'foo.d', which lands next to the object file, wherever that # happens to be. # Much of this is similar to the tru64 case; see comments there. 
dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` test "x$dir" = "x$object" && dir= base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` if test "$libtool" = yes; then tmpdepfile1=$dir$base.d tmpdepfile2=$dir.libs/$base.d "$@" -Wc,+Maked else tmpdepfile1=$dir$base.d tmpdepfile2=$dir$base.d "$@" +Maked fi stat=$? if test $stat -eq 0; then : else rm -f "$tmpdepfile1" "$tmpdepfile2" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" do test -f "$tmpdepfile" && break done if test -f "$tmpdepfile"; then sed -e "s,^.*\.[a-z]*:,$object:," "$tmpdepfile" > "$depfile" # Add 'dependent.h:' lines. sed -ne '2,${ s/^ *// s/ \\*$// s/$/:/ p }' "$tmpdepfile" >> "$depfile" else echo "#dummy" > "$depfile" fi rm -f "$tmpdepfile" "$tmpdepfile2" ;; tru64) # The Tru64 compiler uses -MD to generate dependencies as a side # effect. 'cc -MD -o foo.o ...' puts the dependencies into 'foo.o.d'. # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put # dependencies in 'foo.d' instead, so we check for that too. # Subdirectories are respected. dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` test "x$dir" = "x$object" && dir= base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` if test "$libtool" = yes; then # With Tru64 cc, shared objects can also be used to make a # static library. This mechanism is used in libtool 1.4 series to # handle both shared and static libraries in a single compilation. # With libtool 1.4, dependencies were output in $dir.libs/$base.lo.d. # # With libtool 1.5 this exception was removed, and libtool now # generates 2 separate objects for the 2 libraries. These two # compilations output dependencies in $dir.libs/$base.o.d and # in $dir$base.o.d. We have to check for both files, because # one of the two compilations can be disabled. We should prefer # $dir$base.o.d over $dir.libs/$base.o.d because the latter is # automatically cleaned when .libs/ is deleted, while ignoring # the former would cause a distcleancheck panic. 
tmpdepfile1=$dir.libs/$base.lo.d # libtool 1.4 tmpdepfile2=$dir$base.o.d # libtool 1.5 tmpdepfile3=$dir.libs/$base.o.d # libtool 1.5 tmpdepfile4=$dir.libs/$base.d # Compaq CCC V6.2-504 "$@" -Wc,-MD else tmpdepfile1=$dir$base.o.d tmpdepfile2=$dir$base.d tmpdepfile3=$dir$base.d tmpdepfile4=$dir$base.d "$@" -MD fi stat=$? if test $stat -eq 0; then : else rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4" do test -f "$tmpdepfile" && break done if test -f "$tmpdepfile"; then sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile" sed -e 's,^.*\.[a-z]*:['"$tab"' ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile" else echo "#dummy" > "$depfile" fi rm -f "$tmpdepfile" ;; msvc7) if test "$libtool" = yes; then showIncludes=-Wc,-showIncludes else showIncludes=-showIncludes fi "$@" $showIncludes > "$tmpdepfile" stat=$? grep -v '^Note: including file: ' "$tmpdepfile" if test "$stat" = 0; then : else rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" echo "$object : \\" > "$depfile" # The first sed program below extracts the file names and escapes # backslashes for cygpath. The second sed program outputs the file # name when reading, but also accumulates all include files in the # hold buffer in order to output them again at the end. This only # works with sed implementations that can handle large buffers. sed < "$tmpdepfile" -n ' /^Note: including file: *\(.*\)/ { s//\1/ s/\\/\\\\/g p }' | $cygpath_u | sort -u | sed -n ' s/ /\\ /g s/\(.*\)/'"$tab"'\1 \\/p s/.\(.*\) \\/\1:/ H $ { s/.*/'"$tab"'/ G p }' >> "$depfile" rm -f "$tmpdepfile" ;; msvc7msys) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; #nosideeffect) # This comment above is used by automake to tell side-effect # dependency tracking mechanisms from slower ones. 
dashmstdout) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout, regardless of -o. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # Remove '-o $object'. IFS=" " for arg do case $arg in -o) shift ;; $object) shift ;; *) set fnord "$@" "$arg" shift # fnord shift # $arg ;; esac done test -z "$dashmflag" && dashmflag=-M # Require at least two characters before searching for ':' # in the target name. This is to cope with DOS-style filenames: # a dependency such as 'c:/foo/bar' could be seen as target 'c' otherwise. "$@" $dashmflag | sed 's:^['"$tab"' ]*[^:'"$tab"' ][^:][^:]*\:['"$tab"' ]*:'"$object"'\: :' > "$tmpdepfile" rm -f "$depfile" cat < "$tmpdepfile" > "$depfile" tr ' ' "$nl" < "$tmpdepfile" | \ ## Some versions of the HPUX 10.20 sed can't process this invocation ## correctly. Breaking it into two sed invocations is a workaround. sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; dashXmstdout) # This case only exists to satisfy depend.m4. It is never actually # run, as this mode is specially recognized in the preamble. exit 1 ;; makedepend) "$@" || exit $? # Remove any Libtool call if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # X makedepend shift cleared=no eat=no for arg do case $cleared in no) set ""; shift cleared=yes ;; esac if test $eat = yes; then eat=no continue fi case "$arg" in -D*|-I*) set fnord "$@" "$arg"; shift ;; # Strip any option that makedepend may not understand. Remove # the object too, otherwise makedepend will parse it as a source file. 
-arch) eat=yes ;; -*|$object) ;; *) set fnord "$@" "$arg"; shift ;; esac done obj_suffix=`echo "$object" | sed 's/^.*\././'` touch "$tmpdepfile" ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@" rm -f "$depfile" # makedepend may prepend the VPATH from the source file name to the object. # No need to regex-escape $object, excess matching of '.' is harmless. sed "s|^.*\($object *:\)|\1|" "$tmpdepfile" > "$depfile" sed '1,2d' "$tmpdepfile" | tr ' ' "$nl" | \ ## Some versions of the HPUX 10.20 sed can't process this invocation ## correctly. Breaking it into two sed invocations is a workaround. sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" "$tmpdepfile".bak ;; cpp) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # Remove '-o $object'. IFS=" " for arg do case $arg in -o) shift ;; $object) shift ;; *) set fnord "$@" "$arg" shift # fnord shift # $arg ;; esac done "$@" -E | sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' | sed '$ s: \\$::' > "$tmpdepfile" rm -f "$depfile" echo "$object : \\" > "$depfile" cat < "$tmpdepfile" >> "$depfile" sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; msvisualcpp) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout. "$@" || exit $? # Remove the call to Libtool. 
if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi IFS=" " for arg do case "$arg" in -o) shift ;; $object) shift ;; "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI") set fnord "$@" shift shift ;; *) set fnord "$@" "$arg" shift shift ;; esac done "$@" -E 2>/dev/null | sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile" rm -f "$depfile" echo "$object : \\" > "$depfile" sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::'"$tab"'\1 \\:p' >> "$depfile" echo "$tab" >> "$depfile" sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile" rm -f "$tmpdepfile" ;; msvcmsys) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; none) exec "$@" ;; *) echo "Unknown depmode $depmode" 1>&2 exit 1 ;; esac exit 0 # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: reprepro-4.13.1/ac/install-sh0000755000175100017510000003325612152653271012765 00000000000000#!/bin/sh # install - install a program, script, or datafile scriptversion=2011-01-19.21; # UTC # This originates from X11R5 (mit/util/scripts/install.sh), which was # later released in X11R6 (xc/config/util/install.sh) with the # following copyright and license. 
# # Copyright (C) 1994 X Consortium # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN # AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- # TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # Except as contained in this notice, the name of the X Consortium shall not # be used in advertising or otherwise to promote the sale, use or other deal- # ings in this Software without prior written authorization from the X Consor- # tium. # # # FSF changes to this file are in the public domain. # # Calling this script install-sh is preferred over install.sh, to prevent # `make' implicit rules from creating a file called install from it # when there is no Makefile. # # This script is compatible with the BSD install script, but was written # from scratch. nl=' ' IFS=" "" $nl" # set DOITPROG to echo to test this script # Don't use :- since 4.3BSD and earlier shells don't like it. doit=${DOITPROG-} if test -z "$doit"; then doit_exec=exec else doit_exec=$doit fi # Put in absolute file names if you don't have them in your path; # or use environment vars. 
chgrpprog=${CHGRPPROG-chgrp} chmodprog=${CHMODPROG-chmod} chownprog=${CHOWNPROG-chown} cmpprog=${CMPPROG-cmp} cpprog=${CPPROG-cp} mkdirprog=${MKDIRPROG-mkdir} mvprog=${MVPROG-mv} rmprog=${RMPROG-rm} stripprog=${STRIPPROG-strip} posix_glob='?' initialize_posix_glob=' test "$posix_glob" != "?" || { if (set -f) 2>/dev/null; then posix_glob= else posix_glob=: fi } ' posix_mkdir= # Desired mode of installed file. mode=0755 chgrpcmd= chmodcmd=$chmodprog chowncmd= mvcmd=$mvprog rmcmd="$rmprog -f" stripcmd= src= dst= dir_arg= dst_arg= copy_on_change=false no_target_directory= usage="\ Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE or: $0 [OPTION]... SRCFILES... DIRECTORY or: $0 [OPTION]... -t DIRECTORY SRCFILES... or: $0 [OPTION]... -d DIRECTORIES... In the 1st form, copy SRCFILE to DSTFILE. In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. In the 4th, create DIRECTORIES. Options: --help display this help and exit. --version display version info and exit. -c (ignored) -C install only if different (preserve the last data modification time) -d create directories instead of installing files. -g GROUP $chgrpprog installed files to GROUP. -m MODE $chmodprog installed files to MODE. -o USER $chownprog installed files to USER. -s $stripprog installed files. -t DIRECTORY install into DIRECTORY. -T report an error if DSTFILE is a directory. Environment variables override the default commands: CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG " while test $# -ne 0; do case $1 in -c) ;; -C) copy_on_change=true;; -d) dir_arg=true;; -g) chgrpcmd="$chgrpprog $2" shift;; --help) echo "$usage"; exit $?;; -m) mode=$2 case $mode in *' '* | *' '* | *' '* | *'*'* | *'?'* | *'['*) echo "$0: invalid mode: $mode" >&2 exit 1;; esac shift;; -o) chowncmd="$chownprog $2" shift;; -s) stripcmd=$stripprog;; -t) dst_arg=$2 # Protect names problematic for `test' and other utilities. 
case $dst_arg in -* | [=\(\)!]) dst_arg=./$dst_arg;; esac shift;; -T) no_target_directory=true;; --version) echo "$0 $scriptversion"; exit $?;; --) shift break;; -*) echo "$0: invalid option: $1" >&2 exit 1;; *) break;; esac shift done if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then # When -d is used, all remaining arguments are directories to create. # When -t is used, the destination is already specified. # Otherwise, the last argument is the destination. Remove it from $@. for arg do if test -n "$dst_arg"; then # $@ is not empty: it contains at least $arg. set fnord "$@" "$dst_arg" shift # fnord fi shift # arg dst_arg=$arg # Protect names problematic for `test' and other utilities. case $dst_arg in -* | [=\(\)!]) dst_arg=./$dst_arg;; esac done fi if test $# -eq 0; then if test -z "$dir_arg"; then echo "$0: no input file specified." >&2 exit 1 fi # It's OK to call `install-sh -d' without argument. # This can happen when creating conditional directories. exit 0 fi if test -z "$dir_arg"; then do_exit='(exit $ret); exit $ret' trap "ret=129; $do_exit" 1 trap "ret=130; $do_exit" 2 trap "ret=141; $do_exit" 13 trap "ret=143; $do_exit" 15 # Set umask so as not to create temps with too-generous modes. # However, 'strip' requires both read and write access to temps. case $mode in # Optimize common cases. *644) cp_umask=133;; *755) cp_umask=22;; *[0-7]) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw='% 200' fi cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; *) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw=,u+rw fi cp_umask=$mode$u_plus_rw;; esac fi for src do # Protect names problematic for `test' and other utilities. case $src in -* | [=\(\)!]) src=./$src;; esac if test -n "$dir_arg"; then dst=$src dstdir=$dst test -d "$dstdir" dstdir_status=$? else # Waiting for this to be detected by the "$cpprog $src $dsttmp" command # might cause directories to be created, which would be especially bad # if $src (and thus $dsttmp) contains '*'. if test ! 
-f "$src" && test ! -d "$src"; then echo "$0: $src does not exist." >&2 exit 1 fi if test -z "$dst_arg"; then echo "$0: no destination specified." >&2 exit 1 fi dst=$dst_arg # If destination is a directory, append the input filename; won't work # if double slashes aren't ignored. if test -d "$dst"; then if test -n "$no_target_directory"; then echo "$0: $dst_arg: Is a directory" >&2 exit 1 fi dstdir=$dst dst=$dstdir/`basename "$src"` dstdir_status=0 else # Prefer dirname, but fall back on a substitute if dirname fails. dstdir=` (dirname "$dst") 2>/dev/null || expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$dst" : 'X\(//\)[^/]' \| \ X"$dst" : 'X\(//\)$' \| \ X"$dst" : 'X\(/\)' \| . 2>/dev/null || echo X"$dst" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q' ` test -d "$dstdir" dstdir_status=$? fi fi obsolete_mkdir_used=false if test $dstdir_status != 0; then case $posix_mkdir in '') # Create intermediate dirs using mode 755 as modified by the umask. # This is like FreeBSD 'install' as of 1997-10-28. umask=`umask` case $stripcmd.$umask in # Optimize common cases. *[2367][2367]) mkdir_umask=$umask;; .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; *[0-7]) mkdir_umask=`expr $umask + 22 \ - $umask % 100 % 40 + $umask % 20 \ - $umask % 10 % 4 + $umask % 2 `;; *) mkdir_umask=$umask,go-w;; esac # With -d, create the new directory with the user-specified mode. # Otherwise, rely on $mkdir_umask. if test -n "$dir_arg"; then mkdir_mode=-m$mode else mkdir_mode= fi posix_mkdir=false case $umask in *[123567][0-7][0-7]) # POSIX mkdir -p sets u+wx bits regardless of umask, which # is incompatible with FreeBSD 'install' when (umask & 300) != 0. 
;; *) tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 if (umask $mkdir_umask && exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 then if test -z "$dir_arg" || { # Check for POSIX incompatibilities with -m. # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or # other-writeable bit of parent directory when it shouldn't. # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. ls_ld_tmpdir=`ls -ld "$tmpdir"` case $ls_ld_tmpdir in d????-?r-*) different_mode=700;; d????-?--*) different_mode=755;; *) false;; esac && $mkdirprog -m$different_mode -p -- "$tmpdir" && { ls_ld_tmpdir_1=`ls -ld "$tmpdir"` test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" } } then posix_mkdir=: fi rmdir "$tmpdir/d" "$tmpdir" else # Remove any dirs left behind by ancient mkdir implementations. rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null fi trap '' 0;; esac;; esac if $posix_mkdir && ( umask $mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" ) then : else # The umask is ridiculous, or mkdir does not conform to POSIX, # or it failed possibly due to a race condition. Create the # directory the slow way, step by step, checking for races as we go. case $dstdir in /*) prefix='/';; [-=\(\)!]*) prefix='./';; *) prefix='';; esac eval "$initialize_posix_glob" oIFS=$IFS IFS=/ $posix_glob set -f set fnord $dstdir shift $posix_glob set +f IFS=$oIFS prefixes= for d do test X"$d" = X && continue prefix=$prefix$d if test -d "$prefix"; then prefixes= else if $posix_mkdir; then (umask=$mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break # Don't fail if two instances are running concurrently. test -d "$prefix" || exit 1 else case $prefix in *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; *) qprefix=$prefix;; esac prefixes="$prefixes '$qprefix'" fi fi prefix=$prefix/ done if test -n "$prefixes"; then # Don't fail if two instances are running concurrently. 
(umask $mkdir_umask && eval "\$doit_exec \$mkdirprog $prefixes") || test -d "$dstdir" || exit 1 obsolete_mkdir_used=true fi fi fi if test -n "$dir_arg"; then { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 else # Make a couple of temp file names in the proper directory. dsttmp=$dstdir/_inst.$$_ rmtmp=$dstdir/_rm.$$_ # Trap to clean up those temp files at exit. trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 # Copy the file name to the temp name. (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && # and set any options; do chmod last to preserve setuid bits. # # If any of these fail, we abort the whole thing. If we want to # ignore errors from any of these, just make sure not to ignore # errors from the above "$doit $cpprog $src $dsttmp" command. # { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && # If -C, don't bother to copy if it wouldn't change the file. if $copy_on_change && old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && eval "$initialize_posix_glob" && $posix_glob set -f && set X $old && old=:$2:$4:$5:$6 && set X $new && new=:$2:$4:$5:$6 && $posix_glob set +f && test "$old" = "$new" && $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 then rm -f "$dsttmp" else # Rename the file to the real destination. $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || # The rename failed, perhaps because mv can't rename something else # to itself, or perhaps because mv is so ancient that it does not # support -f. { # Now remove or move aside any old file at destination location. 
# We try this two ways since rm can't unlink itself on some # systems and the destination file might be busy for other # reasons. In this case, the final cleanup might fail but the new # file should still install successfully. { test ! -f "$dst" || $doit $rmcmd -f "$dst" 2>/dev/null || { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } } || { echo "$0: cannot unlink or rename $dst" >&2 (exit 1); exit 1 } } && # Now rename the file to the real destination. $doit $mvcmd "$dsttmp" "$dst" } fi || exit 1 trap '' 0 fi done # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: reprepro-4.13.1/ac/missing0000755000175100017510000002415212152653271012353 00000000000000#! /bin/sh # Common stub for a few missing GNU programs while installing. scriptversion=2012-01-06.13; # UTC # Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003, 2004, 2005, 2006, # 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc. # Originally by Fran,cois Pinard , 1996. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. if test $# -eq 0; then echo 1>&2 "Try \`$0 --help' for more information" exit 1 fi run=: sed_output='s/.* --output[ =]\([^ ]*\).*/\1/p' sed_minuso='s/.* -o \([^ ]*\).*/\1/p' # In the cases where this matters, `missing' is being run in the # srcdir already. if test -f configure.ac; then configure_ac=configure.ac else configure_ac=configure.in fi msg="missing on your system" case $1 in --run) # Try to run requested program, and just exit if it succeeds. run= shift "$@" && exit 0 # Exit code 63 means version mismatch. This often happens # when the user try to use an ancient version of a tool on # a file that requires a minimum version. In this case we # we should proceed has if the program had been absent, or # if --run hadn't been passed. if test $? = 63; then run=: msg="probably too old" fi ;; -h|--h|--he|--hel|--help) echo "\ $0 [OPTION]... PROGRAM [ARGUMENT]... Handle \`PROGRAM [ARGUMENT]...' for when PROGRAM is missing, or return an error status if there is no known handling for PROGRAM. 
Options: -h, --help display this help and exit -v, --version output version information and exit --run try to run the given command, and emulate it if it fails Supported PROGRAM values: aclocal touch file \`aclocal.m4' autoconf touch file \`configure' autoheader touch file \`config.h.in' autom4te touch the output file, or create a stub one automake touch all \`Makefile.in' files bison create \`y.tab.[ch]', if possible, from existing .[ch] flex create \`lex.yy.c', if possible, from existing .c help2man touch the output file lex create \`lex.yy.c', if possible, from existing .c makeinfo touch the output file yacc create \`y.tab.[ch]', if possible, from existing .[ch] Version suffixes to PROGRAM as well as the prefixes \`gnu-', \`gnu', and \`g' are ignored when checking the name. Send bug reports to ." exit $? ;; -v|--v|--ve|--ver|--vers|--versi|--versio|--version) echo "missing $scriptversion (GNU Automake)" exit $? ;; -*) echo 1>&2 "$0: Unknown \`$1' option" echo 1>&2 "Try \`$0 --help' for more information" exit 1 ;; esac # normalize program name to check for. program=`echo "$1" | sed ' s/^gnu-//; t s/^gnu//; t s/^g//; t'` # Now exit if we have it, but it failed. Also exit now if we # don't have it and --version was passed (most likely to detect # the program). This is about non-GNU programs, so use $1 not # $program. case $1 in lex*|yacc*) # Not GNU programs, they don't have --version. ;; *) if test -z "$run" && ($1 --version) > /dev/null 2>&1; then # We have it, but it failed. exit 1 elif test "x$2" = "x--version" || test "x$2" = "x--help"; then # Could not run --version or --help. This is probably someone # running `$TOOL --version' or `$TOOL --help' to check whether # $TOOL exists and not knowing $TOOL uses missing. exit 1 fi ;; esac # If it does not exist, or fails to run (possibly an outdated version), # try to emulate it. case $program in aclocal*) echo 1>&2 "\ WARNING: \`$1' is $msg. 
You should only need it if you modified \`acinclude.m4' or \`${configure_ac}'. You might want to install the \`Automake' and \`Perl' packages. Grab them from any GNU archive site." touch aclocal.m4 ;; autoconf*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified \`${configure_ac}'. You might want to install the \`Autoconf' and \`GNU m4' packages. Grab them from any GNU archive site." touch configure ;; autoheader*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified \`acconfig.h' or \`${configure_ac}'. You might want to install the \`Autoconf' and \`GNU m4' packages. Grab them from any GNU archive site." files=`sed -n 's/^[ ]*A[CM]_CONFIG_HEADER(\([^)]*\)).*/\1/p' ${configure_ac}` test -z "$files" && files="config.h" touch_files= for f in $files; do case $f in *:*) touch_files="$touch_files "`echo "$f" | sed -e 's/^[^:]*://' -e 's/:.*//'`;; *) touch_files="$touch_files $f.in";; esac done touch $touch_files ;; automake*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified \`Makefile.am', \`acinclude.m4' or \`${configure_ac}'. You might want to install the \`Automake' and \`Perl' packages. Grab them from any GNU archive site." find . -type f -name Makefile.am -print | sed 's/\.am$/.in/' | while read f; do touch "$f"; done ;; autom4te*) echo 1>&2 "\ WARNING: \`$1' is needed, but is $msg. You might have modified some files without having the proper tools for further handling them. You can get \`$1' as part of \`Autoconf' from any GNU archive site." file=`echo "$*" | sed -n "$sed_output"` test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` if test -f "$file"; then touch $file else test -z "$file" || exec >$file echo "#! /bin/sh" echo "# Created by GNU Automake missing as a replacement of" echo "# $ $@" echo "exit 0" chmod +x $file exit 1 fi ;; bison*|yacc*) echo 1>&2 "\ WARNING: \`$1' $msg. You should only need it if you modified a \`.y' file. 
You may need the \`Bison' package in order for those modifications to take effect. You can get \`Bison' from any GNU archive site." rm -f y.tab.c y.tab.h if test $# -ne 1; then eval LASTARG=\${$#} case $LASTARG in *.y) SRCFILE=`echo "$LASTARG" | sed 's/y$/c/'` if test -f "$SRCFILE"; then cp "$SRCFILE" y.tab.c fi SRCFILE=`echo "$LASTARG" | sed 's/y$/h/'` if test -f "$SRCFILE"; then cp "$SRCFILE" y.tab.h fi ;; esac fi if test ! -f y.tab.h; then echo >y.tab.h fi if test ! -f y.tab.c; then echo 'main() { return 0; }' >y.tab.c fi ;; lex*|flex*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified a \`.l' file. You may need the \`Flex' package in order for those modifications to take effect. You can get \`Flex' from any GNU archive site." rm -f lex.yy.c if test $# -ne 1; then eval LASTARG=\${$#} case $LASTARG in *.l) SRCFILE=`echo "$LASTARG" | sed 's/l$/c/'` if test -f "$SRCFILE"; then cp "$SRCFILE" lex.yy.c fi ;; esac fi if test ! -f lex.yy.c; then echo 'main() { return 0; }' >lex.yy.c fi ;; help2man*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified a dependency of a manual page. You may need the \`Help2man' package in order for those modifications to take effect. You can get \`Help2man' from any GNU archive site." file=`echo "$*" | sed -n "$sed_output"` test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` if test -f "$file"; then touch $file else test -z "$file" || exec >$file echo ".ab help2man is required to generate this page" exit $? fi ;; makeinfo*) echo 1>&2 "\ WARNING: \`$1' is $msg. You should only need it if you modified a \`.texi' or \`.texinfo' file, or any other file indirectly affecting the aspect of the manual. The spurious call might also be the consequence of using a buggy \`make' (AIX, DU, IRIX). You might want to install the \`Texinfo' package or the \`GNU make' package. Grab either from any GNU archive site." # The file to touch is that specified with -o ... 
file=`echo "$*" | sed -n "$sed_output"` test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` if test -z "$file"; then # ... or it is the one specified with @setfilename ... infile=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'` file=`sed -n ' /^@setfilename/{ s/.* \([^ ]*\) *$/\1/ p q }' $infile` # ... or it is derived from the source name (dir/f.texi becomes f.info) test -z "$file" && file=`echo "$infile" | sed 's,.*/,,;s,.[^.]*$,,'`.info fi # If the file does not exist, the user really needs makeinfo; # let's fail without touching anything. test -f $file || exit 1 touch $file ;; *) echo 1>&2 "\ WARNING: \`$1' is needed, and is $msg. You might have modified some files without having the proper tools for further handling them. Check the \`README' file, it often tells you about the needed prerequisites for installing this package. You may also peek at any GNU archive site, in case some other package would contain this missing \`$1' program." exit 1 ;; esac exit 0 # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: reprepro-4.13.1/readtextfile.h0000644000175100017510000000065712152651661013227 00000000000000#ifndef REPREPRO_READTEXTFILE #define REPREPRO_READTEXTFILE #ifndef REPREPRO_ERROR_H #include "error.h" #warning "What's hapening here?" #endif #ifndef REPREPRO_GLOBALS_H #include "globals.h" #warning "What's hapening here?" #endif retvalue readtextfilefd(int, const char *, /*@out@*/char **, /*@null@*//*@out@*/size_t *); retvalue readtextfile(const char *, const char *, /*@out@*/char **, /*@null@*//*@out@*/size_t *); #endif reprepro-4.13.1/signedfile.c0000644000175100017510000003300412152651661012643 00000000000000/* This file is part of "reprepro" * Copyright (C) 2003,2004,2005,2006,2007,2009,2010,2012 Bernhard R. 
Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "signature_p.h" #include "mprintf.h" #include "strlist.h" #include "dirs.h" #include "names.h" #include "chunks.h" #include "release.h" #include "filecntl.h" #include "hooks.h" #ifdef HAVE_LIBGPGME static retvalue check_signature_created(bool clearsign, bool willcleanup, /*@null@*/const struct strlist *options, const char *filename, const char *signaturename) { gpgme_sign_result_t signresult; char *uidoptions; int i; signresult = gpgme_op_sign_result(context); if (signresult != NULL && signresult->signatures != NULL) return RET_OK; /* in an ideal world, this point is never reached. * Sadly it is and people are obviously confused by it, * so do some work to give helpful messages. 
*/ if (options != NULL) { assert (options->count > 0); uidoptions = mprintf(" -u '%s'", options->values[0]); for (i = 1 ; uidoptions != NULL && i < options->count ; i++) { char *u = mprintf("%s -u '%s'", uidoptions, options->values[0]); free(uidoptions); uidoptions = u; } if (FAILEDTOALLOC(uidoptions)) return RET_ERROR_OOM; } else uidoptions = NULL; if (signresult == NULL) fputs( "Error: gpgme returned NULL unexpectedly for gpgme_op_sign_result\n", stderr); else fputs("Error: gpgme created no signature!\n", stderr); fputs( "This most likely means gpg is confused or produces some error libgpgme is\n" "not able to understand. Try running\n", stderr); if (willcleanup) fprintf(stderr, "gpg %s --output 'some-other-file' %s 'some-file'\n", (uidoptions==NULL)?"":uidoptions, clearsign?"--clearsign":"--detach-sign"); else fprintf(stderr, "gpg %s --output '%s' %s '%s'\n", (uidoptions==NULL)?"":uidoptions, signaturename, clearsign?"--clearsign":"--detach-sign", filename); fputs( "for hints what this error might have been. 
(Sometimes just running\n" "it once manually seems also to help...)\n", stderr); return RET_ERROR_GPGME; } static retvalue signature_to_file(gpgme_data_t dh_gpg, const char *signaturename) { char *signature_data; const char *p; size_t signature_len; ssize_t written; int fd, e, ret; signature_data = gpgme_data_release_and_get_mem(dh_gpg, &signature_len); if (FAILEDTOALLOC(signature_data)) return RET_ERROR_OOM; fd = open(signaturename, O_WRONLY|O_CREAT|O_EXCL|O_NOCTTY|O_NOFOLLOW, 0666); if (fd < 0) { free(signature_data); return RET_ERRNO(errno); } p = signature_data; while (signature_len > 0) { written = write(fd, p, signature_len); if (written < 0) { e = errno; fprintf(stderr, "Error %d writing to %s: %s\n", e, signaturename, strerror(e)); free(signature_data); (void)close(fd); return RET_ERRNO(e); } signature_len -= written; p += written; } #ifdef HAVE_GPGPME_FREE gpgme_free(signature_data); #else free(signature_data); #endif ret = close(fd); if (ret < 0) { e = errno; fprintf(stderr, "Error %d writing to %s: %s\n", e, signaturename, strerror(e)); return RET_ERRNO(e); } if (verbose > 1) { printf("Successfully created '%s'\n", signaturename); } return RET_OK; } static retvalue create_signature(bool clearsign, gpgme_data_t dh, /*@null@*/const struct strlist *options, const char *filename, const char *signaturename, bool willcleanup) { gpg_error_t err; gpgme_data_t dh_gpg; retvalue r; err = gpgme_data_new(&dh_gpg); if (err != 0) return gpgerror(err); err = gpgme_op_sign(context, dh, dh_gpg, clearsign?GPGME_SIG_MODE_CLEAR:GPGME_SIG_MODE_DETACH); if (err != 0) return gpgerror(err); r = check_signature_created(clearsign, willcleanup, options, filename, signaturename); if (RET_WAS_ERROR(r)) { gpgme_data_release(dh_gpg); return r; } /* releases dh_gpg: */ return signature_to_file(dh_gpg, signaturename); } static retvalue signature_sign(const struct strlist *options, const char *filename, void *data, size_t datalen, const char *signaturename, const char *clearsignfilename, 
bool willcleanup) { retvalue r; int i; gpg_error_t err; gpgme_data_t dh; assert (options != NULL && options->count > 0); assert (options->values[0][0] != '!'); r = signature_init(false); if (RET_WAS_ERROR(r)) return r; gpgme_signers_clear(context); if (options->count == 1 && (strcasecmp(options->values[0], "yes") == 0 || strcasecmp(options->values[0], "default") == 0)) { /* use default options */ options = NULL; } else for (i = 0 ; i < options->count ; i++) { const char *option = options->values[i]; gpgme_key_t key; err = gpgme_op_keylist_start(context, option, 1); if (err != 0) return gpgerror(err); err = gpgme_op_keylist_next(context, &key); if (gpg_err_code(err) == GPG_ERR_EOF) { fprintf(stderr, "Could not find any key matching '%s'!\n", option); return RET_ERROR; } err = gpgme_signers_add(context, key); gpgme_key_unref(key); if (err != 0) { gpgme_op_keylist_end(context); return gpgerror(err); } gpgme_op_keylist_end(context); } err = gpgme_data_new_from_mem(&dh, data, datalen, 0); if (err != 0) { return gpgerror(err); } r = create_signature(false, dh, options, filename, signaturename, willcleanup); if (RET_WAS_ERROR(r)) { gpgme_data_release(dh); return r; } i = gpgme_data_seek(dh, 0, SEEK_SET); if (i < 0) { int e = errno; fprintf(stderr, "Error %d rewinding gpgme's data buffer to start: %s\n", e, strerror(e)); gpgme_data_release(dh); return RET_ERRNO(e); } r = create_signature(true, dh, options, filename, clearsignfilename, willcleanup); gpgme_data_release(dh); if (RET_WAS_ERROR(r)) return r; return RET_OK; } #endif /* HAVE_LIBGPGME */ static retvalue signature_with_extern(const struct strlist *options, const char *filename, const char *clearsignfilename, char **detachedfilename_p) { const char *clearsign; const char *detached; struct stat s; int status; pid_t child, found; const char *command; assert (options->count == 2); command = options->values[1]; clearsign = (clearsignfilename == NULL)?"":clearsignfilename; detached = (*detachedfilename_p == 
NULL)?"":*detachedfilename_p; if (interrupted()) return RET_ERROR_INTERRUPTED; if (lstat(filename, &s) != 0 || !S_ISREG(s.st_mode)) { fprintf(stderr, "Internal error: lost unsigned file '%s'?!\n", filename); return RET_ERROR; } child = fork(); if (child == 0) { /* Try to close all open fd but 0,1,2 */ closefrom(3); sethookenvironment(NULL, NULL, NULL, NULL); (void)execl(command, command, filename, clearsign, detached, ENDOFARGUMENTS); fprintf(stderr, "Error executing '%s' '%s' '%s' '%s': %s\n", command, filename, clearsign, detached, strerror(errno)); _exit(255); } if (child < 0) { int e = errno; fprintf(stderr, "Error forking: %d=%s!\n", e, strerror(e)); return RET_ERRNO(e); } errno = 0; while ((found = waitpid(child, &status, 0)) < 0) { int e = errno; if (e != EINTR) { fprintf(stderr, "Error %d waiting for signing-command child %ld: %s!\n", e, (long)child, strerror(e)); return RET_ERRNO(e); } } if (found != child) { fprintf(stderr, "Confusing return value %ld from waitpid(%ld, ..., 0)", (long)found, (long)child); return RET_ERROR; } if (!WIFEXITED(status)) { fprintf(stderr, "Error: Signing-hook '%s' called with arguments '%s' '%s' '%s' terminated abnormally!\n", command, filename, clearsign, detached); return RET_ERROR; } if (WEXITSTATUS(status) != 0) { fprintf(stderr, "Error: Signing-hook '%s' called with arguments '%s' '%s' '%s' returned with exit code %d!\n", command, filename, clearsign, detached, (int)(WEXITSTATUS(status))); return RET_ERROR; } if (clearsignfilename != NULL) { if (lstat(clearsign, &s) != 0 || !S_ISREG(s.st_mode)) { fprintf(stderr, "Error: Script '%s' did not generate '%s'!\n", command, clearsign); return RET_ERROR; } else if (s.st_size == 0) { fprintf(stderr, "Error: Script '%s' created an empty '%s' file!\n", command, clearsign); return RET_ERROR; } } if (*detachedfilename_p != NULL) { if (lstat(detached, &s) != 0 || !S_ISREG(s.st_mode)) { /* no detached signature, no an error if there * was a clearsigned file:*/ if (clearsignfilename == 
NULL) { fprintf(stderr, "Error: Script '%s' did not generate '%s'!\n", command, detached); return RET_ERROR; } else { if (verbose > 1) fprintf(stderr, "Ignoring legacy detached signature '%s' not generated by '%s'\n", detached, command); detached = NULL; free(*detachedfilename_p); *detachedfilename_p = NULL; } } else if (s.st_size == 0) { fprintf(stderr, "Error: Script '%s' created an empty '%s' file!\n", command, detached); return RET_ERROR; } } return RET_OK; } struct signedfile { retvalue result; #define DATABUFFERUNITS (128ul * 1024ul) size_t bufferlen, buffersize; char *buffer; }; retvalue signature_startsignedfile(struct signedfile **out) { struct signedfile *n; n = zNEW(struct signedfile); if (FAILEDTOALLOC(n)) return RET_ERROR_OOM; n->bufferlen = 0; n->buffersize = DATABUFFERUNITS; n->buffer = malloc(n->buffersize); if (FAILEDTOALLOC(n->buffer)) { free(n); return RET_ERROR_OOM; } *out = n; return RET_OK; } void signedfile_free(struct signedfile *f) { if (f == NULL) return; free(f->buffer); free(f); return; } /* store data into buffer */ void signedfile_write(struct signedfile *f, const void *data, size_t len) { /* no need to try anything if there already was an error */ if (RET_WAS_ERROR(f->result)) return; if (len > f->buffersize - f->bufferlen) { size_t blocks = (len + f->bufferlen)/DATABUFFERUNITS; size_t newsize = (blocks + 1) * DATABUFFERUNITS; char *newbuffer; /* realloc is wasteful, but should not happen too often */ newbuffer = realloc(f->buffer, newsize); if (FAILEDTOALLOC(newbuffer)) { free(f->buffer); f->buffer = NULL; f->result = RET_ERROR_OOM; return; } f->buffer = newbuffer; f->buffersize = newsize; assert (f->bufferlen < f->buffersize); } assert (len <= f->buffersize - f->bufferlen); memcpy(f->buffer + f->bufferlen, data, len); f->bufferlen += len; assert (f->bufferlen <= f->buffersize); } retvalue signedfile_create(struct signedfile *f, const char *newplainfilename, char **newsignedfilename_p, char **newdetachedsignature_p, const struct 
strlist *options, bool willcleanup) { size_t len, ofs; int fd, ret; if (RET_WAS_ERROR(f->result)) return f->result; /* write content to file */ assert (newplainfilename != NULL); (void)dirs_make_parent(newplainfilename); (void)unlink(newplainfilename); fd = open(newplainfilename, O_WRONLY|O_CREAT|O_TRUNC|O_NOCTTY, 0666); if (fd < 0) { int e = errno; fprintf(stderr, "Error creating file '%s': %s\n", newplainfilename, strerror(e)); return RET_ERRNO(e); } ofs = 0; len = f->bufferlen; while (len > 0) { ssize_t written; written = write(fd, f->buffer + ofs, len); if (written < 0) { int e = errno; fprintf(stderr, "Error %d writing to file '%s': %s\n", e, newplainfilename, strerror(e)); (void)close(fd); return RET_ERRNO(e); } assert ((size_t)written <= len); ofs += written; len -= written; } ret = close(fd); if (ret < 0) { int e = errno; fprintf(stderr, "Error %d writing to file '%s': %s\n", e, newplainfilename, strerror(e)); return RET_ERRNO(e); } /* now do the actual signing */ if (options != NULL && options->count > 0) { retvalue r; const char *newsigned = *newsignedfilename_p; const char *newdetached = *newdetachedsignature_p; /* make sure the new files do not already exist: */ if (unlink(newdetached) != 0 && errno != ENOENT) { fprintf(stderr, "Could not remove '%s' to prepare replacement: %s\n", newdetached, strerror(errno)); return RET_ERROR; } if (unlink(newsigned) != 0 && errno != ENOENT) { fprintf(stderr, "Could not remove '%s' to prepare replacement: %s\n", newsigned, strerror(errno)); return RET_ERROR; } /* if an hook is given, use that instead */ if (options->values[0][0] == '!') r = signature_with_extern(options, newplainfilename, newsigned, newdetachedsignature_p); else #ifdef HAVE_LIBGPGME r = signature_sign(options, newplainfilename, f->buffer, f->bufferlen, newdetached, newsigned, willcleanup); #else /* HAVE_LIBGPGME */ fputs( "ERROR: Cannot creature signatures as this reprepro binary is not compiled\n" "with support for libgpgme. 
(Only external signing using 'Signwith: !hook'\n" "is supported.\n", stderr); return RET_ERROR_GPGME; #endif if (RET_WAS_ERROR(r)) return r; } else { /* no signatures requested */ free(*newsignedfilename_p); *newsignedfilename_p = NULL; free(*newdetachedsignature_p); *newdetachedsignature_p = NULL; } return RET_OK; } reprepro-4.13.1/rredpatch.c0000644000175100017510000004745612152651661012526 00000000000000/* This file is part of "reprepro" * Copyright (C) 2009 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include "error.h" #include "rredpatch.h" struct modification { /* next item in the list (sorted by oldlinestart) */ struct modification *next, *previous; /* each modification removes an (possible empty) range from * the file and replaces it with an (possible empty) range * of new lines */ int oldlinestart, oldlinecount, newlinecount; size_t len; const char *content; /* a entry might be followed by one other with the same * oldlinestart (due to merging or inefficient patches), * but always: next->oldlinestart >= oldlinestart + oldlinecount */ }; struct rred_patch { int fd; /* content of the file mapped with mmap */ char *data; off_t len; struct modification *modifications; bool alreadyinuse; }; void modification_freelist(struct modification *p) { while (p != NULL) { 
struct modification *m = p; p = m->next; free(m); } } struct modification *modification_dup(const struct modification *p) { struct modification *first = NULL, *last = NULL; for (; p != NULL ; p = p->next) { struct modification *m = NEW(struct modification); if (FAILEDTOALLOC(m)) { modification_freelist(first); return NULL; } *m = *p; m->next = NULL; m->previous = last; if (last == NULL) first = m; else m->previous->next = m; last = m; } return first; } struct modification *patch_getmodifications(struct rred_patch *p) { struct modification *m; assert (!p->alreadyinuse); m = p->modifications; p->modifications = NULL; p->alreadyinuse = true; return m; } const struct modification *patch_getconstmodifications(struct rred_patch *p) { assert (!p->alreadyinuse); return p->modifications; } static struct modification *modification_freehead(/*@only@*/struct modification *p) { struct modification *m = p->next; free(p); return m; } void patch_free(/*@only@*/struct rred_patch *p) { if (p->data != NULL) (void)munmap(p->data, p->len); if (p->fd >= 0) (void)close(p->fd); modification_freelist(p->modifications); free(p); } retvalue patch_load(const char *filename, off_t length, struct rred_patch **patch_p) { int fd; fd = open(filename, O_NOCTTY|O_RDONLY); if (fd < 0) { int err = errno; fprintf(stderr, "Error %d opening '%s' for reading: %s\n", err, filename, strerror(err)); return RET_ERRNO(err); } return patch_loadfd(filename, fd, length, patch_p); } retvalue patch_loadfd(const char *filename, int fd, off_t length, struct rred_patch **patch_p) { int i; struct rred_patch *patch; const char *p, *e, *d, *l; int number, number2, line; char type; struct modification *n; struct stat statbuf; patch = zNEW(struct rred_patch); if (FAILEDTOALLOC(patch)) { (void)close(fd); return RET_ERROR_OOM; } patch->fd = fd; i = fstat(patch->fd, &statbuf); if (i != 0) { int err = errno; fprintf(stderr, "Error %d retrieving length of '%s': %s\n", err, filename, strerror(err)); patch_free(patch); return 
RET_ERRNO(err); } if (length == -1) length = statbuf.st_size; if (statbuf.st_size != length) { int err = errno; fprintf(stderr, "Unexpected size of '%s': expected %lld, got %lld\n", filename, (long long)length, (long long)statbuf.st_size); patch_free(patch); return RET_ERRNO(err); } if (length == 0) { /* handle empty patches gracefully */ close(patch->fd); patch->fd = -1; patch->data = NULL; patch->len = 0; patch->modifications = NULL; *patch_p = patch; return RET_OK; } patch->len = length; patch->data = mmap(NULL, patch->len, PROT_READ, MAP_PRIVATE, patch->fd, 0); if (patch->data == MAP_FAILED) { int err = errno; fprintf(stderr, "Error %d mapping '%s' into memory: %s\n", err, filename, strerror(err)); patch_free(patch); return RET_ERRNO(err); } p = patch->data; e = p + patch->len; line = 1; while (p < e) { /* ,(c|d)\n or (a|i|c|d) */ d = p; number = 0; number2 = -1; while (d < e && *d >= '0' && *d <= '9') { number = (*d - '0') + 10 * number; d++; } if (d > p && d < e && *d == ',') { d++; number2 = 0; while (d < e && *d >= '0' && *d <= '9') { number2 = (*d - '0') + 10 * number2; d++; } if (number2 < number) { fprintf(stderr, "Error parsing '%s': malformed range (2nd number smaller than 1s) at line %d\n", filename, line); patch_free(patch); return RET_ERROR; } } if (d >= e || (*d != 'c' && *d != 'i' && *d != 'a' && *d != 'd')) { fprintf(stderr, "Error parsing '%s': expected rule (c,i,a or d) at line %d\n", filename, line); patch_free(patch); return RET_ERROR; } type = *d; d++; while (d < e && *d == '\r') d++; if (d >= e || *d != '\n') { fprintf(stderr, "Error parsing '%s': expected newline after command at line %d\n", filename, line); patch_free(patch); return RET_ERROR; } d++; line++; if (type != 'a' && number == 0) { fprintf(stderr, "Error parsing '%s': missing number at line %d\n", filename, line); patch_free(patch); return RET_ERROR; } if (type != 'c' && type != 'd' && number2 >= 0) { fprintf(stderr, "Error parsing '%s': line range not allowed with %c at line 
%d\n", filename, (char)type, line); patch_free(patch); return RET_ERROR; } n = zNEW(struct modification); if (FAILEDTOALLOC(n)) { patch_free(patch); return RET_ERROR_OOM; } n->next = patch->modifications; if (n->next != NULL) n->next->previous = n; patch->modifications = n; p = d; if (type == 'd') { n->content = NULL; n->len = 0; n->newlinecount = 0; } else { int startline = line; l = p; while (l < e) { p = l; while (l < e && *l != '\n') l++; if (l >= e) { if (l == p + 1 && *p == '.') { /* that is also corrupted, * but we can cure it */ break; } fprintf(stderr, "Error parsing '%s': ends in unterminated line. File most likely corrupted\n", filename); patch_free(patch); return RET_ERROR; } l++; if (p[0] == '.' && (p[1] == '\n' || p[1] == '\r')) break; line++; } if (p[0] != '.' || (l > p + 1 && p[1] != '\n' && p[1] != '\r')) { fprintf(stderr, "Error parsing '%s': ends waiting for dot. File most likely corrupted\n", filename); patch_free(patch); return RET_ERROR; } n->content = d; n->len = p - d; n->newlinecount = line - startline; p = l; line++; } if (type == 'a') { /* appends appends after instead of before something: */ n->oldlinestart = number + 1; n->oldlinecount = 0; } else if (type == 'i') { n->oldlinestart = number; n->oldlinecount = 0; } else { n->oldlinestart = number; if (number2 < 0) n->oldlinecount = 1; else n->oldlinecount = (number2 - number) + 1; } /* make sure things are in the order diff usually * generates them, which makes line-calculation much easier: */ if (n->next != NULL) { if (n->oldlinestart + n->oldlinecount > n->next->oldlinestart) { struct modification *first, *second; retvalue r; // TODO: it might be more efficient to // first store the different parts as different // patchsets and then combine... 
/* unlink and feed into patch merger */ first = n->next; first->previous = NULL; second = n; n->next = NULL; n = NULL; r = combine_patches(&n, first, second); patch->modifications = n; if (RET_WAS_ERROR(r)) { patch_free(patch); return r; } } } } *patch_p = patch; return RET_OK; } static void modification_stripendlines(struct modification *m, int r) { int lines; const char *p; m->newlinecount -= r; lines = m->newlinecount; p = m->content; while (lines > 0) { while (*p != '\n') p++; p++; lines--; } assert ((size_t)(p - m->content) <= m->len); m->len = p - m->content; } static void modification_stripstartlines(struct modification *m, int r) { const char *p; m->newlinecount -= r; p = m->content; while (r > 0) { while (*p != '\n') p++; p++; r--; } assert ((size_t)(p - m->content) <= m->len); m->len -= p - m->content; m->content = p; } static inline void move_queue(struct modification **last_p, struct modification **result_p, struct modification **from_p) { struct modification *toadd, *last; /* remove from queue: */ toadd = *from_p; *from_p = toadd->next; if (toadd->next != NULL) { toadd->next->previous = NULL; toadd->next = NULL; } /* if nothing yet, make it the first */ if (*last_p == NULL) { *result_p = toadd; toadd->previous = NULL; *last_p = toadd; return; } last = *last_p; if (toadd->oldlinestart == last->oldlinestart + last->oldlinecount) { /* check if something can be combined: */ if (toadd->newlinecount == 0) { last->oldlinecount += toadd->oldlinecount; free(toadd); return; } if (last->newlinecount == 0) { toadd->oldlinestart = last->oldlinestart; toadd->oldlinecount += last->oldlinecount; toadd->previous = last->previous; if (toadd->previous == NULL) *result_p = toadd; else toadd->previous->next = toadd; *last_p = toadd; free(last); return; } if (last->content + last->len == toadd->content) { last->oldlinecount += toadd->oldlinecount; last->newlinecount += toadd->newlinecount; last->len += toadd->len; free(toadd); return; } } toadd->previous = last; last->next 
= toadd; assert (last->oldlinestart + last->oldlinecount <= toadd->oldlinestart); *last_p = toadd; return; } /* this merges a set of modifications into an already existing stack, * modifying line numbers or even cutting away deleted/newly overwritten * stuff as necessary */ retvalue combine_patches(struct modification **result_p, /*@only@*/struct modification *first, /*@only@*/struct modification *second) { struct modification *p, *a, *result, *last; long lineofs; p = first; result = NULL; last = NULL; a = second; lineofs = 0; while (a != NULL) { /* modification totally before current one, * so just add it before it */ if (p == NULL || lineofs + a->oldlinestart + a->oldlinecount <= p->oldlinestart) { a->oldlinestart += lineofs; move_queue(&last, &result, &a); assert (p == NULL || p->oldlinestart >= last->oldlinestart + last->oldlinecount); continue; } /* modification to add after current head modification, * so finalize head modification and update lineofs */ if (lineofs + a->oldlinestart >= p->oldlinestart + p->newlinecount) { lineofs += p->oldlinecount - p->newlinecount; move_queue(&last, &result, &p); assert (lineofs + a->oldlinestart >= last->oldlinestart + last->oldlinecount); continue; } /* new modification removes everything the old one added: */ if (lineofs + a->oldlinestart <= p->oldlinestart && lineofs + a->oldlinestart + a->oldlinecount >= p->oldlinestart + p->newlinecount) { a->oldlinestart -= p->oldlinecount - p->newlinecount; a->oldlinecount += p->oldlinecount - p->newlinecount; lineofs += p->oldlinecount - p->newlinecount; p = modification_freehead(p); if (a->oldlinecount == 0 && a->newlinecount == 0) { /* a exactly cancels p */ a = modification_freehead(a); } /* otherwise a is not yet finished, * it might modify more */ continue; } /* otherwise something overlaps, things get complicated here: */ /* start of *a removes end of *p, so reduce *p: */ if (lineofs + a->oldlinestart > p->oldlinestart && lineofs + a->oldlinestart < p->oldlinestart + 
p->newlinecount && lineofs + a->oldlinestart + a->oldlinecount >= p->oldlinestart + p->newlinecount) { int removedlines = p->oldlinestart + p->newlinecount - (lineofs + a->oldlinestart); /* finalize p as before */ lineofs += p->oldlinecount - p->newlinecount; /* just telling a to delete less */ a->oldlinestart += removedlines; a->oldlinecount -= removedlines; /* and p to add less */ modification_stripendlines(p, removedlines); move_queue(&last, &result, &p); assert (lineofs + a->oldlinestart >= last->oldlinestart + last->oldlinecount); continue; } /* end of *a remove start of *p, so finalize *a and reduce *p */ if (lineofs + a->oldlinestart <= p->oldlinestart && lineofs + a->oldlinestart + a->oldlinecount > p->oldlinestart && lineofs + a->oldlinestart + a->oldlinecount < p->oldlinestart + p->newlinecount) { int removedlines = lineofs + a->oldlinestart + a->oldlinecount - p->oldlinestart; /* finalize *a with less lines deleted:*/ a->oldlinestart += lineofs; a->oldlinecount -= removedlines; if (a->oldlinecount == 0 && a->newlinecount == 0) { /* a only removed something and this was hereby * removed from p */ a = modification_freehead(a); } else move_queue(&last, &result, &a); /* and reduce the number of lines of *p */ assert (removedlines < p->newlinecount); modification_stripstartlines(p, removedlines); /* p->newlinecount got smaller, * so less will be deleted later */ lineofs -= removedlines; if (last != NULL) { assert (p->oldlinestart >= last->oldlinestart + last->oldlinecount); if (a != NULL) assert (lineofs + a->oldlinestart >= last->oldlinestart + last->oldlinecount); } /* note that a->oldlinestart+a->oldlinecount+1 * == p->oldlinestart */ continue; } /* the most complex case left, a inside p, this * needs p split in two */ if (lineofs + a->oldlinestart > p->oldlinestart && lineofs + a->oldlinestart + a->oldlinecount < p->oldlinestart + p->newlinecount) { struct modification *n; int removedlines = p->oldlinestart + p->newlinecount - (lineofs + a->oldlinestart); 
n = zNEW(struct modification); if (FAILEDTOALLOC(n)) { modification_freelist(result); modification_freelist(p); modification_freelist(a); return RET_ERROR_OOM; } *n = *p; /* all removing into the later p, so * that later numbers fit */ n->next = NULL; n->oldlinecount = 0; assert (removedlines < n->newlinecount); modification_stripendlines(n, removedlines); lineofs += n->oldlinecount - n->newlinecount; assert (lineofs+a->oldlinestart <= p->oldlinestart); move_queue(&last, &result, &n); assert (n == NULL); /* only remove this and let the rest of the * code handle the other changes */ modification_stripstartlines(p, p->newlinecount - removedlines); assert(p->newlinecount == removedlines); assert (lineofs + a->oldlinestart >= last->oldlinestart + last->oldlinecount); continue; } modification_freelist(result); modification_freelist(p); modification_freelist(a); fputs("Internal error in rred merging!\n", stderr); return RET_ERROR; } while (p != NULL) { move_queue(&last, &result, &p); } *result_p = result; return RET_OK; } retvalue patch_file(FILE *o, const char *source, const struct modification *patch) { FILE *i; int currentline, ignore, c; i = fopen(source, "r"); if (i == NULL) { int e = errno; fprintf(stderr, "Error %d opening %s: %s\n", e, source, strerror(e)); return RET_ERRNO(e); } assert (patch == NULL || patch->oldlinestart > 0); currentline = 1; do { while (patch != NULL && patch->oldlinestart == currentline) { fwrite(patch->content, patch->len, 1, o); ignore = patch->oldlinecount; patch = patch->next; while (ignore > 0) { do { c = getc(i); } while (c != '\n' && c != EOF); ignore--; currentline++; } } assert (patch == NULL || patch->oldlinestart >= currentline); while ((c = getc(i)) != '\n') { if (c == EOF) { if (patch != NULL) { fprintf(stderr, "Error patching '%s', file shorter than expected by patches!\n", source); (void)fclose(i); return RET_ERROR; } break; } putc(c, o); } if (c == EOF) break; putc(c, o); currentline++; } while (1); if (ferror(i) != 0) { int 
e = errno; fprintf(stderr, "Error %d reading %s: %s\n", e, source, strerror(e)); (void)fclose(i); return RET_ERRNO(e); } if (fclose(i) != 0) { int e = errno; fprintf(stderr, "Error %d reading %s: %s\n", e, source, strerror(e)); return RET_ERRNO(e); } return RET_OK; } void modification_printaspatch(void *f, const struct modification *m, void write_func(const void *, size_t, void *)) { const struct modification *p, *q, *r; char line[30]; int len; if (m == NULL) return; assert (m->previous == NULL); /* go to the end, as we have to print it backwards */ p = m; while (p->next != NULL) { assert (p->next->previous == p); p = p->next; } /* then print, possibly merging things */ while (p != NULL) { int start, oldcount, newcount; start = p->oldlinestart; oldcount = p->oldlinecount; newcount = p->newlinecount; if (p->next != NULL) assert (start + oldcount <= p->next->oldlinestart); r = p; for (q = p->previous ; q != NULL && q->oldlinestart + q->oldlinecount == start ; q = q->previous) { oldcount += q->oldlinecount; start = q->oldlinestart; newcount += q->newlinecount; r = q; } if (newcount == 0) { assert (oldcount > 0); if (oldcount == 1) len = snprintf(line, sizeof(line), "%dd\n", start); else len = snprintf(line, sizeof(line), "%d,%dd\n", start, start + oldcount - 1); } else { if (oldcount == 0) len = snprintf(line, sizeof(line), "%da\n", start - 1); else if (oldcount == 1) len = snprintf(line, sizeof(line), "%dc\n", start); else len = snprintf(line, sizeof(line), "%d,%dc\n", start, start + oldcount - 1); } assert (len < (int)sizeof(line)); write_func(line, len, f); if (newcount != 0) { while (r != p->next) { if (r->len > 0) write_func(r->content, r->len, f); newcount -= r->newlinecount; r = r->next; } assert (newcount == 0); write_func(".\n", 2, f); } p = q; } } /* make sure a patch is not empty and does not only add lines at the start, * to work around some problems in apt */ retvalue modification_addstuff(const char *source, struct modification **patch_p, char **line_p) 
{
	struct modification **pp, *n, *m = NULL;
	char *line = NULL;
	size_t bufsize = 0;
	ssize_t got;
	FILE *i;
	long lineno = 0;

	pp = patch_p;
	/* check if this only adds things at the start and count how many */
	while (*pp != NULL) {
		m = *pp;
		if (m->oldlinecount > 0 || m->oldlinestart > 1) {
			/* patch touches more than the start: nothing to do */
			*line_p = NULL;
			return RET_OK;
		}
		lineno += m->newlinecount;
		pp = &(*pp)->next;
	}
	/* now get the next line and claim it was changed */
	i = fopen(source, "r");
	if (i == NULL) {
		int e = errno;
		fprintf(stderr, "Error %d opening '%s': %s\n",
				e, source, strerror(e));
		return RET_ERRNO(e);
	}
	do {
		got = getline(&line, &bufsize, i);
	} while (got >= 0 && lineno-- > 0);
	if (got < 0) {
		int e = errno;
		/* You should have made sure the old file is not empty */
		fprintf(stderr, "Error %d reading '%s': %s\n",
				e, source, strerror(e));
		(void)fclose(i);
		/* getline may have allocated a buffer even on failure;
		 * it was leaked here before: */
		free(line);
		return RET_ERRNO(e);
	}
	(void)fclose(i);
	n = NEW(struct modification);
	if (FAILEDTOALLOC(n)) {
		/* line was leaked here before: */
		free(line);
		return RET_ERROR_OOM;
	}
	*pp = n;
	n->next = NULL;
	n->previous = m;
	n->oldlinestart = 1;
	n->oldlinecount = 1;
	n->newlinecount = 1;
	n->len = got;
	/* ownership of line moves to the caller via *line_p */
	n->content = line;
	*line_p = line;
	return RET_OK;
}
reprepro-4.13.1/filecntl.c0000644000175100017510000000331712152651661012336 00000000000000/* written 2007 by Bernhard R. Link
 * This file is in the public domain.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
/* NOTE(review): the header names inside <...> were lost when this file
 * was extracted; the bare #include lines below preserve what is left —
 * TODO restore the original header names from upstream filecntl.c. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "filecntl.h"
#ifndef HAVE_CLOSEFROM
/* Fallback for systems without closefrom(3): close every file
 * descriptor that is >= lowfd.  Tries the F_CLOSEM fcntl first where
 * available; otherwise closes each candidate descriptor up to the
 * _SC_OPEN_MAX limit (capped at INT_MAX, defaulting to 1024 if the
 * limit cannot be determined). */
void closefrom(int lowfd) {
	long maxopen;
	int fd;

# ifdef F_CLOSEM
	/* one call closes them all, if the platform supports it */
	if (fcntl(lowfd, F_CLOSEM, NULL) == 0)
		return;
# endif
	maxopen = sysconf(_SC_OPEN_MAX);
	if (maxopen > INT_MAX)
		maxopen = INT_MAX;
	if (maxopen < 0)
		maxopen = 1024;
	/* close() on a not-open descriptor just returns EBADF, so
	 * over-closing here is harmless */
	for (fd = lowfd ; fd <= maxopen ; fd++)
		(void)close(fd);
}
#endif
/* Set the close-on-exec flag on fd; errors are silently ignored. */
void markcloseonexec(int fd) {
	long l;

	l = fcntl(fd, F_GETFD, 0);
	if (l >= 0) {
		(void)fcntl(fd, F_SETFD, l|FD_CLOEXEC);
	}
}
/* unlink() fullfilename, printing a message on failure.
 * Returns 0 on success, otherwise the errno of the failure
 * (or EINVAL if unlink failed without setting errno). */
int deletefile(const char *fullfilename) {
	int ret, e;

	ret = unlink(fullfilename);
	if (ret != 0) {
		e = errno;
		fprintf(stderr, "error %d unlinking %s: %s\n",
				e, fullfilename, strerror(e));
		return (e != 0)?e:EINVAL;
	}
	return 0;
}
/* true if fullfilename exists and is a regular file
 * (follows symlinks, as stat() is used) */
bool isregularfile(const char *fullfilename) {
	struct stat s;
	int i;

	assert(fullfilename != NULL);
	i = stat(fullfilename, &s);
	return i == 0 && S_ISREG(s.st_mode);
}
/* true if fullfilename exists and is a directory (follows symlinks) */
bool isdirectory(const char *fullfilename) {
	struct stat s;
	int i;

	assert(fullfilename != NULL);
	i = stat(fullfilename, &s);
	return i == 0 && S_ISDIR(s.st_mode);
}
/* true if anything exists at fullfilename, including a dangling
 * symlink (lstat() is used, so links are not followed) */
bool isanyfile(const char *fullfilename) {
	struct stat s;
	int i;

	assert(fullfilename != NULL);
	i = lstat(fullfilename, &s);
	return i == 0;
}
reprepro-4.13.1/changes.c0000644000175100017510000002161012152651661012142 00000000000000/* This file is part of "reprepro"
 * Copyright (C) 2003,2004,2005,2006,2008 Bernhard R. Link
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include "error.h" #include "names.h" #include "uncompression.h" #include "checksums.h" #include "changes.h" retvalue changes_parsefileline(const char *fileline, /*@out@*/filetype *result_type, /*@out@*/char **result_basename, /*@out@*/struct hash_data *hash_p, /*@out@*/struct hash_data *size_p, /*@out@*/char **result_section, /*@out@*/char **result_priority, /*@out@*/architecture_t *result_architecture, /*@out@*/char **result_name) { const char *p, *md5start, *md5end; const char *sizestart, *sizeend; const char *sectionstart, *sectionend; const char *priostart, *prioend; const char *filestart, *nameend, *fileend; const char *archstart, *archend; const char *versionstart; filetype type; char *section, *priority, *basefilename, *name; architecture_t architecture; size_t l; bool checkfilename = false; p = fileline; while (*p !='\0' && xisspace(*p)) p++; md5start = p; while ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f')) p++; if (*p == '\0') { fprintf(stderr, "Missing md5sum in '%s'!\n", fileline); return RET_ERROR; } if (!xisspace(*p)) { fprintf(stderr, "Malformed md5 hash in '%s'!\n", fileline); return RET_ERROR; } md5end = p; while (*p !='\0' && xisspace(*p)) p++; while (*p == '0' && p[1] >= '0' && p[1] <= '9') p++; sizestart = p; while (*p >= '0' && *p <= '9') p++; if (*p == '\0') { fprintf(stderr, "Missing size (second argument) in '%s'!\n", fileline); return RET_ERROR; } if (!xisspace(*p)) { fprintf(stderr, "Malformed size (second argument) in '%s'!\n", fileline); return RET_ERROR; } sizeend = p; while (*p !='\0' && xisspace(*p)) p++; sectionstart = p; while (*p !='\0' && !xisspace(*p)) p++; sectionend = p; while (*p !='\0' && xisspace(*p)) p++; priostart = p; while (*p !='\0' 
&& !xisspace(*p)) p++; prioend = p; while (*p !='\0' && xisspace(*p)) p++; filestart = p; while (*p !='\0' && !xisspace(*p)) p++; fileend = p; while (*p !='\0' && xisspace(*p)) p++; if (*p != '\0') { fprintf(stderr, "Unexpected sixth argument in '%s'!\n", fileline); return RET_ERROR; } if (*md5start == '\0' || *sizestart == '\0' || *sectionstart == '\0' || *priostart == '\0' || *filestart == '\0') { fprintf(stderr, "Wrong number of arguments in '%s' (5 expected)!\n", fileline); return RET_ERROR; } if ((sectionend - sectionstart == 6 && strncmp(sectionstart, "byhand", 6) == 0) || (sectionend - sectionstart > 4 && strncmp(sectionstart, "raw-", 4) == 0)) { section = strndup(sectionstart, sectionend - sectionstart); priority = strndup(priostart, prioend - priostart); basefilename = strndup(filestart, fileend - filestart); if (FAILEDTOALLOC(section) || FAILEDTOALLOC(priority) || FAILEDTOALLOC(basefilename)) { free(section); free(priority); free(basefilename); return RET_ERROR_OOM; } hash_p->start = md5start; hash_p->len = md5end - md5start; size_p->start = sizestart; size_p->len = sizeend - sizestart; *result_section = section; *result_priority = priority; *result_basename = basefilename; *result_architecture = atom_unknown; *result_name = NULL; *result_type = fe_BYHAND; return RET_OK; } p = filestart; while (*p != '\0' && *p != '_' && !xisspace(*p)) p++; if (*p != '_') { if (*p == '\0') fprintf(stderr, "No underscore found in file name in '%s'!\n", fileline); else fprintf(stderr, "Unexpected character '%c' in file name in '%s'!\n", *p, fileline); return RET_ERROR; } nameend = p; p++; versionstart = p; /* changing 3.0 format to now also allow _ in source files * makes this parsing quite more ugly... 
*/ while (*p !='\0' && !xisspace(*p)) p++; l = p - versionstart; /* identify the binary types (they have no compression * and will need a _ */ if (l >= 4 && memcmp(p-4, ".deb", 4) == 0) type = fe_DEB; else if (l >= 5 && memcmp(p-5, ".udeb", 5) == 0) type = fe_UDEB; else type = fe_UNKNOWN; if (type != fe_UNKNOWN) { /* a _ should separate the version from the rest */ p = versionstart; names_overversion(&p, true); if (*p != '\0' && *p != '_') { fprintf(stderr, "Unexpected character '%c' in file name within '%s'!\n", *p, fileline); return RET_ERROR; } if (*p != '_') { fprintf(stderr, "Cannot cope with .[u]deb filename not containing an underscore (in '%s')!", fileline); return RET_ERROR; } p++; archstart = p; if (type == fe_DEB) archend = versionstart + l - 4; else { assert (type == fe_UDEB); archend = versionstart + l - 5; } if (archend - archstart == 6 && strncmp(archstart, "source", 6) == 0) { fprintf(stderr, "Architecture 'source' not allowed for .[u]debs ('%s')!\n", filestart); return RET_ERROR; } } else { enum compression c; const char *eoi; /* without those, it gets more complicated. 
* It's not .deb or .udeb, so most likely a * source file (or perhaps a log (reprepro extension)) */ /* if it uses a known compression, things are easy, * so try this first: */ c = compression_by_suffix(versionstart, &l); p = versionstart + l; archstart = "source"; archend = archstart + 6; if (l > 9 && strncmp(p-9, ".orig.tar", 9) == 0) { type = fe_ORIG; eoi = p - 9; } else if (l > 4 && strncmp(p-4, ".tar", 4) == 0) { type = fe_TAR; eoi = p - 4; } else if (l > 5 && strncmp(p-5, ".diff", 5) == 0) { type = fe_DIFF; eoi = p - 5; } else if (l > 4 && strncmp(p-4, ".dsc", 4) == 0 && c == c_none) { type = fe_DSC; eoi = p - 4; } else if (l > 4 && strncmp(p-4, ".git", 4) == 0 && c == c_none) { type = fe_ALTSRC; eoi = p - 4; } else if (l > 4 && strncmp(p-4, ".log", 4) == 0) { type = fe_LOG; eoi = p - 4; } else if (l > 6 && strncmp(p-6, ".build", 6) == 0) { type = fe_LOG; eoi = p - 6; } if (type != fe_UNKNOWN) { /* check for a proper version */ p = versionstart; names_overversion(&p, true); if (p >= eoi) { /* all well */ } else if (type == fe_TAR) { /* a tar might be a component with ugly * data between .orig- and the .tar.c */ const char *o = strstr(versionstart, ".orig-"); if (o == NULL || o > eoi) { fprintf(stderr, "Unexpected character '%c' in file name within '%s'!\n", *p, fileline); return RET_ERROR; } checkfilename = true; } else if (type == fe_LOG) { if (*p == '_') { archstart = p + 1; archend = eoi; checkfilename = true; } else { fprintf(stderr, "Unexpected character '%c' in file name within '%s'!\n", *p, fileline); } } else { fprintf(stderr, "Unexpected character '%c' in file name within '%s'!\n", *p, fileline); return RET_ERROR; } } else { /* everything else is assumed to be source */ checkfilename = true; fprintf(stderr, "Unknown file type: '%s', assuming source format...\n", fileline); } } section = strndup(sectionstart, sectionend - sectionstart); priority = strndup(priostart, prioend - priostart); basefilename = strndup(filestart, fileend - filestart); // TODO: 
this does not make much sense for log files, as they might // list multiple.. architecture = architecture_find_l(archstart, archend - archstart); name = strndup(filestart, nameend - filestart); if (FAILEDTOALLOC(section) || FAILEDTOALLOC(priority) || FAILEDTOALLOC(basefilename) || FAILEDTOALLOC(name)) { free(section); free(priority); free(basefilename); free(name); return RET_ERROR_OOM; } if (checkfilename || !atom_defined(architecture)) { retvalue r; /* as we no longer run properversion over the whole * rest of the string, at least make sure nothing evil * is in this name */ r = properfilename(basefilename); if (!RET_IS_OK(r)) { assert (r != RET_NOTHING); free(section); free(priority); free(basefilename); free(name); return r; } } hash_p->start = md5start; hash_p->len = md5end - md5start; size_p->start = sizestart; size_p->len = sizeend - sizestart; *result_section = section; *result_priority = priority; *result_basename = basefilename; *result_architecture = architecture; *result_name = name; *result_type = type; return RET_OK; } reprepro-4.13.1/TODO0000644000175100017510000000221212152651661011053 00000000000000TODO: think about a way to make removesrc work on all distributions ('*'?) -> or make things like remove(src)/list/.. work with distribution globs... write something for manual.html how to manually modify snapshots... write more automated test-cases (not even run in the testcase yet: reoverride, ... (probably many)) finish import from incoming dir, implement sending mails to uploader extend FilterList et al to specify type/architecture/component add switch to only include if source is present action to redownload missing pool/ files from some update-rules (looking for md5sum) Fields to exclude architectures and components in update rules, (or alternatively allow ! in inclusion lists). half far goals: rewrite error handling, caching error messages and handling Ctrl-C better. far goals: check for unmeet Dependencies for unmeet Build-dependencies. 
write dokumentation, some examples allow multiple versions in a distribution (needs major rewrite I fear) record timestamp when packages are added. option to keep apt-get'able source to each binary (needs mutliple source versions) switch from libdb?.? to sane database reprepro-4.13.1/contents.c0000644000175100017510000002641012152651661012372 00000000000000/* This file is part of "reprepro" * Copyright (C) 2006,2007 Bernhard R. Link * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1301 USA */ #include #include #include #include #include #include #include #include #include "error.h" #include "mprintf.h" #include "chunks.h" #include "dirs.h" #include "names.h" #include "release.h" #include "distribution.h" #include "filelist.h" #include "files.h" #include "ignore.h" #include "configparser.h" /* options are zerroed when called, when error is returned contentsopions_done * is called by the caller */ retvalue contentsoptions_parse(struct distribution *distribution, struct configiterator *iter) { enum contentsflags { cf_disable, cf_dummy, cf_udebs, cf_nodebs, cf_uncompressed, cf_gz, cf_bz2, cf_percomponent, cf_allcomponents, cf_compatsymlink, cf_nocompatsymlink, cf_COUNT }; bool flags[cf_COUNT]; static const struct constant contentsflags[] = { {"0", cf_disable}, {"1", cf_dummy}, {"2", cf_dummy}, {"udebs", cf_udebs}, {"nodebs", cf_nodebs}, {"percomponent", cf_percomponent}, {"allcomponents", 
cf_allcomponents}, {"compatsymlink", cf_compatsymlink}, {"nocompatsymlink", cf_nocompatsymlink}, {".bz2", cf_bz2}, {".gz", cf_gz}, {".", cf_uncompressed}, {NULL, -1} }; retvalue r; distribution->contents.flags.enabled = true; memset(flags, 0, sizeof(flags)); r = config_getflags(iter, "Contents", contentsflags, flags, IGNORABLE(unknownfield), ""); if (r == RET_ERROR_UNKNOWNFIELD) (void)fputs( "Note that the format of the Contents field has changed with reprepro 3.0.0.\n" "There is no longer a number needed (nor possible) there.\n", stderr); if (RET_WAS_ERROR(r)) return r; if (flags[cf_dummy]) { (void)fputs( "Warning: Contents headers in conf/distribution no longer need an\n" "rate argument. Ignoring the number there, this might cause a error\n" "future versions.\n", stderr); } else if (flags[cf_disable]) { (void)fputs( "Warning: Contents headers in conf/distribution no longer need an\n" "rate argument. Treating the '0' as sign to not activate Contents-\n" "-generation, but it will cause an error in future version.\n", stderr); distribution->contents.flags.enabled = false; } if (flags[cf_allcomponents] && flags[cf_compatsymlink]) { fprintf(stderr, "Cannot have allcomponents and compatsymlink in the same Contents line!\n"); return RET_ERROR; } if (flags[cf_allcomponents] && flags[cf_nocompatsymlink]) { fprintf(stderr, "Cannot have allcomponents and nocompatsymlink in the same Contents line!\n"); return RET_ERROR; } #ifndef HAVE_LIBBZ2 if (flags[cf_bz2]) { fprintf(stderr, "Warning: Ignoring request to generate .bz2'ed Contents files.\n" "(bzip2 support disabled at build time.)\n" "Request was in %s in the Contents header ending in line %u\n", config_filename(iter), config_line(iter)); flags[cf_bz2] = false; } #endif distribution->contents.compressions = 0; if (flags[cf_uncompressed]) distribution->contents.compressions |= IC_FLAG(ic_uncompressed); if (flags[cf_gz]) distribution->contents.compressions |= IC_FLAG(ic_gzip); #ifdef HAVE_LIBBZ2 if (flags[cf_bz2]) 
distribution->contents.compressions |= IC_FLAG(ic_bzip2); #endif distribution->contents.flags.udebs = flags[cf_udebs]; distribution->contents.flags.nodebs = flags[cf_nodebs]; if (flags[cf_allcomponents]) distribution->contents.flags.allcomponents = true; else /* default is now off */ distribution->contents.flags.allcomponents = false; if (flags[cf_percomponent]) distribution->contents.flags.percomponent = true; else if (flags[cf_allcomponents]) /* if allcomponents is specified, default is off */ distribution->contents.flags.percomponent = false; else /* otherwise default is on */ distribution->contents.flags.percomponent = true; /* compat symlink is only possible if there are no files * created there, and on by default unless explicitly specified */ if (distribution->contents.flags.allcomponents) distribution->contents.flags.compatsymlink = false; else if (flags[cf_compatsymlink]) distribution->contents.flags.compatsymlink = true; else if (flags[cf_nocompatsymlink]) distribution->contents.flags.compatsymlink = false; else { assert(distribution->contents.flags.percomponent); distribution->contents.flags.compatsymlink = true; } assert(distribution->contents.flags.percomponent || distribution->contents.flags.allcomponents); return RET_OK; } static retvalue addpackagetocontents(UNUSED(struct distribution *di), UNUSED(struct target *ta), const char *packagename, const char *chunk, void *data) { struct filelist_list *contents = data; retvalue r; char *section, *filekey; r = chunk_getvalue(chunk, "Section", §ion); /* Ignoring packages without section, as they should not exist anyway */ if (!RET_IS_OK(r)) return r; r = chunk_getvalue(chunk, "Filename", &filekey); /* dito with filekey */ if (!RET_IS_OK(r)) { free(section); return r; } r = filelist_addpackage(contents, packagename, section, filekey); free(filekey); free(section); return r; } static retvalue gentargetcontents(struct target *target, struct release *release, bool onlyneeded, bool symlink) { retvalue result, r; 
char *contentsfilename; struct filetorelease *file; struct filelist_list *contents; struct target_cursor iterator; if (onlyneeded && target->saved_wasmodified) onlyneeded = false; contentsfilename = mprintf("%s/Contents%s-%s", atoms_components[target->component], (target->packagetype == pt_udeb)?"-udeb":"", atoms_architectures[target->architecture]); if (FAILEDTOALLOC(contentsfilename)) return RET_ERROR_OOM; if (symlink) { char *symlinkas = mprintf("%sContents-%s", (target->packagetype == pt_udeb)?"s":"", atoms_architectures[target->architecture]); if (FAILEDTOALLOC(symlinkas)) { free(contentsfilename); return RET_ERROR_OOM; } r = release_startlinkedfile(release, contentsfilename, symlinkas, target->distribution->contents.compressions, onlyneeded, &file); free(symlinkas); } else r = release_startfile(release, contentsfilename, target->distribution->contents.compressions, onlyneeded, &file); if (!RET_IS_OK(r)) { free(contentsfilename); return r; } if (verbose > 0) { printf(" generating %s...\n", contentsfilename); } free(contentsfilename); r = filelist_init(&contents); if (RET_WAS_ERROR(r)) { release_abortfile(file); return r; } result = target_openiterator(target, READONLY, &iterator); if (RET_IS_OK(result)) { const char *package, *control; while (target_nextpackage(&iterator, &package, &control)) { r = addpackagetocontents(target->distribution, target, package, control, contents); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) break; } r = target_closeiterator(&iterator); RET_ENDUPDATE(result, r); } if (!RET_WAS_ERROR(result)) result = filelist_write(contents, file); if (RET_WAS_ERROR(result)) release_abortfile(file); else result = release_finishfile(release, file); filelist_free(contents); return result; } static retvalue genarchcontents(struct distribution *distribution, architecture_t architecture, packagetype_t type, struct release *release, bool onlyneeded) { retvalue result = RET_NOTHING, r; char *contentsfilename; struct filetorelease *file; struct 
filelist_list *contents; const struct atomlist *components; struct target *target; bool combinedonlyifneeded; if (type == pt_udeb) { if (distribution->contents_components_set) components = &distribution->contents_ucomponents; else components = &distribution->udebcomponents; } else { if (distribution->contents_components_set) components = &distribution->contents_components; else components = &distribution->components; } if (components->count == 0) return RET_NOTHING; combinedonlyifneeded = onlyneeded; for (target=distribution->targets; target!=NULL; target=target->next) { if (target->architecture != architecture || target->packagetype != type || !atomlist_in(components, target->component)) continue; if (onlyneeded && target->saved_wasmodified) combinedonlyifneeded = false; if (distribution->contents.flags.percomponent) { r = gentargetcontents(target, release, onlyneeded, distribution->contents. flags.compatsymlink && !distribution->contents. flags.allcomponents && target->component == components->atoms[0]); RET_UPDATE(result, r); if (RET_WAS_ERROR(r)) return r; } } if (!distribution->contents.flags.allcomponents) { if (!distribution->contents.flags.compatsymlink) { char *symlinkas = mprintf("%sContents-%s", (type == pt_udeb)?"s":"", atoms_architectures[architecture]); if (FAILEDTOALLOC(symlinkas)) return RET_ERROR_OOM; release_warnoldfileorlink(release, symlinkas, distribution->contents.compressions); free(symlinkas); } return RET_OK; } contentsfilename = mprintf("%sContents-%s", (type == pt_udeb)?"u":"", atoms_architectures[architecture]); if (FAILEDTOALLOC(contentsfilename)) return RET_ERROR_OOM; r = release_startfile(release, contentsfilename, distribution->contents.compressions, combinedonlyifneeded, &file); if (!RET_IS_OK(r)) { free(contentsfilename); return r; } if (verbose > 0) { printf(" generating %s...\n", contentsfilename); } free(contentsfilename); r = filelist_init(&contents); if (RET_WAS_ERROR(r)) { release_abortfile(file); return r; } r = 
distribution_foreach_package_c(distribution, components, architecture, type, addpackagetocontents, contents); if (!RET_WAS_ERROR(r)) r = filelist_write(contents, file); if (RET_WAS_ERROR(r)) release_abortfile(file); else r = release_finishfile(release, file); filelist_free(contents); RET_UPDATE(result, r); return result; } retvalue contents_generate(struct distribution *distribution, struct release *release, bool onlyneeded) { retvalue result, r; int i; const struct atomlist *architectures; if (distribution->contents.compressions == 0) distribution->contents.compressions = IC_FLAG(ic_gzip); result = RET_NOTHING; if (distribution->contents_architectures_set) { architectures = &distribution->contents_architectures; } else { architectures = &distribution->architectures; } for (i = 0 ; i < architectures->count ; i++) { architecture_t architecture = architectures->atoms[i]; if (architecture == architecture_source) continue; if (!distribution->contents.flags.nodebs) { r = genarchcontents(distribution, architecture, pt_deb, release, onlyneeded); RET_UPDATE(result, r); } if (distribution->contents.flags.udebs) { r = genarchcontents(distribution, architecture, pt_udeb, release, onlyneeded); RET_UPDATE(result, r); } } return result; }