diff --git a/TODO b/TODO deleted file mode 100644 index 97acab7..0000000 --- a/TODO +++ /dev/null @@ -1,5 +0,0 @@ -TODO list for britney -===================== - - - check if it is need to consider fake source packages - diff --git a/britney.py b/britney.py index c6069a1..ed4f854 100644 --- a/britney.py +++ b/britney.py @@ -15,6 +15,24 @@ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. +""" +== Introduction == + +This is the Debian testing updater script, also known as "Britney". + +Packages are usually installed into the `testing' distribution after +they have undergone some degree of testing in unstable. The goal of +this software is to do this task in a smart way, allowing testing +to be always fully installable and close to being a release candidate. + +Britney source code is split into two different, but related, tasks: +the first one is the generation of the update excuses, and with the +second one Britney tries to update testing with the valid candidates; +first, each package alone, and then larger and even larger sets of +packages together. Each try is accepted if testing is no more +uninstallable after the update than before. +""" + import os import re import sys @@ -26,23 +44,40 @@ import apt_pkg from excuse import Excuse -VERSION = '2.0.alpha1' +__author__ = 'Fabio Tranchitella' +__version__ = '2.0.alpha1' class Britney: - """Debian testing updater script""" + """Britney, the Debian testing updater script + + This is the script that updates the `testing' distribution. It is executed + each day after the installation of the updated packages. It generates the + `Packages' files for the testing distribution, but it does so in an + intelligent manner; it tries to avoid any inconsistency and to use only + non-buggy packages. 
- BINARY_FIELDS = ('Version', 'Pre-Depends', 'Depends', 'Conflicts', 'Provides', 'Source', 'Architecture', 'Version') - SOURCE_FIELDS = ('Version', 'Maintainer', 'Section') + For more documentation on this script, please read the Developers Reference. + """ HINTS_STANDARD = ("easy", "hint", "remove", "block", "unblock", "urgent", "approve") HINTS_ALL = ("force", "force-hint", "block-all") + HINTS_STANDARD def __init__(self): - """Class constructor method: initialize and populate the data lists""" - self.__parse_arguments() - apt_pkg.init() + """Class constructor + + This method initializes and populates the data lists, which contain all + the information needed by the other methods of the class. + """ self.date_now = int(((time.time() / (60*60)) - 15) / 24) + + # parse the command line arguments + self.__parse_arguments() + + # initialize the apt_pkg back-end + apt_pkg.init() + + # read the source and binary packages for the involved distributions self.sources = {'testing': self.read_sources(self.options.testing), 'unstable': self.read_sources(self.options.unstable), 'tpu': self.read_sources(self.options.tpu),} @@ -51,9 +86,13 @@ class Britney: self.binaries['testing'][arch] = self.read_binaries(self.options.testing, "testing", arch) self.binaries['unstable'][arch] = self.read_binaries(self.options.unstable, "unstable", arch) self.binaries['tpu'][arch] = self.read_binaries(self.options.tpu, "tpu", arch) + + # read the release-critical bug summaries for testing and unstable self.bugs = {'unstable': self.read_bugs(self.options.unstable), 'testing': self.read_bugs(self.options.testing),} self.normalize_bugs() + + # read additional data self.dates = self.read_dates(self.options.testing) self.urgencies = self.read_urgencies(self.options.testing) self.approvals = self.read_approvals(self.options.tpu) @@ -61,15 +100,26 @@ class Britney: self.excuses = [] def __parse_arguments(self): - """Parse command line arguments""" - self.parser = optparse.OptionParser(version="%prog 
" + VERSION) + """Parse the command line arguments + + This method parses and initializes the command line arguments. + While doing so, it preprocesses some of the options to be converted + into a suitable form for the other methods of the class. + """ + # initialize the parser + self.parser = optparse.OptionParser(version="%prog") self.parser.add_option("-v", "", action="count", dest="verbose", help="enable verbose output") self.parser.add_option("-c", "--config", action="store", dest="config", default="/etc/britney.conf", help="path for the configuration file") (self.options, self.args) = self.parser.parse_args() + + # if the configuration file exists, then read it and set the additional options if not os.path.isfile(self.options.config): self.__log("Unable to read the configuration file (%s), exiting!" % self.options.config, type="E") sys.exit(1) + + # minimum days for unstable-testing transition and the list of hints + # are handled as an ad-hoc case self.MINDAYS = {} self.HINTS = {} for k, v in [map(string.strip,r.split('=', 1)) for r in file(self.options.config) if '=' in r and not r.strip().startswith('#')]: @@ -80,7 +130,8 @@ class Britney: reduce(lambda x,y: x+y, [hasattr(self, "HINTS_" + i) and getattr(self, "HINTS_" + i) or (i,) for i in v.split()]) else: setattr(self.options, k.lower(), v) - # Sort architectures + + # Sort the architecture list allarches = sorted(self.options.architectures.split()) arches = [x for x in allarches if x in self.options.nobreakall_arches] arches += [x for x in allarches if x not in arches and x not in self.options.fucked_arches] @@ -89,30 +140,154 @@ class Britney: self.options.architectures = arches def __log(self, msg, type="I"): - """Print info messages according to verbosity level""" + """Print info messages according to verbosity level + + An easy-and-simple log method which prints messages to the standard + output. 
The type parameter controls the urgency of the message, and + can be equal to `I' for `Information', `W' for `Warning' and `E' for + `Error'. Warnings and errors are always printed, and informational messages are + printed only if the verbose logging is enabled. + """ if self.options.verbose or type in ("E", "W"): print "%s: [%s] - %s" % (type, time.asctime(), msg) - # Data reading/writing + # Data reading/writing methods + # ---------------------------- def read_sources(self, basedir): - """Read the list of source packages from the specified directory""" + """Read the list of source packages from the specified directory + + The source packages are read from the `Sources' file within the + directory specified as `basedir' parameter. Considering the + large amount of memory needed, not all the fields are loaded + in memory. The available fields are Version, Maintainer and Section. + + The method returns a dictionary which maps every source + package name to a dictionary of its fields. + """ sources = {} package = None filename = os.path.join(basedir, "Sources") self.__log("Loading source packages from %s" % filename) - for l in open(filename): - if l.startswith(' ') or ':' not in l: continue - fields = map(string.strip, l.split(":",1)) - if fields[0] == 'Package': - package = fields[1] - sources[package] = dict([(k.lower(), None) for k in self.SOURCE_FIELDS] + [('binaries', [])]) - elif fields[0] in self.SOURCE_FIELDS: - sources[package][fields[0].lower()] = fields[1] + packages = apt_pkg.ParseTagFile(open(filename)) + while packages.Step(): + pkg = packages.Section.get('Package') + sources[pkg] = {'binaries': [], + 'version': packages.Section.get('Version'), + 'maintainer': packages.Section.get('Maintainer'), + 'section': packages.Section.get('Section'), + } return sources + def read_binaries(self, basedir, distribution, arch): + """Read the list of binary packages from the specified directory + + The binary packages are read from the `Packages_${arch}' files + within the directory 
specified as `basedir' parameter, replacing + ${arch} with the value of the arch parameter. Considering the + large amount of memory needed, not all the fields are loaded + in memory. The available fields are Version, Source, Pre-Depends, + Depends, Conflicts, Provides and Architecture. + + After reading the packages, reverse dependencies are computed + and saved in the `rdepends' keys, and the `Provides' field is + used to populate the virtual packages list. + + The dependencies are parsed with the apt_pkg.ParseDepends method, + and they are stored both in the format of its return value and + as text. + + The method returns a tuple. The first element is a dictionary which + maps every binary package name to a dictionary of its fields; the second + element is a dictionary which maps virtual packages to real + packages that provide it. + """ + + packages = {} + provides = {} + package = None + filename = os.path.join(basedir, "Packages_%s" % arch) + self.__log("Loading binary packages from %s" % filename) + Packages = apt_pkg.ParseTagFile(open(filename)) + while Packages.Step(): + pkg = Packages.Section.get('Package') + version = Packages.Section.get('Version') + dpkg = {'rdepends': [], + 'version': version, + 'source': pkg, + 'source-ver': version, + 'pre-depends': Packages.Section.get('Pre-Depends'), + 'depends': Packages.Section.get('Depends'), + 'conflicts': Packages.Section.get('Conflicts'), + 'provides': Packages.Section.get('Provides'), + 'architecture': Packages.Section.get('Architecture'), + } + + # retrieve the name and the version of the source package + source = Packages.Section.get('Source') + if source: + dpkg['source'] = source.split(" ")[0] + if "(" in source: + dpkg['source-ver'] = source.split("(")[1].split(")")[0] + + # if the source package is available in the distribution, then register this binary package + if dpkg['source'] in self.sources[distribution]: + self.sources[distribution][dpkg['source']]['binaries'].append(pkg + "/" + arch) + # if the source 
package doesn't exist, create a fake one + else: + self.sources[distribution][dpkg['source']] = {'binaries': [pkg + "/" + arch], + 'version': dpkg['source-ver'], 'maintainer': None, 'section': None, 'fake': True} + + # register virtual packages and real packages that provide them + if dpkg['provides']: + parts = map(string.strip, dpkg['provides'].split(",")) + for p in parts: + try: + provides[p].append(pkg) + except KeyError: + provides[p] = [pkg] + del dpkg['provides'] + + # append the resulting dictionary to the package list + packages[pkg] = dpkg + + # loop again on the list of packages to register reverse dependencies + for pkg in packages: + dependencies = [] + + # analyze dependencies + if packages[pkg]['depends']: + packages[pkg]['depends-txt'] = packages[pkg]['depends'] + packages[pkg]['depends'] = apt_pkg.ParseDepends(packages[pkg]['depends']) + dependencies.extend(packages[pkg]['depends']) + + # analyze pre-dependencies + if packages[pkg]['pre-depends']: + packages[pkg]['pre-depends-txt'] = packages[pkg]['pre-depends'] + packages[pkg]['pre-depends'] = apt_pkg.ParseDepends(packages[pkg]['pre-depends']) + dependencies.extend(packages[pkg]['pre-depends']) + + # register the list of the dependencies for the depending packages + for p in dependencies: + for a in p: + if a[0] not in packages: continue + packages[a[0]]['rdepends'].append((pkg, a[1], a[2])) + + # return a tuple with the list of real and virtual packages + return (packages, provides) + def read_bugs(self, basedir): - """Read the RC bugs count from the specified directory""" + """Read the release critical bug summary from the specified directory + + The RC bug summaries are read from the `Bugs' file within the + directory specified as `basedir' parameter. The file contains + rows with the format: + + + + The method returns a dictionary where the key is the binary package + name and the value is the number of open RC bugs for it. 
+ """ bugs = {} filename = os.path.join(basedir, "Bugs") self.__log("Loading RC bugs count from %s" % filename) @@ -125,7 +300,14 @@ class Britney: self.__log("Bugs, unable to parse \"%s\"" % line, type="E") return bugs - def maxver(self, pkg, dist): + def __maxver(self, pkg, dist): + """Return the maximum version for a given package name + + This method returns None if the specified source package + is not available in the `dist' distribution. If the package + exists, then it returns the maximum version between the + source package and its binary packages. + """ maxver = None if self.sources[dist].has_key(pkg): maxver = self.sources[dist][pkg]['version'] @@ -137,26 +319,54 @@ class Britney: return maxver def normalize_bugs(self): - """Normalize the RC bugs count for testing and unstable""" + """Normalize the release critical bug summaries for testing and unstable + + The method doesn't return any value: it directly modifies the + object attribute `bugs'. + """ + # loop on all the package names from testing and unstable bug summaries for pkg in set(self.bugs['testing'].keys() + self.bugs['unstable'].keys()): + + # make sure that the key is present in both dictionaries if not self.bugs['testing'].has_key(pkg): self.bugs['testing'][pkg] = 0 elif not self.bugs['unstable'].has_key(pkg): self.bugs['unstable'][pkg] = 0 - maxvert = self.maxver(pkg, 'testing') + # retrieve the maximum version of the package in testing: + maxvert = self.__maxver(pkg, 'testing') + + # if the package is not available in testing or it has the + # same RC bug count, then do nothing if maxvert == None or \ self.bugs['testing'][pkg] == self.bugs['unstable'][pkg]: continue - maxveru = self.maxver(pkg, 'unstable') + # retrieve the maximum version of the package in unstable: + maxveru = self.__maxver(pkg, 'unstable') + + # if the package is not available in unstable, then do nothing if maxveru == None: continue + # else if the testing package is more recent, then use the + # unstable RC bug count for 
testing, too elif apt_pkg.VersionCompare(maxvert, maxveru) >= 0: self.bugs['testing'][pkg] = self.bugs['unstable'][pkg] def read_dates(self, basedir): - """Read the upload data for the packages from the specified directory""" + """Read the upload date for the packages from the specified directory + + The upload dates are read from the `Date' file within the directory + specified as `basedir' parameter. The file contains rows with the + format: + + + + The dates are expressed as days starting from the 1970-01-01. + + The method returns a dictionary where the key is the binary package + name and the value is a tuple with two items, the version and the date. + """ dates = {} filename = os.path.join(basedir, "Dates") self.__log("Loading upload data from %s" % filename) @@ -170,7 +380,19 @@ class Britney: return dates def read_urgencies(self, basedir): - """Read the upload urgency of the packages from the specified directory""" + """Read the upload urgency of the packages from the specified directory + + The upload urgencies are read from the `Urgency' file within the + directory specified as `basedir' parameter. The file contains rows + with the format: + + + + The method returns a dictionary where the key is the binary package + name and the value is the greatest urgency from the versions of the + package that are higher than the testing one. 
+ """ + urgencies = {} filename = os.path.join(basedir, "Urgency") self.__log("Loading upload urgencies from %s" % filename) @@ -178,23 +400,46 @@ class Britney: l = line.strip().split() if len(l) != 3: continue + # read the minimum days associated to the urgencies urgency_old = urgencies.get(l[0], self.options.default_urgency) mindays_old = self.MINDAYS.get(urgency_old, self.MINDAYS[self.options.default_urgency]) mindays_new = self.MINDAYS.get(l[2], self.MINDAYS[self.options.default_urgency]) + + # if the new urgency is lower (so the min days are higher), do nothing if mindays_old <= mindays_new: continue + + # if the package exists in testing and it is more recent, do nothing tsrcv = self.sources['testing'].get(l[0], None) if tsrcv and apt_pkg.VersionCompare(tsrcv['version'], l[1]) >= 0: continue + + # if the package doesn't exist in unstable or it is older, do nothing usrcv = self.sources['unstable'].get(l[0], None) if not usrcv or apt_pkg.VersionCompare(usrcv['version'], l[1]) < 0: continue + + # update the urgency for the package urgencies[l[0]] = l[2] return urgencies def read_approvals(self, basedir): - """Read the approvals data from the specified directory""" + """Read the approval commands from the specified directory + + The approval commands are read from the files contained by the + `Approved' directory within the directory specified as `basedir' + parameter. The name of the files has to be the same of the + authorized users for the approvals. + + The file contains rows with the format: + + + + The method returns a dictionary where the key is the binary package + name followed by an underscore and the version number, and the value + is the user who submitted the command. 
+ """ approvals = {} for approver in self.options.approvers.split(): filename = os.path.join(basedir, "Approved", approver) @@ -206,7 +451,20 @@ class Britney: return approvals def read_hints(self, basedir): - """Read the approvals data from the specified directory""" + """Read the hint commands from the specified directory + + The hint commands are read from the files contained by the `Hints' + directory within the directory specified as `basedir' parameter. + The name of the files has to be the same of the authorized users + for the hints. + + The file contains rows with the format: + + [/] + + The method returns a dictionary where the key is the command, and + the value is the list of affected packages. + """ hints = dict([(k,[]) for k in self.HINTS_ALL]) for who in self.HINTS.keys(): @@ -239,76 +497,16 @@ class Britney: return hints - def read_binaries(self, basedir, distribution, arch): - """Read the list of binary packages from the specified directory""" - packages = {} - package = None - filename = os.path.join(basedir, "Packages_%s" % arch) - self.__log("Loading binary packages from %s" % filename) - for l in open(filename): - if l.startswith(' ') or ':' not in l: continue - fields = map(string.strip, l.split(":",1)) - if fields[0] == 'Package': - package = fields[1] - packages[package] = dict([(k.lower(), None) for k in self.BINARY_FIELDS] + [('rdepends', [])]) - packages[package]['source'] = package - packages[package]['source-ver'] = None - elif fields[0] == 'Source': - packages[package][fields[0].lower()] = fields[1].split(" ")[0] - if "(" in fields[1]: - packages[package]['source-ver'] = fields[1].split("(")[1].split(")")[0] - elif fields[0] in self.BINARY_FIELDS: - packages[package][fields[0].lower()] = fields[1] - - provides = {} - for pkgname in packages: - if not packages[pkgname]['source-ver']: - packages[pkgname]['source-ver'] = packages[pkgname]['version'] - if packages[pkgname]['source'] in self.sources[distribution]: - 
self.sources[distribution][packages[pkgname]['source']]['binaries'].append(pkgname + "/" + arch) - if not packages[pkgname]['provides']: - continue - parts = map(string.strip, packages[pkgname]["provides"].split(",")) - del packages[pkgname]["provides"] - for p in parts: - if p in provides: - provides[p].append(pkgname) - else: - provides[p] = [pkgname] - - for pkgname in packages: - dependencies = [] - if packages[pkgname]['depends']: - packages[pkgname]['depends-txt'] = packages[pkgname]['depends'] - packages[pkgname]['depends'] = apt_pkg.ParseDepends(packages[pkgname]['depends']) - dependencies.extend(packages[pkgname]['depends']) - if packages[pkgname]['pre-depends']: - packages[pkgname]['pre-depends-txt'] = packages[pkgname]['pre-depends'] - packages[pkgname]['pre-depends'] = apt_pkg.ParseDepends(packages[pkgname]['pre-depends']) - dependencies.extend(packages[pkgname]['pre-depends']) - for p in dependencies: - for a in p: - if a[0] not in packages: continue - packages[a[0]]['rdepends'].append((pkgname, a[1], a[2])) - - return (packages, provides) - - # Package analisys - - def should_remove_source(self, pkg): - """Check if a source package should be removed from testing""" - if self.sources['unstable'].has_key(pkg): - return False - src = self.sources['testing'][pkg] - excuse = Excuse("-" + pkg) - excuse.set_vers(src['version'], None) - src['maintainer'] and excuse.set_maint(src['maintainer'].strip()) - src['section'] and excuse.set_section(src['section'].strip()) - excuse.addhtml("Valid candidate") - self.excuses.append(excuse) - return True + # Utility methods for package analisys + # ------------------------------------ def same_source(self, sv1, sv2): + """Check if two version numbers are built from the same source + + This method returns a boolean value which is true if the two + version numbers specified as parameters are built from the same + source. The main use of this code is to detect binary-NMU. 
+ """ if sv1 == sv2: return 1 @@ -342,70 +540,144 @@ class Britney: return 0 def get_dependency_solvers(self, block, arch, distribution): + """Find the packages which satisfy a dependency block + + This method returns the list of packages which satisfy a dependency + block (as returned by apt_pkg.ParseDepends) for the given architecture + and distribution. + + It returns a tuple with two items: the first is a boolean which is + True if the dependency is satisfied, the second is the list of the + solving packages. + """ + packages = [] + # for every package, version and operation in the block for name, version, op in block: - real_package = False + # look for the package in unstable if name in self.binaries[distribution][arch][0]: - real_package = True package = self.binaries[distribution][arch][0][name] + # check the versioned dependency (if present) if op == '' and version == '' or apt_pkg.CheckDep(package['version'], op, version): packages.append(name) - # TODO: this would be enough according to policy, but not according to britney v.1 - #if op == '' and version == '' and name in self.binaries[distribution][arch][1]: - # # packages.extend(self.binaries[distribution][arch][1][name]) - # return (True, packages) - + # look for the package in the virtual packages list if name in self.binaries[distribution][arch][1]: + # loop on the list of packages which provides it for prov in self.binaries[distribution][arch][1][name]: package = self.binaries[distribution][arch][0][prov] + # check the versioned dependency (if present) + # TODO: this is forbidden by the debian policy, which says that versioned + # dependencies on virtual packages are never satisfied. The old britney + # does it and we have to go with it, but at least a warning should be raised. 
if op == '' and version == '' or apt_pkg.CheckDep(package['version'], op, version): + packages.append(prov) + break + return (len(packages) > 0, packages) - def excuse_unsat_deps(self, pkg, src, arch, suite, excuse, ignore_break=0): + def excuse_unsat_deps(self, pkg, src, arch, suite, excuse): + """Find unsatisfied dependencies for a binary package + + This method analyzes the dependencies of the binary package specified + by the parameter `pkg', built from the source package `src', for the + architecture `arch' within the suite `suite'. If the dependency can't + be satisfied in testing and/or unstable, it updates the excuse passed + as parameter. + + The dependency fields checked are Pre-Depends and Depends. + """ + # retrieve the binary package from the specified suite and arch binary_u = self.binaries[suite][arch][0][pkg] + + # analyze the dependency fields (if present) for type in ('Pre-Depends', 'Depends'): type_key = type.lower() if not binary_u[type_key]: continue + # this list will contain the packages that satisfy the dependency packages = [] + + # for every block of dependency (which is formed as a conjunction of disjunctions) for block, block_txt in map(None, binary_u[type_key], binary_u[type_key + '-txt'].split(',')): + # if the block is satisfied in testing, then skip the block solved, packages = self.get_dependency_solvers(block, arch, 'testing') if solved: continue + # check if the block can be satisfied in unstable, and list the solving packages solved, packages = self.get_dependency_solvers(block, arch, suite) packages = [self.binaries[suite][arch][0][p]['source'] for p in packages] + + # if the dependency can be satisfied by the same source package, skip the block: + # obviously both binary packages will enter testing together if src in packages: continue + # if no package can satisfy the dependency, add this information to the excuse if len(packages) == 0: excuse.addhtml("%s/%s unsatisfiable %s: %s" % (pkg, arch, type, block_txt.strip())) + # for the 
solving packages, update the excuse to add the dependencies for p in packages: - if ignore_break or arch not in self.options.break_arches.split(): + if arch not in self.options.break_arches.split(): excuse.add_dep(p) else: excuse.add_break_dep(p, arch) + # Package analysis methods + # ------------------------ + + def should_remove_source(self, pkg): + """Check if a source package should be removed from testing + + This method checks if a source package should be removed from the + testing distribution; this happens if the source package is not + present in the unstable distribution anymore. + + It returns True if the package can be removed, False otherwise. + In the former case, a new excuse is appended to the object + attribute excuses. + """ + # if the source package is available in unstable, then do nothing + if self.sources['unstable'].has_key(pkg): + return False + # otherwise, add a new excuse for its removal and return True + src = self.sources['testing'][pkg] + excuse = Excuse("-" + pkg) + excuse.set_vers(src['version'], None) + src['maintainer'] and excuse.set_maint(src['maintainer'].strip()) + src['section'] and excuse.set_section(src['section'].strip()) + excuse.addhtml("Valid candidate") + self.excuses.append(excuse) + return True + def should_upgrade_srcarch(self, src, arch, suite): - # binnmu this arch? + """Check if a binary package should be upgraded + + This method checks if a binary package should be upgraded; this can + happen only if the binary package is a binary-NMU for the given arch. + The analysis is performed for the source package specified by the + `src' parameter, checking the architecture `arch' for the distribution + `suite'. + + It returns False if the given package doesn't need to be upgraded, + True otherwise. In the former case, a new excuse is appended to + the object attribute excuses. 
+ """ + # retrieve the source packages for testing and suite source_t = self.sources['testing'][src] source_u = self.sources[suite][src] + # build the common part of the excuse, which will be filled by the code below ref = "%s/%s%s" % (src, arch, suite != 'unstable' and "_" + suite or "") - excuse = Excuse(ref) excuse.set_vers(source_t['version'], source_t['version']) source_u['maintainer'] and excuse.set_maint(source_u['maintainer'].strip()) source_u['section'] and excuse.set_section(source_u['section'].strip()) - anywrongver = False - anyworthdoing = False - + # if there is a `remove' hint and the requested version is the same of the + # version in testing, then stop here and return False if self.hints["remove"].has_key(src) and \ self.same_source(source_t['version'], self.hints["remove"][src][0]): excuse.addhtml("Removal request by %s" % (self.hints["remove"][src][1])) @@ -414,91 +686,137 @@ class Britney: self.excuses.append(excuse) return False - for pkg in sorted(source_u['binaries']): - if not pkg.endswith("/" + arch): continue + # the starting point is that there is nothing wrong and nothing worth doing + anywrongver = False + anyworthdoing = False + + # for every binary package produced by this source in unstable for this architecture + for pkg in sorted(filter(lambda x: x.endswith("/" + arch), source_u['binaries'])): pkg_name = pkg.split("/")[0] + # retrieve the testing (if present) and unstable corresponding binary packages binary_t = pkg in source_t['binaries'] and self.binaries['testing'][arch][0][pkg_name] or None binary_u = self.binaries[suite][arch][0][pkg_name] + + # this is the source version for the new binary package pkgsv = self.binaries[suite][arch][0][pkg_name]['source-ver'] + # if the new binary package is architecture-independent, then skip it if binary_u['architecture'] == 'all': excuse.addhtml("Ignoring %s %s (from %s) as it is arch: all" % (pkg_name, binary_u['version'], pkgsv)) continue + # if the new binary package is not from the same 
source as the testing one, then skip it if not self.same_source(source_t['version'], pkgsv): anywrongver = True excuse.addhtml("From wrong source: %s %s (%s not %s)" % (pkg_name, binary_u['version'], pkgsv, source_t['version'])) break + # find unsatisfied dependencies for the new binary package self.excuse_unsat_deps(pkg_name, src, arch, suite, excuse) + # if the binary is not present in testing, then it is a new binary; + # in this case, there is something worth doing if not binary_t: excuse.addhtml("New binary: %s (%s)" % (pkg_name, binary_u['version'])) anyworthdoing = True continue + # at this point, the binary package is present in testing, so we can compare + # the versions of the packages ... vcompare = apt_pkg.VersionCompare(binary_t['version'], binary_u['version']) + + # ... if updating would mean downgrading, then stop here: there is something wrong if vcompare > 0: anywrongver = True excuse.addhtml("Not downgrading: %s (%s to %s)" % (pkg_name, binary_t['version'], binary_u['version'])) break + # ... 
if updating would mean upgrading, then there is something worth doing elif vcompare < 0: excuse.addhtml("Updated binary: %s (%s to %s)" % (pkg_name, binary_t['version'], binary_u['version'])) anyworthdoing = True - if not anywrongver and (anyworthdoing or src in self.sources[suite]): + # if there is nothing wrong and there is something worth doing or the source + # package is not fake, then check what packages should be removed + if not anywrongver and (anyworthdoing or self.sources[suite][src].has_key('fake')): srcv = self.sources[suite][src]['version'] ssrc = self.same_source(source_t['version'], srcv) + # for every binary package produced by this source in testing for this architecture for pkg in sorted([x.split("/")[0] for x in self.sources['testing'][src]['binaries'] if x.endswith("/"+arch)]): + # if the package is architecture-independent, then ignore it if self.binaries['testing'][arch][0][pkg]['architecture'] == 'all': excuse.addhtml("Ignoring removal of %s as it is arch: all" % (pkg)) continue + # if the package is not produced by the new source package, then remove it from testing if not self.binaries[suite][arch][0].has_key(pkg): tpkgv = self.binaries['testing'][arch][0][pkg]['version'] excuse.addhtml("Removed binary: %s %s" % (pkg, tpkgv)) if ssrc: anyworthdoing = True + # if there is nothing wrong and there is something worth doing, this is a valid candidate if not anywrongver and anyworthdoing: excuse.addhtml("Valid candidate") self.excuses.append(excuse) + # else if there is something worth doing (but something wrong, too) this package won't be considered elif anyworthdoing: excuse.addhtml("Not considered") self.excuses.append(excuse) return False + # otherwise, return True return True def should_upgrade_src(self, src, suite): + """Check if source package should be upgraded + + This method checks if a source package should be upgraded. 
The analisys + is performed for the source package specified by the `src' parameter, + checking the architecture `arch' for the distribution `suite'. + + It returns False if the given package doesn't need to be upgraded, + True otherwise. In the former case, a new excuse is appended to + the the object attribute excuses. + """ + + # retrieve the source packages for testing (if available) and suite source_u = self.sources[suite][src] if src in self.sources['testing']: source_t = self.sources['testing'][src] + # if testing and unstable have the same version, then this is a candidate for binary-NMUs only if apt_pkg.VersionCompare(source_t['version'], source_u['version']) == 0: - # Candidate for binnmus only return False else: source_t = None + # build the common part of the excuse, which will be filled by the code below ref = "%s%s" % (src, suite != 'unstable' and "_" + suite or "") - - update_candidate = True - excuse = Excuse(ref) excuse.set_vers(source_t and source_t['version'] or None, source_u['version']) source_u['maintainer'] and excuse.set_maint(source_u['maintainer'].strip()) source_u['section'] and excuse.set_section(source_u['section'].strip()) + + # the starting point is that we will update the candidate + update_candidate = True + # if the version in unstable is older, then stop here with a warning in the excuse and return False if source_t and apt_pkg.VersionCompare(source_u['version'], source_t['version']) < 0: - # Version in unstable is older! 
excuse.addhtml("ALERT: %s is newer in testing (%s %s)" % (src, source_t['version'], source_u['version'])) self.excuses.append(excuse) return False + # check if the source package really exists or if it is a fake one + if source_u.has_key('fake'): + excuse.addhtml("%s source package doesn't exist" % (src)) + update_candidate = False + + # retrieve the urgency for the upload, ignoring it if this is a NEW package (not present in testing) urgency = self.urgencies.get(src, self.options.default_urgency) if not source_t and urgency != self.options.default_urgency: excuse.addhtml("Ignoring %s urgency setting for NEW package" % (urgency)) urgency = self.options.default_urgency + # if there is a `remove' hint and the requested version is the same of the + # version in testing, then stop here and return False if self.hints["remove"].has_key(src): if source_t and self.same_source(source_t['version'], self.hints['remove'][src][0]) or \ self.same_source(source_u['version'], self.hints['remove'][src][0]): @@ -506,22 +824,29 @@ class Britney: excuse.addhtml("Trying to remove package, not update it") update_candidate = False + # check if there is a `block' hint for this package or a `block-all source' hint blocked = None if self.hints["block"].has_key(src): blocked = self.hints["block"][src] elif self.hints["block-all"].has_key("source"): blocked = self.hints["block-all"]["source"] + # if the source is blocked, then look for an `unblock' hint; the unblock request + # is processed only if the specified version is correct if blocked: unblock = self.hints["unblock"].get(src,(None,None)) - if unblock[0] != None and self.same_source(unblock[0], source_u['version']): - excuse.addhtml("Ignoring request to block package by %s, due to unblock request by %s" % (blocked, unblock[1])) - else: - if unblock[0] != None: + if unblock[0] != None: + if self.same_source(unblock[0], source_u['version']): + excuse.addhtml("Ignoring request to block package by %s, due to unblock request by %s" % 
(blocked, unblock[1])) + else: excuse.addhtml("Unblock request by %s ignored due to version mismatch: %s" % (unblock[1], unblock[0])) + else: excuse.addhtml("Not touching package, as requested by %s (contact debian-release if update is needed)" % (blocked)) update_candidate = False + # if the suite is unstable, then we have to check the urgency and the minimum days of + # permanence in unstable before updating testing; if the source package is too young, + # the check fails and we set update_candidate to False to block the update if suite == 'unstable': if not self.dates.has_key(src): self.dates[src] = (source_u['version'], self.date_now) @@ -537,30 +862,45 @@ class Britney: else: update_candidate = False + # at this point, we check what is the status of the builds on all the supported architectures + # to catch the out-of-date ones pkgs = {src: ["source"]} for arch in self.options.architectures: oodbins = {} + # for every binary package produced by this source in the suite for this architecture for pkg in sorted([x.split("/")[0] for x in self.sources[suite][src]['binaries'] if x.endswith("/"+arch)]): if not pkgs.has_key(pkg): pkgs[pkg] = [] pkgs[pkg].append(arch) + # retrieve the binary package and its source version binary_u = self.binaries[suite][arch][0][pkg] pkgsv = binary_u['source-ver'] + + # if it wasn't builded by the same source, it is out-of-date if not self.same_source(source_u['version'], pkgsv): if not oodbins.has_key(pkgsv): oodbins[pkgsv] = [] oodbins[pkgsv].append(pkg) continue + # if the package is architecture-dependent or the current arch is `nobreakall' + # find unsatisfied dependencies for the binary package if binary_u['architecture'] != 'all' or arch in self.options.nobreakall_arches: self.excuse_unsat_deps(pkg, src, arch, suite, excuse) + # if there are out-of-date packages, warn about them in the excuse and set update_candidate + # to False to block the update; if the architecture where the package is out-of-date is + # in the 
`fucked_arches' list, then do not block the update if oodbins: oodtxt = "" for v in oodbins.keys(): if oodtxt: oodtxt = oodtxt + "; " - oodtxt = oodtxt + "%s (from %s)" % (", ".join(sorted(oodbins[v])), arch, src, v, v) - text = "out of date on %s: %s" % (arch, src, source_u['version'], arch, oodtxt) + oodtxt = oodtxt + "%s (from %s)" % \ + (", ".join(sorted(oodbins[v])), arch, src, v, v) + text = "out of date on %s: %s" % \ + (arch, src, source_u['version'], arch, oodtxt) if arch in self.options.fucked_arches: text = text + " (but %s isn't keeping up, so nevermind)" % (arch) @@ -570,10 +910,14 @@ class Britney: if self.date_now != self.dates[src][1]: excuse.addhtml(text) + # if the source package has no binaries, set update_candidate to False to block the update if len(self.sources[suite][src]['binaries']) == 0: excuse.addhtml("%s has no binaries on any arch" % src) update_candidate = False + # if the suite is unstable, then we have to check the release-critical bug counts before + # updating testing; if the unstable package have a RC bug count greater than the testing + # one, the check fails and we set update_candidate to False to block the update if suite == 'unstable': for pkg in pkgs.keys(): if not self.bugs['testing'].has_key(pkg): @@ -582,16 +926,25 @@ class Britney: self.bugs['unstable'][pkg] = 0 if self.bugs['unstable'][pkg] > self.bugs['testing'][pkg]: - excuse.addhtml("%s (%s) is buggy! (%d > %d)" % (pkg, ", ".join(pkgs[pkg]), pkg, self.bugs['unstable'][pkg], self.bugs['testing'][pkg])) + excuse.addhtml("%s (%s) is buggy! (%d > %d)" % \ + (pkg, ", ".join(pkgs[pkg]), pkg, self.bugs['unstable'][pkg], self.bugs['testing'][pkg])) update_candidate = False elif self.bugs['unstable'][pkg] > 0: - excuse.addhtml("%s (%s) is (less) buggy! (%d <= %d)" % (pkg, ", ".join(pkgs[pkg]), pkg, self.bugs['unstable'][pkg], self.bugs['testing'][pkg])) + excuse.addhtml("%s (%s) is (less) buggy! 
(%d <= %d)" % \ + (pkg, ", ".join(pkgs[pkg]), pkg, self.bugs['unstable'][pkg], self.bugs['testing'][pkg])) - if not update_candidate and self.hints["force"].has_key(src) and self.same_source(source_u['version'], self.hints["force"][src][0]) : + # check if there is a `force' hint for this package, which allows it to go in even if it is not updateable + if not update_candidate and self.hints["force"].has_key(src) and \ + self.same_source(source_u['version'], self.hints["force"][src][0]): excuse.dontinvalidate = 1 excuse.addhtml("Should ignore, but forced by %s" % (self.hints["force"][src][1])) update_candidate = True + # if the suite is testing-proposed-updates, the package needs an explicit approval in order to go in if suite == "tpu": if self.approvals.has_key("%s_%s" % (src, source_u['version'])): excuse.addhtml("Approved by %s" % approvals["%s_%s" % (src, source_u['version'])]) @@ -599,8 +952,10 @@ class Britney: excuse.addhtml("NEEDS APPROVAL BY RM") update_candidate = False + # if the package can be updated, it is a valid candidate if update_candidate: excuse.addhtml("Valid candidate") + # else it won't be considered else: excuse.addhtml("Not considered") @@ -608,6 +963,11 @@ class Britney: return update_candidate def reversed_exc_deps(self): + """Reverse the excuses dependencies + + This method returns a dictionary where the keys are the package names + and the values are the excuse names which depend on it. + """ res = {} for exc in self.excuses: for d in exc.deps: @@ -616,22 +976,39 @@ class Britney: return res def invalidate_excuses(self, valid, invalid): - i = 0 + """Invalidate impossible excuses + + This method invalidates the impossible excuses, which depend + on invalid excuses. The two parameters contains the list of + `valid' and `invalid' excuses. 
+ """ + # build a lookup-by-name map exclookup = {} for e in self.excuses: exclookup[e.name] = e + + # build the reverse dependencies revdeps = self.reversed_exc_deps() + + # loop on the invalid excuses + i = 0 while i < len(invalid): + # if there is no reverse dependency, skip the item if not revdeps.has_key(invalid[i]): i += 1 continue + # if there dependency can be satisfied by a testing-proposed-updates excuse, skip the item if (invalid[i] + "_tpu") in valid: i += 1 continue + # loop on the reverse dependencies for x in revdeps[invalid[i]]: + # if the item is valid and it is marked as `dontinvalidate', skip the item if x in valid and exclookup[x].dontinvalidate: continue + # otherwise, invalidate the dependency and mark as invalidated and + # remove the depending excuses exclookup[x].invalidate_dep(invalid[i]) if x in valid: p = valid.index(x) @@ -640,45 +1017,60 @@ class Britney: exclookup[x].addhtml("Not considered") i = i + 1 - def main(self): - """Main method, entry point for the analisys""" + def write_excuses(self): + """Produce and write the update excuses + This method handles the update excuses generation: the packages are + looked to determine whether they are valid candidates. For the details + of this procedure, please refer to the module docstring. 
+ """ + + # this list will contain the packages which are valid candidates; + # if a package is going to be removed, it will have a "-" prefix upgrade_me = [] - # Packages to be removed + # for every source package in testing, check if it should be removed for pkg in self.sources['testing']: if self.should_remove_source(pkg): upgrade_me.append("-" + pkg) - # Packages to be upgraded from unstable + # for every source package in unstable check if it should be upgraded for pkg in self.sources['unstable']: + # if the source package is already present in testing, + # check if it should be upgraded for every binary package if self.sources['testing'].has_key(pkg): for arch in self.options.architectures: if self.should_upgrade_srcarch(pkg, arch, 'unstable'): upgrade_me.append("%s/%s" % (pkg, arch)) + # check if the source package should be upgraded if self.should_upgrade_src(pkg, 'unstable'): upgrade_me.append(pkg) - # Packages to be upgraded from testing-proposed-updates + # for every source package in testing-proposed-updates, check if it should be upgraded for pkg in self.sources['tpu']: + # if the source package is already present in testing, + # check if it should be upgraded for every binary package if self.sources['testing'].has_key(pkg): for arch in self.options.architectures: if self.should_upgrade_srcarch(pkg, arch, 'tpu'): upgrade_me.append("%s/%s_tpu" % (pkg, arch)) + # check if the source package should be upgraded if self.should_upgrade_src(pkg, 'tpu'): upgrade_me.append("%s_tpu" % pkg) - # Process 'remove' hints + # process the `remove' hints, if the given package is not yet in upgrade_me for src in self.hints["remove"].keys(): if src in upgrade_me: continue if ("-"+src) in upgrade_me: continue if not self.sources['testing'].has_key(src): continue + # check if the version specified in the hint is the same of the considered package tsrcv = self.sources['testing'][src]['version'] if not self.same_source(tsrcv, self.hints["remove"][src][0]): continue + # add 
the removal of the package to upgrade_me and build a new excuse upgrade_me.append("-%s" % (src)) excuse = Excuse("-%s" % (src)) excuse.set_vers(tsrcv, None) @@ -686,20 +1078,20 @@ class Britney: excuse.addhtml("Package is broken, will try to remove") self.excuses.append(excuse) - # Sort excuses by daysold and name + # sort the excuses by daysold and name self.excuses.sort(lambda x, y: cmp(x.daysold, y.daysold) or cmp(x.name, y.name)) - # Extract unconsidered packages + # extract the not considered packages, which are in the excuses but not in upgrade_me unconsidered = [e.name for e in self.excuses if e.name not in upgrade_me] - # Invalidate impossible excuses + # invalidate impossible excuses for e in self.excuses: for d in e.deps: if d not in upgrade_me and d not in unconsidered: e.addhtml("Unpossible dep: %s -> %s" % (e.name, d)) self.invalidate_excuses(upgrade_me, unconsidered) - # Write excuses + # write excuses to the output file f = open(self.options.excuses_output, 'w') f.write("\n") f.write("excuses...") @@ -710,21 +1102,14 @@ class Britney: f.write("
  • %s" % e.html()) f.write("\n") f.close() - del self.excuses - - # Some examples ... - # print self.sources['testing']['zsh-beta']['version'] - # print self.sources['unstable']['zsh-beta']['version'] - # print self.urgencies['zsh-beta'] - # Which packages depend on passwd? - # for i in self.binaries['testing']['i386'][0]['passwd']['rdepends']: - # print i - # Which packages provide mysql-server? - # for i in self.binaries['testing']['i386'][1]['mysql-server']: - # print i - # Which binary packages are build from php4 testing source package? - # print self.sources['testing']['php4']['binaries'] + def main(self): + """Main method + + This is the entry point for the class: it includes the list of calls + for the member methods which will produce the output files. + """ + self.write_excuses() if __name__ == '__main__': Britney().main() diff --git a/doc/doxygen.conf b/doc/doxygen.conf new file mode 100644 index 0000000..d2989e3 --- /dev/null +++ b/doc/doxygen.conf @@ -0,0 +1,1237 @@ +# Doxyfile 1.4.6 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project +# +# All text after a hash (#) is considered a comment and will be ignored +# The format is: +# TAG = value [value, ...] +# For lists items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (" ") + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded +# by quotes) that should identify the project. + +PROJECT_NAME = briteny + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. +# This could be handy for archiving the generated documentation or +# if some version control system is used. 
+ +PROJECT_NUMBER = 2.0.alpha1 + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) +# base path where the generated documentation will be put. +# If a relative path is entered, it will be relative to the location +# where doxygen was started. If left blank the current directory will be used. + +OUTPUT_DIRECTORY = doc/ + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create +# 4096 sub-directories (in 2 levels) under the output directory of each output +# format and will distribute the generated files over these directories. +# Enabling this option can be useful when feeding doxygen a huge amount of +# source files, where putting all generated files in the same directory would +# otherwise cause performance problems for the file system. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# The default language is English, other supported languages are: +# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, +# Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese, +# Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian, +# Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, +# Swedish, and Ukrainian. + +OUTPUT_LANGUAGE = English + +# This tag can be used to specify the encoding used in the generated output. +# The encoding is not always determined by the language that is chosen, +# but also whether or not the output is meant for Windows or non-Windows users. +# In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES +# forces the Windows encoding (this is the default for the Windows binary), +# whereas setting the tag to NO uses a Unix-style encoding (the default for +# all platforms other than Windows). 
+ +USE_WINDOWS_ENCODING = NO + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is +# used as the annotated text. Otherwise, the brief description is used as-is. +# If left blank, the following values are used ("$name" is automatically +# replaced with the name of the entity): "The $name class" "The $name widget" +# "The $name file" "is" "provides" "specifies" "contains" +# "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. 
+ +FULL_PATH_NAMES = YES + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the +# path to strip. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful is your file systems +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like the Qt-style comments (thus requiring an +# explicit @brief command for a brief description. + +JAVADOC_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the DETAILS_AT_TOP tag is set to YES then Doxygen +# will output the detailed description near the top, like JavaDoc. 
+# If set to NO, the detailed description appears after the member +# documentation. + +DETAILS_AT_TOP = NO + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce +# a new page for each member. If set to NO, the documentation of a member will +# be part of the file/class/namespace that contains it. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 8 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C +# sources only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java +# sources only. Doxygen will then generate output that is more tailored for Java. +# For instance, namespaces will be presented as packages, qualified scopes +# will look different, etc. + +OPTIMIZE_OUTPUT_JAVA = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) 
but do not want to +# include (a tag file for) the STL sources as input, then you should +# set this tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. +# func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. + +BUILTIN_STL_SUPPORT = NO + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. + +SUBGROUPING = YES + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. 
+ +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. +# If set to NO (the default) only methods in the interface are included. + +EXTRACT_LOCAL_METHODS = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. 
+# Set it to YES to include the internal documentation. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the +# alphabetical list. 
+ +SORT_BY_SCOPE_NAME = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug +# commands in the documentation. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if sectionname ... \endif. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or define consists of for it to appear in +# the documentation. If the initializer consists of more lines than specified +# here it will be hidden. Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and defines in the +# documentation can be controlled using \showinitializer or \hideinitializer +# command in the documentation regardless of this setting. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the +# list will mention the files that were used to generate the documentation. + +SHOW_USED_FILES = YES + +# If the sources in your project are distributed over multiple directories +# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy +# in the documentation. The default is NO. 
+ +SHOW_DIRECTORIES = NO + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from the +# version control system). Doxygen will invoke the program by executing (via +# popen()) the command , where is the value of +# the FILE_VERSION_FILTER tag, and is the name of an input file +# provided by doxygen. Whatever the program writes to standard output +# is used as the file version. See the manual for examples. + +FILE_VERSION_FILTER = + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. + +WARNINGS = YES + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled. + +WARN_IF_UNDOCUMENTED = YES + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be abled to get warnings for +# functions that are documented, but have no documentation for their parameters +# or return value. If set to NO (the default) doxygen will only warn about +# wrong or incomplete parameter documentation, but not about the absence of +# documentation. 
+ +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text. Optionally the format may contain +# $version, which will be replaced by the version of the file (if it could +# be obtained via FILE_VERSION_FILTER) + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT = . + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx +# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py + +FILE_PATTERNS = *.py + +# The RECURSIVE tag can be used to turn specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should +# excluded from the INPUT source files. 
This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used select whether or not files or +# directories that are symbolic links (a Unix filesystem feature) are excluded +# from the input. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. Note that the wildcards are matched +# against the file with absolute path, so to exclude all test directories +# for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain image that are included in the documentation (see +# the \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command , where +# is the value of the INPUT_FILTER tag, and is the name of an +# input file. 
Doxygen will then use the output that the filter program writes +# to standard output. If FILTER_PATTERNS is specified, this tag will be +# ignored. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: +# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further +# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER +# is applied to all files. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). + +FILTER_SOURCE_FILES = NO + +#--------------------------------------------------------------------------- +# configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated. Documented entities will be cross-referenced with these sources. +# Note: To get rid of all source code in the generated output, make sure also +# VERBATIM_HEADERS is set to NO. + +SOURCE_BROWSER = YES + +# Setting the INLINE_SOURCES tag to YES will include the body +# of functions and classes directly in the documentation. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code +# fragments. Normal C and C++ comments will always remain visible. + +STRIP_CODE_COMMENTS = NO + +# If the REFERENCED_BY_RELATION tag is set to YES (the default) +# then for each documented function all documented +# functions referencing it will be listed. 
+ +REFERENCED_BY_RELATION = YES + +# If the REFERENCES_RELATION tag is set to YES (the default) +# then for each documented function all documented entities +# called/used by that function will be listed. + +REFERENCES_RELATION = YES + +# If the USE_HTAGS tag is set to YES then the references to source code +# will point to the HTML generated by the htags(1) tool instead of doxygen +# built-in source browser. The htags tool is part of GNU's global source +# tagging system (see http://www.gnu.org/software/global/global.html). You +# will need version 4.8.6 or higher. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen +# will generate a verbatim copy of the header file for each class for +# which an include is specified. Set to NO to disable this. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index +# of all compounds will be generated. Enable this if the project +# contains a lot of classes, structs, unions or interfaces. + +ALPHABETICAL_INDEX = NO + +# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then +# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns +# in which this list will be split (can be a number in the range [1..20]) + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. +# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# should be ignored while generating the index headers. 
+ +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If the tag is left blank doxygen +# will generate a default style sheet. Note that doxygen will try to copy +# the style sheet file to the HTML output directory, so don't put your own +# stylesheet in the HTML output directory as well, or it will be erased! + +HTML_STYLESHEET = + +# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, +# files or namespaces will be aligned in HTML using tables. If set to +# NO a bullet list will be used. 
+ +HTML_ALIGN_MEMBERS = YES + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compressed HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output directory. + +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. + +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). + +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. + +TOC_EXPAND = NO + +# The DISABLE_INDEX tag can be used to turn on/off the condensed index at +# top of each HTML page. The value NO (the default) enables the index and +# the value YES disables it. + +DISABLE_INDEX = NO + +# This tag can be used to set the number of enum values (range [1..20]) +# that doxygen will group on one line in the generated HTML documentation. 
+ +ENUM_VALUES_PER_LINE = 4 + +# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be +# generated containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, +# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are +# probably better off using the HTML help feature. + +GENERATE_TREEVIEW = NO + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. + +TREEVIEW_WIDTH = 250 + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate Latex output. + +GENERATE_LATEX = YES + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. 
Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES = 
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER = 
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = NO
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = NO
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors. 
+ +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `rtf' will be used as the default path. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. +# Note: wordpad (write) and others do not support links. + +RTF_HYPERLINKS = NO + +# Load stylesheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. + +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = NO + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. 
+ +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. + +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. + +GENERATE_XML = NO + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path. + +XML_OUTPUT = xml + +# The XML_SCHEMA tag can be used to specify an XML schema, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_SCHEMA = + +# The XML_DTD tag can be used to specify an XML DTD, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_DTD = + +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that +# enabling this will significantly increase the size of the XML output. 
+ +XML_PROGRAMLISTING = YES + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. + +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. This is useful +# if you want to understand what is going on. On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller +# and Perl will parse it just the same. + +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. +# This is useful so different doxyrules.make files included by the same +# Makefile don't overwrite each other's variables. 
+ +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include +# files. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled +# way by setting EXPAND_ONLY_PREDEF to YES. + +MACRO_EXPANSION = NO + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the +# PREDEFINED and EXPAND_AS_DEFINED tags. + +EXPAND_ONLY_PREDEF = NO + +# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files +# in the INCLUDE_PATH (see below) will be search if a #include is found. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor. + +INCLUDE_PATH = + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will +# be used. + +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. 
To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. + +PREDEFINED = + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition. + +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then +# doxygen's preprocessor will remove all function-like macros that are alone +# on a line, have an all uppercase name, and do not end with a semicolon. Such +# function macros are typically used for boiler-plate code, and will confuse +# the parser if not removed. + +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES option can be used to specify one or more tagfiles. +# Optionally an initial location of the external documentation +# can be added for each tagfile. The format of a tag file without +# this location is as follows: +# TAGFILES = file1 file2 ... +# Adding location for the tag files is done as follows: +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where "loc1" and "loc2" can be relative or absolute paths or +# URLs. If a location is present for each tag, the installdox tool +# does not have to be run to correct the links. +# Note that each tag file must have a unique name +# (where the name does NOT include the path) +# If a tag file is not located in the directory in which doxygen +# is run, you must also specify the path to the tagfile here. + +TAGFILES = + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create +# a tag file that is based on the input files it reads. 
+ +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES all external classes will be listed +# in the class index. If set to NO only the inherited external classes +# will be listed. + +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will +# be listed. + +EXTERNAL_GROUPS = YES + +# The PERL_PATH should be the absolute path and name of the perl script +# interpreter (i.e. the result of `which perl'). + +PERL_PATH = /usr/bin/perl + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will +# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base +# or super classes. Setting the tag to NO turns the diagrams off. Note that +# this option is superseded by the HAVE_DOT option below. This is only a +# fallback. It is recommended to install and use dot, since it yields more +# powerful graphs. + +CLASS_DIAGRAMS = YES + +# If set to YES, the inheritance and collaboration graphs will hide +# inheritance and usage relations if the target is undocumented +# or is not a class. + +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. This tool is part of Graphviz, a graph visualization +# toolkit from AT&T and Lucent Bell Labs. The other options in this section +# have no effect if this option is set to NO (the default) + +HAVE_DOT = NO + +# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect inheritance relations. Setting this tag to YES will force the +# the CLASS_DIAGRAMS tag to NO. 
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will
+# generate a call dependency graph for every global function or class method.
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will show a graphical hierarchy of all classes instead of a textual one. 
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, or gif
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH = 
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS = 
+
+# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+MAX_DOT_GRAPH_WIDTH = 1024
+
+# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allowed height
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+MAX_DOT_GRAPH_HEIGHT = 1024
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lay further from the root node will be omitted. 
Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that a graph may be further truncated if the graph's
+# image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH
+# and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default),
+# the graph is not depth-constrained.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, which results in a white background.
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be
+# used. If set to NO the values of all tags below this one will be ignored. 
+ +SEARCHENGINE = NO diff --git a/doc/html/annotated.html b/doc/html/annotated.html new file mode 100644 index 0000000..b0a7af3 --- /dev/null +++ b/doc/html/annotated.html @@ -0,0 +1,29 @@ + + +briteny: Class List + + + + + + +

    briteny Class List

    Here are the classes, structs, unions and interfaces with brief descriptions: + + +
    britney.Britney
    excuse.Excuse
    +
    Generated on Sat Jun 24 18:50:20 2006 for briteny by  + +doxygen 1.4.6
    + + diff --git a/doc/html/britney_8py-source.html b/doc/html/britney_8py-source.html new file mode 100644 index 0000000..025bc84 --- /dev/null +++ b/doc/html/britney_8py-source.html @@ -0,0 +1,1116 @@ + + +briteny: britney.py Source File + + + + + +

    britney.py

    00001 #!/usr/bin/env python2.4
    +00002 # -*- coding: utf-8 -*-
    +00003 
    +00004 # Copyright (C) 2001-2004 Anthony Towns <ajt@debian.org>
    +00005 #                         Andreas Barth <aba@debian.org>
    +00006 #                         Fabio Tranchitella <kobold@debian.org>
    +00007 
    +00008 # This program is free software; you can redistribute it and/or modify
    +00009 # it under the terms of the GNU General Public License as published by
    +00010 # the Free Software Foundation; either version 2 of the License, or
    +00011 # (at your option) any later version.
    +00012 
    +00013 # This program is distributed in the hope that it will be useful,
    +00014 # but WITHOUT ANY WARRANTY; without even the implied warranty of
    +00015 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    +00016 # GNU General Public License for more details.
    +00017 
    +00018 import os
    +00019 import re
    +00020 import sys
    +00021 import string
    +00022 import time
    +00023 import optparse
    +00024 
    +00025 import apt_pkg
    +00026 
    +00027 from excuse import Excuse
    +00028 
    +00029 __author__ = 'Fabio Tranchitella'
    +00030 __version__ = '2.0.alpha1'
    +00031 
    +00032 
    +00033 class Britney:
    +00034     """Britney, the debian testing updater script
    +00035     
    +00036     This is the script that updates the testing_ distribution. It is executed
    +00037     each day after the installation of the updated packages. It generates the 
    +00038     `Packages' files for the testing distribution, but it does so in an
    +00039     intelligent manner; it try to avoid any inconsistency and to use only
    +00040     non-buggy packages.
    +00041 
    +00042     For more documentation on this script, please read the Developers Reference.
    +00043     """
    +00044 
    +00045     HINTS_STANDARD = ("easy", "hint", "remove", "block", "unblock", "urgent", "approve")
    +00046     HINTS_ALL = ("force", "force-hint", "block-all") + HINTS_STANDARD
    +00047 
    +00048     def __init__(self):
    +00049         """Class constructor
    +00050 
    +00051         This method initializes and populates the data lists, which contain all
    +00052         the information needed by the other methods of the class.
    +00053         """
    +00054         self.date_now = int(((time.time() / (60*60)) - 15) / 24)
    +00055 
    +00056         # parse the command line arguments
    +00057         self.__parse_arguments()
    +00058 
    +00059         # initialize the apt_pkg back-end
    +00060         apt_pkg.init()
    +00061 
    +00062         # read the source and binary packages for the involved distributions
    +00063         self.sources = {'testing': self.read_sources(self.options.testing),
    +00064                         'unstable': self.read_sources(self.options.unstable),
    +00065                         'tpu': self.read_sources(self.options.tpu),}
    +00066         self.binaries = {'testing': {}, 'unstable': {}, 'tpu': {}}
    +00067         for arch in self.options.architectures:
    +00068             self.binaries['testing'][arch] = self.read_binaries(self.options.testing, "testing", arch)
    +00069             self.binaries['unstable'][arch] = self.read_binaries(self.options.unstable, "unstable", arch)
    +00070             self.binaries['tpu'][arch] = self.read_binaries(self.options.tpu, "tpu", arch)
    +00071 
    +00072         # read the release-critical bug summaries for testing and unstable
    +00073         self.bugs = {'unstable': self.read_bugs(self.options.unstable),
    +00074                      'testing': self.read_bugs(self.options.testing),}
    +00075         self.normalize_bugs()
    +00076 
    +00077         # read additional data
    +00078         self.dates = self.read_dates(self.options.testing)
    +00079         self.urgencies = self.read_urgencies(self.options.testing)
    +00080         self.approvals = self.read_approvals(self.options.tpu)
    +00081         self.hints = self.read_hints(self.options.unstable)
    +00082         self.excuses = []
    +00083 
    +00084     def __parse_arguments(self):
    +00085         """Parse the command line arguments
    +00086 
    +00087         This method parses and initializes the command line arguments.
    +00088         While doing so, it preprocesses some of the options to be converted
    +00089         in a suitable form for the other methods of the class.
    +00090         """
    +00091         # initialize the parser
    +00092         self.parser = optparse.OptionParser(version="%prog")
    +00093         self.parser.add_option("-v", "", action="count", dest="verbose", help="enable verbose output")
    +00094         self.parser.add_option("-c", "--config", action="store", dest="config",
    +00095                           default="/etc/britney.conf", help="path for the configuration file")
    +00096         (self.options, self.args) = self.parser.parse_args()
    +00097 
    +00098         # if the configuration file exists, than read it and set the additional options
    +00099         if not os.path.isfile(self.options.config):
    +00100             self.__log("Unable to read the configuration file (%s), exiting!" % self.options.config, type="E")
    +00101             sys.exit(1)
    +00102 
    +00103         # minimum days for unstable-testing transition and the list of hints
    +00104         # are handled as an ad-hoc case
    +00105         self.MINDAYS = {}
    +00106         self.HINTS = {}
    +00107         for k, v in [map(string.strip,r.split('=', 1)) for r in file(self.options.config) if '=' in r and not r.strip().startswith('#')]:
    +00108             if k.startswith("MINDAYS_"):
    +00109                 self.MINDAYS[k.split("_")[1].lower()] = int(v)
    +00110             elif k.startswith("HINTS_"):
    +00111                 self.HINTS[k.split("_")[1].lower()] = \
    +00112                     reduce(lambda x,y: x+y, [hasattr(self, "HINTS_" + i) and getattr(self, "HINTS_" + i) or (i,) for i in v.split()])
    +00113             else:
    +00114                 setattr(self.options, k.lower(), v)
    +00115 
    +00116         # Sort the architecture list
    +00117         allarches = sorted(self.options.architectures.split())
    +00118         arches = [x for x in allarches if x in self.options.nobreakall_arches]
    +00119         arches += [x for x in allarches if x not in arches and x not in self.options.fucked_arches]
    +00120         arches += [x for x in allarches if x not in arches and x not in self.options.break_arches]
    +00121         arches += [x for x in allarches if x not in arches]
    +00122         self.options.architectures = arches
    +00123 
    +00124     def __log(self, msg, type="I"):
    +00125         """Print info messages according to verbosity level
    +00126         
    +00127         An easy-and-simple log method which prints messages to the standard
    +00128         output. The type parameter controls the urgency of the message, and
    +00129         can be equal to `I' for `Information', `W' for `Warning' and `E' for
    +00130         `Error'. Warnings and errors are always printed, and information are
    +00131         printed only if the verbose logging is enabled.
    +00132         """
    +00133         if self.options.verbose or type in ("E", "W"):
    +00134             print "%s: [%s] - %s" % (type, time.asctime(), msg)
    +00135 
    +00136     # Data reading/writing methods
    +00137     # ----------------------------
    +00138 
    +00139     def read_sources(self, basedir):
    +00140         """Read the list of source packages from the specified directory
    +00141         
    +00142         The source packages are read from the `Sources' file within the
    +00143         directory specified as `basedir' parameter. Considering the
    +00144         large amount of memory needed, not all the fields are loaded
    +00145         in memory. The available fields are Version, Maintainer and Section.
    +00146 
    +00147         The method returns a list where every item represents a source
    +00148         package as a dictionary.
    +00149         """
    +00150         sources = {}
    +00151         package = None
    +00152         filename = os.path.join(basedir, "Sources")
    +00153         self.__log("Loading source packages from %s" % filename)
    +00154         packages = apt_pkg.ParseTagFile(open(filename))
    +00155         while packages.Step():
    +00156             pkg = packages.Section.get('Package')
    +00157             sources[pkg] = {'binaries': [],
    +00158                             'version': packages.Section.get('Version'),
    +00159                             'maintainer': packages.Section.get('Maintainer'),
    +00160                             'section': packages.Section.get('Section'),
    +00161                             }
    +00162         return sources
    +00163 
    +00164     def read_binaries(self, basedir, distribution, arch):
    +00165         """Read the list of binary packages from the specified directory
    +00166         
    +00167         The binary packages are read from the `Packages_${arch}' files
    +00168         within the directory specified as `basedir' parameter, replacing
    +00169         ${arch} with the value of the arch parameter. Considering the
    +00170         large amount of memory needed, not all the fields are loaded
    +00171         in memory. The available fields are Version, Source, Pre-Depends,
    +00172         Depends, Conflicts, Provides and Architecture.
    +00173         
    +00174         After reading the packages, reverse dependencies are computed
    +00175         and saved in the `rdepends' keys, and the `Provides' field is
    +00176         used to populate the virtual packages list.
    +00177 
    +00178         The dependencies are parsed with the apt.pkg.ParseDepends method,
    +00179         and they are stored both as the format of its return value and
    +00180         text.
    +00181 
    +00182         The method returns a tuple. The first element is a list where
    +00183         every item represents a binary package as a dictionary; the second
    +00184         element is a dictionary which maps virtual packages to real
    +00185         packages that provide it.
    +00186         """
    +00187 
    +00188         packages = {}
    +00189         provides = {}
    +00190         package = None
    +00191         filename = os.path.join(basedir, "Packages_%s" % arch)
    +00192         self.__log("Loading binary packages from %s" % filename)
    +00193         Packages = apt_pkg.ParseTagFile(open(filename))
    +00194         while Packages.Step():
    +00195             pkg = Packages.Section.get('Package')
    +00196             version = Packages.Section.get('Version')
    +00197             dpkg = {'rdepends': [],
    +00198                     'version': version,
    +00199                     'source': pkg, 
    +00200                     'source-ver': version,
    +00201                     'pre-depends': Packages.Section.get('Pre-Depends'),
    +00202                     'depends': Packages.Section.get('Depends'),
    +00203                     'conflicts': Packages.Section.get('Conflicts'),
    +00204                     'provides': Packages.Section.get('Provides'),
    +00205                     'architecture': Packages.Section.get('Architecture'),
    +00206                     }
    +00207 
    +00208             # retrieve the name and the version of the source package
    +00209             source = Packages.Section.get('Source')
    +00210             if source:
    +00211                 dpkg['source'] = source.split(" ")[0]
    +00212                 if "(" in source:
    +00213                     dpkg['source-ver'] = source.split("(")[1].split(")")[0]
    +00214 
    +00215             # if the source package is available in the distribution, then register this binary package
    +00216             if dpkg['source'] in self.sources[distribution]:
    +00217                 self.sources[distribution][dpkg['source']]['binaries'].append(pkg + "/" + arch)
    +00218             # if the source package doesn't exist, create a fake one
    +00219             else:
    +00220                 self.sources[distribution][dpkg['source']] = {'binaries': [pkg + "/" + arch],
    +00221                     'version': dpkg['source-ver'], 'maintainer': None, 'section': None, 'fake': True}
    +00222 
    +00223             # register virtual packages and real packages that provide them
    +00224             if dpkg['provides']:
    +00225                 parts = map(string.strip, dpkg['provides'].split(","))
    +00226                 for p in parts:
    +00227                     try:
    +00228                         provides[p].append(pkg)
    +00229                     except KeyError:
    +00230                         provides[p] = [pkg]
    +00231             del dpkg['provides']
    +00232 
    +00233             # append the resulting dictionary to the package list
    +00234             packages[pkg] = dpkg
    +00235 
    +00236         # loop again on the list of packages to register reverse dependencies
    +00237         for pkg in packages:
    +00238             dependencies = []
    +00239 
    +00240             # analyze dependencies
    +00241             if packages[pkg]['depends']:
    +00242                 packages[pkg]['depends-txt'] = packages[pkg]['depends']
    +00243                 packages[pkg]['depends'] = apt_pkg.ParseDepends(packages[pkg]['depends'])
    +00244                 dependencies.extend(packages[pkg]['depends'])
    +00245 
    +00246             # analyze pre-dependencies
    +00247             if packages[pkg]['pre-depends']:
    +00248                 packages[pkg]['pre-depends-txt'] = packages[pkg]['pre-depends']
    +00249                 packages[pkg]['pre-depends'] = apt_pkg.ParseDepends(packages[pkg]['pre-depends'])
    +00250                 dependencies.extend(packages[pkg]['pre-depends'])
    +00251 
    +00252             # register the list of the dependencies for the depending packages
    +00253             for p in dependencies:
    +00254                 for a in p:
    +00255                     if a[0] not in packages: continue
    +00256                     packages[a[0]]['rdepends'].append((pkg, a[1], a[2]))
    +00257 
    +00258         # return a tuple with the list of real and virtual packages
    +00259         return (packages, provides)
    +00260 
    def read_bugs(self, basedir):
        """Read the release critical bug summary from the specified directory

        The RC bug summaries are read from the `Bugs' file within the
        directory specified by the `basedir' parameter; each row has the
        format:

        <package-name> <count-of-rc-bugs>

        Returns a dictionary mapping a binary package name to the number
        of open RC bugs for it.
        """
        filename = os.path.join(basedir, "Bugs")
        self.__log("Loading RC bugs count from %s" % filename)
        bugs = {}
        for line in open(filename):
            fields = line.split()
            if len(fields) != 2:
                continue
            name, count = fields
            try:
                bugs[name] = int(count)
            except ValueError:
                self.__log("Bugs, unable to parse \"%s\"" % line, type="E")
        return bugs
    +00284 
    def __maxver(self, pkg, dist):
        """Return the highest version for the given package name

        Returns None when the specified source package is not available
        in the `dist' distribution.  Otherwise the result is the maximum
        version among the source package itself and its binary packages.
        """
        maxver = None
        if pkg in self.sources[dist]:
            maxver = self.sources[dist][pkg]['version']
        for arch in self.options.architectures:
            binaries = self.binaries[dist][arch][0]
            if pkg not in binaries:
                continue
            binver = binaries[pkg]['version']
            if maxver is None or apt_pkg.VersionCompare(binver, maxver) > 0:
                maxver = binver
        return maxver
    +00302 
    def normalize_bugs(self):
        """Normalize the release critical bug summaries for testing and unstable

        This method modifies the object attribute `bugs' in place and
        returns nothing.
        """
        # consider every package which appears in either bug summary
        for pkg in set(self.bugs['testing'].keys() + self.bugs['unstable'].keys()):

            # make sure the package is present in both summaries
            if pkg not in self.bugs['testing']:
                self.bugs['testing'][pkg] = 0
            elif pkg not in self.bugs['unstable']:
                self.bugs['unstable'][pkg] = 0

            # the maximum version of the package in testing
            maxvert = self.__maxver(pkg, 'testing')

            # nothing to do if the package is absent from testing or the
            # two bug counts already agree
            if maxvert is None or \
               self.bugs['testing'][pkg] == self.bugs['unstable'][pkg]:
                continue

            # the maximum version of the package in unstable
            maxveru = self.__maxver(pkg, 'unstable')

            # skip packages which are not available in unstable
            if maxveru is None:
                continue
            # if testing is at least as recent as unstable, propagate the
            # unstable RC bug count to testing as well
            elif apt_pkg.VersionCompare(maxvert, maxveru) >= 0:
                self.bugs['testing'][pkg] = self.bugs['unstable'][pkg]
    +00337 
    def read_dates(self, basedir):
        """Read the upload dates of the packages from the specified directory

        The upload dates are read from the `Dates' file within the
        directory given by the `basedir' parameter; each row has the
        format:

        <package-name> <version> <date-of-upload>

        Dates are expressed as the number of days since 1970-01-01.

        Returns a dictionary mapping a binary package name to a
        (version, date) tuple.
        """
        filename = os.path.join(basedir, "Dates")
        self.__log("Loading upload data from %s" % filename)
        dates = {}
        for line in open(filename):
            fields = line.split()
            if len(fields) != 3:
                continue
            name, version, day = fields
            try:
                dates[name] = (version, int(day))
            except ValueError:
                self.__log("Dates, unable to parse \"%s\"" % line, type="E")
        return dates
    +00363 
    def read_urgencies(self, basedir):
        """Read the upload urgencies of the packages from the specified directory

        The upload urgencies are read from the `Urgency' file within the
        directory given by the `basedir' parameter; each row has the
        format:

        <package-name> <version> <urgency>

        Returns a dictionary mapping a source package name to the highest
        urgency among the versions of the package which are more recent
        than the one currently in testing.
        """
        filename = os.path.join(basedir, "Urgency")
        self.__log("Loading upload urgencies from %s" % filename)
        urgencies = {}
        # minimum days associated with the default urgency, used as the
        # fallback for unknown urgency names
        default_days = self.MINDAYS[self.options.default_urgency]
        for line in open(filename):
            fields = line.split()
            if len(fields) != 3:
                continue
            name, version, urgency = fields

            # translate the recorded and the new urgency into minimum days
            old_urgency = urgencies.get(name, self.options.default_urgency)
            mindays_old = self.MINDAYS.get(old_urgency, default_days)
            mindays_new = self.MINDAYS.get(urgency, default_days)

            # a lower urgency means more days to wait: keep the current one
            if mindays_old <= mindays_new:
                continue

            # ignore the urgency if testing already has this version or newer
            tsrcv = self.sources['testing'].get(name, None)
            if tsrcv and apt_pkg.VersionCompare(tsrcv['version'], version) >= 0:
                continue

            # ignore the urgency if unstable does not have at least this version
            usrcv = self.sources['unstable'].get(name, None)
            if not usrcv or apt_pkg.VersionCompare(usrcv['version'], version) < 0:
                continue

            # record the stronger urgency for the package
            urgencies[name] = urgency

        return urgencies
    +00408 
    def read_approvals(self, basedir):
        """Read the approval commands from the specified directory

        The approval commands are read from the files within the
        `Approved' directory below `basedir'.  Every file is named after
        the user authorized to submit the approvals it contains, and each
        of its rows has the format:

        <package-name> <version>

        Returns a dictionary whose keys are strings of the form
        "<package-name>_<version>" and whose values are the names of the
        users who submitted the command.
        """
        approvals = {}
        for approver in self.options.approvers.split():
            filename = os.path.join(basedir, "Approved", approver)
            self.__log("Loading approvals list from %s" % filename)
            for line in open(filename):
                fields = line.split()
                if len(fields) == 2:
                    approvals["%s_%s" % (fields[0], fields[1])] = approver
        return approvals
    +00434 
    def read_hints(self, basedir):
        """Read the hint commands from the specified directory

        The hint commands are read from the files within the `Hints'
        directory below `basedir'.  Every file is named after the user
        authorized to submit the hints it contains, and each of its rows
        has the format:

        <command> <package-name>[/<version>]

        Returns a dictionary mapping each command to the list of packages
        it affects.
        """
        hints = dict((cmd, []) for cmd in self.HINTS_ALL)

        for who in self.HINTS.keys():
            filename = os.path.join(basedir, "Hints", who)
            self.__log("Loading hints list from %s" % filename)
            for line in open(filename):
                words = line.strip().split()
                if not words:
                    continue
                cmd = words[0]
                if cmd == 'finished':
                    # nothing after a `finished' line is processed
                    break
                elif cmd not in self.HINTS[who]:
                    # this user is not allowed to give this kind of hint
                    continue
                elif cmd in ("easy", "hint", "force-hint"):
                    hints[cmd].append((who, [w.split("/") for w in words if "/" in w]))
                elif cmd in ("block-all", "block"):
                    hints[cmd].extend([(pkg, who) for pkg in words[1:]])
                elif cmd in ("remove", "approve", "unblock", "force", "urgent"):
                    hints[cmd].extend([(w.split("/")[0], (w.split("/")[1], who)) for w in words if "/" in w])

        # for single-valued commands, collapse the list of hints into a
        # dictionary, warning whenever a later hint overrides an earlier one
        for cmd in ("block", "block-all", "unblock", "force", "urgent", "remove"):
            collapsed = {}
            for pkg, value in hints[cmd]:
                if pkg in collapsed:
                    self.__log("Overriding %s[%s] = %s with %s" % (cmd, pkg, collapsed[pkg], value), type="W")
                collapsed[pkg] = value
            hints[cmd] = collapsed

        return hints
    +00481 
    +00482     # Utility methods for package analysis
    +00483     # ------------------------------------
    +00484 
    def same_source(self, sv1, sv2):
        """Check if two version numbers are built from the same source

        This method returns True if the two version numbers specified as
        parameters refer to the same source version, treating binary-NMU
        suffixes (`+bN', or the older `.0.N'/`.N' revision suffixes) as
        equal to the plain version.  Its main use is to detect binary-NMUs.

        The original implementation mixed int (1/0) and bool returns; the
        result is now consistently boolean, which is backward compatible
        because bool is an int subclass and callers only test truthiness.
        """
        # identical strings are trivially the same source
        if sv1 == sv2:
            return True

        # strip a modern binary-NMU suffix (+bN) from both versions
        m = re.match(r'^(.*)\+b\d+$', sv1)
        if m:
            sv1 = m.group(1)
        m = re.match(r'^(.*)\+b\d+$', sv2)
        if m:
            sv2 = m.group(1)

        if sv1 == sv2:
            return True

        if re.search("-", sv1) or re.search("-", sv2):
            # non-native package: strip the old-style binary-NMU suffixes
            # appended to the Debian revision (`.0.N' or a trailing `.N')
            m = re.match(r'^(.*-[^.]+)\.0\.\d+$', sv1)
            if m:
                sv1 = m.group(1)
            m = re.match(r'^(.*-[^.]+\.[^.]+)\.\d+$', sv1)
            if m:
                sv1 = m.group(1)

            m = re.match(r'^(.*-[^.]+)\.0\.\d+$', sv2)
            if m:
                sv2 = m.group(1)
            m = re.match(r'^(.*-[^.]+\.[^.]+)\.\d+$', sv2)
            if m:
                sv2 = m.group(1)

            return sv1 == sv2
        else:
            # native package: a `.0.N' suffix marks a binary-NMU
            m = re.match(r'^([^-]+)\.0\.\d+$', sv1)
            if m and sv2 == m.group(1):
                return True

            m = re.match(r'^([^-]+)\.0\.\d+$', sv2)
            if m and sv1 == m.group(1):
                return True

            return False
    +00523 
    def get_dependency_solvers(self, block, arch, distribution):
        """Find the packages which satisfy a dependency block

        This method returns the list of packages which satisfy a dependency
        block (as returned by apt_pkg.ParseDepends) for the given
        architecture and distribution.

        Returns a (<satisfied>, <packages>) tuple, where <satisfied> is a
        boolean and <packages> is the list of solving package names.
        """
        binaries = self.binaries[distribution][arch][0]
        provided = self.binaries[distribution][arch][1]
        packages = []

        # for every alternative (package name, version, operator) in the block
        for name, version, op in block:
            # a real package in the given distribution which matches the
            # (possibly versioned) dependency
            if name in binaries:
                real = binaries[name]
                if op == '' and version == '' or apt_pkg.CheckDep(real['version'], op, version):
                    packages.append(name)

            # a virtual package, satisfied by any real package providing it
            if name in provided:
                for prov in provided[name]:
                    # TODO: versioned dependencies on virtual packages are
                    # forbidden by the debian policy and should never be
                    # satisfied; the old britney accepts them, so we do the
                    # same, but at least a warning should be raised.
                    if op == '' and version == '' or apt_pkg.CheckDep(binaries[prov]['version'], op, version):
                        packages.append(prov)
                        break

        return (len(packages) > 0, packages)
    +00561 
    def excuse_unsat_deps(self, pkg, src, arch, suite, excuse):
        """Find unsatisfied dependencies for a binary package

        This method analyzes the dependencies of the binary package specified
        by the parameter `pkg', built from the source package `src', for the
        architecture `arch' within the suite `suite'. If the dependency can't
        be satisfied in testing and/or unstable, it updates the excuse passed
        as parameter.

        The dependency fields checked are Pre-Depends and Depends.
        """
        # retrieve the binary package from the specified suite and arch
        binary_u = self.binaries[suite][arch][0][pkg]

        # analyze the dependency fields (if present)
        for type in ('Pre-Depends', 'Depends'):
            type_key = type.lower()
            if not binary_u[type_key]:
                continue

            # this list will contain the packages that satisfy the dependency
            packages = []

            # a dependency field is a conjunction of blocks, each of which is
            # a disjunction of alternatives; map(None, xs, ys) is the Python 2
            # idiom for zipping two lists while padding the shorter one with
            # None, keeping each parsed block aligned with its textual form
            for block, block_txt in map(None, binary_u[type_key], binary_u[type_key + '-txt'].split(',')):
                # if the block is satisfied in testing, then skip the block
                solved, packages = self.get_dependency_solvers(block, arch, 'testing')
                if solved: continue

                # check if the block can be satisfied in unstable, and list the solving packages
                solved, packages = self.get_dependency_solvers(block, arch, suite)
                packages = [self.binaries[suite][arch][0][p]['source'] for p in packages]

                # if the dependency can be satisfied by the same source package, skip the block:
                # obviously both binary packages will enter testing together
                if src in packages: continue

                # if no package can satisfy the dependency, add this information to the excuse
                if len(packages) == 0:
                    excuse.addhtml("%s/%s unsatisfiable %s: %s" % (pkg, arch, type, block_txt.strip()))

                # for the solving packages, update the excuse to add the dependencies;
                # on break architectures the dependency is only recorded, not enforced
                for p in packages:
                    if arch not in self.options.break_arches.split():
                        excuse.add_dep(p)
                    else:
                        excuse.add_break_dep(p, arch)
    +00609 
    +00610     # Package analysis methods
    +00611     # ------------------------
    +00612 
    def should_remove_source(self, pkg):
        """Check if a source package should be removed from testing

        A source package is removed from the testing distribution when it
        is no longer present in the unstable distribution.

        Returns True if the package can be removed, False otherwise; in
        the former case a new excuse is appended to the object attribute
        `excuses'.
        """
        # nothing to do while the source package is still in unstable
        if pkg in self.sources['unstable']:
            return False

        # otherwise, build an excuse for the removal and return True
        src = self.sources['testing'][pkg]
        excuse = Excuse("-" + pkg)
        excuse.set_vers(src['version'], None)
        if src['maintainer']:
            excuse.set_maint(src['maintainer'].strip())
        if src['section']:
            excuse.set_section(src['section'].strip())
        excuse.addhtml("Valid candidate")
        self.excuses.append(excuse)
        return True
    +00636 
    def should_upgrade_srcarch(self, src, arch, suite):
        """Check if binary packages should be upgraded

        This method checks if the binary packages produced on `arch' by
        the source package `src' in the distribution `suite' should be
        upgraded; this can happen only if they are binary-NMUs for the
        given architecture.

        It returns False when an excuse marking the package as not
        considered has been created; True otherwise.  In both cases any
        excuse produced is appended to the object attribute `excuses'.
        """
        # retrieve the source packages for testing and suite
        source_t = self.sources['testing'][src]
        source_u = self.sources[suite][src]

        # build the common part of the excuse, which will be filled by the code below
        ref = "%s/%s%s" % (src, arch, suite != 'unstable' and "_" + suite or "")
        excuse = Excuse(ref)
        # NOTE(review): both versions passed to set_vers are the testing
        # one -- presumably intentional for binary-NMUs, where the source
        # version itself does not change; confirm against Excuse.set_vers
        excuse.set_vers(source_t['version'], source_t['version'])
        source_u['maintainer'] and excuse.set_maint(source_u['maintainer'].strip())
        source_u['section'] and excuse.set_section(source_u['section'].strip())

        # if there is a `remove' hint and the requested version is the same as the
        # version in testing, then stop here and return False
        if self.hints["remove"].has_key(src) and \
           self.same_source(source_t['version'], self.hints["remove"][src][0]):
            excuse.addhtml("Removal request by %s" % (self.hints["remove"][src][1]))
            excuse.addhtml("Trying to remove package, not update it")
            excuse.addhtml("Not considered")
            self.excuses.append(excuse)
            return False

        # the starting point is that there is nothing wrong and nothing worth doing
        anywrongver = False
        anyworthdoing = False

        # for every binary package produced by this source in unstable for this architecture
        for pkg in sorted(filter(lambda x: x.endswith("/" + arch), source_u['binaries'])):
            pkg_name = pkg.split("/")[0]

            # retrieve the testing (if present) and unstable corresponding binary packages
            binary_t = pkg in source_t['binaries'] and self.binaries['testing'][arch][0][pkg_name] or None
            binary_u = self.binaries[suite][arch][0][pkg_name]

            # this is the source version for the new binary package
            pkgsv = self.binaries[suite][arch][0][pkg_name]['source-ver']

            # if the new binary package is architecture-independent, then skip it
            if binary_u['architecture'] == 'all':
                excuse.addhtml("Ignoring %s %s (from %s) as it is arch: all" % (pkg_name, binary_u['version'], pkgsv))
                continue

            # if the new binary package is not from the same source as the testing one, then skip it
            if not self.same_source(source_t['version'], pkgsv):
                anywrongver = True
                excuse.addhtml("From wrong source: %s %s (%s not %s)" % (pkg_name, binary_u['version'], pkgsv, source_t['version']))
                break

            # find unsatisfied dependencies for the new binary package
            self.excuse_unsat_deps(pkg_name, src, arch, suite, excuse)

            # if the binary is not present in testing, then it is a new binary;
            # in this case, there is something worth doing
            if not binary_t:
                excuse.addhtml("New binary: %s (%s)" % (pkg_name, binary_u['version']))
                anyworthdoing = True
                continue

            # at this point, the binary package is present in testing, so we can compare
            # the versions of the packages ...
            vcompare = apt_pkg.VersionCompare(binary_t['version'], binary_u['version'])

            # ... if updating would mean downgrading, then stop here: there is something wrong
            if vcompare > 0:
                anywrongver = True
                excuse.addhtml("Not downgrading: %s (%s to %s)" % (pkg_name, binary_t['version'], binary_u['version']))
                break
            # ... if updating would mean upgrading, then there is something worth doing
            elif vcompare < 0:
                excuse.addhtml("Updated binary: %s (%s to %s)" % (pkg_name, binary_t['version'], binary_u['version']))
                anyworthdoing = True

        # if there is nothing wrong and either there is something worth doing
        # or the source package is a fake one (i.e. it was synthesized from a
        # binary with no matching source), then check what packages should be
        # removed from testing
        if not anywrongver and (anyworthdoing or self.sources[suite][src].has_key('fake')):
            srcv = self.sources[suite][src]['version']
            ssrc = self.same_source(source_t['version'], srcv)
            # for every binary package produced by this source in testing for this architecture
            for pkg in sorted([x.split("/")[0] for x in self.sources['testing'][src]['binaries'] if x.endswith("/"+arch)]):
                # if the package is architecture-independent, then ignore it
                if self.binaries['testing'][arch][0][pkg]['architecture'] == 'all':
                    excuse.addhtml("Ignoring removal of %s as it is arch: all" % (pkg))
                    continue
                # if the package is not produced by the new source package, then remove it from testing
                if not self.binaries[suite][arch][0].has_key(pkg):
                    tpkgv = self.binaries['testing'][arch][0][pkg]['version']
                    excuse.addhtml("Removed binary: %s %s" % (pkg, tpkgv))
                    if ssrc: anyworthdoing = True

        # if there is nothing wrong and there is something worth doing, this is a valid candidate
        if not anywrongver and anyworthdoing:
            excuse.addhtml("Valid candidate")
            self.excuses.append(excuse)
        # else if there is something worth doing (but something wrong, too) this package won't be considered
        elif anyworthdoing:
            excuse.addhtml("Not considered")
            self.excuses.append(excuse)
            return False

        # otherwise, return True
        return True
    +00750 
    +00751     def should_upgrade_src(self, src, suite):
    +00752         """Check if source package should be upgraded
    +00753 
    +00754         This method checks if a source package should be upgraded. The analisys
    +00755         is performed for the source package specified by the `src' parameter, 
    +00756         checking the architecture `arch' for the distribution `suite'.
    +00757        
    +00758         It returns False if the given package doesn't need to be upgraded,
    +00759         True otherwise. In the former case, a new excuse is appended to
    +00760         the the object attribute excuses.
    +00761         """
    +00762 
    +00763         # retrieve the source packages for testing (if available) and suite
    +00764         source_u = self.sources[suite][src]
    +00765         if src in self.sources['testing']:
    +00766             source_t = self.sources['testing'][src]
    +00767             # if testing and unstable have the same version, then this is a candidate for binary-NMUs only
    +00768             if apt_pkg.VersionCompare(source_t['version'], source_u['version']) == 0:
    +00769                 return False
    +00770         else:
    +00771             source_t = None
    +00772 
    +00773         # build the common part of the excuse, which will be filled by the code below
    +00774         ref = "%s%s" % (src, suite != 'unstable' and "_" + suite or "")
    +00775         excuse = Excuse(ref)
    +00776         excuse.set_vers(source_t and source_t['version'] or None, source_u['version'])
    +00777         source_u['maintainer'] and excuse.set_maint(source_u['maintainer'].strip())
    +00778         source_u['section'] and excuse.set_section(source_u['section'].strip())
    +00779 
    +00780         # the starting point is that we will update the candidate
    +00781         update_candidate = True
    +00782         
    +00783         # if the version in unstable is older, then stop here with a warning in the excuse and return False
    +00784         if source_t and apt_pkg.VersionCompare(source_u['version'], source_t['version']) < 0:
    +00785             excuse.addhtml("ALERT: %s is newer in testing (%s %s)" % (src, source_t['version'], source_u['version']))
    +00786             self.excuses.append(excuse)
    +00787             return False
    +00788 
    +00789         # check if the source package really exists or if it is a fake one
    +00790         if source_u.has_key('fake'):
    +00791             excuse.addhtml("%s source package doesn't exist" % (src))
    +00792             update_candidate = False
    +00793 
    +00794         # retrieve the urgency for the upload, ignoring it if this is a NEW package (not present in testing)
    +00795         urgency = self.urgencies.get(src, self.options.default_urgency)
    +00796         if not source_t and urgency != self.options.default_urgency:
    +00797             excuse.addhtml("Ignoring %s urgency setting for NEW package" % (urgency))
    +00798             urgency = self.options.default_urgency
    +00799 
    +00800         # if there is a `remove' hint and the requested version is the same of the
    +00801         # version in testing, then stop here and return False
    +00802         if self.hints["remove"].has_key(src):
    +00803             if source_t and self.same_source(source_t['version'], self.hints['remove'][src][0]) or \
    +00804                self.same_source(source_u['version'], self.hints['remove'][src][0]):
    +00805                 excuse.addhtml("Removal request by %s" % (self.hints["remove"][src][1]))
    +00806                 excuse.addhtml("Trying to remove package, not update it")
    +00807                 update_candidate = False
    +00808 
    +00809         # check if there is a `block' hint for this package or a `block-all source' hint
    +00810         blocked = None
    +00811         if self.hints["block"].has_key(src):
    +00812             blocked = self.hints["block"][src]
    +00813         elif self.hints["block-all"].has_key("source"):
    +00814             blocked = self.hints["block-all"]["source"]
    +00815 
    +00816         # if the source is blocked, then look for an `unblock' hint; the unblock request
    +00817         # is processed only if the specified version is correct
    +00818         if blocked:
    +00819             unblock = self.hints["unblock"].get(src,(None,None))
    +00820             if unblock[0] != None:
    +00821                 if self.same_source(unblock[0], source_u['version']):
    +00822                     excuse.addhtml("Ignoring request to block package by %s, due to unblock request by %s" % (blocked, unblock[1]))
    +00823                 else:
    +00824                     excuse.addhtml("Unblock request by %s ignored due to version mismatch: %s" % (unblock[1], unblock[0]))
    +00825             else:
    +00826                 excuse.addhtml("Not touching package, as requested by %s (contact debian-release if update is needed)" % (blocked))
    +00827                 update_candidate = False
    +00828 
    +00829         # if the suite is unstable, then we have to check the urgency and the minimum days of
    +00830         # permanence in unstable before updating testing; if the source package is too young,
    +00831         # the check fails and we set update_candidate to False to block the update
    +00832         if suite == 'unstable':
    +00833             if not self.dates.has_key(src):
    +00834                 self.dates[src] = (source_u['version'], self.date_now)
    +00835             elif not self.same_source(self.dates[src][0], source_u['version']):
    +00836                 self.dates[src] = (source_u['version'], self.date_now)
    +00837 
    +00838             days_old = self.date_now - self.dates[src][1]
    +00839             min_days = self.MINDAYS[urgency]
    +00840             excuse.setdaysold(days_old, min_days)
    +00841             if days_old < min_days:
    +00842                 if self.hints["urgent"].has_key(src) and self.same_source(source_u['version'], self.hints["urgent"][src][0]):
    +00843                     excuse.addhtml("Too young, but urgency pushed by %s" % (self.hints["urgent"][src][1]))
    +00844                 else:
    +00845                     update_candidate = False
    +00846 
    +00847         # at this point, we check what is the status of the builds on all the supported architectures
    +00848         # to catch the out-of-date ones
    +00849         pkgs = {src: ["source"]}
    +00850         for arch in self.options.architectures:
    +00851             oodbins = {}
    +00852             # for every binary package produced by this source in the suite for this architecture
    +00853             for pkg in sorted([x.split("/")[0] for x in self.sources[suite][src]['binaries'] if x.endswith("/"+arch)]):
    +00854                 if not pkgs.has_key(pkg): pkgs[pkg] = []
    +00855                 pkgs[pkg].append(arch)
    +00856 
    +00857                 # retrieve the binary package and its source version
    +00858                 binary_u = self.binaries[suite][arch][0][pkg]
    +00859                 pkgsv = binary_u['source-ver']
    +00860 
    +00861                 # if it wasn't builded by the same source, it is out-of-date
    +00862                 if not self.same_source(source_u['version'], pkgsv):
    +00863                     if not oodbins.has_key(pkgsv):
    +00864                         oodbins[pkgsv] = []
    +00865                     oodbins[pkgsv].append(pkg)
    +00866                     continue
    +00867 
    +00868                 # if the package is architecture-dependent or the current arch is `nobreakall'
    +00869                 # find unsatisfied dependencies for the binary package
    +00870                 if binary_u['architecture'] != 'all' or arch in self.options.nobreakall_arches:
    +00871                     self.excuse_unsat_deps(pkg, src, arch, suite, excuse)
    +00872 
    +00873             # if there are out-of-date packages, warn about them in the excuse and set update_candidate
    +00874             # to False to block the update; if the architecture where the package is out-of-date is
    +00875             # in the `fucked_arches' list, then do not block the update
    +00876             if oodbins:
    +00877                 oodtxt = ""
    +00878                 for v in oodbins.keys():
    +00879                     if oodtxt: oodtxt = oodtxt + "; "
    +00880                     oodtxt = oodtxt + "%s (from <a href=\"http://buildd.debian.org/build.php?" \
    +00881                         "arch=%s&pkg=%s&ver=%s\" target=\"_blank\">%s</a>)" % \
    +00882                         (", ".join(sorted(oodbins[v])), arch, src, v, v)
    +00883                 text = "out of date on <a href=\"http://buildd.debian.org/build.php?" \
    +00884                     "arch=%s&pkg=%s&ver=%s\" target=\"_blank\">%s</a>: %s" % \
    +00885                     (arch, src, source_u['version'], arch, oodtxt)
    +00886 
    +00887                 if arch in self.options.fucked_arches:
    +00888                     text = text + " (but %s isn't keeping up, so nevermind)" % (arch)
    +00889                 else:
    +00890                     update_candidate = False
    +00891 
    +00892                 if self.date_now != self.dates[src][1]:
    +00893                     excuse.addhtml(text)
    +00894 
    +00895         # if the source package has no binaries, set update_candidate to False to block the update
    +00896         if len(self.sources[suite][src]['binaries']) == 0:
    +00897             excuse.addhtml("%s has no binaries on any arch" % src)
    +00898             update_candidate = False
    +00899 
    +00900         # if the suite is unstable, then we have to check the release-critical bug counts before
    +00901         # updating testing; if the unstable package have a RC bug count greater than the testing
    +00902         # one,  the check fails and we set update_candidate to False to block the update
    +00903         if suite == 'unstable':
    +00904             for pkg in pkgs.keys():
    +00905                 if not self.bugs['testing'].has_key(pkg):
    +00906                     self.bugs['testing'][pkg] = 0
    +00907                 if not self.bugs['unstable'].has_key(pkg):
    +00908                     self.bugs['unstable'][pkg] = 0
    +00909 
    +00910                 if self.bugs['unstable'][pkg] > self.bugs['testing'][pkg]:
    +00911                     excuse.addhtml("%s (%s) is <a href=\"http://bugs.debian.org/cgi-bin/pkgreport.cgi?" \
    +00912                                    "which=pkg&data=%s&sev-inc=critical&sev-inc=grave&sev-inc=serious\" " \
    +00913                                    "target=\"_blank\">buggy</a>! (%d > %d)" % \
    +00914                                    (pkg, ", ".join(pkgs[pkg]), pkg, self.bugs['unstable'][pkg], self.bugs['testing'][pkg]))
    +00915                     update_candidate = False
    +00916                 elif self.bugs['unstable'][pkg] > 0:
    +00917                     excuse.addhtml("%s (%s) is (less) <a href=\"http://bugs.debian.org/cgi-bin/pkgreport.cgi?" \
    +00918                                    "which=pkg&data=%s&sev-inc=critical&sev-inc=grave&sev-inc=serious\" " \
    +00919                                    "target=\"_blank\">buggy</a>! (%d <= %d)" % \
    +00920                                    (pkg, ", ".join(pkgs[pkg]), pkg, self.bugs['unstable'][pkg], self.bugs['testing'][pkg]))
    +00921 
    +00922         # check if there is a `force' hint for this package, which allows it to go in even if it is not updateable
    +00923         if not update_candidate and self.hints["force"].has_key(src) and \
    +00924            self.same_source(source_u['version'], self.hints["force"][src][0]):
    +00925             excuse.dontinvalidate = 1
    +00926             excuse.addhtml("Should ignore, but forced by %s" % (self.hints["force"][src][1]))
    +00927             update_candidate = True
    +00928 
    +00929         # if the suite is testing-proposed-updates, the package needs an explicit approval in order to go in
    +00930         if suite == "tpu":
    +00931             if self.approvals.has_key("%s_%s" % (src, source_u['version'])):
    +00932                 excuse.addhtml("Approved by %s" % approvals["%s_%s" % (src, source_u['version'])])
    +00933             else:
    +00934                 excuse.addhtml("NEEDS APPROVAL BY RM")
    +00935                 update_candidate = False
    +00936 
    +00937         # if the package can be updated, it is a valid candidate
    +00938         if update_candidate:
    +00939             excuse.addhtml("Valid candidate")
    +00940         # else it won't be considered
    +00941         else:
    +00942             excuse.addhtml("Not considered")
    +00943 
    +00944         self.excuses.append(excuse)
    +00945         return update_candidate
    +00946 
    +00947     def reversed_exc_deps(self):
    +00948         """Reverse the excuses dependencies
    +00949 
    +00950         This method returns a dictionary where the keys are the package names
    +00951         and the values are the excuse names which depend on it.
    +00952         """
    +00953         res = {}
    +00954         for exc in self.excuses:
    +00955             for d in exc.deps:
    +00956                 if not res.has_key(d): res[d] = []
    +00957                 res[d].append(exc.name)
    +00958         return res
    +00959 
    +00960     def invalidate_excuses(self, valid, invalid):
    +00961         """Invalidate impossible excuses
    +00962 
    +00963         This method invalidates the impossible excuses, which depend
    +00964         on invalid excuses. The two parameters contains the list of
    +00965         `valid' and `invalid' excuses.
    +00966         """
    +00967         # build a lookup-by-name map
    +00968         exclookup = {}
    +00969         for e in self.excuses:
    +00970             exclookup[e.name] = e
    +00971 
    +00972         # build the reverse dependencies
    +00973         revdeps = self.reversed_exc_deps()
    +00974 
    +00975         # loop on the invalid excuses
    +00976         i = 0
    +00977         while i < len(invalid):
    +00978             # if there is no reverse dependency, skip the item
    +00979             if not revdeps.has_key(invalid[i]):
    +00980                 i += 1
    +00981                 continue
    +00982             # if there dependency can be satisfied by a testing-proposed-updates excuse, skip the item
    +00983             if (invalid[i] + "_tpu") in valid:
    +00984                 i += 1
    +00985                 continue
    +00986             # loop on the reverse dependencies
    +00987             for x in revdeps[invalid[i]]:
    +00988                 # if the item is valid and it is marked as `dontinvalidate', skip the item
    +00989                 if x in valid and exclookup[x].dontinvalidate:
    +00990                     continue
    +00991 
    +00992                 # otherwise, invalidate the dependency and mark as invalidated and
    +00993                 # remove the depending excuses
    +00994                 exclookup[x].invalidate_dep(invalid[i])
    +00995                 if x in valid:
    +00996                     p = valid.index(x)
    +00997                     invalid.append(valid.pop(p))
    +00998                     exclookup[x].addhtml("Invalidated by dependency")
    +00999                     exclookup[x].addhtml("Not considered")
    +01000             i = i + 1
    +01001  
    +01002     def write_excuses(self):
    +01003         """Produce and write the update excuses
    +01004 
    +01005         This method handles the update excuses generation: the packages are
    +01006         looked to determine whether they are valid candidates. For the details
    +01007         of this procedure, please refer to the module docstring.
    +01008         """
    +01009 
    +01010         # this list will contain the packages which are valid candidates;
    +01011         # if a package is going to be removed, it will have a "-" prefix
    +01012         upgrade_me = []
    +01013 
    +01014         # for every source package in testing, check if it should be removed
    +01015         for pkg in self.sources['testing']:
    +01016             if self.should_remove_source(pkg):
    +01017                 upgrade_me.append("-" + pkg)
    +01018 
    +01019         # for every source package in unstable check if it should be upgraded
    +01020         for pkg in self.sources['unstable']:
    +01021             # if the source package is already present in testing,
    +01022             # check if it should be upgraded for every binary package
    +01023             if self.sources['testing'].has_key(pkg):
    +01024                 for arch in self.options.architectures:
    +01025                     if self.should_upgrade_srcarch(pkg, arch, 'unstable'):
    +01026                         upgrade_me.append("%s/%s" % (pkg, arch))
    +01027 
    +01028             # check if the source package should be upgraded
    +01029             if self.should_upgrade_src(pkg, 'unstable'):
    +01030                 upgrade_me.append(pkg)
    +01031 
    +01032         # for every source package in testing-proposed-updates, check if it should be upgraded
    +01033         for pkg in self.sources['tpu']:
    +01034             # if the source package is already present in testing,
    +01035             # check if it should be upgraded for every binary package
    +01036             if self.sources['testing'].has_key(pkg):
    +01037                 for arch in self.options.architectures:
    +01038                     if self.should_upgrade_srcarch(pkg, arch, 'tpu'):
    +01039                         upgrade_me.append("%s/%s_tpu" % (pkg, arch))
    +01040 
    +01041             # check if the source package should be upgraded
    +01042             if self.should_upgrade_src(pkg, 'tpu'):
    +01043                 upgrade_me.append("%s_tpu" % pkg)
    +01044 
    +01045         # process the `remove' hints, if the given package is not yet in upgrade_me
    +01046         for src in self.hints["remove"].keys():
    +01047             if src in upgrade_me: continue
    +01048             if ("-"+src) in upgrade_me: continue
    +01049             if not self.sources['testing'].has_key(src): continue
    +01050 
    +01051             # check if the version specified in the hint is the same of the considered package
    +01052             tsrcv = self.sources['testing'][src]['version']
    +01053             if not self.same_source(tsrcv, self.hints["remove"][src][0]): continue
    +01054 
    +01055             # add the removal of the package to upgrade_me and build a new excuse
    +01056             upgrade_me.append("-%s" % (src))
    +01057             excuse = Excuse("-%s" % (src))
    +01058             excuse.set_vers(tsrcv, None)
    +01059             excuse.addhtml("Removal request by %s" % (self.hints["remove"][src][1]))
    +01060             excuse.addhtml("Package is broken, will try to remove")
    +01061             self.excuses.append(excuse)
    +01062 
    +01063         # sort the excuses by daysold and name
    +01064         self.excuses.sort(lambda x, y: cmp(x.daysold, y.daysold) or cmp(x.name, y.name))
    +01065 
    +01066         # extract the not considered packages, which are in the excuses but not in upgrade_me
    +01067         unconsidered = [e.name for e in self.excuses if e.name not in upgrade_me]
    +01068 
    +01069         # invalidate impossible excuses
    +01070         for e in self.excuses:
    +01071             for d in e.deps:
    +01072                 if d not in upgrade_me and d not in unconsidered:
    +01073                     e.addhtml("Unpossible dep: %s -> %s" % (e.name, d))
    +01074         self.invalidate_excuses(upgrade_me, unconsidered)
    +01075 
    +01076         # write excuses to the output file
    +01077         f = open(self.options.excuses_output, 'w')
    +01078         f.write("<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n")
    +01079         f.write("<html><head><title>excuses...</title>")
    +01080         f.write("<meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\"></head><body>\n")
    +01081         f.write("<p>Generated: " + time.strftime("%Y.%m.%d %H:%M:%S %z", time.gmtime(time.time())) + "</p>\n")
    +01082         f.write("<ul>\n")
    +01083         for e in self.excuses:
    +01084             f.write("<li>%s" % e.html())
    +01085         f.write("</ul></body></html>\n")
    +01086         f.close()
    +01087 
    +01088     def main(self):
    +01089         """Main method
    +01090         
    +01091         This is the entry point for the class: it includes the list of calls
    +01092         for the member methods which will produce the output files.
    +01093         """
    +01094         self.write_excuses()
    +01095 
    +01096 if __name__ == '__main__':
    +01097     Britney().main()
    +

    Generated on Sat Jun 24 18:50:20 2006 for briteny by  + +doxygen 1.4.6
    + + diff --git a/doc/html/classbritney_1_1Britney-members.html b/doc/html/classbritney_1_1Britney-members.html new file mode 100644 index 0000000..6b03362 --- /dev/null +++ b/doc/html/classbritney_1_1Britney-members.html @@ -0,0 +1,50 @@ + + +briteny: Member List + + + + + + +

    britney.Britney Member List

    This is the complete list of members for britney.Britney, including all inherited members.

    + + + + + + + + + + + + + + + + + + + + + + + + +
    __init__britney.Britney
    __logbritney.Britney
    __maxverbritney.Britney
    __parse_argumentsbritney.Britney
    excuse_unsat_depsbritney.Britney
    get_dependency_solversbritney.Britney
    HINTS_ALLbritney.Britney [static]
    HINTS_STANDARDbritney.Britney [static]
    invalidate_excusesbritney.Britney
    mainbritney.Britney
    normalize_bugsbritney.Britney
    read_approvalsbritney.Britney
    read_binariesbritney.Britney
    read_bugsbritney.Britney
    read_datesbritney.Britney
    read_hintsbritney.Britney
    read_sourcesbritney.Britney
    read_urgenciesbritney.Britney
    reversed_exc_depsbritney.Britney
    same_sourcebritney.Britney
    should_remove_sourcebritney.Britney
    should_upgrade_srcbritney.Britney
    should_upgrade_srcarchbritney.Britney
    write_excusesbritney.Britney


    Generated on Sat Jun 24 18:50:20 2006 for briteny by  + +doxygen 1.4.6
    + + diff --git a/doc/html/classbritney_1_1Britney.html b/doc/html/classbritney_1_1Britney.html new file mode 100644 index 0000000..f170120 --- /dev/null +++ b/doc/html/classbritney_1_1Britney.html @@ -0,0 +1,1179 @@ + + +briteny: britney.Britney Class Reference + + + + + + + +

    britney.Britney Class Reference

    List of all members. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    Public Member Functions

    def __init__
    def __parse_arguments
    def __log
    def read_sources
    def read_binaries
    def read_bugs
    def __maxver
    def normalize_bugs
    def read_dates
    def read_urgencies
    def read_approvals
    def read_hints
    def same_source
    def get_dependency_solvers
    def excuse_unsat_deps
    def should_remove_source
    def should_upgrade_srcarch
    def should_upgrade_src
    def reversed_exc_deps
    def invalidate_excuses
    def write_excuses
    def main

    Static Public Attributes

    +tuple HINTS_STANDARD = ("easy", "hint", "remove", "block", "unblock", "urgent", "approve")
    +tuple HINTS_ALL = ("force", "force-hint", "block-all")
    +

    Detailed Description

    +
    Britney, the debian testing updater script
    +
    +This is the script that updates the testing_ distribution. It is executed
    +each day after the installation of the updated packages. It generates the 
    +`Packages' files for the testing distribution, but it does so in an
    +intelligent manner; it try to avoid any inconsistency and to use only
    +non-buggy packages.
    +
    +For more documentation on this script, please read the Developers Reference.
    +
    +

    + +

    +Definition at line 33 of file britney.py.


    Member Function Documentation

    +

    + + + + +
    + + + + + + + + + +
    def britney.Britney.__init__   self  ) 
    +
    + + + + + +
    +   + + +

    +

    Class constructor
    +
    +This method initializes and populates the data lists, which contain all
    +the information needed by the other methods of the class.
    +
    +

    +Definition at line 39 of file britney.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + +
    def britney.Britney.__log   self,
      msg,
      type = "I"
    +
    + + + + + +
    +   + + +

    +

    Print info messages according to verbosity level
    +
    +An easy-and-simple log method which prints messages to the standard
    +output. The type parameter controls the urgency of the message, and
    +can be equal to `I' for `Information', `W' for `Warning' and `E' for
    +`Error'. Warnings and errors are always printed, and information are
    +printed only if the verbose logging is enabled.
    +
    +

    +Definition at line 115 of file britney.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + +
    def britney.Britney.__maxver   self,
      pkg,
      dist
    +
    + + + + + +
    +   + + +

    +

    Return the maximum version for a given package name
    +
    +This method returns None if the specified source package
    +is not available in the `dist' distribution. If the package
    +exists, then it returns the maximum version between the
    +source package and its binary packages.
    +
    +

    +Definition at line 276 of file britney.py.

    +

    + + + + +
    + + + + + + + + + +
    def britney.Britney.__parse_arguments   self  ) 
    +
    + + + + + +
    +   + + +

    +

    Parse the command line arguments
    +
    +This method parses and initializes the command line arguments.
    +While doing so, it preprocesses some of the options to be converted
    +in a suitable form for the other methods of the class.
    +
    +

    +Definition at line 75 of file britney.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    def britney.Britney.excuse_unsat_deps   self,
      pkg,
      src,
      arch,
      suite,
      excuse
    +
    + + + + + +
    +   + + +

    +

    Find unsatisfied dependencies for a binary package
    +
    +This method analyzes the dependencies of the binary package specified
    +by the parameter `pkg', built from the source package `src', for the
    +architecture `arch' within the suite `suite'. If the dependency can't
    +be satisfied in testing and/or unstable, it updates the excuse passed
    +as parameter.
    +
    +The dependency fields checked are Pre-Depends and Depends.
    +
    +

    +Definition at line 553 of file britney.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    def britney.Britney.get_dependency_solvers   self,
      block,
      arch,
      distribution
    +
    + + + + + +
    +   + + +

    +

    Find the packages which satisfy a dependency block
    +
    +This method returns the list of packages which satisfy a dependency
    +block (as returned by apt_pkg.ParseDepends) for the given architecture
    +and distribution.
    +
    +It returns a tuple with two items: the first is a boolean which is
    +True if the dependency is satisfied, the second is the list of the
    +solving packages.
    +
    +

    +Definition at line 515 of file britney.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + +
    def britney.Britney.invalidate_excuses   self,
      valid,
      invalid
    +
    + + + + + +
    +   + + +

    +

    Invalidate impossible excuses
    +
    +This method invalidates the impossible excuses, which depend
    +on invalid excuses. The two parameters contains the list of
    +`valid' and `invalid' excuses.
    +
    +

    +Definition at line 951 of file britney.py.

    +

    + + + + +
    + + + + + + + + + +
    def britney.Britney.main   self  ) 
    +
    + + + + + +
    +   + + +

    +

    Main method
    +
    +This is the entry point for the class: it includes the list of calls
    +for the member methods which will produce the output files.
    +
    +

    +Definition at line 1079 of file britney.py.

    +

    + + + + +
    + + + + + + + + + +
    def britney.Britney.normalize_bugs   self  ) 
    +
    + + + + + +
    +   + + +

    +

    Normalize the release critical bug summaries for testing and unstable
    +
    +The method doesn't return any value: it directly modifies the
    +object attribute `bugs'.
    +
    +

    +Definition at line 294 of file britney.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + +
    def britney.Britney.read_approvals   self,
      basedir
    +
    + + + + + +
    +   + + +

    +

    Read the approval commands from the specified directory
    +
    +The approval commands are read from the files contained by the 
    +`Approved' directory within the directory specified as `basedir'
    +parameter. The name of the files has to be the same of the
    +authorized users for the approvals.
    +
    +The file contains rows with the format:
    +
    +<package-name> <version>
    +
    +The method returns a dictionary where the key is the binary package
    +name followed by an underscore and the version number, and the value
    +is the user who submitted the command.
    +
    +

    +Definition at line 400 of file britney.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    def britney.Britney.read_binaries   self,
      basedir,
      distribution,
      arch
    +
    + + + + + +
    +   + + +

    +

    Read the list of binary packages from the specified directory
    +
    +The binary packages are read from the `Packages_${arch}' files
    +within the directory specified as `basedir' parameter, replacing
    +${arch} with the value of the arch parameter. Considering the
    +large amount of memory needed, not all the fields are loaded
    +in memory. The available fields are Version, Source, Pre-Depends,
    +Depends, Conflicts, Provides and Architecture.
    +
    +After reading the packages, reverse dependencies are computed
    +and saved in the `rdepends' keys, and the `Provides' field is
    +used to populate the virtual packages list.
    +
    +The dependencies are parsed with the apt.pkg.ParseDepends method,
    +and they are stored both as the format of its return value and
    +text.
    +
    +The method returns a tuple. The first element is a list where
    +every item represents a binary package as a dictionary; the second
    +element is a dictionary which maps virtual packages to real
    +packages that provide it.
    +
    +

    +Definition at line 155 of file britney.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + +
    def britney.Britney.read_bugs   self,
      basedir
    +
    + + + + + +
    +   + + +

    +

    Read the release critial bug summary from the specified directory
    +
    +The RC bug summaries are read from the `Bugs' file within the
    +directory specified as `basedir' parameter. The file contains
    +rows with the format:
    +
    +<package-name> <count-of-rc-bugs>
    +
    +The method returns a dictionary where the key is the binary package
    +name and the value is the number of open RC bugs for it.
    +
    +

    +Definition at line 252 of file britney.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + +
    def britney.Britney.read_dates   self,
      basedir
    +
    + + + + + +
    +   + + +

    +

    Read the upload date for the packages from the specified directory
    +
    +The upload dates are read from the `Date' file within the directory
    +specified as `basedir' parameter. The file contains rows with the
    +format:
    +
    +<package-name> <version> <date-of-upload>
    +
    +The dates are expressed as days starting from the 1970-01-01.
    +
    +The method returns a dictionary where the key is the binary package
    +name and the value is tuple with two items, the version and the date.
    +
    +

    +Definition at line 329 of file britney.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + +
    def britney.Britney.read_hints   self,
      basedir
    +
    + + + + + +
    +   + + +

    +

    Read the hint commands from the specified directory
    +
    +The hint commands are read from the files contained by the `Hints'
    +directory within the directory specified as `basedir' parameter. 
    +The name of the files has to be the same of the authorized users
    +for the hints.
    +
    +The file contains rows with the format:
    +
    +<command> <package-name>[/<version>]
    +
    +The method returns a dictionary where the key is the command, and
    +the value is the list of affected packages.
    +
    +

    +Definition at line 426 of file britney.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + +
    def britney.Britney.read_sources   self,
      basedir
    +
    + + + + + +
    +   + + +

    +

    Read the list of source packages from the specified directory
    +
    +The source packages are read from the `Sources' file within the
    +directory specified as `basedir' parameter. Considering the
    +large amount of memory needed, not all the fields are loaded
    +in memory. The available fields are Version, Maintainer and Section.
    +
    +The method returns a list where every item represents a source
    +package as a dictionary.
    +
    +

    +Definition at line 130 of file britney.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + +
    def britney.Britney.read_urgencies   self,
      basedir
    +
    + + + + + +
    +   + + +

    +

    Read the upload urgency of the packages from the specified directory
    +
    +The upload urgencies are read from the `Urgency' file within the
    +directory specified as `basedir' parameter. The file contains rows
    +with the format:
    +
    +<package-name> <version> <urgency>
    +
    +The method returns a dictionary where the key is the binary package
    +name and the value is the greatest urgency from the versions of the
    +package that are higher than the testing one.
    +
    +

    +Definition at line 355 of file britney.py.

    +

    + + + + +
    + + + + + + + + + +
    def britney.Britney.reversed_exc_deps   self  ) 
    +
    + + + + + +
    +   + + +

    +

    Reverse the excuses dependencies
    +
    +This method returns a dictionary where the keys are the package names
    +and the values are the excuse names which depend on it.
    +
    +

    +Definition at line 938 of file britney.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + +
    def britney.Britney.same_source   self,
      sv1,
      sv2
    +
    + + + + + +
    +   + + +

    +

    Check if two version numbers are built from the same source
    +
    +This method returns a boolean value which is true if the two
    +version numbers specified as parameters are built from the same
    +source. The main use of this code is to detect binary-NMU.
    +
    +

    +Definition at line 476 of file britney.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + +
    def britney.Britney.should_remove_source   self,
      pkg
    +
    + + + + + +
    +   + + +

    +

    Check if a source package should be removed from testing
    +
    +This method checks if a source package should be removed from the
    +testing distribution; this happen if the source package is not
    +present in the unstable distribution anymore.
    +
    +It returns True if the package can be removed, False otherwise.
    +In the former case, a new excuse is appended to the object
    +attribute excuses.
    +
    +

    +Definition at line 604 of file britney.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + +
    def britney.Britney.should_upgrade_src   self,
      src,
      suite
    +
    + + + + + +
    +   + + +

    +

    Check if source package should be upgraded
    +
    +This method checks if a source package should be upgraded. The analysis
    +is performed for the source package specified by the `src' parameter, 
    +checking the architecture `arch' for the distribution `suite'.
    +       
    +It returns False if the given package doesn't need to be upgraded,
    +True otherwise. In the former case, a new excuse is appended to
    +the object attribute excuses.
    +
    +

    +Definition at line 742 of file britney.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    def britney.Britney.should_upgrade_srcarch   self,
      src,
      arch,
      suite
    +
    + + + + + +
    +   + + +

    +

    Check if binary package should be upgraded
    +
    +This method checks if a binary package should be upgraded; this can
    +happen only if the binary package is a binary-NMU for the given arch.
    +The analysis is performed for the source package specified by the
    +`src' parameter, checking the architecture `arch' for the distribution
    +`suite'.
    +       
    +It returns False if the given package doesn't need to be upgraded,
    +True otherwise. In the former case, a new excuse is appended to
    +the object attribute excuses.
    +
    +

    +Definition at line 628 of file britney.py.

    +

    + + + + +
    + + + + + + + + + +
    def britney.Britney.write_excuses   self  ) 
    +
    + + + + + +
    +   + + +

    +

    Produce and write the update excuses
    +
    +This method handles the update excuses generation: the packages are
    +looked to determine whether they are valid candidates. For the details
    +of this procedure, please refer to the module docstring.
    +
    +

    +Definition at line 993 of file britney.py.

    +


    The documentation for this class was generated from the following file: +
    Generated on Sat Jun 24 18:50:20 2006 for briteny by  + +doxygen 1.4.6
    + + diff --git a/doc/html/classexcuse_1_1Excuse-members.html b/doc/html/classexcuse_1_1Excuse-members.html new file mode 100644 index 0000000..5e8aed6 --- /dev/null +++ b/doc/html/classexcuse_1_1Excuse-members.html @@ -0,0 +1,40 @@ + + +briteny: Member List + + + + + + +

    excuse.Excuse Member List

    This is the complete list of members for excuse.Excuse, including all inherited members.

    + + + + + + + + + + + + + + +
    __init__excuse.Excuse
    add_break_depexcuse.Excuse
    add_depexcuse.Excuse
    addhtmlexcuse.Excuse
    htmlexcuse.Excuse
    invalidate_depexcuse.Excuse
    reemailexcuse.Excuse [static]
    set_dateexcuse.Excuse
    set_maintexcuse.Excuse
    set_priorityexcuse.Excuse
    set_sectionexcuse.Excuse
    set_urgencyexcuse.Excuse
    set_versexcuse.Excuse
    setdaysoldexcuse.Excuse


    Generated on Sat Jun 24 18:50:20 2006 for briteny by  + +doxygen 1.4.6
    + + diff --git a/doc/html/classexcuse_1_1Excuse.html b/doc/html/classexcuse_1_1Excuse.html new file mode 100644 index 0000000..08e8486 --- /dev/null +++ b/doc/html/classexcuse_1_1Excuse.html @@ -0,0 +1,600 @@ + + +briteny: excuse.Excuse Class Reference + + + + + + + +

    excuse.Excuse Class Reference

    List of all members. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    Public Member Functions

    def __init__
    def set_vers
    def set_maint
    def set_section
    def set_priority
    def set_date
    def set_urgency
    def add_dep
    def add_break_dep
    def invalidate_dep
    def setdaysold
    def addhtml
    def html

    Static Public Attributes

    +tuple reemail = re.compile(r"<.*?>")
     Regular expression for removing the email address.
    +

    Detailed Description

    +
    Excuse class
    +
    +This class represents an update excuse, which is a detailed explanation
    +of why a package can or cannot be updated in the testing distribution from
    +a newer package in another distribution (like for example unstable).
    +
    +The main purpose of the excuses is to be written in an HTML file which
    +will be published over HTTP. The maintainers will be able to parse it
    +manually or automatically to find the explanation of why their packages
    +have been updated or not.
    +
    +

    + +

    +Definition at line 21 of file excuse.py.


    Member Function Documentation

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + +
    def excuse.Excuse.__init__   self,
      name
    +
    + + + + + +
    +   + + +

    +

    Class constructor
    +
    +This method initializes the excuse with the specified name and
    +the default values.
    +
    +

    +Definition at line 28 of file excuse.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + +
    def excuse.Excuse.add_break_dep   self,
      name,
      arch
    +
    + + + + + +
    +   + + +

    +

    Add a break dependency
    +

    +Definition at line 80 of file excuse.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + +
    def excuse.Excuse.add_dep   self,
      name
    +
    + + + + + +
    +   + + +

    +

    Add a dependency
    +

    +Definition at line 76 of file excuse.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + +
    def excuse.Excuse.addhtml   self,
      note
    +
    + + + + + +
    +   + + +

    +

    Add a note in HTML
    +

    +Definition at line 94 of file excuse.py.

    +

    + + + + +
    + + + + + + + + + +
    def excuse.Excuse.html   self  ) 
    +
    + + + + + +
    +   + + +

    +

    Render the excuse in HTML
    +

    +Definition at line 98 of file excuse.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + +
    def excuse.Excuse.invalidate_dep   self,
      name
    +
    + + + + + +
    +   + + +

    +

    Invalidate dependency
    +

    +Definition at line 85 of file excuse.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + +
    def excuse.Excuse.set_date   self,
      date
    +
    + + + + + +
    +   + + +

    +

    Set the date of upload of the package
    +

    +Definition at line 68 of file excuse.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + +
    def excuse.Excuse.set_maint   self,
      maint
    +
    + + + + + +
    +   + + +

    +

    Set the package maintainer's name
    +

    +Definition at line 56 of file excuse.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + +
    def excuse.Excuse.set_priority   self,
      pri
    +
    + + + + + +
    +   + + +

    +

    Set the priority of the package
    +

    +Definition at line 64 of file excuse.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + +
    def excuse.Excuse.set_section   self,
      section
    +
    + + + + + +
    +   + + +

    +

    Set the section of the package
    +

    +Definition at line 60 of file excuse.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + +
    def excuse.Excuse.set_urgency   self,
      date
    +
    + + + + + +
    +   + + +

    +

    Set the urgency of upload of the package
    +

    +Definition at line 72 of file excuse.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + +
    def excuse.Excuse.set_vers   self,
      tver,
      uver
    +
    + + + + + +
    +   + + +

    +

    Set the testing and unstable versions
    +

    +Definition at line 51 of file excuse.py.

    +

    + + + + +
    + + + + + + + + + + + + + + + + + + + + + + + + +
    def excuse.Excuse.setdaysold   self,
      daysold,
      mindays
    +
    + + + + + +
    +   + + +

    +

    Set the number of days from the upload and the minimum number of days for the update
    +

    +Definition at line 89 of file excuse.py.

    +


    The documentation for this class was generated from the following file: +
    Generated on Sat Jun 24 18:50:20 2006 for briteny by  + +doxygen 1.4.6
    + + diff --git a/doc/html/doxygen.css b/doc/html/doxygen.css new file mode 100644 index 0000000..05615b2 --- /dev/null +++ b/doc/html/doxygen.css @@ -0,0 +1,310 @@ +BODY,H1,H2,H3,H4,H5,H6,P,CENTER,TD,TH,UL,DL,DIV { + font-family: Geneva, Arial, Helvetica, sans-serif; +} +BODY,TD { + font-size: 90%; +} +H1 { + text-align: center; + font-size: 160%; +} +H2 { + font-size: 120%; +} +H3 { + font-size: 100%; +} +CAPTION { font-weight: bold } +DIV.qindex { + width: 100%; + background-color: #e8eef2; + border: 1px solid #84b0c7; + text-align: center; + margin: 2px; + padding: 2px; + line-height: 140%; +} +DIV.nav { + width: 100%; + background-color: #e8eef2; + border: 1px solid #84b0c7; + text-align: center; + margin: 2px; + padding: 2px; + line-height: 140%; +} +DIV.navtab { + background-color: #e8eef2; + border: 1px solid #84b0c7; + text-align: center; + margin: 2px; + margin-right: 15px; + padding: 2px; +} +TD.navtab { + font-size: 70%; +} +A.qindex { + text-decoration: none; + font-weight: bold; + color: #1A419D; +} +A.qindex:visited { + text-decoration: none; + font-weight: bold; + color: #1A419D +} +A.qindex:hover { + text-decoration: none; + background-color: #ddddff; +} +A.qindexHL { + text-decoration: none; + font-weight: bold; + background-color: #6666cc; + color: #ffffff; + border: 1px double #9295C2; +} +A.qindexHL:hover { + text-decoration: none; + background-color: #6666cc; + color: #ffffff; +} +A.qindexHL:visited { text-decoration: none; background-color: #6666cc; color: #ffffff } +A.el { text-decoration: none; font-weight: bold } +A.elRef { font-weight: bold } +A.code:link { text-decoration: none; font-weight: normal; color: #0000FF} +A.code:visited { text-decoration: none; font-weight: normal; color: #0000FF} +A.codeRef:link { font-weight: normal; color: #0000FF} +A.codeRef:visited { font-weight: normal; color: #0000FF} +A:hover { text-decoration: none; background-color: #f2f2ff } +DL.el { margin-left: -1cm } +.fragment { + font-family: Fixed, 
monospace; + font-size: 95%; +} +PRE.fragment { + border: 1px solid #CCCCCC; + background-color: #f5f5f5; + margin-top: 4px; + margin-bottom: 4px; + margin-left: 2px; + margin-right: 8px; + padding-left: 6px; + padding-right: 6px; + padding-top: 4px; + padding-bottom: 4px; +} +DIV.ah { background-color: black; font-weight: bold; color: #ffffff; margin-bottom: 3px; margin-top: 3px } +TD.md { background-color: #F4F4FB; font-weight: bold; } +TD.mdPrefix { + background-color: #F4F4FB; + color: #606060; + font-size: 80%; +} +TD.mdname1 { background-color: #F4F4FB; font-weight: bold; color: #602020; } +TD.mdname { background-color: #F4F4FB; font-weight: bold; color: #602020; width: 600px; } +DIV.groupHeader { + margin-left: 16px; + margin-top: 12px; + margin-bottom: 6px; + font-weight: bold; +} +DIV.groupText { margin-left: 16px; font-style: italic; font-size: 90% } +BODY { + background: white; + color: black; + margin-right: 20px; + margin-left: 20px; +} +TD.indexkey { + background-color: #e8eef2; + font-weight: bold; + padding-right : 10px; + padding-top : 2px; + padding-left : 10px; + padding-bottom : 2px; + margin-left : 0px; + margin-right : 0px; + margin-top : 2px; + margin-bottom : 2px; + border: 1px solid #CCCCCC; +} +TD.indexvalue { + background-color: #e8eef2; + font-style: italic; + padding-right : 10px; + padding-top : 2px; + padding-left : 10px; + padding-bottom : 2px; + margin-left : 0px; + margin-right : 0px; + margin-top : 2px; + margin-bottom : 2px; + border: 1px solid #CCCCCC; +} +TR.memlist { + background-color: #f0f0f0; +} +P.formulaDsp { text-align: center; } +IMG.formulaDsp { } +IMG.formulaInl { vertical-align: middle; } +SPAN.keyword { color: #008000 } +SPAN.keywordtype { color: #604020 } +SPAN.keywordflow { color: #e08000 } +SPAN.comment { color: #800000 } +SPAN.preprocessor { color: #806020 } +SPAN.stringliteral { color: #002080 } +SPAN.charliteral { color: #008080 } +.mdTable { + border: 1px solid #868686; + background-color: #F4F4FB; +} +.mdRow 
{ + padding: 8px 10px; +} +.mdescLeft { + padding: 0px 8px 4px 8px; + font-size: 80%; + font-style: italic; + background-color: #FAFAFA; + border-top: 1px none #E0E0E0; + border-right: 1px none #E0E0E0; + border-bottom: 1px none #E0E0E0; + border-left: 1px none #E0E0E0; + margin: 0px; +} +.mdescRight { + padding: 0px 8px 4px 8px; + font-size: 80%; + font-style: italic; + background-color: #FAFAFA; + border-top: 1px none #E0E0E0; + border-right: 1px none #E0E0E0; + border-bottom: 1px none #E0E0E0; + border-left: 1px none #E0E0E0; + margin: 0px; +} +.memItemLeft { + padding: 1px 0px 0px 8px; + margin: 4px; + border-top-width: 1px; + border-right-width: 1px; + border-bottom-width: 1px; + border-left-width: 1px; + border-top-color: #E0E0E0; + border-right-color: #E0E0E0; + border-bottom-color: #E0E0E0; + border-left-color: #E0E0E0; + border-top-style: solid; + border-right-style: none; + border-bottom-style: none; + border-left-style: none; + background-color: #FAFAFA; + font-size: 80%; +} +.memItemRight { + padding: 1px 8px 0px 8px; + margin: 4px; + border-top-width: 1px; + border-right-width: 1px; + border-bottom-width: 1px; + border-left-width: 1px; + border-top-color: #E0E0E0; + border-right-color: #E0E0E0; + border-bottom-color: #E0E0E0; + border-left-color: #E0E0E0; + border-top-style: solid; + border-right-style: none; + border-bottom-style: none; + border-left-style: none; + background-color: #FAFAFA; + font-size: 80%; +} +.memTemplItemLeft { + padding: 1px 0px 0px 8px; + margin: 4px; + border-top-width: 1px; + border-right-width: 1px; + border-bottom-width: 1px; + border-left-width: 1px; + border-top-color: #E0E0E0; + border-right-color: #E0E0E0; + border-bottom-color: #E0E0E0; + border-left-color: #E0E0E0; + border-top-style: none; + border-right-style: none; + border-bottom-style: none; + border-left-style: none; + background-color: #FAFAFA; + font-size: 80%; +} +.memTemplItemRight { + padding: 1px 8px 0px 8px; + margin: 4px; + border-top-width: 1px; + 
border-right-width: 1px; + border-bottom-width: 1px; + border-left-width: 1px; + border-top-color: #E0E0E0; + border-right-color: #E0E0E0; + border-bottom-color: #E0E0E0; + border-left-color: #E0E0E0; + border-top-style: none; + border-right-style: none; + border-bottom-style: none; + border-left-style: none; + background-color: #FAFAFA; + font-size: 80%; +} +.memTemplParams { + padding: 1px 0px 0px 8px; + margin: 4px; + border-top-width: 1px; + border-right-width: 1px; + border-bottom-width: 1px; + border-left-width: 1px; + border-top-color: #E0E0E0; + border-right-color: #E0E0E0; + border-bottom-color: #E0E0E0; + border-left-color: #E0E0E0; + border-top-style: solid; + border-right-style: none; + border-bottom-style: none; + border-left-style: none; + color: #606060; + background-color: #FAFAFA; + font-size: 80%; +} +.search { color: #003399; + font-weight: bold; +} +FORM.search { + margin-bottom: 0px; + margin-top: 0px; +} +INPUT.search { font-size: 75%; + color: #000080; + font-weight: normal; + background-color: #e8eef2; +} +TD.tiny { font-size: 75%; +} +a { + color: #1A41A8; +} +a:visited { + color: #2A3798; +} +.dirtab { padding: 4px; + border-collapse: collapse; + border: 1px solid #84b0c7; +} +TH.dirtab { background: #e8eef2; + font-weight: bold; +} +HR { height: 1px; + border: none; + border-top: 1px solid black; +} + diff --git a/doc/html/doxygen.png b/doc/html/doxygen.png new file mode 100644 index 0000000..f0a274b Binary files /dev/null and b/doc/html/doxygen.png differ diff --git a/doc/html/excuse_8py-source.html b/doc/html/excuse_8py-source.html new file mode 100644 index 0000000..47e7289 --- /dev/null +++ b/doc/html/excuse_8py-source.html @@ -0,0 +1,153 @@ + + +briteny: excuse.py Source File + + + + + +

    excuse.py

    00001 # -*- coding: utf-8 -*-
    +00002 
    +00003 # Copyright (C) 2001-2004 Anthony Towns <ajt@debian.org>
    +00004 #                         Andreas Barth <aba@debian.org>
    +00005 #                         Fabio Tranchitella <kobold@debian.org>
    +00006 
    +00007 # This program is free software; you can redistribute it and/or modify
    +00008 # it under the terms of the GNU General Public License as published by
    +00009 # the Free Software Foundation; either version 2 of the License, or
    +00010 # (at your option) any later version.
    +00011 
    +00012 # This program is distributed in the hope that it will be useful,
    +00013 # but WITHOUT ANY WARRANTY; without even the implied warranty of
    +00014 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    +00015 # GNU General Public License for more details.
    +00016 
    +00017 import re
    +00018 import string
    +00019 
    +00020 
    +00021 class Excuse:
    +00022     """Excuse class
    +00023     
    +00024     This class represents an update excuse, which is a detailed explanation
    +00025     of why a package can or cannot be updated in the testing distribution  from
    +00026     a newer package in another distribution (like for example unstable).
    +00027 
    +00028     The main purpose of the excuses is to be written in an HTML file which
    +00029     will be published over HTTP. The maintainers will be able to parse it
    +00030     manually or automatically to find the explanation of why their packages
    +00031     have been updated or not.
    +00032     """
    +00033 
    +00034     ## @var reemail
    +00035     # Regular expression for removing the email address
    +00036     reemail = re.compile(r"<.*?>")
    +00037 
    +00038     def __init__(self, name):
    +00039         """Class constructor
    +00040         
    +00041         This method initializes the excuse with the specified name and
    +00042         the default values.
    +00043         """
    +00044         self.name = name
    +00045         self.ver = ("-", "-")
    +00046         self.maint = None
    +00047         self.pri = None
    +00048         self.date = None
    +00049         self.urgency = None
    +00050         self.daysold = None
    +00051         self.mindays = None
    +00052         self.section = None
    +00053         self.dontinvalidate = 0
    +00054 
    +00055         self.invalid_deps = []
    +00056         self.deps = []
    +00057         self.break_deps = []
    +00058         self.bugs = []
    +00059         self.htmlline = []
    +00060 
    +00061     def set_vers(self, tver, uver):
    +00062         """Set the testing and unstable versions"""
    +00063         if tver: self.ver = (tver, self.ver[1])
    +00064         if uver: self.ver = (self.ver[0], uver)
    +00065 
    +00066     def set_maint(self, maint):
    +00067         """Set the package maintainer's name"""
    +00068         self.maint = self.reemail.sub("", maint)
    +00069 
    +00070     def set_section(self, section):
    +00071         """Set the section of the package"""
    +00072         self.section = section
    +00073 
    +00074     def set_priority(self, pri):
    +00075         """Set the priority of the package"""
    +00076         self.pri = pri
    +00077 
    +00078     def set_date(self, date):
    +00079         """Set the date of upload of the package"""
    +00080         self.date = date
    +00081 
    +00082     def set_urgency(self, date):
    +00083         """Set the urgency of upload of the package"""
    +00084         self.urgency = date
    +00085 
    +00086     def add_dep(self, name):
    +00087         """Add a dependency"""
    +00088         if name not in self.deps: self.deps.append(name)
    +00089 
    +00090     def add_break_dep(self, name, arch):
    +00091         """Add a break dependency"""
    +00092         if (name, arch) not in self.break_deps:
    +00093             self.break_deps.append( (name, arch) )
    +00094 
    +00095     def invalidate_dep(self, name):
    +00096         """Invalidate dependency"""
    +00097         if name not in self.invalid_deps: self.invalid_deps.append(name)
    +00098 
    +00099     def setdaysold(self, daysold, mindays):
    +00100         """Set the number of days from the upload and the minimum number of days for the update"""
    +00101         self.daysold = daysold
    +00102         self.mindays = mindays
    +00103 
    +00104     def addhtml(self, note):
    +00105         """Add a note in HTML"""
    +00106         self.htmlline.append(note)
    +00107 
    +00108     def html(self):
    +00109         """Render the excuse in HTML"""
    +00110         res = "<a id=\"%s\" name=\"%s\">%s</a> (%s to %s)\n<ul>\n" % \
    +00111             (self.name, self.name, self.name, self.ver[0], self.ver[1])
    +00112         if self.maint:
    +00113             res = res + "<li>Maintainer: %s\n" % (self.maint)
    +00114         if self.section and string.find(self.section, "/") > -1:
    +00115             res = res + "<li>Section: %s\n" % (self.section)
    +00116         if self.daysold != None:
    +00117             if self.daysold < self.mindays:
    +00118                 res = res + ("<li>Too young, only %d of %d days old\n" %
    +00119                 (self.daysold, self.mindays))
    +00120             else:
    +00121                 res = res + ("<li>%d days old (needed %d days)\n" %
    +00122                 (self.daysold, self.mindays))
    +00123         for x in self.htmlline:
    +00124             res = res + "<li>" + x + "\n"
    +00125         for x in self.deps:
    +00126             if x in self.invalid_deps:
    +00127                 res = res + "<li>Depends: %s <a href=\"#%s\">%s</a> (not considered)\n" % (self.name, x, x)
    +00128             else:
    +00129                 res = res + "<li>Depends: %s <a href=\"#%s\">%s</a>\n" % (self.name, x, x)
    +00130         for (n,a) in self.break_deps:
    +00131             if n not in self.deps:
    +00132                 res += "<li>Ignoring %s depends: <a href=\"#%s\">%s</a>\n" % (a, n, n)
    +00133         res = res + "</ul>\n"
    +00134         return res
    +

    Generated on Sat Jun 24 18:50:20 2006 for briteny by  + +doxygen 1.4.6
    + + diff --git a/doc/html/files.html b/doc/html/files.html new file mode 100644 index 0000000..75c32f0 --- /dev/null +++ b/doc/html/files.html @@ -0,0 +1,23 @@ + + +briteny: File Index + + + + + +

    britney File List

    Here is a list of all documented files with brief descriptions: + + +
    britney.py [code]
    excuse.py [code]
    +
    Generated on Sat Jun 24 18:50:20 2006 for briteny by  + +doxygen 1.4.6
    + + diff --git a/doc/html/functions.html b/doc/html/functions.html new file mode 100644 index 0000000..95b7dbf --- /dev/null +++ b/doc/html/functions.html @@ -0,0 +1,110 @@ + + +briteny: Class Members + + + + + + +
    + +
    +
    + +
    + +

    +Here is a list of all documented class members with links to the class documentation for each member: +

    +

    - _ -

    +

    - a -

    +

    - e -

    +

    - g -

    +

    - h -

    +

    - i -

    +

    - m -

    +

    - n -

    +

    - r -

    +

    - s -

    +

    - w -

    +
    Generated on Sat Jun 24 18:50:20 2006 for briteny by  + +doxygen 1.4.6
    + + diff --git a/doc/html/functions_func.html b/doc/html/functions_func.html new file mode 100644 index 0000000..50b92e9 --- /dev/null +++ b/doc/html/functions_func.html @@ -0,0 +1,107 @@ + + +briteny: Class Members - Functions + + + + + + +
    + +
    +
    + +
    + +

    +  +

    +

    - _ -

    +

    - a -

    +

    - e -

    +

    - g -

    +

    - h -

    +

    - i -

    +

    - m -

    +

    - n -

    +

    - r -

    +

    - s -

    +

    - w -

    +
    Generated on Sat Jun 24 18:50:20 2006 for briteny by  + +doxygen 1.4.6
    + + diff --git a/doc/html/functions_vars.html b/doc/html/functions_vars.html new file mode 100644 index 0000000..acda1fe --- /dev/null +++ b/doc/html/functions_vars.html @@ -0,0 +1,39 @@ + + +briteny: Class Members - Variables + + + + + + +
    + +
    +  +

    +

    +
    Generated on Sat Jun 24 18:50:20 2006 for briteny by  + +doxygen 1.4.6
    + + diff --git a/doc/html/hierarchy.html b/doc/html/hierarchy.html new file mode 100644 index 0000000..ac80af1 --- /dev/null +++ b/doc/html/hierarchy.html @@ -0,0 +1,29 @@ + + +briteny: Hierarchical Index + + + + + + +

    britney Class Hierarchy

    This inheritance list is sorted roughly, but not completely, alphabetically: +
    Generated on Sat Jun 24 18:50:20 2006 for briteny by  + +doxygen 1.4.6
    + + diff --git a/doc/html/index.html b/doc/html/index.html new file mode 100644 index 0000000..ce15d89 --- /dev/null +++ b/doc/html/index.html @@ -0,0 +1,21 @@ + + +briteny: Main Page + + + + + +

    britney Documentation

    +

    +

    2.0.alpha1


    Generated on Sat Jun 24 18:50:20 2006 for briteny by  + +doxygen 1.4.6
    + + diff --git a/doc/html/namespacebritney.html b/doc/html/namespacebritney.html new file mode 100644 index 0000000..e8e3a7a --- /dev/null +++ b/doc/html/namespacebritney.html @@ -0,0 +1,27 @@ + + +briteny: Package britney + + + + + +

    Package britney

    +

    + + + + + +

    Classes

    class  Britney
    +


    Generated on Sat Jun 24 18:50:20 2006 for briteny by  + +doxygen 1.4.6
    + + diff --git a/doc/html/namespaceexcuse.html b/doc/html/namespaceexcuse.html new file mode 100644 index 0000000..ad0ce21 --- /dev/null +++ b/doc/html/namespaceexcuse.html @@ -0,0 +1,27 @@ + + +briteny: Package excuse + + + + + +

    Package excuse

    +

    + + + + + +

    Classes

    class  Excuse
    +


    Generated on Sat Jun 24 18:50:20 2006 for briteny by  + +doxygen 1.4.6
    + + diff --git a/doc/html/namespaces.html b/doc/html/namespaces.html new file mode 100644 index 0000000..b603d90 --- /dev/null +++ b/doc/html/namespaces.html @@ -0,0 +1,23 @@ + + +briteny: Package List + + + + + +

    britney Package List

    Here are the packages with brief descriptions (if available): + + +
    britney
    excuse
    +
    Generated on Sat Jun 24 18:50:20 2006 for briteny by  + +doxygen 1.4.6
    + + diff --git a/doc/html/tab_b.gif b/doc/html/tab_b.gif new file mode 100644 index 0000000..0d62348 Binary files /dev/null and b/doc/html/tab_b.gif differ diff --git a/doc/html/tab_l.gif b/doc/html/tab_l.gif new file mode 100644 index 0000000..9b1e633 Binary files /dev/null and b/doc/html/tab_l.gif differ diff --git a/doc/html/tab_r.gif b/doc/html/tab_r.gif new file mode 100644 index 0000000..ce9dd9f Binary files /dev/null and b/doc/html/tab_r.gif differ diff --git a/doc/html/tabs.css b/doc/html/tabs.css new file mode 100644 index 0000000..a61552a --- /dev/null +++ b/doc/html/tabs.css @@ -0,0 +1,102 @@ +/* tabs styles, based on http://www.alistapart.com/articles/slidingdoors */ + +DIV.tabs +{ + float : left; + width : 100%; + background : url("tab_b.gif") repeat-x bottom; + margin-bottom : 4px; +} + +DIV.tabs UL +{ + margin : 0px; + padding-left : 10px; + list-style : none; +} + +DIV.tabs LI, DIV.tabs FORM +{ + display : inline; + margin : 0px; + padding : 0px; +} + +DIV.tabs FORM +{ + float : right; +} + +DIV.tabs A +{ + float : left; + background : url("tab_r.gif") no-repeat right top; + border-bottom : 1px solid #84B0C7; + font-size : x-small; + font-weight : bold; + text-decoration : none; +} + +DIV.tabs A:hover +{ + background-position: 100% -150px; +} + +DIV.tabs A:link, DIV.tabs A:visited, +DIV.tabs A:active, DIV.tabs A:hover +{ + color: #1A419D; +} + +DIV.tabs SPAN +{ + float : left; + display : block; + background : url("tab_l.gif") no-repeat left top; + padding : 5px 9px; + white-space : nowrap; +} + +DIV.tabs INPUT +{ + float : right; + display : inline; + font-size : 1em; +} + +DIV.tabs TD +{ + font-size : x-small; + font-weight : bold; + text-decoration : none; +} + + + +/* Commented Backslash Hack hides rule from IE5-Mac \*/ +DIV.tabs SPAN {float : none;} +/* End IE5-Mac hack */ + +DIV.tabs A:hover SPAN +{ + background-position: 0% -150px; +} + +DIV.tabs LI#current A +{ + background-position: 100% -150px; + border-width : 0px; +} + +DIV.tabs 
LI#current SPAN +{ + background-position: 0% -150px; + padding-bottom : 6px; +} + +DIV.nav +{ + background : none; + border : none; + border-bottom : 1px solid #84B0C7; +} diff --git a/doc/latex/Helvetica.ttf b/doc/latex/Helvetica.ttf new file mode 100644 index 0000000..8051f8a Binary files /dev/null and b/doc/latex/Helvetica.ttf differ diff --git a/doc/latex/Makefile b/doc/latex/Makefile new file mode 100644 index 0000000..776fcf9 --- /dev/null +++ b/doc/latex/Makefile @@ -0,0 +1,39 @@ +all: clean refman.dvi + +ps: refman.ps + +pdf: refman.pdf + +ps_2on1: refman_2on1.ps + +pdf_2on1: refman_2on1.pdf + +refman.ps: refman.dvi + dvips -o refman.ps refman.dvi + +refman.pdf: refman.ps + ps2pdf refman.ps refman.pdf + +refman.dvi: refman.tex doxygen.sty + echo "Running latex..." + latex refman.tex + echo "Running makeindex..." + makeindex refman.idx + echo "Rerunning latex...." + latex refman.tex + latex_count=5 ; \ + while egrep -s 'Rerun (LaTeX|to get cross-references right)' refman.log && [ $$latex_count -gt 0 ] ;\ + do \ + echo "Rerunning latex...." 
;\ + latex refman.tex ;\ + latex_count=`expr $$latex_count - 1` ;\ + done + +refman_2on1.ps: refman.ps + psnup -2 refman.ps >refman_2on1.ps + +refman_2on1.pdf: refman_2on1.ps + ps2pdf refman_2on1.ps refman_2on1.pdf + +clean: + rm -f *.ps *.dvi *.aux *.toc *.idx *.ind *.ilg *.log *.out refman.pdf diff --git a/doc/latex/annotated.tex b/doc/latex/annotated.tex new file mode 100644 index 0000000..200fc72 --- /dev/null +++ b/doc/latex/annotated.tex @@ -0,0 +1,5 @@ +\section{briteny Class List} +Here are the classes, structs, unions and interfaces with brief descriptions:\begin{CompactList} +\item\contentsline{section}{{\bf britney.Britney} }{\pageref{classbritney_1_1Britney}}{} +\item\contentsline{section}{{\bf excuse.Excuse} }{\pageref{classexcuse_1_1Excuse}}{} +\end{CompactList} diff --git a/doc/latex/classbritney_1_1Britney.tex b/doc/latex/classbritney_1_1Britney.tex new file mode 100644 index 0000000..040c4a0 --- /dev/null +++ b/doc/latex/classbritney_1_1Britney.tex @@ -0,0 +1,514 @@ +\section{britney.Britney Class Reference} +\label{classbritney_1_1Britney}\index{britney::Britney@{britney::Britney}} +\subsection*{Public Member Functions} +\begin{CompactItemize} +\item +def {\bf \_\-\_\-init\_\-\_\-} +\item +def {\bf \_\-\_\-parse\_\-arguments} +\item +def {\bf \_\-\_\-log} +\item +def {\bf read\_\-sources} +\item +def {\bf read\_\-binaries} +\item +def {\bf read\_\-bugs} +\item +def {\bf \_\-\_\-maxver} +\item +def {\bf normalize\_\-bugs} +\item +def {\bf read\_\-dates} +\item +def {\bf read\_\-urgencies} +\item +def {\bf read\_\-approvals} +\item +def {\bf read\_\-hints} +\item +def {\bf same\_\-source} +\item +def {\bf get\_\-dependency\_\-solvers} +\item +def {\bf excuse\_\-unsat\_\-deps} +\item +def {\bf should\_\-remove\_\-source} +\item +def {\bf should\_\-upgrade\_\-srcarch} +\item +def {\bf should\_\-upgrade\_\-src} +\item +def {\bf reversed\_\-exc\_\-deps} +\item +def {\bf invalidate\_\-excuses} +\item +def {\bf write\_\-excuses} +\item +def {\bf main} 
+\end{CompactItemize}
+\subsection*{Static Public Attributes}
+\begin{CompactItemize}
+\item
+tuple {\bf HINTS\_\-STANDARD} = (\char`\"{}easy\char`\"{}, \char`\"{}hint\char`\"{}, \char`\"{}remove\char`\"{}, \char`\"{}block\char`\"{}, \char`\"{}unblock\char`\"{}, \char`\"{}urgent\char`\"{}, \char`\"{}approve\char`\"{})\label{classbritney_1_1Britney_ebbe3f40cca59e2de275b0558556ee63}
+
+\item
+tuple {\bf HINTS\_\-ALL} = (\char`\"{}force\char`\"{}, \char`\"{}force-hint\char`\"{}, \char`\"{}block-all\char`\"{})\label{classbritney_1_1Britney_a088d6fd96963f87f88c9c40cda10bfa}
+
+\end{CompactItemize}
+
+
+\subsection{Detailed Description}
+
+
+\footnotesize\begin{verbatim}Britney, the debian testing updater script
+
+This is the script that updates the testing_ distribution. It is executed
+each day after the installation of the updated packages. It generates the
+`Packages' files for the testing distribution, but it does so in an
+intelligent manner; it tries to avoid any inconsistency and to use only
+non-buggy packages.
+
+For more documentation on this script, please read the Developers Reference.
+\end{verbatim}
+\normalsize
+
+
+
+
+Definition at line 33 of file britney.py.
+
+\subsection{Member Function Documentation}
+\index{britney::Britney@{britney::Britney}!__init__@{\_\-\_\-init\_\-\_\-}}
+\index{__init__@{\_\-\_\-init\_\-\_\-}!britney::Britney@{britney::Britney}}
+\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.\_\-\_\-init\_\-\_\- ( {\em self})}\label{classbritney_1_1Britney_5846d81eace24f479292c47e30fd1851}
+
+
+
+
+\footnotesize\begin{verbatim}Class constructor
+
+This method initializes and populates the data lists, which contain all
+the information needed by the other methods of the class. 
+\end{verbatim} +\normalsize + + +Definition at line 39 of file britney.py.\index{britney::Britney@{britney::Britney}!__log@{\_\-\_\-log}} +\index{__log@{\_\-\_\-log}!britney::Britney@{britney::Britney}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.\_\-\_\-log ( {\em self}, {\em msg}, {\em type} = {\tt \char`\"{}I\char`\"{}})}\label{classbritney_1_1Britney_678036a5200302d77249f5e702532681} + + + + +\footnotesize\begin{verbatim}Print info messages according to verbosity level + +An easy-and-simple log method which prints messages to the standard +output. The type parameter controls the urgency of the message, and +can be equal to `I' for `Information', `W' for `Warning' and `E' for +`Error'. Warnings and errors are always printed, and information are +printed only if the verbose logging is enabled. +\end{verbatim} +\normalsize + + +Definition at line 115 of file britney.py.\index{britney::Britney@{britney::Britney}!__maxver@{\_\-\_\-maxver}} +\index{__maxver@{\_\-\_\-maxver}!britney::Britney@{britney::Britney}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.\_\-\_\-maxver ( {\em self}, {\em pkg}, {\em dist})}\label{classbritney_1_1Britney_0affb1945986a52c61a4492c9732968e} + + + + +\footnotesize\begin{verbatim}Return the maximum version for a given package name + +This method returns None if the specified source package +is not available in the `dist' distribution. If the package +exists, then it returns the maximum version between the +source package and its binary packages. 
+\end{verbatim} +\normalsize + + +Definition at line 276 of file britney.py.\index{britney::Britney@{britney::Britney}!__parse_arguments@{\_\-\_\-parse\_\-arguments}} +\index{__parse_arguments@{\_\-\_\-parse\_\-arguments}!britney::Britney@{britney::Britney}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.\_\-\_\-parse\_\-arguments ( {\em self})}\label{classbritney_1_1Britney_506f9800068902cf7cac6236b78d1dc4} + + + + +\footnotesize\begin{verbatim}Parse the command line arguments + +This method parses and initializes the command line arguments. +While doing so, it preprocesses some of the options to be converted +in a suitable form for the other methods of the class. +\end{verbatim} +\normalsize + + +Definition at line 75 of file britney.py.\index{britney::Britney@{britney::Britney}!excuse_unsat_deps@{excuse\_\-unsat\_\-deps}} +\index{excuse_unsat_deps@{excuse\_\-unsat\_\-deps}!britney::Britney@{britney::Britney}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.excuse\_\-unsat\_\-deps ( {\em self}, {\em pkg}, {\em src}, {\em arch}, {\em suite}, {\em excuse})}\label{classbritney_1_1Britney_f51c60a69f3a9dc2bc5afdb2ffaf3990} + + + + +\footnotesize\begin{verbatim}Find unsatisfied dependencies for a binary package + +This method analyzes the dependencies of the binary package specified +by the parameter `pkg', built from the source package `src', for the +architecture `arch' within the suite `suite'. If the dependency can't +be satisfied in testing and/or unstable, it updates the excuse passed +as parameter. + +The dependency fields checked are Pre-Depends and Depends. 
+\end{verbatim} +\normalsize + + +Definition at line 553 of file britney.py.\index{britney::Britney@{britney::Britney}!get_dependency_solvers@{get\_\-dependency\_\-solvers}} +\index{get_dependency_solvers@{get\_\-dependency\_\-solvers}!britney::Britney@{britney::Britney}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.get\_\-dependency\_\-solvers ( {\em self}, {\em block}, {\em arch}, {\em distribution})}\label{classbritney_1_1Britney_5461f49e3e75a251ebedfd37d2a5ff0c} + + + + +\footnotesize\begin{verbatim}Find the packages which satisfy a dependency block + +This method returns the list of packages which satisfy a dependency +block (as returned by apt_pkg.ParseDepends) for the given architecture +and distribution. + +It returns a tuple with two items: the first is a boolean which is +True if the dependency is satisfied, the second is the list of the +solving packages. +\end{verbatim} +\normalsize + + +Definition at line 515 of file britney.py.\index{britney::Britney@{britney::Britney}!invalidate_excuses@{invalidate\_\-excuses}} +\index{invalidate_excuses@{invalidate\_\-excuses}!britney::Britney@{britney::Britney}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.invalidate\_\-excuses ( {\em self}, {\em valid}, {\em invalid})}\label{classbritney_1_1Britney_171969785db449d7a06c3f762774e0cd} + + + + +\footnotesize\begin{verbatim}Invalidate impossible excuses + +This method invalidates the impossible excuses, which depend +on invalid excuses. The two parameters contains the list of +`valid' and `invalid' excuses. 
+\end{verbatim} +\normalsize + + +Definition at line 951 of file britney.py.\index{britney::Britney@{britney::Britney}!main@{main}} +\index{main@{main}!britney::Britney@{britney::Britney}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.main ( {\em self})}\label{classbritney_1_1Britney_0e9551bdf927388f55be5ce15a48c94f} + + + + +\footnotesize\begin{verbatim}Main method + +This is the entry point for the class: it includes the list of calls +for the member methods which will produce the output files. +\end{verbatim} +\normalsize + + +Definition at line 1079 of file britney.py.\index{britney::Britney@{britney::Britney}!normalize_bugs@{normalize\_\-bugs}} +\index{normalize_bugs@{normalize\_\-bugs}!britney::Britney@{britney::Britney}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.normalize\_\-bugs ( {\em self})}\label{classbritney_1_1Britney_5a6af4a100cfd54e872a27fa7f48ac3c} + + + + +\footnotesize\begin{verbatim}Normalize the release critical bug summaries for testing and unstable + +The method doesn't return any value: it directly modifies the +object attribute `bugs'. +\end{verbatim} +\normalsize + + +Definition at line 294 of file britney.py.\index{britney::Britney@{britney::Britney}!read_approvals@{read\_\-approvals}} +\index{read_approvals@{read\_\-approvals}!britney::Britney@{britney::Britney}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.read\_\-approvals ( {\em self}, {\em basedir})}\label{classbritney_1_1Britney_39248f0cfea1c8798b2ca5a97d37eaf8} + + + + +\footnotesize\begin{verbatim}Read the approval commands from the specified directory + +The approval commands are read from the files contained by the +`Approved' directory within the directory specified as `basedir' +parameter. The name of the files has to be the same of the +authorized users for the approvals. 
+ +The file contains rows with the format: + + + +The method returns a dictionary where the key is the binary package +name followed by an underscore and the version number, and the value +is the user who submitted the command. +\end{verbatim} +\normalsize + + +Definition at line 400 of file britney.py.\index{britney::Britney@{britney::Britney}!read_binaries@{read\_\-binaries}} +\index{read_binaries@{read\_\-binaries}!britney::Britney@{britney::Britney}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.read\_\-binaries ( {\em self}, {\em basedir}, {\em distribution}, {\em arch})}\label{classbritney_1_1Britney_1b2b0f42e4af1cee472f93e955b30421} + + + + +\footnotesize\begin{verbatim}Read the list of binary packages from the specified directory + +The binary packages are read from the `Packages_${arch}' files +within the directory specified as `basedir' parameter, replacing +${arch} with the value of the arch parameter. Considering the +large amount of memory needed, not all the fields are loaded +in memory. The available fields are Version, Source, Pre-Depends, +Depends, Conflicts, Provides and Architecture. + +After reading the packages, reverse dependencies are computed +and saved in the `rdepends' keys, and the `Provides' field is +used to populate the virtual packages list. + +The dependencies are parsed with the apt.pkg.ParseDepends method, +and they are stored both as the format of its return value and +text. + +The method returns a tuple. The first element is a list where +every item represents a binary package as a dictionary; the second +element is a dictionary which maps virtual packages to real +packages that provide it. 
+\end{verbatim}
+\normalsize
+
+
+Definition at line 155 of file britney.py.\index{britney::Britney@{britney::Britney}!read_bugs@{read\_\-bugs}}
+\index{read_bugs@{read\_\-bugs}!britney::Britney@{britney::Britney}}
+\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.read\_\-bugs ( {\em self}, {\em basedir})}\label{classbritney_1_1Britney_6c777aae69e7bec2efebaf23ddd4a86c}
+
+
+
+
+\footnotesize\begin{verbatim}Read the release critical bug summary from the specified directory
+
+The RC bug summaries are read from the `Bugs' file within the
+directory specified as `basedir' parameter. The file contains
+rows with the format:
+
+
+
+The method returns a dictionary where the key is the binary package
+name and the value is the number of open RC bugs for it.
+\end{verbatim}
+\normalsize
+
+
+Definition at line 252 of file britney.py.\index{britney::Britney@{britney::Britney}!read_dates@{read\_\-dates}}
+\index{read_dates@{read\_\-dates}!britney::Britney@{britney::Britney}}
+\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.read\_\-dates ( {\em self}, {\em basedir})}\label{classbritney_1_1Britney_085af5ac906813ea40fc2e623748f517}
+
+
+
+
+\footnotesize\begin{verbatim}Read the upload date for the packages from the specified directory
+
+The upload dates are read from the `Date' file within the directory
+specified as `basedir' parameter. The file contains rows with the
+format:
+
+
+
+The dates are expressed as days starting from the 1970-01-01.
+
+The method returns a dictionary where the key is the binary package
+name and the value is a tuple with two items, the version and the date. 
+\end{verbatim} +\normalsize + + +Definition at line 329 of file britney.py.\index{britney::Britney@{britney::Britney}!read_hints@{read\_\-hints}} +\index{read_hints@{read\_\-hints}!britney::Britney@{britney::Britney}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.read\_\-hints ( {\em self}, {\em basedir})}\label{classbritney_1_1Britney_46d535f617fcf1faaaf5d841ea23c184} + + + + +\footnotesize\begin{verbatim}Read the hint commands from the specified directory + +The hint commands are read from the files contained by the `Hints' +directory within the directory specified as `basedir' parameter. +The name of the files has to be the same of the authorized users +for the hints. + +The file contains rows with the format: + + [/] + +The method returns a dictionary where the key is the command, and +the value is the list of affected packages. +\end{verbatim} +\normalsize + + +Definition at line 426 of file britney.py.\index{britney::Britney@{britney::Britney}!read_sources@{read\_\-sources}} +\index{read_sources@{read\_\-sources}!britney::Britney@{britney::Britney}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.read\_\-sources ( {\em self}, {\em basedir})}\label{classbritney_1_1Britney_054f44c47f17c0c4f5a069e821b7f868} + + + + +\footnotesize\begin{verbatim}Read the list of source packages from the specified directory + +The source packages are read from the `Sources' file within the +directory specified as `basedir' parameter. Considering the +large amount of memory needed, not all the fields are loaded +in memory. The available fields are Version, Maintainer and Section. + +The method returns a list where every item represents a source +package as a dictionary. 
+\end{verbatim}
+\normalsize
+
+
+Definition at line 130 of file britney.py.\index{britney::Britney@{britney::Britney}!read_urgencies@{read\_\-urgencies}}
+\index{read_urgencies@{read\_\-urgencies}!britney::Britney@{britney::Britney}}
+\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.read\_\-urgencies ( {\em self}, {\em basedir})}\label{classbritney_1_1Britney_09fc27899506b4830b1961f125a7b6a4}
+
+
+
+
+\footnotesize\begin{verbatim}Read the upload urgency of the packages from the specified directory
+
+The upload urgencies are read from the `Urgency' file within the
+directory specified as `basedir' parameter. The file contains rows
+with the format:
+
+
+
+The method returns a dictionary where the key is the binary package
+name and the value is the greatest urgency from the versions of the
+package that are higher than the testing one.
+\end{verbatim}
+\normalsize
+
+
+Definition at line 355 of file britney.py.\index{britney::Britney@{britney::Britney}!reversed_exc_deps@{reversed\_\-exc\_\-deps}}
+\index{reversed_exc_deps@{reversed\_\-exc\_\-deps}!britney::Britney@{britney::Britney}}
+\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.reversed\_\-exc\_\-deps ( {\em self})}\label{classbritney_1_1Britney_be1b4af9d6c6650c70b24267412bc1a8}
+
+
+
+
+\footnotesize\begin{verbatim}Reverse the excuses dependencies
+
+This method returns a dictionary where the keys are the package names
+and the values are the excuse names which depend on it. 
+\end{verbatim}
+\normalsize
+
+
+Definition at line 938 of file britney.py.\index{britney::Britney@{britney::Britney}!same_source@{same\_\-source}}
+\index{same_source@{same\_\-source}!britney::Britney@{britney::Britney}}
+\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.same\_\-source ( {\em self}, {\em sv1}, {\em sv2})}\label{classbritney_1_1Britney_85d2e45e8431779b62f398c34972ddf1}
+
+
+
+
+\footnotesize\begin{verbatim}Check if two version numbers are built from the same source
+
+This method returns a boolean value which is true if the two
+version numbers specified as parameters are built from the same
+source. The main use of this code is to detect binary-NMU.
+\end{verbatim}
+\normalsize
+
+
+Definition at line 476 of file britney.py.\index{britney::Britney@{britney::Britney}!should_remove_source@{should\_\-remove\_\-source}}
+\index{should_remove_source@{should\_\-remove\_\-source}!britney::Britney@{britney::Britney}}
+\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.should\_\-remove\_\-source ( {\em self}, {\em pkg})}\label{classbritney_1_1Britney_f8a6c9adbdec7a5a982dd2b74febcc08}
+
+
+
+
+\footnotesize\begin{verbatim}Check if a source package should be removed from testing
+
+This method checks if a source package should be removed from the
+testing distribution; this happens if the source package is not
+present in the unstable distribution anymore.
+
+It returns True if the package can be removed, False otherwise.
+In the former case, a new excuse is appended to the object
+attribute excuses. 
+\end{verbatim}
+\normalsize
+
+
+Definition at line 604 of file britney.py.\index{britney::Britney@{britney::Britney}!should_upgrade_src@{should\_\-upgrade\_\-src}}
+\index{should_upgrade_src@{should\_\-upgrade\_\-src}!britney::Britney@{britney::Britney}}
+\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.should\_\-upgrade\_\-src ( {\em self}, {\em src}, {\em suite})}\label{classbritney_1_1Britney_94785175a85f44b1afaf3add167a211f}
+
+
+
+
+\footnotesize\begin{verbatim}Check if source package should be upgraded
+
+This method checks if a source package should be upgraded. The analysis
+is performed for the source package specified by the `src' parameter,
+checking the architecture `arch' for the distribution `suite'.
+
+It returns False if the given package doesn't need to be upgraded,
+True otherwise. In the former case, a new excuse is appended to
+the object attribute excuses.
+\end{verbatim}
+\normalsize
+
+
+Definition at line 742 of file britney.py.\index{britney::Britney@{britney::Britney}!should_upgrade_srcarch@{should\_\-upgrade\_\-srcarch}}
+\index{should_upgrade_srcarch@{should\_\-upgrade\_\-srcarch}!britney::Britney@{britney::Britney}}
+\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.should\_\-upgrade\_\-srcarch ( {\em self}, {\em src}, {\em arch}, {\em suite})}\label{classbritney_1_1Britney_bd18d7acde434387e94344a39db5b0e5}
+
+
+
+
+\footnotesize\begin{verbatim}Check if binary package should be upgraded
+
+This method checks if a binary package should be upgraded; this can
+happen only if the binary package is a binary-NMU for the given arch.
+The analysis is performed for the source package specified by the
+`src' parameter, checking the architecture `arch' for the distribution
+`suite'.
+
+It returns False if the given package doesn't need to be upgraded,
+True otherwise. In the former case, a new excuse is appended to
+the object attribute excuses. 
+\end{verbatim} +\normalsize + + +Definition at line 628 of file britney.py.\index{britney::Britney@{britney::Britney}!write_excuses@{write\_\-excuses}} +\index{write_excuses@{write\_\-excuses}!britney::Britney@{britney::Britney}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def britney.Britney.write\_\-excuses ( {\em self})}\label{classbritney_1_1Britney_010f6deffca32f7f71ecf1f5c1bb4985} + + + + +\footnotesize\begin{verbatim}Produce and write the update excuses + +This method handles the update excuses generation: the packages are +looked to determine whether they are valid candidates. For the details +of this procedure, please refer to the module docstring. +\end{verbatim} +\normalsize + + +Definition at line 993 of file britney.py. + +The documentation for this class was generated from the following file:\begin{CompactItemize} +\item +britney.py\end{CompactItemize} diff --git a/doc/latex/classexcuse_1_1Excuse.tex b/doc/latex/classexcuse_1_1Excuse.tex new file mode 100644 index 0000000..d9f08ea --- /dev/null +++ b/doc/latex/classexcuse_1_1Excuse.tex @@ -0,0 +1,213 @@ +\section{excuse.Excuse Class Reference} +\label{classexcuse_1_1Excuse}\index{excuse::Excuse@{excuse::Excuse}} +\subsection*{Public Member Functions} +\begin{CompactItemize} +\item +def {\bf \_\-\_\-init\_\-\_\-} +\item +def {\bf set\_\-vers} +\item +def {\bf set\_\-maint} +\item +def {\bf set\_\-section} +\item +def {\bf set\_\-priority} +\item +def {\bf set\_\-date} +\item +def {\bf set\_\-urgency} +\item +def {\bf add\_\-dep} +\item +def {\bf add\_\-break\_\-dep} +\item +def {\bf invalidate\_\-dep} +\item +def {\bf setdaysold} +\item +def {\bf addhtml} +\item +def {\bf html} +\end{CompactItemize} +\subsection*{Static Public Attributes} +\begin{CompactItemize} +\item +tuple {\bf reemail} = re.compile(r\char`\"{}$<$.$\ast$?$>$\char`\"{})\label{classexcuse_1_1Excuse_bb15f55eed8f034db8a64b4ddc46460d} + +\begin{CompactList}\small\item\em Regular expression for removing the email address. 
\item\end{CompactList}\end{CompactItemize} + + +\subsection{Detailed Description} + + +\footnotesize\begin{verbatim}Excuse class + +This class represents an update excuse, which is a detailed explanation +of why a package can or cannot be updated in the testing distribution from +a newer package in another distribution (like for example unstable). + +The main purpose of the excuses is to be written in an HTML file which +will be published over HTTP. The maintainers will be able to parse it +manually or automatically to find the explanation of why their packages +have been updated or not. +\end{verbatim} +\normalsize + + + + +Definition at line 21 of file excuse.py. + +\subsection{Member Function Documentation} +\index{excuse::Excuse@{excuse::Excuse}!__init__@{\_\-\_\-init\_\-\_\-}} +\index{__init__@{\_\-\_\-init\_\-\_\-}!excuse::Excuse@{excuse::Excuse}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def excuse.Excuse.\_\-\_\-init\_\-\_\- ( {\em self}, {\em name})}\label{classexcuse_1_1Excuse_4bdb0917f763d74951c621e466e98bdb} + + + + +\footnotesize\begin{verbatim}Class constructor + +This method initializes the excuse with the specified name and +the default values. 
+\end{verbatim} +\normalsize + + +Definition at line 28 of file excuse.py.\index{excuse::Excuse@{excuse::Excuse}!add_break_dep@{add\_\-break\_\-dep}} +\index{add_break_dep@{add\_\-break\_\-dep}!excuse::Excuse@{excuse::Excuse}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def excuse.Excuse.add\_\-break\_\-dep ( {\em self}, {\em name}, {\em arch})}\label{classexcuse_1_1Excuse_60e00fe0515f2dab003bd29baceedd34} + + + + +\footnotesize\begin{verbatim}Add a break dependency\end{verbatim} +\normalsize + + +Definition at line 80 of file excuse.py.\index{excuse::Excuse@{excuse::Excuse}!add_dep@{add\_\-dep}} +\index{add_dep@{add\_\-dep}!excuse::Excuse@{excuse::Excuse}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def excuse.Excuse.add\_\-dep ( {\em self}, {\em name})}\label{classexcuse_1_1Excuse_fa97c9f61fef17d6028491362153a766} + + + + +\footnotesize\begin{verbatim}Add a dependency\end{verbatim} +\normalsize + + +Definition at line 76 of file excuse.py.\index{excuse::Excuse@{excuse::Excuse}!addhtml@{addhtml}} +\index{addhtml@{addhtml}!excuse::Excuse@{excuse::Excuse}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def excuse.Excuse.addhtml ( {\em self}, {\em note})}\label{classexcuse_1_1Excuse_eb0a1ea0fae66a571e5efa703e53ba3a} + + + + +\footnotesize\begin{verbatim}Add a note in HTML\end{verbatim} +\normalsize + + +Definition at line 94 of file excuse.py.\index{excuse::Excuse@{excuse::Excuse}!html@{html}} +\index{html@{html}!excuse::Excuse@{excuse::Excuse}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def excuse.Excuse.html ( {\em self})}\label{classexcuse_1_1Excuse_84049740652a58b248fabdb3fa9d4b2c} + + + + +\footnotesize\begin{verbatim}Render the excuse in HTML\end{verbatim} +\normalsize + + +Definition at line 98 of file excuse.py.\index{excuse::Excuse@{excuse::Excuse}!invalidate_dep@{invalidate\_\-dep}} +\index{invalidate_dep@{invalidate\_\-dep}!excuse::Excuse@{excuse::Excuse}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def 
excuse.Excuse.invalidate\_\-dep ( {\em self}, {\em name})}\label{classexcuse_1_1Excuse_8594c46ccf4182fa8b37fe487bf53850} + + + + +\footnotesize\begin{verbatim}Invalidate dependency\end{verbatim} +\normalsize + + +Definition at line 85 of file excuse.py.\index{excuse::Excuse@{excuse::Excuse}!set_date@{set\_\-date}} +\index{set_date@{set\_\-date}!excuse::Excuse@{excuse::Excuse}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def excuse.Excuse.set\_\-date ( {\em self}, {\em date})}\label{classexcuse_1_1Excuse_ac01c3b9802ad26571f01b55ffc1098c} + + + + +\footnotesize\begin{verbatim}Set the date of upload of the package\end{verbatim} +\normalsize + + +Definition at line 68 of file excuse.py.\index{excuse::Excuse@{excuse::Excuse}!set_maint@{set\_\-maint}} +\index{set_maint@{set\_\-maint}!excuse::Excuse@{excuse::Excuse}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def excuse.Excuse.set\_\-maint ( {\em self}, {\em maint})}\label{classexcuse_1_1Excuse_189ec1709eef0bd8acb9cd093b8350b5} + + + + +\footnotesize\begin{verbatim}Set the package maintainer's name\end{verbatim} +\normalsize + + +Definition at line 56 of file excuse.py.\index{excuse::Excuse@{excuse::Excuse}!set_priority@{set\_\-priority}} +\index{set_priority@{set\_\-priority}!excuse::Excuse@{excuse::Excuse}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def excuse.Excuse.set\_\-priority ( {\em self}, {\em pri})}\label{classexcuse_1_1Excuse_3a0ebe3eb87c1af8f093e80a874ea0fa} + + + + +\footnotesize\begin{verbatim}Set the priority of the package\end{verbatim} +\normalsize + + +Definition at line 64 of file excuse.py.\index{excuse::Excuse@{excuse::Excuse}!set_section@{set\_\-section}} +\index{set_section@{set\_\-section}!excuse::Excuse@{excuse::Excuse}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def excuse.Excuse.set\_\-section ( {\em self}, {\em section})}\label{classexcuse_1_1Excuse_6b435fa4d19b929d9fb70c8d28688387} + + + + +\footnotesize\begin{verbatim}Set the section of the 
package\end{verbatim} +\normalsize + + +Definition at line 60 of file excuse.py.\index{excuse::Excuse@{excuse::Excuse}!set_urgency@{set\_\-urgency}} +\index{set_urgency@{set\_\-urgency}!excuse::Excuse@{excuse::Excuse}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def excuse.Excuse.set\_\-urgency ( {\em self}, {\em date})}\label{classexcuse_1_1Excuse_c504d40ac6d07ffdb08b7ff8ed555d10} + + + + +\footnotesize\begin{verbatim}Set the urgency of upload of the package\end{verbatim} +\normalsize + + +Definition at line 72 of file excuse.py.\index{excuse::Excuse@{excuse::Excuse}!set_vers@{set\_\-vers}} +\index{set_vers@{set\_\-vers}!excuse::Excuse@{excuse::Excuse}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def excuse.Excuse.set\_\-vers ( {\em self}, {\em tver}, {\em uver})}\label{classexcuse_1_1Excuse_b8751fc5d0033b4c734c476d92841d99} + + + + +\footnotesize\begin{verbatim}Set the testing and unstable versions\end{verbatim} +\normalsize + + +Definition at line 51 of file excuse.py.\index{excuse::Excuse@{excuse::Excuse}!setdaysold@{setdaysold}} +\index{setdaysold@{setdaysold}!excuse::Excuse@{excuse::Excuse}} +\subsubsection{\setlength{\rightskip}{0pt plus 5cm}def excuse.Excuse.setdaysold ( {\em self}, {\em daysold}, {\em mindays})}\label{classexcuse_1_1Excuse_cf1fa7c6fb741bbe7e3120113748f3a5} + + + + +\footnotesize\begin{verbatim}Set the number of days from the upload and the minimum number of days for the update\end{verbatim} +\normalsize + + +Definition at line 89 of file excuse.py. 
+ +The documentation for this class was generated from the following file:\begin{CompactItemize} +\item +excuse.py\end{CompactItemize} diff --git a/doc/latex/doxygen.sty b/doc/latex/doxygen.sty new file mode 100644 index 0000000..1c795bd --- /dev/null +++ b/doc/latex/doxygen.sty @@ -0,0 +1,64 @@ +\NeedsTeXFormat{LaTeX2e} +\ProvidesPackage{doxygen} +\RequirePackage{calc} +\RequirePackage{array} +\pagestyle{fancyplain} +\newcommand{\clearemptydoublepage}{\newpage{\pagestyle{empty}\cleardoublepage}} +\renewcommand{\chaptermark}[1]{\markboth{#1}{}} +\renewcommand{\sectionmark}[1]{\markright{\thesection\ #1}} +\lhead[\fancyplain{}{\bfseries\thepage}] + {\fancyplain{}{\bfseries\rightmark}} +\rhead[\fancyplain{}{\bfseries\leftmark}] + {\fancyplain{}{\bfseries\thepage}} +\rfoot[\fancyplain{}{\bfseries\scriptsize Generated on Sat Jun 24 18:50:20 2006 for briteny by Doxygen }]{} +\lfoot[]{\fancyplain{}{\bfseries\scriptsize Generated on Sat Jun 24 18:50:20 2006 for briteny by Doxygen }} +\cfoot{} +\newenvironment{CompactList} +{\begin{list}{}{ + \setlength{\leftmargin}{0.5cm} + \setlength{\itemsep}{0pt} + \setlength{\parsep}{0pt} + \setlength{\topsep}{0pt} + \renewcommand{\makelabel}{}}} +{\end{list}} +\newenvironment{CompactItemize} +{ + \begin{itemize} + \setlength{\itemsep}{-3pt} + \setlength{\parsep}{0pt} + \setlength{\topsep}{0pt} + \setlength{\partopsep}{0pt} +} +{\end{itemize}} +\newcommand{\PBS}[1]{\let\temp=\\#1\let\\=\temp} +\newlength{\tmplength} +\newenvironment{TabularC}[1] +{ +\setlength{\tmplength} + {\linewidth/(#1)-\tabcolsep*2-\arrayrulewidth*(#1+1)/(#1)} + \par\begin{tabular*}{\linewidth} + {*{#1}{|>{\PBS\raggedright\hspace{0pt}}p{\the\tmplength}}|} +} +{\end{tabular*}\par} +\newcommand{\entrylabel}[1]{ + {\parbox[b]{\labelwidth-4pt}{\makebox[0pt][l]{\textbf{#1}}\\}}} +\newenvironment{Desc} +{\begin{list}{} + { + \settowidth{\labelwidth}{40pt} + \setlength{\leftmargin}{\labelwidth} + \setlength{\parsep}{0pt} + \setlength{\itemsep}{-4pt} + 
\renewcommand{\makelabel}{\entrylabel} + } +} +{\end{list}} +\newenvironment{Indent} + {\begin{list}{}{\setlength{\leftmargin}{0.5cm}} + \item[]\ignorespaces} + {\unskip\end{list}} +\setlength{\parindent}{0cm} +\setlength{\parskip}{0.2cm} +\addtocounter{secnumdepth}{1} +\sloppy +\usepackage[T1]{fontenc} diff --git a/doc/latex/hierarchy.tex b/doc/latex/hierarchy.tex new file mode 100644 index 0000000..d73be1d --- /dev/null +++ b/doc/latex/hierarchy.tex @@ -0,0 +1,5 @@ +\section{briteny Class Hierarchy} +This inheritance list is sorted roughly, but not completely, alphabetically:\begin{CompactList} +\item \contentsline{section}{britney.Britney}{\pageref{classbritney_1_1Britney}}{} +\item \contentsline{section}{excuse.Excuse}{\pageref{classexcuse_1_1Excuse}}{} +\end{CompactList} diff --git a/doc/latex/namespacebritney.tex b/doc/latex/namespacebritney.tex new file mode 100644 index 0000000..f75051a --- /dev/null +++ b/doc/latex/namespacebritney.tex @@ -0,0 +1,9 @@ +\section{Package britney} +\label{namespacebritney}\index{britney@{britney}} + + +\subsection*{Classes} +\begin{CompactItemize} +\item +class {\bf Britney} +\end{CompactItemize} diff --git a/doc/latex/namespaceexcuse.tex b/doc/latex/namespaceexcuse.tex new file mode 100644 index 0000000..ef344c5 --- /dev/null +++ b/doc/latex/namespaceexcuse.tex @@ -0,0 +1,9 @@ +\section{Package excuse} +\label{namespaceexcuse}\index{excuse@{excuse}} + + +\subsection*{Classes} +\begin{CompactItemize} +\item +class {\bf Excuse} +\end{CompactItemize} diff --git a/doc/latex/namespaces.tex b/doc/latex/namespaces.tex new file mode 100644 index 0000000..40edb65 --- /dev/null +++ b/doc/latex/namespaces.tex @@ -0,0 +1,5 @@ +\section{briteny Package List} +Here are the packages with brief descriptions (if available):\begin{CompactList} +\item\contentsline{section}{{\bf britney} }{\pageref{namespacebritney}}{} +\item\contentsline{section}{{\bf excuse} }{\pageref{namespaceexcuse}}{} +\end{CompactList} diff --git a/doc/latex/refman.tex 
b/doc/latex/refman.tex new file mode 100644 index 0000000..a17fc98 --- /dev/null +++ b/doc/latex/refman.tex @@ -0,0 +1,43 @@ +\documentclass[a4paper]{book} +\usepackage{a4wide} +\usepackage{makeidx} +\usepackage{fancyhdr} +\usepackage{graphicx} +\usepackage{multicol} +\usepackage{float} +\usepackage{textcomp} +\usepackage{alltt} +\usepackage{doxygen} +\makeindex +\setcounter{tocdepth}{1} +\renewcommand{\footrulewidth}{0.4pt} +\begin{document} +\begin{titlepage} +\vspace*{7cm} +\begin{center} +{\Large briteny Reference Manual\\[1ex]\large 2.0.alpha1 }\\ +\vspace*{1cm} +{\large Generated by Doxygen 1.4.6}\\ +\vspace*{0.5cm} +{\small Sat Jun 24 18:50:20 2006}\\ +\end{center} +\end{titlepage} +\clearemptydoublepage +\pagenumbering{roman} +\tableofcontents +\clearemptydoublepage +\pagenumbering{arabic} +\chapter{briteny Namespace Index} +\input{namespaces} +\chapter{briteny Hierarchical Index} +\input{hierarchy} +\chapter{briteny Class Index} +\input{annotated} +\chapter{briteny Namespace Documentation} +\input{namespacebritney} +\include{namespaceexcuse} +\chapter{briteny Class Documentation} +\input{classbritney_1_1Britney} +\include{classexcuse_1_1Excuse} +\printindex +\end{document} diff --git a/excuse.py b/excuse.py index 9f7f8d2..24fd352 100644 --- a/excuse.py +++ b/excuse.py @@ -19,9 +19,28 @@ import string class Excuse: + """Excuse class + + This class represents an update excuse, which is a detailed explanation + of why a package can or cannot be updated in the testing distribution from + a newer package in another distribution (like for example unstable). + + The main purpose of the excuses is to be written in an HTML file which + will be published over HTTP. The maintainers will be able to parse it + manually or automatically to find the explanation of why their packages + have been updated or not. 
+ """ + + ## @var reemail + # Regular expression for removing the email address reemail = re.compile(r"<.*?>") def __init__(self, name): + """Class constructor + + This method initializes the excuse with the specified name and + the default values. + """ self.name = name self.ver = ("-", "-") self.maint = None @@ -40,42 +59,54 @@ class Excuse: self.htmlline = [] def set_vers(self, tver, uver): + """Set the testing and unstable versions""" if tver: self.ver = (tver, self.ver[1]) if uver: self.ver = (self.ver[0], uver) def set_maint(self, maint): + """Set the package maintainer's name""" self.maint = self.reemail.sub("", maint) def set_section(self, section): + """Set the section of the package""" self.section = section def set_priority(self, pri): + """Set the priority of the package""" self.pri = pri def set_date(self, date): + """Set the date of upload of the package""" self.date = date def set_urgency(self, date): + """Set the urgency of upload of the package""" self.urgency = date def add_dep(self, name): + """Add a dependency""" if name not in self.deps: self.deps.append(name) def add_break_dep(self, name, arch): + """Add a break dependency""" if (name, arch) not in self.break_deps: self.break_deps.append( (name, arch) ) def invalidate_dep(self, name): + """Invalidate dependency""" if name not in self.invalid_deps: self.invalid_deps.append(name) def setdaysold(self, daysold, mindays): + """Set the number of days from the upload and the minimum number of days for the update""" self.daysold = daysold self.mindays = mindays def addhtml(self, note): + """Add a note in HTML""" self.htmlline.append(note) def html(self): + """Render the excuse in HTML""" res = "%s (%s to %s)\n
      \n" % \ (self.name, self.name, self.name, self.ver[0], self.ver[1]) if self.maint: