debian/grub-legacy-ec2.kernel-postinst

#!/bin/sh
version="$1"
bootopt=""

# passing the kernel version is required
[ -z "${version}" ] && exit 0

# avoid running multiple times
if [ -n "$DEB_MAINT_PARAMS" ]; then
    eval set -- "$DEB_MAINT_PARAMS"
    if [ -z "$1" ] || [ "$1" != "configure" ]; then
        exit 0
    fi
fi

update=/usr/sbin/update-grub-legacy-ec2
[ ! -x "${update}" ] || exec "${update}"

debian/control

Source: cloud-init
Section: admin
Priority: extra
Maintainer: Scott Moser
Build-Depends: cdbs,
 debhelper (>= 5.0.38),
 po-debconf,
 pyflakes,
 pylint,
 python (>= 2.6.6-3~),
 python-argparse,
 python-cheetah,
 python-configobj,
 python-jsonpatch | python-json-patch,
 python-mocker,
 python-nose,
 python-oauth,
 python-prettytable,
 python-setuptools,
 python-requests,
 python-yaml
XS-Python-Version: all
Standards-Version: 3.9.5

Package: cloud-init
Architecture: all
Depends: cloud-guest-utils | cloud-utils,
 ifupdown (>= 0.6.10ubuntu5),
 procps,
 python,
 python-requests (>= 0.8.2),
 software-properties-common,
 ${misc:Depends},
 ${python:Depends}
Recommends: eatmydata
Provides: ec2-init
Replaces: ec2-init (<<0.5.3)
Conflicts: ec2-init (<<0.5.3)
XB-Python-Version: ${python:Versions}
Description: Init scripts for cloud instances
 Cloud instances need special scripts to run during initialisation to
 retrieve and install ssh keys and to let the user run various scripts.

Package: grub-legacy-ec2
Depends: debconf (>= 1.5.19) | cdebconf,
 ucf,
 util-linux (>= 2.15-1),
 ${misc:Depends}
Conflicts: grub
Suggests: grub-legacy-doc
Architecture: all
Description: Handles update-grub for ec2 instances
 EC2 instances that use grub-legacy as a bootloader need a way to keep
 /boot/grub/menu.lst up to date while not conflicting with grub-pc.
 This package provides that.

Package: ec2-init
Depends: cloud-init, ${misc:Depends}
Architecture: all
Description: package renamed -> cloud-init
 This package has been renamed to 'cloud-init'.

debian/cloud-init.lintian-overrides

# this explicitly diverts ureadahead.conf in ureadahead package
# see LP: #499520 for more discussion
cloud-init binary: diversion-for-unknown-file

debian/update-grub-legacy-ec2

#!/bin/bash
#
# Insert a list of installed kernels in a grub config file
# Copyright 2001 Wichert Akkerman
# Copyright 2007, 2008 Canonical Ltd.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Contributors:
#   Jason Thomas
#   David B.Harris
#   Marc Haber
#   Crispin Flowerday
#   Steve Langasek

# Abort on errors
set -e

# load debconf first, since this re-execs the script
. 
/usr/share/debconf/confmodule host_os=`uname -s | tr '[A-Z]' '[a-z]'` abort() { message=$@ echo >&2 printf '%s\n' "$message" >&2 echo >&2 exit 1 } find_grub_dir () { echo -n "Searching for GRUB installation directory ... " >&2 for d in $grub_dirs ; do if [ -d "$d" ] ; then grub_dir="$d" break fi done if [ -z "$grub_dir" ] ; then abort "No GRUB directory found. To create a template run 'mkdir /boot/grub' first. To install grub, install it manually or try the 'grub-install' command. ### Warning, grub-install is used to change your MBR. ###" else echo "found: $grub_dir" >&2 fi echo $grub_dir } find_device () { mount_point=$1 # Autodetect current root device device= if [ -f /etc/fstab ] ; then device=$(awk '$1!~/^#/{ if ($2 ~ "^/+$") { $2 = "/"; } else { sub("/*$", "", $2); } if ($2 == "'"$mount_point"'"){ print $1; } }' /etc/fstab | tail -n 1) fi if [ -n "$device" ] ; then case "$device" in LABEL=* | UUID=*) device=`readlink -f "$(findfs $device)"` ;; *) device=`readlink -f "$device"` ;; esac fi echo $device } find_root_device () { device=$(find_device "/") if [ -z "$device" ]; then echo "Cannot determine root device. Assuming /dev/hda1" >&2 echo "This error is probably caused by an invalid /etc/fstab" >&2 device=/dev/hda1 fi echo $device } # Usage: convert_raid1 os_device # Checks if os_device is a software raid1. # If so, converts to first physical device in array. convert_raid1 () { case $1 in /dev/md[0-9]) : ;; # Continue *) return 1 ;; esac [ -x /sbin/mdadm ] || return 1 # Check that the raid device is raid1 raidlevel=$(mdadm -D -b $1 | grep "^ARRAY" | \ sed "s/^.*level=//" | cut -d" " -f1) [ "$raidlevel" = "raid1" ] || return 1 # Take only the first device that makes up the raid raiddev=$(mdadm -D $1 | grep -A1 "Number" | grep "dev" \ | sed "s/^.*\(\/dev\/.*\)$/\1/") [ -n "$raiddev" ] || return 1 echo $raiddev return 0 } # Usage: convert os_device # Convert an OS device to the corresponding GRUB drive. # This part is OS-specific. convert () { # First, check if the device file exists. if test -e "$1"; then : else echo "$1: Not found or not a block device." 1>&2 exit 1 fi host_os=`uname -s | tr '[[:upper:]]' '[[:lower:]]'` # Break the device name into the disk part and the partition part. case "$host_os" in linux) tmp_disk=`echo "$1" | sed -e 's%\([sh]d[[:lower:]]\)[0-9]*$%\1%' \ -e 's%\(fd[0-9]*\)$%\1%' \ -e 's%/part[0-9]*$%/disc%' \ -e 's%\(c[0-7]d[0-9]*\).*$%\1%'` tmp_part=`echo "$1" | sed -e 's%.*/[sh]d[[:lower:]]\([0-9]*\)$%\1%' \ -e 's%.*/fd[0-9]*$%%' \ -e 's%.*/floppy/[0-9]*$%%' \ -e 's%.*/\(disc\|part\([0-9]*\)\)$%\2%' \ -e 's%.*c[0-7]d[0-9]*p*%%'` ;; gnu) tmp_disk=`echo "$1" | sed 's%\([sh]d[0-9]*\).*%\1%'` tmp_part=`echo "$1" | sed "s%$tmp_disk%%"` ;; freebsd|*/kfreebsd) tmp_disk=`echo "$1" | sed 's%r\{0,1\}\([saw]d[0-9]*\).*$%\1%' \ | sed 's%r\{0,1\}\(da[0-9]*\).*$%\1%'` tmp_part=`echo "$1" \ | sed "s%.*/r\{0,1\}[saw]d[0-9]\(s[0-9]*[a-h]\)%\1%" \ | sed "s%.*/r\{0,1\}da[0-9]\(s[0-9]*[a-h]\)%\1%"` ;; netbsd|*/knetbsd) tmp_disk=`echo "$1" | sed 's%r\{0,1\}\([sw]d[0-9]*\).*$%r\1d%' \ | sed 's%r\{0,1\}\(fd[0-9]*\).*$%r\1a%'` tmp_part=`echo "$1" \ | sed "s%.*/r\{0,1\}[sw]d[0-9]\([abe-p]\)%\1%"` ;; *) echo "update-grub does not support your OS yet." 1>&2 exit 1 ;; esac # Get the drive name. tmp_drive=`grep -v '^#' $device_map | grep "$tmp_disk *$" \ | sed 's%.*\(([hf]d[0-9][a-z0-9,]*)\).*%\1%'` # If not found, print an error message and exit. if test "x$tmp_drive" = x; then echo "$1 does not have any corresponding BIOS drive." 
1>&2 exit 1 fi if test "x$tmp_part" != x; then # If a partition is specified, we need to translate it into the # GRUB's syntax. case "$host_os" in linux) echo "$tmp_drive" | sed "s%)$%,`expr $tmp_part - 1`)%" ;; gnu) if echo $tmp_part | grep "^s" >/dev/null; then tmp_pc_slice=`echo $tmp_part \ | sed "s%s\([0-9]*\)[a-z]*$%\1%"` tmp_drive=`echo "$tmp_drive" \ | sed "s%)%,\`expr "$tmp_pc_slice" - 1\`)%"` fi if echo $tmp_part | grep "[a-z]$" >/dev/null; then tmp_bsd_partition=`echo "$tmp_part" \ | sed "s%[^a-z]*\([a-z]\)$%\1%"` tmp_drive=`echo "$tmp_drive" \ | sed "s%)%,$tmp_bsd_partition)%"` fi echo "$tmp_drive" ;; freebsd|*/kfreebsd) if echo $tmp_part | grep "^s" >/dev/null; then tmp_pc_slice=`echo $tmp_part \ | sed "s%s\([0-9]*\)[a-h]*$%\1%"` tmp_drive=`echo "$tmp_drive" \ | sed "s%)%,\`expr "$tmp_pc_slice" - 1\`)%"` fi if echo $tmp_part | grep "[a-h]$" >/dev/null; then tmp_bsd_partition=`echo "$tmp_part" \ | sed "s%s\{0,1\}[0-9]*\([a-h]\)$%\1%"` tmp_drive=`echo "$tmp_drive" \ | sed "s%)%,$tmp_bsd_partition)%"` fi echo "$tmp_drive" ;; netbsd|*/knetbsd) if echo $tmp_part | grep "^[abe-p]$" >/dev/null; then tmp_bsd_partition=`echo "$tmp_part" \ | sed "s%\([a-p]\)$%\1%"` tmp_drive=`echo "$tmp_drive" \ | sed "s%)%,$tmp_bsd_partition)%"` fi echo "$tmp_drive" ;; esac else # If no partition is specified, just print the drive name. echo "$tmp_drive" fi } # Usage: convert_default os_device # Convert an OS device to the corresponding GRUB drive. # Calls OS-specific convert, and returns a default of # (hd0,0) if anything goes wrong convert_default () { # Check if device is software raid1 array if tmp_dev=$(convert_raid1 $1 2>/dev/null) ; then : # Use device returned by convert_raid1 else tmp_dev=$1 fi if tmp=$(convert $tmp_dev 2>/dev/null) ; then echo $tmp else echo "${grub_root_device_fallback}" fi } is_removable () { removabledevice="$(echo "$1" | sed -e 's%\([sh]d[a-z]\)[0-9]*$%\1%' -e 's%\(fd[0-9]*\)$%\1%' -e 's%/part[0-9]*$%/disc%' -e 's%\(c[0-7]d[0-9]*\).*$%\1%' -e 's%^/dev/%%g')" if [ -e "/sys/block/$removabledevice/removable" ]; then if [ "$(cat /sys/block/$removabledevice/removable)" != "0" ]; then echo "/dev/$removabledevice" return fi fi echo "" } convert_to_uuid() { local dev; dev=$1 convert=false case "$dev" in /dev/disk/*) ;; /dev/mapper/*) ;; /dev/evms/[hs]d[a-z][0-9]*) convert=: ;; /dev/evms/*) ;; /dev/md[0-9]*) ;; /dev/*) convert=: ;; esac if $convert; then if [ -b "$dev" ]; then uuid=$(blkid -o value -s UUID "$dev" || true) fi fi echo "$uuid" } convert_kopt_to_uuid() { local kopt; kopt=$1 convert=false root=$(echo "$kopt" | sed 's/.*root=//;s/ .*//') case "$root" in UUID=*|LABEL=*) ;; /dev/disk/*) ;; /dev/mapper/*) ;; /dev/evms/[hs]d[a-z][0-9]*) convert=: ;; /dev/evms/*) ;; /dev/md[0-9]*) ;; /dev/*) convert=: ;; esac if $convert; then if [ -L "$DEV" ] && readlink "$DEV" | grep -q "^/dev/mapper/" then : elif [ -b "$root" ]; then uuid=$(blkid -o value -s UUID "$root" || true) if [ -n "$uuid" ]; then kopt=$(echo "$kopt" | sed "s/\(.*root=\)[^ ]*/\1UUID=$uuid/") fi fi fi echo "$kopt" } ## Configuration Options # directory's to look for the grub installation and the menu file grub_dirs="/boot/grub /boot/boot/grub" # The grub installation directory grub_dir=$(find_grub_dir) # Full path to the menu.lst menu_file_basename=menu.lst menu_file=$grub_dir/$menu_file_basename # Full path to the menu.lst fragment used for ucf management ucf_menu_file=/var/run/grub/$menu_file_basename # Full path to the default file default_file_basename=default default_file=$grub_dir/$default_file_basename # the 
device for the / filesystem
root_device=$(find_root_device)

# the device for the /boot filesystem
boot_device=$(find_device "/boot")

# Full path to the device.map
device_map=$grub_dir/device.map

# Default kernel options, overridden by the kopt statement in the menufile.
loop_file=$(awk '$2=="/" && $4~"loop" {print $1}' /etc/fstab)
if [ -n "$loop_file" ]; then
    dev_mountpoint=$(awk '"'${loop_file}'"~"^"$2 && $2!="/" {print $1";"$2}' /proc/mounts|tail -n 1)
    host_device="${dev_mountpoint%;*}"
    host_mountpoint="${dev_mountpoint#*;}"
fi
if [ -n "$host_device" ]; then
    boot_device=
    root_device="$host_device"
    default_kopt="root=$host_device loop=${loop_file#$host_mountpoint} ro"
else
    default_kopt="root=$root_device ro"
fi
default_kopt="$(convert_kopt_to_uuid "$default_kopt")"
kopt="$default_kopt"

# Title
title=$(lsb_release --short --description 2>/dev/null) || title="Ubuntu"

# should update-grub remember the default entry
updatedefaultentry="false"

# Drive (in GRUB terms) where the kernel is located. Overridden by the
# kopt statement in the menufile.
# if we don't have a device.map then we can't use the convert function.
# Try to use a UUID instead of the GRUB device name.
if test -z "$boot_device" ; then
    uuid=$(convert_to_uuid "$root_device")
else
    uuid=$(convert_to_uuid "$boot_device")
fi
#if [ -n "$uuid" ]; then
#    grub_root_device="$uuid"
#fi

## The pv-grub provided by EC2 does not support 'uuid', so we have to use a grub device name.
## When presented to grub, the root filesystem is on what grub sees
## as a bare disk (hd0), rather than what we see in user space (sda1).
grub_root_device_fallback="(hd0)"
grub_root_device="${grub_root_device_fallback}"
check_removable=""

if true; then
    if test -f "$device_map"; then
        if test -z "$boot_device" ; then
            grub_root_device=$(convert_default "$root_device")
            check_removable="$(is_removable "$root_device")"
        else
            grub_root_device=$(convert_default "$boot_device")
            check_removable="$(is_removable "$boot_device")"
        fi
    else
        grub_root_device="${grub_root_device_fallback}"
    fi
fi

# If the root/boot device is on a removable target, we need to override
# the grub_root_device to (hd0,X). This is a requirement since the BIOS
# will change device mapping dynamically if we switch boot device.
if test -n "$check_removable" ; then
    grub_root_device="$(echo "$grub_root_device" | sed -e 's/d.*,/d0,/g')"
fi

# should grub create the alternative boot options in the menu
alternative="true"

# should grub lock the alternative boot options in the menu
lockalternative="false"

# additional options to use with the default boot option, but not with the
# alternatives
defoptions="console=hvc0"

# should grub lock the old kernels
lockold="false"

# Xen hypervisor options to use with the default Xen boot option
xenhopt=""

# Xen Linux kernel options to use with the default Xen boot option
xenkopt="console=tty0"

# options to use with the alternative boot options
altoptions="(recovery mode) single"

# controls how many kernels are listed in the config file,
# this does not include the alternative kernels
howmany="all"

# should grub create a memtest86 entry
memtest86="true"

# should grub add "savedefault" to default boot options
savedefault="false"

# is grub running in a domU?
indomU="true"

# stores the command line arguments
command_line_arguments=$1

# does this version of grub support the quiet option?
if [ -f ${grub_dir}/installed-version ] && dpkg --compare-versions `cat ${grub_dir}/installed-version` ge 0.97-11ubuntu4; then supports_quiet=true else supports_quiet=false fi # read user configuration if test -f "/etc/default/grub" ; then . /etc/default/grub fi # Default options to use in a new config file. This will only be used if $menu_file # doesn't already exist. Only edit the lines between the two "EOF"s. The others are # part of the script. newtemplate=$(tempfile) cat >> "$newtemplate" <&2 if [ -f "$default_file" ] ; then echo "found: $default_file" >&2 else echo "Generating $default_file file and setting the default boot entry to 0" >&2 grub-set-default 0 fi # Make sure we use the standard sorting order LC_COLLATE=C # Magic markers we use start="### BEGIN AUTOMAGIC KERNELS LIST" end="### END DEBIAN AUTOMAGIC KERNELS LIST" startopt="## ## Start Default Options ##" endopt="## ## End Default Options ##" # path to grub2 grub2name="/boot/grub/core.img" # Extract options from config file ExtractMenuOpt() { opt=$1 sed -ne "/^$start\$/,/^$end\$/ { /^$startopt\$/,/^$endopt\$/ { /^# $opt=/ { s/^# $opt=\(.*\)\$/\1/ p } } }" $menu } GetMenuOpts() { opt=$1 sed -ne "/^$start\$/,/^$end\$/ { /^$startopt\$/,/^$endopt\$/ { /^# $opt=/ { p } } }" $menu } ExtractMenuOpts() { opt=$1 GetMenuOpts $opt | sed "s/^# $opt=\(.*\)\$/\1=\"\2\"/" } GetMenuOpt() { opt=$1 value=$2 [ -z "$(GetMenuOpts "$opt")" ] || value=$(ExtractMenuOpt "$opt") echo $value } # Compares two version strings A and B # Returns -1 if AB # This compares version numbers of the form # 2.4.14.2 > 2.4.14 # 2.4.14random = 2.4.14-random > 2.4.14-ac10 > 2.4.14 > 2.4.14-pre2 > # 2.4.14-pre1 > 2.4.13-ac99 CompareVersions() { #Changes the line something-x.y.z into somthing-x.y.z.q #This is to ensure that kernels with a .q is treated as higher than the ones without #First a space is put after the version number v1=$(echo $1 | sed -e 's!^\(.*-\([0-9]\+\.\)\{2,3\}[0-9]\+\)\(.*\)!\1 \3!g') v2=$(echo $2 | sed -e 's!^\(.*-\([0-9]\+\.\)\{2,3\}[0-9]\+\)\(.*\)!\1 \3!g') #If the version number only has 3 digits then put in another .0 v1=$(echo $v1 | sed -e 's!^\(.*-\([0-9]\+\.\)\{2\}[0-9]\+\)\( .*\|$\)!\1.0 \3!g') v2=$(echo $v2 | sed -e 's!^\(.*-\([0-9]\+\.\)\{2\}[0-9]\+\)\( .*\|$\)!\1.0 \3!g') # Then split the version number and remove any '.' 's or dashes v1=$(echo $v1 | sed -e 's![-\.]\+! !g' -e 's!\([0-9]\)\([[:alpha:]]\)!\1 \2!') v2=$(echo $v2 | sed -e 's![-\.]\+! !g' -e 's!\([0-9]\)\([[:alpha:]]\)!\1 \2!') # we weight different kernel suffixes here # ac = 50 # pre = -50 # rc = -40 # test = -60 # others are given 99 v1=$(echo $v1 | sed -e 's! k7! 786 !g' -e 's! ac! 50 !g' -e 's! rc! -40 !g' -e 's! pre! -50 !g' -e 's! test! -60 !g' -e 's![^ ]*[^-0-9 ][^ ]*!99!g') v2=$(echo $v2 | sed -e 's! k7! 786 !g' -e 's! ac! 50 !g' -e 's! rc! -40 !g' -e 's! pre! -50 !g' -e 's! test! 
-60 !g' -e 's![^ ]*[^-0-9 ][^ ]*!99!g') result=0; v1finished=0; v2finished=0; while [ $result -eq 0 ] && [ $v1finished -eq 0 ] && [ $v2finished -eq 0 ]; do if [ "$v1" = "" ]; then v1comp=0; v1finished=1 else set -- $v1; v1comp=$1; shift; v1=$* fi if [ "$v2" = "" ]; then v2comp=0; v2finished=1 else set -- $v2; v2comp=$1; shift; v2=$* fi set +e result=`expr $v1comp - $v2comp` result=`expr substr $result 1 2` set -e if [ $result -gt 0 ]; then result=1 elif [ $result -lt 0 ]; then result=-1 fi done # finally return the result echo $result } # looks in the directory specified for an initrd image with the version specified FindInitrdName() { # strip trailing slashes directory=$(echo $1 | sed -e 's#/*$##') version=$2 # initrd # initrd.img # initrd-lvm # .*.gz initrdName="" names="initrd initrd.img initrd-lvm" compressed="gz" for n in $names ; do # make sure we haven't already found it if [ -z "$initrdName" ] ; then if [ -f "$directory/$n$version" ] ; then initrdName="$n$version" break else for c in $compressed ; do if [ -f "$directory/$n$version.$c" ] ; then initrdName="$n$version.$c" break fi done fi else break fi done # return the result echo $initrdName } FindXenHypervisorVersions () { version=$1 if [ -f "/var/lib/linux-image-$version/xen-versions" ]; then ret="$(cat /var/lib/linux-image-$version/xen-versions)" fi echo $ret } get_kernel_opt() { kernel_version=$1 version=$(echo $kernel_version | sed 's/^[^0-9]*//') version=$(echo $version | sed 's/[-\+\.]/_/g') if [ -n "$version" ] ; then while [ -n "$version" ] ; do currentOpt="$(eval "echo \${kopt_$version}")" if [ -n "$currentOpt" ] ; then break fi oldversion="$version" version=$(echo $version | sed 's/_\?[^_]*$//') if [ "$version" = "$oldversion" ] ; then # Break infinite loop, if the version isn't what we expect break fi done fi if [ -z "$currentOpt" ] ; then currentOpt=$kopt fi echo $currentOpt } write_kernel_entry() { local kernel_version; kernel_version=$1; shift local recovery_desc; recovery_desc=$1; shift local lock_alternative; lock_alternative=$1; shift local grub_root_device; grub_root_device=$1; shift local kernel; kernel=$1; shift local kernel_options; kernel_options=$1; shift local recovery_suffix; recovery_suffix=$1; shift local initrd; initrd=$1; shift local savedefault; savedefault=$1; shift local lockold; lockold=$1; shift local dapper_upgrade; dapper_upgrade=$1; shift local hypervisor if [ -n "$1" ]; then # Hypervisor. hypervisor=$1; shift local hypervisor_image; hypervisor_image=$1; shift local hypervisor_version; hypervisor_version=$1; shift local hypervisor_options; hypervisor_options=$1; shift fi echo -n "title " >> $buffer if [ -n "$hypervisor" ]; then echo -n "$hypervisor $hypervisor_version / " >> $buffer fi echo -n "$title" >> $buffer if [ -n "$kernel_version" ]; then echo -n ", " >> $buffer # memtest86 is not strictly a kernel if ! 
echo "$kernel_version" | grep -q ^memtest86; then echo -n "kernel " >> $buffer fi echo -n "$kernel_version" >> $buffer fi if [ -n "$recovery_desc" ]; then echo -n " $recovery_desc" >> $buffer fi echo >> $buffer # lock the alternative options if test x"$lock_alternative" = x"true" ; then echo "lock" >> $buffer fi # lock the old entries if test x"$lockold" = x"true" ; then echo "lock" >> $buffer fi case "$grub_root_device" in [^A-Za-z0-9]*) echo "root $grub_root_device" >> $buffer ;; *) echo "uuid $grub_root_device" >> $buffer ;; esac echo -n "kernel " >> $buffer if [ -n "$hypervisor" ]; then echo -n "$hypervisor_image" >> $buffer if [ -n "$hypervisor_options" ]; then echo -n " $hypervisor_options" >> $buffer fi echo >> $buffer echo -n "module " >> $buffer fi echo -n "$kernel" >> $buffer if [ -n "$kernel_options" ]; then echo -n " $kernel_options" >> $buffer fi if [ -n "$recovery_desc" ]; then echo -n " $recovery_suffix" >> $buffer fi if [ -n "$dapper_upgrade" -a -z "$kernel_options$recovery_desc" ]; then echo -n " " >> $buffer fi echo >> $buffer if [ -n "$initrd" ]; then if [ -n "$hypervisor" ]; then echo -n "module " >> $buffer else echo -n "initrd " >> $buffer fi echo "$initrd" >> $buffer fi if [ ! -n "$recovery_desc" -a x"$supports_quiet" = x"true" -a -z "$dapper_upgrade" ]; then echo "quiet" >> $buffer fi if test x"$savedefault" = x"true" ; then echo "savedefault" >> $buffer fi if test x"$dapper_upgrade" != x ; then echo "boot" >> $buffer fi echo >> $buffer } ## write out the kernel entries output_kernel_list() { counter=0 # Xen entries first. for kern in $xenKernels ; do if test ! x"$howmany" = x"all" ; then if [ $counter -gt $howmany ] ; then break fi fi kernelName=$(basename $kern) kernelVersion=$(echo $kernelName | sed -e 's/vmlinuz//') initrdName=$(FindInitrdName "/boot" "$kernelVersion") initrd="" kernel=$kernel_dir/$kernelName if [ -n "$initrdName" ] ; then initrd=$kernel_dir/$initrdName fi kernelVersion=$(echo $kernelVersion | sed -e 's/^-//') currentOpt=$(get_kernel_opt $kernelVersion) hypervisorVersions=$(FindXenHypervisorVersions "$kernelVersion") found= for hypervisorVersion in $hypervisorVersions; do hypervisor="$kernel_dir/xen-$hypervisorVersion.gz" if [ -e "$hypervisor" ]; then found=1 echo "Found Xen hypervisor $hypervisorVersion, kernel: $kernel" >&2 write_kernel_entry "$kernelVersion" '' '' "$grub_root_device" \ "$kernel" "$currentOpt $xenkopt" '' "$initrd" "$savedefault" '' "$dapper_upgrade" \ Xen "$hypervisor" "$hypervisorVersion" "$xenhopt" counter=$(($counter + 1)) fi done if [ -z $found ]; then for hypervisor in $hypervisors; do hypVersion=`basename "$hypervisor" .gz | sed s%xen-%%` echo "Found Xen hypervisor $hypVersion, kernel: $kernel" >&2 write_kernel_entry "$kernelVersion" '' '' "$grub_root_device" \ "$kernel" "$currentOpt $xenkopt" '' "$initrd" "$savedefault" '' "$dapper_upgrade" \ Xen "$kernel_dir/$hypervisor" "$hypVersion" "$xenhopt" counter=$(($counter + 1)) done fi done for kern in $sortedKernels ; do counter=$(($counter + 1)) if test ! 
x"$howmany" = x"all" ; then if [ $counter -gt $howmany ] ; then break fi fi kernelName=$(basename $kern) initrdName="" initrd="" extra_opts="" if [ "$kern" = "/boot/last-good-boot/vmlinuz" ]; then kernelVersion="Last successful boot" if [ -e "/boot/last-good-boot/initrd.img" ]; then initrdName="last-good-boot/initrd.img" fi kernelName="last-good-boot/vmlinuz" extra_opts="$extra_opts last-good-boot" else kernelVersion=$(echo $kernelName | sed -e 's/vmlinuz//') initrdName=$(FindInitrdName "/boot" "$kernelVersion") if [ -x "/usr/bin/makedumpfile" ] && [ -x "/sbin/kexec" ]; then extra_opts="$extra_opts crashkernel=384M-2G:64M,2G-:128M" fi fi kernel=$kernel_dir/$kernelName if [ -n "$initrdName" ] ; then initrd=$kernel_dir/$initrdName fi echo "Found kernel: $kernel" >&2 if [ "$kernelName" = "vmlinuz" ]; then if [ -L "/boot/$kernelName" ]; then kernelVersion=`readlink -f "/boot/$kernelName"` kernelVersion=$(echo $kernelVersion | sed -e 's/.*vmlinuz-//') kernelVersion="$kernelVersion Default" else kernelVersion="Default" fi fi if [ "$kernelName" = "vmlinuz.old" ]; then if [ -L "/boot/$kernelName" ]; then kernelVersion=`readlink -f "/boot/$kernelName"` kernelVersion=$(echo $kernelVersion | sed -e 's/.*vmlinuz-//') kernelVersion="$kernelVersion Previous" else kernelVersion="Previous" fi fi kernelVersion=$(echo $kernelVersion | sed -e 's/^-//') currentOpt=$(get_kernel_opt $kernelVersion) do_lockold=$lockold # do not lockold for the first entry [ $counter -eq 1 ] && do_lockold=false if [ "$kernelName" = "last-good-boot/vmlinuz" ]; then if [ -e /boot/last-good-boot/cmdline ]; then cmdline="$(cat /boot/last-good-boot/cmdline) last-good-boot" else cmdline="$currentOpt $defoptions $extra_opts" fi write_kernel_entry "$kernelVersion" "" "" "$grub_root_device" "$kernel" \ "$cmdline" "" "$initrd" "$savedefault" "$do_lockold" \ "$dapper_upgrade" else write_kernel_entry "$kernelVersion" "" "" "$grub_root_device" "$kernel" \ "$currentOpt $defoptions $extra_opts" "" "$initrd" "$savedefault" \ "$do_lockold" "$dapper_upgrade" fi # insert the alternative boot options if test ! x"$alternative" = x"false" && \ test ! x"$kernelName" = x"last-good-boot/vmlinuz"; then # for each altoptions line do this stuff sed -ne 's/# altoptions=\(.*\)/\1/p' $buffer | while read line; do descr=$(echo $line | sed -ne 's/\(([^)]*)\)[[:space:]]\(.*\)/\1/p') suffix=$(echo $line | sed -ne 's/\(([^)]*)\)[[:space:]]\(.*\)/\2/p') test x"$lockalternative" = x"true" && do_lockold=false write_kernel_entry "$kernelVersion" "$descr" "$lockalternative" \ "$grub_root_device" "$kernel" "$currentOpt $extra_opts" \ "$suffix" "$initrd" "false" "$do_lockold" \ "$dapper_upgrade" done fi done ## Adding the chainload stanza is simply confusing, and for ## legacy ec2 grub, it will never be used. LP: #627451 ## # if test -f $grub2name ; then # echo "Found GRUB 2: $grub2name" >&2 # cat >> $buffer << EOF #title Chainload into GRUB 2 #root $grub_root_device #kernel $grub2name #EOF # if test x"$savedefault" = x"true" ; then # echo "savedefault" >> $buffer # fi # echo >> $buffer # fi memtest86names="memtest86 memtest86+" if test ! 
x"$memtest86" = x"false" ; then for name in $memtest86names ; do if test -f "/boot/$name.bin" ; then kernelVersion="$name" kernel="$kernel_dir/$name.bin" currentOpt= initrd= echo "Found kernel: $kernel" >&2 write_kernel_entry "$kernelVersion" "" "" "$grub_root_device" \ "$kernel" "$currentOpt" "" "$initrd" "false" "" "$dapper_upgrade" fi done fi echo $end >> $buffer } ucf_update_kernels() { local target; target="$1" local buffer; buffer="$2" sed -ni -e"/$endopt/,/$end/p" "$buffer" if [ "x$initialconfig" = "x" ]; then sed -n -e"/$endopt/,/$end/p" < $menu > $ucf_menu_file else cat $buffer > $ucf_menu_file fi db_x_loadtemplatefile "$(dpkg-query --control-path grub-legacy-ec2 templates)" grub ucf --debconf-ok \ --debconf-template grub/update_grub_changeprompt_threeway \ --three-way "$buffer" $ucf_menu_file rm "$buffer" # now re-merge the ucf results with the target file sed -i -e "/^$endopt/,/^$end/ { /^$endopt/r $ucf_menu_file d } " $target rm -f $ucf_menu_file ${ucf_menu_file}.ucf-old } echo -n "Testing for an existing GRUB $menu_file_basename file ... " >&2 # Test if our menu file exists if [ -f "$menu_file" ] ; then menu="$menu_file" rm -f $newtemplate unset newtemplate echo "found: $menu_file" >&2 cp -f "$menu_file" "$menu_file~" else # if not ask user if they want us to create one initialconfig=1 menu="$menu_file" echo >&2 echo >&2 echo -n "Could not find $menu_file file. " >&2 if [ "-y" = "$command_line_arguments" ] ; then echo >&2 echo "Generating $menu_file" >&2 answer=y else echo -n "Would you like $menu_file generated for you? " >&2 echo -n "(y/N) " >&2 read answer <&2 fi case "$answer" in y* | Y*) cat "$newtemplate" > $menu_file rm -f $newtemplate unset newtemplate ;; *) abort "Not creating $menu_file as you wish" ;; esac fi # Extract the kernel options to use kopt=$(GetMenuOpt "kopt" "$kopt") # Extract options for specific kernels opts="$(ExtractMenuOpts "\(kopt_[[:alnum:]_]\+\)")" test -z "$opts" || eval "$opts" CustomKopts=$(GetMenuOpts "\(kopt_[[:alnum:]_]\+\)" | \ grep -v "^# kopt_2_6=" || true) # Set the kernel 2.6 option only for fresh install (but convert it to # mount-by-UUID on upgrade) test -z "$kopt_2_6" && test -z "$(GetMenuOpt "kopt" "")" && \ kopt_2_6="$default_kopt" # Extract the grub root grub_root_device=$(GetMenuOpt "groot" "$grub_root_device") groot_cfg=$(GetMenuOpt groot "${grub_root_device_fallback}") case "${groot_cfg}" in [^A-Za-z0-9]*) :;; *) echo "uuid not supported. update 'groot' in ${menu_file}" >&2; abort "groot must be grub root device (ie '(hd0)'). 
not '${groot_cfg}'" >&2; esac # Extract the old recovery value alternative=$(GetMenuOpt "recovery" "$alternative") # Extract the alternative value alternative=$(GetMenuOpt "alternative" "$alternative") # Extract the lockalternative value lockalternative=$(GetMenuOpt "lockalternative" "$lockalternative") # Extract the additional default options # Check nonaltoptions too for compatibility with Ubuntu <= 5.10 defoptions=$(GetMenuOpt "nonaltoptions" "$defoptions") defoptions=$(GetMenuOpt "defoptions" "$defoptions") # Extract the lockold value lockold=$(GetMenuOpt "lockold" "$lockold") # Extract Xen hypervisor options xenhopt=$(GetMenuOpt "xenhopt" "$xenhopt") # Extract Xen Linux kernel options xenkopt=$(GetMenuOpt "xenkopt" "$xenkopt") # Extract the howmany value howmany=$(GetMenuOpt "howmany" "$howmany") # Extract the memtest86 value memtest86=$(GetMenuOpt "memtest86" "$memtest86") # Extract the indomU value indomU=$(GetMenuOpt "indomU" "$indomU") # Extract the updatedefaultentry option updatedefaultentry=$(GetMenuOpt "updatedefaultentry" "$updatedefaultentry") # If "default saved" is in use, set the default to true grep -q "^default.*saved" $menu && savedefault=true # Extract the savedefault option savedefault=$(GetMenuOpt "savedefault" "$savedefault") # Generate the menu options we want to insert buffer=$(tempfile) echo $start >> $buffer echo "## lines between the AUTOMAGIC KERNELS LIST markers will be modified" >> $buffer echo "## by the debian update-grub script except for the default options below" >> $buffer echo >> $buffer echo "## DO NOT UNCOMMENT THEM, Just edit them to your needs" >> $buffer echo >> $buffer echo "## ## Start Default Options ##" >> $buffer echo "## default kernel options" >> $buffer echo "## default kernel options for automagic boot options" >> $buffer echo "## If you want special options for specific kernels use kopt_x_y_z" >> $buffer echo "## where x.y.z is kernel version. Minor versions can be omitted." >> $buffer echo "## e.g. kopt=root=/dev/hda1 ro" >> $buffer echo "## kopt_2_6_8=root=/dev/hdc1 ro" >> $buffer echo "## kopt_2_6_8_2_686=root=/dev/hdc2 ro" >> $buffer echo "# kopt=$kopt" >> $buffer if [ -n "$kopt_2_6" ] && [ "$kopt" != "$kopt_2_6" ]; then echo "# kopt_2_6=$kopt_2_6" >> $buffer fi if [ -n "$CustomKopts" ] ; then echo "$CustomKopts" >> $buffer fi echo >> $buffer echo "## default grub root device" >> $buffer echo "## e.g. groot=${grub_root_device_fallback}" >> $buffer echo "# groot=$grub_root_device" >> $buffer echo >> $buffer echo "## should update-grub create alternative automagic boot options" >> $buffer echo "## e.g. alternative=true" >> $buffer echo "## alternative=false" >> $buffer echo "# alternative=$alternative" >> $buffer echo >> $buffer echo "## should update-grub lock alternative automagic boot options" >> $buffer echo "## e.g. lockalternative=true" >> $buffer echo "## lockalternative=false" >> $buffer echo "# lockalternative=$lockalternative" >> $buffer echo >> $buffer echo "## additional options to use with the default boot option, but not with the" >> $buffer echo "## alternatives" >> $buffer echo "## e.g. defoptions=vga=791 resume=/dev/hda5" >> $buffer echo "# defoptions=$defoptions" >> $buffer echo >> $buffer echo "## should update-grub lock old automagic boot options" >> $buffer echo "## e.g. 
lockold=false" >> $buffer echo "## lockold=true" >> $buffer echo "# lockold=$lockold" >> $buffer echo >> $buffer echo "## Xen hypervisor options to use with the default Xen boot option" >> $buffer echo "# xenhopt=$xenhopt" >> $buffer echo >> $buffer echo "## Xen Linux kernel options to use with the default Xen boot option" >> $buffer echo "# xenkopt=$xenkopt" >> $buffer echo >> $buffer echo "## altoption boot targets option" >> $buffer echo "## multiple altoptions lines are allowed" >> $buffer echo "## e.g. altoptions=(extra menu suffix) extra boot options" >> $buffer echo "## altoptions=(recovery) single" >> $buffer if ! grep -q "^# altoptions" $menu ; then echo "# altoptions=$altoptions" >> $buffer else grep "^# altoptions" $menu >> $buffer fi echo >> $buffer echo "## controls how many kernels should be put into the $menu_file_basename" >> $buffer echo "## only counts the first occurence of a kernel, not the" >> $buffer echo "## alternative kernel options" >> $buffer echo "## e.g. howmany=all" >> $buffer echo "## howmany=7" >> $buffer echo "# howmany=$howmany" >> $buffer echo >> $buffer echo "## specify if running in Xen domU or have grub detect automatically" >> $buffer echo "## update-grub will ignore non-xen kernels when running in domU and vice versa" >> $buffer echo "## e.g. indomU=detect" >> $buffer echo "## indomU=true" >> $buffer echo "## indomU=false" >> $buffer echo "# indomU=$indomU" >> $buffer echo >> $buffer echo "## should update-grub create memtest86 boot option" >> $buffer echo "## e.g. memtest86=true" >> $buffer echo "## memtest86=false" >> $buffer echo "# memtest86=$memtest86" >> $buffer echo >> $buffer echo "## should update-grub adjust the value of the default booted system" >> $buffer echo "## can be true or false" >> $buffer echo "# updatedefaultentry=$updatedefaultentry" >> $buffer echo >> $buffer echo "## should update-grub add savedefault to the default options" >> $buffer echo "## can be true or false" >> $buffer echo "# savedefault=$savedefault" >> $buffer echo >> $buffer echo "## ## End Default Options ##" >> $buffer echo >> $buffer echo -n "Searching for splash image ... " >&2 current_splash=`grep '^splashimage=' ${menu_file} || true` splash_root_device="" splash_uuid="" case "$grub_root_device" in [^A-Za-z0-9]*) splash_root_device=${grub_root_device} ;; *) splash_uuid="uuid $grub_root_device" ;; esac splashimage_path="splashimage=${splash_root_device}${grub_dir##${boot_device:+/boot}}/splash.xpm.gz" if [ `sed -e "/^$start/,/^$end/d" $menu_file | grep -c '^splashimage='` != "0" ] ; then #checks for splashscreen defined outside the autoupdated part splashimage=$(grep '^splashimage=' ${menu_file}) echo "found: ${splashimage##*=}" >&2 echo >&2 elif [ -f "${grub_dir}/splash.xpm.gz" ] && [ "$current_splash" = "" ]; then echo "found: /boot/grub/splash.xpm.gz" >&2 echo "$splash_uuid" >> $buffer echo "$splashimage_path" >> $buffer echo >> $buffer elif [ -f "${grub_dir}/splash.xpm.gz" ] && [ "$current_splash" = "$splashimage_path" ]; then echo "found: /boot/grub/splash.xpm.gz" >&2 echo "$splash_uuid" >> $buffer echo "$splashimage_path" >> $buffer echo >> $buffer elif [ "$current_splash" != "" ] && [ "$current_splash" != "$splashimage_path" ]; then echo "found but preserving previous setting: $(grep '^splashimage=' ${menu_file})" >&2 echo "$splash_uuid" >> $buffer echo "$current_splash" >> $buffer echo >> $buffer else echo "none found, skipping ..." >&2 fi hypervisors="" for hyp in /boot/xen-*.gz; do if [ ! 
-h "$hyp" ] && [ -f "$hyp" ]; then hypervisors="$hypervisors `basename "$hyp"`" fi done # figure out where grub looks for the kernels at boot time kernel_dir=/boot if [ -n "$boot_device" ] ; then kernel_dir= fi # We need a static path to use for the ucf registration; since we're not # using the full menu.lst file (maybe we should, just copying it around? # C.f. discussion with Manoj), create a directory in a fixed location # even though we're not treating the file in that location as # persistent. mkdir -p /var/run/grub # The first time ucf sees the file, we can only assume any difference # between the magic comments and the kernel options is a result of local # mods, so this will result in a ucf prompt for anyone whose first # invocation of update-grub is as a result of updating the magic comments. if ! ucfq grub | grep -q $ucf_menu_file; then otherbuffer=$(tempfile) cat $buffer > $otherbuffer sortedKernels=`sed -n -e " /$endopt/,/$end/ { s/^kernel[[:space:]]\+\([^[:space:]]\+\).*/\1/p }" < $menu | grep -vE "memtest86|$grub2name|xen" | uniq` xenKernels=`sed -n -e " /$endopt/,/$end/ { s/^module[[:space:]]\+\([^[:space:]]*vmlinuz[^[:space:]]\+\).*/\1/p }" < $menu | uniq` savebuffer="$buffer" buffer="$otherbuffer" savetitle="$title" title="$(sed -n -e "/$endopt/,/$end/ { s/^title[[:space:]]\+\(.*\),.*/\1/p }" < $menu | head -n 1)" if [ -z "$title" ]; then title="$savetitle" fi # Hack: the kernel list output in Ubuntu 6.06 was different than # in the current version, so to support smooth upgrades we need to # properly detect a config generated by this old version of # update-grub and mimic it for the initial ucf registration dapper_upgrade=`sed -n -e " /$endopt/,/$end/ { /^boot/p }" < $menu` save_savedefault="$savedefault" if [ -n "$dapper_upgrade" ]; then savedefault=true fi output_kernel_list savedefault="$save_savedefault" dapper_upgrade="" buffer="$savebuffer" title="$savetitle" ucf_update_kernels "$menu" "$otherbuffer" # all done, now register it ucfr grub $ucf_menu_file fi if ! type is_xen_kernel >/dev/null 2>&1; then is_xen_kernel() { # input is like /boot/vmlinuz-2.6.35-13-virtual # get the version string out of it. local ver_flavor=""; ver_flavor="${1##*vmlinuz-}" case "${ver_flavor}" in *-ec2) return 0;; *-virtual) # 10.04 LTS through 12.04 LTS -virtual is the EC2/Xen kernel dpkg --compare-versions ${ver_flavor%-virtual} gt 2.6.35-13 && return 0;; *-generic) # Starting with 12.10, -virtual was merged into -generic dpkg --compare-versions ${ver_flavor%-generic} ge 3.4.0-3 && return 0;; esac return 1; } fi for kern in /boot/vmlinuz-*; do is_xen_kernel "${kern}" && xen_verlist="${xen_verlist} ${kern#/boot/vmlinuz-}" done xen_verlist=${xen_verlist# } xenKernels="" for ver in ${xen_verlist}; do # ver is a kernel version kern="/boot/vmlinuz-$ver" if [ -r $kern ] ; then newerKernels="" for i in $xenKernels ; do res=$(CompareVersions "$kern" "$i") if [ "$kern" != "" ] && [ "$res" -gt 0 ] ; then newerKernels="$newerKernels $kern $i" kern="" else newerKernels="$newerKernels $i" fi done if [ "$kern" != "" ] ; then newerKernels="$newerKernels $kern" fi xenKernels="$newerKernels" fi done xenKernels=" ${xenKernels} " if [ "$indomU" = "detect" ]; then if [ -e /proc/xen/capabilities ] && ! 
grep -q "control_d" /proc/xen/capabilities; then indomU="true" else indomU="false" fi fi sortedKernels="" for kern in $(/bin/ls -1vr /boot | grep -v "dpkg-*" | grep "^vmlinuz-") ; do if `echo "$xenKernels" | grep -q "$kern "` || `echo "$kern" | grep -q "xen"`; then is_xen=1 else is_xen= fi if [ "$indomU" = "false" ] && [ "$is_xen" ]; then # We aren't running in a Xen domU, skip xen kernels echo "Ignoring Xen kernel on non-Xen host: $kern" continue elif [ "$indomU" = "true" ] && ! [ "$is_xen" ]; then # We are running in a Xen domU, skip non-xen kernels echo "Ignoring non-Xen Kernel on Xen domU host: $kern" continue fi kern="/boot/$kern" newerKernels="" for i in $sortedKernels ; do res=$(CompareVersions "$kern" "$i") if [ "$kern" != "" ] && [ "$res" -gt 0 ] ; then newerKernels="$newerKernels $kern $i" kern="" else newerKernels="$newerKernels $i" fi done if [ "$kern" != "" ] ; then newerKernels="$newerKernels $kern" fi sortedKernels="$newerKernels" done if test -f "/boot/vmlinuz.old" ; then sortedKernels="/boot/vmlinuz.old $sortedKernels" fi if test -f "/boot/vmlinuz" ; then sortedKernels="/boot/vmlinuz $sortedKernels" fi # Add our last-good-boot kernel, second in list. We always add it, because # it can appear out of nowhere. newerKernels="" last_good="/boot/last-good-boot/vmlinuz" if [ -e "$last_good" ]; then for i in $sortedKernels ; do if [ "$last_good" != "" ]; then newerKernels="$i $last_good" last_good="" else newerKernels="$newerKernels $i" fi done # Shouldn't happen, unless someone removed all the kernels if [ "$last_good" != "" ]; then newerKernels="$newerKernels $last_good" fi sortedKernels="$newerKernels" fi #Finding the value the default line use_grub_set_default="false" if test "$updatedefaultentry" = "true" ; then defaultEntryNumber=$(sed -ne 's/^[[:blank:]]*default[[:blank:]]*\(.*\).*/\1/p' $menu) if [ "$defaultEntryNumber" = "saved" ] ; then defaultEntryNumber=$(sed 'q' "$grub_dir/default") use_grub_set_default="true" fi if test -n "$defaultEntryNumber"; then defaultEntryNumberPlusOne=$(expr $defaultEntryNumber \+ 1); defaultEntry=$(grep "^[[:blank:]]*title" $menu | sed -ne "${defaultEntryNumberPlusOne}p" | sed -ne ";s/^[[:blank:]]*title[[:blank:]]*//p") defaultEntry=$(echo $defaultEntry | sed -e "s/[[:blank:]]*$//") # don't trust trailing blanks else notChangeDefault="yes" fi else notChangeDefault="yes" fi output_kernel_list otherbuffer=$(tempfile) cat $buffer > $otherbuffer ucf_update_kernels "$buffer" "$otherbuffer" echo -n "Updating $menu ... " >&2 # Insert the new options into the menu if ! 
grep -q "^$start" $menu ; then
    cat $buffer >> $menu
    rm -f $buffer
else
    umask 077
    sed -e "/^$start/,/^$end/{
        /^$start/r $buffer
        d
    }
    " $menu > $menu.new
    cat $menu.new > $menu
    rm -f $buffer $menu.new
fi

# Function to update the default value
set_default_value() {
    if [ "$use_grub_set_default" = "true" ] ; then
        grub-set-default $1
    else
        value="$1"
        newmenu=$(tempfile)
        sed -e "s/^[[:blank:]]*default[[:blank:]]*[[:digit:]]*\(.*\)/default ${value}\1/;b" $menu > $newmenu
        cat $newmenu > $menu
        rm -f $newmenu
        unset newmenu
    fi
}

#Updating the default number
if test -z "$notChangeDefault"; then
    newDefaultNumberPlusOne=$(grep "^[[:blank:]]*title[[:blank:]]*" $menu | grep -n "${defaultEntry}" | cut -f1 -d ":" | sed -ne "1p")
    if test -z "$newDefaultNumberPlusOne"; then
        echo "Previous default entry removed, resetting to 0">&2
        set_default_value "0"
    elif test -z "$defaultEntry"; then
        echo "Value of default value matches no entry, resetting to 0" >&2
        set_default_value "0"
    else
        if test "$newDefaultNumberPlusOne" = "1"; then
            newDefaultNumber="0"
        else
            newDefaultNumber=$(expr $newDefaultNumberPlusOne - 1)
        fi
        echo "Updating the default booting kernel">&2
        set_default_value "$newDefaultNumber"
    fi
fi

echo "done" >&2
echo >&2

debian/pycompat

2

debian/cloud-init.postinst

#!/bin/sh -e

. /usr/share/debconf/confmodule

set -f # disable pathname expansion
db_capb escape # to support carriage return / multi-line values

update_cfg() {
    # takes filename, header, new object (in yaml), optionally 'remover'
    # and merges new into existing object in filename, and then updates file
    # remover is a string that means "delete existing entry"
    python -c '
import sys, yaml

def update(src, cand):
    if not (isinstance(src, dict) and isinstance(cand, dict)):
        return cand
    for k, v in cand.iteritems():
        # if the candidate has _ as value, delete source
        if v == REMOVER:
            if k in src:
                del src[k]
            continue
        if k not in src:
            src[k] = v
        else:
            src[k] = update(src[k], v)
    return src

(fname, header, newyaml) = sys.argv[1:4]
REMOVER = object
if len(sys.argv) == 5:
    REMOVER = sys.argv[4]
newcfg = yaml.load(newyaml)

with open(fname, "r") as fp:
    cfg = yaml.load(fp)
if not cfg:
    cfg = {}

cfg = update(cfg, newcfg)

with open(fname, "w") as fp:
    fp.write(header + "\n")
    fp.write(yaml.dump(cfg))' "$@"
}

handle_preseed_maas() {
    local cfg_file="/etc/cloud/cloud.cfg.d/90_dpkg_maas.cfg"
    local md_url="" creds_all="" c_key="" t_key="" t_sec="" c_sec="";
    db_get "cloud-init/maas-metadata-url" && md_url="$RET" || :
    db_get "cloud-init/maas-metadata-credentials" && creds_all="$RET" || :

    # nothing to do
    [ -n "$md_url" -o -n "$creds_all" ] || return 0

    # change a url query string format into : delimited
    if [ -n "$creds_all" -a "${creds_all#*&}" != "${creds_all}" ]; then
        creds_all=$(python -c 'from urlparse import parse_qs; import sys; keys = parse_qs(sys.argv[1])
for k in sys.argv[2:]: sys.stdout.write("%s:" % keys.get(k,[""])[0])' "$creds_all" \
            oauth_consumer_key oauth_token_key oauth_token_secret )
    fi

    # now, if non-empty creds_all is: consumer_key:token_key:token_secret
    if [ -n "$creds_all" ]; then
        OIFS="$IFS"; IFS=:; set -- $creds_all; IFS="$OIFS"
        c_key=$1; t_key=$2; t_sec=$3
    fi

    if [ "$md_url" = "_" -a "${c_key}:${t_key}:${t_sec}" = "_:_:_" ]; then
        # if all these values were '_', the delete value, just delete the file.
rm -f "$cfg_file" else local header="# written by cloud-init debian package per preseed entries # cloud-init/{maas-metadata-url,/maas-metadata-credentials}" local pair="" k="" v="" pload="" orig_umask="" for pair in "metadata_url:$md_url" "consumer_key:${c_key}" \ "token_key:${t_key}" "token_secret:$t_sec"; do k=${pair%%:*} v=${pair#${k}:} [ -n "$v" ] && pload="${pload} $k: \"$v\"," done # '_' would indicate "delete", otherwise, existing entries are left orig_umask=$(umask) umask 066 : >> "$cfg_file" && chmod 600 "$cfg_file" update_cfg "$cfg_file" "$header" "datasource: { MAAS: { ${pload%,} } }" _ umask ${orig_umask} fi # now clear the database of the values, as they've been consumed db_unregister "cloud-init/maas-metadata-url" || : db_unregister "cloud-init/maas-metadata-credentials" || : } handle_preseed_local_cloud_config() { local ccfg="" debconf_name="cloud-init/local-cloud-config" local cfg_file="/etc/cloud/cloud.cfg.d/90_dpkg_local_cloud_config.cfg" local header="# written by cloud-init debian package per preseed entry # $debconf_name" db_get "${debconf_name}" && ccfg="$RET" || : if [ "$ccfg" = "_" ]; then rm -f "$cfg_file" elif [ -n "$ccfg" ]; then { echo "$header"; echo "$ccfg"; } > "$cfg_file" fi db_unregister "${debconf_name}" || : } fix_1336855() { ### Begin fix for LP: 1336855 # fix issue where cloud-init misidentifies the location of grub and # where grub misidentifies the location of the device # if cloud-init's grub module did not run, then it did not break anything. [ -f /var/lib/cloud/instance/sem/config_grub_dpkg ] || return 0 # This bug only happened on /dev/xvda devices [ -b /dev/xvda ] || return 0 # we can't fix the system without /proc/cmdline [ -r /proc/cmdline ] || return 0 # Don't do anything unless we have grub [ -x /usr/sbin/grub-install ] || return 0 # First, identify the kernel device for the parent. for parm in $(cat /proc/cmdline); do dev=$(echo $parm | awk -F\= '{print$NF}') case $parm in root=UUID*) [ -d /dev/disk/by-uuid ] && root_dev=$(readlink -f /dev/disk/by-uuid/$dev);; root=LABEL*) [ -d /dev/disk/by-label ] && root_dev=$(readlink -f /dev/disk/by-label/$dev);; root=/dev*) [ -d /dev ] && root_dev=$(readlink -f $dev);; esac [ -n "$root_dev" ] && break done # Don't continue if we don't have a root directive [ -z "$root_dev" ] && return 0 # Only deal with simple, cloud-based devices case $root_dev in /dev/vda*|/dev/xvda*|/dev/sda*) ;; *) return 0;; esac # Make sure that we are not chrooted. [ "$(stat -c %d:%i /)" != "$(stat -c %d:%i /proc/1/root/.)" ] && return 0 # Check if we are in a container, i.e. LXC for t in running-in-container lxc-is-container; do command -v $t && $t && return 0 done >/dev/null 2>&1 # Find out where grub thinks the root device is. Only continue if # grub postinst would install/reinstall grub db_get grub-pc/install_devices && grub_cfg_dev=${RET} || return 0 db_get grub-pc/install_devices_empty && grub_dev_empty=${RET} || return 0 # Find out the parent device for the root device. # example output: sda/sda1 block_path=$(udevadm info -q path -n $root_dev | awk '-Fblock/' '{print$NF}') # Extract the parent device name. 
This works where the device is a block device # example output: /dev/sda parent_dev=$(echo $block_path | awk '-F/' '$1 { if ( $1 ) {print"/dev/"$1}}') [ -b "${parent_dev}" ] || return 0 # Do nothing if the device that the grub postinst would install is already used [ "$grub_cfg_dev" = "$parent_dev" -o "$grub_cfg_dev" = "$root_dev" ] && return 0 # If we get here, do the installation echo "Reconfiguring grub install device due to mismatch (LP: #1336855)" echo " Grub should use $parent_dev but is configured for $grub_cfg_dev" db_set grub-pc/install_devices "$parent_dev" grub-install $parent_dev && echo "Reinstalled grub" || echo "WARNING! Unable to fix grub device mismatch. You may be broken." } if [ "$1" = "configure" ]; then # disable ureadahead (LP: #499520) dpkg-divert --package cloud-init --rename --divert \ /etc/init/ureadahead.conf.disabled --add /etc/init/ureadahead.conf if db_get cloud-init/datasources; then values="$RET" if [ "${values#*MaaS}" != "${values}" ]; then # if db had old MAAS spelling, fix it. values=$(echo "$values" | sed 's,MaaS,MAAS,g') db_set cloud-init/datasources "$values" fi cat > /etc/cloud/cloud.cfg.d/90_dpkg.cfg </dev/null 2>&1 || echo "Warning: failed to setup apt-pipelining" 1>&2 elif [ ! -f "$pipeline_f" ]; then # there was no cloud available, so populate it ourselves. cat > "$pipeline_f" </cloud-init alternatively, if you're on the development release: lp:ubuntu/cloud-init Patches in debian/patches are also stored applied in bzr. To cherry pick revisions 391 to tip from from trunk, do something like: s=391; e=; b=../trunk; [ -n "${e}" ] || e=$(cd ${b} && bzr revno) pfile=catchup-${s}${e:+-${e}}.patch [ "$s" = "$e" ] && pfile=catchup-${s}.patch ( cd ../trunk && bzr log -r$s..$e && bzr diff -p1 -r$(($s-1))..$e ) > ../$pfile.full mkdir -p debian/patches filterdiff --exclude "*/ChangeLog" < ../$pfile.full > debian/patches/$pfile echo "$pfile" >> debian/patches/series quilt push quilt refresh files="$(quilt files ${pfile}) debian/patches/series debian/patches/${pfile}" files="$files .pc/${pfile} .pc/applied-patches" bzr add $files # now add an entry dch --append "${pfile} (sync to $e)" dch --edit # improve the entry debcommit $files ../$pfile.full will have the full diff. See 'ChangeLog' entries there and debian/patches/$pfile for help writing debian/changelog entry You can set 'e' (end) to not go to tip. == Cherry pick single patch == There is a utility in debian/patches named 'cherry-pick-revno' that will help to cherry pick a single commit from trunk. == New snapshot == To import a new snapshot, do: trunk="../trunk" uver=$(cd "$trunk" && ./tools/read-version) # the *next* upstream version revno=$(cd "$trunk" && bzr revno) version=${uver}~bzr${revno} tarball=cloud-init-${version}.tar.gz bzr export --format=tgz --revision=${revno} "$tarball" "${trunk}" bzr merge-upstream "$tarball" --version=${version} debian/cloud-init.preinst0000664000000000000000000002002312624644516012654 0ustar #!/bin/sh set -e #DEBHELPER# # Remove a no-longer used conffile rm_conffile() { local PKGNAME="$1" local CONFFILE="$2" [ -e "$CONFFILE" ] || return 0 local md5sum="$(md5sum $CONFFILE | sed -e 's/ .*//')" local old_md5sum="$(dpkg-query -W -f='${Conffiles}' $PKGNAME | \ sed -n -e "\' $CONFFILE ' { s/ obsolete$//; s/.* //; p }")" if [ "$md5sum" != "$old_md5sum" ]; then echo "Obsolete conffile $CONFFILE has been modified by you." echo "Saving as $CONFFILE.dpkg-bak ..." mv -f "$CONFFILE" "$CONFFILE".dpkg-bak else echo "Removing obsolete conffile $CONFFILE ..." 
rm -f "$CONFFILE" fi } # move_sem(src,targets) # rename sem/* items named $src to $targets # (with hard links if more than one) move_sem() { local src=$1 f="" targ="" freqid="" dir=/var/lib/cloud/sem shift # link the remaining targets to src, if it exists for f in "${dir}/${src}."*; do # if there were no src entries, nothing to do [ -f "${f}" ] || return 0 freqid=${f#${dir}/${src}.} # 'i-abcdefg' or 'always' for targ in "$@"; do [ -e "${dir}/${targ}.${freqid}" ] && continue ln -f "${f}" "${dir}/${targ}.${freqid}" done rm "${f}" done return 0 } fix_ephemeral0_micro() { # make ephemeral0 entries in /etc/fstab written by cloudconfig # 'nobootwait', so they do not block subsequent boots (LP: #634102) local out="" oldver=$1 dev="" adop="nobootwait" local s="[[:space:]]" ns="[^[:space:]]" # space and "not space" local remain="${s}\+.*" first4="" for dev in /dev/sda2 /dev/sdb; do first4="${dev}$s\+$ns\+$s\+$ns\+$s\+$ns\+" out=$(awk '$1 == dev && $4 ~ /cloudconfig/ && $4 !~ op { print $1 ; }' \ dev="${dev}" "op=${adop}" /etc/fstab) || return 0 [ -n "${out}" ] || continue echo "making ephemeral ${dev} in /etc/fstab ${adop} (LP: #634102)" 1>&2 sed -i "s|^\(${first4}\)\(${remain}\)|\1,${adop}\2|" /etc/fstab done } convert_varlib_05x_06x() { local url="http://169.254.169.254/2009-04-04/meta-data/instance-id" local tout="--connect-timeout .5 --read-timeout .5" local iid="" f="" uptime="" bn="" iid=$(wget "${url}" ${tout} --tries 1 -O - 2>/dev/null) || iid="" [ -n "${iid}" -a -d /var/lib/cloud ] || return 0 cd /var/lib/cloud mkdir -p "instances/${iid}" "instances/${iid}/sem" [ -e instance ] || ln -sf "instances/${iid}" instance for f in data/*; do [ -f "$f" ] || continue case "${f#*/}" in user-data.txt.i|user-data.txt|cloud-config.txt) mv "${f}" instance/ ;; esac done [ -f data/cache/obj.pkl ] && mv data/cache/obj.pkl instance/ for f in sem/*.${iid}; do [ -f "${f}" ] || continue bn=${f#*/}; bn=${bn%.${iid}} case "${bn}" in set_defaults) mv "${f}" "instance/sem/config-locale";; set_hostname) mv "${f}" "instance/sem/config-set_hostname";; *) mv "${f}" "instance/sem/${bn}";; esac done [ ! -f sem/update_hostname.always ] || mv sem/update_hostname.always sem/config-update_hostname.always rmdir data/cache 2>/dev/null || : rm -f data/available.build mkdir -p instance/scripts if [ -d data/scripts ]; then mv data/scripts/* instance/scripts || : rmdir data/scripts || : fi [ -d data/scripts/* ] && mv data/scripts instance/ [ ! -e instance/boot-finished ] && [ -f /proc/uptime ] && read uptime other instance/boot-finished || : return 0 } azure_apply_new_instance_id_1506187() { # With LP: #1506187, the Azure instance ID detection method was changed # to use the DMI data. In order to prevent existing instances from thinking # they are new instances, the instance ID needs to be updated here. if grep DataSourceAzure /var/lib/cloud/instance/datasource > /dev/null 2>&1; then product_id_f="/sys/devices/virtual/dmi/id/product_uuid" instance_id_f="/var/lib/cloud/data/instance-id" if [ ! -e "${product_id_f}" -o ! -e "${instance_id_f}" ]; then return 0 fi # Get the current instance ID's (new and old) new_instance_id="$(cat ${product_id_f})" old_instance_id="$(cat ${instance_id_f})" if [ "${new_instance_id}" = "${old_instance_id}" ]; then # this may have been applied for a prior version, i.e. 
upgrading # from 14.04 to 16.04 return 0 elif [ -z "${new_instance_id}" -o -z "${old_instance_id}" ]; then cat < /var/lib/cloud/data/instance-id # Remove the symlink for the instance rm /var/lib/cloud/instance # Rename the old instance id to the new one mv /var/lib/cloud/instances/${old_instance_id} \ /var/lib/cloud/instances/${new_instance_id} # Link the old id to the new one, just incase ln -s /var/lib/cloud/instances/${new_instance_id} \ /var/lib/cloud/instances/${old_instance_id} # Make the active instance the new id ln -s /var/lib/cloud/instances/${new_instance_id} \ /var/lib/cloud/instance fi fi } case "$1" in install|upgrade) # removing obsolete conffiles from the 'ec2-init' package if dpkg --compare-versions "$2" le "0.5.1"; then rm_conffile ec2-init "/etc/init/cloud-ssh-keygen.conf" fi if dpkg --compare-versions "$2" lt "0.5.3"; then rm_conffile ec2-init "/etc/init/ec2init.conf" fi if [ "$2" = "0.5.7-0ubuntu1" ]; then bad_d=/etc/update-motd.d/92-ec2-upgrade-available rm_conffile cloud-init "${bad_d}/motd-hook" # the dir for this file is almost certainly empty, but # if the file above was only moved, or other files there # then leave it be rmdir "${bad_d}" 2>/dev/null || true fi if dpkg --compare-versions "$2" le "0.5.10-0ubuntu2"; then old_confs="cloud-apt-update-upgrade cloud-config-misc cloud-config-mounts cloud-config-puppet cloud-config-ssh cloud-disable-ec2-metadata" for f in ${old_confs}; do rm_conffile cloud-init "/etc/init/${f}.conf" done fi if dpkg --compare-versions "$2" le "0.5.11-0ubuntu1"; then # rename the config entries in sem/ so they're not run again # transition names in 0.5.11 had only short name (no config- prefix) # so create config- entries for each for name in apt-update-upgrade disable-ec2-metadata mounts \ puppet runcmd ssh updates-check; do move_sem ${name} config-${name} done # 0.5.11 split 'config-misc' into 'updates-check' and 'runcmd' move_sem config-misc config-updates-check config-runcmd fi if dpkg --compare-versions "$2" le "0.5.14-0ubuntu5"; then fix_ephemeral0_micro fi if dpkg --compare-versions "$2" le 0.6.0-0ubuntu1; then # convert /var/lib/cloud from 0.5.x layout to 0.6.x convert_varlib_05x_06x fi # 0.6.0 changed 'user-scripts' to config-scripts-user (LP: #1049146) if [ -e /var/lib/cloud/instance/sem/user-scripts ]; then ln -sf user-scripts /var/lib/cloud/instance/sem/config-scripts-user fi # 0.7.7-bzr1556 introduced new instance ID source for Azure if dpkg --compare-versions "$2" le "0.7.5-0ubuntu1.15"; then azure_apply_new_instance_id_1506187 fi d=/etc/cloud/ if [ -f "$d/distro.cfg" ] && [ ! -f "$d/cloud.cfg.d/90_dpkg.cfg" ]; then echo "moving $d/distro.cfg -> $d/cloud.cfg.d/90_dpkg.cfg" [ -d "${d}/cloud.cfg.d" ] || mkdir "${d}/cloud.cfg.d" mv "$d/distro.cfg" "$d/cloud.cfg.d/90_dpkg.cfg" fi esac debian/cloud-init.templates0000664000000000000000000000242212574667652013203 0ustar Template: cloud-init/datasources Type: multiselect Default: NoCloud, ConfigDrive, OpenNebula, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, Ec2, CloudStack, SmartOS, None Choices-C: NoCloud, ConfigDrive, OpenNebula, Azure, AltCloud, OVF, MAAS, GCE, Openstack, CloudSigma, Ec2, CloudStack, SmartOS, None Choices: NoCloud: Reads info from /var/lib/cloud/seed only, ConfigDrive: Reads data from Openstack Config Drive, OpenNebula: read from OpenNebula context disk, Azure: read from MS Azure cdrom. 
Requires walinux-agent, AltCloud: config disks for RHEVm and vSphere, OVF: Reads data from OVF Transports, MAAS: Reads data from Ubuntu MAAS, GCE: google compute metadata service, OpenStack: native openstack metadata service, CloudSigma: metadata over serial for cloudsigma.com, Ec2: reads data from EC2 Metadata service, CloudStack: Read from CloudStack metadata service, SmartOS: Read from SmartOS metadata service, None: Failsafe datasource Description: Which data sources should be searched? Cloud-init supports searching different "Data Sources" for information that it uses to configure a cloud instance. . Warning: Only select 'Ec2' if this system will be run on a system with the EC2 metadata service present. Doing so incorrectly will result in a substantial timeout on boot. debian/po/0000775000000000000000000000000012574670065007622 5ustar debian/po/nl.po0000664000000000000000000000642012574667652010605 0ustar # translation of ucf_2.007_templates.po to dutch # This file is distributed under the same license as the ucf package. # # Translators, if you are not familiar with the PO format, gettext # documentation is worth reading, especially sections dedicated to # this format, e.g. by running: # info -n '(gettext)PO Files' # info -n '(gettext)Header Entry' # Some information specific to po-debconf are available at # /usr/share/doc/po-debconf/README-trans # or http://www.debian.org/intl/l10n/po-debconf/README-trans# # Developers do not need to manually edit POT or PO files. # msgid "" msgstr "" "Project-Id-Version: ucf_2.007_nl\n" "Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" "POT-Creation-Date: 2008-01-28 08:38-0800\n" "PO-Revision-Date: 2007-09-19 01:09-0500\n" "Last-Translator: Kurt De Bree \n" "Language-Team: Nederlands \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "X-Generator: KBabel 1.10.2\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "install the package maintainer's version" msgstr "installeer de versie van de pakketbeheerder" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "keep the local version currently installed" msgstr "behoud de reeds geïnstalleerde versie" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show the differences between the versions" msgstr "toon de verschillen tussen de versies" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a side-by-side difference between the versions" msgstr "toon de verschillende versies zij-aan-zij" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a 3-way difference between available versions" msgstr "" "toon een drievoudig verschil tussen de beschikbare versies van het bestand" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "do a 3-way merge between available versions (experimental)" msgstr "" "doe een drievoudige samenvoeging tussen de beschikbare versies van het " "bestand (Zeer Experimenteel)" #. Type: select #. Choices #. 
Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "start a new shell to examine the situation" msgstr "start een nieuwe shell om de situatie te onderzoeken" #. Type: select #. Description #: ../grub.templates:1002 msgid "What would you like to do about ${BASENAME}?" msgstr "Wat wilt u met ${BASENAME} doen?" #. Type: select #. Description #: ../grub.templates:1002 #, fuzzy msgid "" "A new version of /boot/grub/menu.lst is available, but the version installed " "currently has been locally modified." msgstr "" "Een nieuwe versie van het bestand /boot/grub/menu.lst is beschikbaar, maar " "uw versie werd handmatig gewijzigd." debian/po/pt_BR.po0000664000000000000000000000605312574667652011204 0ustar # # Translators, if you are not familiar with the PO format, gettext # documentation is worth reading, especially sections dedicated to # this format, e.g. by running: # info -n '(gettext)PO Files' # info -n '(gettext)Header Entry' # # Some information specific to po-debconf are available at # /usr/share/doc/po-debconf/README-trans # or http://www.debian.org/intl/l10n/po-debconf/README-trans # # Developers do not need to manually edit POT or PO files. # msgid "" msgstr "" "Project-Id-Version: ucf\n" "Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" "POT-Creation-Date: 2008-01-28 08:38-0800\n" "PO-Revision-Date: 2007-09-19 01:11-0500\n" "Last-Translator: André Luís Lopes \n" "Language-Team: Debian-BR Project \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "install the package maintainer's version" msgstr "instalar a versão do mantenedor do pacote" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "keep the local version currently installed" msgstr "manter a versão instalada atualmente" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show the differences between the versions" msgstr "exibir as diferenças entre as versões" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a side-by-side difference between the versions" msgstr "exibir as diferenças lado-a-lado entre as versões" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a 3-way difference between available versions" msgstr "exibir as diferenças entre as três versões disponíveis do arquivo" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "do a 3-way merge between available versions (experimental)" msgstr "juntar as 3 versões disponíveis do arquivo [Bem Experimental]" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "start a new shell to examine the situation" msgstr "iniciar um novo shell e examinar a situação" #. Type: select #. Description #: ../grub.templates:1002 msgid "What would you like to do about ${BASENAME}?" msgstr "O que você gostaria de fazer em relação a ${BASENAME}?" #. Type: select #. 
Description #: ../grub.templates:1002 msgid "" "A new version of /boot/grub/menu.lst is available, but the version installed " "currently has been locally modified." msgstr "" "Uma nova versão do arquivo /boot/grub/menu.lst está disponível, mas sua " "versão local foi modificada." debian/po/es.po0000664000000000000000000000705412574667652010607 0ustar # ucf translation to spanish # Copyright (C) 2004-2007 Software in the Public Interest # This file is distributed under the same license as the ucf package. # # Changes: # - Initial translation # Lucas Wall , 2004 # - Updated # Javier Fernandez-Sanguino , 2007 # # # Traductores, si no conoce el formato PO, merece la pena leer la # documentación de gettext, especialmente las secciones dedicadas a este # formato, por ejemplo ejecutando: # info -n '(gettext)PO Files' # info -n '(gettext)Header Entry' # # Equipo de traducción al español, por favor lean antes de traducir # los siguientes documentos: # # - El proyecto de traducción de Debian al español # http://www.debian.org/intl/spanish/coordinacion # especialmente las notas de traducción en # http://www.debian.org/intl/spanish/notas # # - La guía de traducción de po's de debconf: # /usr/share/doc/po-debconf/README-trans # o http://www.debian.org/intl/l10n/po-debconf/README-trans # msgid "" msgstr "" "Project-Id-Version: ucf 1.06\n" "Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" "POT-Creation-Date: 2008-01-28 08:38-0800\n" "PO-Revision-Date: 2007-06-21 13:41+0200\n" "Last-Translator: Javier Fernandez-Sanguino \n" "Language-Team: Debian Spanish \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=ISO-8859-15\n" "Content-Transfer-Encoding: 8bit\n" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "install the package maintainer's version" msgstr "instalar la versión del responsable del paquete" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "keep the local version currently installed" msgstr "conservar la versión local actualmente instalada" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show the differences between the versions" msgstr "mostrar las diferencias entre las versiones" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a side-by-side difference between the versions" msgstr "mostrar las diferencias entre las versiones lado a lado" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a 3-way difference between available versions" msgstr "mostrar las diferencias entre las tres versiones" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "do a 3-way merge between available versions (experimental)" msgstr "fusionar las tres versiones disponibles (experimental)" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "start a new shell to examine the situation" msgstr "ejecutar un nuevo intérprete para examinar la situación" #. Type: select #. Description #: ../grub.templates:1002 msgid "What would you like to do about ${BASENAME}?" msgstr "¿Qué desea hacer con ${BASENAME}?" #. Type: select #. 
Description #: ../grub.templates:1002 msgid "" "A new version of /boot/grub/menu.lst is available, but the version installed " "currently has been locally modified." msgstr "" "Hay una nueva versión del fichero /boot/grub/menu.lst, pero la versión que " "está instalada ha sido modificada localmente." debian/po/ru.po0000664000000000000000000000635512574667652010631 0ustar # translation of ru.po to Russian # This file is distributed under the same license as the PACKAGE package. # Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER. # # Yuri Kozlov , 2006, 2007. msgid "" msgstr "" "Project-Id-Version: 3.001\n" "Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" "POT-Creation-Date: 2008-01-28 08:38-0800\n" "PO-Revision-Date: 2007-07-01 13:45+0400\n" "Last-Translator: Yuri Kozlov \n" "Language-Team: Russian \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "X-Generator: KBabel 1.11.4\n" "Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%" "10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "install the package maintainer's version" msgstr "уÑтановить верÑию из пакета" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "keep the local version currently installed" msgstr "Ñохранить уÑтановленную локальную верÑию" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show the differences between the versions" msgstr "показать Ñ€Ð°Ð·Ð»Ð¸Ñ‡Ð¸Ñ Ð¼ÐµÐ¶Ð´Ñƒ верÑиÑми" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a side-by-side difference between the versions" msgstr "показать Ñ€Ð°Ð·Ð»Ð¸Ñ‡Ð¸Ñ Ð¼ÐµÐ¶Ð´Ñƒ верÑиÑми параллельно" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a 3-way difference between available versions" msgstr "показать Ñ€Ð°Ð·Ð»Ð¸Ñ‡Ð¸Ñ Ñразу между 3-Ð¼Ñ Ð´Ð¾Ñтупными верÑиÑми" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "do a 3-way merge between available versions (experimental)" msgstr "выполнить ÑлиÑние 3-Ñ… доÑтупных верÑий [ÑкÑпериментальный режим]" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "start a new shell to examine the situation" msgstr "запуÑтить новую оболочку командной Ñтроки Ð´Ð»Ñ Ð¿Ñ€Ð¾ÑÑÐ½ÐµÐ½Ð¸Ñ Ñитуации" #. Type: select #. Description #: ../grub.templates:1002 msgid "What would you like to do about ${BASENAME}?" msgstr "Что нужно Ñделать Ñ ${BASENAME}?" #. Type: select #. Description #: ../grub.templates:1002 #, fuzzy msgid "" "A new version of /boot/grub/menu.lst is available, but the version installed " "currently has been locally modified." msgstr "" "ДоÑтупна Ð½Ð¾Ð²Ð°Ñ Ð²ÐµÑ€ÑÐ¸Ñ Ñ„Ð°Ð¹Ð»Ð° /boot/grub/menu.lst, но верÑÐ¸Ñ Ñ„Ð°Ð¹Ð»Ð°, " "находÑщегоÑÑ Ð² ÑиÑтеме, была изменёна локально." debian/po/pl.po0000664000000000000000000000656112574667652010615 0ustar # translation of ucf3002.po to Polish # # Translators, if you are not familiar with the PO format, gettext # documentation is worth reading, especially sections dedicated to # this format, e.g. 
by running: # info -n '(gettext)PO Files' # info -n '(gettext)Header Entry' # # Some information specific to po-debconf are available at # /usr/share/doc/po-debconf/README-trans # or http://www.debian.org/intl/l10n/po-debconf/README-trans # # Developers do not need to manually edit POT or PO files. # # Wojciech Zarêba , 2007. msgid "" msgstr "" "Project-Id-Version: ucf3002\n" "Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" "POT-Creation-Date: 2008-01-28 08:38-0800\n" "PO-Revision-Date: 2007-06-27 17:15+0200\n" "Last-Translator: Wojciech Zarêba \n" "Language-Team: Polish \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=ISO-8859-2\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=n != 1;\n" "X-Generator: KBabel 1.11.4\n" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "install the package maintainer's version" msgstr "zainstalowanie wersji przygotowanej przez opiekuna pakietu" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "keep the local version currently installed" msgstr "zachowanie lokalnie zainstalowanej wersji" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show the differences between the versions" msgstr "pokazanie ró¿nic pomiêdzy wersjami" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a side-by-side difference between the versions" msgstr "pokazanie ró¿nic - obok siebie - pomiêdzy wersjami" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a 3-way difference between available versions" msgstr "pokazanie ró¿nic pomiêdzy trzema dostêpnymi wersjami" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "do a 3-way merge between available versions (experimental)" msgstr "scalenie zmian pomiêdzy 3 dostêpnymi wersjami (eksperymentalne)" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "start a new shell to examine the situation" msgstr "uruchomienie pow³oki w celu zbadania sytuacji" #. Type: select #. Description #: ../grub.templates:1002 msgid "What would you like to do about ${BASENAME}?" msgstr "Proszê wybraæ akcjê do wykonania na pliku ${BASENAME}:" #. Type: select #. Description #: ../grub.templates:1002 #, fuzzy msgid "" "A new version of /boot/grub/menu.lst is available, but the version installed " "currently has been locally modified." msgstr "" "Dostêpna jest nowa wersja pliku /boot/grub/menu.lst, ale obecna wersja " "zosta³a lokalnie zmodyfikowana." 
#~ msgid "Modified configuration file" #~ msgstr "Zmodyfikowany plik konfiguracyjny" #~ msgid "Line by line differences between versions" #~ msgstr "Ró¿nice linia po linii pomiêdzy wersjami" debian/po/fr.po0000664000000000000000000000512012574667652010577 0ustar # msgid "" msgstr "" "Project-Id-Version: fr\n" "Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" "POT-Creation-Date: 2008-01-28 08:38-0800\n" "PO-Revision-Date: 2007-06-19 09:36+0200\n" "Last-Translator: Eric Madesclair \n" "Language-Team: French \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "X-Generator: KBabel 1.11.4\n" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "install the package maintainer's version" msgstr "Installer la version du responsable du paquet" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "keep the local version currently installed" msgstr "Garder la version actuellement installée" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show the differences between the versions" msgstr "Montrer les différences entre les versions" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a side-by-side difference between the versions" msgstr "Montrer côte à côte les différences entre les versions" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a 3-way difference between available versions" msgstr "Montrer les différences entre les trois versions du fichier" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "do a 3-way merge between available versions (experimental)" msgstr "Fusionner les trois versions disponibles du fichier (expérimental)" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "start a new shell to examine the situation" msgstr "Lancer un shell pour examiner la situation" #. Type: select #. Description #: ../grub.templates:1002 msgid "What would you like to do about ${BASENAME}?" msgstr "Action souhaitée pour ${BASENAME} :" #. Type: select #. Description #: ../grub.templates:1002 msgid "" "A new version of /boot/grub/menu.lst is available, but the version installed " "currently has been locally modified." msgstr "" "Une nouvelle version du fichier /boot/grub/menu.lst est disponible mais la " "version actuellement utilisée a été modifiée localement." debian/po/da.po0000664000000000000000000000544312574667652010564 0ustar # SOME DESCRIPTIVE TITLE. # This file is distributed under the same license as the PACKAGE package. # Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER. # # Claus Hindsgaul , 2005. # Claus Hindsgaul , 2007. msgid "" msgstr "" "Project-Id-Version: ucf debconf template\n" "Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" "POT-Creation-Date: 2008-01-28 08:38-0800\n" "PO-Revision-Date: 2007-09-19 01:09-0500\n" "Last-Translator: Claus Hindsgaul \n" "Language-Team: Danish\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=ISO-8859-1\n" "Content-Transfer-Encoding: 8bit\n" "X-Generator: KBabel 1.11.4\n" #. Type: select #. Choices #. 
Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "install the package maintainer's version" msgstr "installér pakkevedligeholderens udgave" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "keep the local version currently installed" msgstr "bevar din aktuelt-installerede udgave" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show the differences between the versions" msgstr "vis forskellene mellem udgaverne" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a side-by-side difference between the versions" msgstr "vis forskellene mellem versionerne overfor hinanden" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a 3-way difference between available versions" msgstr "vis 3-vejs forskelle mellem de tilgængelige udgaver af filen" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "do a 3-way merge between available versions (experimental)" msgstr "" "udfør en 3-vejs fletning mellem de tilgængelige udgaver af filen [Meget " "eksperimentelt]" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "start a new shell to examine the situation" msgstr "start en ny skal for at undersøge situationen" #. Type: select #. Description #: ../grub.templates:1002 msgid "What would you like to do about ${BASENAME}?" msgstr "Hvad vil du gøre med ${BASENAME}?" #. Type: select #. Description #: ../grub.templates:1002 #, fuzzy msgid "" "A new version of /boot/grub/menu.lst is available, but the version installed " "currently has been locally modified." msgstr "" "Der er kommet en ny version af filen /boot/grub/menu.lst, men din version er " "blevet ændret lokalt." debian/po/it.po0000664000000000000000000000522012574667652010605 0ustar # translation of ucf_1.18_templates.po to italian # Copyright Luca Bruno , 2005. msgid "" msgstr "" "Project-Id-Version: ucf_1.18_templates\n" "Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" "POT-Creation-Date: 2008-01-28 08:38-0800\n" "PO-Revision-Date: 2007-09-19 01:10-0500\n" "Last-Translator: Luca Bruno \n" "Language-Team: Italian \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "install the package maintainer's version" msgstr "installare la versione del manutentore del pacchetto" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "keep the local version currently installed" msgstr "mantenere la propria versione attualmente installata" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show the differences between the versions" msgstr "mostrare le differenze tra le versioni" #. Type: select #. Choices #. 
Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a side-by-side difference between the versions" msgstr "mostrare le differenze tra le versioni" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a 3-way difference between available versions" msgstr "Mostrare le differenze tra 3 versioni del file disponibili" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "do a 3-way merge between available versions (experimental)" msgstr "" "Integrare le differenze tra 3 versioni del file disponibili [Molto " "sperimentale]" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "start a new shell to examine the situation" msgstr "eseguire una nuova shell per esaminare la situazione" #. Type: select #. Description #: ../grub.templates:1002 msgid "What would you like to do about ${BASENAME}?" msgstr "Cosa si vuol fare di ${BASENAME}?" #. Type: select #. Description #: ../grub.templates:1002 #, fuzzy msgid "" "A new version of /boot/grub/menu.lst is available, but the version installed " "currently has been locally modified." msgstr "" "Una nuova versione del file /boot/grub/menu.lst è disponibile, ma la propria " "versione è stata modificata localmente." debian/po/pt.po0000664000000000000000000000532412574667652010621 0ustar # Portuguese translation of ucf's debconf messages. # 2007, Pedro Ribeiro # Bruno Queiros , 2007. # msgid "" msgstr "" "Project-Id-Version: ucf 2.0020\n" "Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" "POT-Creation-Date: 2008-01-28 08:38-0800\n" "PO-Revision-Date: 2007-09-19 01:09-0500\n" "Last-Translator: Bruno Queiros \n" "Language-Team: Portuguese \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "install the package maintainer's version" msgstr "instalar a versão do criador do pacote" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "keep the local version currently installed" msgstr "manter a versão actualmente instalada" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show the differences between the versions" msgstr "mostrar a diferença entre as versões" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a side-by-side difference between the versions" msgstr "mostrar uma diferença lado-a-lado entre as versões" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a 3-way difference between available versions" msgstr "mostrar uma diferença em 3 vias entre versões disponíveis do ficheiro" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "do a 3-way merge between available versions (experimental)" msgstr "" "fazer uma junção em 3 vias entre versões disponíveis do ficheiro [Muito " "Experimental]" #. Type: select #. Choices #. 
Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "start a new shell to examine the situation" msgstr "iniciar uma nova consola para examinar a situação" #. Type: select #. Description #: ../grub.templates:1002 msgid "What would you like to do about ${BASENAME}?" msgstr "O que quer fazer acerca de ${BASENAME}?" #. Type: select #. Description #: ../grub.templates:1002 msgid "" "A new version of /boot/grub/menu.lst is available, but the version installed " "currently has been locally modified." msgstr "" "Uma nova versão do ficheiro /boot/grub/menu.lst está disponível, mas a sua " "versão foi modificada localmente." debian/po/cs.po0000664000000000000000000000575112574667652010607 0ustar # # Translators, if you are not familiar with the PO format, gettext # documentation is worth reading, especially sections dedicated to # this format, e.g. by running: # info -n '(gettext)PO Files' # info -n '(gettext)Header Entry' # # Some information specific to po-debconf are available at # /usr/share/doc/po-debconf/README-trans # or http://www.debian.org/intl/l10n/po-debconf/README-trans # # Developers do not need to manually edit POT or PO files. # msgid "" msgstr "" "Project-Id-Version: ucf\n" "Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" "POT-Creation-Date: 2008-01-28 08:38-0800\n" "PO-Revision-Date: 2007-06-23 14:31+0200\n" "Last-Translator: Miroslav kure \n" "Language-Team: Czech \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "install the package maintainer's version" msgstr "instalovat verzi od správce balíku" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "keep the local version currently installed" msgstr "ponechat aktuálnÄ› instalovanou lokální verzi" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show the differences between the versions" msgstr "zobrazit rozdíly mezi verzemi" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a side-by-side difference between the versions" msgstr "zobrazit rozdíly mezi verzemi vedle sebe" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a 3-way difference between available versions" msgstr "zobrazit třícestný rozdíl mezi dostupnými verzemi" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "do a 3-way merge between available versions (experimental)" msgstr "provést třícestné slouÄení dostupných verzí (experimentální)" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "start a new shell to examine the situation" msgstr "spustit nový shell a prozkoumat situaci" #. Type: select #. Description #: ../grub.templates:1002 msgid "What would you like to do about ${BASENAME}?" msgstr "Co chcete udÄ›lat s ${BASENAME}?" #. Type: select #. Description #: ../grub.templates:1002 msgid "" "A new version of /boot/grub/menu.lst is available, but the version installed " "currently has been locally modified." 
msgstr "" "K dispozici je novÄ›jší verze souboru /boot/grub/menu.lst, avÅ¡ak " "nainstalovaná verze obsahuje lokální úpravy." debian/po/gl.po0000664000000000000000000000527212574667652010602 0ustar # Galician translation of ucf's debconf templates. # This file is distributed under the same license as the ucf package. # Jacobo Tarrio , 2006, 2007. # msgid "" msgstr "" "Project-Id-Version: ucf\n" "Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" "POT-Creation-Date: 2008-01-28 08:38-0800\n" "PO-Revision-Date: 2007-06-20 15:50+0200\n" "Last-Translator: Jacobo Tarrio \n" "Language-Team: Galician \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "install the package maintainer's version" msgstr "instalar a versión do mantedor de paquetes" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "keep the local version currently installed" msgstr "conservar a versión local instalada actualmente" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show the differences between the versions" msgstr "amosar as diferencias entre as versións" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a side-by-side difference between the versions" msgstr "amosar unha comparación entre as versións" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a 3-way difference between available versions" msgstr "amosar unha diferencia a tres entre as versións dispoñibles" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "do a 3-way merge between available versions (experimental)" msgstr "mesturar as versións dispoñibles (experimental)" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "start a new shell to examine the situation" msgstr "iniciar un intérprete de ordes para examinar a situación" #. Type: select #. Description #: ../grub.templates:1002 msgid "What would you like to do about ${BASENAME}?" msgstr "¿Que quere facer con ${BASENAME}?" #. Type: select #. Description #: ../grub.templates:1002 msgid "" "A new version of /boot/grub/menu.lst is available, but the version installed " "currently has been locally modified." msgstr "" "Hai dispoñible unha nova versión do ficheiro /boot/grub/menu.lst, pero a " "versión actualmente instalada ten modificacións locais." debian/po/ca.po0000664000000000000000000000512312574667652010556 0ustar # ucf (debconf) translation to Catalan. # Copyright (C) 2004 Free Software Foundation, Inc. # Aleix Badia i Bosch , 2004 # msgid "" msgstr "" "Project-Id-Version: ucf_0.30_templates\n" "Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" "POT-Creation-Date: 2008-01-28 08:38-0800\n" "PO-Revision-Date: 2007-09-19 01:08-0500\n" "Last-Translator: Aleix Badia i Bosch \n" "Language-Team: Catalan \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=ISO-8859-1\n" "Content-Transfer-Encoding: 8bit\n" #. Type: select #. Choices #. 
Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "install the package maintainer's version" msgstr "instal·la la versió del mantenidor del paquet" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "keep the local version currently installed" msgstr "mantén la versió instal·lada actualment" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show the differences between the versions" msgstr "mostra les diferències entre les versions" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a side-by-side difference between the versions" msgstr "mostra les diferències entre les versions" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a 3-way difference between available versions" msgstr "mostra les diferències entre les versions" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "do a 3-way merge between available versions (experimental)" msgstr "" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "start a new shell to examine the situation" msgstr "inicia una nova línia de comandes per examinar la situació" #. Type: select #. Description #: ../grub.templates:1002 msgid "What would you like to do about ${BASENAME}?" msgstr "Què voleu fer respecte el ${BASNAME}?" #. Type: select #. Description #: ../grub.templates:1002 msgid "" "A new version of /boot/grub/menu.lst is available, but the version installed " "currently has been locally modified." msgstr "" "Hi ha una nova versió del fitxer /boot/grub/menu.lst, però la vostra versió " "s'ha modificat localment." debian/po/POTFILES.in0000664000000000000000000000005112574667652011403 0ustar [type: gettext/rfc822deb] grub.templates debian/po/eu.po0000664000000000000000000000536112574667652010610 0ustar # SOME DESCRIPTIVE TITLE. # Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER # This file is distributed under the same license as the PACKAGE package. # Piarres Beobide , 2007. # msgid "" msgstr "" "Project-Id-Version: ucf-debconf\n" "Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" "POT-Creation-Date: 2008-01-28 08:38-0800\n" "PO-Revision-Date: 2007-06-20 13:40+0200\n" "Last-Translator: Piarres Beobide \n" "Language-Team: Euskara \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "X-Generator: Pootle 0.11\n" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "install the package maintainer's version" msgstr "Pakete arduradunaren bertsioa instalatu" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "keep the local version currently installed" msgstr "mantendu lokalean instalaturiko bertsioa" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show the differences between the versions" msgstr "bertsioen arteko ezberdintasunak ikusi" #. Type: select #. Choices #. 
Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a side-by-side difference between the versions" msgstr "bertsioen arteko ezberdintasunak aldez-alde ikusi" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a 3-way difference between available versions" msgstr "bertsioen arteko ezberdintasunak 3 eratara ikusi" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "do a 3-way merge between available versions (experimental)" msgstr "bertsioen arteko ezberdintasunak 3 eratara batu (esperimentala)" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "start a new shell to examine the situation" msgstr "shell berri bat hasi egoera aztertzeko" #. Type: select #. Description #: ../grub.templates:1002 msgid "What would you like to do about ${BASENAME}?" msgstr "Zer egitea gustatuko litzaizuke ${BASENAME}-ri buruz?" #. Type: select #. Description #: ../grub.templates:1002 #, fuzzy msgid "" "A new version of /boot/grub/menu.lst is available, but the version installed " "currently has been locally modified." msgstr "" "/boot/grub/menu.lst fitxategiaren bertsio berri bat dago eskuragarri, baina " "instalaturik dagoenak lokalean eraldatua izan da." debian/po/fi.po0000664000000000000000000000506412574667652010575 0ustar msgid "" msgstr "" "Project-Id-Version: ucf_3.003\n" "Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" "POT-Creation-Date: 2008-01-28 08:38-0800\n" "PO-Revision-Date: 2007-10-30 18:42+0200\n" "Last-Translator: Esko Arajärvi \n" "Language-Team: Finnish \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "X-Poedit-Language: Finnish\n" "X-Poedit-Country: FINLAND\n" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "install the package maintainer's version" msgstr "asenna paketin ylläpitäjän versio" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "keep the local version currently installed" msgstr "säilytä tällä hetkellä asennettu paikallinen versio" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show the differences between the versions" msgstr "näytä versioiden väliset erot" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a side-by-side difference between the versions" msgstr "näytä versioiden väliset erot rinnakkain" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a 3-way difference between available versions" msgstr "näytä versioiden välinen kolmisuuntainen erotus" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "do a 3-way merge between available versions (experimental)" msgstr "tee kolmisuuntainen versioiden yhdistys (kokeellinen)" #. Type: select #. Choices #. 
Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "start a new shell to examine the situation" msgstr "käynnistä uusi kuori tilanteen tutkimiseksi" #. Type: select #. Description #: ../grub.templates:1002 msgid "What would you like to do about ${BASENAME}?" msgstr "Miten käsitellään ${BASENAME}?" #. Type: select #. Description #: ../grub.templates:1002 #, fuzzy msgid "" "A new version of /boot/grub/menu.lst is available, but the version installed " "currently has been locally modified." msgstr "" "Asetustiedostosta /boot/grub/menu.lst on tarjolla uusi versio, mutta " "nykyistä versiota on muokattu paikallisesti." debian/po/templates.pot0000664000000000000000000000431712574667652012361 0ustar # SOME DESCRIPTIVE TITLE. # Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER # This file is distributed under the same license as the PACKAGE package. # FIRST AUTHOR , YEAR. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" "POT-Creation-Date: 2008-01-28 08:38-0800\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=CHARSET\n" "Content-Transfer-Encoding: 8bit\n" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "install the package maintainer's version" msgstr "" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "keep the local version currently installed" msgstr "" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show the differences between the versions" msgstr "" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a side-by-side difference between the versions" msgstr "" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a 3-way difference between available versions" msgstr "" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "do a 3-way merge between available versions (experimental)" msgstr "" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "start a new shell to examine the situation" msgstr "" #. Type: select #. Description #: ../grub.templates:1002 msgid "What would you like to do about ${BASENAME}?" msgstr "" #. Type: select #. Description #: ../grub.templates:1002 msgid "" "A new version of /boot/grub/menu.lst is available, but the version installed " "currently has been locally modified." msgstr "" debian/po/vi.po0000664000000000000000000000634712574667652010622 0ustar # Vietnamese translation for UCF. # Copyright © 2007 Free Software Foundation, Inc. # Clytie Siddall , 2005-2007. 
# msgid "" msgstr "" "Project-Id-Version: ucf 3.002\n" "Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" "POT-Creation-Date: 2008-01-28 08:38-0800\n" "PO-Revision-Date: 2007-06-21 17:52+0930\n" "Last-Translator: Clytie Siddall \n" "Language-Team: Vietnamese \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=1; plural=0;\n" "X-Generator: LocFactoryEditor 1.6.4a1\n" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "install the package maintainer's version" msgstr "cài đặt phiên bản cá»§a nhà duy trì gói" # msgid "keep your currently-installed version" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "keep the local version currently installed" msgstr "giữ phiên bản cục bá»™ đã cài đặt hiện thá»i" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show the differences between the versions" msgstr "hiển thị khác biệt giữa những phiên bản" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a side-by-side difference between the versions" msgstr "hiển thị khác biệt cạnh nhau giữa những phiên bản" # msgid "show a 3 way difference between available versions of the file" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a 3-way difference between available versions" msgstr "hiển thị khác biệt ba hướng giữa những phiên bản sẵn sàng" # msgid "" # do a 3 way merge between available versions of the file [Very # Experimental] #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "do a 3-way merge between available versions (experimental)" msgstr "trá»™n ba hướng những phiên bản sẵn sàng (thá»±c nghiệm)" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "start a new shell to examine the situation" msgstr "khởi chạy trình bao má»›i để khám xét trưá»ng hợp" #. Type: select #. Description #: ../grub.templates:1002 msgid "What would you like to do about ${BASENAME}?" msgstr "Äối vá»›i ${BASENAME}? muốn làm gì vậy?" # msgid "" # A new version of configuration file ${FILE} is available, but your # version has been locally modified. #. Type: select #. Description #: ../grub.templates:1002 #, fuzzy msgid "" "A new version of /boot/grub/menu.lst is available, but the version installed " "currently has been locally modified." msgstr "" "Có sẵn má»™t phiên bản má»›i cá»§a tập tin cấu hình /boot/grub/menu.lst, nhưng " "phiên bản được cài đặt hiện thá»i đã bị sá»­a đổi cục bá»™." debian/po/ja.po0000664000000000000000000000626312574667652010573 0ustar # # Translators, if you are not familiar with the PO format, gettext # documentation is worth reading, especially sections dedicated to # this format, e.g. by running: # info -n '(gettext)PO Files' # info -n '(gettext)Header Entry' # # Some information specific to po-debconf are available at # /usr/share/doc/po-debconf/README-trans # or http://www.debian.org/intl/l10n/po-debconf/README-trans # # Developers do not need to manually edit POT or PO files. 
# msgid "" msgstr "" "Project-Id-Version: ucf\n" "Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" "POT-Creation-Date: 2008-01-28 08:38-0800\n" "PO-Revision-Date: 2007-06-21 14:11+0900\n" "Last-Translator: Kenshi Muto \n" "Language-Team: Japanese \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "install the package maintainer's version" msgstr "パッケージメンテナã®ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã‚’インストール" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "keep the local version currently installed" msgstr "ç¾åœ¨ã‚¤ãƒ³ã‚¹ãƒˆãƒ¼ãƒ«ã•れã¦ã„るローカルãƒãƒ¼ã‚¸ãƒ§ãƒ³ã‚’ä¿æŒ" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show the differences between the versions" msgstr "ãƒãƒ¼ã‚¸ãƒ§ãƒ³é–“ã®å·®ç•°ã‚’表示" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a side-by-side difference between the versions" msgstr "ãƒãƒ¼ã‚¸ãƒ§ãƒ³é–“ã®å·®ç•°ã‚’並行表示" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a 3-way difference between available versions" msgstr "利用å¯èƒ½ãªãƒãƒ¼ã‚¸ãƒ§ãƒ³é–“ã®3種類ã®å·®ç•°ã‚’表示" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "do a 3-way merge between available versions (experimental)" msgstr "利用å¯èƒ½ãªãƒãƒ¼ã‚¸ãƒ§ãƒ³é–“ã§ã®3種類マージを行ㆠ(実験的)" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "start a new shell to examine the situation" msgstr "状æ³ã‚’検討ã™ã‚‹ãŸã‚ã®æ–°ã—ã„シェルを起動" #. Type: select #. Description #: ../grub.templates:1002 msgid "What would you like to do about ${BASENAME}?" msgstr "${BASENAME} ã«ã¤ã„ã¦ä½•を行ã„ãŸã„ã§ã™ã‹?" #. Type: select #. Description #: ../grub.templates:1002 #, fuzzy msgid "" "A new version of /boot/grub/menu.lst is available, but the version installed " "currently has been locally modified." msgstr "" "設定ファイル /boot/grub/menu.lst ã®æ–°ã—ã„ãƒãƒ¼ã‚¸ãƒ§ãƒ³ãŒåˆ©ç”¨å¯èƒ½ã§ã™ãŒã€ç¾åœ¨ã‚¤ãƒ³" "ストールã•れã¦ã„ã‚‹ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã¯ã€ãƒ­ãƒ¼ã‚«ãƒ«ã§å¤‰æ›´ã•れã¦ã„ã¾ã™ã€‚" debian/po/de.po0000664000000000000000000000631512574667652010567 0ustar # translation of ucf_3.002_de.po to German # # Translators, if you are not familiar with the PO format, gettext # documentation is worth reading, especially sections dedicated to # this format, e.g. by running: # info -n '(gettext)PO Files' # info -n '(gettext)Header Entry' # Some information specific to po-debconf are available at # /usr/share/doc/po-debconf/README-trans # or http://www.debian.org/intl/l10n/po-debconf/README-trans# # Developers do not need to manually edit POT or PO files. # # Erik Schanze , 2004-2007. msgid "" msgstr "" "Project-Id-Version: ucf_3.002_de\n" "Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" "POT-Creation-Date: 2008-01-28 08:38-0800\n" "PO-Revision-Date: 2007-07-01 22:08+0200\n" "Last-Translator: Erik Schanze \n" "Language-Team: German \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "X-Generator: KBabel 1.11.4\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" #. Type: select #. 
Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "install the package maintainer's version" msgstr "Version des Paket-Betreuers installieren" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "keep the local version currently installed" msgstr "aktuell installierte Version behalten" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show the differences between the versions" msgstr "Unterschiede zwischen den Versionen anzeigen" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a side-by-side difference between the versions" msgstr "Unterschiede zwischen den Versionen nebeneinander anzeigen" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a 3-way difference between available versions" msgstr "3-Wege-Differenz der verfügbaren Versionen der Datei anzeigen" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "do a 3-way merge between available versions (experimental)" msgstr "3-Wege-Vereinigung verfügbarer Versionen [experimentell]" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "start a new shell to examine the situation" msgstr "die Angelegenheit in einer neu gestarteten Shell untersuchen" #. Type: select #. Description #: ../grub.templates:1002 msgid "What would you like to do about ${BASENAME}?" msgstr "Wie wollen Sie mit ${BASENAME} verfahren?" #. Type: select #. Description #: ../grub.templates:1002 #, fuzzy msgid "" "A new version of /boot/grub/menu.lst is available, but the version installed " "currently has been locally modified." msgstr "" "Eine neue Version der Datei /boot/grub/menu.lst ist verfügbar, aber die " "installierte Version wurde verändert." debian/po/sv.po0000664000000000000000000000534112574667652010625 0ustar # Swedish translation for ucf. # Copyright (C) 2007 Free Software Foundation, Inc. # This file is distributed under the same license as the ucf package. # Daniel Nylander , 2007. # msgid "" msgstr "" "Project-Id-Version: ucf 2.002\n" "Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" "POT-Creation-Date: 2008-01-28 08:38-0800\n" "PO-Revision-Date: 2007-06-25 10:07+0100\n" "Last-Translator: Daniel Nylander \n" "Language-Team: Swedish \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "install the package maintainer's version" msgstr "installera paketansvariges version" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "keep the local version currently installed" msgstr "behÃ¥ll den lokalt installerade version" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show the differences between the versions" msgstr "visa skillnaderna mellan versionerna" #. Type: select #. Choices #. 
Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a side-by-side difference between the versions" msgstr "visa skillnaderna sida vid sida mellan versionerna" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "show a 3-way difference between available versions" msgstr "visa en 3-vägs skillnad mellan tillgängliga versioner" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "do a 3-way merge between available versions (experimental)" msgstr "gör en 3-vägs sammanslagning mellan versionerna (experimentell)" #. Type: select #. Choices #. Translators, please keep translations *short* (less than 65 columns) #: ../grub.templates:1001 msgid "start a new shell to examine the situation" msgstr "starta ett nytt skal för att undersöka situationen" #. Type: select #. Description #: ../grub.templates:1002 msgid "What would you like to do about ${BASENAME}?" msgstr "Vad vill du göra med ${BASENAME}?" #. Type: select #. Description #: ../grub.templates:1002 #, fuzzy msgid "" "A new version of /boot/grub/menu.lst is available, but the version installed " "currently has been locally modified." msgstr "" "En ny version av filen /boot/grub/menu.lst finns tillgänglig, men versionen " "som är installerad har ändrats lokalt." debian/changelog0000664000000000000000000020722612675040252011056 0ustar cloud-init (0.7.5-0ubuntu1.18) trusty-security; urgency=medium * No change rebuild in the -security pocket. -- Marc Deslauriers Thu, 24 Mar 2016 15:31:45 -0400 cloud-init (0.7.5-0ubuntu1.17) trusty; urgency=medium * Microsoft Azure: - d/patches/lp-1551419-azure-handle-flipped-uuid-endianness.patch: Handle cases where the endianness of the DMI UUID used for the instance ID has changed since last boot.(LP: #1551419) -- Daniel Watkins Wed, 02 Mar 2016 14:23:18 +0000 cloud-init (0.7.5-0ubuntu1.16) trusty; urgency=medium * Joyent Smart DataOS: - d/patches/lp-1540965-SmartOS-Add-support-for-Joyent-LX-Brand-Zones.patch: SmartOS: Add support for Joyent LX-Brand Zones (LP: #1540965) -- Robert C Jennings Tue, 02 Feb 2016 09:32:02 -0600 cloud-init (0.7.5-0ubuntu1.15) trusty; urgency=medium * Microsoft Azure: - d/patches/lp-1506244-azure-ssh-key-values.patch: AZURE: Add support and preference for fabric provided public SSH public key values over fingerprints (LP: #1506244). - use stable VM instance ID over SharedConfig.xml (LP: #1506187): - d/patches/lp-1506187-azure_use_unique_vm_id.patch: use DMI data for the stable VM instance ID - d/cloud-init.preinst: migrate existing instances to stable VM instance ID on upgrade from prior versions of cloud-init. -- Ben Howard Tue, 17 Nov 2015 10:02:24 -0700 cloud-init (0.7.5-0ubuntu1.14) trusty; urgency=medium * d/patches/lp-1177432-same-archives-as-ubuntu-server.patch: use the same archive pockets as Ubuntu Server (LP: #1177432). -- Ben Howard Thu, 05 Nov 2015 09:50:57 -0700 cloud-init (0.7.5-0ubuntu1.13) trusty; urgency=medium * d/patches/lp-1177432-enable_backports.patch: Enable backports apt pocket (LP: #1177432). -- Ben Howard Mon, 02 Nov 2015 08:52:09 -0700 cloud-init (0.7.5-0ubuntu1.12) trusty; urgency=medium * d/patches/lp-1493453-nocloudds-vendor_data.patch: - fix vendor_data variable assignment for the NoCloud Datasource (LP: #1493453). 
-- Ben Howard Mon, 21 Sep 2015 15:24:17 -0600 cloud-init (0.7.5-0ubuntu1.11) trusty; urgency=medium [ Felipe Reyes ] * d/patches/fix-consumption-of-vendor-data.patch: - Fix consumption of vendor-data in OpenStack to allow namespacing (LP: #1469260). [ Scott Moser ] * d/patches/lp-1461242-generate-ed25519-host-keys.patch: - ssh: generate ed25519 host keys if supported (LP: #1461242) -- Scott Moser Fri, 11 Sep 2015 20:22:00 -0400 cloud-init (0.7.5-0ubuntu1.10) trusty; urgency=medium [ Daniel Watkins ] * d/patches/lp-1490796-azure-fix-mount_cb-for-symlinks.patch: - Fix a regression caused by switching to /dev/disk symlinks (LP: #1490796). -- Ben Howard Wed, 02 Sep 2015 10:57:30 -0600 cloud-init (0.7.5-0ubuntu1.9) trusty; urgency=medium * d/cloud-init.templates: Include SmartOS data source in the default list and choices (LP: #1398997). -- Daniel Watkins Wed, 02 Sep 2015 16:09:33 +0100 cloud-init (0.7.5-0ubuntu1.8) trusty; urgency=medium * debian/patches/lp-1411582-azure-udev-ephemeral-disks.patch: - Use udev rules to discover ephemeral disk locations rather than hard-coded device names (LP: #1411582). * debian/patches/lp-1470880-fix-gce-az-determination.patch: - Correctly parse GCE's availability zones (LP: #1470880). * d/patches/lp-1470890-include-regions-in-dynamic-mirror-discovery.patch: - Make %(region)s a valid substitution in mirror discovery (LP: #1470890). * Remove python-serial from Build-Depends; it was mistakenly added last upload. -- Daniel Watkins Fri, 14 Aug 2015 13:54:02 +0100 cloud-init (0.7.5-0ubuntu1.7) trusty; urgency=medium * d/patches/lp-1456684-eu-central-1.patch: - Add central as a direction for EC2 availability zones (LP: #1456684). * d/patches/lp-1464253-handle-new-cloudstack-passwords.patch: - Handle both old and new CloudStack password servers (LP: #1464253). * Add python-serial to Build-Depends (LP: #1381776). -- Daniel Watkins Thu, 16 Jul 2015 17:34:01 +0100 cloud-init (0.7.5-0ubuntu1.6) trusty; urgency=medium * d/patches/lp-1375252-1458052-Azure-hostname_password.patch: Backport of 15.10 Azure Datasource to fix various issues: - Azure Datasource writes user password in plain text (LP: #1458052). - Hostname not preserved across Azure reboots (LP: #1375252). -- Ben Howard Mon, 25 May 2015 09:30:20 -0600 cloud-init (0.7.5-0ubuntu1.5) trusty; urgency=medium * Backport support for fetching passwords in CloudStack (LP: #1422388). * Fix CloudStack metadata retrieval (LP: #1356855). -- Daniel Watkins Wed, 11 Mar 2015 10:57:10 -0600 cloud-init (0.7.5-0ubuntu1.4) trusty; urgency=medium [ Ben Howard ] * d/patches/lp-1383794-gce-short_name.patch: Use short hostname for GCE (LP: #1383794). [ Wayne Witzel III ] * d/patches/lp-1404311-gce-data_encoding.patch: Enable user-data encoding support for GCE (LP: #1404311). [ Daniel Watkins ] * d/patches/lp-1422919-azure-g5_ephemeral.patch: Properly format G5 series cloud instances (LP: #1422919). -- Ben Howard Tue, 17 Feb 2015 14:56:16 -0700 cloud-init (0.7.5-0ubuntu1.3) trusty-proposed; urgency=medium * d/patches/lp-1336855-grub_xvda.patch: include xvda devices for consideration for grub configuration (LP: #1336855). 
-- Ben Howard Thu, 18 Sep 2014 16:47:23 -0600 cloud-init (0.7.5-0ubuntu1.2) trusty-proposed; urgency=medium * d/patches/lp-1353008-cloud-init-local-needs-run.conf: backport change to cloud-init-local.conf to depend on /run being mounted (LP: #1353008) -- Scott Moser Wed, 17 Sep 2014 09:15:54 -0400 cloud-init (0.7.5-0ubuntu1.1) trusty-proposed; urgency=medium [ Ben Howard ] * debian/patches/lp1316475-1303986-cloudsigma.patch: Backport of CloudSigma Datasource from 14.10 - [FFe] Support VendorData for CloudSigma (LP: #1303986). - Only query /dev/ttys1 when CloudSigma is detected (LP: #1316475). [ Scott Moser ] * debian/cloud-init.templates: fix choices so dpkg-reconfigure works as expected (LP: #1325746) -- Scott Moser Fri, 20 Jun 2014 13:29:29 -0400 cloud-init (0.7.5-0ubuntu1) trusty; urgency=medium * New upstream release. * support base64 encoded user-data in OpenNebula, required to allow arbitrary content in user-data (LP: #1300941) * pep8 and pylint fixes -- Scott Moser Tue, 01 Apr 2014 14:39:03 -0400 cloud-init (0.7.5~bzr970-0ubuntu1) trusty; urgency=medium * New upstream snapshot. * fix NoCloud and seedfrom on the kernel command line (LP: #1295223) -- Scott Moser Thu, 20 Mar 2014 12:35:58 -0400 cloud-init (0.7.5~bzr969-0ubuntu1) trusty; urgency=medium * New upstream snapshot. * Azure: Reformat ephemeral disk if it got re-provisioned by the cloud on any reboot (LP: #1292648) * final_message: fix replacement of upper case keynames (LP: #1286164) * seed_random: do not capture output. Correctly provide environment variable RANDOM_SEED_FILE to command. * CloudSigma: support base64 encoded user-data -- Scott Moser Wed, 19 Mar 2014 14:04:34 -0400 cloud-init (0.7.5~bzr964-0ubuntu1) trusty; urgency=medium * New upstream snapshot. * SmartOS, AltCloud: disable running on arm systems due to bug (LP: #1243287, #1285686) [Oleg Strikov] * Allow running a command to seed random, default is 'pollinate -q' (LP: #1286316) [Dustin Kirkland] * Write status to /run/cloud-init/status.json for consumption by other programs (LP: #1284439) * fix output of network information to not include 'addr:' (LP: #1285185) -- Scott Moser Mon, 03 Mar 2014 16:59:27 -0500 cloud-init (0.7.5~bzr952-0ubuntu1) trusty; urgency=medium * New upstream snapshot. * fix broken seed of DAtaSourceNoCloud via external disk. -- Scott Moser Tue, 18 Feb 2014 14:10:52 -0500 cloud-init (0.7.5~bzr950-0ubuntu1) trusty; urgency=medium * New upstream snapshot. * support for vendor-data in NoCloud * fix in is_ipv4 to accept IP addresses with a '0' in them. * Azure: fix issue when stale data in /var/lib/waagent (LP: #1269626) * skip config_modules that declare themselves only verified on a set of distros. Add them to 'unverified_modules' list to run anyway. * Add CloudSigma datasource [Kiril Vladimiroff] * Add initial support for Gentoo and Arch distributions [Nate House] * Add GCE datasource [Vaidas Jablonskis] * Add native Openstack datasource which reads openstack metadata rather than relying on EC2 data in openstack metadata service. -- Scott Moser Fri, 14 Feb 2014 14:39:56 -0500 cloud-init (0.7.5~bzr933-0ubuntu1) trusty; urgency=medium * debian/control: bump Standards-Version to 3.9.5 * debian/control: drop boto dependency no longer required in trunk. * New upstream snapshot. * ConfigDrive: consider partitions labelled correctly as possible sources. * find root filesystem for resizing in cases where there is no initramfs * removal of dependency on python-boto * vendor-data support, and usage of that in Joyent datasource. 
* change default output to be logged to /var/log/cloud-init-output.log * SeLinuxGuard: Cast file path to string. (LP: #1260072) * drop support for resizing via parted (LP: #1212492) * SmartOS: changes to address changes in platform (LP: #1272115) * FreeBSD support. -- Scott Moser Fri, 24 Jan 2014 22:41:57 -0500 cloud-init (0.7.5~bzr902-0ubuntu1) trusty; urgency=medium * debian/control: Build-Depend on python-jsonpatch as #717916 is now fixed. * debian/control: Recommend eatmydata (LP: #1236531) * New upstream snapshot. * support invoking apt with 'eatmydata' (LP: #1236531) * add a message in log about dynamic import failures * New in '0.7.4' release. * fix reading of mount information on kernels < 2.6.26 (LP: #1248625) * SmartOS: change 'region' to 'datacenter_name' to address change in data provided to instance (LP: #1249124) * support calling 'add-apt-repository' for 'cloud-archive:' entries (LP: #1244355) * DataSourceAzure: fix incompatibility with python 2.6 (LP: #1232175) * fix bug mounting first partition of a alias'd name. (LP: #1236594) * SmartOS: fix bug with hostname due to trailing whitespace (LP: #1236445) * fix creation of partitions on Azure (LP: #1233698) * cc_growpart: respect /etc/growroot-disabled (LP: #1234331) * ubuntu config: add default user to 'sudo' group (LP: #1228228) * Fix usage of libselinux-python when selinux is disabled * add OpenNebula datasource -- Scott Moser Tue, 17 Dec 2013 16:51:30 -0500 cloud-init (0.7.3-0ubuntu2) saucy; urgency=low * fix bug where a mount entry of 'ephemeral0' would only consider the unpartitioned device, not also the first partition (LP: #1236594) -- Scott Moser Mon, 07 Oct 2013 20:16:02 -0400 cloud-init (0.7.3-0ubuntu1) saucy; urgency=low * New upstream release. * Fix for SmartOS datasource when hostname is provided via dmi data (LP: #1236445) -- Scott Moser Mon, 07 Oct 2013 14:49:56 -0400 cloud-init (0.7.3~bzr884-0ubuntu1) saucy; urgency=low * New upstream snapshot. * allow disabling of growpart via file /etc/growroot-disabled (LP: #1234331) * add default user to sudo group (LP: #1228228) * fix disk creation on azure (LP: #1233698) * DatasourceSmartOS: allow availabiltity-zone to be fed from the datasource via 'region' (which allows 'mirrors' and other things to make use of it). -- Scott Moser Fri, 04 Oct 2013 21:08:07 -0400 cloud-init (0.7.3~bzr879-0ubuntu1) saucy; urgency=low * New upstream snapshot. * fixes to disk_setup config module and enabling of partition creation and filesystem creation on Azure. -- Scott Moser Fri, 27 Sep 2013 19:47:37 -0400 cloud-init (0.7.3~bzr871-0ubuntu1) saucy; urgency=low * New upstream snapshot. * add 'disk_setup' config module for partitioning disks and creating filesystems. (LP: #1218506) -- Scott Moser Fri, 20 Sep 2013 20:46:08 -0400 cloud-init (0.7.3~bzr869-0ubuntu1) saucy; urgency=low * depend on cloud-utils or cloud-guest-utils (LP: #1224003) * New upstream snapshot. * Add OpenNebula datasource. * Support reading 'random_seed' from metadata and writing to /dev/urandom * fix for bug in log_time. -- Scott Moser Wed, 11 Sep 2013 17:04:45 -0400 cloud-init (0.7.3~bzr862-0ubuntu1) saucy; urgency=low * New upstream snapshot. * support base64 encoded data in the smart os datasource -- Scott Moser Thu, 29 Aug 2013 04:54:39 -0400 cloud-init (0.7.3~bzr861-0ubuntu1) saucy; urgency=low * New upstream snapshot. * fix publishing hostname on azure (LP: #1214541) -- Scott Moser Tue, 20 Aug 2013 16:06:22 -0400 cloud-init (0.7.3~bzr860-0ubuntu1) saucy; urgency=low * New upstream snapshot. 
* fix setting of password for a user on azure. (LP: #1212723) -- Scott Moser Thu, 15 Aug 2013 16:01:40 -0400 cloud-init (0.7.3~bzr858-0ubuntu1) saucy; urgency=low * New upstream snapshot. * fix resizing of root partition by preferring the functional 'growpart' support over the broken 'parted resizepart' support (LP: #1212444) * add options for apt_ftp_proxy, apt_https_proxy and apt_config (LP: #1057195) -- Scott Moser Wed, 14 Aug 2013 21:44:22 -0400 cloud-init (0.7.3~bzr851-0ubuntu1) saucy; urgency=low * New upstream snapshot. * azure: do not wait for output of bouncing interface (ifdown; ifup) as that waits on output of all ifupdown scripts to close all file descriptors. -- Scott Moser Mon, 29 Jul 2013 12:21:08 -0400 cloud-init (0.7.3~bzr850-0ubuntu1) saucy; urgency=low * New upstream snapshot. * fix bouncing of interface. environment was not being modified so command invoked did not have access to 'interface'. * debian/README.source: update to read upstream version from trunk -- Scott Moser Fri, 26 Jul 2013 14:34:02 -0400 cloud-init (0.7.3~bzr849-0ubuntu2) saucy; urgency=low * debian/control: fix bad dependency on python-jsonpatch by build-depending on python-json-patch, so dh_python2 can find the right package (LP: #1205358). -- Scott Moser Fri, 26 Jul 2013 10:47:59 -0400 cloud-init (0.7.3~bzr849-0ubuntu1) saucy; urgency=low * New upstream snapshot. * azure: support publishing hostname via bouncing interface (LP: #1202758) -- Scott Moser Thu, 25 Jul 2013 17:08:30 -0400 cloud-init (0.7.3~bzr845-0ubuntu2) saucy; urgency=low * debian/control: fix dependency python-json-patch. -- Scott Moser Wed, 24 Jul 2013 15:01:24 -0400 cloud-init (0.7.3~bzr845-0ubuntu1) saucy; urgency=low * Reads the currently set value in /etc/cloud/cloud.cfg.d/90_dpkg.cfg to db_set the value of cloud-init/datasources. (Closes: #709773) * New upstream snapshot. * azure: use deployment-id rather than static instance-id (LP: #1204190) * config-drive: make 'availability_zone' available. (LP: #1190431) * finalize handlers even on error (LP: #1203368) * azure: fix password based access (LP: #1201969) * add smartos (Joyent cloud) datasource * support patching cloud-config via jsonp (LP: #1200476) * debian/control: add dependency on python-jsonp -- Scott Moser Wed, 24 Jul 2013 13:47:53 -0400 cloud-init (0.7.3~bzr829-0ubuntu1) saucy; urgency=low * New upstream snapshot. * fix to upstart_job handler if version upstart is version 1.8. * Azure datasource: allow userdata to be found in node named 'UserData' or 'CustomData' -- Scott Moser Thu, 11 Jul 2013 10:20:03 -0400 cloud-init (0.7.3~bzr826-0ubuntu2) saucy; urgency=low * debian/cloud-init.templates: add 'Azure' datasource to list of available datasources. -- Scott Moser Wed, 10 Jul 2013 16:31:48 -0400 cloud-init (0.7.3~bzr826-0ubuntu1) saucy; urgency=low * New upstream snapshot. * Fix omnibus support (LP: #1182265) * invoke 'initctl reload-configuration' on upstart jobs again (LP: #1124384) * Remove unowned files in /etc/apt/apt.conf.d/ after purge. (Closes #674237) -- Scott Moser Wed, 10 Jul 2013 13:35:59 -0400 cloud-init (0.7.2-0ubuntu1) saucy; urgency=low * New upstream release. * fix merging routines to be backwards compatible (LP: #1180867) * fix for python 2.6 -- Scott Moser Wed, 05 Jun 2013 11:12:46 -0400 cloud-init (0.7.2~bzr812-0ubuntu1) saucy; urgency=low * New upstream snapshot. 
* catch up with upstream, which is hopefully 0.7.2 * straighten out the merging routines * fix a bug in Maas datasource -- Scott Moser Fri, 10 May 2013 17:53:49 -0400 cloud-init (0.7.2~bzr809-0ubuntu1) raring; urgency=low * New upstream snapshot. * make apt-get invoke 'dist-upgrade' rather than 'upgrade' for package_upgrade. (LP: #1164147) * workaround 2.6 kernel issue that stopped blkid from showing /dev/sr0 -- Scott Moser Thu, 11 Apr 2013 12:55:51 -0400 cloud-init (0.7.2~bzr804-0ubuntu1) raring; urgency=low * New upstream snapshot. * use python-requests rather than urllib2 for http (LP: #1067888) * handle failure of resizefs better. Specifically, do not show warnings or stack trace in lxc (LP: #1160462) -- Scott Moser Wed, 27 Mar 2013 10:04:41 -0400 cloud-init (0.7.2~bzr795-0ubuntu1) raring; urgency=low * New upstream snapshot. * documentation on write-files module (LP: #1111205) * support for specifying package versions in package installs * DataSourceNoCloud: allow specifyin user-data and meta-data in the datasource config (LP: #1115833) * work around bug in upstart for now (1124384) * support resizing btrfs fileystems * parse ssh keys more correctly (LP: #1136343) * upstart/cloud-init-nonet.conf: handle sigterm gracefully (LP: #1015223) * support growing partitions (LP: #1136936) * use --force-unsafe-io for dpkg installations to improve speed This is sane as it happens on instance initialization. * more powerful and user-suppliable cloud-config merge mechanisms (LP: #1023179) -- Scott Moser Thu, 07 Mar 2013 17:33:59 -0500 cloud-init (0.7.1-0ubuntu5) raring; urgency=low * catchup-751-760.patch (sync to 760) * DataSourceConfigDrive: allow config-drive data to come from a CD-ROM (LP: #1100545) * Allow 'sr0' to be used as a source for mount config [Vlastimil Holer] * do not log passwords provided via config (LP: #1096417) * DataSourceCloudStack: Attempt to find the 'virtual router' as provided from dhcp responses, rather than assuming it is the default route (LP: #1089989) [Gerard Dethier] in the CloudStack environment use virtual router rather than default route * notify upstart after writing upstart jobs to support filesystems that do not support inotify such as overlayfs (LP: #1080841) * fix cloud-config 'lock_password' user setup (LP: #1096423) * debian/README.source: minor improvements. 
-- Scott Moser Fri, 18 Jan 2013 10:12:34 -0500 cloud-init (0.7.1-0ubuntu4) raring; urgency=low * cherry pick relevant patches from trunk up to revision 750 * use short form of '--stderr' argument to logger for better cross distro support (LP: #1083715) * puppet: make installation configurable (LP: #1090205) * chef: add omnibus installation method * fix allowing cloud-config input via user-data to affect the apt mirror selection (LP: #090482) -- Scott Moser Mon, 17 Dec 2012 10:48:23 -0500 cloud-init (0.7.1-0ubuntu3) raring; urgency=low * cherry pick relevant patches from trunk up to revision 745 * fix writing of sudoers when sudoers rule is a string rather than an array (LP: #1079002) * add trailing slash to sudoers files that are written * fix resizefs module when 'noblock' was provided (LP: #1080985) * make sure there are no blank lines before the cloud-init entry in /etc/ca-certificates.conf (LP: #1077020) -- Scott Moser Mon, 03 Dec 2012 21:45:48 -0500 cloud-init (0.7.1-0ubuntu2) raring; urgency=low * debian/watch: add watch file * add 'ubuntu' user to sudoers (LP: #1080717) * set 'ubuntu' user shell to bash -- Scott Moser Mon, 19 Nov 2012 09:38:29 -0500 cloud-init (0.7.1-0ubuntu1) raring; urgency=low * New upstream release. * landscape: install landscape-client package if not installed. only take action if cloud-config is present (LP: #1066115) * landscape: restart landscape after install or config (LP: #1070345) * multipart/archive: do not fail on unknown headers in multipart mime or cloud-archive config (LP: #1065116). * tools/Z99-cloud-locale-test.sh: avoid warning when user's shell is zsh (LP: #1073077) * fix stack trace when unknown user-data input had unicode (LP: #1075756) * split 'apt-update-upgrade' config module into 'apt-configure' and 'package-update-upgrade-install'. The 'package-update-upgrade-install' will be a cross distro module. * fix bug where cloud-config from user-data could not affect system_info settings (LP: #1076811) * add yum_add_repo configuration module for adding additional yum repos * fix public key importing with config-drive-v2 datasource (LP: #1077700) * handle renaming and fixing up of marker names (LP: #1075980) this relieves that burden from the distro/packaging. * group config: fix how group members weren't being translated correctly when the group: [member, member...] format was used (LP: #1077245) * work around an issue with boto > 0.6.0 that lazy loaded the return from get_instance_metadata(). This resulted in failure for cloud-init to install ssh keys. (LP: #1068801) * add power_state_change config module for shutting down system after cloud-init finishes. (LP: #1064665) -- Scott Moser Wed, 14 Nov 2012 15:18:50 -0500 cloud-init (0.7.0-0ubuntu2) quantal; urgency=low * config-drive: copy metadata entry 'hostname' to 'local-hostname' so that config modules (set_hostname) will function as expected (LP: #1061964) -- Scott Moser Fri, 05 Oct 2012 11:45:15 -0400 cloud-init (0.7.0-0ubuntu1) quantal; urgency=low * New upstream release. * minor change to oauth header fix (LP: #978127). * incorporation of 0.7.0 upstream release. -- Scott Moser Mon, 01 Oct 2012 14:19:46 -0400 cloud-init (0.7.0~bzr677-0ubuntu1) quantal; urgency=low * add CloudStack to DataSources listed by dpkg-reconfigure (LP: #1002155) * New upstream snapshot.
* 0440 permissions on /etc/sudoers.d files rather than 0644 * get host ssh keys to the console (LP: #1055688) * MAAS DataSource adjust timestamp in oauth header to one based on the timestamp in the response of a 403. This accounts for a bad local clock. (LP: #978127) * re-start the salt daemon rather than start to ensure config changes are taken. * allow for python unicode types in yaml that is loaded. * cleanup in how config modules get at users and groups. -- Scott Moser Sun, 30 Sep 2012 14:29:04 -0400 cloud-init (0.7.0~bzr659-0ubuntu2) quantal; urgency=low * debian/cloud-init.templates: fix bad template file (LP: #1053239) -- Scott Moser Thu, 20 Sep 2012 09:18:20 -0400 cloud-init (0.7.0~bzr659-0ubuntu1) quantal; urgency=low * New upstream snapshot. * add signal handlers to more cleanly exit * add logging fallback in case logging fails * documentation fix for landscape config (LP: #1042764) * do not write a comment in /etc/hostname (LP: #1052664) * fix incorrect ubuntu mirrors for 'ports' arches [Robbie Basak] * fix generation of hostname based on ip address in datasource (LP: #1050962) [Andy Grimm] * remove 'start networking' from cloud-init-nonet and replace it with container specific fixes (LP: #1031065) * fix landscape configuration so client will run (LP: #1042758) * enable all available datasources (adding AltCloud and None) * fix bug where user data scripts re-ran on upgrade from 10.04 versions (LP: #1049146) -- Scott Moser Wed, 19 Sep 2012 22:08:51 -0400 cloud-init (0.7.0~bzr644-0ubuntu1) quantal; urgency=low * New upstream snapshot. * fix cloud-archives (LP: #1044594) * fix set_passwords for usergroups as a list (LP: #1044553) * fix 'failed to setup apt-pipelining' warning on install due to old 0.6 style usage of cloud-init. -- Scott Moser Sat, 01 Sep 2012 20:11:34 -0400 cloud-init (0.7.0~bzr642-0ubuntu1) quantal; urgency=low * New upstream snapshot. * support using launch-index (ami-launch-index) (LP: #1023177) * usergroup related fixes (LP: #1041384, #1044044, #1044508) -- Scott Moser Fri, 31 Aug 2012 17:04:06 -0400 cloud-init (0.7.0~bzr639-0ubuntu1) quantal; urgency=low * New upstream snapshot. * fix broken ssh_import_id, which would give stack trace -- Scott Moser Tue, 28 Aug 2012 14:09:47 -0400 cloud-init (0.7.0~bzr637-0ubuntu1) quantal; urgency=low * New upstream snapshot. * fix issue with public keys not being added to 'ubuntu' user since the user was not created yet. (LP: #1042459) * only search the top level domain 'instance-data' for the EC2 metadata service, to avoid misconfiguration or unexpected results by searching search entries in /etc/resolv.conf (LP: #1040200) -- Scott Moser Mon, 27 Aug 2012 20:27:06 -0400 cloud-init (0.7.0~bzr634-0ubuntu1) quantal; urgency=low * New upstream snapshot. * support for datasource from config-drive-v2 (LP: #1037567) -- Scott Moser Fri, 24 Aug 2012 17:24:26 -0400 cloud-init (0.7.0~bzr633-0ubuntu1) quantal; urgency=low * New upstream snapshot. * support creating users on boot. remove requirement for a 'ubuntu' user to be previously present in image. (LP: #1028503) * add experimental apt_reboot_if_required flag to reboot if necessary after upgrade or package install (LP: #1038108) * improve mirror selection for a distro: * support arm mirrors (LP: #1028501) * support seeding security mirror (LP: #1006963) * support dns mirrors including availability-zone reference (LP: #1037727) * include a "None" datasource so items like ssh host key generation occur if there is no other metadata service. 
(LP: #906669) * print authorized_keys for users to the console (LP: #1010582) * Add RHEVm and vSphere support as datasource AltCloud [Joseph VLcek] -- Scott Moser Thu, 23 Aug 2012 01:06:34 -0400 cloud-init (0.7.0~bzr614-0ubuntu1) quantal; urgency=low * New upstream snapshot. * disable searching for 'ubuntu-mirror' in local dns to find a local mirror (LP: #974509) * emit the cloud-config event (LP: #1028674) * write timestamps to console on reboot and shutdown (LP: #1018554) -- Scott Moser Fri, 03 Aug 2012 14:55:37 -0400 cloud-init (0.7.0~bzr608-0ubuntu1) quantal; urgency=low * New upstream snapshot. * fix issue with EC2 datasource that prevented /mnt from being mounted. -- Scott Moser Mon, 16 Jul 2012 16:49:55 -0400 cloud-init (0.7.0~bzr604-0ubuntu1) quantal; urgency=low * New upstream snapshot. * add cc_write_file for injecting files via cloud-config (LP: #1012854) * fix issue with empty user data * remove some un-needed warnings to console output in DataSourceOVF * allow user-data scripts output through to the console -- Scott Moser Thu, 12 Jul 2012 16:11:01 -0400 cloud-init (0.7.0~bzr583-0ubuntu1) quantal; urgency=low * New upstream snapshot. * debian/control: wrap-and-sort * debian/control: actually depend on software-properties-common * debian/control: depend on python-cheetah again instead of python-tempita -- Scott Moser Mon, 09 Jul 2012 17:41:22 -0400 cloud-init (0.7.0~bzr564-0ubuntu2) quantal; urgency=low * debian/control: Build-Depends on python-setuptools (LP: #1022101) -- Angel Abad Sat, 07 Jul 2012 18:43:05 +0200 cloud-init (0.7.0~bzr564-0ubuntu1) quantal; urgency=low * New upstream snapshot. Thanks to Joshua Harlow for hard work. * depend on software-properties-common rather than python-software-properties (LP: #1021418) -- Scott Moser Fri, 06 Jul 2012 17:31:01 -0400 cloud-init (0.6.3-0ubuntu3) quantal; urgency=low * grub-legacy-ec2: add missing dependency on ucf (LP: #960336). -- Robie Basak Sun, 24 Jun 2012 05:10:13 +0100 cloud-init (0.6.3-0ubuntu2) quantal; urgency=high * Added -generic to Xen kernels list since -virtual has been dropped with Quantal. (LP: #1005551) -- Ben Howard Tue, 29 May 2012 12:59:01 -0600 cloud-init (0.6.3-0ubuntu1) precise; urgency=low * New upstream release. * improve chef examples for working configurations on 11.10 and 12.04 [Lorin Hochstein] (LP: #960564) * fix bug in landscape module if /etc/landscape did not exist (LP: #978329) -- Scott Moser Wed, 11 Apr 2012 00:05:00 -0400 cloud-init (0.6.3~bzr554-0ubuntu1) precise; urgency=low * New upstream snapshot. * Fix bug in Chef support that required 'validation_cert' (LP: #960547) * Provide user-friendly message when a user ssh's in with an invalid locale (LP: #960547) * Support reading a url reference to cloud-config from the kernel command line. -- Scott Moser Thu, 05 Apr 2012 01:24:42 -0400 cloud-init (0.6.3~bzr551-0ubuntu1) precise; urgency=low * New upstream snapshot. * support running resize2fs in the background (default=off) (LP: #961226) -- Scott Moser Thu, 22 Mar 2012 14:33:59 -0400 cloud-init (0.6.3~bzr548-0ubuntu1) precise; urgency=low * New upstream snapshot. * If public-keys is a string, split it into multiple keys on newline This specifically helps the MAAS data source, and should not negatively affect others. -- Scott Moser Mon, 19 Mar 2012 13:50:50 -0400 cloud-init (0.6.3~bzr547-0ubuntu1) precise; urgency=low * New upstream snapshot. * rename DataSourceMaaS to DataSourceMAAS. 
* support public-keys in DataSourceMAAS * Warn in user-data processing on non-multipart, non-handled data * CloudStack data source added (not enabled by default) * fix bug in cloud-init.postinst where the name used was wrong causing config-apt-pipelining to run more than intended -- Scott Moser Fri, 16 Mar 2012 14:12:38 -0400 cloud-init (0.6.3~bzr539-0ubuntu3) precise; urgency=low * make maas config file only readable by root (LP: #954721) -- Scott Moser Wed, 14 Mar 2012 01:19:32 -0400 cloud-init (0.6.3~bzr539-0ubuntu2) precise; urgency=low [Cosmin Luta] * add dependency on python-oauth (LP: #953915) -- Scott Moser Tue, 13 Mar 2012 11:36:11 -0400 cloud-init (0.6.3~bzr539-0ubuntu1) precise; urgency=low * New upstream snapshot. * add ability to configure Acquire::http::Pipeline-Depth via cloud-config setting 'apt_pipelining' (LP: #942061) * if cloud-config settings removed default certificates (remove-defaults), then seed package ca-certificates to not install new ones on upgrade. * run-parts now uses internal implementation rather than separate command. * add MaaS datasource (LP: #942061) * debian/cloud-init.postinst: address population of apt_pipeline setting on installation. * debian/cloud-init.postinst: support configuring cloud-init maas datasource via preseed values cloud-init/maas-metadata-url and cloud-init/maas-credentials. (LP: #942061) * debian/cloud-init.postinst: support for (LP: #924375) -- Scott Moser Fri, 09 Mar 2012 16:37:01 -0500 cloud-init (0.6.3~bzr530-0ubuntu1) precise; urgency=low * New upstream snapshot. - fix DataSourceNoCloud seeded from local or cmdline (LP: #942695) - change 'islxc' to 'iscontainer' and use 'running-in-container' utility from upstart rather than 'lxc-is-container' (LP: #941955) - Do not fail on bad part handlers, instead catch error and log -- Scott Moser Tue, 28 Feb 2012 19:15:19 -0500 cloud-init (0.6.3~bzr527-0ubuntu1) precise; urgency=low * New upstream snapshot. - exit 0 in cloud-init if no metadata is found (nothing to do) - documentation improvements - support network config in DataSourceNoCloud -- Scott Moser Fri, 17 Feb 2012 17:11:50 -0500 cloud-init (0.6.3~bzr519-0ubuntu1) precise; urgency=low * New upstream snapshot. - [Mike Milner] add support for managing CA Certificates (LP: #915232) - in ci-info lines, use '.' for empty field for easier machine reading - support empty lines in "#include" files (LP: #923043) - [Jef Bauer] support configuration of salt minions (LP: #927795) - DataSourceOVF: only search for OVF data on ISO9660 filesystems (LP: #898373) - DataSourceConfigDrive: support getting data from openstack config drive (LP: #857378) - [Juerg Haefliger] formatting and pylint cleanups * increase timeouts for initial config check for metadata service to address slow metadata service in openstack * add awareness of ConfigDrive data source -- Scott Moser Thu, 16 Feb 2012 17:27:05 -0500 cloud-init (0.6.3~bzr502-0ubuntu1) precise; urgency=low * New upstream snapshot. - [Mike Milner] add test case framework (LP: #890851) - [Juerg Haefliger] fix pylint warnings (LP: #914739) - fix regression where ec2 mirrors were not selected (LP: #915282) -- Scott Moser Thu, 12 Jan 2012 17:56:52 +0100 cloud-init (0.6.3~bzr497-0ubuntu1) precise; urgency=low * New upstream snapshot. - cloud-config support for configuring apt-proxy - selection of local mirror based on presence of 'ubuntu-mirror' dns entry in local domain.
(LP: #897688) - DataSourceEc2: more resilliant to slow metadata service (LP: #894279) - close stdin in all programs launched by cloud-init (LP: #903993) - revert management of /etc/hosts to 0.6.1 style (LP: #890501, LP: #871966) - write full ssh keys to console for easy machine consumption (LP: #893400) - put INSTANCE_ID environment variable in bootcmd scripts - add 'cloud-init-per' script for easily running things with a given freq (this replaced cloud-init-run-module) - support configuration of landscape-client via cloud-config (LP: #857366) - part-handlers now get base64 decoded content rather than 2xbase64 encoded in the payload parameter. (LP: #874342) -- Scott Moser Thu, 22 Dec 2011 04:07:38 -0500 cloud-init (0.6.2-0ubuntu2) precise; urgency=low * Build using dh_python2. LP: #904248. * debian/rules: Explicitly set DEB_PYTHON2_MODULE_PACKAGES = cloud-init. -- Matthias Klose Sat, 17 Dec 2011 21:08:23 +0000 cloud-init (0.6.2-0ubuntu1) precise; urgency=low * New upstream release -- Scott Moser Thu, 27 Oct 2011 23:05:15 -0400 cloud-init (0.6.1-0ubuntu20) oneiric; urgency=low * fix broken path if local-hostname was not in metadata (LP: #857380) * redirect output of 'start networking' in 'cloud-init-nonet' to /dev/null * include GPLv3 in source tree -- Scott Moser Fri, 23 Sep 2011 09:24:27 -0400 cloud-init (0.6.1-0ubuntu19) oneiric; urgency=low * If local-hostname is not in meta-data, attempt to look up hostname in an alias in /etc/hosts. This will avoid setting domain portion of fqdn to 'localdomain' in some cases (LP: #850206). -- Scott Moser Wed, 14 Sep 2011 15:15:00 -0400 cloud-init (0.6.1-0ubuntu18) oneiric; urgency=low * minor documentation improvement. [Mike Moulton, Avishai Ish-Shalom] * Chef support fixes. support for environment and initial attr (LP: #845208) -- Scott Moser Tue, 13 Sep 2011 17:02:48 -0400 cloud-init (0.6.1-0ubuntu17) oneiric; urgency=low * fix issues with chef (LP: #845161) * be more forgiving on metadata for public-keys (LP: #845155) -- Scott Moser Fri, 09 Sep 2011 14:19:03 -0700 cloud-init (0.6.1-0ubuntu16) oneiric; urgency=low * catch up with trunk at revision 439 * warn on failure to set hostname (LP: #832175) * properly wait for all static interfaces to be up before cloud-init runs (depends on fix in LP:# 838968). * in DataSources NoCloud and OVF, do not set hostname to the static value 'ubuntuhost' if local-hostname is not in metadata (LP: #838280) * improve the way ssh_authorized_keys is updated, so that the values given will be used. (LP: #434076, LP: #833499) * cloud-init-notnet.conf: minor changes to config -- Scott Moser Thu, 01 Sep 2011 21:14:09 -0400 cloud-init (0.6.1-0ubuntu15) oneiric; urgency=low * catch up with trunk at revision 431 * add network debug info to console when cloud-init runs (LP: #828186) * fix issue where subprocesses (apt-add-repository) where given the console and would attempt to prompt user and hang boot (LP: #831505) * add awareness of ecdsa to cc_ssh -- Scott Moser Tue, 23 Aug 2011 00:01:01 -0400 cloud-init (0.6.1-0ubuntu14) oneiric; urgency=low * change the handling of user-data (LP: #810044) * boothooks will now run more than once as they were intended * cloud-config and user-scripts will be updated from user data every boot * Add a second type of part-handler that will be called on every boot * fix bad handling of /etc/hosts if manage_etc_hosts was false -- Scott Moser Mon, 08 Aug 2011 12:46:56 -0500 cloud-init (0.6.1-0ubuntu13) oneiric; urgency=low * do not install 92-uec-upgrade-available as a motd hook. 
This file was installed but did not do anything since updates-check was removed. * support multiple staticly configured network devices, as long as all of them come up early (LP: #810044) [Marc Cluet] * add support for passing mcollective keys via cloud-config * add support for 'include-once' type. include-once urls are only retrieved once-per-instance rather than on every boot. -- Scott Moser Mon, 01 Aug 2011 16:45:40 -0400 cloud-init (0.6.1-0ubuntu12) oneiric; urgency=low * do not give trace on failure to resize in lxc container (LP: #800856) * increase the timeout on url gets for "seedfrom" values (LP: #812646) * do not write entries for ephemeral0 on t1.micro (LP: #744019) [Adam Gandalman] * improve the updating of /etc/hosts with correct fqdn when possible (LP: #812539) [Avishai Ish-Shalom] * add chef support (cloudinit/CloudConfig/cc_chef.py) (LP: #798844) -- Scott Moser Thu, 21 Jul 2011 05:51:03 -0400 cloud-init (0.6.1-0ubuntu11) oneiric; urgency=low [Marc Cluet] * sanitize hosts file for system's hostname to 127.0.1.1 (LP: #802637) -- Scott Moser Thu, 30 Jun 2011 14:12:47 -0400 cloud-init (0.6.1-0ubuntu10) oneiric; urgency=low * sync with trunk (rev 405) * fix cloud-init in ubuntu lxc containers (LP: #800824) -- Scott Moser Tue, 28 Jun 2011 06:42:45 -0400 cloud-init (0.6.1-0ubuntu9) oneiric; urgency=low * sync with trunk (rev 404) * make metadata urls configurable, to support eucalyptus in STATIC or SYSTEM modes (LP: #761847) * support disabling byobu in cloud-config (LP: #797336) * guarantee that ssh_config runs before sshd starts (LP: #781101) * make prefix for keys added to /root/.ssh/authorized_keys configurable and add 'no-port-forwarding,no-agent-forwarding,no-X11-forwarding' to the default (LP: #798505) * make 'cloud-config ready' command configurable (LP: #785551) * make fstab fields used to 'fill in' shorthand mount entries configurable (LP: #785542) * read sshd_config to properly get path for authorized_keys (LP: #731849) -- Scott Moser Fri, 17 Jun 2011 12:18:34 -0400 cloud-init (0.6.1-0ubuntu8) natty; urgency=low * instead of including /boot/grub, create it in postinst of grub-legacy-ec2. -- Scott Moser Fri, 15 Apr 2011 13:01:17 -0400 cloud-init (0.6.1-0ubuntu7) natty; urgency=low * grub-legacy-ec2: add /boot/grub directory so installation does not depend on it already existing (LP: #759885) -- Scott Moser Wed, 13 Apr 2011 11:03:04 -0400 cloud-init (0.6.1-0ubuntu6) natty; urgency=low * avoid upgrade prompt for grub-pc when devices are named xvdX (LP: #752361) * catchup to trunk cloud-init (rev 395) -- Scott Moser Wed, 06 Apr 2011 06:46:55 -0400 cloud-init (0.6.1-0ubuntu5) natty; urgency=low * fix --purge of grub-legacy-ec2 package (LP: #749444) * catchup to trunk cloud-init (rev 394) * support user-data formated in dos format by converting to unix for user-scripts, boothooks, and upstart jobs (LP: #744965) * removal of some debug code, minor documentation fix -- Scott Moser Mon, 04 Apr 2011 13:20:27 -0400 cloud-init (0.6.1-0ubuntu4) natty; urgency=low * catch up to trunk cloud-init (rev 389). * fix bug in part-handler code, that broke part handlers (LP: #739694) * fix sporadic resizefs failure (LP: #726938) -- Scott Moser Mon, 21 Mar 2011 22:06:59 -0400 cloud-init (0.6.1-0ubuntu3) natty; urgency=low * catch up to trunk cloud-init (rev 385). 
* attempt to install packages on failed apt-get update (LP: #728167) * enabled timezone and mcollective cloud-config plugins -- Scott Moser Fri, 04 Mar 2011 21:17:21 -0500 cloud-init (0.6.1-0ubuntu2) natty; urgency=low * grub-legacy-ec2: Use dpkg-query --control-path instead of hard-coding a path to debconf templates file, for compatibility with multiarch. -- Scott Moser Tue, 01 Mar 2011 23:23:55 -0500 cloud-init (0.6.1-0ubuntu1) natty; urgency=low * New upstream release. * fix for puppet configuration options (LP: #709946) [Ryan Lane] * fix pickling of DataSource, which broke seeding. * turn resize_rootfs default to True * avoid mounts in DataSourceOVF if 'read' on device fails 'mount /dev/sr0' for an empty virtual cdrom device was taking 18 seconds * add 'manual_cache_clean' option to select manual cleaning of the /var/lib/cloud/instance/ link, for a data source that might not be present on every boot * make DataSourceEc2 retries and timeout configurable * add 'bootcmd' like 'runcmd' to cloud-config syntax for running things early * move from '#opt_include' in config file format to conf_d. now local config files should live in /etc/cloud/cloud.cfg.d/ * move /etc/cloud/distro.cfg to /etc/cloud/cloud.cfg.d/90_dpkg.cfg * allow /etc/hosts to be written from hosts.tmpl. which allows getting local-hostname into /etc/hosts (LP: #720440) * better handle startup if there is no eth0 (LP: #714807) * update rather than append in puppet config [Marc Cluet] * add cloud-config for mcollective [Marc Cluet] -- Scott Moser Sat, 19 Feb 2011 01:16:10 -0500 cloud-init (0.6.0-0ubuntu4) natty; urgency=low * fix running of user scripts (LP: #711480) (cherry pick 344) * fix 2 lintian warnings -- Scott Moser Tue, 01 Feb 2011 16:15:30 -0500 cloud-init (0.6.0-0ubuntu3) natty; urgency=low * make a better attempt at deciding if DataSourceEc2 should be used on first install or upgrade * fix behavior if def_log_file is empty in cloud-config (cherry pick 333) * improve comment strings in rsyslog config (cherry pick 334) * do not package cloud-init query (cherry pick 335) * add previous-instance-id and previous-datasource to cloud/data and cloud/instance/datasource files (cherry pick 337) * allow setting of passwords and enabling/disabling ssh password auth via cloud-config (cherry pick 338) -- Scott Moser Mon, 31 Jan 2011 12:48:39 -0500 cloud-init (0.6.0-0ubuntu2) natty; urgency=low * add a debian/README.source file * fix bug in fixing permission on /var/log/cloud-init.log (cherry pick) * remove dependency on update-motd as updates-check was removed * fix failure on cloud-init package purge * add configuration of DataSources via debconf. Default to not searching Ec2. (LP: #635188) * fix naming of pre-processed (now user-data.txt.i) (cherry pick) * upgrade existing content in /var/lib/cloud to 0.6.x format -- Scott Moser Thu, 27 Jan 2011 16:32:44 -0500 cloud-init (0.6.0-0ubuntu1) natty; urgency=low * New upstream release. 
* fix permissions on cloud-init.log so syslog can write to it (LP: ##704509) * rework of /var/lib/cloud layout * remove updates-check (LP: #653220) * support resizing root partition on first boot (enabled by default) * added cloud-config options for setting hostname, phone_home * indicate "all the way up" with message to console and file creation in /var/lib/cloud/instance/ (LP: #653271) * write ssh keys to console late in boot to ensure they're in console buffer * add support for redirecting output of cloud-init, cloud-config, cloud-final via the config file, or user data config file * add support for posting data about the instance to a url (phone_home) * add minimal OVF transport (iso) support * make DataSources that are attempted dynamic and configurable from config. config option 'cloud_type' replaced by 'datasource_list' * add 'timezone' option to cloud-config (LP: #645458) * Added an additional archive format, that can be used for multi-part input to cloud-init. This may be more user friendly then mime-multipart (LP: #641504) * add support for reading Rightscale style user data (LP: #668400) * make the message on 'disable_root' more clear (LP: #672417) * do not require public key if private is given in ssh cloud-config (LP: #648905) -- Scott Moser Wed, 26 Jan 2011 17:28:36 -0500 cloud-init (0.5.15-0ubuntu4) natty; urgency=low * Rebuild with python 2.7 as the python default. -- Matthias Klose Wed, 08 Dec 2010 15:01:36 +0000 cloud-init (0.5.15-0ubuntu3) maverick; urgency=low * do not use ec2 ubuntu archive if instance is VPC (LP: #615545) -- Scott Moser Thu, 16 Sep 2010 04:28:55 -0400 cloud-init (0.5.15-0ubuntu2) maverick; urgency=low * grub-legacy-ec2: boot with console=hvc0 (LP: #606373) -- Scott Moser Wed, 15 Sep 2010 16:41:48 -0400 cloud-init (0.5.15-0ubuntu1) maverick; urgency=low * New upstream release. * fix /etc/fstab cloudconfig entries for t1.micro and change default fstab values for ephemeral0 to nobootwait (LP: #634102) * grub-legacy-ec2: do not write chainload for grub2 to menu.lst (LP: #627451) * seed grub-pc correctly so update-grub runs on ec2 or uec(LP: #623609) -- Scott Moser Sun, 12 Sep 2010 15:23:39 -0400 cloud-init (0.5.14-0ubuntu5) maverick; urgency=low * add missing imports for cc_puppet (LP: #632744) * append to apt_sources files rather than truncating (LP: #627597) * get double commented lines into sources.list (LP: #627439) -- Scott Moser Wed, 08 Sep 2010 10:31:58 -0400 cloud-init (0.5.14-0ubuntu4) maverick; urgency=low * add commented out entries for partner, backports, and multiverse (LP: #620572) -- Scott Moser Thu, 26 Aug 2010 16:44:48 -0400 cloud-init (0.5.14-0ubuntu3) maverick; urgency=low * fix syntax error in cloudinit/util.py (failed installation) -- Scott Moser Tue, 17 Aug 2010 22:22:06 -0400 cloud-init (0.5.14-0ubuntu2) maverick; urgency=low * fix bug preventing 'seedfrom' from working (LP:617400) -- Scott Moser Tue, 17 Aug 2010 15:49:13 -0400 cloud-init (0.5.14-0ubuntu1) maverick; urgency=low * New upstream release. 
- support for reading metadata and userdata from filesystem - support for boot without metadata at all -- Scott Moser Thu, 12 Aug 2010 14:45:28 -0400 cloud-init (0.5.13-0ubuntu3) maverick; urgency=low * grub-legacy-ec2: fix 'apt-get --reinstall' and dpkg-divert (LP: #611812) * enable -virtual kernels as "xen" kernels (pv_ops now functional) * fix bad syntax in cloud-init-run-module.py -- Scott Moser Mon, 02 Aug 2010 16:26:48 -0400 cloud-init (0.5.13-0ubuntu2) maverick; urgency=low * debian/control: drop ssh-import as a recommends, as this has been subsumed by openssh-server -- Dustin Kirkland Sat, 24 Jul 2010 21:02:40 +0200 cloud-init (0.5.13-0ubuntu1) maverick; urgency=low * New upstream release. * invoke dpkg with --force-confold (LP: #607642) -- Scott Moser Wed, 21 Jul 2010 11:58:53 -0400 cloud-init (0.5.12-0ubuntu8) maverick; urgency=low * update-grub-legacy-ec2: - add code to stop use of 'uuid' grub syntax - change fallback grub device from (hd0,0) to (hd0) - change timeout in menu.lst to 0 * grub-legacy-ec2: add grub-set-default functionality (LP: #605961) -- Scott Moser Thu, 15 Jul 2010 13:07:01 -0400 cloud-init (0.5.12-0ubuntu7) maverick; urgency=low * update-grub-legacy-ec2: - force setting of indomU so output of build process has a menu.lst with -ec2 kernels listed. - remove 'quite splash' from kernel options - make sure grub_root_device is set to hd0 in image build -- Scott Moser Tue, 13 Jul 2010 16:33:51 -0400 cloud-init (0.5.12-0ubuntu6) maverick; urgency=low * fix installation error * add quilt-setup rule for package development -- Scott Moser Tue, 13 Jul 2010 12:04:21 -0400 cloud-init (0.5.12-0ubuntu5) maverick; urgency=low * sync with upstream r226. * fix bug where nfs/network mounts could not be specified (LP: #603329) * manage hostname setting better (LP: #596993) * add legacy-grub-ec2 package. -- Scott Moser Thu, 08 Jul 2010 22:24:59 -0400 cloud-init (0.5.12-0ubuntu4) maverick; urgency=low * handle hostname managing better with ebs root. (LP: #596993) -- Scott Moser Wed, 07 Jul 2010 11:54:10 -0400 cloud-init (0.5.12-0ubuntu3) maverick; urgency=low * fix cloud-boothook input type (LP: #600799) * sync with upstream. -- Scott Moser Thu, 01 Jul 2010 21:19:13 -0400 cloud-init (0.5.12-0ubuntu2) maverick; urgency=low * fix cloud config 'apt-update-upgrade' failure due to missing import -- Scott Moser Mon, 21 Jun 2010 15:08:32 -0400 cloud-init (0.5.12-0ubuntu1) maverick; urgency=low * New upstream release. * fix cloud-init-run-module to allow 'always' (LP: #568139) * add support for setting debconf selections * add cloud-config support for debconf selections (LP: #582667), byobu enablement, and ssh-import-lp-id -- Scott Moser Fri, 18 Jun 2010 15:48:14 -0400 cloud-init (0.5.11-0ubuntu2) maverick; urgency=low * handle renaming sem/markers for config items to avoid running per-instance again after package upgrade * pull 'config-' prefix on sem/ items for cloud-config modules from 0.5.12 -- Scott Moser Fri, 18 Jun 2010 12:52:10 -0400 cloud-init (0.5.11-0ubuntu1) maverick; urgency=low * New upstream release. * remove ec2-get-info. It is replaced by cloudutils ec2metadata * use python logging * reduce number of upstart jobs * add "boothook" user data type * Switch to dpkg-source 3.0 (quilt) format -- Scott Moser Fri, 18 Jun 2010 01:04:58 -0400 cloud-init (0.5.10-0ubuntu1) lucid; urgency=low * New upstream release. 
* ec2-get-info: fix for python traceback * ephemeral mount will show up in /etc/mtab or df on first boot LP: #527825 -- Scott Moser Fri, 26 Mar 2010 00:57:28 -0400 cloud-init (0.5.9-0ubuntu1) lucid; urgency=low * New upstream release. * rename apt list files. 'ubuntu-bug' now works without update (LP: #513060) * replace 'cloudconfig' entries in fstab rather than appending (LP: #524562) * fix to fstab writing on ebs-root instances -- Scott Moser Mon, 08 Mar 2010 13:07:02 -0500 cloud-init (0.5.8-0ubuntu1) lucid; urgency=low * New upstream release. * cache data from metadata service, LP: #527364 * fix format of cron entry in cron.d/cloudinit-updates * package egg-info file -- Scott Moser Tue, 02 Mar 2010 15:48:04 -0500 cloud-init (0.5.7-0ubuntu4) lucid; urgency=low * fix empty package: previous package was empty for cloud-init due to adding the ec2-init package -- Scott Moser Fri, 26 Feb 2010 17:06:05 -0500 cloud-init (0.5.7-0ubuntu3) lucid; urgency=low * debian/control: - recommend ssh-import, such that the ssh-import-lp-id utility is available in UEC images for convenient importing of ssh public keys stored in Launchpad, LP: #524101 - build a transitional ec2-init package to handle the rename gracefully on upgrades, LP: #527187 -- Dustin Kirkland Thu, 25 Feb 2010 16:22:10 -0600 cloud-init (0.5.7-0ubuntu2) lucid; urgency=low * fix packaging bug that put the message-of-the-day hook file into a subdir of etc/update-motd.d, remove old file (LP: #524999) -- Scott Moser Fri, 19 Feb 2010 21:02:10 -0500 cloud-init (0.5.7-0ubuntu1) lucid; urgency=low * New upstream release. * run cloud-init early in boot process (LP: #504883, #524516) -- Scott Moser Fri, 19 Feb 2010 18:27:45 -0500 cloud-init (0.5.6-0ubuntu1) lucid; urgency=low * New upstream release. * supports 'runcmd' in cloud-config * enable the update check code (LP: #524258) * fix retry_url in boto_utils.py when metadata service not around (LP: #523832) * run cloud-config-puppet.conf later (LP: #523625) [ Scott Moser 0.5.5 ] * New upstream release, supports checking for updates -- Scott Moser Fri, 19 Feb 2010 03:13:22 -0500 cloud-init (0.5.4-0ubuntu1) lucid; urgency=low * New upstream release. * fix broken user-data scripts * merge mathiaz work for cloud-config-puppet * fix bug causing apt update to fail * rename EC2Init class to CloudInit * only set hostname once per instance. (LP: #514492) -- Scott Moser Wed, 17 Feb 2010 09:40:30 -0500 cloud-init (0.5.3-0ubuntu2) lucid; urgency=low * divert ureadahead.conf in postinst (LP: #499520) * lintian cleanups -- Scott Moser Fri, 05 Feb 2010 15:48:21 -0500 cloud-init (0.5.3-0ubuntu1) lucid; urgency=low * Rename ec2-init to cloud-init. New upstream release.
* set hostname to ip-u.x.y.z if local-hostname provides a ip addr (LP: #475354) -- Scott Moser Thu, 04 Feb 2010 03:00:05 -0500 ec2-init (0.5.2-0ubuntu1) lucid; urgency=low * new upstream release -- Scott Moser Fri, 29 Jan 2010 13:30:52 -0500 ec2-init (0.5.1-0ubuntu1) lucid; urgency=low * new upstream release -- Scott Moser Fri, 22 Jan 2010 16:19:30 -0500 ec2-init (0.5.0-0ubuntu4) lucid; urgency=low * add an upstart job to get ssh keys regenerated and written to console (LP: #506599, LP: #507070) -- Scott Moser Thu, 14 Jan 2010 13:10:55 -0500 ec2-init (0.5.0-0ubuntu3) lucid; urgency=low * work around difference in uec/ec2 metadata service (LP:506332) -- Scott Moser Tue, 12 Jan 2010 11:33:11 -0500 ec2-init (0.5.0-0ubuntu2) lucid; urgency=low * pull changes from devel branch to get functional on ec2 -- Scott Moser Mon, 11 Jan 2010 12:03:45 -0500 ec2-init (0.5.0-0ubuntu2) lucid; urgency=low * new upstream release -- Scott Moser Thu, 07 Jan 2010 22:00:38 -0500 ec2-init (0.4.999-0ubuntu8) lucid; urgency=low * fix mirror selection for us-west-1 (LP: #494185) -- Scott Moser Fri, 11 Dec 2009 15:12:19 -0500 ec2-init (0.4.999-0ubuntu7) karmic; urgency=low * work around differences in eucalyptus ephemeral mounts (LP: #458850) * get 'ec2:' prefix on ssh public key fingerprint (LP: #458576) -- Scott Moser Mon, 26 Oct 2009 16:18:06 -0400 ec2-init (0.4.999-0ubuntu6) karmic; urgency=low * make sources.list components for 'karmic-security' the same as 'karmic' and 'karmic-updates' (main, restricted) (LP: #457866) -- Scott Moser Thu, 22 Oct 2009 08:55:58 -0400 ec2-init (0.4.999-0ubuntu5) karmic; urgency=low * write regenerate_ssh_host_keys output directly to /dev/console to ensure that it gets there. (LP: #451881) -- Scott Moser Wed, 21 Oct 2009 17:23:38 -0400 ec2-init (0.4.999-0ubuntu4) karmic; urgency=low * set locale to en_US.UTF-8 if get_location_from_availability_zone doesn't have a match (LP: #407949) -- Scott Moser Tue, 20 Oct 2009 09:57:49 -0400 ec2-init (0.4.999-0ubuntu3) karmic; urgency=low * split running of user-data out of ec2-init into ec2-init-user-data run this at S99. (LP : #431255) -- Scott Moser Fri, 25 Sep 2009 14:17:17 -0400 ec2-init (0.4.999-0ubuntu2) karmic; urgency=low * remove rightscale-init from package (see LP: #434181, LP: #434693) * fix lintian warning, specify path to GPL-3 * replace multiple '| logger' in regenerate_ssh_host_keys single one * add ec2-is-compat-env, and disable init script by default. it can be enabled by setting 'compat=1' in /etc/ec2-init/is-compat-env -- Scott Moser Thu, 24 Sep 2009 16:32:42 -0400 ec2-init (0.4.999-0ubuntu1) karmic; urgency=low * New upstream release -- Soren Hansen Wed, 26 Aug 2009 01:23:52 +0200 ec2-init (0.4.99-0ubuntu3) karmic; urgency=low * Also update /etc/default/locale when setting the locale based on locality. -- Soren Hansen Tue, 11 Aug 2009 21:49:33 +0200 ec2-init (0.4.99-0ubuntu2) karmic; urgency=low * Consolidate build-dependencies a bit. * Sync default configuration with code. * Create /var/lib/ec2 in package. * Make ec2-get-info more robust in testing environments. * Handle missing public keys more gracefully. * Set proper ownership of user's authorized_keys. -- Soren Hansen Tue, 11 Aug 2009 09:54:16 +0200 ec2-init (0.4.99-0ubuntu1) karmic; urgency=low * Massive rewrite. (LP: #407871, #407919, #308530, #407949, #407950, #407892 and probably many others) * First Ubuntu version with the upstream tarball split out. * Switch to arch: all. There is no arch specific code here. 
-- Soren Hansen Tue, 11 Aug 2009 08:33:33 +0200 ec2-init (0.3.4ubuntu9) karmic; urgency=low * Really include the action id in the semaphore filename. -- Soren Hansen Sat, 11 Jul 2009 09:50:31 +0200 ec2-init (0.3.4ubuntu8) karmic; urgency=low * Add Vcs-Bzr header to debian/control. * Include the action id in the semaphore filename. -- Soren Hansen Sat, 11 Jul 2009 02:35:43 +0200 ec2-init (0.3.4ubuntu7) karmic; urgency=low * Re-add accidentally removed locale template. * Fix indentation in init script. -- Soren Hansen Fri, 10 Jul 2009 23:47:27 +0200 ec2-init (0.3.4ubuntu6) karmic; urgency=low * Replace calls to ec2-get-data with calls to ec2-get-info. * Make this package arch: all. -- Soren Hansen Fri, 10 Jul 2009 23:16:35 +0200 ec2-init (0.3.4ubuntu5) karmic; urgency=low * debian/init: Move instance reboot detection out of python scripts and move them to the init script. * debian/ec2-set-defaults.py: - Point to the right location for locate (LP: #387611) - Default to UTC (LP: #341060) * debian/ec2-set-apt-sources.py: If you cant contact EC2 then use the regular archive (LP: #387027) * debian/ec2-setup-hostname.py: Use the local hostname in /etc/hosts and dont change once rebooted. (LP: #352745) -- Chuck Short Wed, 08 Jul 2009 09:48:49 -0400 ec2-init (0.3.4ubuntu4) karmic; urgency=low * debian/init: Run update-motd regardless whether its a firstboot or not. * debian/init: Fix comments (LP: #373057) * debian/control: Add update-motd as a depends. * ec2-set-defaults.py: Wait for network to become available. (LP: #308530) -- Chuck Short Thu, 28 May 2009 05:04:31 -0400 ec2-init (0.3.4ubuntu3) karmic; urgency=low * debian/init: Move init script to run before ssh and regenerate the ssh host kes in the init script rather than /etc/rc.local (LP: #370628) * ec2-set-apt-sources.py: - Move sources.list to /var/ec2 so it doesnt get removed after user reboots. * ec2-set-defaults.py: - Move locale to /var/ec2/ so it doesnt get remove after user reboots. * ec2-set-hostname.py - Create an /etc/hostname as well. -- Chuck Short Thu, 14 May 2009 11:11:49 -0400 ec2-init (0.3.4ubuntu2) karmic; urgency=low * Really change the locale when setting up an instance. (LP: #341066) * Run ec2-run-user-data script last. (LP: #373055) * Minor comment tweaks. (LP: #373057) -- Chuck Short Wed, 13 May 2009 13:41:35 -0400 ec2-init (0.3.4ubuntu1) karmic; urgency=low * Add more smarts to ec2 instance bring up. (LP: #371936) -- Chuck Short Tue, 05 May 2009 08:59:54 -0400 ec2-init (0.3.3ubuntu12) jaunty; urgency=low * ec2-run-user-data.py: Fix error. -- Chuck Short Tue, 07 Apr 2009 08:14:07 -0400 ec2-init (0.3.3ubuntu11) jaunty; urgency=low * debian/control: - Add python-cheetah and python-apt as a dependency. * debian/ec2-config.cfg: - Remove distro due to the change in ec2-set-apt-sources.py * debian/inistall - Install the templates in the right place. * ec2-set-apt-sources.py: - Use python-apt to update the sources.list. -- Chuck Short Wed, 01 Apr 2009 13:58:43 -0400 ec2-init (0.3.3ubuntu10) jaunty; urgency=low * ec2-set-hostname.py: - Use template for /etc/hosts creation. - Dont use public_hostname in /etc/hosts. (LP: #352745) -- Chuck Short Wed, 01 Apr 2009 08:48:05 -0400 ec2-init (0.3.3ubuntu9) jaunty; urgency=low * ec2-set-apt-sources.py: - Use a template to generate the sources.list and generate it based on the lsb_release. -- Chuck Short Tue, 31 Mar 2009 15:15:55 -0400 ec2-init (0.3.3ubuntu8) jaunty; urgency=low * ec2-set-apt-sources.py: - Add the ubuntu-on-ec2 ppa. 
-- Chuck Short Tue, 31 Mar 2009 09:37:13 -0400 ec2-init (0.3.3ubuntu7) jaunty; urgency=low * debian/rules: Fix typo. -- Chuck Short Sun, 22 Mar 2009 17:14:16 -0400 ec2-init (0.3.3ubuntu6) jaunty; urgency=low * Set the configuration file to jaunty. * ec2-fetch-credentials: Fix typo. * ec2-set-defaults.py: - Remove timezone change when booting the instance. - Redirect output to /dev/null. * ec2-set-apt-sources.py: - Run apt-get update after the /etc/apt/sources.list and redirect the output to /dev/null. * rightscale-init: Updated rightscale-init -- Chuck Short Thu, 19 Mar 2009 20:52:59 -0400 ec2-init (0.3.3ubuntu5) jaunty; urgency=low * debian/ec2-config.cfg: - Add disable root option. * debian/ec2-init.rightscale-init.init: - Add rightscale detection script. * ec2-get-info.py: - Display the information about an AMI instance. -- Chuck Short Mon, 16 Mar 2009 08:54:49 -0400 ec2-init (0.3.3ubuntu4) jaunty; urgency=low * ec2-fetch-credentials.py: - Allow user to choose which user they wish to configure for. - Allow user to disable root user if they wish to. * ec2-set-defaults.py: - Set default timezone to UTC. - Set locale depending on zone. * debian/init: - Removed nash plugin. - Add ec2-set-defaults. -- Chuck Short Wed, 04 Mar 2009 08:33:01 -0500 ec2-init (0.3.3ubuntu3~intrepid4) intrepid; urgency=low * set distro to intrepid. -- Chuck Short Thu, 26 Feb 2009 10:28:06 -0500 ec2-init (0.3.3ubuntu3) jaunty; urgency=low * debian/ec2-init: Log results of ec2-run-user-data to syslog. * ec2-run-user-data.py :Dont leave files around and log the output to syslog. -- Chuck Short Thu, 26 Feb 2009 10:24:35 -0500 ec2-init (0.3.3ubuntu2) jaunty; urgency=low * ec2-set-apt-sources.py: - Use the ec2 mirrors. (LP: #317065, #333897) - Update the /etc/apt/sources.list (LP: #333904) * ec2-fetch-credentials.py: - Better error checking (LP: #325067) -- Chuck Short Tue, 24 Feb 2009 14:02:37 -0500 ec2-init (0.3.3ubuntu1) jaunty; urgency=low * debian/init: Fix init script. -- Chuck Short Fri, 20 Feb 2009 09:22:54 -0500 ec2-init (0.3.3) jaunty; urgency=low * ec2-set-apt-sources.py - Determine the zone that the user is in and generate a /etc/apt/sources.list.d/ based on that. * debian/init: - Check to see if there is an /var/run/ec2 and create it if it doesnt exist. - Start ec2-set-apt-sources at first bootup. * debian/rules: - Install ec2-set-apt-sources. * debian/control: - Add python-configobj as a dependency. * debian/{install,dirs} - Create an /etc/ec2-init to read the configuration file and install it. -- Chuck Short Mon, 09 Feb 2009 10:35:56 -0500 ec2-init (0.3.2) jaunty; urgency=low * debian/init: - Remove already ran detection - Log the running of ec2-run-user-data to /var/log/ec2-user-data.log * ec2-set-hostname.py: - set hostname to the Ec2 local-hostname - Update the /etc/hosts to change the ubuntu hostname to the public hostname. * ec2-fetch-credentials: - Copy the ssh keys to the ubuntu user. - Setup authorized keys for root to tell the user to login as the ubuntu user when they try to connect. * ec2-run-user-data: - Create an .already-ran file to check to see if ec2-run-user-data already ran. - Save the ec2-run-user-data script in /var/ec2. -- Chuck Short Wed, 04 Feb 2009 09:32:08 -0500 ec2-init (0.3.1) jaunty; urgency=low * debian/dir: Install /var/ec2 to save user-data scripts. * debian/rules: Start ec2-init after ssh. * ec2-run-user-data.py: Save run-user-data script with ami-id. 
-- Chuck Short Mon, 26 Jan 2009 10:40:52 -0500 ec2-init (0.3) jaunty; urgency=low * ec2-run-user-data: Fix python error when writing a file to the disk. -- Chuck Short Thu, 15 Jan 2009 11:49:08 -0500 ec2-init (0.2) jaunty; urgency=low * debian/init: Run fetch-credentials before anything else. (LP: #308533) * Add ec2-set-hostname.py: Queries ec2 metdada for public-hostname and then sets it (LP: #316201) -- Chuck Short Tue, 13 Jan 2009 15:20:21 -0500 ec2-init (0.1) intrepid; urgency=low * Initial release (LP: #269434). -- Soren Hansen Fri, 12 Sep 2008 15:30:32 +0200 debian/grub-legacy-ec2.lintian-overrides0000664000000000000000000000046612574667652015452 0ustar # no-debconf-config and debconf-is-not-a-registry lintian errors # are present in the ubuntu grub package that these were pulled from. # they're due to the use of debconf for merging prompt in # update-grub-legacy-ec2 grub-legacy-ec2 binary: no-debconf-config grub-legacy-ec2 binary: debconf-is-not-a-registry debian/grub-set-default-legacy-ec20000664000000000000000000000620012574667652014220 0ustar #! /bin/sh # Set a default boot entry for GRUB # Copyright (C) 2004 Free Software Foundation, Inc. # # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # Initialize some variables. PACKAGE=grub-legacy-ec2 VERSION=0.97-29ubuntu60 rootdir= entry= # Usage: usage # Print the usage. usage () { cat <. EOF } # Check the arguments. for option in "$@"; do case "$option" in -h | --help) usage exit 0 ;; -v | --version) echo "grub-set-default (GNU GRUB ${VERSION})" exit 0 ;; --root-directory=*) rootdir=`echo "$option" | sed 's/--root-directory=//'` ;; -*) echo "Unrecognized option \`$option'" 1>&2 usage exit 1 ;; *) if test "x$entry" != x; then echo "More than one entries?" 1>&2 usage exit 1 fi # We don't care about what the user specified actually. entry="${option}" ;; esac done if test "x$entry" = x; then echo "entry not specified." 1>&2 usage exit 1 fi find_grub_dir () { echo -n "Searching for GRUB installation directory ... " >&2 for d in $grub_dirs ; do if [ -d "$d" ] ; then grub_dir="$d" break fi done if [ -z "$grub_dir" ] ; then abort "No GRUB directory found.\n###" else echo "found: $grub_dir" >&2 fi echo $grub_dir } grub_dirs="/boot/grub /boot/boot/grub" # Determine the GRUB directory. This is different among OSes. # if rootdir has been informed use it or find grubdir otherwise if [ -n "${rootdir}" ]; then grubdir=${rootdir}/boot/grub if test -d ${grubdir}; then : else grubdir=${rootdir}/grub if test -d ${grubdir}; then : else echo "No GRUB directory found under ${rootdir}/" 1>&2 exit 1 fi fi else grubdir=$(find_grub_dir) fi file=${grubdir}/default if test -f ${file}; then chmod 0600 ${file} rm -f ${file} fi cat < $file $entry # # # # # # # # # # # WARNING: If you want to edit this file directly, do not remove any line # from this file, including this warning. 
Using \`grub-set-default\' is # strongly recommended. EOF # Bye. exit 0 debian/grub-legacy-ec2.templates0000664000000000000000000000137612574667652014013 0ustar Template: grub/update_grub_changeprompt_threeway Type: select # Translators, please keep translations *short* (less than 65 columns) __Choices: install the package maintainer's version, keep the local version currently installed, show the differences between the versions, show a side-by-side difference between the versions, show a 3-way difference between available versions, do a 3-way merge between available versions (experimental), start a new shell to examine the situation Choices-C: install_new, keep_current, diff, sdiff, diff_threeway, merge_threeway, shell Default: keep_current _Description: What would you like to do about ${BASENAME}? A new version of /boot/grub/menu.lst is available, but the version installed currently has been locally modified. debian/cloud-init.config0000664000000000000000000000512712574667652012457 0ustar #!/bin/sh -e . /usr/share/debconf/confmodule hasEc2Md() { local d=/var/lib/cloud i=/var/lib/cloud/instance/ f="" local flist="${i}/datasource ${i}/obj.pkl ${d}/cache/obj.pkl" # search current instance data source information for f in ${flist}; do [ -f "${f}" ] || continue grep -q DataSourceEc2 "${f}" 2>/dev/null && return 0 done # there was no data above found that would have been indicated # by a upgrade. lets just see if we can't find the metadata # service. if wget is not present (it is not 'essential') # then we will quietly return 1 local url="http://169.254.169.254/2009-04-04/meta-data/instance-id" i="" if command -v wget >/dev/null 2>&1; then local tout="--connect-timeout 5 --read-timeout 5 --tries 1" i=$(wget "${url}" ${tout} -O - 2>/dev/null) || : elif command -v curl >/dev/null 2>&1; then i=$(curl "${url}" --max-time 1) || : fi # if instance-id starts with 'i-' then in all likelyhood its good [ "${i#i-}" != "${i}" ] && return 0 return 1 } get_yaml_list() { # get_yaml_list(file, key, def): return a comma delimited list with the value # for the yaml array defined in 'key' from 'file'. if not found , return 'def' # only really supports 'key: [en1, en2 ]' format. local file="$1" key="$2" default="$3" [ -f "$file" ] || return 1 # any thing that didn't match the key is deleted so the final 'p' only # prints things that matched. RET=$(sed -n -e "/^$key:/"'!'d -e "s/$key:[ \[]*//"\ -e "s, \]$,," -e p "$file") [ -n "$RET" ] || RET="$default" } # old_dpkg_cfg is very old file that is no longer read by cloud-init. # it gets re-named to cloud.cfg.d/90_dpkg.cfg in the preinst. dpkg_cfg="/etc/cloud/cloud.cfg.d/90_dpkg.cfg" old_dpkg_cfg="/etc/cloud/distro.cfg" if [ -f "${old_dpkg_cfg}" -a ! -f "$dpkg_cfg" ]; then dpkg_cfg="${old_dpkg_cfg}" echo "WARN: reading value from ${old_dpkg_cfg}" 1>&2 fi if [ -f "$dpkg_cfg" ]; then if get_yaml_list "$dpkg_cfg" datasource_list NOTFOUND && val="$RET" && [ "$val" != "NOTFOUND" ]; then db_set cloud-init/datasources $val else echo "WARN: failed to read datasource_list from $dpkg_cfg" 1>&2 fi elif { db_fget cloud-init/datasources seen || : ; } && [ "${RET}" = "false" ]; then # this is the first time this we've run (installation or re-install after # purge). try to determine if the Ec2 datasource is there. # if it is, and Ec2 was not in the default list, then add it. 
db_get cloud-init/datasources def="${RET}" case " ${def}," in *\ Ec2,*) :;; *) hasEc2Md && db_set cloud-init/datasources "${def:+${def}, }Ec2";; esac fi db_input low cloud-init/datasources || true db_go exit 0 # vi: ts=4 noexpandtab debian/cherry-pick-rev0000775000000000000000000000173112574667652012156 0ustar #!/bin/sh Usage() { cat <&2; exit 1; } [ "$1" = "--help" -o "$1" = "-h" ] && { Usage; exit 0; } repo=${1} revno=${2} name=${3} name=${name%.patch} name=${name%.diff} fname="${revno}-${name}.patch" ( cd "${repo}" && bzr log -r${revno}..${revno} && bzr diff -p1 -r$((${revno}-1))..${revno} ) | filterdiff --exclude "*/ChangeLog" | quilt import -P "${fname}" /dev/stdin [ $? -eq 0 ] || { echo "failed"; exit 1; } cat </dev/null`" ]; then ucf --purge /var/run/grub/menu.lst fi if [ -x "`which ucfr 2>/dev/null`" ]; then ucfr --purge grub /var/run/grub/menu.lst fi fi if [ "$1" = "remove" ]; then dpkg-divert --package grub-legacy-ec2 --remove --rename --divert \ /usr/sbin/grub-set-default.real /usr/sbin/grub-set-default fi #DEBHELPER# debian/cloud-init.postrm0000664000000000000000000000061012574667652012526 0ustar #!/bin/sh set -e case "$1" in purge) rm -f /etc/cloud/cloud.cfg.d/90_dpkg.cfg rm -f /etc/apt/apt.conf.d/90cloud-init-pipelining ;; remove) dpkg-divert --package cloud-init --remove --rename --divert \ /etc/init/ureadahead.conf.disabled /etc/init/ureadahead.conf rm -f /etc/cloud/cloud.cfg.d/90cloud-init-pipelining ;; esac #DEBHELPER# debian/patches/0000775000000000000000000000000012666056267010637 5ustar debian/patches/lp-1456684-eu-central-1.patch0000664000000000000000000000162112574667652015331 0ustar Description: Add central as a direction for EC2 AZs Author: Scott Moser Origin: upstream, http://bazaar.launchpad.net/~cloud-init-dev/cloud-init/trunk/revision/1108 Bug: https://bugs.launchpad.net/cloud-init/+bug/1456684 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -534,8 +534,12 @@ if not mirror_info: mirror_info = {} - ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % - "north|northeast|east|southeast|south|southwest|west|northwest") + # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b) + # the region is us-east-1. so region = az[0:-1] + directions_re = '|'.join([ + 'central', 'east', 'north', 'northeast', 'northwest', + 'south', 'southeast', 'southwest', 'west']) + ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % directions_re) subst = {} if availability_zone: debian/patches/lp-1506244-azure-ssh-key-values.patch0000664000000000000000000001433712622663112017111 0ustar Description: AZURE: add support/preference for SSH public key values Azure has started to support SSH public key values in addition to SSH public key fingerprints. Per MS, this patch prefers fabric provided values instead of fingerprints. 
Author: Ben Howard Origin: upstream, http://bazaar.launchpad.net/~cloud-init-dev/cloud-init/trunk/revision/1149 Bug: https://bugs.launchpad.net/cloud-init/+bug/1506244 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -148,9 +148,15 @@ wait_for = [shcfgxml] fp_files = [] + key_value = None for pk in self.cfg.get('_pubkeys', []): - bname = str(pk['fingerprint'] + ".crt") - fp_files += [os.path.join(ddir, bname)] + if pk.get('value', None): + key_value = pk['value'] + LOG.debug("ssh authentication: using value from fabric") + else: + bname = str(pk['fingerprint'] + ".crt") + fp_files += [os.path.join(ddir, bname)] + LOG.debug("ssh authentication: using fingerprint from fabirc") missing = util.log_time(logfunc=LOG.debug, msg="waiting for files", func=wait_for_files, @@ -166,7 +172,8 @@ metadata['instance-id'] = iid_from_shared_config(shcfgxml) except ValueError as e: LOG.warn("failed to get instance id in %s: %s", shcfgxml, e) - metadata['public-keys'] = pubkeys_from_crt_files(fp_files) + + metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) return metadata def get_data(self): @@ -432,7 +439,7 @@ elem.text != DEF_PASSWD_REDACTION): elem.text = DEF_PASSWD_REDACTION return ET.tostring(root) - except Exception as e: + except Exception: LOG.critical("failed to redact userpassword in {}".format(fname)) return cnt @@ -496,7 +503,8 @@ for pk_node in pubkeys: if not pk_node.hasChildNodes(): continue - cur = {'fingerprint': "", 'path': ""} + + cur = {'fingerprint': "", 'path': "", 'value': ""} for child in pk_node.childNodes: if child.nodeType == text_node or not child.localName: continue --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -58,10 +58,13 @@ if pubkeys: content += "\n" - for fp, path in pubkeys: + for fp, path, value in pubkeys: content += " " - content += ("%s%s" % - (fp, path)) + if fp and path: + content += ("%s%s" % + (fp, path)) + if value: + content += "%s" % value content += "\n" content += "" content += """ @@ -173,7 +176,7 @@ def xml_notequals(self, oxml, nxml): try: self.xml_equals(oxml, nxml) - except AssertionError as e: + except AssertionError: return raise AssertionError("XML is the same") @@ -286,10 +289,10 @@ self.assertFalse(ret) self.assertFalse('agent_invoked' in data) - def test_cfg_has_pubkeys(self): + def test_cfg_has_pubkeys_fingerprint(self): odata = {'HostName': "myhost", 'UserName': "myuser"} - mypklist = [{'fingerprint': 'fp1', 'path': 'path1'}] - pubkeys = [(x['fingerprint'], x['path']) for x in mypklist] + mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}] + pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] data = {'ovfcontent': construct_valid_ovf_env(data=odata, pubkeys=pubkeys)} @@ -298,6 +301,39 @@ self.assertTrue(ret) for mypk in mypklist: self.assertIn(mypk, dsrc.cfg['_pubkeys']) + self.assertIn('pubkey_from', dsrc.metadata['public-keys'][-1]) + + def test_cfg_has_pubkeys_value(self): + # make sure that provided key is used over fingerprint + odata = {'HostName': "myhost", 'UserName': "myuser"} + mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': 'value1'}] + pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] + data = {'ovfcontent': construct_valid_ovf_env(data=odata, + pubkeys=pubkeys)} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + + for mypk in mypklist: + self.assertIn(mypk, dsrc.cfg['_pubkeys']) + self.assertIn(mypk['value'], 
dsrc.metadata['public-keys']) + + def test_cfg_has_no_fingerprint_has_value(self): + # test value is used when fingerprint not provided + odata = {'HostName': "myhost", 'UserName': "myuser"} + mypklist = [{'fingerprint': None, 'path': 'path1', 'value': 'value1'}] + pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] + data = {'ovfcontent': construct_valid_ovf_env(data=odata, + pubkeys=pubkeys)} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + + for mypk in mypklist: + self.assertIn(mypk['value'], dsrc.metadata['public-keys']) + def test_disabled_bounce(self): pass @@ -439,8 +475,8 @@ DataSourceAzure.read_azure_ovf, invalid_xml) def test_load_with_pubkeys(self): - mypklist = [{'fingerprint': 'fp1', 'path': 'path1'}] - pubkeys = [(x['fingerprint'], x['path']) for x in mypklist] + mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}] + pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] content = construct_valid_ovf_env(pubkeys=pubkeys) (_md, _ud, cfg) = DataSourceAzure.read_azure_ovf(content) for mypk in mypklist: debian/patches/lp-1490796-azure-fix-mount_cb-for-symlinks.patch0000664000000000000000000000125312574667652021310 0ustar Description: Handle symlinks as devices in mount_cb Author: Daniel Watkins Origin: upstream, http://bazaar.launchpad.net/~cloud-init-dev/cloud-init/trunk/revision/1139 Bug: https://bugs.launchpad.net/cloud-init/+bug/1490796 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1352,8 +1352,8 @@ mounted = mounts() with tempdir() as tmpd: umount = False - if device in mounted: - mountpoint = mounted[device]['mountpoint'] + if os.path.realpath(device) in mounted: + mountpoint = mounted[os.path.realpath(device)]['mountpoint'] else: try: mountcmd = ['mount'] debian/patches/lp-1336855-grub_xvda.patch0000664000000000000000000000151012574667652015107 0ustar Description: consider xvda devices for grub-pc configuration Cloud-init previously did not consider /dev/xvda and /dev/xvda1 for setting as the grub device. Subsequently, unattended updates with grub may cause the instance to be unusable. Author: Ben Howard Bug: https://bugs.launchpad.net/bugs/1336855 Forwarded: yes --- a/cloudinit/config/cc_grub_dpkg.py +++ b/cloudinit/config/cc_grub_dpkg.py @@ -46,7 +46,8 @@ idevs_empty = "false" if idevs is None: idevs = "/dev/sda" - for dev in ("/dev/sda", "/dev/vda", "/dev/sda1", "/dev/vda1"): + for dev in ("/dev/sda", "/dev/vda", "/dev/xvda", + "/dev/xvda1", "/dev/sda1", "/dev/vda1"): if os.path.exists(dev): idevs = dev break debian/patches/lp1316475-1303986-cloudsigma.patch0000664000000000000000000001102212574667652016026 0ustar Author: Ben Howard Bug: https://launchpad.net/bugs/1316475 Bug: https://launchpad.net/bugs/1303986 Applied-Upstream: yes Description: Backport of 14.10 CloudSigma datasource Only use /dev/ttys1 for CloudSigma if on CloudSigma Added Vendordata support for CloudSigma datasource Updated test suite for CloudSigma datasource --- a/cloudinit/sources/DataSourceCloudSigma.py +++ b/cloudinit/sources/DataSourceCloudSigma.py @@ -16,10 +16,12 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
from base64 import b64decode +import os import re from cloudinit import log as logging from cloudinit import sources +from cloudinit import util from cloudinit.cs_utils import Cepko LOG = logging.getLogger(__name__) @@ -40,12 +42,40 @@ self.ssh_public_key = '' sources.DataSource.__init__(self, sys_cfg, distro, paths) + def is_running_in_cloudsigma(self): + """ + Uses dmidecode to detect if this instance of cloud-init is running + in the CloudSigma's infrastructure. + """ + uname_arch = os.uname()[4] + if uname_arch.startswith("arm") or uname_arch == "aarch64": + # Disabling because dmidecode in CMD_DMI_SYSTEM crashes kvm process + LOG.debug("Disabling CloudSigma datasource on arm (LP: #1243287)") + return False + + dmidecode_path = util.which('dmidecode') + if not dmidecode_path: + return False + + LOG.debug("Determining hypervisor product name via dmidecode") + try: + cmd = [dmidecode_path, "--string", "system-product-name"] + system_product_name, _ = util.subp(cmd) + return 'cloudsigma' in system_product_name.lower() + except: + LOG.warn("Failed to get hypervisor product name via dmidecode") + + return False + def get_data(self): """ Metadata is the whole server context and /meta/cloud-config is used as userdata. """ dsmode = None + if not self.is_running_in_cloudsigma(): + return False + try: server_context = self.cepko.all().result server_meta = server_context['meta'] @@ -66,6 +96,8 @@ self.userdata_raw = server_meta.get('cloudinit-user-data', "") if 'cloudinit-user-data' in base64_fields: self.userdata_raw = b64decode(self.userdata_raw) + if 'cloudinit' in server_context.get('vendor_data', {}): + self.vendordata_raw = server_context["vendor_data"]["cloudinit"] self.metadata = server_context self.ssh_public_key = server_meta['ssh_public_key'] --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ b/tests/unittests/test_datasource/test_cloudsigma.py @@ -20,7 +20,11 @@ "smp": 1, "tags": ["much server", "very performance"], "uuid": "65b2fb23-8c03-4187-a3ba-8b7c919e8890", - "vnc_password": "9e84d6cb49e46379" + "vnc_password": "9e84d6cb49e46379", + "vendor_data": { + "location": "zrh", + "cloudinit": "#cloud-config\n\n...", + } } @@ -35,6 +39,7 @@ class DataSourceCloudSigmaTest(TestCase): def setUp(self): self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "") + self.datasource.is_running_in_cloudsigma = lambda: True self.datasource.cepko = CepkoMock(SERVER_CONTEXT) self.datasource.get_data() @@ -68,3 +73,25 @@ self.datasource.get_data() self.assertEqual(self.datasource.userdata_raw, b'hi world\n') + + def test_vendor_data(self): + self.assertEqual(self.datasource.vendordata_raw, + SERVER_CONTEXT['vendor_data']['cloudinit']) + + def test_lack_of_vendor_data(self): + stripped_context = copy.deepcopy(SERVER_CONTEXT) + del stripped_context["vendor_data"] + self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "") + self.datasource.cepko = CepkoMock(stripped_context) + self.datasource.get_data() + + self.assertIsNone(self.datasource.vendordata_raw) + + def test_lack_of_cloudinit_key_in_vendor_data(self): + stripped_context = copy.deepcopy(SERVER_CONTEXT) + del stripped_context["vendor_data"]["cloudinit"] + self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "") + self.datasource.cepko = CepkoMock(stripped_context) + self.datasource.get_data() + + self.assertIsNone(self.datasource.vendordata_raw) debian/patches/lp-1493453-nocloudds-vendor_data.patch0000664000000000000000000000126512600072750017364 0ustar Description: Fix vendor_data assignment 
for NoCloud Datasource Author: Ben Howard Bug-Ubuntu: https://bugs.launchpad.net/bugs/1493453 Forwarded: https://bugs.launchpad.net/cloud-init/+bug/1493453 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -190,7 +190,7 @@ self.seed = ",".join(found) self.metadata = mydata['meta-data'] self.userdata_raw = mydata['user-data'] - self.vendordata = mydata['vendor-data'] + self.vendordata_raw = mydata['vendor-data'] return True LOG.debug("%s: not claiming datasource, dsmode=%s", self, md['dsmode']) debian/patches/series0000664000000000000000000000173512666056267012062 0ustar lp1316475-1303986-cloudsigma.patch lp-1353008-cloud-init-local-needs-run.conf lp-1336855-grub_xvda.patch lp-1383794-gce-short_name.patch lp-1404311-gce-data_encoding.patch lp-1422919-azure-g5_ephemeral.patch lp-1422388-cloudstack-passwords.patch lp-1356855-fix-cloudstack-metadata.patch lp-1375252-1458052-Azure-hostname_password.patch lp-1456684-eu-central-1.patch -p1 lp-1464253-handle-new-cloudstack-passwords.patch lp-1411582-azure-udev-ephemeral-disks.patch lp-1470880-fix-gce-az-determination.patch lp-1470890-include-regions-in-dynamic-mirror-discovery.patch lp-1490796-azure-fix-mount_cb-for-symlinks.patch lp-1469260-fix-consumption-of-vendor-data.patch lp-1461242-generate-ed25519-host-keys.patch lp-1493453-nocloudds-vendor_data.patch lp-1177432-same-archives-as-ubuntu-server.patch lp-1506244-azure-ssh-key-values.patch lp-1506187-azure_use_unique_vm_id.patch lp-1540965-SmartOS-Add-support-for-Joyent-LX-Brand-Zones.patch lp-1551419-azure-handle-flipped-uuid-endianness.patch debian/patches/lp-1464253-handle-new-cloudstack-passwords.patch0000664000000000000000000000524612574667652021331 0ustar Description: Use wget to fetch CloudStack passwords. Author: Daniel Watkins Origin: upstream, http://bazaar.launchpad.net/~cloud-init-dev/cloud-init/trunk/revision/1118 Bug: https://bugs.launchpad.net/cloud-init/+bug/1464253 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -27,8 +27,6 @@ import os import time -from six.moves import http_client - from cloudinit import ec2_utils as ec2 from cloudinit import log as logging from cloudinit import sources @@ -48,35 +46,22 @@ has documentation about the system. This implementation is following that found at https://github.com/shankerbalan/cloudstack-scripts/blob/master/cloud-set-guest-password-debian - - The CloudStack password server is, essentially, a broken HTTP - server. It requires us to provide a valid HTTP request (including a - DomU_Request header, which is the meat of the request), but just - writes the text of its response on to the socket, without a status - line or any HTTP headers. This makes HTTP libraries sad, which - explains the screwiness of the implementation of this class. - - This should be fixed in CloudStack by commit - a72f14ea9cb832faaac946b3cf9f56856b50142a in December 2014. """ def __init__(self, virtual_router_address): self.virtual_router_address = virtual_router_address def _do_request(self, domu_request): - # We have to provide a valid HTTP request, but a valid HTTP - # response is not returned. This means that getresponse() chokes, - # so we use the socket directly to read off the response. - # Because we're reading off the socket directly, we can't re-use the - # connection. 
- conn = http_client.HTTPConnection(self.virtual_router_address, 8080) - try: - conn.request('GET', '', headers={'DomU_Request': domu_request}) - conn.sock.settimeout(30) - output = conn.sock.recv(1024).decode('utf-8').strip() - finally: - conn.close() - return output + # The password server was in the past, a broken HTTP server, but is now + # fixed. wget handles this seamlessly, so it's easier to shell out to + # that rather than write our own handling code. + output, _ = util.subp([ + 'wget', '--quiet', '--tries', '3', '--timeout', '20', + '--output-document', '-', '--header', + 'DomU_Request: {0}'.format(domu_request), + '{0}:8080'.format(self.virtual_router_address) + ]) + return output.strip() def get_password(self): password = self._do_request('send_my_password') debian/patches/lp-1411582-azure-udev-ephemeral-disks.patch0000664000000000000000000003571012574667652020270 0ustar Description: Use udev rules to find Azure ephemeral disks Author: Daniel Watkins Origin: upstream, http://bazaar.launchpad.net/~cloud-init-dev/cloud-init/trunk/revision/1127 Bug: https://bugs.launchpad.net/cloud-init/+bug/1411582 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -649,6 +649,8 @@ table_type: Which partition table to use, defaults to MBR device: the device to work on. """ + # ensure that we get a real device rather than a symbolic link + device = os.path.realpath(device) LOG.debug("Checking values for %s definition" % device) overwrite = definition.get('overwrite', False) @@ -746,6 +748,9 @@ fs_replace = fs_cfg.get('replace_fs', False) overwrite = fs_cfg.get('overwrite', False) + # ensure that we get a real device rather than a symbolic link + device = os.path.realpath(device) + # This allows you to define the default ephemeral or swap LOG.debug("Checking %s against default devices", device) --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -28,15 +28,15 @@ from cloudinit import util # Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0 -SHORTNAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$" -SHORTNAME = re.compile(SHORTNAME_FILTER) +DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$" +DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER) WS = re.compile("[%s]+" % (whitespace)) FSTAB_PATH = "/etc/fstab" LOG = logging.getLogger(__name__) -def is_mdname(name): +def is_meta_device_name(name): # return true if this is a metadata service name if name in ["ami", "root", "swap"]: return True @@ -48,6 +48,25 @@ return False +def _get_nth_partition_for_device(device_path, partition_number): + potential_suffixes = [str(partition_number), 'p%s' % (partition_number,), + '-part%s' % (partition_number,)] + for suffix in potential_suffixes: + potential_partition_device = '%s%s' % (device_path, suffix) + if os.path.exists(potential_partition_device): + return potential_partition_device + return None + + +def _is_block_device(device_path, partition_path=None): + device_name = os.path.realpath(device_path).split('/')[-1] + sys_path = os.path.join('/sys/block/', device_name) + if partition_path is not None: + sys_path = os.path.join( + sys_path, os.path.realpath(partition_path).split('/')[-1]) + return os.path.exists(sys_path) + + def sanitize_devname(startname, transformer, log): log.debug("Attempting to determine the real name of %s", startname) @@ -58,21 +77,34 @@ devname = "ephemeral0" log.debug("Adjusted mount option from ephemeral to ephemeral0") - (blockdev, part) = util.expand_dotted_devname(devname) + device_path, 
partition_number = util.expand_dotted_devname(devname) - if is_mdname(blockdev): - orig = blockdev - blockdev = transformer(blockdev) - if not blockdev: + if is_meta_device_name(device_path): + orig = device_path + device_path = transformer(device_path) + if not device_path: return None - if not blockdev.startswith("/"): - blockdev = "/dev/%s" % blockdev - log.debug("Mapped metadata name %s to %s", orig, blockdev) + if not device_path.startswith("/"): + device_path = "/dev/%s" % (device_path,) + log.debug("Mapped metadata name %s to %s", orig, device_path) else: - if SHORTNAME.match(startname): - blockdev = "/dev/%s" % blockdev + if DEVICE_NAME_RE.match(startname): + device_path = "/dev/%s" % (device_path,) - return devnode_for_dev_part(blockdev, part) + partition_path = None + if partition_number is None: + partition_path = _get_nth_partition_for_device(device_path, 1) + else: + partition_path = _get_nth_partition_for_device(device_path, + partition_number) + if partition_path is None: + return None + + if _is_block_device(device_path, partition_path): + if partition_path is not None: + return partition_path + return device_path + return None def handle(_name, cfg, cloud, log, _args): @@ -209,49 +241,3 @@ util.subp(("mount", "-a")) except: util.logexc(log, "Activating mounts via 'mount -a' failed") - - -def devnode_for_dev_part(device, partition): - """ - Find the name of the partition. While this might seem rather - straight forward, its not since some devices are '' - while others are 'p'. For example, /dev/xvda3 on EC2 - will present as /dev/xvda3p1 for the first partition since /dev/xvda3 is - a block device. - """ - if not os.path.exists(device): - return None - - short_name = os.path.basename(device) - sys_path = "/sys/block/%s" % short_name - - if not os.path.exists(sys_path): - LOG.debug("did not find entry for %s in /sys/block", short_name) - return None - - sys_long_path = sys_path + "/" + short_name - - if partition is not None: - partition = str(partition) - - if partition is None: - valid_mappings = [sys_long_path + "1", sys_long_path + "p1"] - elif partition != "0": - valid_mappings = [sys_long_path + "%s" % partition, - sys_long_path + "p%s" % partition] - else: - valid_mappings = [] - - for cdisk in valid_mappings: - if not os.path.exists(cdisk): - continue - - dev_path = "/dev/%s" % os.path.basename(cdisk) - if os.path.exists(dev_path): - return dev_path - - if partition is None or partition == "0": - return device - - LOG.debug("Did not fine partition %s for device %s", partition, device) - return None --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -253,7 +253,7 @@ self.metadata.update(fabric_data) - found_ephemeral = find_ephemeral_disk() + found_ephemeral = find_fabric_formatted_ephemeral_disk() if found_ephemeral: self.ds_cfg['disk_aliases']['ephemeral0'] = found_ephemeral LOG.debug("using detected ephemeral0 of %s", found_ephemeral) @@ -275,30 +275,33 @@ return len(fnmatch.filter(os.listdir(mp), '*[!cdrom]*')) -def find_ephemeral_part(): +def find_fabric_formatted_ephemeral_part(): """ - Locate the default ephmeral0.1 device. This will be the first device - that has a LABEL of DEF_EPHEMERAL_LABEL and is a NTFS device. If Azure - gets more ephemeral devices, this logic will only identify the first - such device. 
- """ - c_label_devs = util.find_devs_with("LABEL=%s" % DEF_EPHEMERAL_LABEL) - c_fstype_devs = util.find_devs_with("TYPE=ntfs") - for dev in c_label_devs: - if dev in c_fstype_devs: - return dev + Locate the first fabric formatted ephemeral device. + """ + potential_locations = ['/dev/disk/cloud/azure_resource-part1', + '/dev/disk/azure/resource-part1'] + device_location = None + for potential_location in potential_locations: + if os.path.exists(potential_location): + device_location = potential_location + break + if device_location is None: + return None + ntfs_devices = util.find_devs_with("TYPE=ntfs") + real_device = os.path.realpath(device_location) + if real_device in ntfs_devices: + return device_location return None -def find_ephemeral_disk(): +def find_fabric_formatted_ephemeral_disk(): """ Get the ephemeral disk. """ - part_dev = find_ephemeral_part() - if part_dev and str(part_dev[-1]).isdigit(): - return part_dev[:-1] - elif part_dev: - return part_dev + part_dev = find_fabric_formatted_ephemeral_part() + if part_dev: + return part_dev.split('-')[0] return None @@ -312,7 +315,7 @@ new ephemeral device is detected, cloud-init overrides the default frequency for both disk-setup and mounts for the current boot only. """ - device = find_ephemeral_part() + device = find_fabric_formatted_ephemeral_part() if not device: LOG.debug("no default fabric formated ephemeral0.1 found") return None --- /dev/null +++ b/tests/unittests/test_handler/test_handler_mounts.py @@ -0,0 +1,133 @@ +import os.path +import shutil +import tempfile + +from cloudinit.config import cc_mounts + +from .. import helpers as test_helpers + +try: + from unittest import mock +except ImportError: + import mock + + +class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase): + + def setUp(self): + super(TestSanitizeDevname, self).setUp() + self.new_root = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, self.new_root) + self.patchOS(self.new_root) + + def _touch(self, path): + path = os.path.join(self.new_root, path.lstrip('/')) + basedir = os.path.dirname(path) + if not os.path.exists(basedir): + os.makedirs(basedir) + open(path, 'a').close() + + def _makedirs(self, directory): + directory = os.path.join(self.new_root, directory.lstrip('/')) + if not os.path.exists(directory): + os.makedirs(directory) + + def mock_existence_of_disk(self, disk_path): + self._touch(disk_path) + self._makedirs(os.path.join('/sys/block', disk_path.split('/')[-1])) + + def mock_existence_of_partition(self, disk_path, partition_number): + self.mock_existence_of_disk(disk_path) + self._touch(disk_path + str(partition_number)) + disk_name = disk_path.split('/')[-1] + self._makedirs(os.path.join('/sys/block', + disk_name, + disk_name + str(partition_number))) + + def test_existent_full_disk_path_is_returned(self): + disk_path = '/dev/sda' + self.mock_existence_of_disk(disk_path) + self.assertEqual(disk_path, + cc_mounts.sanitize_devname(disk_path, + lambda x: None, + mock.Mock())) + + def test_existent_disk_name_returns_full_path(self): + disk_name = 'sda' + disk_path = '/dev/' + disk_name + self.mock_existence_of_disk(disk_path) + self.assertEqual(disk_path, + cc_mounts.sanitize_devname(disk_name, + lambda x: None, + mock.Mock())) + + def test_existent_meta_disk_is_returned(self): + actual_disk_path = '/dev/sda' + self.mock_existence_of_disk(actual_disk_path) + self.assertEqual( + actual_disk_path, + cc_mounts.sanitize_devname('ephemeral0', + lambda x: actual_disk_path, + mock.Mock())) + + def 
test_existent_meta_partition_is_returned(self): + disk_name, partition_part = '/dev/sda', '1' + actual_partition_path = disk_name + partition_part + self.mock_existence_of_partition(disk_name, partition_part) + self.assertEqual( + actual_partition_path, + cc_mounts.sanitize_devname('ephemeral0.1', + lambda x: disk_name, + mock.Mock())) + + def test_existent_meta_partition_with_p_is_returned(self): + disk_name, partition_part = '/dev/sda', 'p1' + actual_partition_path = disk_name + partition_part + self.mock_existence_of_partition(disk_name, partition_part) + self.assertEqual( + actual_partition_path, + cc_mounts.sanitize_devname('ephemeral0.1', + lambda x: disk_name, + mock.Mock())) + + def test_first_partition_returned_if_existent_disk_is_partitioned(self): + disk_name, partition_part = '/dev/sda', '1' + actual_partition_path = disk_name + partition_part + self.mock_existence_of_partition(disk_name, partition_part) + self.assertEqual( + actual_partition_path, + cc_mounts.sanitize_devname('ephemeral0', + lambda x: disk_name, + mock.Mock())) + + def test_nth_partition_returned_if_requested(self): + disk_name, partition_part = '/dev/sda', '3' + actual_partition_path = disk_name + partition_part + self.mock_existence_of_partition(disk_name, partition_part) + self.assertEqual( + actual_partition_path, + cc_mounts.sanitize_devname('ephemeral0.3', + lambda x: disk_name, + mock.Mock())) + + def test_transformer_returning_none_returns_none(self): + self.assertIsNone( + cc_mounts.sanitize_devname( + 'ephemeral0', lambda x: None, mock.Mock())) + + def test_missing_device_returns_none(self): + self.assertIsNone( + cc_mounts.sanitize_devname('/dev/sda', None, mock.Mock())) + + def test_missing_sys_returns_none(self): + disk_path = '/dev/sda' + self._makedirs(disk_path) + self.assertIsNone( + cc_mounts.sanitize_devname(disk_path, None, mock.Mock())) + + def test_existent_disk_but_missing_partition_returns_none(self): + disk_path = '/dev/sda' + self.mock_existence_of_disk(disk_path) + self.assertIsNone( + cc_mounts.sanitize_devname( + 'ephemeral0.1', lambda x: disk_path, mock.Mock())) --- /dev/null +++ b/udev/66-azure-ephemeral.rules @@ -0,0 +1,18 @@ +# Azure specific rules +ACTION!="add|change", GOTO="cloud_init_end" +SUBSYSTEM!="block", GOTO="cloud_init_end" +ATTRS{ID_VENDOR}!="Msft", GOTO="cloud_init_end" +ATTRS{ID_MODEL}!="Virtual_Disk", GOTO="cloud_init_end" + +# Root has a GUID of 0000 as the second value +# The resource/resource has GUID of 0001 as the second value +ATTRS{device_id}=="?00000000-0000-*", ENV{fabric_name}="azure_root", GOTO="ci_azure_names" +ATTRS{device_id}=="?00000000-0001-*", ENV{fabric_name}="azure_resource", GOTO="ci_azure_names" +GOTO="cloud_init_end" + +# Create the symlinks +LABEL="ci_azure_names" +ENV{DEVTYPE}=="disk", SYMLINK+="disk/cloud/$env{fabric_name}" +ENV{DEVTYPE}=="partition", SYMLINK+="disk/cloud/$env{fabric_name}-part%n" + +LABEL="cloud_init_end" --- a/setup.py +++ b/setup.py @@ -132,6 +132,7 @@ [f for f in glob('doc/examples/*') if is_f(f)]), ('/usr/share/doc/cloud-init/examples/seed', [f for f in glob('doc/examples/seed/*') if is_f(f)]), + ('/lib/udev/rules.d', ['udev/66-azure-ephemeral.rules']), ], install_requires=read_requires(), cmdclass={ debian/patches/lp-1422919-azure-g5_ephemeral.patch0000664000000000000000000001451712574667652016617 0ustar Author: Ben Howard Bug: https://launchpad.net/bugs/1422919 Applied-Upstream: yes Description: Add GPT partition support Microsoft Azure now has G4/G5 instances that use the GPT partitioning for the ephemeral 
devices. This patch supports GPT partitioning in both the Azure Datasource and cloud-init generally. --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -27,6 +27,7 @@ # Define the commands to use UDEVADM_CMD = util.which('udevadm') SFDISK_CMD = util.which("sfdisk") +SGDISK_CMD = util.which("sgdisk") LSBLK_CMD = util.which("lsblk") BLKID_CMD = util.which("blkid") BLKDEV_CMD = util.which("blockdev") @@ -151,7 +152,7 @@ name: the device name, i.e. sda """ - lsblk_cmd = [LSBLK_CMD, '--pairs', '--out', 'NAME,TYPE,FSTYPE,LABEL', + lsblk_cmd = [LSBLK_CMD, '--pairs', '--output', 'NAME,TYPE,FSTYPE,LABEL', device] if nodeps: @@ -315,22 +316,6 @@ return False -def get_hdd_size(device): - """ - Returns the hard disk size. - This works with any disk type, including GPT. - """ - - size_cmd = [SFDISK_CMD, '--show-size', device] - size = None - try: - size, _err = util.subp(size_cmd) - except Exception as e: - raise Exception("Failed to get %s size\n%s" % (device, e)) - - return int(size.strip()) - - def get_dyn_func(*args): """ Call the appropriate function. @@ -358,6 +343,30 @@ raise Exception("No such function %s to call!" % func_name) +def get_mbr_hdd_size(device): + size_cmd = [SFDISK_CMD, '--show-size', device] + size = None + try: + size, _err = util.subp(size_cmd) + except Exception as e: + raise Exception("Failed to get %s size\n%s" % (device, e)) + + return int(size.strip()) + + +def get_gpt_hdd_size(device): + out, _ = util.subp([SGDISK_CMD, '-p', device]) + return out.splitlines()[0].split()[2] + + +def get_hdd_size(table_type, device): + """ + Returns the hard disk size. + This works with any disk type, including GPT. + """ + return get_dyn_func("get_%s_hdd_size", table_type, device) + + def check_partition_mbr_layout(device, layout): """ Returns true if the partition layout matches the one on the disk @@ -393,6 +402,36 @@ break found_layout.append(type_label) + return found_layout + + +def check_partition_gpt_layout(device, layout): + prt_cmd = [SGDISK_CMD, '-p', device] + try: + out, _err = util.subp(prt_cmd) + except Exception as e: + raise Exception("Error running partition command on %s\n%s" % ( + device, e)) + + out_lines = iter(out.splitlines()) + # Skip header + for line in out_lines: + if line.strip().startswith('Number'): + break + + return [line.strip().split()[-1] for line in out_lines] + + +def check_partition_layout(table_type, device, layout): + """ + See if the partition lay out matches. + + This is future a future proofing function. In order + to add support for other disk layout schemes, add a + function called check_partition_%s_layout + """ + found_layout = get_dyn_func( + "check_partition_%s_layout", table_type, device, layout) if isinstance(layout, bool): # if we are using auto partitioning, or "True" be happy @@ -417,18 +456,6 @@ return False -def check_partition_layout(table_type, device, layout): - """ - See if the partition lay out matches. - - This is future a future proofing function. In order - to add support for other disk layout schemes, add a - function called check_partition_%s_layout - """ - return get_dyn_func("check_partition_%s_layout", table_type, device, - layout) - - def get_partition_mbr_layout(size, layout): """ Calculate the layout of the partition table. 
Partition sizes @@ -481,6 +508,29 @@ return sfdisk_definition +def get_partition_gpt_layout(size, layout): + if isinstance(layout, bool): + return [(None, [0, 0])] + + partition_specs = [] + for partition in layout: + if isinstance(partition, list): + if len(partition) != 2: + raise Exception( + "Partition was incorrectly defined: %s" % partition) + percent, partition_type = partition + else: + percent = partition + partition_type = None + + part_size = int(float(size) * (float(percent) / 100)) + partition_specs.append((partition_type, [0, '+{}'.format(part_size)])) + + # The last partition should use up all remaining space + partition_specs[-1][-1][-1] = 0 + return partition_specs + + def purge_disk_ptable(device): # wipe the first and last megabyte of a disk (or file) # gpt stores partition table both at front and at end. @@ -556,6 +606,22 @@ read_parttbl(device) +def exec_mkpart_gpt(device, layout): + try: + util.subp([SGDISK_CMD, '-Z', device]) + for index, (partition_type, (start, end)) in enumerate(layout): + index += 1 + util.subp([SGDISK_CMD, + '-n', '{}:{}:{}'.format(index, start, end), device]) + if partition_type is not None: + util.subp( + [SGDISK_CMD, + '-t', '{}:{}'.format(index, partition_type), device]) + except Exception: + print "Failed to partition device %s" % (device,) + raise + + def exec_mkpart(table_type, device, layout): """ Fetches the function for creating the table type. @@ -618,7 +684,7 @@ return LOG.debug("Checking for device size") - device_size = get_hdd_size(device) + device_size = get_hdd_size(table_type, device) LOG.debug("Calculating partition layout") part_definition = get_partition_layout(table_type, device_size, layout) --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -53,9 +53,9 @@ BUILTIN_CLOUD_CONFIG = { 'disk_setup': { - 'ephemeral0': {'table_type': 'mbr', - 'layout': True, - 'overwrite': False}, + 'ephemeral0': {'table_type': 'gpt', + 'layout': [100], + 'overwrite': True}, }, 'fs_setup': [{'filesystem': 'ext4', 'device': 'ephemeral0.1', debian/patches/lp-1177432-same-archives-as-ubuntu-server.patch0000664000000000000000000000371012616715445021067 0ustar --- a/templates/sources.list.ubuntu.tmpl +++ b/templates/sources.list.ubuntu.tmpl @@ -9,13 +9,13 @@ # See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to # newer versions of the distribution. -deb $mirror $codename main -deb-src $mirror $codename main +deb $mirror $codename main restricted +deb-src $mirror $codename main restricted \## Major bug fix updates produced after the final release of the \## distribution. -deb $mirror $codename-updates main -deb-src $mirror $codename-updates main +deb $mirror $codename-updates main restricted +deb-src $mirror $codename-updates main restricted \## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu \## team. Also, please note that software in universe WILL NOT receive any @@ -30,10 +30,10 @@ \## your rights to use the software. Also, please note that software in \## multiverse WILL NOT receive any review or updates from the Ubuntu \## security team. -# deb $mirror $codename multiverse -# deb-src $mirror $codename multiverse -# deb $mirror $codename-updates multiverse -# deb-src $mirror $codename-updates multiverse +deb $mirror $codename multiverse +deb-src $mirror $codename multiverse +deb $mirror $codename-updates multiverse +deb-src $mirror $codename-updates multiverse \## Uncomment the following two lines to add software from the 'backports' \## repository. 
@@ -42,8 +42,8 @@ \## newer versions of some applications which may provide useful features. \## Also, please note that software in backports WILL NOT receive any review \## or updates from the Ubuntu security team. -# deb $mirror $codename-backports main restricted universe multiverse -# deb-src $mirror $codename-backports main restricted universe multiverse +deb $mirror $codename-backports main restricted universe multiverse +deb-src $mirror $codename-backports main restricted universe multiverse \## Uncomment the following two lines to add software from Canonical's \## 'partner' repository. debian/patches/lp-1353008-cloud-init-local-needs-run.conf0000664000000000000000000000262212574667652020003 0ustar Author: Scott Moser Bug: https://launchpad.net/bugs/1353008 Applied-Upstream: yes revno 1012 Description: cloud-init-local depends on /run. reflect that in upstart job. With the writing of cloud-init status, cloud-init-local needs to have /run mounted. The issue we were seeing was a race where: cloud-init-local creates /run/cloud-init /run is mounted cloud-init-local tries to link a file into /run/cloud-init . that directory was no longer visisable as /run was mounted over the top. . This also fixes a bug that would occur if a local datasource ran and found a source before cloud-init-nonet ran and networking had not yet come up. If that occurred then cloud-init-nonet would exit without blocking network. --- a/upstart/cloud-init-local.conf +++ b/upstart/cloud-init-local.conf @@ -1,6 +1,6 @@ # cloud-init - the initial cloud-init job # crawls metadata service, emits cloud-config -start on mounted MOUNTPOINT=/ +start on mounted MOUNTPOINT=/ and mounted MOUNTPOINT=/run task --- a/upstart/cloud-init-nonet.conf +++ b/upstart/cloud-init-nonet.conf @@ -58,10 +58,6 @@ script # static_network_up already occurred static_network_up && exit 0 - # obj.pkl comes from cloud-init-local (or previous boot and - # manual_cache_clean) - [ -f /var/lib/cloud/instance/obj.pkl ] && exit 0 - dowait 10 dowait 120 msg "gave up waiting for a network device." 
debian/patches/lp-1470890-include-regions-in-dynamic-mirror-discovery.patch0000664000000000000000000001713312574667652023566 0ustar Description: Enable %(region)s as a dynamic mirror substitution Author: Daniel Watkins Origin: upstream, http://bazaar.launchpad.net/~cloud-init-dev/cloud-init/trunk/revision/1126 Bug: https://bugs.launchpad.net/cloud-init/+bug/1470890 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -107,12 +107,11 @@ arch = self.get_primary_arch() return _get_arch_package_mirror_info(mirror_info, arch) - def get_package_mirror_info(self, arch=None, - availability_zone=None): + def get_package_mirror_info(self, arch=None, data_source=None): # This resolves the package_mirrors config option # down to a single dict of {mirror_name: mirror_url} arch_info = self._get_arch_package_mirror_info(arch) - return _get_package_mirror_info(availability_zone=availability_zone, + return _get_package_mirror_info(data_source=data_source, mirror_info=arch_info) def apply_network(self, settings, bring_up=True): @@ -526,7 +525,7 @@ LOG.info("Added user '%s' to group '%s'" % (member, name)) -def _get_package_mirror_info(mirror_info, availability_zone=None, +def _get_package_mirror_info(mirror_info, data_source=None, mirror_filter=util.search_for_mirror): # given a arch specific 'mirror_info' entry (from package_mirrors) # search through the 'search' entries, and fallback appropriately @@ -542,11 +541,14 @@ ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % directions_re) subst = {} - if availability_zone: - subst['availability_zone'] = availability_zone + if data_source and data_source.availability_zone: + subst['availability_zone'] = data_source.availability_zone - if availability_zone and re.match(ec2_az_re, availability_zone): - subst['ec2_region'] = "%s" % availability_zone[0:-1] + if re.match(ec2_az_re, data_source.availability_zone): + subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1] + + if data_source and data_source.region: + subst['region'] = data_source.region results = {} for (name, mirror) in mirror_info.get('failsafe', {}).iteritems(): --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -197,6 +197,13 @@ except KeyError: return None + @property + def region(self): + az = self.availability_zone + if az is not None: + return az[:-1] + return None + # Used to match classes to dependencies datasources = [ (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -141,6 +141,10 @@ def availability_zone(self): return self.metadata['availability-zone'] + @property + def region(self): + return self.availability_zone.rsplit('-', 1)[0] + # Used to match classes to dependencies datasources = [ (DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -155,6 +155,10 @@ return self.metadata.get('availability-zone', self.metadata.get('availability_zone')) + @property + def region(self): + return self.metadata.get('region') + def get_instance_id(self): if not self.metadata or 'instance-id' not in self.metadata: # Return a magic not really instance id string @@ -208,8 +212,7 @@ return hostname def get_package_mirror_info(self): - return self.distro.get_package_mirror_info( - availability_zone=self.availability_zone) + return self.distro.get_package_mirror_info(data_source=self) def normalize_pubkey_data(pubkey_data): --- a/config/cloud.cfg +++ 
b/config/cloud.cfg @@ -102,6 +102,7 @@ primary: - http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/ - http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/ + - http://%(region)s.clouds.archive.ubuntu.com/ubuntu/ security: [] - arches: [armhf, armel, default] failsafe: --- a/tests/unittests/test_distros/test_generic.py +++ b/tests/unittests/test_distros/test_generic.py @@ -5,6 +5,11 @@ import os +try: + from unittest import mock +except ImportError: + import mock + unknown_arch_info = { 'arches': ['default'], 'failsafe': {'primary': 'http://fs-primary-default', @@ -142,33 +147,35 @@ def test_get_package_mirror_info_az_ec2(self): arch_mirrors = gapmi(package_mirrors, arch="amd64") + data_source_mock = mock.Mock(availability_zone="us-east-1a") - results = gpmi(arch_mirrors, availability_zone="us-east-1a", + results = gpmi(arch_mirrors, data_source=data_source_mock, mirror_filter=self.return_first) self.assertEqual(results, {'primary': 'http://us-east-1.ec2/', 'security': 'http://security-mirror1-intel'}) - results = gpmi(arch_mirrors, availability_zone="us-east-1a", + results = gpmi(arch_mirrors, data_source=data_source_mock, mirror_filter=self.return_second) self.assertEqual(results, {'primary': 'http://us-east-1a.clouds/', 'security': 'http://security-mirror2-intel'}) - results = gpmi(arch_mirrors, availability_zone="us-east-1a", + results = gpmi(arch_mirrors, data_source=data_source_mock, mirror_filter=self.return_none) self.assertEqual(results, package_mirrors[0]['failsafe']) def test_get_package_mirror_info_az_non_ec2(self): arch_mirrors = gapmi(package_mirrors, arch="amd64") + data_source_mock = mock.Mock(availability_zone="nova.cloudvendor") - results = gpmi(arch_mirrors, availability_zone="nova.cloudvendor", + results = gpmi(arch_mirrors, data_source=data_source_mock, mirror_filter=self.return_first) self.assertEqual(results, {'primary': 'http://nova.cloudvendor.clouds/', 'security': 'http://security-mirror1-intel'}) - results = gpmi(arch_mirrors, availability_zone="nova.cloudvendor", + results = gpmi(arch_mirrors, data_source=data_source_mock, mirror_filter=self.return_last) self.assertEqual(results, {'primary': 'http://nova.cloudvendor.clouds/', @@ -176,17 +183,18 @@ def test_get_package_mirror_info_none(self): arch_mirrors = gapmi(package_mirrors, arch="amd64") + data_source_mock = mock.Mock(availability_zone=None) # because both search entries here replacement based on # availability-zone, the filter will be called with an empty list and # failsafe should be taken. 
- results = gpmi(arch_mirrors, availability_zone=None, + results = gpmi(arch_mirrors, data_source=data_source_mock, mirror_filter=self.return_first) self.assertEqual(results, {'primary': 'http://fs-primary-intel', 'security': 'http://security-mirror1-intel'}) - results = gpmi(arch_mirrors, availability_zone=None, + results = gpmi(arch_mirrors, data_source=data_source_mock, mirror_filter=self.return_last) self.assertEqual(results, {'primary': 'http://fs-primary-intel', debian/patches/lp-1506187-azure_use_unique_vm_id.patch0000664000000000000000000004072712624645421017670 0ustar --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -79,12 +79,6 @@ './Container/RoleInstanceList/RoleInstance/InstanceId') @property - def shared_config_xml(self): - url = self._text_from_xpath('./Container/RoleInstanceList/RoleInstance' - '/Configuration/SharedConfig') - return self.http_client.get(url).contents - - @property def certificates_xml(self): if self._certificates_xml is None: url = self._text_from_xpath( @@ -172,19 +166,6 @@ return keys -def iid_from_shared_config_content(content): - """ - find INSTANCE_ID in: - - - - - """ - root = ElementTree.fromstring(content) - depnode = root.find('Deployment') - return depnode.get('name') - - class WALinuxAgentShim(object): REPORT_READY_XML_TEMPLATE = '\n'.join([ @@ -263,8 +244,6 @@ public_keys = self.openssl_manager.parse_certificates( goal_state.certificates_xml) data = { - 'instance-id': iid_from_shared_config_content( - goal_state.shared_config_xml), 'public-keys': public_keys, } self._report_ready(goal_state, http_client) --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -31,8 +31,7 @@ from cloudinit.settings import PER_ALWAYS from cloudinit import sources from cloudinit import util -from cloudinit.sources.helpers.azure import ( - get_metadata_from_fabric, iid_from_shared_config_content) +from cloudinit.sources.helpers.azure import get_metadata_from_fabric LOG = logging.getLogger(__name__) @@ -41,7 +40,6 @@ AGENT_START = ['service', 'walinuxagent', 'start'] BOUNCE_COMMAND = ['sh', '-xc', "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"] -DATA_DIR_CLEAN_LIST = ['SharedConfig.xml'] BUILTIN_DS_CONFIG = { 'agent_command': AGENT_START, @@ -144,8 +142,6 @@ self.ds_cfg['agent_command']) ddir = self.ds_cfg['data_dir'] - shcfgxml = os.path.join(ddir, "SharedConfig.xml") - wait_for = [shcfgxml] fp_files = [] key_value = None @@ -160,19 +156,11 @@ missing = util.log_time(logfunc=LOG.debug, msg="waiting for files", func=wait_for_files, - args=(wait_for + fp_files,)) + args=(fp_files,)) if len(missing): LOG.warn("Did not find files, but going on: %s", missing) metadata = {} - if shcfgxml in missing: - LOG.warn("SharedConfig.xml missing, using static instance-id") - else: - try: - metadata['instance-id'] = iid_from_shared_config(shcfgxml) - except ValueError as e: - LOG.warn("failed to get instance id in %s: %s", shcfgxml, e) - metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) return metadata @@ -228,21 +216,6 @@ user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) - if found != ddir: - cached_ovfenv = util.load_file( - os.path.join(ddir, 'ovf-env.xml'), quiet=True) - if cached_ovfenv != files['ovf-env.xml']: - # source was not walinux-agent's datadir, so we have to clean - # up so 'wait_for_files' doesn't return early due to stale data - cleaned = [] - for f in [os.path.join(ddir, f) for f in 
DATA_DIR_CLEAN_LIST]: - if os.path.exists(f): - util.del_file(f) - cleaned.append(f) - if cleaned: - LOG.info("removed stale file(s) in '%s': %s", - ddir, str(cleaned)) - # walinux agent writes files world readable, but expects # the directory to be protected. write_files(ddir, files, dirmode=0o700) @@ -258,6 +231,7 @@ " on Azure.", exc_info=True) return False + self.metadata['instance-id'] = get_instance_id() self.metadata.update(fabric_data) found_ephemeral = find_fabric_formatted_ephemeral_disk() @@ -282,6 +256,13 @@ return len(fnmatch.filter(os.listdir(mp), '*[!cdrom]*')) +def get_instance_id(): + """ + Read the instance ID from dmi data + """ + return util.read_dmi_data('system-uuid') + + def find_fabric_formatted_ephemeral_part(): """ Locate the first fabric formatted ephemeral device. @@ -515,7 +496,7 @@ continue if (len(child.childNodes) != 1 or - child.childNodes[0].nodeType != text_node): + child.childNodes[0].nodeType != text_node): continue cur[name] = child.childNodes[0].wholeText.strip() @@ -570,7 +551,7 @@ simple = False value = "" if (len(child.childNodes) == 1 and - child.childNodes[0].nodeType == dom.TEXT_NODE): + child.childNodes[0].nodeType == dom.TEXT_NODE): simple = True value = child.childNodes[0].wholeText @@ -648,12 +629,6 @@ return (md, ud, cfg, {'ovf-env.xml': contents}) -def iid_from_shared_config(path): - with open(path, "rb") as fp: - content = fp.read() - return iid_from_shared_config_content(content) - - class BrokenAzureDataSource(Exception): pass --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -120,15 +120,13 @@ data['pubkey_files'] = flist return ["pubkey_from: %s" % f for f in flist] - def _iid_from_shared_config(path): - data['iid_from_shared_cfg'] = path + def _get_instance_id(): return 'i-my-azure-id' if data.get('ovfcontent') is not None: populate_dir(os.path.join(self.paths.seed_dir, "azure"), {'ovf-env.xml': data['ovfcontent']}) - mod = DataSourceAzure mod.BUILTIN_DS_CONFIG = OVERRIDE_BUILTIN_DS_CONFIG mod.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d @@ -139,8 +137,7 @@ (mod, 'wait_for_files', _wait_for_files), (mod, 'pubkeys_from_crt_files', _pubkeys_from_crt_files), - (mod, 'iid_from_shared_config', - _iid_from_shared_config)]) + (mod, 'get_instance_id', _get_instance_id)]) dsrc = mod.DataSourceAzureNet( data.get('sys_cfg', {}), distro=None, paths=self.paths) @@ -209,7 +206,7 @@ yaml_cfg = "{agent_command: my_command}\n" cfg = yaml.safe_load(yaml_cfg) odata = {'HostName': "myhost", 'UserName': "myuser", - 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}} + 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}} data = {'ovfcontent': construct_valid_ovf_env(data=odata)} dsrc = self._get_ds(data) @@ -221,8 +218,8 @@ # set dscfg in via base64 encoded yaml cfg = {'agent_command': "my_command"} odata = {'HostName': "myhost", 'UserName': "myuser", - 'dscfg': {'text': base64.b64encode(yaml.dump(cfg)), - 'encoding': 'base64'}} + 'dscfg': {'text': base64.b64encode(yaml.dump(cfg)), + 'encoding': 'base64'}} data = {'ovfcontent': construct_valid_ovf_env(data=odata)} dsrc = self._get_ds(data) @@ -269,7 +266,8 @@ # should equal that after the '$' pos = defuser['passwd'].rfind("$") + 1 self.assertEqual(defuser['passwd'], - crypt.crypt(odata['UserPassword'], defuser['passwd'][0:pos])) + crypt.crypt(odata['UserPassword'], + defuser['passwd'][0:pos])) def test_userdata_found(self): mydata = "FOOBAR" @@ -282,7 +280,7 @@ self.assertEqual(dsrc.userdata_raw, mydata) def test_no_datasource_expected(self): - #no 
source should be found if no seed_dir and no devs + # no source should be found if no seed_dir and no devs data = {} dsrc = self._get_ds({}) ret = dsrc.get_data() @@ -334,7 +332,6 @@ for mypk in mypklist: self.assertIn(mypk['value'], dsrc.metadata['public-keys']) - def test_disabled_bounce(self): pass @@ -360,8 +357,8 @@ # Make sure that user can affect disk aliases dscfg = {'disk_aliases': {'ephemeral0': '/dev/sdc'}} odata = {'HostName': "myhost", 'UserName': "myuser", - 'dscfg': {'text': base64.b64encode(yaml.dump(dscfg)), - 'encoding': 'base64'}} + 'dscfg': {'text': base64.b64encode(yaml.dump(dscfg)), + 'encoding': 'base64'}} usercfg = {'disk_setup': {'/dev/sdc': {'something': '...'}, 'ephemeral0': False}} userdata = '#cloud-config' + yaml.dump(usercfg) + "\n" @@ -420,83 +417,22 @@ self.assertTrue(os.path.exists(ovf_env_path)) self.xml_equals(xml, load_file(ovf_env_path)) - def test_existing_ovf_same(self): - # waagent/SharedConfig left alone if found ovf-env.xml same as cached - odata = {'UserData': base64.b64encode("SOMEUSERDATA")} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} - - populate_dir(self.waagent_d, - {'ovf-env.xml': data['ovfcontent'], - 'otherfile': 'otherfile-content', - 'SharedConfig.xml': 'mysharedconfig'}) - - dsrc = self._get_ds(data) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertTrue(os.path.exists( - os.path.join(self.waagent_d, 'ovf-env.xml'))) - self.assertTrue(os.path.exists( - os.path.join(self.waagent_d, 'otherfile'))) - self.assertTrue(os.path.exists( - os.path.join(self.waagent_d, 'SharedConfig.xml'))) - - def test_existing_ovf_diff(self): - # waagent/SharedConfig must be removed if ovfenv is found elsewhere - - # 'get_data' should remove SharedConfig.xml in /var/lib/waagent - # if ovf-env.xml differs. 
- cached_ovfenv = construct_valid_ovf_env( - {'userdata': base64.b64encode("FOO_USERDATA")}) - new_ovfenv = construct_valid_ovf_env( - {'userdata': base64.b64encode("NEW_USERDATA")}) - - populate_dir(self.waagent_d, - {'ovf-env.xml': cached_ovfenv, - 'SharedConfig.xml': "mysharedconfigxml", - 'otherfile': 'otherfilecontent'}) - - dsrc = self._get_ds({'ovfcontent': new_ovfenv}) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(dsrc.userdata_raw, "NEW_USERDATA") - self.assertTrue(os.path.exists( - os.path.join(self.waagent_d, 'otherfile'))) - self.assertFalse( - os.path.exists(os.path.join(self.waagent_d, 'SharedConfig.xml'))) - self.assertTrue( - os.path.exists(os.path.join(self.waagent_d, 'ovf-env.xml'))) - new_xml = load_file(os.path.join(self.waagent_d, 'ovf-env.xml')) - self.xml_equals(new_ovfenv, new_xml) class TestReadAzureOvf(MockerTestCase): def test_invalid_xml_raises_non_azure_ds(self): invalid_xml = "" + construct_valid_ovf_env(data={}) self.assertRaises(DataSourceAzure.BrokenAzureDataSource, - DataSourceAzure.read_azure_ovf, invalid_xml) + DataSourceAzure.read_azure_ovf, invalid_xml) def test_load_with_pubkeys(self): mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}] pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] content = construct_valid_ovf_env(pubkeys=pubkeys) - (_md, _ud, cfg) = DataSourceAzure.read_azure_ovf(content) + (_, _, cfg) = DataSourceAzure.read_azure_ovf(content) for mypk in mypklist: self.assertIn(mypk, cfg['_pubkeys']) -class TestReadAzureSharedConfig(MockerTestCase): - def test_valid_content(self): - xml = """ - - - - - - - """ - ret = DataSourceAzure.iid_from_shared_config_content(xml) - self.assertEqual("MY_INSTANCE_ID", ret) - - def apply_patches(patches): ret = [] for (ref, name, replace) in patches: --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -74,6 +74,31 @@ # Helper utils to see if running in a container CONTAINER_TESTS = ['running-in-container', 'lxc-is-container'] +# Path for DMI Data +DMI_SYS_PATH = "/sys/class/dmi/id" + +# dmidecode and /sys/class/dmi/id/* use different names for the same value, +# this allows us to refer to them by one canonical name +DMIDECODE_TO_DMI_SYS_MAPPING = { + 'baseboard-asset-tag': 'board_asset_tag', + 'baseboard-manufacturer': 'board_vendor', + 'baseboard-product-name': 'board_name', + 'baseboard-serial-number': 'board_serial', + 'baseboard-version': 'board_version', + 'bios-release-date': 'bios_date', + 'bios-vendor': 'bios_vendor', + 'bios-version': 'bios_version', + 'chassis-asset-tag': 'chassis_asset_tag', + 'chassis-manufacturer': 'chassis_vendor', + 'chassis-serial-number': 'chassis_serial', + 'chassis-version': 'chassis_version', + 'system-manufacturer': 'sys_vendor', + 'system-product-name': 'product_name', + 'system-serial-number': 'product_serial', + 'system-uuid': 'product_uuid', + 'system-version': 'product_version', +} + class ProcessExecutionError(IOError): @@ -1926,3 +1951,72 @@ raise ValueError("Missing required files: %s", ','.join(missing)) return ret + + +def _read_dmi_syspath(key): + """ + Reads dmi data with from /sys/class/dmi/id + """ + if key not in DMIDECODE_TO_DMI_SYS_MAPPING: + return None + mapped_key = DMIDECODE_TO_DMI_SYS_MAPPING[key] + dmi_key_path = "{0}/{1}".format(DMI_SYS_PATH, mapped_key) + LOG.debug("querying dmi data %s", dmi_key_path) + try: + if not os.path.exists(dmi_key_path): + LOG.debug("did not find %s", dmi_key_path) + return None + + key_data = load_file(dmi_key_path) + if not key_data: + LOG.debug("%s did not return 
any data", dmi_key_path) + return None + + LOG.debug("dmi data %s returned %s", dmi_key_path, key_data) + return key_data.strip() + + except Exception: + logexc(LOG, "failed read of %s", dmi_key_path) + return None + + +def _call_dmidecode(key, dmidecode_path): + """ + Calls out to dmidecode to get the data out. This is mostly for supporting + OS's without /sys/class/dmi/id support. + """ + try: + cmd = [dmidecode_path, "--string", key] + (result, _err) = subp(cmd) + LOG.debug("dmidecode returned '%s' for '%s'", result, key) + return result + except (IOError, OSError) as _err: + LOG.debug('failed dmidecode cmd: %s\n%s', cmd, _err.message) + return None + + +def read_dmi_data(key): + """ + Wrapper for reading DMI data. + + This will do the following (returning the first that produces a + result): + 1) Use a mapping to translate `key` from dmidecode naming to + sysfs naming and look in /sys/class/dmi/... for a value. + 2) Use `key` as a sysfs key directly and look in /sys/class/dmi/... + 3) Fall-back to passing `key` to `dmidecode --string`. + + If all of the above fail to find a value, None will be returned. + """ + syspath_value = _read_dmi_syspath(key) + if syspath_value is not None: + return syspath_value + + dmidecode_path = which('dmidecode') + if dmidecode_path: + return _call_dmidecode(key, dmidecode_path) + + LOG.warn("did not find either path %s or dmidecode command", + DMI_SYS_PATH) + return None + debian/patches/lp-1470880-fix-gce-az-determination.patch0000664000000000000000000000237512574667652017727 0ustar Description: Correctly parse GCE's availability zones Author: Daniel Watkins Origin: upstream, http://bazaar.launchpad.net/~cloud-init-dev/cloud-init/trunk/revision/1121 Bug: https://bugs.launchpad.net/cloud-init/+bug/1470880 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -105,6 +105,10 @@ lines = self.metadata['public-keys'].splitlines() self.metadata['public-keys'] = [self._trim_key(k) for k in lines] + if self.metadata['availability-zone']: + self.metadata['availability-zone'] = self.metadata[ + 'availability-zone'].split('/')[-1] + encoding = self.metadata.get('user-data-encoding') if encoding: if encoding == 'base64': --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -87,7 +87,7 @@ self.assertEqual(GCE_META.get('instance/id'), self.ds.get_instance_id()) - self.assertEqual(GCE_META.get('instance/zone'), + self.assertEqual(GCE_META.get('instance/zone').split('/')[-1], self.ds.availability_zone) self.assertEqual(GCE_META.get('instance/attributes/user-data'), debian/patches/lp-1422388-cloudstack-passwords.patch0000664000000000000000000001105312574667652017305 0ustar Description: Backport CloudStack password support. Author: Daniel Watkins Bug-Ubuntu: https://bugs.launchpad.net/bugs/1422388 Last-Update: 2015-02-25 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -27,6 +27,8 @@ import os import time +from six.moves import http_client + from cloudinit import ec2_utils as ec2 from cloudinit import log as logging from cloudinit import sources @@ -38,6 +40,54 @@ LOG = logging.getLogger(__name__) +class CloudStackPasswordServerClient(object): + """ + Implements password fetching from the CloudStack password server. + + http://cloudstack-administration.readthedocs.org/en/latest/templates.html#adding-password-management-to-your-templates + has documentation about the system. 
This implementation is following that + found at + https://github.com/shankerbalan/cloudstack-scripts/blob/master/cloud-set-guest-password-debian + + The CloudStack password server is, essentially, a broken HTTP + server. It requires us to provide a valid HTTP request (including a + DomU_Request header, which is the meat of the request), but just + writes the text of its response on to the socket, without a status + line or any HTTP headers. This makes HTTP libraries sad, which + explains the screwiness of the implementation of this class. + + This should be fixed in CloudStack by commit + a72f14ea9cb832faaac946b3cf9f56856b50142a in December 2014. + """ + + def __init__(self, virtual_router_address): + self.virtual_router_address = virtual_router_address + + def _do_request(self, domu_request): + # We have to provide a valid HTTP request, but a valid HTTP + # response is not returned. This means that getresponse() chokes, + # so we use the socket directly to read off the response. + # Because we're reading off the socket directly, we can't re-use the + # connection. + conn = http_client.HTTPConnection(self.virtual_router_address, 8080) + try: + conn.request('GET', '', headers={'DomU_Request': domu_request}) + conn.sock.settimeout(30) + output = conn.sock.recv(1024).decode('utf-8').strip() + finally: + conn.close() + return output + + def get_password(self): + password = self._do_request('send_my_password') + if password in ['', 'saved_password']: + return None + if password == 'bad_request': + raise RuntimeError('Error when attempting to fetch root password.') + self._do_request('saved_password') + return password + + class DataSourceCloudStack(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -45,10 +95,11 @@ # Cloudstack has its metadata/userdata URLs located at # http:///latest/ self.api_ver = 'latest' - vr_addr = get_vr_address() - if not vr_addr: + self.vr_addr = get_vr_address() + if not self.vr_addr: raise RuntimeError("No virtual router found!") - self.metadata_address = "http://%s/" % (vr_addr) + self.metadata_address = "http://%s/" % (self.vr_addr,) + self.cfg = {} def _get_url_settings(self): mcfg = self.ds_cfg @@ -92,6 +143,9 @@ return bool(url) + def get_config_obj(self): + return self.cfg + def get_data(self): seed_ret = {} if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")): @@ -109,6 +163,22 @@ self.metadata_address) LOG.debug("Crawl of metadata service took %s seconds", int(time.time() - start_time)) + password_client = CloudStackPasswordServerClient(self.vr_addr) + try: + set_password = password_client.get_password() + except Exception: + util.logexc(LOG, + 'Failed to fetch password from virtual router %s', + self.vr_addr) + else: + if set_password: + self.cfg = { + 'ssh_pwauth': True, + 'password': set_password, + 'chpasswd': { + 'expire': False, + }, + } return True except Exception: util.logexc(LOG, 'Failed fetching from metadata service %s', debian/patches/lp-1383794-gce-short_name.patch0000664000000000000000000000305112574667652016027 0ustar Description: Use the shortname for GCE GCE FDQN's may exceed 64 characters. A number of programs like Hadoop, Easyrsa, Java, etc., have issues with long hostnames. 
Author: Ben Howard Bug-Ubuntu: https://bugs.launchpad.net/bugs/1383794 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -115,7 +115,8 @@ return self.metadata['public-keys'] def get_hostname(self, fqdn=False, _resolve_ip=False): - return self.metadata['local-hostname'] + # GCE has long FDQN's and has asked for short hostnames + return self.metadata['local-hostname'].split('.')[0] def get_userdata_raw(self): return self.metadata['user-data'] --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -80,7 +80,8 @@ body=_request_callback) self.ds.get_data() - self.assertEqual(GCE_META.get('instance/hostname'), + shostname = GCE_META.get('instance/hostname').split('.')[0] + self.assertEqual(shostname, self.ds.get_hostname()) self.assertEqual(GCE_META.get('instance/id'), @@ -107,5 +108,5 @@ self.assertEqual(GCE_META_PARTIAL.get('instance/id'), self.ds.get_instance_id()) - self.assertEqual(GCE_META_PARTIAL.get('instance/hostname'), - self.ds.get_hostname()) + shostname = GCE_META_PARTIAL.get('instance/hostname').split('.')[0] + self.assertEqual(shostname, self.ds.get_hostname()) debian/patches/lp-1540965-SmartOS-Add-support-for-Joyent-LX-Brand-Zones.patch0000664000000000000000000006700612655203724023346 0ustar From: Robert C Jennings Date: Tue, 2 Feb 2016 09:21:07 -0600 Subject: SmartOS: Add support for Joyent LX-Brand Zones LX-brand zones on Joyent's SmartOS use a different metadata source (socket file) than the KVM-based SmartOS virtualization (serial port). This patch adds support for recognizing the different flavors of virtualization on SmartOS and setting up a metadata source file object. After the file object is created, the rest of the code for the datasource can remain common. This patch reads the metadata byte-by-byte rather than using readline because we can not perform a readline on the file-like object for the socket as this block indefintely waiting for an EOF that is will not be sent by the host platform. This patch also moves to V2 metadata as it provides checksum validation and makes reading the metadata much more reliable. Author: Robert C Jennings Bug-Ubuntu: https://launchpad.net/bugs/1540965 --- cloudinit/sources/DataSourceSmartOS.py | 373 +++++++++++++++--------- doc/examples/cloud-config-datasources.txt | 7 + tests/unittests/test_datasource/test_smartos.py | 60 +++- 3 files changed, 288 insertions(+), 152 deletions(-) Index: b/cloudinit/sources/DataSourceSmartOS.py =================================================================== --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -20,28 +20,39 @@ # Datasource for provisioning on SmartOS. This works on Joyent # and public/private Clouds using SmartOS. # -# SmartOS hosts use a serial console (/dev/ttyS1) on Linux Guests. +# SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests # The meta-data is transmitted via key/value pairs made by # requests on the console. For example, to get the hostname, you # would send "GET hostname" on /dev/ttyS1. +# For Linux Guests running in LX-Brand Zones on SmartOS hosts +# a socket (/native/.zonecontrol/metadata.sock) is used instead +# of a serial console. 
# # Certain behavior is defined by the DataDictionary # http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html # Comments with "@datadictionary" are snippets of the definition import base64 +import binascii +import contextlib +import os +import random +import re +import socket +import stat + +import serial + from cloudinit import log as logging from cloudinit import sources from cloudinit import util -import os -import os.path -import serial LOG = logging.getLogger(__name__) SMARTOS_ATTRIB_MAP = { - #Cloud-init Key : (SmartOS Key, Strip line endings) + # Cloud-init Key : (SmartOS Key, Strip line endings) + 'instance-id': ('sdc:uuid', True), 'local-hostname': ('hostname', True), 'public-keys': ('root_authorized_keys', True), 'user-script': ('user-script', False), @@ -72,6 +83,7 @@ DS_CFG_PATH = ['datasource', DS_NAME] # BUILTIN_DS_CONFIG = { 'serial_device': '/dev/ttyS1', + 'metadata_sockfile': '/native/.zonecontrol/metadata.sock', 'seed_timeout': 60, 'no_base64_decode': ['root_authorized_keys', 'motd_sys_info', @@ -79,6 +91,7 @@ BUILTIN_DS_CONFIG = { 'user-data', 'user-script', 'sdc:datacenter_name', + 'sdc:uuid', ], 'base64_keys': [], 'base64_all': False, @@ -96,21 +109,21 @@ BUILTIN_CLOUD_CONFIG = { 'device': 'ephemeral0'}], } -## builtin vendor-data is a boothook that writes a script into -## /var/lib/cloud/scripts/per-boot. *That* script then handles -## executing the 'operator-script' and 'user-script' files -## that cloud-init writes into /var/lib/cloud/instance/data/ -## if they exist. -## -## This is all very indirect, but its done like this so that at -## some point in the future, perhaps cloud-init wouldn't do it at -## all, but rather the vendor actually provide vendor-data that accomplished -## their desires. (That is the point of vendor-data). -## -## cloud-init does cheat a bit, and write the operator-script and user-script -## itself. It could have the vendor-script do that, but it seems better -## to not require the image to contain a tool (mdata-get) to read those -## keys when we have a perfectly good one inside cloud-init. +# builtin vendor-data is a boothook that writes a script into +# /var/lib/cloud/scripts/per-boot. *That* script then handles +# executing the 'operator-script' and 'user-script' files +# that cloud-init writes into /var/lib/cloud/instance/data/ +# if they exist. +# +# This is all very indirect, but its done like this so that at +# some point in the future, perhaps cloud-init wouldn't do it at +# all, but rather the vendor actually provide vendor-data that accomplished +# their desires. (That is the point of vendor-data). +# +# cloud-init does cheat a bit, and write the operator-script and user-script +# itself. It could have the vendor-script do that, but it seems better +# to not require the image to contain a tool (mdata-get) to read those +# keys when we have a perfectly good one inside cloud-init. 
BUILTIN_VENDOR_DATA = """\ #cloud-boothook #!/bin/sh @@ -146,17 +159,27 @@ class DataSourceSmartOS(sources.DataSour def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.is_smartdc = None - self.ds_cfg = util.mergemanydict([ self.ds_cfg, util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}), BUILTIN_DS_CONFIG]) self.metadata = {} - self.cfg = BUILTIN_CLOUD_CONFIG - self.seed = self.ds_cfg.get("serial_device") - self.seed_timeout = self.ds_cfg.get("serial_timeout") + # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but + # report 'BrandZ virtual linux' as the kernel version + if os.uname()[3].lower() == 'brandz virtual linux': + LOG.debug("Host is SmartOS, guest in Zone") + self.is_smartdc = True + self.smartos_type = 'lx-brand' + self.cfg = {} + self.seed = self.ds_cfg.get("metadata_sockfile") + else: + self.is_smartdc = True + self.smartos_type = 'kvm' + self.seed = self.ds_cfg.get("serial_device") + self.cfg = BUILTIN_CLOUD_CONFIG + self.seed_timeout = self.ds_cfg.get("serial_timeout") self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode') self.b64_keys = self.ds_cfg.get('base64_keys') self.b64_all = self.ds_cfg.get('base64_all') @@ -166,12 +189,50 @@ class DataSourceSmartOS(sources.DataSour root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) + def _get_seed_file_object(self): + if not self.seed: + raise AttributeError("seed device is not set") + + if self.smartos_type == 'lx-brand': + if not stat.S_ISSOCK(os.stat(self.seed).st_mode): + LOG.debug("Seed %s is not a socket", self.seed) + return None + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.connect(self.seed) + return sock.makefile('rw') + else: + if not stat.S_ISCHR(os.stat(self.seed).st_mode): + LOG.debug("Seed %s is not a character device") + return None + ser = serial.Serial(self.seed, timeout=self.seed_timeout) + if not ser.isOpen(): + raise SystemError("Unable to open %s" % self.seed) + return ser + return None + + def _set_provisioned(self): + '''Mark the instance provisioning state as successful. + + When run in a zone, the host OS will look for /var/svc/provisioning + to be renamed as /var/svc/provision_success. This should be done + after meta-data is successfully retrieved and from this point + the host considers the provision of the zone to be a success and + keeps the zone running. + ''' + + LOG.debug('Instance provisioning state set as successful') + svc_path = '/var/svc' + if os.path.exists('/'.join([svc_path, 'provisioning'])): + os.rename('/'.join([svc_path, 'provisioning']), + '/'.join([svc_path, 'provision_success'])) + def get_data(self): md = {} ud = "" - if not os.path.exists(self.seed): - LOG.debug("Host does not appear to be on SmartOS") + if not device_exists(self.seed): + LOG.debug("No metadata device '%s' found for SmartOS datasource", + self.seed) return False uname_arch = os.uname()[4] @@ -180,29 +241,36 @@ class DataSourceSmartOS(sources.DataSour LOG.debug("Disabling SmartOS datasource on arm (LP: #1243287)") return False - dmi_info = dmi_data() - if dmi_info is False: - LOG.debug("No dmidata utility found") + # SDC KVM instances will provide dmi data, LX-brand does not + if self.smartos_type == 'kvm': + dmi_info = dmi_data() + if dmi_info is False: + LOG.debug("No dmidata utility found") + return False + + system_type = dmi_info + if 'smartdc' not in system_type.lower(): + LOG.debug("Host is not on SmartOS. 
system_type=%s", + system_type) + return False + LOG.debug("Host is SmartOS, guest in KVM") + + seed_obj = self._get_seed_file_object() + if seed_obj is None: + LOG.debug('Seed file object not found.') return False - - system_uuid, system_type = tuple(dmi_info) - if 'smartdc' not in system_type.lower(): - LOG.debug("Host is not on SmartOS. system_type=%s", system_type) - return False - self.is_smartdc = True - md['instance-id'] = system_uuid - - b64_keys = self.query('base64_keys', strip=True, b64=False) - if b64_keys is not None: - self.b64_keys = [k.strip() for k in str(b64_keys).split(',')] - - b64_all = self.query('base64_all', strip=True, b64=False) - if b64_all is not None: - self.b64_all = util.is_true(b64_all) - - for ci_noun, attribute in SMARTOS_ATTRIB_MAP.iteritems(): - smartos_noun, strip = attribute - md[ci_noun] = self.query(smartos_noun, strip=strip) + with contextlib.closing(seed_obj) as seed: + b64_keys = self.query('base64_keys', seed, strip=True, b64=False) + if b64_keys is not None: + self.b64_keys = [k.strip() for k in str(b64_keys).split(',')] + + b64_all = self.query('base64_all', seed, strip=True, b64=False) + if b64_all is not None: + self.b64_all = util.is_true(b64_all) + + for ci_noun, attribute in SMARTOS_ATTRIB_MAP.iteritems(): + smartos_noun, strip = attribute + md[ci_noun] = self.query(smartos_noun, seed, strip=strip) # @datadictionary: This key may contain a program that is written # to a file in the filesystem of the guest on each boot and then @@ -217,11 +285,12 @@ class DataSourceSmartOS(sources.DataSour user_script = os.path.join(data_d, 'user-script') u_script_l = "%s/user-script" % LEGACY_USER_D write_boot_content(md.get('user-script'), content_f=user_script, - link=u_script_l, shebang=True, mode=0700) + link=u_script_l, shebang=True, mode=0o700) operator_script = os.path.join(data_d, 'operator-script') write_boot_content(md.get('operator-script'), - content_f=operator_script, shebang=False, mode=0700) + content_f=operator_script, shebang=False, + mode=0o700) # @datadictionary: This key has no defined format, but its value # is written to the file /var/db/mdata-user-data on each boot prior @@ -234,7 +303,7 @@ class DataSourceSmartOS(sources.DataSour # Handle the cloud-init regular meta if not md['local-hostname']: - md['local-hostname'] = system_uuid + md['local-hostname'] = md['instance-id'] ud = None if md['user-data']: @@ -251,6 +320,8 @@ class DataSourceSmartOS(sources.DataSour self.metadata = util.mergemanydict([md, self.metadata]) self.userdata_raw = ud self.vendordata_raw = md['vendor-data'] + + self._set_provisioned() return True def device_name_to_device(self, name): @@ -262,120 +333,144 @@ class DataSourceSmartOS(sources.DataSour def get_instance_id(self): return self.metadata['instance-id'] - def query(self, noun, strip=False, default=None, b64=None): + def query(self, noun, seed_file, strip=False, default=None, b64=None): if b64 is None: if noun in self.smartos_no_base64: b64 = False elif self.b64_all or noun in self.b64_keys: b64 = True - return query_data(noun=noun, strip=strip, seed_device=self.seed, - seed_timeout=self.seed_timeout, default=default, - b64=b64) + return self._query_data(noun, seed_file, strip=strip, + default=default, b64=b64) + def _query_data(self, noun, seed_file, strip=False, + default=None, b64=None): + """Makes a request via "GET " + + In the response, the first line is the status, while subsequent + lines are is the value. A blank line with a "." is used to + indicate end of response. 
+ + If the response is expected to be base64 encoded, then set + b64encoded to true. Unfortantely, there is no way to know if + something is 100% encoded, so this method relies on being told + if the data is base64 or not. + """ -def get_serial(seed_device, seed_timeout): - """This is replaced in unit testing, allowing us to replace - serial.Serial with a mocked class. - - The timeout value of 60 seconds should never be hit. The value - is taken from SmartOS own provisioning tools. Since we are reading - each line individually up until the single ".", the transfer is - usually very fast (i.e. microseconds) to get the response. - """ - if not seed_device: - raise AttributeError("seed_device value is not set") + if not noun: + return False - ser = serial.Serial(seed_device, timeout=seed_timeout) - if not ser.isOpen(): - raise SystemError("Unable to open %s" % seed_device) + response = JoyentMetadataClient(seed_file).get_metadata(noun) - return ser + if response is None: + return default + if b64 is None: + b64 = self._query_data('b64-%s' % noun, seed_file, b64=False, + default=False, strip=True) + b64 = util.is_true(b64) + + resp = None + if b64 or strip: + resp = "".join(response).rstrip() + else: + resp = "".join(response) -def query_data(noun, seed_device, seed_timeout, strip=False, default=None, - b64=None): - """Makes a request to via the serial console via "GET " + if b64: + try: + return base64.b64decode(resp) + except TypeError: + LOG.warn("Failed base64 decoding key '%s'", noun) + return resp - In the response, the first line is the status, while subsequent lines - are is the value. A blank line with a "." is used to indicate end of - response. + return resp - If the response is expected to be base64 encoded, then set b64encoded - to true. Unfortantely, there is no way to know if something is 100% - encoded, so this method relies on being told if the data is base64 or - not. - """ - if not noun: - return False +def device_exists(device): + """Symplistic method to determine if the device exists or not""" + return os.path.exists(device) - ser = get_serial(seed_device, seed_timeout) - ser.write("GET %s\n" % noun.rstrip()) - status = str(ser.readline()).rstrip() - response = [] - eom_found = False - - if 'SUCCESS' not in status: - ser.close() - return default - - while not eom_found: - m = ser.readline() - if m.rstrip() == ".": - eom_found = True - else: - response.append(m) - ser.close() +class JoyentMetadataFetchException(Exception): + pass - if b64 is None: - b64 = query_data('b64-%s' % noun, seed_device=seed_device, - seed_timeout=seed_timeout, b64=False, - default=False, strip=True) - b64 = util.is_true(b64) - - resp = None - if b64 or strip: - resp = "".join(response).rstrip() - else: - resp = "".join(response) - if b64: - try: - return base64.b64decode(resp) - except TypeError: - LOG.warn("Failed base64 decoding key '%s'", noun) - return resp +class JoyentMetadataClient(object): + """ + A client implementing v2 of the Joyent Metadata Protocol Specification. 
- return resp + The full specification can be found at + http://eng.joyent.com/mdata/protocol.html + """ + line_regex = re.compile( + r'V2 (?P\d+) (?P[0-9a-f]+)' + r' (?P(?P[0-9a-f]+) (?PSUCCESS|NOTFOUND)' + r'( (?P.+))?)') + + def __init__(self, metasource): + self.metasource = metasource + + def _checksum(self, body): + return '{0:08x}'.format( + binascii.crc32(body.encode('utf-8')) & 0xffffffff) + + def _get_value_from_frame(self, expected_request_id, frame): + frame_data = self.line_regex.match(frame).groupdict() + if int(frame_data['length']) != len(frame_data['body']): + raise JoyentMetadataFetchException( + 'Incorrect frame length given ({0} != {1}).'.format( + frame_data['length'], len(frame_data['body']))) + expected_checksum = self._checksum(frame_data['body']) + if frame_data['checksum'] != expected_checksum: + raise JoyentMetadataFetchException( + 'Invalid checksum (expected: {0}; got {1}).'.format( + expected_checksum, frame_data['checksum'])) + if frame_data['request_id'] != expected_request_id: + raise JoyentMetadataFetchException( + 'Request ID mismatch (expected: {0}; got {1}).'.format( + expected_request_id, frame_data['request_id'])) + if not frame_data.get('payload', None): + LOG.debug('No value found.') + return None + value = base64.b64decode(frame_data['payload']) + LOG.debug('Value "%s" found.', value) + return value + + def get_metadata(self, metadata_key): + LOG.debug('Fetching metadata key "%s"...', metadata_key) + request_id = '{0:08x}'.format(random.randint(0, 0xffffffff)) + message_body = '{0} GET {1}'.format(request_id, + base64.b64encode(metadata_key)) + msg = 'V2 {0} {1} {2}\n'.format( + len(message_body), self._checksum(message_body), message_body) + LOG.debug('Writing "%s" to metadata transport.', msg) + self.metasource.write(msg.encode('ascii')) + self.metasource.flush() + + response = self.metasource.read(1) + while response[-1] != '\n': + response = ''.join([response, self.metasource.read(1)]) + response = str(response).rstrip() + + if 'SUCCESS' not in response: + return None + + response = response.decode('ascii') + LOG.debug('Read "%s" from metadata transport.', response) + return self._get_value_from_frame(request_id, response) def dmi_data(): - sys_uuid, sys_type = None, None - dmidecode_path = util.which('dmidecode') - if not dmidecode_path: - return False - - sys_uuid_cmd = [dmidecode_path, "-s", "system-uuid"] - try: - LOG.debug("Getting hostname from dmidecode") - (sys_uuid, _err) = util.subp(sys_uuid_cmd) - except Exception as e: - util.logexc(LOG, "Failed to get system UUID", e) - - sys_type_cmd = [dmidecode_path, "-s", "system-product-name"] - try: - LOG.debug("Determining hypervisor product name via dmidecode") - (sys_type, _err) = util.subp(sys_type_cmd) - except Exception as e: - util.logexc(LOG, "Failed to get system UUID", e) + sys_type = util.read_dmi_data("system-product-name") + + if not sys_type: + return None - return (sys_uuid.lower().strip(), sys_type.strip()) + return sys_type def write_boot_content(content, content_f, link=None, shebang=False, - mode=0400): + mode=0o400): """ Write the content to content_f. Under the following rules: 1. 
If no content, remove the file @@ -417,7 +512,7 @@ def write_boot_content(content, content_ except Exception as e: util.logexc(LOG, ("Failed to identify script type for %s" % - content_f, e)) + content_f, e)) if link: try: Index: b/doc/examples/cloud-config-datasources.txt =================================================================== --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -51,12 +51,19 @@ datasource: policy: on # [can be 'on', 'off' or 'force'] SmartOS: + # For KVM guests: # Smart OS datasource works over a serial console interacting with # a server on the other end. By default, the second serial console is the # device. SmartOS also uses a serial timeout of 60 seconds. serial_device: /dev/ttyS1 serial_timeout: 60 + # For LX-Brand Zones guests: + # Smart OS datasource works over a socket interacting with + # the host on the other end. By default, the socket file is in + # the native .zoncontrol directory. + metadata_sockfile: /native/.zonecontrol/metadata.sock + # a list of keys that will not be base64 decoded even if base64_all no_base64_decode: ['root_authorized_keys', 'motd_sys_info', 'iptables_disable'] Index: b/tests/unittests/test_datasource/test_smartos.py =================================================================== --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -32,6 +32,12 @@ import re import stat import uuid + +try: + from unittest import mock +except ImportError: + import mock + MOCK_RETURNS = { 'hostname': 'test-host', 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname', @@ -41,17 +47,18 @@ MOCK_RETURNS = { 'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']), 'sdc:datacenter_name': 'somewhere2', 'sdc:operator-script': '\n'.join(['bin/true', '']), + 'sdc:uuid': str(uuid.uuid4()), 'sdc:vendor-data': '\n'.join(['VENDOR_DATA', '']), 'user-data': '\n'.join(['something', '']), 'user-script': '\n'.join(['/bin/true', '']), } -DMI_DATA_RETURN = (str(uuid.uuid4()), 'smartdc') +DMI_DATA_RETURN = 'smartdc' -class MockSerial(object): - """Fake a serial terminal for testing the code that - interfaces with the serial""" +class MockMetaFile(object): + """Fake a metadata file object for testing the code that + reads/writes the metadata source""" port = None @@ -75,6 +82,9 @@ class MockSerial(object): def write(self, line): line = line.replace('GET ', '') self.last = line.rstrip() + self.new = True + self.count = 0 + self.mocked_out = [] def readline(self): if self.new: @@ -140,7 +150,8 @@ class TestSmartOSDataSource(helpers.File ret = apply_patches(patches) self.unapply += ret - def _get_ds(self, sys_cfg=None, ds_cfg=None, mockdata=None, dmi_data=None): + def _get_ds(self, sys_cfg=None, ds_cfg=None, mockdata=None, dmi_data=None, + is_lxbrand=False): mod = DataSourceSmartOS if mockdata is None: @@ -149,16 +160,17 @@ class TestSmartOSDataSource(helpers.File if dmi_data is None: dmi_data = DMI_DATA_RETURN - def _get_serial(*_): - return MockSerial(mockdata) - def _dmi_data(): return dmi_data def _os_uname(): - # LP: #1243287. tests assume this runs, but running test on - # arm would cause them all to fail. - return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', 'x86_64') + if not is_lxbrand: + # LP: #1243287. tests assume this runs, but running test on + # arm would cause them all to fail. 
+ return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', 'x86_64') + else: + return ('LINUX', 'NODENAME', 'RELEASE', 'BRANDZ VIRTUAL LINUX', + 'X86_64') if sys_cfg is None: sys_cfg = {} @@ -168,11 +180,17 @@ class TestSmartOSDataSource(helpers.File sys_cfg['datasource']['SmartOS'] = ds_cfg self.apply_patches([(mod, 'LEGACY_USER_D', self.legacy_user_d)]) - self.apply_patches([(mod, 'get_serial', _get_serial)]) self.apply_patches([(mod, 'dmi_data', _dmi_data)]) self.apply_patches([(os, 'uname', _os_uname)]) + self.apply_patches([(mod, 'device_exists', lambda d: True)]) dsrc = mod.DataSourceSmartOS(sys_cfg, distro=None, paths=self.paths) + + def _get_seed_file_object(): + return MockMetaFile(mockdata) + + self.apply_patches([(dsrc, '_get_seed_file_object', + _get_seed_file_object)]) return dsrc def test_seed(self): @@ -180,14 +198,29 @@ class TestSmartOSDataSource(helpers.File dsrc = self._get_ds() ret = dsrc.get_data() self.assertTrue(ret) + self.assertEquals('kvm', dsrc.smartos_type) self.assertEquals('/dev/ttyS1', dsrc.seed) + def test_seed_lxbrand(self): + # default seed should be /dev/ttyS1 + dsrc = self._get_ds(is_lxbrand=True) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals('lx-brand', dsrc.smartos_type) + self.assertEquals('/native/.zonecontrol/metadata.sock', dsrc.seed) + def test_issmartdc(self): dsrc = self._get_ds() ret = dsrc.get_data() self.assertTrue(ret) self.assertTrue(dsrc.is_smartdc) + def test_issmartdc_lxbrand(self): + dsrc = self._get_ds(is_lxbrand=True) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertTrue(dsrc.is_smartdc) + def test_no_base64(self): ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True} dsrc = self._get_ds(ds_cfg=ds_cfg) @@ -198,7 +231,8 @@ class TestSmartOSDataSource(helpers.File dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEquals(DMI_DATA_RETURN[0], dsrc.metadata['instance-id']) + self.assertEquals(MOCK_RETURNS['sdc:uuid'], + dsrc.metadata['instance-id']) def test_root_keys(self): dsrc = self._get_ds(mockdata=MOCK_RETURNS) debian/patches/lp-1404311-gce-data_encoding.patch0000664000000000000000000000265012574667652016426 0ustar Author: Ben Howard Bug: https://launchpad.net/bugs/1404311 Applied-Upstream: yes Description: Allow for user-defined encoding on user-data to address GCE's mangling of meta-data. --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -15,6 +15,8 @@ # along with this program. If not, see . 
+from base64 import b64decode + from cloudinit import log as logging from cloudinit import util from cloudinit import sources @@ -58,6 +60,8 @@ ('local-hostname', 'instance/hostname', True), ('public-keys', 'project/attributes/sshKeys', False), ('user-data', 'instance/attributes/user-data', False), + ('user-data-encoding', 'instance/attributes/user-data-encoding', + False), ] # if we cannot resolve the metadata server, then no point in trying @@ -101,6 +105,14 @@ lines = self.metadata['public-keys'].splitlines() self.metadata['public-keys'] = [self._trim_key(k) for k in lines] + encoding = self.metadata.get('user-data-encoding') + if encoding: + if encoding == 'base64': + self.metadata['user-data'] = b64decode( + self.metadata['user-data']) + else: + LOG.warn('unknown user-data-encoding: %s, ignoring', encoding) + return found @property debian/patches/lp-1375252-1458052-Azure-hostname_password.patch0000664000000000000000000007577512574667652020622 0ustar Description: Backport the 15.10 Azure Datasource Backport of 15.10 Azure Datasource to fix various issues: - Azure Datasource writes user password in plain text (LP: #1458052). - Hostname not preserved across Azure reboots (LP: #1375252). Author: Ben Howard Bug-Ubuntu: https://bugs.launchpad.net/bugs/1375252 Bug-Ubuntu: https://bugs.launchpad.net/bugs/1458052 Forwarded: yes --- cloud-init-0.7.6~bzr1022.orig/cloudinit/sources/DataSourceAzure.py +++ cloud-init-0.7.6~bzr1022/cloudinit/sources/DataSourceAzure.py @@ -17,17 +17,22 @@ # along with this program. If not, see . import base64 +import contextlib import crypt import fnmatch import os import os.path import time +import xml.etree.ElementTree as ET + from xml.dom import minidom from cloudinit import log as logging from cloudinit.settings import PER_ALWAYS from cloudinit import sources from cloudinit import util +from cloudinit.sources.helpers.azure import ( + get_metadata_from_fabric, iid_from_shared_config_content) LOG = logging.getLogger(__name__) @@ -65,6 +70,40 @@ BUILTIN_CLOUD_CONFIG = { DS_CFG_PATH = ['datasource', DS_NAME] DEF_EPHEMERAL_LABEL = 'Temporary Storage' +# The redacted password fails to meet password complexity requirements +# so we can safely use this to mask/redact the password in the ovf-env.xml +DEF_PASSWD_REDACTION = 'REDACTED' + + +def get_hostname(hostname_command='hostname'): + return util.subp(hostname_command, capture=True)[0].strip() + + +def set_hostname(hostname, hostname_command='hostname'): + util.subp([hostname_command, hostname]) + + +@contextlib.contextmanager +def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): + """ + Set a temporary hostname, restoring the previous hostname on exit. + + Will have the value of the previous hostname when used as a context + manager, or None if the hostname was not changed. 
+ """ + policy = cfg['hostname_bounce']['policy'] + previous_hostname = get_hostname(hostname_command) + if (not util.is_true(cfg.get('set_hostname')) + or util.is_false(policy) + or (previous_hostname == temp_hostname and policy != 'force')): + yield None + return + set_hostname(temp_hostname, hostname_command) + try: + yield previous_hostname + finally: + set_hostname(previous_hostname, hostname_command) + class DataSourceAzureNet(sources.DataSource): def __init__(self, sys_cfg, distro, paths): @@ -80,6 +119,56 @@ class DataSourceAzureNet(sources.DataSou root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) + def get_metadata_from_agent(self): + temp_hostname = self.metadata.get('local-hostname') + hostname_command = self.ds_cfg['hostname_bounce']['hostname_command'] + with temporary_hostname(temp_hostname, self.ds_cfg, + hostname_command=hostname_command) \ + as previous_hostname: + if (previous_hostname is not None + and util.is_true(self.ds_cfg.get('set_hostname'))): + cfg = self.ds_cfg['hostname_bounce'] + try: + perform_hostname_bounce(hostname=temp_hostname, + cfg=cfg, + prev_hostname=previous_hostname) + except Exception as e: + LOG.warn("Failed publishing hostname: %s", e) + util.logexc(LOG, "handling set_hostname failed") + + try: + invoke_agent(self.ds_cfg['agent_command']) + except util.ProcessExecutionError: + # claim the datasource even if the command failed + util.logexc(LOG, "agent command '%s' failed.", + self.ds_cfg['agent_command']) + + ddir = self.ds_cfg['data_dir'] + shcfgxml = os.path.join(ddir, "SharedConfig.xml") + wait_for = [shcfgxml] + + fp_files = [] + for pk in self.cfg.get('_pubkeys', []): + bname = str(pk['fingerprint'] + ".crt") + fp_files += [os.path.join(ddir, bname)] + + missing = util.log_time(logfunc=LOG.debug, msg="waiting for files", + func=wait_for_files, + args=(wait_for + fp_files,)) + if len(missing): + LOG.warn("Did not find files, but going on: %s", missing) + + metadata = {} + if shcfgxml in missing: + LOG.warn("SharedConfig.xml missing, using static instance-id") + else: + try: + metadata['instance-id'] = iid_from_shared_config(shcfgxml) + except ValueError as e: + LOG.warn("failed to get instance id in %s: %s", shcfgxml, e) + metadata['public-keys'] = pubkeys_from_crt_files(fp_files) + return metadata + def get_data(self): # azure removes/ejects the cdrom containing the ovf-env.xml # file on reboot. So, in order to successfully reboot we @@ -131,8 +220,6 @@ class DataSourceAzureNet(sources.DataSou # now update ds_cfg to reflect contents pass in config user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) - mycfg = self.ds_cfg - ddir = mycfg['data_dir'] if found != ddir: cached_ovfenv = util.load_file( @@ -151,48 +238,20 @@ class DataSourceAzureNet(sources.DataSou # walinux agent writes files world readable, but expects # the directory to be protected. 
- write_files(ddir, files, dirmode=0700) - - # handle the hostname 'publishing' - try: - handle_set_hostname(mycfg.get('set_hostname'), - self.metadata.get('local-hostname'), - mycfg['hostname_bounce']) - except Exception as e: - LOG.warn("Failed publishing hostname: %s", e) - util.logexc(LOG, "handling set_hostname failed") + write_files(ddir, files, dirmode=0o700) - try: - invoke_agent(mycfg['agent_command']) - except util.ProcessExecutionError: - # claim the datasource even if the command failed - util.logexc(LOG, "agent command '%s' failed.", - mycfg['agent_command']) - - shcfgxml = os.path.join(ddir, "SharedConfig.xml") - wait_for = [shcfgxml] - - fp_files = [] - for pk in self.cfg.get('_pubkeys', []): - bname = str(pk['fingerprint'] + ".crt") - fp_files += [os.path.join(ddir, bname)] - - missing = util.log_time(logfunc=LOG.debug, msg="waiting for files", - func=wait_for_files, - args=(wait_for + fp_files,)) - if len(missing): - LOG.warn("Did not find files, but going on: %s", missing) - - if shcfgxml in missing: - LOG.warn("SharedConfig.xml missing, using static instance-id") + if self.ds_cfg['agent_command'] == '__builtin__': + metadata_func = get_metadata_from_fabric else: - try: - self.metadata['instance-id'] = iid_from_shared_config(shcfgxml) - except ValueError as e: - LOG.warn("failed to get instance id in %s: %s", shcfgxml, e) + metadata_func = self.get_metadata_from_agent + try: + fabric_data = metadata_func() + except Exception as exc: + LOG.info("Error communicating with Azure fabric; assume we aren't" + " on Azure.", exc_info=True) + return False - pubkeys = pubkeys_from_crt_files(fp_files) - self.metadata['public-keys'] = pubkeys + self.metadata.update(fabric_data) found_ephemeral = find_ephemeral_disk() if found_ephemeral: @@ -298,39 +357,15 @@ def support_new_ephemeral(cfg): return mod_list -def handle_set_hostname(enabled, hostname, cfg): - if not util.is_true(enabled): - return - - if not hostname: - LOG.warn("set_hostname was true but no local-hostname") - return - - apply_hostname_bounce(hostname=hostname, policy=cfg['policy'], - interface=cfg['interface'], - command=cfg['command'], - hostname_command=cfg['hostname_command']) - - -def apply_hostname_bounce(hostname, policy, interface, command, - hostname_command="hostname"): +def perform_hostname_bounce(hostname, cfg, prev_hostname): # set the hostname to 'hostname' if it is not already set to that. # then, if policy is not off, bounce the interface using command - prev_hostname = util.subp(hostname_command, capture=True)[0].strip() - - util.subp([hostname_command, hostname]) - - msg = ("phostname=%s hostname=%s policy=%s interface=%s" % - (prev_hostname, hostname, policy, interface)) - - if util.is_false(policy): - LOG.debug("pubhname: policy false, skipping [%s]", msg) - return - - if prev_hostname == hostname and policy != "force": - LOG.debug("pubhname: no change, policy != force. skipping. [%s]", msg) - return + command = cfg['command'] + interface = cfg['interface'] + policy = cfg['policy'] + msg = ("hostname=%s policy=%s interface=%s" % + (hostname, policy, interface)) env = os.environ.copy() env['interface'] = interface env['hostname'] = hostname @@ -343,15 +378,16 @@ def apply_hostname_bounce(hostname, poli shell = not isinstance(command, (list, tuple)) # capture=False, see comments in bug 1202758 and bug 1206164. 
util.log_time(logfunc=LOG.debug, msg="publishing hostname", - get_uptime=True, func=util.subp, - kwargs={'args': command, 'shell': shell, 'capture': False, - 'env': env}) + get_uptime=True, func=util.subp, + kwargs={'args': command, 'shell': shell, 'capture': False, + 'env': env}) -def crtfile_to_pubkey(fname): +def crtfile_to_pubkey(fname, data=None): pipeline = ('openssl x509 -noout -pubkey < "$0" |' 'ssh-keygen -i -m PKCS8 -f /dev/stdin') - (out, _err) = util.subp(['sh', '-c', pipeline, fname], capture=True) + (out, _err) = util.subp(['sh', '-c', pipeline, fname], + capture=True, data=data) return out.rstrip() @@ -383,14 +419,30 @@ def wait_for_files(flist, maxwait=60, na def write_files(datadir, files, dirmode=None): + + def _redact_password(cnt, fname): + """Azure provides the UserPassword in plain text. So we redact it""" + try: + root = ET.fromstring(cnt) + for elem in root.iter(): + if ('UserPassword' in elem.tag and + elem.text != DEF_PASSWD_REDACTION): + elem.text = DEF_PASSWD_REDACTION + return ET.tostring(root) + except Exception as e: + LOG.critical("failed to redact userpassword in {}".format(fname)) + return cnt + if not datadir: return if not files: files = {} util.ensure_dir(datadir, dirmode) for (name, content) in files.items(): - util.write_file(filename=os.path.join(datadir, name), - content=content, mode=0600) + fname = os.path.join(datadir, name) + if 'ovf-env.xml' in name: + content = _redact_password(content, fname) + util.write_file(filename=fname, content=content, mode=0o600) def invoke_agent(cmd): @@ -461,20 +513,6 @@ def load_azure_ovf_pubkeys(sshnode): return found -def single_node_at_path(node, pathlist): - curnode = node - for tok in pathlist: - results = find_child(curnode, lambda n: n.localName == tok) - if len(results) == 0: - raise ValueError("missing %s token in %s" % (tok, str(pathlist))) - if len(results) > 1: - raise ValueError("found %s nodes of type %s looking for %s" % - (len(results), tok, str(pathlist))) - curnode = results[0] - - return curnode - - def read_azure_ovf(contents): try: dom = minidom.parseString(contents) @@ -559,7 +597,7 @@ def read_azure_ovf(contents): defuser = {} if username: defuser['name'] = username - if password: + if password and DEF_PASSWD_REDACTION != password: defuser['passwd'] = encrypt_pass(password) defuser['lock_passwd'] = False @@ -592,7 +630,7 @@ def load_azure_ds_dir(source_dir): if not os.path.isfile(ovf_file): raise NonAzureDataSource("No ovf-env file found") - with open(ovf_file, "r") as fp: + with open(ovf_file, "rb") as fp: contents = fp.read() md, ud, cfg = read_azure_ovf(contents) @@ -605,19 +643,6 @@ def iid_from_shared_config(path): return iid_from_shared_config_content(content) -def iid_from_shared_config_content(content): - """ - find INSTANCE_ID in: - - - - - """ - dom = minidom.parseString(content) - depnode = single_node_at_path(dom, ["SharedConfig", "Deployment"]) - return depnode.attributes.get('name').value - - class BrokenAzureDataSource(Exception): pass --- /dev/null +++ cloud-init-0.7.6~bzr1022/cloudinit/sources/helpers/azure.py @@ -0,0 +1,293 @@ +import logging +import os +import re +import socket +import struct +import tempfile +import time +from contextlib import contextmanager +from xml.etree import ElementTree + +from cloudinit import util + + +LOG = logging.getLogger(__name__) + + +@contextmanager +def cd(newdir): + prevdir = os.getcwd() + os.chdir(os.path.expanduser(newdir)) + try: + yield + finally: + os.chdir(prevdir) + + +class AzureEndpointHttpClient(object): + + headers = { + 
'x-ms-agent-name': 'WALinuxAgent', + 'x-ms-version': '2012-11-30', + } + + def __init__(self, certificate): + self.extra_secure_headers = { + "x-ms-cipher-name": "DES_EDE3_CBC", + "x-ms-guest-agent-public-x509-cert": certificate, + } + + def get(self, url, secure=False): + headers = self.headers + if secure: + headers = self.headers.copy() + headers.update(self.extra_secure_headers) + return util.read_file_or_url(url, headers=headers) + + def post(self, url, data=None, extra_headers=None): + headers = self.headers + if extra_headers is not None: + headers = self.headers.copy() + headers.update(extra_headers) + return util.read_file_or_url(url, data=data, headers=headers) + + +class GoalState(object): + + def __init__(self, xml, http_client): + self.http_client = http_client + self.root = ElementTree.fromstring(xml) + self._certificates_xml = None + + def _text_from_xpath(self, xpath): + element = self.root.find(xpath) + if element is not None: + return element.text + return None + + @property + def container_id(self): + return self._text_from_xpath('./Container/ContainerId') + + @property + def incarnation(self): + return self._text_from_xpath('./Incarnation') + + @property + def instance_id(self): + return self._text_from_xpath( + './Container/RoleInstanceList/RoleInstance/InstanceId') + + @property + def shared_config_xml(self): + url = self._text_from_xpath('./Container/RoleInstanceList/RoleInstance' + '/Configuration/SharedConfig') + return self.http_client.get(url).contents + + @property + def certificates_xml(self): + if self._certificates_xml is None: + url = self._text_from_xpath( + './Container/RoleInstanceList/RoleInstance' + '/Configuration/Certificates') + if url is not None: + self._certificates_xml = self.http_client.get( + url, secure=True).contents + return self._certificates_xml + + +class OpenSSLManager(object): + + certificate_names = { + 'private_key': 'TransportPrivate.pem', + 'certificate': 'TransportCert.pem', + } + + def __init__(self): + self.tmpdir = tempfile.mkdtemp() + self.certificate = None + self.generate_certificate() + + def clean_up(self): + util.del_dir(self.tmpdir) + + def generate_certificate(self): + LOG.debug('Generating certificate for communication with fabric...') + if self.certificate is not None: + LOG.debug('Certificate already generated.') + return + with cd(self.tmpdir): + util.subp([ + 'openssl', 'req', '-x509', '-nodes', '-subj', + '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048', + '-keyout', self.certificate_names['private_key'], + '-out', self.certificate_names['certificate'], + ]) + certificate = '' + for line in open(self.certificate_names['certificate']): + if "CERTIFICATE" not in line: + certificate += line.rstrip() + self.certificate = certificate + LOG.debug('New certificate generated.') + + def parse_certificates(self, certificates_xml): + tag = ElementTree.fromstring(certificates_xml).find( + './/Data') + certificates_content = tag.text + lines = [ + b'MIME-Version: 1.0', + b'Content-Disposition: attachment; filename="Certificates.p7m"', + b'Content-Type: application/x-pkcs7-mime; name="Certificates.p7m"', + b'Content-Transfer-Encoding: base64', + b'', + certificates_content.encode('utf-8'), + ] + with cd(self.tmpdir): + with open('Certificates.p7m', 'wb') as f: + f.write(b'\n'.join(lines)) + out, _ = util.subp( + 'openssl cms -decrypt -in Certificates.p7m -inkey' + ' {private_key} -recip {certificate} | openssl pkcs12 -nodes' + ' -password pass:'.format(**self.certificate_names), + shell=True) + private_keys, 
certificates = [], [] + current = [] + for line in out.splitlines(): + current.append(line) + if re.match(r'[-]+END .*?KEY[-]+$', line): + private_keys.append('\n'.join(current)) + current = [] + elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line): + certificates.append('\n'.join(current)) + current = [] + keys = [] + for certificate in certificates: + with cd(self.tmpdir): + public_key, _ = util.subp( + 'openssl x509 -noout -pubkey |' + 'ssh-keygen -i -m PKCS8 -f /dev/stdin', + data=certificate, + shell=True) + keys.append(public_key) + return keys + + +def iid_from_shared_config_content(content): + """ + find INSTANCE_ID in: + + + + + """ + root = ElementTree.fromstring(content) + depnode = root.find('Deployment') + return depnode.get('name') + + +class WALinuxAgentShim(object): + + REPORT_READY_XML_TEMPLATE = '\n'.join([ + '', + '', + ' {incarnation}', + ' ', + ' {container_id}', + ' ', + ' ', + ' {instance_id}', + ' ', + ' Ready', + ' ', + ' ', + ' ', + ' ', + '']) + + def __init__(self): + LOG.debug('WALinuxAgentShim instantiated...') + self.endpoint = self.find_endpoint() + self.openssl_manager = None + self.values = {} + + def clean_up(self): + if self.openssl_manager is not None: + self.openssl_manager.clean_up() + + @staticmethod + def find_endpoint(): + LOG.debug('Finding Azure endpoint...') + content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases') + value = None + for line in content.splitlines(): + if 'unknown-245' in line: + value = line.strip(' ').split(' ', 2)[-1].strip(';\n"') + if value is None: + raise Exception('No endpoint found in DHCP config.') + if ':' in value: + hex_string = '' + for hex_pair in value.split(':'): + if len(hex_pair) == 1: + hex_pair = '0' + hex_pair + hex_string += hex_pair + value = struct.pack('>L', int(hex_string.replace(':', ''), 16)) + else: + value = value.encode('utf-8') + endpoint_ip_address = socket.inet_ntoa(value) + LOG.debug('Azure endpoint found at %s', endpoint_ip_address) + return endpoint_ip_address + + def register_with_azure_and_fetch_data(self): + self.openssl_manager = OpenSSLManager() + http_client = AzureEndpointHttpClient(self.openssl_manager.certificate) + LOG.info('Registering with Azure...') + attempts = 0 + while True: + try: + response = http_client.get( + 'http://{0}/machine/?comp=goalstate'.format(self.endpoint)) + except Exception: + if attempts < 10: + time.sleep(attempts + 1) + else: + raise + else: + break + attempts += 1 + LOG.debug('Successfully fetched GoalState XML.') + goal_state = GoalState(response.contents, http_client) + public_keys = [] + if goal_state.certificates_xml is not None: + LOG.debug('Certificate XML found; parsing out public keys.') + public_keys = self.openssl_manager.parse_certificates( + goal_state.certificates_xml) + data = { + 'instance-id': iid_from_shared_config_content( + goal_state.shared_config_xml), + 'public-keys': public_keys, + } + self._report_ready(goal_state, http_client) + return data + + def _report_ready(self, goal_state, http_client): + LOG.debug('Reporting ready to Azure fabric.') + document = self.REPORT_READY_XML_TEMPLATE.format( + incarnation=goal_state.incarnation, + container_id=goal_state.container_id, + instance_id=goal_state.instance_id, + ) + http_client.post( + "http://{0}/machine?comp=health".format(self.endpoint), + data=document, + extra_headers={'Content-Type': 'text/xml; charset=utf-8'}, + ) + LOG.info('Reported ready to Azure fabric.') + + +def get_metadata_from_fabric(): + shim = WALinuxAgentShim() + try: + return 
shim.register_with_azure_and_fetch_data() + finally: + shim.clean_up() --- cloud-init-0.7.6~bzr1022.orig/tests/unittests/test_datasource/test_azure.py +++ cloud-init-0.7.6~bzr1022/tests/unittests/test_datasource/test_azure.py @@ -9,6 +9,20 @@ from mocker import MockerTestCase import os import stat import yaml +import xml.etree.ElementTree as ET + +OVERRIDE_BUILTIN_DS_CONFIG = { + 'agent_command': ['bin/true'], + 'data_dir': "/var/lib/waagent", + 'set_hostname': True, + 'hostname_bounce': { + 'interface': 'eth0', + 'policy': False, + 'command': '/bin/true', + 'hostname_command': '/bin/true', + }, + 'disk_aliases': {'ephemeral0': '/dev/sdb'}, +} def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None): @@ -107,14 +121,13 @@ class TestAzureDataSource(MockerTestCase data['iid_from_shared_cfg'] = path return 'i-my-azure-id' - def _apply_hostname_bounce(**kwargs): - data['apply_hostname_bounce'] = kwargs - if data.get('ovfcontent') is not None: populate_dir(os.path.join(self.paths.seed_dir, "azure"), {'ovf-env.xml': data['ovfcontent']}) + mod = DataSourceAzure + mod.BUILTIN_DS_CONFIG = OVERRIDE_BUILTIN_DS_CONFIG mod.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d self.apply_patches([(mod, 'list_possible_azure_ds_devs', dsdevs)]) @@ -124,15 +137,46 @@ class TestAzureDataSource(MockerTestCase (mod, 'pubkeys_from_crt_files', _pubkeys_from_crt_files), (mod, 'iid_from_shared_config', - _iid_from_shared_config), - (mod, 'apply_hostname_bounce', - _apply_hostname_bounce), ]) + _iid_from_shared_config)]) dsrc = mod.DataSourceAzureNet( data.get('sys_cfg', {}), distro=None, paths=self.paths) return dsrc + def xml_equals(self, oxml, nxml): + """Compare two sets of XML to make sure they are equal""" + + def create_tag_index(xml): + et = ET.fromstring(xml) + ret = {} + for x in et.iter(): + ret[x.tag] = x + return ret + + def tags_exists(x, y): + for tag in x.keys(): + self.assertIn(tag, y) + for tag in y.keys(): + self.assertIn(tag, x) + + def tags_equal(x, y): + for x_tag, x_val in x.items(): + y_val = y.get(x_val.tag) + self.assertEquals(x_val.text, y_val.text) + + old_cnt = create_tag_index(oxml) + new_cnt = create_tag_index(nxml) + tags_exists(old_cnt, new_cnt) + tags_equal(old_cnt, new_cnt) + + def xml_notequals(self, oxml, nxml): + try: + self.xml_equals(oxml, nxml) + except AssertionError as e: + return + raise AssertionError("XML is the same") + def test_basic_seed_dir(self): odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata), @@ -258,44 +302,6 @@ class TestAzureDataSource(MockerTestCase def test_disabled_bounce(self): pass - def test_apply_bounce_call_1(self): - # hostname needs to get through to apply_hostname_bounce - odata = {'HostName': 'my-random-hostname'} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} - - self._get_ds(data).get_data() - self.assertIn('hostname', data['apply_hostname_bounce']) - self.assertEqual(data['apply_hostname_bounce']['hostname'], - odata['HostName']) - - def test_apply_bounce_call_configurable(self): - # hostname_bounce should be configurable in datasource cfg - cfg = {'hostname_bounce': {'interface': 'eth1', 'policy': 'off', - 'command': 'my-bounce-command', - 'hostname_command': 'my-hostname-command'}} - odata = {'HostName': "xhost", - 'dscfg': {'text': base64.b64encode(yaml.dump(cfg)), - 'encoding': 'base64'}} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} - self._get_ds(data).get_data() - - for k in cfg['hostname_bounce']: - self.assertIn(k, 
data['apply_hostname_bounce']) - - for k, v in cfg['hostname_bounce'].items(): - self.assertEqual(data['apply_hostname_bounce'][k], v) - - def test_set_hostname_disabled(self): - # config specifying set_hostname off should not bounce - cfg = {'set_hostname': False} - odata = {'HostName': "xhost", - 'dscfg': {'text': base64.b64encode(yaml.dump(cfg)), - 'encoding': 'base64'}} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} - self._get_ds(data).get_data() - - self.assertEqual(data.get('apply_hostname_bounce', "N/A"), "N/A") - def test_default_ephemeral(self): # make sure the ephemeral device works odata = {} @@ -342,6 +348,31 @@ class TestAzureDataSource(MockerTestCase self.assertEqual(userdata, dsrc.userdata_raw) + def test_password_redacted_in_ovf(self): + odata = {'HostName': "myhost", 'UserName': "myuser", + 'UserPassword': "mypass"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + dsrc = self._get_ds(data) + ret = dsrc.get_data() + + self.assertTrue(ret) + ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml') + + # The XML should not be same since the user password is redacted + on_disk_ovf = load_file(ovf_env_path) + self.xml_notequals(data['ovfcontent'], on_disk_ovf) + + # Make sure that the redacted password on disk is not used by CI + self.assertNotEquals(dsrc.cfg.get('password'), + DataSourceAzure.DEF_PASSWD_REDACTION) + + # Make sure that the password was really encrypted + et = ET.fromstring(on_disk_ovf) + for elem in et.iter(): + if 'UserPassword' in elem.tag: + self.assertEquals(DataSourceAzure.DEF_PASSWD_REDACTION, + elem.text) + def test_ovf_env_arrives_in_waagent_dir(self): xml = construct_valid_ovf_env(data={}, userdata="FOODATA") dsrc = self._get_ds({'ovfcontent': xml}) @@ -351,7 +382,7 @@ class TestAzureDataSource(MockerTestCase # we expect that the ovf-env.xml file is copied there. ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml') self.assertTrue(os.path.exists(ovf_env_path)) - self.assertEqual(xml, load_file(ovf_env_path)) + self.xml_equals(xml, load_file(ovf_env_path)) def test_existing_ovf_same(self): # waagent/SharedConfig left alone if found ovf-env.xml same as cached @@ -398,9 +429,8 @@ class TestAzureDataSource(MockerTestCase os.path.exists(os.path.join(self.waagent_d, 'SharedConfig.xml'))) self.assertTrue( os.path.exists(os.path.join(self.waagent_d, 'ovf-env.xml'))) - self.assertEqual(new_ovfenv, - load_file(os.path.join(self.waagent_d, 'ovf-env.xml'))) - + new_xml = load_file(os.path.join(self.waagent_d, 'ovf-env.xml')) + self.xml_equals(new_ovfenv, new_xml) class TestReadAzureOvf(MockerTestCase): def test_invalid_xml_raises_non_azure_ds(self): debian/patches/lp-1469260-fix-consumption-of-vendor-data.patch0000664000000000000000000001312312574667652021074 0ustar Author: Scott Moser Bug: https://bugs.launchpad.net/cloud-init/+bug/1469260 Applied-Upstream: yes Origin: http://bazaar.launchpad.net/~cloud-init-dev/cloud-init/trunk/revision/1013 Description: OpenStack: fix consumption of vendor-data to allow namespacing . Not all vendor data is destined for cloud-init. This sanely reads the vendor data as a dict, array or a string. 
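 .
 A minimal usage sketch of the convert_vendordata_json() helper introduced
 below (it mirrors the unit tests added at the end of this patch and assumes
 cloud-init's modules are importable; python 2, as used by this series):
 .
   from cloudinit.sources.helpers import openstack
   # None or an empty vendor_data.json means no vendor-data for cloud-init
   assert openstack.convert_vendordata_json(None) is None
   # a plain string or a list is handed straight to the UserDataProcessor
   assert openstack.convert_vendordata_json("#cloud-config") == "#cloud-config"
   assert openstack.convert_vendordata_json(["vd-1", "vd-2"]) == ["vd-1", "vd-2"]
   # a dict is treated as a namespace: only the 'cloud-init' key is consumed
   data = {"other-agent": {"x": 1}, "cloud-init": "#cloud-config"}
   assert openstack.convert_vendordata_json(data) == "#cloud-config"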
--- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -125,7 +125,14 @@ self.userdata_raw = results.get('userdata') self.version = results['version'] self.files.update(results.get('files', {})) - self.vendordata_raw = results.get('vendordata') + vd = results.get('vendordata') + self.vendordata_pure = vd + try: + self.vendordata_raw = openstack.convert_vendordata_json(vd) + except ValueError as e: + LOG.warn("Invalid content in vendor-data: %s", e) + self.vendordata_raw = None + return True --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -143,13 +143,13 @@ self.version = results['version'] self.files.update(results.get('files', {})) - # if vendordata includes 'cloud-init', then read that explicitly - # for cloud-init (for namespacing). vd = results.get('vendordata') - if isinstance(vd, dict) and 'cloud-init' in vd: - self.vendordata_raw = vd['cloud-init'] - else: - self.vendordata_raw = vd + self.vendordata_pure = vd + try: + self.vendordata_raw = openstack.convert_vendordata_json(vd) + except ValueError as e: + LOG.warn("Invalid content in vendor-data: %s", e) + self.vendordata_raw = None return True --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -21,6 +21,7 @@ import abc import base64 import copy +import functools import os from cloudinit import ec2_utils @@ -196,6 +197,9 @@ If not a valid location, raise a NonReadable exception. """ + load_json_anytype = functools.partial( + util.load_json, root_types=(dict, basestring, list)) + def datafiles(version): files = {} files['metadata'] = ( @@ -214,7 +218,7 @@ files['vendordata'] = ( self._path_join("openstack", version, 'vendor_data.json'), False, - util.load_json, + load_json_anytype, ) return files @@ -437,3 +441,28 @@ return ec2_utils.get_instance_metadata(ssl_details=self.ssl_details, timeout=self.timeout, retries=self.retries) + + +def convert_vendordata_json(data, recurse=True): + """ data: a loaded json *object* (strings, arrays, dicts). + return something suitable for cloudinit vendordata_raw. 
+ + if data is: + None: return None + string: return string + list: return data + the list is then processed in UserDataProcessor + dict: return convert_vendordata_json(data.get('cloud-init')) + """ + if not data: + return None + if isinstance(data, (str, unicode, basestring)): + return data + if isinstance(data, list): + return copy.deepcopy(data) + if isinstance(data, dict): + if recurse is True: + return convert_vendordata_json(data.get('cloud-init'), + recurse=False) + raise ValueError("vendordata['cloud-init'] cannot be dict") + raise ValueError("Unknown data type for vendordata: %s" % type(data)) --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -19,6 +19,7 @@ import copy import json import re +import unittest from StringIO import StringIO @@ -241,7 +242,8 @@ self.assertEquals(EC2_META, ds_os.ec2_metadata) self.assertEquals(USER_DATA, ds_os.userdata_raw) self.assertEquals(2, len(ds_os.files)) - self.assertEquals(VENDOR_DATA, ds_os.vendordata_raw) + self.assertEquals(VENDOR_DATA, ds_os.vendordata_pure) + self.assertEquals(ds_os.vendordata_raw, None) @hp.activate def test_bad_datasource_meta(self): @@ -299,3 +301,34 @@ found = ds_os.get_data() self.assertFalse(found) self.assertIsNone(ds_os.version) + + +class TestVendorDataLoading(unittest.TestCase): + def cvj(self, data): + return openstack.convert_vendordata_json(data) + + def test_vd_load_none(self): + # non-existant vendor-data should return none + self.assertIsNone(self.cvj(None)) + + def test_vd_load_string(self): + self.assertEqual(self.cvj("foobar"), "foobar") + + def test_vd_load_list(self): + data = [{'foo': 'bar'}, 'mystring', list(['another', 'list'])] + self.assertEqual(self.cvj(data), data) + + def test_vd_load_dict_no_ci(self): + self.assertEqual(self.cvj({'foo': 'bar'}), None) + + def test_vd_load_dict_ci_dict(self): + self.assertRaises(ValueError, self.cvj, + {'foo': 'bar', 'cloud-init': {'x': 1}}) + + def test_vd_load_dict_ci_string(self): + data = {'foo': 'bar', 'cloud-init': 'VENDOR_DATA'} + self.assertEqual(self.cvj(data), data['cloud-init']) + + def test_vd_load_dict_ci_list(self): + data = {'foo': 'bar', 'cloud-init': ['VD_1', 'VD_2']} + self.assertEqual(self.cvj(data), data['cloud-init']) debian/patches/lp-1356855-fix-cloudstack-metadata.patch0000664000000000000000000000177512574667652017643 0ustar Description: Backport CloudStack metadata fix. CloudStack requires a trailing slash for its EC2 "compatible" metadata service. Author: Daniel Watkins Bug-Ubuntu: https://bugs.launchpad.net/bugs/1356855 Last-Update: 2015-02-25 --- cloud-init-0.7.5.orig/cloudinit/ec2_utils.py +++ cloud-init-0.7.5/cloudinit/ec2_utils.py @@ -166,7 +166,9 @@ def get_instance_metadata(api_version='l metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5): md_url = url_helper.combine_url(metadata_address, api_version) - md_url = url_helper.combine_url(md_url, 'meta-data') + # Note, 'meta-data' explicitly has trailing /. 
+ # this is required for CloudStack (LP: #1356855) + md_url = url_helper.combine_url(md_url, 'meta-data/') caller = functools.partial(util.read_file_or_url, ssl_details=ssl_details, timeout=timeout, retries=retries) debian/patches/lp-1461242-generate-ed25519-host-keys.patch0000664000000000000000000001144012574667710017623 0ustar Author: Scott Moser Bug: https://launchpad.net/bugs/1461242 Applied-Upstream: yes Origin: http://bazaar.launchpad.net/~cloud-init-dev/cloud-init/trunk/revision/1125 Description: ssh: generate ed25519 host keys if supported . now we attempt to generate ed25519 host keys. If ssh-keygen does not support it, a debug log message will be written. === modified file 'cloudinit/config/cc_ssh.py' --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -20,6 +20,7 @@ import glob import os +import sys # Ensure this is aliased to a name not 'distros' # since the module attribute 'distros' @@ -33,26 +34,18 @@ DISABLE_ROOT_OPTS = ("no-port-forwarding "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" " "rather than the user \\\"root\\\".\';echo;sleep 10\"") -KEY_2_FILE = { - "rsa_private": ("/etc/ssh/ssh_host_rsa_key", 0600), - "rsa_public": ("/etc/ssh/ssh_host_rsa_key.pub", 0644), - "dsa_private": ("/etc/ssh/ssh_host_dsa_key", 0600), - "dsa_public": ("/etc/ssh/ssh_host_dsa_key.pub", 0644), - "ecdsa_private": ("/etc/ssh/ssh_host_ecdsa_key", 0600), - "ecdsa_public": ("/etc/ssh/ssh_host_ecdsa_key.pub", 0644), -} - -PRIV_2_PUB = { - 'rsa_private': 'rsa_public', - 'dsa_private': 'dsa_public', - 'ecdsa_private': 'ecdsa_public', -} - -KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"' +GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519'] +KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key' -GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa'] +CONFIG_KEY_TO_FILE = {} +PRIV_TO_PUB = {} +for k in GENERATE_KEY_NAMES: + CONFIG_KEY_TO_FILE.update({"%s_private" % k: (KEY_FILE_TPL % k, 0o600)}) + CONFIG_KEY_TO_FILE.update( + {"%s_public" % k: (KEY_FILE_TPL % k + ".pub", 0o600)}) + PRIV_TO_PUB["%s_private" % k] = "%s_public" % k -KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key' +KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"' def handle(_name, cfg, cloud, log, _args): @@ -69,15 +62,15 @@ def handle(_name, cfg, cloud, log, _args if "ssh_keys" in cfg: # if there are keys in cloud-config, use them for (key, val) in cfg["ssh_keys"].iteritems(): - if key in KEY_2_FILE: - tgt_fn = KEY_2_FILE[key][0] - tgt_perms = KEY_2_FILE[key][1] + if key in CONFIG_KEY_TO_FILE: + tgt_fn = CONFIG_KEY_TO_FILE[key][0] + tgt_perms = CONFIG_KEY_TO_FILE[key][1] util.write_file(tgt_fn, val, tgt_perms) - for (priv, pub) in PRIV_2_PUB.iteritems(): + for (priv, pub) in PRIV_TO_PUB.items(): if pub in cfg['ssh_keys'] or not priv in cfg['ssh_keys']: continue - pair = (KEY_2_FILE[priv][0], KEY_2_FILE[pub][0]) + pair = (CONFIG_KEY_TO_FILE[priv][0], CONFIG_KEY_TO_FILE[pub][0]) cmd = ['sh', '-xc', KEY_GEN_TPL % pair] try: # TODO(harlowja): Is this guard needed? 
@@ -92,18 +85,28 @@ def handle(_name, cfg, cloud, log, _args genkeys = util.get_cfg_option_list(cfg, 'ssh_genkeytypes', GENERATE_KEY_NAMES) + lang_c = os.environ.copy() + lang_c['LANG'] = 'C' for keytype in genkeys: keyfile = KEY_FILE_TPL % (keytype) + if os.path.exists(keyfile): + continue util.ensure_dir(os.path.dirname(keyfile)) - if not os.path.exists(keyfile): - cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile] + cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile] + + # TODO(harlowja): Is this guard needed? + with util.SeLinuxGuard("/etc/ssh", recursive=True): try: - # TODO(harlowja): Is this guard needed? - with util.SeLinuxGuard("/etc/ssh", recursive=True): - util.subp(cmd, capture=False) - except: - util.logexc(log, "Failed generating key type %s to " - "file %s", keytype, keyfile) + out, err = util.subp(cmd, capture=True, env=lang_c) + sys.stdout.write(out) + except util.ProcessExecutionError as e: + err = e.stderr.lower() + if (e.exit_code == 1 and + err.lower().startswith("unknown key")): + log.debug("ssh-keygen: unknown key type '%s'", keytype) + else: + util.logexc(log, "Failed generating key type %s to " + "file %s", keytype, keyfile) try: (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro) debian/patches/lp-1551419-azure-handle-flipped-uuid-endianness.patch0000664000000000000000000000462712666056267022222 0ustar --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -256,11 +256,57 @@ return len(fnmatch.filter(os.listdir(mp), '*[!cdrom]*')) +def _batch_gen(data, batch_size): + # Generate batches of batch_size from data + for i in range(0, len(data), batch_size): + yield data[i:i+batch_size] + + +def _get_reversed_endianness_uuid(uuid): + uuid_parts = uuid.split('-') + for part_number in [0, 1, 2]: + original_part = uuid_parts[part_number] + hex_bytes = _batch_gen(original_part, 2) + reversed_endianness_part = ''.join(reversed(list(hex_bytes))) + uuid_parts[part_number] = reversed_endianness_part + return '-'.join(uuid_parts) + + +def _get_last_boot_instance_uuid(): + return util.read_file_or_url( + '/var/lib/cloud/data/instance-id').contents.strip() + + def get_instance_id(): """ Read the instance ID from dmi data + + If the reported UUID is the previous instance ID with endianness changed + (as per LP #1551419), then that previous instance ID will be used. 
""" - return util.read_dmi_data('system-uuid') + reported_instance_uuid = util.read_dmi_data('system-uuid') + LOG.debug('Reported instance UUID: %s', reported_instance_uuid) + if not os.path.exists('/var/lib/cloud/data/instance-id'): + LOG.debug('No last-boot instance ID') + # This is first boot of a fresh instance + return reported_instance_uuid + last_boot_instance_uuid = _get_last_boot_instance_uuid() + LOG.debug('Current instance UUID: %s', last_boot_instance_uuid) + if last_boot_instance_uuid == reported_instance_uuid: + LOG.debug('Reported UUID is unchanged') + return reported_instance_uuid + reversed_endianness_uuid = _get_reversed_endianness_uuid( + reported_instance_uuid) + LOG.debug('Reported instance UUID with reversed endianness: %s', + reversed_endianness_uuid) + if last_boot_instance_uuid == reversed_endianness_uuid: + # The endianness of the instance UUID has changed, keep using the + # last_boot UUID + LOG.debug('Reported UUID is last-boot UUID with endianness reversed;' + ' continuing to use last-boot instance ID') + return last_boot_instance_uuid + LOG.debug('Reported UUID is different; using it as instance ID.') + return reported_instance_uuid def find_fabric_formatted_ephemeral_part(): debian/cloud-init.install0000664000000000000000000000027012574667652012652 0ustar etc/cloud etc/init/*.conf etc/profile.d/* etc/rsyslog.d/* usr/bin usr/lib/cloud-init usr/lib/python*/*-packages/*egg-info usr/lib/python*/*-packages/cloudinit usr/share/doc/cloud-init debian/dirs0000664000000000000000000000007512574667652010102 0ustar var/lib/cloud usr/bin etc/init usr/share/doc/cloud etc/cloud debian/cloud-init.prerm0000664000000000000000000000010312574667652012324 0ustar #!/bin/sh set -e rm -f /etc/cron.d/cloudinit-updates #DEBHELPER# debian/copyright0000664000000000000000000000445512574667652011157 0ustar Format-Specification: http://svn.debian.org/wsvn/dep/web/deps/dep5.mdwn?op=file&rev=135 Name: cloud-init Maintainer: Scott Moser Source: https://launchpad.net/cloud-init This package was debianized by Soren Hansen on Thu, 04 Sep 2008 12:49:15 +0200 as ec2-init. It was later renamed to cloud-utils by Scott Moser Upstream Author: Scott Moser Soren Hansen Chuck Short Copyright: 2010, Canonical Ltd. License: GPL-3 This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License version 3, as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . The complete text of the GPL version 3 can be seen in /usr/share/common-licenses/GPL-3. Files: cloudinit/boto_utils.py Copyright: 2006,2007, Mitch Garnaat http://garnaat.org/ License: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, dis- tribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the fol- lowing conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. debian/watch0000664000000000000000000000013312574667652010242 0ustar version=3 https://launchpad.net/cloud-init/+download .*/\+download/cloud-init-(.+)\.tar.gz debian/grub-legacy-ec2.kernel-postrm0000775000000000000000000000054012574667652014612 0ustar #!/bin/sh version="$1" bootopt="" # passing the kernel version is required [ -z "${version}" ] && exit 0 # avoid running multiple times if [ -n "$DEB_MAINT_PARAMS" ]; then eval set -- "$DEB_MAINT_PARAMS" if [ -z "$1" ] || [ "$1" != "remove" ]; then exit 0 fi fi update=/usr/sbin/update-grub-legacy-ec2 [ ! -x "${update}" ] || exec "${update}" debian/grub-legacy-ec2.install0000664000000000000000000000025512574667652013456 0ustar etc/kernel/postinst.d/x-grub-legacy-ec2 etc/kernel/postrm.d/x-grub-legacy-ec2 usr/sbin/grub-set-default usr/sbin/grub-set-default-legacy-ec2 usr/sbin/update-grub-legacy-ec2 debian/grub-legacy-ec2.preinst0000664000000000000000000000033112574667652013467 0ustar #!/bin/sh set -e if [ "$1" = "install" -o "$1" = "upgrade" ]; then dpkg-divert --package grub-legacy-ec2 --rename --divert \ /usr/sbin/grub-set-default.real --add /usr/sbin/grub-set-default fi #DEBHELPER# debian/grub-set-default0000775000000000000000000000150012574667652012310 0ustar #!/bin/sh diverted=/usr/sbin/grub-set-default.real legacy_ec2=/usr/sbin/grub-set-default-legacy-ec2 normal=/usr/sbin/grub-set-default warn() { echo "WARNING:" "$@" 1>&2; } Usage() { cat <