From 1bfa3d53c1ee74525932b721c2dddd42fc129b8e Mon Sep 17 00:00:00 2001 From: Brad Hall Date: Thu, 27 Oct 2011 18:18:20 -0700 Subject: [PATCH 001/967] Add Quantum support --- files/apts/quantum | 2 ++ stack.sh | 83 ++++++++++++++++++++++++++++++++++++++++++---- stackrc | 4 +++ 3 files changed, 82 insertions(+), 7 deletions(-) create mode 100644 files/apts/quantum diff --git a/files/apts/quantum b/files/apts/quantum new file mode 100644 index 00000000..f5008adf --- /dev/null +++ b/files/apts/quantum @@ -0,0 +1,2 @@ +openvswitch-switch +openvswitch-datapath-dkms diff --git a/stack.sh b/stack.sh index e7f36e8e..e7c383b7 100755 --- a/stack.sh +++ b/stack.sh @@ -150,6 +150,10 @@ KEYSTONE_DIR=$DEST/keystone NOVACLIENT_DIR=$DEST/python-novaclient OPENSTACKX_DIR=$DEST/openstackx NOVNC_DIR=$DEST/noVNC +QUANTUM_DIR=$DEST/quantum + +# Default Quantum Plugin +Q_PLUGIN=${Q_PLUGIN:-openvswitch} # Specify which services to launch. These generally correspond to screen tabs ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit} @@ -244,6 +248,17 @@ FLAT_INTERFACE=${FLAT_INTERFACE:-eth0} ## FIXME(ja): should/can we check that FLAT_INTERFACE is sane? +# Using Quantum networking: +# +# Make sure that q-svc is enabled in ENABLED_SERVICES. If it is the network +# manager will be set to the QuantumManager. +# +# If you're planning to use the Quantum openvswitch plugin, set Q_PLUGIN to +# "openvswitch" and make sure the q-agt service is enabled in +# ENABLED_SERVICES. +# +# With Quantum networking the NET_MAN variable is ignored. + # MySQL & RabbitMQ # ---------------- @@ -362,6 +377,8 @@ git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH # openstackx is a collection of extensions to openstack.compute & nova # that is *deprecated*. The code is being moved into python-novaclient & nova. git_clone $OPENSTACKX_REPO $OPENSTACKX_DIR $OPENSTACKX_BRANCH +# quantum +git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH # Initialization # ============== @@ -376,6 +393,7 @@ cd $NOVA_DIR; sudo python setup.py develop cd $OPENSTACKX_DIR; sudo python setup.py develop cd $HORIZON_DIR/django-openstack; sudo python setup.py develop cd $HORIZON_DIR/openstack-dashboard; sudo python setup.py develop +cd $QUANTUM_DIR; sudo python setup.py develop # Add a useful screenrc. 
This isn't required to run openstack but is we do # it since we are going to run the services in screen for simple @@ -616,8 +634,16 @@ add_nova_flag "--nodaemon" add_nova_flag "--allow_admin_api" add_nova_flag "--scheduler_driver=$SCHEDULER" add_nova_flag "--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf" -add_nova_flag "--network_manager=nova.network.manager.$NET_MAN" add_nova_flag "--fixed_range=$FIXED_RANGE" +if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then + add_nova_flag "--network_manager=nova.network.quantum.manager.QuantumManager" + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + add_nova_flag "--libvirt_vif_type=ethernet" + add_nova_flag "--libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver" + fi +else + add_nova_flag "--network_manager=nova.network.manager.$NET_MAN" +fi add_nova_flag "--my_ip=$HOST_IP" add_nova_flag "--public_interface=$PUBLIC_INTERFACE" add_nova_flag "--vlan_interface=$VLAN_INTERFACE" @@ -676,12 +702,6 @@ if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then # (re)create nova database $NOVA_DIR/bin/nova-manage db sync - - # create a small network - $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE - - # create some floating ips - $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE fi @@ -764,6 +784,55 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then exit 1 fi fi + +# Quantum +if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then + # Create database for the plugin/agent + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum;' + else + echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." + exit 1 + fi + fi + + QUANTUM_PLUGIN_INI_FILE=$QUANTUM_DIR/quantum/plugins.ini + # Make sure we're using the openvswitch plugin + sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE + screen_it q-svc "cd $QUANTUM_DIR && export PYTHONPATH=.:$PYTHONPATH; python $QUANTUM_DIR/bin/quantum $QUANTUM_DIR/etc/quantum.conf" +fi + +# Quantum agent (for compute nodes) +if [[ "$ENABLED_SERVICES" =~ "q-agt" ]]; then + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + # Set up integration bridge + OVS_BRIDGE=${OVS_BRIDGE:-br-int} + sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE + sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE + sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int + fi + + # Start up the quantum <-> openvswitch agent + screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_DIR/quantum/plugins/openvswitch/ovs_quantum_plugin.ini -v" +fi + +# NOTE(bgh): I moved the network creation here because Quantum has to be up +# and running before we can communicate with it if we're using Quantum for +# networking (i.e. q-svc is enabled). + +if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then + # create a small network + $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE + + if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then + echo "Not creating floating IPs (not supported by QuantumManager)" + else + # create some floating ips + $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE + fi +fi + # Launching nova-compute should be as simple as running ``nova-compute`` but # have to do a little more than that in our script. 
Since we add the group # ``libvirtd`` to our user in this script, when nova-compute is run it is diff --git a/stackrc b/stackrc index 9b110a37..75ec4aa2 100644 --- a/stackrc +++ b/stackrc @@ -27,6 +27,10 @@ NOVACLIENT_BRANCH=master OPENSTACKX_REPO=https://github.com/cloudbuilders/openstackx.git OPENSTACKX_BRANCH=diablo +# quantum service +QUANTUM_REPO=https://github.com/openstack/quantum +QUANTUM_BRANCH=diablo + # Specify a comma-separated list of uec images to download and install into glance. IMAGE_URLS=http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz From d9e544e5c37ac208b2fe8c468623b14ab57565ff Mon Sep 17 00:00:00 2001 From: Brad Hall Date: Fri, 28 Oct 2011 08:28:26 -0700 Subject: [PATCH 002/967] Add Quantum support: address code review comments (commit 1bfa3d53c1ee74525932b721c2dddd42fc129b8e) --- stack.sh | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/stack.sh b/stack.sh index e7c383b7..31194bc4 100755 --- a/stack.sh +++ b/stack.sh @@ -792,8 +792,8 @@ if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum;' else - echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." - exit 1 + echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." + exit 1 fi fi @@ -817,10 +817,8 @@ if [[ "$ENABLED_SERVICES" =~ "q-agt" ]]; then screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_DIR/quantum/plugins/openvswitch/ovs_quantum_plugin.ini -v" fi -# NOTE(bgh): I moved the network creation here because Quantum has to be up -# and running before we can communicate with it if we're using Quantum for -# networking (i.e. q-svc is enabled). - +# If we're using Quantum (i.e. q-svc is enabled), network creation has to +# happen after we've started the Quantum service. if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then # create a small network $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE From f6705491868494fb3b78139dad23f35cd99f12c7 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 1 Nov 2011 16:04:14 -0700 Subject: [PATCH 003/967] move from exercise.sh to exercises/.. --- exercises/euca.sh | 37 ++++++++++++++++++++++++ exercise.sh => exercises/floating_ips.sh | 5 ---- 2 files changed, 37 insertions(+), 5 deletions(-) create mode 100755 exercises/euca.sh rename exercise.sh => exercises/floating_ips.sh (97%) diff --git a/exercises/euca.sh b/exercises/euca.sh new file mode 100755 index 00000000..faeffcf9 --- /dev/null +++ b/exercises/euca.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +# **exercise.sh** - using the cloud can be fun + +# we will use the ``nova`` cli tool provided by the ``python-novaclient`` +# package +# + + +# This script exits on an error so that errors don't compound and you see +# only the first error that occured. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. 
+set -o xtrace + + +# Settings +# ======== + +# Use openrc + stackrc + localrc for settings +source ./openrc + +# Max time till the vm is bootable +BOOT_TIMEOUT=${BOOT_TIMEOUT:-15} + +IMAGE=`euca-describe-images | grep machine | cut -f2` + +INSTANCE=`euca-run-instance $IMAGE | grep INSTANCE | cut -f2` + +if ! timeout $BOOT_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then + echo "server didn't become active within $BOOT_TIMEOUT seconds" + exit 1 +fi + +euca-terminate-instances $INSTANCE diff --git a/exercise.sh b/exercises/floating_ips.sh similarity index 97% rename from exercise.sh rename to exercises/floating_ips.sh index 99b0f3bb..06a2cd4a 100755 --- a/exercise.sh +++ b/exercises/floating_ips.sh @@ -186,8 +186,3 @@ nova secgroup-delete $SECGROUP # FIXME: validate shutdown within 5 seconds # (nova show $NAME returns 1 or status != ACTIVE)? -# Testing Euca2ools -# ================== - -# make sure that we can describe instances -euca-describe-instances From 9f1863450e71e19bb8ff42e9a7c182c3f5f67c3d Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 1 Nov 2011 16:05:40 -0700 Subject: [PATCH 004/967] docs --- exercises/euca.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index faeffcf9..fc81af8f 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -1,12 +1,9 @@ #!/usr/bin/env bash -# **exercise.sh** - using the cloud can be fun - -# we will use the ``nova`` cli tool provided by the ``python-novaclient`` -# package +# we will use the ``euca2ools`` cli tool that wraps the python boto +# library to test ec2 compatibility # - # This script exits on an error so that errors don't compound and you see # only the first error that occured. set -o errexit @@ -25,10 +22,13 @@ source ./openrc # Max time till the vm is bootable BOOT_TIMEOUT=${BOOT_TIMEOUT:-15} +# find a machine image to boot IMAGE=`euca-describe-images | grep machine | cut -f2` +# launch it INSTANCE=`euca-run-instance $IMAGE | grep INSTANCE | cut -f2` +# assure it has booted within a reasonable time if ! timeout $BOOT_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then echo "server didn't become active within $BOOT_TIMEOUT seconds" exit 1 From 787af01bddbaace8f83c65c273da6a62a9658a06 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 1 Nov 2011 16:44:19 -0700 Subject: [PATCH 005/967] fix sourcing of openrc --- exercises/euca.sh | 2 ++ exercises/floating_ips.sh | 2 ++ 2 files changed, 4 insertions(+) diff --git a/exercises/euca.sh b/exercises/euca.sh index fc81af8f..0cb5feaf 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -17,7 +17,9 @@ set -o xtrace # ======== # Use openrc + stackrc + localrc for settings +pushd $(cd $(dirname "$0")/.. && pwd) source ./openrc +popd # Max time till the vm is bootable BOOT_TIMEOUT=${BOOT_TIMEOUT:-15} diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 06a2cd4a..edf784c5 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -20,7 +20,9 @@ set -o xtrace # ======== # Use openrc + stackrc + localrc for settings +pushd $(cd $(dirname "$0")/.. 
&& pwd) source ./openrc +popd # Get a token for clients that don't support service catalog # ========================================================== From de8b9a2340437220a45a5f1181a23576d12a37bb Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 1 Nov 2011 17:23:04 -0700 Subject: [PATCH 006/967] change screen name to stack - addresses issue #139 --- stack.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 94665121..c7edb117 100755 --- a/stack.sh +++ b/stack.sh @@ -721,13 +721,13 @@ fi function screen_it { NL=`echo -ne '\015'` if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then - screen -S nova -X screen -t $1 - screen -S nova -p $1 -X stuff "$2$NL" + screen -S stack -X screen -t $1 + screen -S stack -p $1 -X stuff "$2$NL" fi } # create a new named screen to run processes in -screen -d -m -S nova -t nova +screen -d -m -S stack -t stack sleep 1 # launch the glance registery service From 1d1dda14572576a3242f113bc0d3a8c5f09b14fa Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 1 Nov 2011 19:46:17 -0700 Subject: [PATCH 007/967] allow build_libvirt.sh not to destroy/recreate net --- tools/build_libvirt.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/build_libvirt.sh b/tools/build_libvirt.sh index 48e28534..fc281d31 100755 --- a/tools/build_libvirt.sh +++ b/tools/build_libvirt.sh @@ -174,6 +174,7 @@ unmount_images # Network configuration variables GUEST_NETWORK=${GUEST_NETWORK:-1} +GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes} GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50} GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24} @@ -194,8 +195,10 @@ cat > $NET_XML < EOF -virsh net-destroy devstack-$GUEST_NETWORK || true -virsh net-create $VM_DIR/net.xml +if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then + virsh net-destroy devstack-$GUEST_NETWORK || true + virsh net-create $VM_DIR/net.xml +fi # libvirt.xml configuration LIBVIRT_XML=$VM_DIR/libvirt.xml From e19d88478949bc31c7d2d224722655992414910a Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 1 Nov 2011 20:06:55 -0700 Subject: [PATCH 008/967] add some spacing to the output --- stack.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stack.sh b/stack.sh index 94665121..e7f36e8e 100755 --- a/stack.sh +++ b/stack.sh @@ -843,6 +843,10 @@ for ret in "${PIPESTATUS[@]}"; do [ $ret -eq 0 ] || exit $ret; done # Using the cloud # =============== +echo "" +echo "" +echo "" + # If you installed the horizon on this server, then you should be able # to access the site using your browser. 
if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then From f1b3dbc41a294ccfc25042ea106a10f8ae6c3457 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 1 Nov 2011 21:52:07 -0700 Subject: [PATCH 009/967] install arping - used in ha network --- files/apts/nova | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apts/nova b/files/apts/nova index 8ae74a21..17eb8774 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -1,6 +1,7 @@ dnsmasq-base kpartx parted +arping # used for send_arp_for_ha option in nova-network mysql-server python-mysqldb kvm From 4bd41ad77f9bc26b64a3a4362f787f5c9814f29a Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 1 Nov 2011 22:06:21 -0700 Subject: [PATCH 010/967] deleting build_kvm.sh in favor of build_libvirt.sh --- tools/build_kvm.sh | 402 --------------------------------------------- 1 file changed, 402 deletions(-) delete mode 100755 tools/build_kvm.sh diff --git a/tools/build_kvm.sh b/tools/build_kvm.sh deleted file mode 100755 index 1b339260..00000000 --- a/tools/build_kvm.sh +++ /dev/null @@ -1,402 +0,0 @@ -#!/usr/bin/env bash - -# exit on error to stop unexpected errors -set -o errexit - -# Make sure that we have the proper version of ubuntu -UBUNTU_VERSION=`cat /etc/lsb-release | grep CODENAME | sed 's/.*=//g'` -if [ ! "oneiric" = "$UBUNTU_VERSION" ]; then - if [ ! "natty" = "$UBUNTU_VERSION" ]; then - echo "This script only works with oneiric and natty" - exit 1 - fi -fi - -# Echo commands -set -o xtrace - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$TOOLS_DIR/.. - -# Where to store files and instances -KVMSTACK_DIR=${KVMSTACK_DIR:-/opt/kvmstack} - -# Where to store images -IMAGES_DIR=$KVMSTACK_DIR/images - -# Create images dir -mkdir -p $IMAGES_DIR - -# Move to top devstack dir -cd $TOP_DIR - -# Abort if localrc is not set -if [ ! -e ./localrc ]; then - echo "You must have a localrc with ALL necessary passwords defined before proceeding." - echo "See stack.sh for required passwords." - exit 1 -fi - -# Source params -source ./stackrc - -# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD`` -ROOT_PASSWORD=${ADMIN_PASSWORD:-password} - - -# Base image (natty by default) -DIST_NAME=${DIST_NAME:-natty} -IMAGE_FNAME=$DIST_NAME.raw - -# Name of our instance, used by libvirt -GUEST_NAME=${GUEST_NAME:-kvmstack} - -# Original version of built image -BASE_IMAGE=$KVMSTACK_DIR/images/$DIST_NAME.raw - -# Copy of base image, which we pre-install with tasty treats -VM_IMAGE=$IMAGES_DIR/$DIST_NAME.$GUEST_NAME.raw - -# Mop up after previous runs -virsh destroy $GUEST_NAME || true - -# Where this vm is stored -VM_DIR=$KVMSTACK_DIR/instances/$GUEST_NAME - -# Create vm dir -mkdir -p $VM_DIR - -# Mount point into copied base image -COPY_DIR=$VM_DIR/copy -mkdir -p $COPY_DIR - -# Create the base image if it does not yet exist -if [ ! -e $IMAGES_DIR/$IMAGE_FNAME ]; then - cd $TOOLS_DIR - ./make_image.sh -m -r 5000 $DIST_NAME raw - mv $DIST_NAME.raw $BASE_IMAGE - cd $TOP_DIR -fi - -# Create a copy of the base image -if [ ! 
-e $VM_IMAGE ]; then - cp -p $BASE_IMAGE $VM_IMAGE -fi - -# Unmount the copied base image -function unmount_images() { - # unmount the filesystem - while df | grep -q $COPY_DIR; do - umount $COPY_DIR || echo 'ok' - sleep 1 - done -} - -# Unmount from failed runs -unmount_images - -# Ctrl-c catcher -function kill_unmount() { - unmount_images - exit 1 -} - -# Install deps if needed -dpkg -l kvm libvirt-bin kpartx || apt-get install -y --force-yes kvm libvirt-bin kpartx - -# Let Ctrl-c kill tail and exit -trap kill_unmount SIGINT - -# Where Openstack code will live in image -DEST=${DEST:-/opt/stack} - -# Mount the file system -mount -o loop,offset=32256 $VM_IMAGE $COPY_DIR - -# git clone only if directory doesn't exist already. Since ``DEST`` might not -# be owned by the installation user, we create the directory and change the -# ownership to the proper user. -function git_clone { - if [ ! -d $2 ]; then - sudo mkdir $2 - sudo chown `whoami` $2 - git clone $1 $2 - cd $2 - # This checkout syntax works for both branches and tags - git checkout $3 - fi -} - -# Make sure that base requirements are installed -cp /etc/resolv.conf $COPY_DIR/etc/resolv.conf -chroot $COPY_DIR apt-get update -chroot $COPY_DIR apt-get install -y --force-yes `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"` -chroot $COPY_DIR apt-get install -y --download-only rabbitmq-server libvirt-bin mysql-server -chroot $COPY_DIR pip install `cat files/pips/*` - -# Clean out code repos if directed to do so -if [ "$CLEAN" = "1" ]; then - rm -rf $COPY_DIR/$DEST -fi - -# Cache openstack code -mkdir -p $COPY_DIR/$DEST -git_clone $NOVA_REPO $COPY_DIR/$DEST/nova $NOVA_BRANCH -git_clone $GLANCE_REPO $COPY_DIR/$DEST/glance $GLANCE_BRANCH -git_clone $KEYSTONE_REPO $COPY_DIR/$DESTkeystone $KEYSTONE_BRANCH -git_clone $NOVNC_REPO $COPY_DIR/$DEST/noVNC $NOVNC_BRANCH -git_clone $HORIZON_REPO $COPY_DIR/$DEST/horizon $HORIZON_BRANCH $HORIZON_TAG -git_clone $NOVACLIENT_REPO $COPY_DIR/$DEST/python-novaclient $NOVACLIENT_BRANCH -git_clone $OPENSTACKX_REPO $COPY_DIR/$DEST/openstackx $OPENSTACKX_BRANCH -git_clone $KEYSTONE_REPO $COPY_DIR/$DEST/keystone $KEYSTONE_BRANCH -git_clone $NOVNC_REPO $COPY_DIR/$DEST/noVNC $NOVNC_BRANCH - -# Back to devstack -cd $TOP_DIR - -# Unmount the filesystems -unmount_images - -# Network configuration variables -BRIDGE=${BRIDGE:-br0} -GUEST_IP=${GUEST_IP:-192.168.1.50} -GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24} -GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0} -GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.1.1} -GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $(echo $GUEST_IP | sed "s/.*\.//")`"} -GUEST_RAM=${GUEST_RAM:-1524288} -GUEST_CORES=${GUEST_CORES:-1} - -# libvirt.xml configuration -LIBVIRT_XML=$VM_DIR/libvirt.xml -cat > $LIBVIRT_XML < - $GUEST_NAME - $GUEST_RAM - - hvm - - - - - - $GUEST_CORES - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -EOF - -# Mount point for instance fs -ROOTFS=$VM_DIR/root -mkdir -p $ROOTFS - -# Make sure we have nbd-ness -modprobe nbd max_part=63 - -# Which NBD device to use? -NBD=${NBD:-/dev/nbd5} - -# Clean up from previous runs -umount $ROOTFS || echo 'ok' -qemu-nbd -d $NBD || echo 'ok' - -# Clean up old runs -cd $VM_DIR -rm -f $VM_DIR/disk - -# Create our instance fs -qemu-img create -f qcow2 -b $VM_IMAGE disk - -# Connect our nbd and wait till it is mountable -qemu-nbd -c $NBD disk -NBD_DEV=`basename $NBD` -if ! timeout 60 sh -c "while ! 
[ -e /sys/block/$NBD_DEV/pid ]; do sleep 1; done"; then - echo "Couldn't connect $NBD" - exit 1 -fi - -# Mount the instance -mount $NBD $ROOTFS -o offset=32256 -t ext4 - -# Configure instance network -INTERFACES=$ROOTFS/etc/network/interfaces -cat > $INTERFACES <> $ROOTFS/etc/sudoers - -# Gracefully cp only if source file/dir exists -function cp_it { - if [ -e $1 ] || [ -d $1 ]; then - cp -pRL $1 $2 - fi -} - -# Copy over your ssh keys and env if desired -COPYENV=${COPYENV:-1} -if [ "$COPYENV" = "1" ]; then - cp_it ~/.ssh $ROOTFS/$DEST/.ssh - cp_it ~/.ssh/id_rsa.pub $ROOTFS/$DEST/.ssh/authorized_keys - cp_it ~/.gitconfig $ROOTFS/$DEST/.gitconfig - cp_it ~/.vimrc $ROOTFS/$DEST/.vimrc - cp_it ~/.bashrc $ROOTFS/$DEST/.bashrc -fi - -# pre-cache uec images -for image_url in ${IMAGE_URLS//,/ }; do - IMAGE_FNAME=`basename "$image_url"` - if [ ! -f $IMAGES_DIR/$IMAGE_FNAME ]; then - wget -c $image_url -O $IMAGES_DIR/$IMAGE_FNAME - fi - cp $IMAGES_DIR/$IMAGE_FNAME $ROOTFS/$DEST/devstack/files -done - -# Configure the runner -RUN_SH=$ROOTFS/$DEST/run.sh -cat > $RUN_SH < /$DEST/run.sh.log -echo >> /$DEST/run.sh.log -echo >> /$DEST/run.sh.log -echo "All done! Time to start clicking." >> /$DEST/run.sh.log -cat $DEST/run.sh.log -EOF -chmod 755 $RUN_SH - -# Make runner launch on boot -RC_LOCAL=$ROOTFS/etc/init.d/local -cat > $RC_LOCAL <> $ROOTFS/$DEST/.bashrc -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $ROOTFS/etc/profile - -# Give stack ownership over $DEST so it may do the work needed -chroot $ROOTFS chown -R stack $DEST - -# Change boot params so that we get a console log -sudo sed -e "s/quiet splash/splash console=ttyS0 console=ttyS1,19200n8/g" -i $ROOTFS/boot/grub/menu.lst -sudo sed -e "s/^hiddenmenu//g" -i $ROOTFS/boot/grub/menu.lst - -# Set the hostname -echo $GUEST_NAME > $ROOTFS/etc/hostname - -# We need the hostname to resolve for rabbit to launch -if ! grep -q $GUEST_NAME $ROOTFS/etc/hosts; then - echo "$GUEST_IP $GUEST_NAME" >> $ROOTFS/etc/hosts -fi - -# Unmount -umount $ROOTFS || echo 'ok' -qemu-nbd -d $NBD - -# Create the instance -cd $VM_DIR && virsh create libvirt.xml - -# Tail the console log till we are done -WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1} -if [ "$WAIT_TILL_LAUNCH" = "1" ]; then - # Done creating the container, let's tail the log - echo - echo "=============================================================" - echo " -- YAY! --" - echo "=============================================================" - echo - echo "We're done launching the vm, about to start tailing the" - echo "stack.sh log. It will take a second or two to start." - echo - echo "Just CTRL-C at any time to stop tailing." - - while [ ! -e "$VM_DIR/console.log" ]; do - sleep 1 - done - - tail -F $VM_DIR/console.log & - - TAIL_PID=$! - - function kill_tail() { - kill $TAIL_PID - exit 1 - } - - # Let Ctrl-c kill tail and exit - trap kill_tail SIGINT - - echo "Waiting stack.sh to finish..." - while ! cat $VM_DIR/console.log | grep -q 'All done' ; do - sleep 5 - done - - kill $TAIL_PID - - if grep -q "stack.sh failed" $VM_DIR/console.log; then - exit 1 - fi - echo "" - echo "Finished - Zip-a-dee Doo-dah!" 
-fi From 2cec3dc846e5202c2cdf0f3f829a588392514d74 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 2 Nov 2011 07:03:38 -0500 Subject: [PATCH 011/967] unpause paused instances before terminating --- tools/xen/build_domU.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/xen/build_domU.sh b/tools/xen/build_domU.sh index 8e40225b..c7bb3d17 100755 --- a/tools/xen/build_domU.sh +++ b/tools/xen/build_domU.sh @@ -229,6 +229,7 @@ xe vm-list --minimal name-label="$LABEL" | xargs ./scripts/uninstall-os-vpx.sh # Destroy any instances that were launched for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do echo "Shutting down nova instance $uuid" + xe vm-unpause uuid=$uuid || true xe vm-shutdown uuid=$uuid xe vm-destroy uuid=$uuid done From af6ed6b1b5966aa468798584f81334510fb128a2 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 2 Nov 2011 07:50:27 -0500 Subject: [PATCH 012/967] source stackrc --- tools/xen/build_domU.sh | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tools/xen/build_domU.sh b/tools/xen/build_domU.sh index c7bb3d17..65049af3 100755 --- a/tools/xen/build_domU.sh +++ b/tools/xen/build_domU.sh @@ -7,6 +7,12 @@ if [ ! -e ../../localrc ]; then exit 1 fi +# This directory +TOP_DIR=$(cd $(dirname "$0") && pwd) + +# Source params +cd ../.. && source ./stackrc && cd $TOP_DIR + # Echo commands set -o xtrace @@ -41,9 +47,6 @@ GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} # Size of image VDI_MB=${VDI_MB:-2500} -# This directory -TOP_DIR=$(cd $(dirname "$0") && pwd) - # Make sure we have git if ! which git; then GITDIR=/tmp/git-1.7.7 From 28fa4e8d940cb8a7a3d5fcb932a7552ad5f1c90c Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 1 Nov 2011 12:30:55 +0100 Subject: [PATCH 013/967] Add swift support. Based on github.sh/cloudbuilders/deploy.sh/swift. This is a WIP branch. 
--- files/apts/swift | 19 +++++++ files/swift-account-server.conf | 19 +++++++ files/swift-container-server.conf | 21 ++++++++ files/swift-object-server.conf | 19 +++++++ files/swift-proxy-server.conf | 25 ++++++++++ files/swift-rsyncd.conf | 79 +++++++++++++++++++++++++++++ files/swift.conf | 3 ++ stack.sh | 83 ++++++++++++++++++++++++++++++- stackrc | 4 ++ 9 files changed, 271 insertions(+), 1 deletion(-) create mode 100644 files/apts/swift create mode 100644 files/swift-account-server.conf create mode 100644 files/swift-container-server.conf create mode 100644 files/swift-object-server.conf create mode 100644 files/swift-proxy-server.conf create mode 100644 files/swift-rsyncd.conf create mode 100644 files/swift.conf diff --git a/files/apts/swift b/files/apts/swift new file mode 100644 index 00000000..07767077 --- /dev/null +++ b/files/apts/swift @@ -0,0 +1,19 @@ +curl +gcc +memcached +memcached +python-configobj +python-coverage +python-dev +python-eventlet +python-greenlet +python-netifaces +python-nose +python-nose +python-pastedeploy +python-setuptools +python-simplejson +python-webob +python-xattr +sqlite3 +xfsprogs diff --git a/files/swift-account-server.conf b/files/swift-account-server.conf new file mode 100644 index 00000000..b5451c69 --- /dev/null +++ b/files/swift-account-server.conf @@ -0,0 +1,19 @@ +[DEFAULT] +devices = %NODE_PATH%/node +mount_check = false +bind_port = %BIND_PORT% +user = stack +log_facility = LOG_LOCAL%LOG_FACILITY% + +[pipeline:main] +pipeline = account-server + +[app:account-server] +use = egg:swift#account + +[account-replicator] +vm_test_mode = yes + +[account-auditor] + +[account-reaper] diff --git a/files/swift-container-server.conf b/files/swift-container-server.conf new file mode 100644 index 00000000..c6300766 --- /dev/null +++ b/files/swift-container-server.conf @@ -0,0 +1,21 @@ +[DEFAULT] +devices = %NODE_PATH%/node +mount_check = false +bind_port = %BIND_PORT% +user = stack +log_facility = LOG_LOCAL%LOG_FACILITY% + +[pipeline:main] +pipeline = container-server + +[app:container-server] +use = egg:swift#container + +[container-replicator] +vm_test_mode = yes + +[container-updater] + +[container-auditor] + +[container-sync] diff --git a/files/swift-object-server.conf b/files/swift-object-server.conf new file mode 100644 index 00000000..4a007135 --- /dev/null +++ b/files/swift-object-server.conf @@ -0,0 +1,19 @@ +[DEFAULT] +devices = %NODE_PATH%/node +mount_check = false +bind_port = %BIND_PORT% +user = stack +log_facility = LOG_LOCAL%LOG_FACILITY% + +[pipeline:main] +pipeline = object-server + +[app:object-server] +use = egg:swift#object + +[object-replicator] +vm_test_mode = yes + +[object-updater] + +[object-auditor] diff --git a/files/swift-proxy-server.conf b/files/swift-proxy-server.conf new file mode 100644 index 00000000..99fc2860 --- /dev/null +++ b/files/swift-proxy-server.conf @@ -0,0 +1,25 @@ +[DEFAULT] +bind_port = 8080 +user = stack +log_facility = LOG_LOCAL1 + +[pipeline:main] +pipeline = healthcheck cache tempauth proxy-server + +[app:proxy-server] +use = egg:swift#proxy +allow_account_management = true + +[filter:tempauth] +use = egg:swift#tempauth +user_admin_admin = admin .admin .reseller_admin +user_test_tester = testing .admin +user_test2_tester2 = testing2 .admin +user_test_tester3 = testing3 +bind_ip = ${MY_IP} + +[filter:healthcheck] +use = egg:swift#healthcheck + +[filter:cache] +use = egg:swift#memcache diff --git a/files/swift-rsyncd.conf b/files/swift-rsyncd.conf new file mode 100644 index 00000000..80ec1864 --- 
/dev/null +++ b/files/swift-rsyncd.conf @@ -0,0 +1,79 @@ +uid = stack +gid = stack +log file = /var/log/rsyncd.log +pid file = /var/run/rsyncd.pid +address = 127.0.0.1 + +[account6012] +max connections = 25 +path = %SWIFT_LOOPBACK_DISK_SIZE%/1/node/ +read only = false +lock file = /var/lock/account6012.lock + +[account6022] +max connections = 25 +path = %SWIFT_LOOPBACK_DISK_SIZE%/2/node/ +read only = false +lock file = /var/lock/account6022.lock + +[account6032] +max connections = 25 +path = %SWIFT_LOOPBACK_DISK_SIZE%/3/node/ +read only = false +lock file = /var/lock/account6032.lock + +[account6042] +max connections = 25 +path = %SWIFT_LOOPBACK_DISK_SIZE%/4/node/ +read only = false +lock file = /var/lock/account6042.lock + + +[container6011] +max connections = 25 +path = %SWIFT_LOOPBACK_DISK_SIZE%/1/node/ +read only = false +lock file = /var/lock/container6011.lock + +[container6021] +max connections = 25 +path = %SWIFT_LOOPBACK_DISK_SIZE%/2/node/ +read only = false +lock file = /var/lock/container6021.lock + +[container6031] +max connections = 25 +path = %SWIFT_LOOPBACK_DISK_SIZE%/3/node/ +read only = false +lock file = /var/lock/container6031.lock + +[container6041] +max connections = 25 +path = %SWIFT_LOOPBACK_DISK_SIZE%/4/node/ +read only = false +lock file = /var/lock/container6041.lock + + +[object6010] +max connections = 25 +path = %SWIFT_LOOPBACK_DISK_SIZE%/1/node/ +read only = false +lock file = /var/lock/object6010.lock + +[object6020] +max connections = 25 +path = %SWIFT_LOOPBACK_DISK_SIZE%/2/node/ +read only = false +lock file = /var/lock/object6020.lock + +[object6030] +max connections = 25 +path = %SWIFT_LOOPBACK_DISK_SIZE%/3/node/ +read only = false +lock file = /var/lock/object6030.lock + +[object6040] +max connections = 25 +path = %SWIFT_LOOPBACK_DISK_SIZE%/4/node/ +read only = false +lock file = /var/lock/object6040.lock diff --git a/files/swift.conf b/files/swift.conf new file mode 100644 index 00000000..98df4663 --- /dev/null +++ b/files/swift.conf @@ -0,0 +1,3 @@ +[swift-hash] +# random unique string that can never change (DO NOT LOSE) +swift_hash_path_suffix = %SWIFT_HASH% diff --git a/stack.sh b/stack.sh index cba2db2d..666b5f18 100755 --- a/stack.sh +++ b/stack.sh @@ -150,9 +150,10 @@ KEYSTONE_DIR=$DEST/keystone NOVACLIENT_DIR=$DEST/python-novaclient OPENSTACKX_DIR=$DEST/openstackx NOVNC_DIR=$DEST/noVNC +SWIFT_DIR=$DEST/swift # Specify which services to launch. These generally correspond to screen tabs -ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit} +ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit,swift} # Nova hypervisor configuration. We default to libvirt whth **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. Stack.sh can @@ -270,6 +271,14 @@ read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT." # Glance connection info. Note the port must be specified. 
GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$HOST_IP:9292} +# SWIFT +# ----- +# +# Location of SWIFT drives +SWIFT_DRIVE_LOCATION=${SWIFT_DRIVE_LOCATION:-/srv} + +# Size of the loopback disks +SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} # Keystone # -------- @@ -349,6 +358,8 @@ function git_clone { # compute service git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH +# storage service +git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH # image catalog service git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH # unified auth system (manages accounts/tokens) @@ -370,6 +381,7 @@ git_clone $OPENSTACKX_REPO $OPENSTACKX_DIR $OPENSTACKX_BRANCH # setup our checkouts so they are installed into python path # allowing ``import nova`` or ``import glance.client`` cd $KEYSTONE_DIR; sudo python setup.py develop +cd $SWIFT_DIR; sudo python setup.py develop cd $GLANCE_DIR; sudo python setup.py develop cd $NOVACLIENT_DIR; sudo python setup.py develop cd $NOVA_DIR; sudo python setup.py develop @@ -580,6 +592,75 @@ if [[ "$ENABLED_SERVICES" =~ "n-net" ]]; then mkdir -p $NOVA_DIR/networks fi +# Storage Service +if [[ "$ENABLED_SERVICES" =~ "swift" ]];then + mkdir -p ${SWIFT_DRIVE_LOCATION}/drives + local s=${SWIFT_DRIVE_LOCATION}/drives/sdb1 # Shortcut variable + + # Create a loopback disk and format it with XFS. + if [[ ! -e ${SWIFT_DRIVE_LOCATION}/swift-disk ]];then + dd if=/dev/zero of=${SWIFT_DRIVE_LOCATION}/swift-disk bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} + mkfs.xfs -f -i size=1024 ${SWIFT_DRIVE_LOCATION}/swift-disk + fi + + # Add the mountpoint to fstab + if ! egrep -q "^${SWIFT_DRIVE_LOCATION}/swift-disk" /etc/fstab;then + echo "# Added by devstack" | tee -a /etc/fstab + echo "${SWIFT_DRIVE_LOCATION}/swift-disk ${s} xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0" | \ + tee -a /etc/fstab + fi + + # Create and mount drives. + mkdir -p ${s} + mount ${s} + mkdir ${s}/{1..4} + + # Create directories + install -g stack -o stack -d /etc/swift/{object,container,account}-server \ + ${SWIFT_DRIVE_LOCATION}/{1..4}/node/sdb1 /var/run/swift + + # Adjust rc.local to always have a /var/run/swift on reboot + # created and chown to our user. + # TODO (chmou): We may not have a "exit 0" + sed -i '/^exit 0/d' /etc/rc.local +cat <>/etc/rc.local +mkdir -p /var/run/swift +chown stack: /var/run/swift +exit 0 +EOF + + # Add rsync file + sed -e "s/%SWIFT_LOOPBACK_DISK_SIZE%/$SWIFT_DRIVE_LOCATION/" $FILES/swift-rsyncd.conf > /etc/rsyncd.conf + + # Copy proxy-server configuration + cp $FILES/swift-proxy-server.conf /etc/swift/ + + # Generate swift.conf, we need to have the swift-hash being random + # and unique. + local SWIFT_HASH=$(od -t x8 -N 8 -A n /etc/swift/swift.conf + + # We need to generate a object/account/proxy configuration + # emulating 4 nodes on different ports we have a litle function + # that help us doing that. 
+ function generate_swift_configuration() { + local server_type=$1 + local bind_port=$2 + local log_facility=$3 + for node_number in {1..4};do + node_path=${SWIFT_DRIVE_LOCATION}/${node_number}/node + sed -e "s/%NODE_PATH%/${node_path}/;s/%BIND_PORT%/${bind_port}/;s/%LOG_FACILITY%/${log_facility}/" \ + $FILES/swift-${server_type}-server.conf > /etc/swift/${server_type}-server/${node_number}.conf + bind_port=$(( ${bind_port} + 10 )) + log_facility=$(( ${log_facility} + 1 )) + done + } + generate_swift_configuration object 6010 2 + generate_swift_configuration container 6011 2 + generate_swift_configuration account 6012 2 + +fi + # Volume Service # -------------- diff --git a/stackrc b/stackrc index 9b110a37..e880c170 100644 --- a/stackrc +++ b/stackrc @@ -2,6 +2,10 @@ NOVA_REPO=https://github.com/cloudbuilders/nova.git NOVA_BRANCH=diablo +# storage service +SWIFT_REPO=https://github.com/openstack/swift.git +SWIFT_BRANCH=diablo + # image catalog service GLANCE_REPO=https://github.com/cloudbuilders/glance.git GLANCE_BRANCH=diablo From a2cd841265c81e03a92a7abb5d788ad3d2d46bcd Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 1 Nov 2011 12:36:10 +0100 Subject: [PATCH 014/967] Change SWIFT_DRIVE_LOCATION to SWIFT_LOCATION Fix some retarness along the way. --- files/swift-proxy-server.conf | 2 +- files/swift-rsyncd.conf | 24 ++++++++++++------------ stack.sh | 22 +++++++++++----------- 3 files changed, 24 insertions(+), 24 deletions(-) diff --git a/files/swift-proxy-server.conf b/files/swift-proxy-server.conf index 99fc2860..9e2b1ddc 100644 --- a/files/swift-proxy-server.conf +++ b/files/swift-proxy-server.conf @@ -16,7 +16,7 @@ user_admin_admin = admin .admin .reseller_admin user_test_tester = testing .admin user_test2_tester2 = testing2 .admin user_test_tester3 = testing3 -bind_ip = ${MY_IP} +bind_ip = 0.0.0.0 [filter:healthcheck] use = egg:swift#healthcheck diff --git a/files/swift-rsyncd.conf b/files/swift-rsyncd.conf index 80ec1864..1cea98cc 100644 --- a/files/swift-rsyncd.conf +++ b/files/swift-rsyncd.conf @@ -6,74 +6,74 @@ address = 127.0.0.1 [account6012] max connections = 25 -path = %SWIFT_LOOPBACK_DISK_SIZE%/1/node/ +path = %SWIFT_LOCATION%/1/node/ read only = false lock file = /var/lock/account6012.lock [account6022] max connections = 25 -path = %SWIFT_LOOPBACK_DISK_SIZE%/2/node/ +path = %SWIFT_LOCATION%/2/node/ read only = false lock file = /var/lock/account6022.lock [account6032] max connections = 25 -path = %SWIFT_LOOPBACK_DISK_SIZE%/3/node/ +path = %SWIFT_LOCATION%/3/node/ read only = false lock file = /var/lock/account6032.lock [account6042] max connections = 25 -path = %SWIFT_LOOPBACK_DISK_SIZE%/4/node/ +path = %SWIFT_LOCATION%/4/node/ read only = false lock file = /var/lock/account6042.lock [container6011] max connections = 25 -path = %SWIFT_LOOPBACK_DISK_SIZE%/1/node/ +path = %SWIFT_LOCATION%/1/node/ read only = false lock file = /var/lock/container6011.lock [container6021] max connections = 25 -path = %SWIFT_LOOPBACK_DISK_SIZE%/2/node/ +path = %SWIFT_LOCATION%/2/node/ read only = false lock file = /var/lock/container6021.lock [container6031] max connections = 25 -path = %SWIFT_LOOPBACK_DISK_SIZE%/3/node/ +path = %SWIFT_LOCATION%/3/node/ read only = false lock file = /var/lock/container6031.lock [container6041] max connections = 25 -path = %SWIFT_LOOPBACK_DISK_SIZE%/4/node/ +path = %SWIFT_LOCATION%/4/node/ read only = false lock file = 
/var/lock/container6041.lock [object6010] max connections = 25 -path = %SWIFT_LOOPBACK_DISK_SIZE%/1/node/ +path = %SWIFT_LOCATION%/1/node/ read only = false lock file = /var/lock/object6010.lock [object6020] max connections = 25 -path = %SWIFT_LOOPBACK_DISK_SIZE%/2/node/ +path = %SWIFT_LOCATION%/2/node/ read only = false lock file = /var/lock/object6020.lock [object6030] max connections = 25 -path = %SWIFT_LOOPBACK_DISK_SIZE%/3/node/ +path = %SWIFT_LOCATION%/3/node/ read only = false lock file = /var/lock/object6030.lock [object6040] max connections = 25 -path = %SWIFT_LOOPBACK_DISK_SIZE%/4/node/ +path = %SWIFT_LOCATION%/4/node/ read only = false lock file = /var/lock/object6040.lock diff --git a/stack.sh b/stack.sh index 666b5f18..bfab0788 100755 --- a/stack.sh +++ b/stack.sh @@ -275,7 +275,7 @@ GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$HOST_IP:9292} # ----- # # Location of SWIFT drives -SWIFT_DRIVE_LOCATION=${SWIFT_DRIVE_LOCATION:-/srv} +SWIFT_LOCATION=${SWIFT_LOCATION:-/srv} # Size of the loopback disks SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} @@ -594,19 +594,19 @@ fi # Storage Service if [[ "$ENABLED_SERVICES" =~ "swift" ]];then - mkdir -p ${SWIFT_DRIVE_LOCATION}/drives - local s=${SWIFT_DRIVE_LOCATION}/drives/sdb1 # Shortcut variable + mkdir -p ${SWIFT_LOCATION}/drives + local s=${SWIFT_LOCATION}/drives/sdb1 # Shortcut variable # Create a loopback disk and format it with XFS. - if [[ ! -e ${SWIFT_DRIVE_LOCATION}/swift-disk ]];then - dd if=/dev/zero of=${SWIFT_DRIVE_LOCATION}/swift-disk bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} - mkfs.xfs -f -i size=1024 ${SWIFT_DRIVE_LOCATION}/swift-disk + if [[ ! -e ${SWIFT_LOCATION}/swift-disk ]];then + dd if=/dev/zero of=${SWIFT_LOCATION}/swift-disk bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} + mkfs.xfs -f -i size=1024 ${SWIFT_LOCATION}/swift-disk fi # Add the mountpoint to fstab - if ! egrep -q "^${SWIFT_DRIVE_LOCATION}/swift-disk" /etc/fstab;then + if ! egrep -q "^${SWIFT_LOCATION}/swift-disk" /etc/fstab;then echo "# Added by devstack" | tee -a /etc/fstab - echo "${SWIFT_DRIVE_LOCATION}/swift-disk ${s} xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0" | \ + echo "${SWIFT_LOCATION}/swift-disk ${s} xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0" | \ tee -a /etc/fstab fi @@ -617,7 +617,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then # Create directories install -g stack -o stack -d /etc/swift/{object,container,account}-server \ - ${SWIFT_DRIVE_LOCATION}/{1..4}/node/sdb1 /var/run/swift + ${SWIFT_LOCATION}/{1..4}/node/sdb1 /var/run/swift # Adjust rc.local to always have a /var/run/swift on reboot # created and chown to our user. 
@@ -630,7 +630,7 @@ exit 0 EOF # Add rsync file - sed -e "s/%SWIFT_LOOPBACK_DISK_SIZE%/$SWIFT_DRIVE_LOCATION/" $FILES/swift-rsyncd.conf > /etc/rsyncd.conf + sed -e "s/%SWIFT_LOCATION%/$SWIFT_LOCATION/" $FILES/swift-rsyncd.conf > /etc/rsyncd.conf # Copy proxy-server configuration cp $FILES/swift-proxy-server.conf /etc/swift/ @@ -648,7 +648,7 @@ EOF local bind_port=$2 local log_facility=$3 for node_number in {1..4};do - node_path=${SWIFT_DRIVE_LOCATION}/${node_number}/node + node_path=${SWIFT_LOCATION}/${node_number}/node sed -e "s/%NODE_PATH%/${node_path}/;s/%BIND_PORT%/${bind_port}/;s/%LOG_FACILITY%/${log_facility}/" \ $FILES/swift-${server_type}-server.conf > /etc/swift/${server_type}-server/${node_number}.conf bind_port=$(( ${bind_port} + 10 )) From a03f005673107fd93226752f9531ae498b70da39 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 1 Nov 2011 13:08:29 +0000 Subject: [PATCH 015/967] Fixes (still not fully tested). --- stack.sh | 33 ++++++++++++++++++++------------- stackrc | 2 +- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/stack.sh b/stack.sh index bfab0788..546cc065 100755 --- a/stack.sh +++ b/stack.sh @@ -594,51 +594,56 @@ fi # Storage Service if [[ "$ENABLED_SERVICES" =~ "swift" ]];then - mkdir -p ${SWIFT_LOCATION}/drives - local s=${SWIFT_LOCATION}/drives/sdb1 # Shortcut variable + sudo mkdir -p ${SWIFT_LOCATION}/drives + sudo chown -R stack: ${SWIFT_LOCATION}/drives + s=${SWIFT_LOCATION}/drives/sdb1 # Shortcut variable # Create a loopback disk and format it with XFS. if [[ ! -e ${SWIFT_LOCATION}/swift-disk ]];then + sudo touch ${SWIFT_LOCATION}/swift-disk + sudo chown stack: ${SWIFT_LOCATION}/swift-disk + dd if=/dev/zero of=${SWIFT_LOCATION}/swift-disk bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} mkfs.xfs -f -i size=1024 ${SWIFT_LOCATION}/swift-disk fi # Add the mountpoint to fstab if ! egrep -q "^${SWIFT_LOCATION}/swift-disk" /etc/fstab;then - echo "# Added by devstack" | tee -a /etc/fstab + echo "# Added by devstack" | sudo tee -a /etc/fstab echo "${SWIFT_LOCATION}/swift-disk ${s} xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0" | \ - tee -a /etc/fstab + sudo tee -a /etc/fstab fi # Create and mount drives. mkdir -p ${s} - mount ${s} - mkdir ${s}/{1..4} + if ! egrep -q "$s" /proc/mounts;then + sudo mount ${s} + fi # Create directories - install -g stack -o stack -d /etc/swift/{object,container,account}-server \ - ${SWIFT_LOCATION}/{1..4}/node/sdb1 /var/run/swift + sudo install -g stack -o stack -d /etc/swift /etc/swift/{object,container,account}-server \ + ${SWIFT_LOCATION}/{1..4}/node/sdb1 /var/run/swift ${s}/{1..4} # Adjust rc.local to always have a /var/run/swift on reboot # created and chown to our user. # TODO (chmou): We may not have a "exit 0" - sed -i '/^exit 0/d' /etc/rc.local -cat <>/etc/rc.local + sudo sed -i '/^exit 0/d' /etc/rc.local +cat < /etc/rsyncd.conf + sed -e "s/%SWIFT_LOCATION%/$SWIFT_LOCATION/" $FILES/swift-rsyncd.conf | sudo tee /etc/rsyncd.conf # Copy proxy-server configuration cp $FILES/swift-proxy-server.conf /etc/swift/ # Generate swift.conf, we need to have the swift-hash being random # and unique. 
- local SWIFT_HASH=$(od -t x8 -N 8 -A n /etc/swift/swift.conf + swift_hash=$(od -t x8 -N 8 -A n /etc/swift/swift.conf # We need to generate a object/account/proxy configuration # emulating 4 nodes on different ports we have a litle function @@ -658,6 +663,8 @@ EOF generate_swift_configuration object 6010 2 generate_swift_configuration container 6011 2 generate_swift_configuration account 6012 2 + + unset s swift_hasH fi diff --git a/stackrc b/stackrc index e880c170..78479f9b 100644 --- a/stackrc +++ b/stackrc @@ -4,7 +4,7 @@ NOVA_BRANCH=diablo # storage service SWIFT_REPO=https://github.com/openstack/swift.git -SWIFT_BRANCH=diablo +SWIFT_BRANCH=1.4.3 # image catalog service GLANCE_REPO=https://github.com/cloudbuilders/glance.git From a2118984c0b83925360bfe3388902696146b3468 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 1 Nov 2011 15:36:00 +0100 Subject: [PATCH 016/967] Add script to create ring and start server --- files/swift-remakerings | 26 ++++++++++++++++++++++++++ files/swift-startmain | 3 +++ stack.sh | 9 +++++++++ 3 files changed, 38 insertions(+) create mode 100644 files/swift-remakerings create mode 100644 files/swift-startmain diff --git a/files/swift-remakerings b/files/swift-remakerings new file mode 100644 index 00000000..9343783f --- /dev/null +++ b/files/swift-remakerings @@ -0,0 +1,26 @@ +#!/bin/bash + +cd /etc/swift + +rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz + +swift-ring-builder object.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1 +swift-ring-builder object.builder add z1-127.0.0.1:6010/sdb1 1 +swift-ring-builder object.builder add z2-127.0.0.1:6020/sdb2 1 +swift-ring-builder object.builder add z3-127.0.0.1:6030/sdb3 1 +swift-ring-builder object.builder add z4-127.0.0.1:6040/sdb4 1 +swift-ring-builder object.builder rebalance + +swift-ring-builder container.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1 +swift-ring-builder container.builder add z1-127.0.0.1:6011/sdb1 1 +swift-ring-builder container.builder add z2-127.0.0.1:6021/sdb2 1 +swift-ring-builder container.builder add z3-127.0.0.1:6031/sdb3 1 +swift-ring-builder container.builder add z4-127.0.0.1:6041/sdb4 1 +swift-ring-builder container.builder rebalance + +swift-ring-builder account.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1 +swift-ring-builder account.builder add z1-127.0.0.1:6012/sdb1 1 +swift-ring-builder account.builder add z2-127.0.0.1:6022/sdb2 1 +swift-ring-builder account.builder add z3-127.0.0.1:6032/sdb3 1 +swift-ring-builder account.builder add z4-127.0.0.1:6042/sdb4 1 +swift-ring-builder account.builder rebalance diff --git a/files/swift-startmain b/files/swift-startmain new file mode 100644 index 00000000..05b95091 --- /dev/null +++ b/files/swift-startmain @@ -0,0 +1,3 @@ +#!/bin/bash + +swift-init all start diff --git a/stack.sh b/stack.sh index 546cc065..4c93e671 100755 --- a/stack.sh +++ b/stack.sh @@ -280,6 +280,9 @@ SWIFT_LOCATION=${SWIFT_LOCATION:-/srv} # Size of the loopback disks SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} +# Default partition power size (bigger is slower) +SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} + # Keystone # -------- @@ -664,6 +667,12 @@ EOF generate_swift_configuration container 6011 2 generate_swift_configuration account 6012 2 + # Install swift helper scripts to remake the rings and start all services. 
+ sed -e "s/%SWIFT_PARTITION_POWER_SIZE%/$SWIFT_PARTITION_POWER_SIZE/" $FILES/swift-remakerings | \ + sudo tee /usr/local/bin/swift-remakerings + sudo install -m755 $FILES/swift-startmain /usr/local/bin/ + sudo chmod +x /usr/local/bin/swift-* + unset s swift_hasH fi From d5651bb5c6fe3e51849742527aae77dadf826cb4 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 1 Nov 2011 16:22:08 +0100 Subject: [PATCH 017/967] More fixes (this is now working). --- files/swift-proxy-server.conf | 1 + files/swift-startmain | 2 +- stack.sh | 14 +++++++++++--- 3 files changed, 13 insertions(+), 4 deletions(-) mode change 100644 => 100755 files/swift-startmain diff --git a/files/swift-proxy-server.conf b/files/swift-proxy-server.conf index 9e2b1ddc..737a66df 100644 --- a/files/swift-proxy-server.conf +++ b/files/swift-proxy-server.conf @@ -9,6 +9,7 @@ pipeline = healthcheck cache tempauth proxy-server [app:proxy-server] use = egg:swift#proxy allow_account_management = true +account_autocreate = true [filter:tempauth] use = egg:swift#tempauth diff --git a/files/swift-startmain b/files/swift-startmain old mode 100644 new mode 100755 index 05b95091..69efebd9 --- a/files/swift-startmain +++ b/files/swift-startmain @@ -1,3 +1,3 @@ #!/bin/bash -swift-init all start +swift-init all restart diff --git a/stack.sh b/stack.sh index 4c93e671..5cb5a18b 100755 --- a/stack.sh +++ b/stack.sh @@ -641,7 +641,7 @@ EOF sed -e "s/%SWIFT_LOCATION%/$SWIFT_LOCATION/" $FILES/swift-rsyncd.conf | sudo tee /etc/rsyncd.conf # Copy proxy-server configuration - cp $FILES/swift-proxy-server.conf /etc/swift/ + cp $FILES/swift-proxy-server.conf /etc/swift/proxy-server.conf # Generate swift.conf, we need to have the swift-hash being random # and unique. @@ -656,8 +656,8 @@ EOF local bind_port=$2 local log_facility=$3 for node_number in {1..4};do - node_path=${SWIFT_LOCATION}/${node_number}/node - sed -e "s/%NODE_PATH%/${node_path}/;s/%BIND_PORT%/${bind_port}/;s/%LOG_FACILITY%/${log_facility}/" \ + node_path=${SWIFT_LOCATION}/${node_number} + sed -e "s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \ $FILES/swift-${server_type}-server.conf > /etc/swift/${server_type}-server/${node_number}.conf bind_port=$(( ${bind_port} + 10 )) log_facility=$(( ${log_facility} + 1 )) @@ -673,6 +673,14 @@ EOF sudo install -m755 $FILES/swift-startmain /usr/local/bin/ sudo chmod +x /usr/local/bin/swift-* + # Create ring + /usr/local/bin/swift-remakerings + + # Start everything + /usr/local/bin/swift-startmain || : + + # This should work (tempauth) + # swift -A http://127.0.0.1:8080/auth/v1.0 -U test:tester -K testing stat unset s swift_hasH fi From e1d2bcb1b9ba1dc178f753c4b41c4e75ef29b7f8 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 1 Nov 2011 17:32:11 +0100 Subject: [PATCH 018/967] Fixes. Fix mounting location at the right place. Fix rerun of the script. Start rsync. Fix permissions. 
--- stack.sh | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index 5cb5a18b..a449255b 100755 --- a/stack.sh +++ b/stack.sh @@ -623,10 +623,18 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then sudo mount ${s} fi + for x in {1..4}; do sudo ln -sf $s/$x ${SWIFT_LOCATION}/$x; done + # Create directories - sudo install -g stack -o stack -d /etc/swift /etc/swift/{object,container,account}-server \ - ${SWIFT_LOCATION}/{1..4}/node/sdb1 /var/run/swift ${s}/{1..4} + tmpd="" + for d in /etc/swift /etc/swift/{object,container,account}-server \ + ${SWIFT_LOCATION}/{1..4}/node/sdb1 /var/run/swift ${s}/{1..4};do + [[ -d $d ]] && continue + sudo install -g stack -o stack -d $d + done + sudo chown -R stack: ${SWIFT_LOCATION}/{1..4}/node + # Adjust rc.local to always have a /var/run/swift on reboot # created and chown to our user. # TODO (chmou): We may not have a "exit 0" @@ -638,8 +646,9 @@ exit 0 EOF # Add rsync file - sed -e "s/%SWIFT_LOCATION%/$SWIFT_LOCATION/" $FILES/swift-rsyncd.conf | sudo tee /etc/rsyncd.conf - + sed -e "s,%SWIFT_LOCATION%,$SWIFT_LOCATION," $FILES/swift-rsyncd.conf | sudo tee /etc/rsyncd.conf + sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync + # Copy proxy-server configuration cp $FILES/swift-proxy-server.conf /etc/swift/proxy-server.conf @@ -673,6 +682,9 @@ EOF sudo install -m755 $FILES/swift-startmain /usr/local/bin/ sudo chmod +x /usr/local/bin/swift-* + # Start rsync + sudo /etc/init.d/rsync restart || : + # Create ring /usr/local/bin/swift-remakerings From 3b3b775f3bb668671fe91ac4b440b466f668be46 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 1 Nov 2011 17:42:52 +0100 Subject: [PATCH 019/967] Ordering is important here. --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index a449255b..3e0ea21c 100755 --- a/stack.sh +++ b/stack.sh @@ -627,8 +627,8 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then # Create directories tmpd="" - for d in /etc/swift /etc/swift/{object,container,account}-server \ - ${SWIFT_LOCATION}/{1..4}/node/sdb1 /var/run/swift ${s}/{1..4};do + for d in ${s}/{1..4} /etc/swift /etc/swift/{object,container,account}-server \ + ${SWIFT_LOCATION}/{1..4}/node/sdb1 /var/run/swift ;do [[ -d $d ]] && continue sudo install -g stack -o stack -d $d done From 5ab5b2293240f7600e6b24aecfebe85f1abb1aa3 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 1 Nov 2011 18:15:36 +0100 Subject: [PATCH 020/967] Fix variabe subst. --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 3e0ea21c..d4e09be7 100755 --- a/stack.sh +++ b/stack.sh @@ -655,7 +655,7 @@ EOF # Generate swift.conf, we need to have the swift-hash being random # and unique. swift_hash=$(od -t x8 -N 8 -A n /etc/swift/swift.conf + sed -e "s/%SWIFT_HASH%/$swift_hash/" $FILES/swift.conf > /etc/swift/swift.conf # We need to generate a object/account/proxy configuration # emulating 4 nodes on different ports we have a litle function From 45c5113701ecbb426d8fea95d0c416a89b089671 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 1 Nov 2011 19:32:23 +0100 Subject: [PATCH 021/967] Add keystone support with swift. 
--- files/keystone_data.sh | 3 ++- files/swift-proxy-server.conf | 7 ++++++- stack.sh | 28 +++++++++++++++++++++++----- stackrc | 4 ++++ 4 files changed, 35 insertions(+), 7 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index a5e75a6b..d926c52d 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -30,12 +30,13 @@ $BIN_DIR/keystone-manage $* role grant KeystoneServiceAdmin admin $BIN_DIR/keystone-manage $* service add nova compute "Nova Compute Service" $BIN_DIR/keystone-manage $* service add glance image "Glance Image Service" $BIN_DIR/keystone-manage $* service add keystone identity "Keystone Identity Service" +$BIN_DIR/keystone-manage $* service add swift object-store "Swift Service" #endpointTemplates $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne nova http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% 1 1 $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne glance http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% 1 1 $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%HOST_IP%:5000/v2.0 http://%HOST_IP%:35357/v2.0 http://%HOST_IP%:5000/v2.0 1 1 -# $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%HOST_IP%:8080/v1/AUTH_%tenant_id% http://%HOST_IP%:8080/ http://%HOST_IP%:8080/v1/AUTH_%tenant_id% 1 1 +$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%HOST_IP%:8080/v1/AUTH_%tenant_id% http://%HOST_IP%:8080/ http://%HOST_IP%:8080/v1/AUTH_%tenant_id% 1 1 # Tokens $BIN_DIR/keystone-manage $* token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00 diff --git a/files/swift-proxy-server.conf b/files/swift-proxy-server.conf index 737a66df..9a3b54b5 100644 --- a/files/swift-proxy-server.conf +++ b/files/swift-proxy-server.conf @@ -4,13 +4,18 @@ user = stack log_facility = LOG_LOCAL1 [pipeline:main] -pipeline = healthcheck cache tempauth proxy-server +pipeline = healthcheck cache %AUTH_SERVER% proxy-server [app:proxy-server] use = egg:swift#proxy allow_account_management = true account_autocreate = true +[filter:keystone] +use = egg:swiftkeystone2#keystone2 +keystone_admin_token = %SERVICE_TOKEN% +keystone_url = http://localhost:35357/v2.0 + [filter:tempauth] use = egg:swift#tempauth user_admin_admin = admin .admin .reseller_admin diff --git a/stack.sh b/stack.sh index d4e09be7..8a1b9bc7 100755 --- a/stack.sh +++ b/stack.sh @@ -151,6 +151,7 @@ NOVACLIENT_DIR=$DEST/python-novaclient OPENSTACKX_DIR=$DEST/openstackx NOVNC_DIR=$DEST/noVNC SWIFT_DIR=$DEST/swift +SWIFT_KEYSTONE_DIR=$DEST/swift-keystone2 # Specify which services to launch. 
These generally correspond to screen tabs ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit,swift} @@ -363,6 +364,8 @@ function git_clone { git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH # storage service git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH +# swift + keystone middleware +git_clone $SWIFT_KEYSTONE_REPO $SWIFT_KEYSTONE_DIR $SWIFT_KEYSTONE_BRANCH # image catalog service git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH # unified auth system (manages accounts/tokens) @@ -385,6 +388,7 @@ git_clone $OPENSTACKX_REPO $OPENSTACKX_DIR $OPENSTACKX_BRANCH # allowing ``import nova`` or ``import glance.client`` cd $KEYSTONE_DIR; sudo python setup.py develop cd $SWIFT_DIR; sudo python setup.py develop +cd $SWIFT_KEYSTONE_DIR; sudo python setup.py develop cd $GLANCE_DIR; sudo python setup.py develop cd $NOVACLIENT_DIR; sudo python setup.py develop cd $NOVA_DIR; sudo python setup.py develop @@ -648,9 +652,18 @@ EOF # Add rsync file sed -e "s,%SWIFT_LOCATION%,$SWIFT_LOCATION," $FILES/swift-rsyncd.conf | sudo tee /etc/rsyncd.conf sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync - - # Copy proxy-server configuration - cp $FILES/swift-proxy-server.conf /etc/swift/proxy-server.conf + + if [[ "$ENABLED_SERVICES" =~ "key" ]]; then + swift_auth_server=keystone + # Temporary until we get this integrated in swift. + sudo curl -s -o/usr/local/bin/swift \ + 'https://review.openstack.org/gitweb?p=openstack/swift.git;a=blob_plain;f=bin/swift;hb=48bfda6e2fdf3886c98bd15649887d54b9a2574e' + else + swift_auth_server=tempauth + fi + + sed "s/%SERVICE_TOKEN%/${SERVICE_TOKEN}/;s/%AUTH_SERVER%/${swift_auth_server}/" \ + $FILES/swift-proxy-server.conf|sudo tee /etc/swift/proxy-server.conf # Generate swift.conf, we need to have the swift-hash being random # and unique. 
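The %AUTH_SERVER% placeholder introduced above is what flips the proxy pipeline between the two auth middlewares; rendering the template by hand shows the effect (output written out here as a sketch, not captured from a running system):

    # with "key" in ENABLED_SERVICES:
    sed "s/%AUTH_SERVER%/keystone/" $FILES/swift-proxy-server.conf | grep '^pipeline'
    #   pipeline = healthcheck cache keystone proxy-server
    # otherwise:
    sed "s/%AUTH_SERVER%/tempauth/" $FILES/swift-proxy-server.conf | grep '^pipeline'
    #   pipeline = healthcheck cache tempauth proxy-server

Both [filter:keystone] and [filter:tempauth] stay in the rendered file; only the pipeline line decides which one paste actually loads.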
@@ -664,6 +677,8 @@ EOF local server_type=$1 local bind_port=$2 local log_facility=$3 + local node_number + for node_number in {1..4};do node_path=${SWIFT_LOCATION}/${node_number} sed -e "s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \ @@ -693,8 +708,7 @@ EOF # This should work (tempauth) # swift -A http://127.0.0.1:8080/auth/v1.0 -U test:tester -K testing stat - unset s swift_hasH - + unset s swift_hash swift_auth_server tmpd fi # Volume Service @@ -976,6 +990,10 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then echo "examples on using novaclient command line is in exercise.sh" echo "the default users are: admin and demo" echo "the password: $ADMIN_PASSWORD" + if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then + echo "Swift: swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin:admin -K $ADMIN_PASSWORD stat" + fi + fi # indicate how long this took to run (bash maintained variable 'SECONDS') diff --git a/stackrc b/stackrc index 78479f9b..6d4454e1 100644 --- a/stackrc +++ b/stackrc @@ -6,6 +6,10 @@ NOVA_BRANCH=diablo SWIFT_REPO=https://github.com/openstack/swift.git SWIFT_BRANCH=1.4.3 +# swift and keystone integration +SWIFT_KEYSTONE_REPO=https://github.com/cloudbuilders/swift-keystone2.git +SWIFT_KEYSTONE_BRANCH=master + # image catalog service GLANCE_REPO=https://github.com/cloudbuilders/glance.git GLANCE_BRANCH=diablo From 5c50f0dcde905e41725d6bb4d2a708373916a26b Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 2 Nov 2011 01:02:30 +0100 Subject: [PATCH 022/967] Don't enable it by default. --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 8a1b9bc7..9343f225 100755 --- a/stack.sh +++ b/stack.sh @@ -154,7 +154,7 @@ SWIFT_DIR=$DEST/swift SWIFT_KEYSTONE_DIR=$DEST/swift-keystone2 # Specify which services to launch. These generally correspond to screen tabs -ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit,swift} +ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit} # Nova hypervisor configuration. We default to libvirt whth **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. Stack.sh can From ab75f4becfb839e8bab5f602bb414a8f4eab090e Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 2 Nov 2011 01:03:29 +0100 Subject: [PATCH 023/967] Ask for SWIFT_HASH. --- stack.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 9343f225..051be943 100755 --- a/stack.sh +++ b/stack.sh @@ -284,6 +284,9 @@ SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} # Default partition power size (bigger is slower) SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} +# Swift hash, this must be unique +read_password SWIFT_HASH "ENTER A RANDOM HASH SHARED BETWEEN ALL PROCESSES." + # Keystone # -------- @@ -667,7 +670,6 @@ EOF # Generate swift.conf, we need to have the swift-hash being random # and unique. 
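# The read_password prompt added above can be skipped by pre-seeding SWIFT_HASH in
# localrc, the same way the other passwords can be stored there. A sketch of one way
# to generate a value, mirroring the inline od call that the hunk below removes
# (any sufficiently random string works):
SWIFT_HASH=$(od -t x8 -N 8 -A n </dev/random | tr -d ' ')
echo "SWIFT_HASH=$SWIFT_HASH" >> localrc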
- swift_hash=$(od -t x8 -N 8 -A n /etc/swift/swift.conf # We need to generate a object/account/proxy configuration From a55b09d9e86794c782e954084415da86cdb846ff Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 2 Nov 2011 01:07:43 +0100 Subject: [PATCH 024/967] change stack to $USER --- stack.sh | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/stack.sh b/stack.sh index 051be943..9ba05644 100755 --- a/stack.sh +++ b/stack.sh @@ -637,20 +637,10 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then for d in ${s}/{1..4} /etc/swift /etc/swift/{object,container,account}-server \ ${SWIFT_LOCATION}/{1..4}/node/sdb1 /var/run/swift ;do [[ -d $d ]] && continue - sudo install -g stack -o stack -d $d + sudo install -o ${USER} -d $d done sudo chown -R stack: ${SWIFT_LOCATION}/{1..4}/node - - # Adjust rc.local to always have a /var/run/swift on reboot - # created and chown to our user. - # TODO (chmou): We may not have a "exit 0" - sudo sed -i '/^exit 0/d' /etc/rc.local -cat < Date: Wed, 2 Nov 2011 01:10:38 +0100 Subject: [PATCH 025/967] Don't use 'stack' in static but use $USER. --- files/swift-account-server.conf | 2 +- files/swift-container-server.conf | 2 +- files/swift-object-server.conf | 2 +- files/swift-proxy-server.conf | 2 +- stack.sh | 4 ++-- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/files/swift-account-server.conf b/files/swift-account-server.conf index b5451c69..920d45c9 100644 --- a/files/swift-account-server.conf +++ b/files/swift-account-server.conf @@ -2,7 +2,7 @@ devices = %NODE_PATH%/node mount_check = false bind_port = %BIND_PORT% -user = stack +user = %USER% log_facility = LOG_LOCAL%LOG_FACILITY% [pipeline:main] diff --git a/files/swift-container-server.conf b/files/swift-container-server.conf index c6300766..8d59bf21 100644 --- a/files/swift-container-server.conf +++ b/files/swift-container-server.conf @@ -2,7 +2,7 @@ devices = %NODE_PATH%/node mount_check = false bind_port = %BIND_PORT% -user = stack +user = %USER% log_facility = LOG_LOCAL%LOG_FACILITY% [pipeline:main] diff --git a/files/swift-object-server.conf b/files/swift-object-server.conf index 4a007135..1b72e703 100644 --- a/files/swift-object-server.conf +++ b/files/swift-object-server.conf @@ -2,7 +2,7 @@ devices = %NODE_PATH%/node mount_check = false bind_port = %BIND_PORT% -user = stack +user = %USER% log_facility = LOG_LOCAL%LOG_FACILITY% [pipeline:main] diff --git a/files/swift-proxy-server.conf b/files/swift-proxy-server.conf index 9a3b54b5..6b7dd528 100644 --- a/files/swift-proxy-server.conf +++ b/files/swift-proxy-server.conf @@ -1,6 +1,6 @@ [DEFAULT] bind_port = 8080 -user = stack +user = %USER% log_facility = LOG_LOCAL1 [pipeline:main] diff --git a/stack.sh b/stack.sh index 9ba05644..62be7f56 100755 --- a/stack.sh +++ b/stack.sh @@ -655,7 +655,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then swift_auth_server=tempauth fi - sed "s/%SERVICE_TOKEN%/${SERVICE_TOKEN}/;s/%AUTH_SERVER%/${swift_auth_server}/" \ + sed "s/%USER%/$USER/;s/%SERVICE_TOKEN%/${SERVICE_TOKEN}/;s/%AUTH_SERVER%/${swift_auth_server}/" \ $FILES/swift-proxy-server.conf|sudo tee /etc/swift/proxy-server.conf # Generate swift.conf, we need to have the swift-hash being random @@ -673,7 +673,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then for node_number in {1..4};do node_path=${SWIFT_LOCATION}/${node_number} - sed -e "s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \ + sed -e 
"s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \ $FILES/swift-${server_type}-server.conf > /etc/swift/${server_type}-server/${node_number}.conf bind_port=$(( ${bind_port} + 10 )) log_facility=$(( ${log_facility} + 1 )) From 067163dfd1db129d089a393d0a15d301f5384335 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 2 Nov 2011 14:25:06 +0100 Subject: [PATCH 026/967] More stack user removals. --- files/swift-rsyncd.conf | 4 ++-- stack.sh | 14 ++++++++------ 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/files/swift-rsyncd.conf b/files/swift-rsyncd.conf index 1cea98cc..568f2404 100644 --- a/files/swift-rsyncd.conf +++ b/files/swift-rsyncd.conf @@ -1,5 +1,5 @@ -uid = stack -gid = stack +uid = %USER% +gid = %GROUP% log file = /var/log/rsyncd.log pid file = /var/run/rsyncd.pid address = 127.0.0.1 diff --git a/stack.sh b/stack.sh index 62be7f56..ed6daf86 100755 --- a/stack.sh +++ b/stack.sh @@ -121,7 +121,7 @@ if [[ $EUID -eq 0 ]]; then echo "Copying files to stack user" STACK_DIR="$DEST/${PWD##*/}" cp -r -f "$PWD" "$STACK_DIR" - chown -R stack "$STACK_DIR" + chown -R $USER "$STACK_DIR" if [[ "$SHELL_AFTER_RUN" != "no" ]]; then exec su -c "set -e; cd $STACK_DIR; bash stack.sh; bash" stack else @@ -604,14 +604,16 @@ fi # Storage Service if [[ "$ENABLED_SERVICES" =~ "swift" ]];then + USER_GROUP=$(id -g) + sudo mkdir -p ${SWIFT_LOCATION}/drives - sudo chown -R stack: ${SWIFT_LOCATION}/drives + sudo chown -R $USER: ${SWIFT_LOCATION}/drives s=${SWIFT_LOCATION}/drives/sdb1 # Shortcut variable # Create a loopback disk and format it with XFS. if [[ ! -e ${SWIFT_LOCATION}/swift-disk ]];then sudo touch ${SWIFT_LOCATION}/swift-disk - sudo chown stack: ${SWIFT_LOCATION}/swift-disk + sudo chown $USER: ${SWIFT_LOCATION}/swift-disk dd if=/dev/zero of=${SWIFT_LOCATION}/swift-disk bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} mkfs.xfs -f -i size=1024 ${SWIFT_LOCATION}/swift-disk @@ -637,13 +639,13 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then for d in ${s}/{1..4} /etc/swift /etc/swift/{object,container,account}-server \ ${SWIFT_LOCATION}/{1..4}/node/sdb1 /var/run/swift ;do [[ -d $d ]] && continue - sudo install -o ${USER} -d $d + sudo install -o ${USER} -g $USER_GROUP -d $d done - sudo chown -R stack: ${SWIFT_LOCATION}/{1..4}/node + sudo chown -R $USER: ${SWIFT_LOCATION}/{1..4}/node # Add rsync file - sed -e "s,%SWIFT_LOCATION%,$SWIFT_LOCATION," $FILES/swift-rsyncd.conf | sudo tee /etc/rsyncd.conf + sed -e "s/%GROUP%/${USER_GROUP}/;s/%USER%/$USER/;s,%SWIFT_LOCATION%,$SWIFT_LOCATION," $FILES/swift-rsyncd.conf | sudo tee /etc/rsyncd.conf sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync if [[ "$ENABLED_SERVICES" =~ "key" ]]; then From 55ca8c31647cd28d2870cddb8e38ea7316f82205 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 2 Nov 2011 14:28:41 +0100 Subject: [PATCH 027/967] Remove fstab entry and mount it manually. --- stack.sh | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/stack.sh b/stack.sh index ed6daf86..69d48deb 100755 --- a/stack.sh +++ b/stack.sh @@ -619,17 +619,10 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then mkfs.xfs -f -i size=1024 ${SWIFT_LOCATION}/swift-disk fi - # Add the mountpoint to fstab - if ! 
egrep -q "^${SWIFT_LOCATION}/swift-disk" /etc/fstab;then - echo "# Added by devstack" | sudo tee -a /etc/fstab - echo "${SWIFT_LOCATION}/swift-disk ${s} xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0" | \ - sudo tee -a /etc/fstab - fi - # Create and mount drives. mkdir -p ${s} if ! egrep -q "$s" /proc/mounts;then - sudo mount ${s} + sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 ${s} fi for x in {1..4}; do sudo ln -sf $s/$x ${SWIFT_LOCATION}/$x; done From 06018a6bdbfea7b423194bde1b8b05cd8ed91dff Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 2 Nov 2011 16:18:47 +0100 Subject: [PATCH 028/967] Fix wording. --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 69d48deb..83fec6a5 100755 --- a/stack.sh +++ b/stack.sh @@ -285,7 +285,7 @@ SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} # Swift hash, this must be unique -read_password SWIFT_HASH "ENTER A RANDOM HASH SHARED BETWEEN ALL PROCESSES." +read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH." # Keystone # -------- From e1136cb8dc5c4583165e7472a7112bae6f165e02 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 2 Nov 2011 16:19:16 +0100 Subject: [PATCH 029/967] Fix SWIFT_HASH_PREFIX variable. --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 83fec6a5..7f2a5a23 100755 --- a/stack.sh +++ b/stack.sh @@ -655,7 +655,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then # Generate swift.conf, we need to have the swift-hash being random # and unique. - sed -e "s/%SWIFT_HASH%/$swift_hash/" $FILES/swift.conf > /etc/swift/swift.conf + sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift.conf > /etc/swift/swift.conf # We need to generate a object/account/proxy configuration # emulating 4 nodes on different ports we have a litle function From b93478f6c752380481a75119bcf56abae5533d25 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 2 Nov 2011 16:49:56 +0100 Subject: [PATCH 030/967] Change drives location. Change drives and data location to ${SWIFT_DIR}/data --- stack.sh | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/stack.sh b/stack.sh index 7f2a5a23..8de1e846 100755 --- a/stack.sh +++ b/stack.sh @@ -274,9 +274,9 @@ GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$HOST_IP:9292} # SWIFT # ----- -# + # Location of SWIFT drives -SWIFT_LOCATION=${SWIFT_LOCATION:-/srv} +SWIFT_LOCATION=${SWIFT_LOCATION:-${SWIFT_DIR}/data} # Size of the loopback disks SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} @@ -611,18 +611,21 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then s=${SWIFT_LOCATION}/drives/sdb1 # Shortcut variable # Create a loopback disk and format it with XFS. - if [[ ! -e ${SWIFT_LOCATION}/swift-disk ]];then - sudo touch ${SWIFT_LOCATION}/swift-disk - sudo chown $USER: ${SWIFT_LOCATION}/swift-disk + if [[ ! -e ${SWIFT_LOCATION}/drives/images/swift.img ]];then + mkdir -p ${SWIFT_LOCATION}/drives/images + sudo touch ${SWIFT_LOCATION}/drives/images/swift.img + sudo chown $USER: ${SWIFT_LOCATION}/drives/images/swift.img - dd if=/dev/zero of=${SWIFT_LOCATION}/swift-disk bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} - mkfs.xfs -f -i size=1024 ${SWIFT_LOCATION}/swift-disk + dd if=/dev/zero of=${SWIFT_LOCATION}/drives/images/swift.img \ + bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} + mkfs.xfs -f -i size=1024 ${SWIFT_LOCATION}/drives/images/swift.img fi # Create and mount drives. 
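# Aside on the dd invocation above: count=0 with a large seek only extends the file
# to roughly 1 GB without writing any blocks, so the backing file stays sparse until
# swift actually stores data in it. A quick way to check (throwaway path, sketch only):
dd if=/dev/zero of=/tmp/swift-test.img bs=1024 count=0 seek=1000000
ls -lh /tmp/swift-test.img   # apparent size ~977M
du -h /tmp/swift-test.img    # blocks actually allocated: ~0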
mkdir -p ${s} if ! egrep -q "$s" /proc/mounts;then - sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 ${s} + sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ + ${SWIFT_LOCATION}/drives/images/swift.img ${s} fi for x in {1..4}; do sudo ln -sf $s/$x ${SWIFT_LOCATION}/$x; done From 3d9c5d5e4eaadd5f28b6830fb3d6056aa918704c Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 2 Nov 2011 17:57:11 +0100 Subject: [PATCH 031/967] Add documentation and fixes. - Fix some spelling mistakes in the documentation. - Add swift documentation. - Try to make the code more explicit (ie: remove shortcut variables). --- stack.sh | 91 +++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 61 insertions(+), 30 deletions(-) diff --git a/stack.sh b/stack.sh index 8de1e846..df9d4b2e 100755 --- a/stack.sh +++ b/stack.sh @@ -70,7 +70,7 @@ fi # called ``localrc`` # # If ``localrc`` exists, then ``stackrc`` will load those settings. This is -# useful for changing a branch or repostiory to test other versions. Also you +# useful for changing a branch or repository to test other versions. Also you # can store your other settings like **MYSQL_PASSWORD** or **ADMIN_PASSWORD** instead # of letting devstack generate random ones for you. source ./stackrc @@ -241,7 +241,7 @@ MULTI_HOST=${MULTI_HOST:-0} # If you are running on a single node and don't need to access the VMs from # devices other than that node, you can set the flat interface to the same # value as ``FLAT_NETWORK_BRIDGE``. This will stop the network hiccup from -# occuring. +# occurring. FLAT_INTERFACE=${FLAT_INTERFACE:-eth0} ## FIXME(ja): should/can we check that FLAT_INTERFACE is sane? @@ -274,17 +274,31 @@ GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$HOST_IP:9292} # SWIFT # ----- +# TODO: implement glance support +# TODO: add logging to different location. -# Location of SWIFT drives +# By default the location of swift drives and objects is located inside +# the swift source directory. SWIFT_LOCATION variable allow you to redefine +# this. SWIFT_LOCATION=${SWIFT_LOCATION:-${SWIFT_DIR}/data} -# Size of the loopback disks +# devstack will create a loop-back disk formatted as XFS to store the +# swift data. By default the disk size is 1 gigabyte. The variable +# SWIFT_LOOPBACK_DISK_SIZE specified in bytes allow you to change +# that. SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} -# Default partition power size (bigger is slower) +# The ring uses a configurable number of bits from a path’s MD5 hash as +# a partition index that designates a device. The number of bits kept +# from the hash is known as the partition power, and 2 to the partition +# power indicates the partition count. Partitioning the full MD5 hash +# ring allows other parts of the cluster to work in batches of items at +# once which ends up either more efficient or at least less complex than +# working with each item separately or the entire cluster all at once. +# By default we define 9 for the partition count (which mean 512). SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} -# Swift hash, this must be unique +# SWIFT_HASH is a random unique string for a swift cluster that can never change. read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH." # Keystone @@ -299,7 +313,7 @@ read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE ( LOGFILE=${LOGFILE:-"$PWD/stack.sh.$$.log"} ( # So that errors don't compound we exit on any errors so you see only the -# first error that occured. 
+# first error that occurred. trap failed ERR failed() { local r=$? @@ -604,13 +618,14 @@ fi # Storage Service if [[ "$ENABLED_SERVICES" =~ "swift" ]];then + # We first do a bit of setup by creating the directories and + # changing the permissions so we can run it as our user. + USER_GROUP=$(id -g) - sudo mkdir -p ${SWIFT_LOCATION}/drives - sudo chown -R $USER: ${SWIFT_LOCATION}/drives - s=${SWIFT_LOCATION}/drives/sdb1 # Shortcut variable + sudo chown -R $USER:${USER_GROUP} ${SWIFT_LOCATION}/drives - # Create a loopback disk and format it with XFS. + # We then create a loopback disk and format it to XFS. if [[ ! -e ${SWIFT_LOCATION}/drives/images/swift.img ]];then mkdir -p ${SWIFT_LOCATION}/drives/images sudo touch ${SWIFT_LOCATION}/drives/images/swift.img @@ -621,18 +636,22 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then mkfs.xfs -f -i size=1024 ${SWIFT_LOCATION}/drives/images/swift.img fi - # Create and mount drives. - mkdir -p ${s} - if ! egrep -q "$s" /proc/mounts;then + # After the drive being created we mount the disk with a few mount + # options to make it most efficient as possible for swift. + mkdir -p ${SWIFT_LOCATION}/drives/sdb1 + if ! egrep -q ${SWIFT_LOCATION}/drives/sdb1 /proc/mounts;then sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ - ${SWIFT_LOCATION}/drives/images/swift.img ${s} + ${SWIFT_LOCATION}/drives/images/swift.img ${SWIFT_LOCATION}/drives/sdb1 fi + # We then create link to that mounted location so swift would know + # where to go. for x in {1..4}; do sudo ln -sf $s/$x ${SWIFT_LOCATION}/$x; done - # Create directories + # We now have to emulate a few different servers into one we + # create all the directories needed for swift tmpd="" - for d in ${s}/{1..4} /etc/swift /etc/swift/{object,container,account}-server \ + for d in ${SWIFT_LOCATION}/drives/sdb1/{1..4} /etc/swift /etc/swift/{object,container,account}-server \ ${SWIFT_LOCATION}/{1..4}/node/sdb1 /var/run/swift ;do [[ -d $d ]] && continue sudo install -o ${USER} -g $USER_GROUP -d $d @@ -640,28 +659,35 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then sudo chown -R $USER: ${SWIFT_LOCATION}/{1..4}/node - # Add rsync file + # Swift use rsync to syncronize between all the different + # partitions (which make more sense when you have a multi-node + # setup) we configure it with our version of rsync. sed -e "s/%GROUP%/${USER_GROUP}/;s/%USER%/$USER/;s,%SWIFT_LOCATION%,$SWIFT_LOCATION," $FILES/swift-rsyncd.conf | sudo tee /etc/rsyncd.conf sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync + # By default Swift will be installed with the tempauth middleware + # which has some default username and password if you have + # configured keystone it will checkout the directory. if [[ "$ENABLED_SERVICES" =~ "key" ]]; then swift_auth_server=keystone - # Temporary until we get this integrated in swift. + # We need a special version of bin/swift which understand the + # OpenStack api 2.0, we download it until this is getting + # integrated in swift. sudo curl -s -o/usr/local/bin/swift \ 'https://review.openstack.org/gitweb?p=openstack/swift.git;a=blob_plain;f=bin/swift;hb=48bfda6e2fdf3886c98bd15649887d54b9a2574e' else swift_auth_server=tempauth fi + # We do the install of the proxy-server and swift configuration + # replacing a few directives to match our configuration. 
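# Picking up the partition-power note above: the relationship is simply
# partitions = 2 ** SWIFT_PARTITION_POWER_SIZE, so the default of 9 yields 512
# partitions and every increment doubles the ring (and slows rebalancing):
echo $(( 2 ** 9 ))    # 512
echo $(( 2 ** 10 ))   # 1024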
sed "s/%USER%/$USER/;s/%SERVICE_TOKEN%/${SERVICE_TOKEN}/;s/%AUTH_SERVER%/${swift_auth_server}/" \ $FILES/swift-proxy-server.conf|sudo tee /etc/swift/proxy-server.conf - # Generate swift.conf, we need to have the swift-hash being random - # and unique. sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift.conf > /etc/swift/swift.conf # We need to generate a object/account/proxy configuration - # emulating 4 nodes on different ports we have a litle function + # emulating 4 nodes on different ports we have a little function # that help us doing that. function generate_swift_configuration() { local server_type=$1 @@ -681,23 +707,28 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then generate_swift_configuration container 6011 2 generate_swift_configuration account 6012 2 - # Install swift helper scripts to remake the rings and start all services. + # We create two helper scripts : + # + # - swift-remakerings + # Allow to recreate rings from scratch. + # - swift-startmain + # Restart your full cluster. + # sed -e "s/%SWIFT_PARTITION_POWER_SIZE%/$SWIFT_PARTITION_POWER_SIZE/" $FILES/swift-remakerings | \ sudo tee /usr/local/bin/swift-remakerings sudo install -m755 $FILES/swift-startmain /usr/local/bin/ sudo chmod +x /usr/local/bin/swift-* - # Start rsync + # We then can start rsync. sudo /etc/init.d/rsync restart || : - # Create ring + # Create our ring for the object/container/account. /usr/local/bin/swift-remakerings - # Start everything + # And now we launch swift-startmain to get our cluster running + # ready to be tested. /usr/local/bin/swift-startmain || : - # This should work (tempauth) - # swift -A http://127.0.0.1:8080/auth/v1.0 -U test:tester -K testing stat unset s swift_hash swift_auth_server tmpd fi @@ -851,7 +882,7 @@ function screen_it { screen -d -m -S stack -t stack sleep 1 -# launch the glance registery service +# launch the glance registry service if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=etc/glance-registry.conf" fi @@ -908,7 +939,7 @@ screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log" # TTY also uses cloud-init, supporting login via keypair and sending scripts as # userdata. See https://help.ubuntu.com/community/CloudInit for more on cloud-init # -# Override ``IMAGE_URLS`` with a comma-seperated list of uec images. +# Override ``IMAGE_URLS`` with a comma-separated list of uec images. # # * **natty**: http://uec-images.ubuntu.com/natty/current/natty-server-cloudimg-amd64.tar.gz # * **oneiric**: http://uec-images.ubuntu.com/oneiric/current/oneiric-server-cloudimg-amd64.tar.gz From e8d11580912fbf3580b268baa7cb0371d214a8ab Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 2 Nov 2011 18:16:32 +0100 Subject: [PATCH 032/967] Missed one variable subst from the last commit. --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index df9d4b2e..d0125070 100755 --- a/stack.sh +++ b/stack.sh @@ -646,7 +646,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then # We then create link to that mounted location so swift would know # where to go. 
- for x in {1..4}; do sudo ln -sf $s/$x ${SWIFT_LOCATION}/$x; done + for x in {1..4}; do sudo ln -sf ${SWIFT_LOCATION}/drives/sdb1/$x ${SWIFT_LOCATION}/$x; done # We now have to emulate a few different servers into one we # create all the directories needed for swift From bbed01d3905fca1561b4071e7a2c2fffaf8aca88 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 2 Nov 2011 18:22:43 +0100 Subject: [PATCH 033/967] Add swift to exercise.sh --- exercise.sh | 19 +++++++++++++++++++ stack.sh | 4 ---- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/exercise.sh b/exercise.sh index 99b0f3bb..c737bcc3 100755 --- a/exercise.sh +++ b/exercise.sh @@ -191,3 +191,22 @@ nova secgroup-delete $SECGROUP # make sure that we can describe instances euca-describe-instances + +# Testing Swift +# ============= + +# Check if we have to swift via keystone +swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD stat + +# We start by creating a test container +swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD post testcontainer + +# add some files into it. +swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue + +# list them +swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD list testcontainer + +# And we may want to delete them now that we have tested that +# everything works. +swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete --all testcontainer diff --git a/stack.sh b/stack.sh index d0125070..3571df54 100755 --- a/stack.sh +++ b/stack.sh @@ -1011,10 +1011,6 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then echo "examples on using novaclient command line is in exercise.sh" echo "the default users are: admin and demo" echo "the password: $ADMIN_PASSWORD" - if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then - echo "Swift: swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin:admin -K $ADMIN_PASSWORD stat" - fi - fi # indicate how long this took to run (bash maintained variable 'SECONDS') From 8d5334c729c3625b97a23ead01aac313c40c7db8 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 2 Nov 2011 18:50:57 +0100 Subject: [PATCH 034/967] Remove dup and whitespaces. --- files/apts/swift | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/files/apts/swift b/files/apts/swift index 07767077..c52c68b7 100644 --- a/files/apts/swift +++ b/files/apts/swift @@ -1,19 +1,17 @@ -curl -gcc +curl +gcc memcached -memcached -python-configobj +python-configobj python-coverage python-dev -python-eventlet -python-greenlet +python-eventlet +python-greenlet python-netifaces python-nose -python-nose -python-pastedeploy -python-setuptools -python-simplejson -python-webob +python-pastedeploy +python-setuptools +python-simplejson +python-webob python-xattr -sqlite3 -xfsprogs +sqlite3 +xfsprogs From 1298dccb3d1696916ee2028f87634623ac33abc8 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 2 Nov 2011 19:09:04 +0100 Subject: [PATCH 035/967] Fix delete of container. --- exercise.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercise.sh b/exercise.sh index c737bcc3..c49f1242 100755 --- a/exercise.sh +++ b/exercise.sh @@ -209,4 +209,4 @@ swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWOR # And we may want to delete them now that we have tested that # everything works. 
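# Every exercise call above repeats the same authentication boilerplate; in a local
# test script it could be factored into a small wrapper (sketch only, not part of the
# repo; assumes HOST_IP and ADMIN_PASSWORD are already set, as in exercise.sh):
swift_cli() {
    swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD "$@"
}
swift_cli stat
swift_cli upload testcontainer /etc/issue
swift_cli delete testcontainer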
-swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete --all testcontainer +swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete testcontainer From 537ddff25987e79470613a605fdfc24629eaa862 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 2 Nov 2011 19:09:30 +0100 Subject: [PATCH 036/967] Move all swift files to $FILES/swift/ --- .../account-server.conf} | 0 .../container-server.conf} | 0 .../object-server.conf} | 0 .../proxy-server.conf} | 0 files/{swift-rsyncd.conf => swift/rsyncd.conf} | 0 files/{ => swift}/swift-remakerings | 0 files/{ => swift}/swift-startmain | 0 files/{ => swift}/swift.conf | 0 stack.sh | 14 +++++++------- 9 files changed, 7 insertions(+), 7 deletions(-) rename files/{swift-account-server.conf => swift/account-server.conf} (100%) rename files/{swift-container-server.conf => swift/container-server.conf} (100%) rename files/{swift-object-server.conf => swift/object-server.conf} (100%) rename files/{swift-proxy-server.conf => swift/proxy-server.conf} (100%) rename files/{swift-rsyncd.conf => swift/rsyncd.conf} (100%) rename files/{ => swift}/swift-remakerings (100%) mode change 100644 => 100755 rename files/{ => swift}/swift-startmain (100%) rename files/{ => swift}/swift.conf (100%) diff --git a/files/swift-account-server.conf b/files/swift/account-server.conf similarity index 100% rename from files/swift-account-server.conf rename to files/swift/account-server.conf diff --git a/files/swift-container-server.conf b/files/swift/container-server.conf similarity index 100% rename from files/swift-container-server.conf rename to files/swift/container-server.conf diff --git a/files/swift-object-server.conf b/files/swift/object-server.conf similarity index 100% rename from files/swift-object-server.conf rename to files/swift/object-server.conf diff --git a/files/swift-proxy-server.conf b/files/swift/proxy-server.conf similarity index 100% rename from files/swift-proxy-server.conf rename to files/swift/proxy-server.conf diff --git a/files/swift-rsyncd.conf b/files/swift/rsyncd.conf similarity index 100% rename from files/swift-rsyncd.conf rename to files/swift/rsyncd.conf diff --git a/files/swift-remakerings b/files/swift/swift-remakerings old mode 100644 new mode 100755 similarity index 100% rename from files/swift-remakerings rename to files/swift/swift-remakerings diff --git a/files/swift-startmain b/files/swift/swift-startmain similarity index 100% rename from files/swift-startmain rename to files/swift/swift-startmain diff --git a/files/swift.conf b/files/swift/swift.conf similarity index 100% rename from files/swift.conf rename to files/swift/swift.conf diff --git a/stack.sh b/stack.sh index 3571df54..d8be4656 100755 --- a/stack.sh +++ b/stack.sh @@ -617,7 +617,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-net" ]]; then fi # Storage Service -if [[ "$ENABLED_SERVICES" =~ "swift" ]];then +if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then # We first do a bit of setup by creating the directories and # changing the permissions so we can run it as our user. @@ -662,7 +662,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then # Swift use rsync to syncronize between all the different # partitions (which make more sense when you have a multi-node # setup) we configure it with our version of rsync. 
- sed -e "s/%GROUP%/${USER_GROUP}/;s/%USER%/$USER/;s,%SWIFT_LOCATION%,$SWIFT_LOCATION," $FILES/swift-rsyncd.conf | sudo tee /etc/rsyncd.conf + sed -e "s/%GROUP%/${USER_GROUP}/;s/%USER%/$USER/;s,%SWIFT_LOCATION%,$SWIFT_LOCATION," $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync # By default Swift will be installed with the tempauth middleware @@ -682,9 +682,9 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then # We do the install of the proxy-server and swift configuration # replacing a few directives to match our configuration. sed "s/%USER%/$USER/;s/%SERVICE_TOKEN%/${SERVICE_TOKEN}/;s/%AUTH_SERVER%/${swift_auth_server}/" \ - $FILES/swift-proxy-server.conf|sudo tee /etc/swift/proxy-server.conf + $FILES/swift/proxy-server.conf|sudo tee /etc/swift/proxy-server.conf - sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift.conf > /etc/swift/swift.conf + sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > /etc/swift/swift.conf # We need to generate a object/account/proxy configuration # emulating 4 nodes on different ports we have a little function @@ -698,7 +698,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then for node_number in {1..4};do node_path=${SWIFT_LOCATION}/${node_number} sed -e "s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \ - $FILES/swift-${server_type}-server.conf > /etc/swift/${server_type}-server/${node_number}.conf + $FILES/swift/${server_type}-server.conf > /etc/swift/${server_type}-server/${node_number}.conf bind_port=$(( ${bind_port} + 10 )) log_facility=$(( ${log_facility} + 1 )) done @@ -714,9 +714,9 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]];then # - swift-startmain # Restart your full cluster. # - sed -e "s/%SWIFT_PARTITION_POWER_SIZE%/$SWIFT_PARTITION_POWER_SIZE/" $FILES/swift-remakerings | \ + sed -e "s/%SWIFT_PARTITION_POWER_SIZE%/$SWIFT_PARTITION_POWER_SIZE/" $FILES/swift/swift-remakerings | \ sudo tee /usr/local/bin/swift-remakerings - sudo install -m755 $FILES/swift-startmain /usr/local/bin/ + sudo install -m755 $FILES/swift/swift-startmain /usr/local/bin/ sudo chmod +x /usr/local/bin/swift-* # We then can start rsync. From bdc254eb38037be51f125f3b84f5d4c698e7c2ab Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 2 Nov 2011 23:57:12 -0500 Subject: [PATCH 037/967] emergency fix to not install openvswitch if user is not using quantum --- files/apts/quantum | 2 -- stack.sh | 4 ++++ 2 files changed, 4 insertions(+), 2 deletions(-) delete mode 100644 files/apts/quantum diff --git a/files/apts/quantum b/files/apts/quantum deleted file mode 100644 index f5008adf..00000000 --- a/files/apts/quantum +++ /dev/null @@ -1,2 +0,0 @@ -openvswitch-switch -openvswitch-datapath-dkms diff --git a/stack.sh b/stack.sh index 31194bc4..19d7e844 100755 --- a/stack.sh +++ b/stack.sh @@ -787,6 +787,10 @@ fi # Quantum if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then + # Install deps + # FIXME add to file/apts/quantum, but don't install if not needed! 
+ apt_get install -y openvswitch-switch openvswitch-datapath-dkms + # Create database for the plugin/agent if [[ "$Q_PLUGIN" = "openvswitch" ]]; then if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then From ae7f264970d0139831e899810995523e72a89dda Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 3 Nov 2011 00:03:53 -0500 Subject: [PATCH 038/967] remove -y since this is using apt_get --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 19d7e844..446661a6 100755 --- a/stack.sh +++ b/stack.sh @@ -789,7 +789,7 @@ fi if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then # Install deps # FIXME add to file/apts/quantum, but don't install if not needed! - apt_get install -y openvswitch-switch openvswitch-datapath-dkms + apt_get install openvswitch-switch openvswitch-datapath-dkms # Create database for the plugin/agent if [[ "$Q_PLUGIN" = "openvswitch" ]]; then From 0c3b60ce00bb23926668289110917cebcf47e0e4 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 3 Nov 2011 00:07:55 -0500 Subject: [PATCH 039/967] typo --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 446661a6..6a001c6f 100755 --- a/stack.sh +++ b/stack.sh @@ -788,7 +788,7 @@ fi # Quantum if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then # Install deps - # FIXME add to file/apts/quantum, but don't install if not needed! + # FIXME add to files/apts/quantum, but don't install if not needed! apt_get install openvswitch-switch openvswitch-datapath-dkms # Create database for the plugin/agent From 9a766999c82458783da274ac9fc2ecb642f837a9 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 3 Nov 2011 00:23:51 -0500 Subject: [PATCH 040/967] fix quantum branch name - diablo does not exist --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 75ec4aa2..d5bf6518 100644 --- a/stackrc +++ b/stackrc @@ -29,7 +29,7 @@ OPENSTACKX_BRANCH=diablo # quantum service QUANTUM_REPO=https://github.com/openstack/quantum -QUANTUM_BRANCH=diablo +QUANTUM_BRANCH=stable/diablo # Specify a comma-separated list of uec images to download and install into glance. IMAGE_URLS=http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz From f7788ac680a5fa50a96d57b61fecc4c77a7cfca8 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 3 Nov 2011 10:00:06 +0100 Subject: [PATCH 041/967] Use stable/diablo for swift. 
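The branch pins touched by this and the previous patch live in stackrc, and as the localrc comment earlier in stack.sh notes, they can be overridden per checkout without editing stackrc itself. A localrc holding only branch overrides might look like this (illustrative values taken from the patches above):

    # localrc -- loaded by stackrc when present
    SWIFT_BRANCH=stable/diablo
    SWIFT_KEYSTONE_BRANCH=master
    QUANTUM_BRANCH=stable/diablo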
--- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index b6cf33d7..b541711f 100644 --- a/stackrc +++ b/stackrc @@ -4,7 +4,7 @@ NOVA_BRANCH=diablo # storage service SWIFT_REPO=https://github.com/openstack/swift.git -SWIFT_BRANCH=1.4.3 +SWIFT_BRANCH=stable/diablo # swift and keystone integration SWIFT_KEYSTONE_REPO=https://github.com/cloudbuilders/swift-keystone2.git From 3875015010110fc31368676d8885218ea5dca3b5 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 3 Nov 2011 09:17:06 +0100 Subject: [PATCH 042/967] Change SWIFT_LOCATION to SWIFT_DATA_LOCATION --- files/swift/rsyncd.conf | 24 ++++++++++++------------ stack.sh | 38 +++++++++++++++++++------------------- 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/files/swift/rsyncd.conf b/files/swift/rsyncd.conf index 568f2404..66215c7f 100644 --- a/files/swift/rsyncd.conf +++ b/files/swift/rsyncd.conf @@ -6,74 +6,74 @@ address = 127.0.0.1 [account6012] max connections = 25 -path = %SWIFT_LOCATION%/1/node/ +path = %SWIFT_DATA_LOCATION%/1/node/ read only = false lock file = /var/lock/account6012.lock [account6022] max connections = 25 -path = %SWIFT_LOCATION%/2/node/ +path = %SWIFT_DATA_LOCATION%/2/node/ read only = false lock file = /var/lock/account6022.lock [account6032] max connections = 25 -path = %SWIFT_LOCATION%/3/node/ +path = %SWIFT_DATA_LOCATION%/3/node/ read only = false lock file = /var/lock/account6032.lock [account6042] max connections = 25 -path = %SWIFT_LOCATION%/4/node/ +path = %SWIFT_DATA_LOCATION%/4/node/ read only = false lock file = /var/lock/account6042.lock [container6011] max connections = 25 -path = %SWIFT_LOCATION%/1/node/ +path = %SWIFT_DATA_LOCATION%/1/node/ read only = false lock file = /var/lock/container6011.lock [container6021] max connections = 25 -path = %SWIFT_LOCATION%/2/node/ +path = %SWIFT_DATA_LOCATION%/2/node/ read only = false lock file = /var/lock/container6021.lock [container6031] max connections = 25 -path = %SWIFT_LOCATION%/3/node/ +path = %SWIFT_DATA_LOCATION%/3/node/ read only = false lock file = /var/lock/container6031.lock [container6041] max connections = 25 -path = %SWIFT_LOCATION%/4/node/ +path = %SWIFT_DATA_LOCATION%/4/node/ read only = false lock file = /var/lock/container6041.lock [object6010] max connections = 25 -path = %SWIFT_LOCATION%/1/node/ +path = %SWIFT_DATA_LOCATION%/1/node/ read only = false lock file = /var/lock/object6010.lock [object6020] max connections = 25 -path = %SWIFT_LOCATION%/2/node/ +path = %SWIFT_DATA_LOCATION%/2/node/ read only = false lock file = /var/lock/object6020.lock [object6030] max connections = 25 -path = %SWIFT_LOCATION%/3/node/ +path = %SWIFT_DATA_LOCATION%/3/node/ read only = false lock file = /var/lock/object6030.lock [object6040] max connections = 25 -path = %SWIFT_LOCATION%/4/node/ +path = %SWIFT_DATA_LOCATION%/4/node/ read only = false lock file = /var/lock/object6040.lock diff --git a/stack.sh b/stack.sh index e0a931d7..ce3c1630 100755 --- a/stack.sh +++ b/stack.sh @@ -293,9 +293,9 @@ GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$HOST_IP:9292} # TODO: add logging to different location. # By default the location of swift drives and objects is located inside -# the swift source directory. SWIFT_LOCATION variable allow you to redefine +# the swift source directory. SWIFT_DATA_LOCATION variable allow you to redefine # this. 
-SWIFT_LOCATION=${SWIFT_LOCATION:-${SWIFT_DIR}/data} +SWIFT_DATA_LOCATION=${SWIFT_DATA_LOCATION:-${SWIFT_DIR}/data} # devstack will create a loop-back disk formatted as XFS to store the # swift data. By default the disk size is 1 gigabyte. The variable @@ -640,47 +640,47 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then # changing the permissions so we can run it as our user. USER_GROUP=$(id -g) - sudo mkdir -p ${SWIFT_LOCATION}/drives - sudo chown -R $USER:${USER_GROUP} ${SWIFT_LOCATION}/drives + sudo mkdir -p ${SWIFT_DATA_LOCATION}/drives + sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_LOCATION}/drives # We then create a loopback disk and format it to XFS. - if [[ ! -e ${SWIFT_LOCATION}/drives/images/swift.img ]];then - mkdir -p ${SWIFT_LOCATION}/drives/images - sudo touch ${SWIFT_LOCATION}/drives/images/swift.img - sudo chown $USER: ${SWIFT_LOCATION}/drives/images/swift.img + if [[ ! -e ${SWIFT_DATA_LOCATION}/drives/images/swift.img ]];then + mkdir -p ${SWIFT_DATA_LOCATION}/drives/images + sudo touch ${SWIFT_DATA_LOCATION}/drives/images/swift.img + sudo chown $USER: ${SWIFT_DATA_LOCATION}/drives/images/swift.img - dd if=/dev/zero of=${SWIFT_LOCATION}/drives/images/swift.img \ + dd if=/dev/zero of=${SWIFT_DATA_LOCATION}/drives/images/swift.img \ bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} - mkfs.xfs -f -i size=1024 ${SWIFT_LOCATION}/drives/images/swift.img + mkfs.xfs -f -i size=1024 ${SWIFT_DATA_LOCATION}/drives/images/swift.img fi # After the drive being created we mount the disk with a few mount # options to make it most efficient as possible for swift. - mkdir -p ${SWIFT_LOCATION}/drives/sdb1 - if ! egrep -q ${SWIFT_LOCATION}/drives/sdb1 /proc/mounts;then + mkdir -p ${SWIFT_DATA_LOCATION}/drives/sdb1 + if ! egrep -q ${SWIFT_DATA_LOCATION}/drives/sdb1 /proc/mounts;then sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ - ${SWIFT_LOCATION}/drives/images/swift.img ${SWIFT_LOCATION}/drives/sdb1 + ${SWIFT_DATA_LOCATION}/drives/images/swift.img ${SWIFT_DATA_LOCATION}/drives/sdb1 fi # We then create link to that mounted location so swift would know # where to go. - for x in {1..4}; do sudo ln -sf ${SWIFT_LOCATION}/drives/sdb1/$x ${SWIFT_LOCATION}/$x; done + for x in {1..4}; do sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done # We now have to emulate a few different servers into one we # create all the directories needed for swift tmpd="" - for d in ${SWIFT_LOCATION}/drives/sdb1/{1..4} /etc/swift /etc/swift/{object,container,account}-server \ - ${SWIFT_LOCATION}/{1..4}/node/sdb1 /var/run/swift ;do + for d in ${SWIFT_DATA_LOCATION}/drives/sdb1/{1..4} /etc/swift /etc/swift/{object,container,account}-server \ + ${SWIFT_DATA_LOCATION}/{1..4}/node/sdb1 /var/run/swift ;do [[ -d $d ]] && continue sudo install -o ${USER} -g $USER_GROUP -d $d done - sudo chown -R $USER: ${SWIFT_LOCATION}/{1..4}/node + sudo chown -R $USER: ${SWIFT_DATA_LOCATION}/{1..4}/node # Swift use rsync to syncronize between all the different # partitions (which make more sense when you have a multi-node # setup) we configure it with our version of rsync. 
- sed -e "s/%GROUP%/${USER_GROUP}/;s/%USER%/$USER/;s,%SWIFT_LOCATION%,$SWIFT_LOCATION," $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf + sed -e "s/%GROUP%/${USER_GROUP}/;s/%USER%/$USER/;s,%SWIFT_DATA_LOCATION%,$SWIFT_DATA_LOCATION," $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync # By default Swift will be installed with the tempauth middleware @@ -714,7 +714,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then local node_number for node_number in {1..4};do - node_path=${SWIFT_LOCATION}/${node_number} + node_path=${SWIFT_DATA_LOCATION}/${node_number} sed -e "s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \ $FILES/swift/${server_type}-server.conf > /etc/swift/${server_type}-server/${node_number}.conf bind_port=$(( ${bind_port} + 10 )) From 3a64826b67c4fac5995600316db243fb8c7d4697 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 3 Nov 2011 10:43:46 +0100 Subject: [PATCH 043/967] Redefine swift configs in SWIFT_DIR We still need a link in /etc/swift until #885595 is fixed. --- files/swift/account-server.conf | 1 + files/swift/container-server.conf | 1 + files/swift/object-server.conf | 1 + files/swift/proxy-server.conf | 1 + files/swift/swift-remakerings | 2 +- stack.sh | 27 +++++++++++++++++++-------- 6 files changed, 24 insertions(+), 9 deletions(-) diff --git a/files/swift/account-server.conf b/files/swift/account-server.conf index 920d45c9..db0f097f 100644 --- a/files/swift/account-server.conf +++ b/files/swift/account-server.conf @@ -4,6 +4,7 @@ mount_check = false bind_port = %BIND_PORT% user = %USER% log_facility = LOG_LOCAL%LOG_FACILITY% +swift_dir = %SWIFT_CONFIG_LOCATION% [pipeline:main] pipeline = account-server diff --git a/files/swift/container-server.conf b/files/swift/container-server.conf index 8d59bf21..bdc3e3a0 100644 --- a/files/swift/container-server.conf +++ b/files/swift/container-server.conf @@ -4,6 +4,7 @@ mount_check = false bind_port = %BIND_PORT% user = %USER% log_facility = LOG_LOCAL%LOG_FACILITY% +swift_dir = %SWIFT_CONFIG_LOCATION% [pipeline:main] pipeline = container-server diff --git a/files/swift/object-server.conf b/files/swift/object-server.conf index 1b72e703..06fbffea 100644 --- a/files/swift/object-server.conf +++ b/files/swift/object-server.conf @@ -4,6 +4,7 @@ mount_check = false bind_port = %BIND_PORT% user = %USER% log_facility = LOG_LOCAL%LOG_FACILITY% +swift_dir = %SWIFT_CONFIG_LOCATION% [pipeline:main] pipeline = object-server diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf index 6b7dd528..fe7e39ba 100644 --- a/files/swift/proxy-server.conf +++ b/files/swift/proxy-server.conf @@ -2,6 +2,7 @@ bind_port = 8080 user = %USER% log_facility = LOG_LOCAL1 +swift_dir = %SWIFT_CONFIG_LOCATION% [pipeline:main] pipeline = healthcheck cache %AUTH_SERVER% proxy-server diff --git a/files/swift/swift-remakerings b/files/swift/swift-remakerings index 9343783f..c65353ce 100755 --- a/files/swift/swift-remakerings +++ b/files/swift/swift-remakerings @@ -1,6 +1,6 @@ #!/bin/bash -cd /etc/swift +cd %SWIFT_CONFIG_LOCATION% rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz diff --git a/stack.sh b/stack.sh index ce3c1630..4d97e8d8 100755 --- a/stack.sh +++ b/stack.sh @@ -297,6 +297,10 @@ GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$HOST_IP:9292} # this. 
SWIFT_DATA_LOCATION=${SWIFT_DATA_LOCATION:-${SWIFT_DIR}/data} +# We are going to have the configuration files inside the source +# directory, change SWIFT_CONFIG_LOCATION if you want to adjust that. +SWIFT_CONFIG_LOCATION=${SWIFT_CONFIG_LOCATION:-${SWIFT_DIR}/config} + # devstack will create a loop-back disk formatted as XFS to store the # swift data. By default the disk size is 1 gigabyte. The variable # SWIFT_LOOPBACK_DISK_SIZE specified in bytes allow you to change @@ -669,14 +673,21 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then # We now have to emulate a few different servers into one we # create all the directories needed for swift tmpd="" - for d in ${SWIFT_DATA_LOCATION}/drives/sdb1/{1..4} /etc/swift /etc/swift/{object,container,account}-server \ + for d in ${SWIFT_DATA_LOCATION}/drives/sdb1/{1..4} \ + ${SWIFT_CONFIG_LOCATION}/{object,container,account}-server \ ${SWIFT_DATA_LOCATION}/{1..4}/node/sdb1 /var/run/swift ;do [[ -d $d ]] && continue sudo install -o ${USER} -g $USER_GROUP -d $d done - sudo chown -R $USER: ${SWIFT_DATA_LOCATION}/{1..4}/node + # We do want to make sure this is all owned by our user. + sudo chown -R $USER: ${SWIFT_DATA_LOCATION}/{1..4}/node + sudo chown -R $USER: ${SWIFT_CONFIG_LOCATION} + # swift-init has a bug using /etc/swift until bug #885595 is fixed + # we have to create a link + sudo ln -s ${SWIFT_CONFIG_LOCATION} /etc/swift + # Swift use rsync to syncronize between all the different # partitions (which make more sense when you have a multi-node # setup) we configure it with our version of rsync. @@ -699,10 +710,10 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then # We do the install of the proxy-server and swift configuration # replacing a few directives to match our configuration. - sed "s/%USER%/$USER/;s/%SERVICE_TOKEN%/${SERVICE_TOKEN}/;s/%AUTH_SERVER%/${swift_auth_server}/" \ - $FILES/swift/proxy-server.conf|sudo tee /etc/swift/proxy-server.conf + sed "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s/%USER%/$USER/;s/%SERVICE_TOKEN%/${SERVICE_TOKEN}/;s/%AUTH_SERVER%/${swift_auth_server}/" \ + $FILES/swift/proxy-server.conf|sudo tee ${SWIFT_CONFIG_LOCATION}/proxy-server.conf - sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > /etc/swift/swift.conf + sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > ${SWIFT_CONFIG_LOCATION}/swift.conf # We need to generate a object/account/proxy configuration # emulating 4 nodes on different ports we have a little function @@ -715,8 +726,8 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then for node_number in {1..4};do node_path=${SWIFT_DATA_LOCATION}/${node_number} - sed -e "s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \ - $FILES/swift/${server_type}-server.conf > /etc/swift/${server_type}-server/${node_number}.conf + sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \ + $FILES/swift/${server_type}-server.conf > ${SWIFT_CONFIG_LOCATION}/${server_type}-server/${node_number}.conf bind_port=$(( ${bind_port} + 10 )) log_facility=$(( ${log_facility} + 1 )) done @@ -732,7 +743,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then # - swift-startmain # Restart your full cluster. 
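# swift-remakerings is only partially shown here, but the "cd %SWIFT_CONFIG_LOCATION%"
# and "rm -f *.builder *.ring.gz" lines above suggest it rebuilds the rings in place
# with swift-ring-builder. Rebuilding a single ring by hand would look roughly like
# this (replica count and device weight are assumptions, port/device as above):
cd ${SWIFT_CONFIG_LOCATION}
swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} 3 1
swift-ring-builder object.builder add z1-127.0.0.1:6010/sdb1 1
swift-ring-builder object.builder rebalance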
# - sed -e "s/%SWIFT_PARTITION_POWER_SIZE%/$SWIFT_PARTITION_POWER_SIZE/" $FILES/swift/swift-remakerings | \ + sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s/%SWIFT_PARTITION_POWER_SIZE%/$SWIFT_PARTITION_POWER_SIZE/" $FILES/swift/swift-remakerings | \ sudo tee /usr/local/bin/swift-remakerings sudo install -m755 $FILES/swift/swift-startmain /usr/local/bin/ sudo chmod +x /usr/local/bin/swift-* From df5e9949aa599523c3e97203a2a70f4fd9b2a094 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Thu, 3 Nov 2011 09:36:13 -0500 Subject: [PATCH 044/967] don't install memcached in general list --- stack.sh | 2 +- tools/build_libvirt.sh | 2 +- tools/build_lxc.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index e0a931d7..066aff3f 100755 --- a/stack.sh +++ b/stack.sh @@ -355,7 +355,7 @@ fi # install apt requirements apt_get update -apt_get install `cat $FILES/apts/* | cut -d\# -f1 | grep -Ev "mysql-server|rabbitmq-server"` +apt_get install `cat $FILES/apts/* | cut -d\# -f1 | grep -Ev "mysql-server|rabbitmq-server|memcached"` # install python requirements sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install `cat $FILES/pips/*` diff --git a/tools/build_libvirt.sh b/tools/build_libvirt.sh index fc281d31..3de8cc57 100755 --- a/tools/build_libvirt.sh +++ b/tools/build_libvirt.sh @@ -145,7 +145,7 @@ function git_clone { # Make sure that base requirements are installed cp /etc/resolv.conf $COPY_DIR/etc/resolv.conf chroot $COPY_DIR apt-get update -chroot $COPY_DIR apt-get install -y --force-yes `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"` +chroot $COPY_DIR apt-get install -y --force-yes `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server|memcached)"` chroot $COPY_DIR apt-get install -y --download-only rabbitmq-server libvirt-bin mysql-server chroot $COPY_DIR pip install `cat files/pips/*` diff --git a/tools/build_lxc.sh b/tools/build_lxc.sh index a2c5a22e..13b98df9 100755 --- a/tools/build_lxc.sh +++ b/tools/build_lxc.sh @@ -125,7 +125,7 @@ fi # Make sure that base requirements are installed chroot $CACHEDIR apt-get update -chroot $CACHEDIR apt-get install -y --force-yes `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"` +chroot $CACHEDIR apt-get install -y --force-yes `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server|memcached)"` chroot $CACHEDIR apt-get install -y --download-only rabbitmq-server libvirt-bin mysql-server chroot $CACHEDIR pip install `cat files/pips/*` From b2857e4df6b4a13b2ba0b05073ed44dbe71eab26 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 3 Nov 2011 16:19:14 +0100 Subject: [PATCH 045/967] Only ask for swift_hash if swift is enabled. --- stack.sh | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 066aff3f..d1e35c0d 100755 --- a/stack.sh +++ b/stack.sh @@ -313,9 +313,13 @@ SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} # By default we define 9 for the partition count (which mean 512). SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} -# SWIFT_HASH is a random unique string for a swift cluster that can never change. -read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH." - +# We only ask for Swift Hash if we have enabled swift service. +if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then + # SWIFT_HASH is a random unique string for a swift cluster that + # can never change. + read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH." 
+fi + # Keystone # -------- From 53ca603b4590b59392511c61fa152cdabc9a43f8 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 3 Nov 2011 17:04:26 +0100 Subject: [PATCH 046/967] Only run swift excercise when swift is enabled. --- exercise.sh | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/exercise.sh b/exercise.sh index c49f1242..77d3a3b0 100755 --- a/exercise.sh +++ b/exercise.sh @@ -192,21 +192,23 @@ nova secgroup-delete $SECGROUP # make sure that we can describe instances euca-describe-instances -# Testing Swift -# ============= +if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then + # Testing Swift + # ============= -# Check if we have to swift via keystone -swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD stat + # Check if we have to swift via keystone + swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD stat -# We start by creating a test container -swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD post testcontainer + # We start by creating a test container + swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD post testcontainer -# add some files into it. -swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue + # add some files into it. + swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue -# list them -swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD list testcontainer + # list them + swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD list testcontainer -# And we may want to delete them now that we have tested that -# everything works. -swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete testcontainer + # And we may want to delete them now that we have tested that + # everything works. + swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete testcontainer +fi From 23324e94e2f44eb685f7c3decf7ab75d3ed4c60c Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 3 Nov 2011 13:47:15 -0700 Subject: [PATCH 047/967] create the uec cachedir if it doesn't exist. Otherwise build_libvirt fails on clean installs --- tools/get_uec_image.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh index 3d62bbac..cb59b9aa 100755 --- a/tools/get_uec_image.sh +++ b/tools/get_uec_image.sh @@ -114,6 +114,7 @@ trap cleanup SIGHUP SIGINT SIGTERM # Get the UEC image UEC_NAME=$DIST_NAME-server-cloudimg-amd64 if [ ! 
-e $CACHEDIR/$UEC_NAME-disk1.img ]; then + mkdir -p $CACHEDIR (cd $CACHEDIR && wget -N http://uec-images.ubuntu.com/$DIST_NAME/current/$UEC_NAME-disk1.img) fi From 1468133003a7f380dc9ae60328d5c187c975ff10 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Thu, 3 Nov 2011 15:52:37 -0500 Subject: [PATCH 048/967] only download - don't install apts - also pip install --- tools/build_libvirt.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tools/build_libvirt.sh b/tools/build_libvirt.sh index 3de8cc57..7efabbba 100755 --- a/tools/build_libvirt.sh +++ b/tools/build_libvirt.sh @@ -145,9 +145,8 @@ function git_clone { # Make sure that base requirements are installed cp /etc/resolv.conf $COPY_DIR/etc/resolv.conf chroot $COPY_DIR apt-get update -chroot $COPY_DIR apt-get install -y --force-yes `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server|memcached)"` -chroot $COPY_DIR apt-get install -y --download-only rabbitmq-server libvirt-bin mysql-server -chroot $COPY_DIR pip install `cat files/pips/*` +chroot $COPY_DIR apt-get install -y --download-only `cat files/apts/* | cut -d\# -f1` +chroot $COPY_DIR apt-get install -y --force-yes `cat files/apts/general` # Clean out code repos if directed to do so if [ "$CLEAN" = "1" ]; then From b244ef34d581f14ed8889c61ef5cd2fa06e8f5fa Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Thu, 3 Nov 2011 18:19:21 -0400 Subject: [PATCH 049/967] Use $DEST for volume backing file. --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 976872ad..32b4539e 100755 --- a/stack.sh +++ b/stack.sh @@ -778,7 +778,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then # By default, the backing file is 2G in size, and is stored in /opt/stack. # if ! sudo vgdisplay | grep -q nova-volumes; then - VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-/opt/stack/nova-volumes-backing-file} + VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file} VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M} truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` From ca2c047b6eb28cebba25870f3dda9e6eae2ab1ea Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 3 Nov 2011 16:29:32 -0700 Subject: [PATCH 050/967] fix caching so that there is the ability to download services without installing them. 
This is done with a #NOPRIME comment in apts/files/ --- files/apts/nova | 6 +++--- files/apts/swift | 2 +- tools/build_libvirt.sh | 4 ++-- tools/build_lxc.sh | 4 ++-- tools/build_nfs.sh | 5 +++-- tools/build_ramdisk.sh | 3 ++- 6 files changed, 13 insertions(+), 11 deletions(-) diff --git a/files/apts/nova b/files/apts/nova index 17eb8774..405d53ba 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -2,7 +2,7 @@ dnsmasq-base kpartx parted arping # used for send_arp_for_ha option in nova-network -mysql-server +mysql-server # NOPRIME python-mysqldb kvm gawk @@ -11,10 +11,10 @@ ebtables sqlite3 sudo kvm -libvirt-bin +libvirt-bin # NOPRIME vlan curl -rabbitmq-server +rabbitmq-server # NOPRIME socat # used by ajaxterm python-mox python-paste diff --git a/files/apts/swift b/files/apts/swift index c52c68b7..f2983778 100644 --- a/files/apts/swift +++ b/files/apts/swift @@ -1,6 +1,6 @@ curl gcc -memcached +memcached # NOPRIME python-configobj python-coverage python-dev diff --git a/tools/build_libvirt.sh b/tools/build_libvirt.sh index 3de8cc57..d1928794 100755 --- a/tools/build_libvirt.sh +++ b/tools/build_libvirt.sh @@ -145,8 +145,8 @@ function git_clone { # Make sure that base requirements are installed cp /etc/resolv.conf $COPY_DIR/etc/resolv.conf chroot $COPY_DIR apt-get update -chroot $COPY_DIR apt-get install -y --force-yes `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server|memcached)"` -chroot $COPY_DIR apt-get install -y --download-only rabbitmq-server libvirt-bin mysql-server +chroot $COPY_DIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1` +chroot $COPY_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` chroot $COPY_DIR pip install `cat files/pips/*` # Clean out code repos if directed to do so diff --git a/tools/build_lxc.sh b/tools/build_lxc.sh index 13b98df9..9d8ce926 100755 --- a/tools/build_lxc.sh +++ b/tools/build_lxc.sh @@ -125,8 +125,8 @@ fi # Make sure that base requirements are installed chroot $CACHEDIR apt-get update -chroot $CACHEDIR apt-get install -y --force-yes `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server|memcached)"` -chroot $CACHEDIR apt-get install -y --download-only rabbitmq-server libvirt-bin mysql-server +chroot $CACHEDIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1` +chroot $CACHEDIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` chroot $CACHEDIR pip install `cat files/pips/*` # Clean out code repos if directed to do so diff --git a/tools/build_nfs.sh b/tools/build_nfs.sh index 5c591e44..39a2cf08 100755 --- a/tools/build_nfs.sh +++ b/tools/build_nfs.sh @@ -32,8 +32,9 @@ fi # prime natty with as many apt/pips as we can if [ ! -d $CHROOTCACHE/natty-dev ]; then rsync -azH $CHROOTCACHE/natty-base/ $CHROOTCACHE/natty-dev/ - chroot $CHROOTCACHE/natty-dev apt-get install -y `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"` - chroot $CHROOTCACHE/natty-dev pip install `cat files/pips/*` + chroot $CHROOTCACHE apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1` + chroot $CHROOTCACHE apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` + chroot $CHROOTCACHE pip install `cat files/pips/*` # Create a stack user that is a member of the libvirtd group so that stack # is able to interact with libvirt. 
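Alongside these hunks, a minimal sketch of how the NOPRIME marker is meant to behave; the temporary file path and package names below are made up for illustration, only the grep/cut pipeline mirrors the patch:

# Packages tagged NOPRIME are downloaded during priming but not installed.
cat > /tmp/apts-example <<'EOF'
dnsmasq-base
mysql-server     # NOPRIME
python-mysqldb
rabbitmq-server  # NOPRIME
EOF

# download-only set: lines carrying the NOPRIME marker
cat /tmp/apts-example | grep NOPRIME | cut -d\# -f1      # mysql-server, rabbitmq-server

# install-now set: everything else
cat /tmp/apts-example | grep -v NOPRIME | cut -d\# -f1   # dnsmasq-base, python-mysqldb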
diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh index 187112a7..2c914dc7 100755 --- a/tools/build_ramdisk.sh +++ b/tools/build_ramdisk.sh @@ -113,7 +113,8 @@ if [ ! -r $DEV_FILE ]; then mount -t ext4 ${NBD}p1 $MNTDIR cp -p /etc/resolv.conf $MNTDIR/etc/resolv.conf - chroot $MNTDIR apt-get install -y `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"` + chroot $MNTDIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1` + chroot $MNTDIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` chroot $MNTDIR pip install `cat files/pips/*` # Create a stack user that is a member of the libvirtd group so that stack From 2599b3165ad35c9c62b5bfa543c03f2a3aecb4cd Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Fri, 4 Nov 2011 10:31:37 -0400 Subject: [PATCH 051/967] Wrap exercises with master script, with logs, and move common variables. --- exercise.sh | 24 ++++++++++++++++++++++++ exercises/euca.sh | 3 --- exercises/floating_ips.sh | 9 --------- openrc | 8 ++++++++ 4 files changed, 32 insertions(+), 12 deletions(-) create mode 100755 exercise.sh diff --git a/exercise.sh b/exercise.sh new file mode 100755 index 00000000..de906f25 --- /dev/null +++ b/exercise.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +# Run everything in the exercises/ directory that isn't explicitly disabled + +# comma separated list of script basenames to skip +# to refrain from exercising euca.sh use SKIP_EXERCISES=euca +SKIP_EXERCISES=${SKIP_EXERCISES:-""} + +EXERCISE_DIR=$(dirname "$0")/exercises +basenames=$(for b in `ls $EXERCISE_DIR/*.sh` ; do basename $b .sh ; done) + +for script in $basenames ; do + if [[ "$SKIP_EXERCISES" =~ $script ]] ; then + echo SKIPPING $script + else + echo Running $script + $EXERCISE_DIR/$script.sh 2> $script.log + if [[ $? -ne 0 ]] ; then + echo FAILED. See $script.log + else + rm $script.log + fi + fi +done diff --git a/exercises/euca.sh b/exercises/euca.sh index 0cb5feaf..0d48c935 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -21,9 +21,6 @@ pushd $(cd $(dirname "$0")/.. && pwd) source ./openrc popd -# Max time till the vm is bootable -BOOT_TIMEOUT=${BOOT_TIMEOUT:-15} - # find a machine image to boot IMAGE=`euca-describe-images | grep machine | cut -f2` diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index edf784c5..5c384309 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -84,15 +84,6 @@ nova boot --flavor $FLAVOR --image $IMAGE $NAME --security_groups=$SECGROUP # Waiting for boot # ---------------- -# Max time to wait while vm goes from build to active state -ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10} - -# Max time till the vm is bootable -BOOT_TIMEOUT=${BOOT_TIMEOUT:-15} - -# Max time to wait for proper association and dis-association. -ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10} - # check that the status is active within ACTIVE_TIMEOUT seconds if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $NAME | grep status | grep -q ACTIVE; do sleep 1; done"; then echo "server didn't become active!" 
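Before the openrc hunk below, a quick sketch of why moving the timeout defaults there is safe: the ${VAR:-default} form only supplies a value when the variable is unset or empty, so a value already exported in the environment (for example by a wrapper script) still wins. The values here are illustrative:

# Illustrative only: default-parameter expansion as used for the timeouts
unset BOOT_TIMEOUT
export BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}   # unset, so the default 15 applies
echo $BOOT_TIMEOUT                        # prints 15

export BOOT_TIMEOUT=60                    # pre-set before sourcing openrc
export BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}   # already set, so 60 is kept
echo $BOOT_TIMEOUT                        # prints 60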
diff --git a/openrc b/openrc index 324780b5..db1a7d12 100644 --- a/openrc +++ b/openrc @@ -49,3 +49,11 @@ export EC2_SECRET_KEY=${ADMIN_PASSWORD:-secrete} # set log level to DEBUG (helps debug issues) # export NOVACLIENT_DEBUG=1 +# Max time till the vm is bootable +export BOOT_TIMEOUT=${BOOT_TIMEOUT:-15} + +# Max time to wait while vm goes from build to active state +export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10} + +# Max time to wait for proper IP association and dis-association. +export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10} From 9e9132ddaf77a4b858352e827da29ce214a6848d Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Fri, 4 Nov 2011 12:09:54 -0400 Subject: [PATCH 052/967] Exercises: euca bugfix, output cleanup. Don't log stderr per-exercise, because stdout is barfy anyway. Move the state of skip/pass/fail to the end of the exercise run. --- exercise.sh | 30 ++++++++++++++++++++++++++---- exercises/euca.sh | 2 +- 2 files changed, 27 insertions(+), 5 deletions(-) diff --git a/exercise.sh b/exercise.sh index de906f25..d7763857 100755 --- a/exercise.sh +++ b/exercise.sh @@ -6,19 +6,41 @@ # to refrain from exercising euca.sh use SKIP_EXERCISES=euca SKIP_EXERCISES=${SKIP_EXERCISES:-""} +# Locate the scripts we should run EXERCISE_DIR=$(dirname "$0")/exercises basenames=$(for b in `ls $EXERCISE_DIR/*.sh` ; do basename $b .sh ; done) +# Track the state of each script +passes="" +failures="" +skips="" + +# Loop over each possible script (by basename) for script in $basenames ; do if [[ "$SKIP_EXERCISES" =~ $script ]] ; then - echo SKIPPING $script + skips="$skips $script" else + echo ========================= echo Running $script + echo ========================= $EXERCISE_DIR/$script.sh if [[ $? -ne 0 ]] ; then - echo FAILED. See $script.log + failures="$failures $script" else - rm $script.log + passes="$passes $script" fi fi done + +# output status of exercise run +echo ========================= +echo ========================= +for script in $skips ; do + echo SKIP $script done +for script in $passes ; do + echo PASS $script done +for script in $failures ; do + echo FAILED $script done diff --git a/exercises/euca.sh b/exercises/euca.sh index 0d48c935..bf6910de 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -25,7 +25,7 @@ popd IMAGE=`euca-describe-images | grep machine | cut -f2` # launch it -INSTANCE=`euca-run-instance $IMAGE | grep INSTANCE | cut -f2` +INSTANCE=`euca-run-instances $IMAGE | grep INSTANCE | cut -f2` From 3e6ec236f01abaf80fe7dc8db73ecbfdf3532e89 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Fri, 4 Nov 2011 12:23:35 -0400 Subject: [PATCH 053/967] Pull in swift testing from master. --- exercises/swift.sh | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 exercises/swift.sh diff --git a/exercises/swift.sh b/exercises/swift.sh new file mode 100644 index 00000000..f7be0994 --- /dev/null +++ b/exercises/swift.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +# Test swift via the command line tools that ship with it. + +# This script exits on an error so that errors don't compound and you see +# only the first error that occurred. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following along as the install occurs. 
+set -o xtrace + + +# Settings +# ======== + +# Use openrc + stackrc + localrc for settings +pushd $(cd $(dirname "$0")/.. && pwd) +source ./openrc +popd + + +# Testing Swift +# ============= + +# Check if we have to swift via keystone +swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD stat + +# We start by creating a test container +swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD post testcontainer + +# add some files into it. +swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue + +# list them +swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD list testcontainer + +# And we may want to delete them now that we have tested that +# everything works. +swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete testcontainer From 49946a14b5c4ef290397a8ffd4254f2ff3c0797e Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Fri, 4 Nov 2011 15:09:41 -0500 Subject: [PATCH 054/967] proper path for arping in sudoers --- files/sudo/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/sudo/nova b/files/sudo/nova index 2ce1aac2..62685b31 100644 --- a/files/sudo/nova +++ b/files/sudo/nova @@ -41,7 +41,7 @@ Cmnd_Alias NOVACMDS = /bin/chmod /var/lib/nova/tmp/*/root/.ssh, \ /usr/bin/socat, \ /sbin/parted, \ /usr/sbin/dnsmasq, \ - /usr/bin/arping + /usr/sbin/arping %USER% ALL = (root) NOPASSWD: SETENV: NOVACMDS From 346e49131b40d3dc492ce2ad193a3e316839631e Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Sat, 5 Nov 2011 00:22:47 -0500 Subject: [PATCH 055/967] add script that demonstrates separation of head abd compute roles when using xen --- tools/xen/build_domU.sh | 25 +++++++++++++++---------- tools/xen/build_domU_multi.sh | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+), 10 deletions(-) create mode 100755 tools/xen/build_domU_multi.sh diff --git a/tools/xen/build_domU.sh b/tools/xen/build_domU.sh index 65049af3..98042555 100755 --- a/tools/xen/build_domU.sh +++ b/tools/xen/build_domU.sh @@ -226,16 +226,21 @@ mkdir -p /boot/guest SR_UUID=`xe sr-list --minimal name-label="Local storage"` xe sr-param-set uuid=$SR_UUID other-config:i18n-key=local-storage -# Uninstall previous runs -xe vm-list --minimal name-label="$LABEL" | xargs ./scripts/uninstall-os-vpx.sh - -# Destroy any instances that were launched -for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do - echo "Shutting down nova instance $uuid" - xe vm-unpause uuid=$uuid || true - xe vm-shutdown uuid=$uuid - xe vm-destroy uuid=$uuid -done + +# Shutdown previous runs +DO_SHUTDOWN=${DO_SHUTDOWN:-1} +if [ "$DO_SHUTDOWN" = "1" ]; then + # Uninstall previous runs + xe vm-list --minimal name-label="$LABEL" | xargs ./scripts/uninstall-os-vpx.sh + + # Destroy any instances that were launched + for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do + echo "Shutting down nova instance $uuid" + xe vm-unpause uuid=$uuid || true + xe vm-shutdown uuid=$uuid + xe vm-destroy uuid=$uuid + done +fi # Path to head xva. 
By default keep overwriting the same one to save space USE_SEPARATE_XVAS=${USE_SEPARATE_XVAS:-0} diff --git a/tools/xen/build_domU_multi.sh b/tools/xen/build_domU_multi.sh new file mode 100755 index 00000000..130bec5b --- /dev/null +++ b/tools/xen/build_domU_multi.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +# Echo commands +set -o xtrace + +# Head node host, which runs glance, api, keystone +HEAD_PUB_IP=${HEAD_PUB_IP:-192.168.1.57} +HEAD_MGT_IP=${HEAD_MGT_IP:-172.16.100.57} + +COMPUTE_PUB_IP=${COMPUTE_PUB_IP:-192.168.1.58} +COMPUTE_MGT_IP=${COMPUTE_MGT_IP:-172.16.100.58} + +# Networking params +FLOATING_RANGE=${FLOATING_RANGE:-192.168.1.196/30} + +# Variables common amongst all hosts in the cluster +COMMON_VARS="$STACKSH_PARAMS MYSQL_HOST=$HEAD_MGT_IP RABBIT_HOST=$HEAD_MGT_IP GLANCE_HOSTPORT=$HEAD_MGT_IP:9292 FLOATING_RANGE=$FLOATING_RANGE" + +# Helper to launch containers +function build_domU { + GUEST_NAME=$1 PUB_IP=$2 MGT_IP=$3 DO_SHUTDOWN=$4 TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $5" ./build_domU.sh +} + +# Launch the head node - headnode uses a non-ip domain name, +# because rabbit won't launch with an ip addr hostname :( +build_domU HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit" + +# Wait till the head node is up +while ! curl -L http://$HEAD_PUB_IP | grep -q username; do + echo "Waiting for head node ($HEAD_PUB_IP) to start..." + sleep 5 +done + +# Build the HA compute host +build_domU $COMPUTE_PUB_IP $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api" From 40b5737c4d00c0ba45989e98fb36dff102ffad4a Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Sat, 5 Nov 2011 00:30:07 -0500 Subject: [PATCH 056/967] fix comment --- tools/xen/build_domU.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/build_domU.sh b/tools/xen/build_domU.sh index 98042555..6362849e 100755 --- a/tools/xen/build_domU.sh +++ b/tools/xen/build_domU.sh @@ -230,7 +230,7 @@ xe sr-param-set uuid=$SR_UUID other-config:i18n-key=local-storage # Shutdown previous runs DO_SHUTDOWN=${DO_SHUTDOWN:-1} if [ "$DO_SHUTDOWN" = "1" ]; then - # Uninstall previous runs + # Shutdown all domU's that created previously xe vm-list --minimal name-label="$LABEL" | xargs ./scripts/uninstall-os-vpx.sh # Destroy any instances that were launched From 0367cf1585eb7359a6bc741aab06dc3a3750575a Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Sat, 5 Nov 2011 10:46:56 -0400 Subject: [PATCH 057/967] remove spacicolons. 
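For illustration only (these two lines are examples, not taken from the diff below), the cleanup is purely cosmetic, dropping the stray space before each semicolon in loop headers without changing behaviour:

# before
for script in $basenames ; do echo $script ; done
# after
for script in $basenames; do echo $script; done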
--- exercise.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/exercise.sh b/exercise.sh index d7763857..7703f401 100755 --- a/exercise.sh +++ b/exercise.sh @@ -8,7 +8,7 @@ SKIP_EXERCISES=${SKIP_EXERCISES:-""} # Locate the scripts we should run EXERCISE_DIR=$(dirname "$0")/exercises -basenames=$(for b in `ls $EXERCISE_DIR/*.sh` ; do basename $b .sh ; done) +basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done) # Track the state of each script passes="" @@ -16,7 +16,7 @@ failures="" skips="" # Loop over each possible script (by basename) -for script in $basenames ; do +for script in $basenames; do if [[ "$SKIP_EXERCISES" =~ $script ]] ; then skips="$skips $script" else @@ -35,12 +35,12 @@ done # output status of exercise run echo ========================= echo ========================= -for script in $skips ; do +for script in $skips; do echo SKIP $script done -for script in $passes ; do +for script in $passes; do echo PASS $script done -for script in $failures ; do +for script in $failures; do echo FAILED $script done From 9a3066f9fd4efae4ec838a673fe1517554e0e531 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Sat, 5 Nov 2011 11:02:34 -0400 Subject: [PATCH 058/967] RUNNING_TIMEOUT = BOOT_TIMEOUT + ACTIVE_TIMEOUT --- exercises/euca.sh | 4 ++-- openrc | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index bf6910de..9605ace2 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -28,8 +28,8 @@ IMAGE=`euca-describe-images | grep machine | cut -f2` INSTANCE=`euca-run-instances $IMAGE | grep INSTANCE | cut -f2` # assure it has booted within a reasonable time -if ! timeout $BOOT_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then - echo "server didn't become active within $BOOT_TIMEOUT seconds" +if ! timeout $RUNNING_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then + echo "server didn't become active within $RUNNING_TIMEOUT seconds" exit 1 fi diff --git a/openrc b/openrc index db1a7d12..4b36112e 100644 --- a/openrc +++ b/openrc @@ -55,5 +55,8 @@ export BOOT_TIMEOUT=${BOOT_TIMEOUT:-15} # Max time to wait while vm goes from build to active state export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10} +# Max time from run instance command until it is running +export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))} + # Max time to wait for proper IP association and dis-association. export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10} From 2bbcd682aaa615957ae3c4758cdc5ac9aab91e83 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Sat, 5 Nov 2011 16:19:03 -0500 Subject: [PATCH 059/967] Add SERVICE_TIMEOUT --- stack.sh | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 32b4539e..6b3e09c4 100755 --- a/stack.sh +++ b/stack.sh @@ -175,6 +175,9 @@ if [ ! -n "$HOST_IP" ]; then HOST_IP=`LC_ALL=C /sbin/ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` fi +# Service startup timeout +SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} + # Generic helper to configure passwords function read_password { set +o xtrace @@ -926,7 +929,7 @@ fi if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=etc/glance-api.conf" echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." - if ! timeout 60 sh -c "while ! wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then + if ! 
timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then echo "g-api did not start" exit 1 fi @@ -936,7 +939,7 @@ fi if [[ "$ENABLED_SERVICES" =~ "key" ]]; then screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF -d" echo "Waiting for keystone to start..." - if ! timeout 60 sh -c "while ! wget -q -O- http://127.0.0.1:5000; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:5000; do sleep 1; done"; then echo "keystone did not start" exit 1 fi @@ -946,7 +949,7 @@ fi if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api" echo "Waiting for nova-api to start..." - if ! timeout 60 sh -c "while ! wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then echo "nova-api did not start" exit 1 fi From 43392f74b85f2ca74bdfd2b6268d0fb46cf57423 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Sat, 5 Nov 2011 16:55:15 -0500 Subject: [PATCH 060/967] Beef up error handling (#886666) --- tools/get_uec_image.sh | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh index cb59b9aa..7b95aab8 100755 --- a/tools/get_uec_image.sh +++ b/tools/get_uec_image.sh @@ -14,6 +14,9 @@ MIN_PKGS=${MIN_PKGS:-"apt-utils gpgv openssh-server"} TOOLS_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=`cd $TOOLS_DIR/..; pwd` +# exit on error to stop unexpected errors +set -o errexit + usage() { echo "Usage: $0 - Prepare Ubuntu images" echo "" @@ -44,6 +47,14 @@ cleanup() { trap 2; kill -2 $$ } +# apt-get wrapper to just get arguments set correctly +function apt_get() { + local sudo="sudo" + [ "$(id -u)" = "0" ] && sudo="env" + $sudo DEBIAN_FRONTEND=noninteractive apt-get \ + --option "Dpkg::Options::=--force-confold" --assume-yes "$@" +} + while getopts f:hmr: c; do case $c in f) FORMAT=$OPTARG @@ -107,7 +118,14 @@ case $DIST_NAME in ;; esac -trap cleanup SIGHUP SIGINT SIGTERM +trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT + +# Check for dependencies + +if [ ! -x "`which qemu-img`" -o ! -x "`which qemu-nbd`" ]; then + # Missing KVM? + apt_get install qemu-kvm +fi # Prepare the base image From 8b3eb5ffe31c6a112e9461f16c6955f303018f17 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 5 Nov 2011 16:05:14 -0700 Subject: [PATCH 061/967] work towards simpiler uec --- tools/build_uec.sh | 198 ++++++++++++++++++++++++++++++++++++++++++++ tools/uec/meta-data | 19 +++++ tools/uec/user-data | 32 +++++++ 3 files changed, 249 insertions(+) create mode 100755 tools/build_uec.sh create mode 100644 tools/uec/meta-data create mode 100644 tools/uec/user-data diff --git a/tools/build_uec.sh b/tools/build_uec.sh new file mode 100755 index 00000000..aae4fb8e --- /dev/null +++ b/tools/build_uec.sh @@ -0,0 +1,198 @@ +#!/usr/bin/env bash + +# Make sure that we have the proper version of ubuntu +UBUNTU_VERSION=`cat /etc/lsb-release | grep CODENAME | sed 's/.*=//g'` +if [ ! "oneiric" = "$UBUNTU_VERSION" ]; then + if [ ! 
"natty" = "$UBUNTU_VERSION" ]; then + echo "This script only works with oneiric and natty" + exit 1 + fi +fi + +# exit on error to stop unexpected errors +set -o errexit + +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=`cd $TOOLS_DIR/..; pwd` + +# Abort if localrc is not set +if [ ! -e $TOP_DIR/localrc ]; then + echo "You must have a localrc with ALL necessary passwords defined before proceeding." + echo "See stack.sh for required passwords." + exit 1 +fi + +# Install deps if needed +dpkg -l kvm libvirt-bin kpartx || apt-get install -y --force-yes kvm libvirt-bin kpartx + +# Where to store files and instances +WORK_DIR=${WORK_DIR:-/opt/kvmstack} + +# Where to store images +IMAGES_DIR=$WORK_DIR/images + +# Original version of built image +DIST_NAME=${DIST_NAME:oneiric} +UEC_NAME=$DIST_NAME-server-cloudimg-amd64 +UEC_URL=http://uec-images.ubuntu.com/$DIST_NAME/current/$UEC_NAME-disk1.img +BASE_IMAGE=$IMAGES_DIR/$DIST_NAME.raw + +# download the base uec image if we haven't already +if [ ! -e $BASE_IMAGE ]; then + mkdir -p $IMAGES_DIR + curl $UEC_URL -O $BASE_IMAGE +fi + +cd $TOP_DIR + +# Source params +source ./stackrc + +# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD`` +ROOT_PASSWORD=${ADMIN_PASSWORD:-password} + +# Name of our instance, used by libvirt +GUEST_NAME=${GUEST_NAME:-devstack} + +# Mop up after previous runs +virsh destroy $GUEST_NAME || true + +# Where this vm is stored +VM_DIR=$WORK_DIR/instances/$GUEST_NAME + +# Create vm dir and remove old disk +mkdir -p $VM_DIR +rm -f $VM_DIR/disk.img + +# Create a copy of the base image +qemu-img create -f qcow2 -b ${BASE_IMAGE} $VM_DIR/disk.img + +# Back to devstack +cd $TOP_DIR + +GUEST_NETWORK=${GUEST_NETWORK:-1} +GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes} +GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50} +GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24} +GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0} +GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.$GUEST_NETWORK.1} +GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $GUEST_NETWORK`"} +GUEST_RAM=${GUEST_RAM:-1524288} +GUEST_CORES=${GUEST_CORES:-1} + +# libvirt.xml configuration +NET_XML=$VM_DIR/net.xml +cat > $NET_XML < + devstack-$GUEST_NETWORK + + + + +EOF + +if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then + virsh net-destroy devstack-$GUEST_NETWORK || true + virsh net-create $VM_DIR/net.xml +fi + +# libvirt.xml configuration +LIBVIRT_XML=$VM_DIR/libvirt.xml +cat > $LIBVIRT_XML < + $GUEST_NAME + $GUEST_RAM + + hvm + + $VM_DIR/kernel + root=/dev/vda ro init=/usr/lib/cloud-init/uncloud-init ds=nocloud ubuntu-pass=ubuntu + + + + + + $GUEST_CORES + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +EOF + +# Create the instance +cd $VM_DIR && virsh create libvirt.xml + +# Tail the console log till we are done +WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1} +if [ "$WAIT_TILL_LAUNCH" = "1" ]; then + # Done creating the container, let's tail the log + echo + echo "=============================================================" + echo " -- YAY! --" + echo "=============================================================" + echo + echo "We're done launching the vm, about to start tailing the" + echo "stack.sh log. It will take a second or two to start." + echo + echo "Just CTRL-C at any time to stop tailing." + + while [ ! -e "$VM_DIR/console.log" ]; do + sleep 1 + done + + tail -F $VM_DIR/console.log & + + TAIL_PID=$! 
+ + function kill_tail() { + kill $TAIL_PID + exit 1 + } + + # Let Ctrl-c kill tail and exit + trap kill_tail SIGINT + + set +o xtrace + + echo "Waiting stack.sh to finish..." + while ! cat $VM_DIR/console.log | grep -q 'All done' ; do + sleep 1 + done + + set -o xtrace + + kill $TAIL_PID + + if ! grep -q "^stack.sh completed in" $VM_DIR/console.log; then + exit 1 + fi + echo "" + echo "Finished - Zip-a-dee Doo-dah!" +fi diff --git a/tools/uec/meta-data b/tools/uec/meta-data new file mode 100644 index 00000000..d0681954 --- /dev/null +++ b/tools/uec/meta-data @@ -0,0 +1,19 @@ +#ami-id: ami-fd4aa494 +#ami-launch-index: '0' +#ami-manifest-path: ubuntu-images-us/ubuntu-lucid-10.04-amd64-server-20100427.1.manifest.xml +#block-device-mapping: {ami: sda1, ephemeral0: sdb, ephemeral1: sdc, root: /dev/sda1} +hostname: smoser-sys +#instance-action: none +instance-id: i-87018aed +instance-type: m1.large +#kernel-id: aki-c8b258a1 +local-hostname: smoser-sys.mosers.us +#local-ipv4: 10.223.26.178 +#placement: {availability-zone: us-east-1d} +#public-hostname: ec2-184-72-174-120.compute-1.amazonaws.com +#public-ipv4: 184.72.174.120 +#public-keys: +# ec2-keypair.us-east-1: [ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCD9dlT00vOUC8Ttq6YH8RzUCVqPQl6HaSfWSTKYnZiVCpTBj1CaRZPLRLmkSB9Nziy4aRJa/LZMbBHXytQKnB1psvNknqC2UNlrXXMk+Vx5S4vg21MXYYimK4uZEY0Qz29QUiTyNsx18jpAaF4ocUpTpRhxPEBCcSCDmMbc27MU2XuTbasM2NjW/w0bBF3ZFhdH68dZICXdTxS2jUrtrCnc1D/QXVZ5kQO3jsmSyJg8E0nE+6Onpx2YRoVRSwjpGzVZ+BlXPnN5xBREBG8XxzhNFHJbek+RgK5TfL+k4yD4XhnVZuZu53cBAFhj+xPKhtisSd+YmaEq+Jt9uS0Ekd5 +# ec2-keypair.us-east-1, ''] +#reservation-id: r-e2225889 +#security-groups: default diff --git a/tools/uec/user-data b/tools/uec/user-data new file mode 100644 index 00000000..f9fa4775 --- /dev/null +++ b/tools/uec/user-data @@ -0,0 +1,32 @@ +#cloud-config +#apt_update: false +#apt_upgrade: true +#packages: [ bzr, pastebinit, ubuntu-dev-tools, ccache, bzr-builddeb, vim-nox, git-core, lftp ] + +apt_sources: + - source: ppa:smoser/ppa + +disable_root: True + +mounts: + - [ ephemeral0, None ] + - [ swap, None ] + +ssh_import_id: [smoser ] + +sm_misc: + - &user_setup | + set -x; exec > ~/user_setup.log 2>&1 + echo "starting at $(date -R)" + echo "set -o vi" >> ~/.bashrc + cat >> ~/.profile < ~/runcmd.log' ] + - [ sudo, -Hu, ubuntu, sh, -c, 'read up sleep < /proc/uptime; echo $(date): runcmd up at $up | tee -a ~/runcmd.log' ] + - [ sudo, -Hu, ubuntu, sh, -c, *user_setup ] + +password: passw0rd +chpasswd: { expire: False } From 5f039326268cf452aa45c011b8ec4552fb49a578 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sat, 5 Nov 2011 16:12:20 -0700 Subject: [PATCH 062/967] make some changes prepping for trunk branch --- files/nova-api-paste.ini | 127 +++++++++++++++++++++++++++++++++++++++ stack.sh | 34 ++++++----- 2 files changed, 145 insertions(+), 16 deletions(-) create mode 100644 files/nova-api-paste.ini diff --git a/files/nova-api-paste.ini b/files/nova-api-paste.ini new file mode 100644 index 00000000..0b56c9f4 --- /dev/null +++ b/files/nova-api-paste.ini @@ -0,0 +1,127 @@ +####### +# EC2 # +####### + +[composite:ec2] +use = egg:Paste#urlmap +/: ec2versions +/services/Cloud: ec2cloud +/services/Admin: ec2admin +/latest: ec2metadata +/2007-01-19: ec2metadata +/2007-03-01: ec2metadata +/2007-08-29: ec2metadata +/2007-10-10: ec2metadata +/2007-12-15: ec2metadata +/2008-02-01: ec2metadata +/2008-09-01: ec2metadata +/2009-04-04: ec2metadata +/1.0: ec2metadata + +[pipeline:ec2cloud] +pipeline = logrequest totoken authtoken keystonecontext cloudrequest 
authorizer ec2executor + +[pipeline:ec2admin] +pipeline = logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor + +[pipeline:ec2metadata] +pipeline = logrequest ec2md + +[pipeline:ec2versions] +pipeline = logrequest ec2ver + +[filter:logrequest] +paste.filter_factory = nova.api.ec2:RequestLogging.factory + +[filter:ec2lockout] +paste.filter_factory = nova.api.ec2:Lockout.factory + +[filter:totoken] +paste.filter_factory = keystone.middleware.ec2_token:EC2Token.factory + +[filter:ec2noauth] +paste.filter_factory = nova.api.ec2:NoAuth.factory + +[filter:authenticate] +paste.filter_factory = nova.api.ec2:Authenticate.factory + +[filter:cloudrequest] +controller = nova.api.ec2.cloud.CloudController +paste.filter_factory = nova.api.ec2:Requestify.factory + +[filter:adminrequest] +controller = nova.api.ec2.admin.AdminController +paste.filter_factory = nova.api.ec2:Requestify.factory + +[filter:authorizer] +paste.filter_factory = nova.api.ec2:Authorizer.factory + +[app:ec2executor] +paste.app_factory = nova.api.ec2:Executor.factory + +[app:ec2ver] +paste.app_factory = nova.api.ec2:Versions.factory + +[app:ec2md] +paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.factory + +############# +# Openstack # +############# + +[composite:osapi] +use = egg:Paste#urlmap +/: osversions +/v1.0: openstackapi10 +/v1.1: openstackapi11 + +[pipeline:openstackapi10] +pipeline = faultwrap authtoken keystonecontext ratelimit osapiapp10 + +[pipeline:openstackapi11] +pipeline = faultwrap authtoken keystonecontext ratelimit extensions osapiapp11 + +[filter:faultwrap] +paste.filter_factory = nova.api.openstack:FaultWrapper.factory + +[filter:auth] +paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory + +[filter:noauth] +paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory + +[filter:ratelimit] +paste.filter_factory = nova.api.openstack.limits:RateLimitingMiddleware.factory + +[filter:extensions] +paste.filter_factory = nova.api.openstack.extensions:ExtensionMiddleware.factory + +[app:osapiapp10] +paste.app_factory = nova.api.openstack:APIRouterV10.factory + +[app:osapiapp11] +paste.app_factory = nova.api.openstack:APIRouterV11.factory + +[pipeline:osversions] +pipeline = faultwrap osversionapp + +[app:osversionapp] +paste.app_factory = nova.api.openstack.versions:Versions.factory + +########## +# Shared # +########## + +[filter:keystonecontext] +paste.filter_factory = keystone.middleware.nova_keystone_context:NovaKeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystone.middleware.auth_token:filter_factory +service_protocol = http +service_host = 127.0.0.1 +service_port = 5000 +auth_host = 127.0.0.1 +auth_port = 35357 +auth_protocol = http +auth_uri = http://127.0.0.1:5000/ +admin_token = 999888777666 diff --git a/stack.sh b/stack.sh index 32b4539e..da097bd3 100755 --- a/stack.sh +++ b/stack.sh @@ -230,7 +230,7 @@ VLAN_INTERFACE=${VLAN_INTERFACE:-$PUBLIC_INTERFACE} # Multi-host is a mode where each compute node runs its own network node. This # allows network operations and routing for a VM to occur on the server that is # running the VM - removing a SPOF and bandwidth bottleneck. -MULTI_HOST=${MULTI_HOST:-0} +MULTI_HOST=${MULTI_HOST:-False} # If you are using FlatDHCP on multiple hosts, set the ``FLAT_INTERFACE`` # variable but make sure that the interface doesn't already have an @@ -323,7 +323,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then # can never change. 
read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH." fi - + # Keystone # -------- @@ -567,8 +567,10 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then # required for nova to validate keystone tokens - except we need to switch # the config to use our service token instead (instead of the invalid token # 999888777666). - cp $KEYSTONE_DIR/examples/paste/nova-api-paste.ini $NOVA_DIR/bin - sed -e "s,999888777666,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini + if [ ! -e $NOVA_DIR/bin/nova-api-paste.ini ]; then + cp $FILES/nova-api-paste.ini $NOVA_DIR/bin + sed -e "s,999888777666,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini + fi fi if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then @@ -650,13 +652,13 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then USER_GROUP=$(id -g) sudo mkdir -p ${SWIFT_DATA_LOCATION}/drives sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_LOCATION}/drives - + # We then create a loopback disk and format it to XFS. if [[ ! -e ${SWIFT_DATA_LOCATION}/drives/images/swift.img ]];then mkdir -p ${SWIFT_DATA_LOCATION}/drives/images sudo touch ${SWIFT_DATA_LOCATION}/drives/images/swift.img sudo chown $USER: ${SWIFT_DATA_LOCATION}/drives/images/swift.img - + dd if=/dev/zero of=${SWIFT_DATA_LOCATION}/drives/images/swift.img \ bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} mkfs.xfs -f -i size=1024 ${SWIFT_DATA_LOCATION}/drives/images/swift.img @@ -673,9 +675,9 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then # We then create link to that mounted location so swift would know # where to go. for x in {1..4}; do sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done - + # We now have to emulate a few different servers into one we - # create all the directories needed for swift + # create all the directories needed for swift tmpd="" for d in ${SWIFT_DATA_LOCATION}/drives/sdb1/{1..4} \ ${SWIFT_CONFIG_LOCATION}/{object,container,account}-server \ @@ -691,7 +693,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then # swift-init has a bug using /etc/swift until bug #885595 is fixed # we have to create a link sudo ln -s ${SWIFT_CONFIG_LOCATION} /etc/swift - + # Swift use rsync to syncronize between all the different # partitions (which make more sense when you have a multi-node # setup) we configure it with our version of rsync. @@ -727,7 +729,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then local bind_port=$2 local log_facility=$3 local node_number - + for node_number in {1..4};do node_path=${SWIFT_DATA_LOCATION}/${node_number} sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \ @@ -754,14 +756,14 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then # We then can start rsync. sudo /etc/init.d/rsync restart || : - + # Create our ring for the object/container/account. /usr/local/bin/swift-remakerings # And now we launch swift-startmain to get our cluster running # ready to be tested. 
/usr/local/bin/swift-startmain || : - + unset s swift_hash swift_auth_server tmpd fi @@ -828,12 +830,12 @@ add_nova_flag "--glance_api_servers=$GLANCE_HOSTPORT" if [ -n "$INSTANCES_PATH" ]; then add_nova_flag "--instances_path=$INSTANCES_PATH" fi -if [ -n "$MULTI_HOST" ]; then - add_nova_flag "--multi_host=$MULTI_HOST" - add_nova_flag "--send_arp_for_ha=1" +if [ "$MULTI_HOST" != "False" ]; then + add_nova_flag "--multi_host" + add_nova_flag "--send_arp_for_ha" fi if [ "$SYSLOG" != "False" ]; then - add_nova_flag "--use_syslog=1" + add_nova_flag "--use_syslog" fi # XenServer From 228f246a838ace75b620292f46f746aee1035c48 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 5 Nov 2011 17:36:14 -0700 Subject: [PATCH 063/967] work towards booting --- tools/build_uec.sh | 69 +++++++++++++++++++++++++--------------------- 1 file changed, 37 insertions(+), 32 deletions(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index aae4fb8e..24422af3 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -1,6 +1,9 @@ #!/usr/bin/env bash -# Make sure that we have the proper version of ubuntu +# Ubuntu distro to install +DIST_NAME=${DIST_NAME:-oneiric} + +# Make sure that we have the proper version of ubuntu (only works on natty/oneiric) UBUNTU_VERSION=`cat /etc/lsb-release | grep CODENAME | sed 's/.*=//g'` if [ ! "oneiric" = "$UBUNTU_VERSION" ]; then if [ ! "natty" = "$UBUNTU_VERSION" ]; then @@ -9,13 +12,14 @@ if [ ! "oneiric" = "$UBUNTU_VERSION" ]; then fi fi -# exit on error to stop unexpected errors -set -o errexit - # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=`cd $TOOLS_DIR/..; pwd` +# exit on error to stop unexpected errors +set -o errexit +set -o xtrace + # Abort if localrc is not set if [ ! -e $TOP_DIR/localrc ]; then echo "You must have a localrc with ALL necessary passwords defined before proceeding." @@ -30,18 +34,19 @@ dpkg -l kvm libvirt-bin kpartx || apt-get install -y --force-yes kvm libvirt-bin WORK_DIR=${WORK_DIR:-/opt/kvmstack} # Where to store images -IMAGES_DIR=$WORK_DIR/images +image_dir=$WORK_DIR/images/$DIST_NAME +mkdir -p $image_dir # Original version of built image -DIST_NAME=${DIST_NAME:oneiric} -UEC_NAME=$DIST_NAME-server-cloudimg-amd64 -UEC_URL=http://uec-images.ubuntu.com/$DIST_NAME/current/$UEC_NAME-disk1.img -BASE_IMAGE=$IMAGES_DIR/$DIST_NAME.raw +uec_url=http://uec-images.ubuntu.com/$DIST_NAME/current/$DIST_NAME-server-cloudimg-amd64.tar.gz +tarball=$image_dir/$(basename $UEC_URL) # download the base uec image if we haven't already -if [ ! -e $BASE_IMAGE ]; then - mkdir -p $IMAGES_DIR - curl $UEC_URL -O $BASE_IMAGE +if [ ! 
-f $tarball ]; then + curl $uec_url -o $tarball + tar -Sxvzf $tarball $image_dir + cp $image_dir/*.img $image_dir/disk + cp $image_dir/*-vmlinuz-virtual $image_dir/kernel fi cd $TOP_DIR @@ -59,14 +64,16 @@ GUEST_NAME=${GUEST_NAME:-devstack} virsh destroy $GUEST_NAME || true # Where this vm is stored -VM_DIR=$WORK_DIR/instances/$GUEST_NAME +vm_dir=$WORK_DIR/instances/$GUEST_NAME # Create vm dir and remove old disk -mkdir -p $VM_DIR -rm -f $VM_DIR/disk.img +mkdir -p $vm_dir +rm -f $vm_dir/disk # Create a copy of the base image -qemu-img create -f qcow2 -b ${BASE_IMAGE} $VM_DIR/disk.img +# qemu-img create -f qcow2 -b ${BASE_IMAGE} $vm_dir/disk +cp $image_dir/disk $vm_dir/disk +cp $image_dir/kernel $vm_dir/kernel # Back to devstack cd $TOP_DIR @@ -82,7 +89,7 @@ GUEST_RAM=${GUEST_RAM:-1524288} GUEST_CORES=${GUEST_CORES:-1} # libvirt.xml configuration -NET_XML=$VM_DIR/net.xml +NET_XML=$vm_dir/net.xml cat > $NET_XML < devstack-$GUEST_NETWORK @@ -94,20 +101,19 @@ EOF if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then virsh net-destroy devstack-$GUEST_NETWORK || true - virsh net-create $VM_DIR/net.xml + virsh net-create $vm_dir/net.xml fi # libvirt.xml configuration -LIBVIRT_XML=$VM_DIR/libvirt.xml +LIBVIRT_XML=$vm_dir/libvirt.xml cat > $LIBVIRT_XML < $GUEST_NAME $GUEST_RAM - hvm - - $VM_DIR/kernel - root=/dev/vda ro init=/usr/lib/cloud-init/uncloud-init ds=nocloud ubuntu-pass=ubuntu + hvm + $vm_dir/kernel + root=/dev/vda console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud ubuntu-pass=ubuntu @@ -117,7 +123,7 @@ cat > $LIBVIRT_XML < - + @@ -127,7 +133,7 @@ cat > $LIBVIRT_XML < - + @@ -147,11 +153,12 @@ cat > $LIBVIRT_XML < Date: Sat, 5 Nov 2011 17:37:33 -0700 Subject: [PATCH 064/967] uec_url should be underscore --- tools/build_uec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 24422af3..53ada3e3 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -39,7 +39,7 @@ mkdir -p $image_dir # Original version of built image uec_url=http://uec-images.ubuntu.com/$DIST_NAME/current/$DIST_NAME-server-cloudimg-amd64.tar.gz -tarball=$image_dir/$(basename $UEC_URL) +tarball=$image_dir/$(basename $uec_url) # download the base uec image if we haven't already if [ ! -f $tarball ]; then From 3b7685823ca62469499f6e4354cce8cfea6e8ee1 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 5 Nov 2011 17:40:20 -0700 Subject: [PATCH 065/967] extract tarball in image dir --- tools/build_uec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 53ada3e3..accc37b6 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -44,7 +44,7 @@ tarball=$image_dir/$(basename $uec_url) # download the base uec image if we haven't already if [ ! 
-f $tarball ]; then curl $uec_url -o $tarball - tar -Sxvzf $tarball $image_dir + (cd $image_dir && tar -Sxvzf $tarball) cp $image_dir/*.img $image_dir/disk cp $image_dir/*-vmlinuz-virtual $image_dir/kernel fi From f5a76919b3da9a6a4c3a9a84b2455293b91e2711 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 5 Nov 2011 17:47:50 -0700 Subject: [PATCH 066/967] closer to fine --- tools/build_uec.sh | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index accc37b6..5d93da3a 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -71,9 +71,7 @@ mkdir -p $vm_dir rm -f $vm_dir/disk # Create a copy of the base image -# qemu-img create -f qcow2 -b ${BASE_IMAGE} $vm_dir/disk -cp $image_dir/disk $vm_dir/disk -cp $image_dir/kernel $vm_dir/kernel +qemu-img create -f qcow2 -b $image_dir/disk $vm_dir/disk # Back to devstack cd $TOP_DIR @@ -112,8 +110,8 @@ cat > $LIBVIRT_XML <$GUEST_RAM hvm - $vm_dir/kernel - root=/dev/vda console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud ubuntu-pass=ubuntu + $image_dir/kernel + root=/dev/vda ro console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud ubuntu-pass=ubuntu From a6282623449666f945d6a3e569513486513eb9cf Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 5 Nov 2011 18:39:33 -0700 Subject: [PATCH 067/967] let dhcp work --- tools/build_uec.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 5d93da3a..5f85486f 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -93,7 +93,11 @@ cat > $NET_XML <devstack-$GUEST_NETWORK - + + + + + EOF From 63fa7abd561d401df613a9611ab125737895563e Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 5 Nov 2011 18:49:36 -0700 Subject: [PATCH 068/967] tweaks --- tools/build_uec.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 5f85486f..7d344f00 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -95,7 +95,7 @@ cat > $NET_XML < - + @@ -155,7 +155,7 @@ cat > $LIBVIRT_XML < Date: Sat, 5 Nov 2011 22:15:50 -0700 Subject: [PATCH 069/967] simple metadata service --- tools/uec/meta.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 tools/uec/meta.py diff --git a/tools/uec/meta.py b/tools/uec/meta.py new file mode 100644 index 00000000..5b845d81 --- /dev/null +++ b/tools/uec/meta.py @@ -0,0 +1,29 @@ +import sys +from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler +from SimpleHTTPServer import SimpleHTTPRequestHandler + +def main(host, port, HandlerClass = SimpleHTTPRequestHandler, + ServerClass = HTTPServer, protocol="HTTP/1.0"): + """simple http server that listens on a give address:port""" + + server_address = (host, port) + + HandlerClass.protocol_version = protocol + httpd = ServerClass(server_address, HandlerClass) + + sa = httpd.socket.getsockname() + print "Serving HTTP on", sa[0], "port", sa[1], "..." 
+ httpd.serve_forever() + +if __name__ == '__main__': + if sys.argv[1:]: + address = sys.argv[1] + else: + address = '0.0.0.0' + if ':' in address: + host, port = address.split(':') + else: + host = address + port = 8080 + + main(host, int(port)) From 9ed6bbd503469e23bbe03b4ec15c955a07a47e9d Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 5 Nov 2011 22:28:46 -0700 Subject: [PATCH 070/967] attempt to run the metadata service --- tools/build_uec.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 7d344f00..9b67cf22 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -115,7 +115,7 @@ cat > $LIBVIRT_XML < hvm $image_dir/kernel - root=/dev/vda ro console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud ubuntu-pass=ubuntu + root=/dev/vda ro console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud-net;s=http://192.168.$GUEST.1:4567/ ubuntu-pass=ubuntu @@ -154,6 +154,10 @@ cat > $LIBVIRT_XML < EOF +cp -r $TOOLS_DIR/uec $vm_dir/uec + +(cd $vm_dir/uec; python meta.py 192.168.$GUEST_NETWORK.1:4567 &) + # Create the instance virsh create $vm_dir/libvirt.xml From f504e281c0e1563f7d2d1c6faa6c6f820a2982af Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 5 Nov 2011 22:29:35 -0700 Subject: [PATCH 071/967] don't need to spawn a bash --- tools/build_uec.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 9b67cf22..4ef2d048 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -156,7 +156,8 @@ EOF cp -r $TOOLS_DIR/uec $vm_dir/uec -(cd $vm_dir/uec; python meta.py 192.168.$GUEST_NETWORK.1:4567 &) +cd $vm_dir/uec +python meta.py 192.168.$GUEST_NETWORK.1:4567 & # Create the instance virsh create $vm_dir/libvirt.xml From 438ea577c4a27570ff402087cfbede07b888e239 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 5 Nov 2011 22:33:49 -0700 Subject: [PATCH 072/967] typo --- tools/build_uec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 4ef2d048..efae619a 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -115,7 +115,7 @@ cat > $LIBVIRT_XML < hvm $image_dir/kernel - root=/dev/vda ro console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud-net;s=http://192.168.$GUEST.1:4567/ ubuntu-pass=ubuntu + root=/dev/vda ro console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud-net;s=http://192.168.$GUEST_NETWORK.1:4567/ ubuntu-pass=ubuntu From e49f751aa9b495f7e2f19f5a82caff6aec27da18 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 5 Nov 2011 22:34:45 -0700 Subject: [PATCH 073/967] force the uec to be recreated --- tools/build_uec.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index efae619a..0a821036 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -154,6 +154,7 @@ cat > $LIBVIRT_XML < EOF +rm -rf $vm_dir/uec cp -r $TOOLS_DIR/uec $vm_dir/uec cd $vm_dir/uec From ee34f62ba7b552062388df1520287d856d216c8d Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 5 Nov 2011 22:41:57 -0700 Subject: [PATCH 074/967] kill the old metadata process --- tools/build_uec.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 0a821036..b4052f22 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -157,6 +157,8 @@ EOF rm -rf $vm_dir/uec cp 
-r $TOOLS_DIR/uec $vm_dir/uec +# (re)start a metadata service +`lsof -i -n | grep 192.168.$GUEST_NETWORK.1:4567 | awk '{print $2}' | xargs -n1 kill -9` cd $vm_dir/uec python meta.py 192.168.$GUEST_NETWORK.1:4567 & From d7ce7afe0fc67e40ff5c711794ef31d3f7a3031c Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 5 Nov 2011 22:47:28 -0700 Subject: [PATCH 075/967] set the hostname --- tools/build_uec.sh | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index b4052f22..719d2684 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -154,11 +154,20 @@ cat > $LIBVIRT_XML < EOF + rm -rf $vm_dir/uec cp -r $TOOLS_DIR/uec $vm_dir/uec +# set metadata +cat > $vm_dir/uec/meta-data< Date: Sat, 5 Nov 2011 22:49:51 -0700 Subject: [PATCH 076/967] switch lsof syntax --- tools/build_uec.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 719d2684..61709ab2 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -166,8 +166,7 @@ instance-type: m1.large EOF # (re)start a metadata service -#lsof -iTCP:4567 -sTCP:LISTEN -n -lsof -i -n | grep 192.168.$GUEST_NETWORK.1:4567 | awk '{print $2}' | xargs -n1 kill -9 +lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | xargs -n1 kill -9 cd $vm_dir/uec python meta.py 192.168.$GUEST_NETWORK.1:4567 & From 3ce79aa55bdadd4be53cee8ad9c0ef5da0ad3749 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 5 Nov 2011 22:52:20 -0700 Subject: [PATCH 077/967] improve kill --- tools/build_uec.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 61709ab2..b7567f6e 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -166,7 +166,10 @@ instance-type: m1.large EOF # (re)start a metadata service -lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | xargs -n1 kill -9 +( + pid=`lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | tail -1` + [ "$pid" == "PID" ] || kill -9 $pid +) cd $vm_dir/uec python meta.py 192.168.$GUEST_NETWORK.1:4567 & From cc03cc8d58514a4c88579791fdfa0559569f9818 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 5 Nov 2011 22:54:54 -0700 Subject: [PATCH 078/967] kill works --- tools/build_uec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index b7567f6e..89d46618 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -168,7 +168,7 @@ EOF # (re)start a metadata service ( pid=`lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | tail -1` - [ "$pid" == "PID" ] || kill -9 $pid + [ ! -e $pid ] || kill -9 $pid ) cd $vm_dir/uec python meta.py 192.168.$GUEST_NETWORK.1:4567 & From 9645b0c9c9d862a585e8923cad79e916f7585b6e Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 5 Nov 2011 23:05:33 -0700 Subject: [PATCH 079/967] kill ... --- tools/build_uec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 89d46618..f420d6b6 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -168,7 +168,7 @@ EOF # (re)start a metadata service ( pid=`lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | tail -1` - [ ! 
-e $pid ] || kill -9 $pid + [ -z "$pid" ] || kill -9 $pid ) cd $vm_dir/uec python meta.py 192.168.$GUEST_NETWORK.1:4567 & From 7306f3bfc77e92657107c9ec17da35a6df2110f5 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 5 Nov 2011 23:13:34 -0700 Subject: [PATCH 080/967] more metadata --- tools/build_uec.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index f420d6b6..87b72750 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -163,6 +163,7 @@ cat > $vm_dir/uec/meta-data< Date: Sat, 5 Nov 2011 23:16:53 -0700 Subject: [PATCH 081/967] more userdata --- tools/build_uec.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 87b72750..392427d4 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -166,6 +166,14 @@ instance-type: m1.large local-hostname: $GUEST_NAME.local EOF +# set metadata +cat > $vm_dir/uec/user-data< Date: Sat, 5 Nov 2011 23:20:11 -0700 Subject: [PATCH 082/967] more userdata --- tools/build_uec.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 392427d4..3b5e49f1 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -172,6 +172,9 @@ cat > $vm_dir/uec/user-data< Date: Sat, 5 Nov 2011 23:30:22 -0700 Subject: [PATCH 083/967] git clone --- tools/build_uec.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 3b5e49f1..cd0f0e08 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -175,6 +175,8 @@ packages: [ vim-nox, git-core ] password: pass chpasswd: { expire: False } disable_root: false +runcmd: + - [ git, clone, https://github.com/cloudbuilders/devstack.git ] EOF # (re)start a metadata service From 446a3304bcdff585d0fcea487a89e247bbaa4f6b Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 5 Nov 2011 23:36:29 -0700 Subject: [PATCH 084/967] another attempt at userdata --- tools/build_uec.sh | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index cd0f0e08..d6de8472 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -168,15 +168,18 @@ EOF # set metadata cat > $vm_dir/uec/user-data< localrc +echo ADMIN_PASSWORD=golfing >> localrc +echo MYSQL_PASSWORD=golfing >> localrc +echo RABBIT_PASSWORD=golfing >> localrc +echo SERVICE_TOKEN=123124123124 >> localrc +echo FLAT_INTERFACE=br100 >> localrc +./stack.sh EOF # (re)start a metadata service From 9102d454f6a892ebb9e000f34a1b515c49da3f8c Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 5 Nov 2011 23:49:08 -0700 Subject: [PATCH 085/967] resize the uec image --- tools/build_uec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index d6de8472..e1d64471 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -45,7 +45,7 @@ tarball=$image_dir/$(basename $uec_url) if [ ! 
-f $tarball ]; then curl $uec_url -o $tarball (cd $image_dir && tar -Sxvzf $tarball) - cp $image_dir/*.img $image_dir/disk + resize-part-image $image_dir/*.img 10G $image_dir/disk cp $image_dir/*-vmlinuz-virtual $image_dir/kernel fi From 6b1c26e96194d8a20d25cf3b11674d2a2fe72136 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sun, 6 Nov 2011 00:13:30 -0700 Subject: [PATCH 086/967] use the provided localrc --- tools/build_uec.sh | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index e1d64471..240ea78e 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -173,12 +173,9 @@ apt-get update apt-get install git -y git clone https://github.com/cloudbuilders/devstack.git cd devstack -echo DASH_BRANCH=instance-overview > localrc -echo ADMIN_PASSWORD=golfing >> localrc -echo MYSQL_PASSWORD=golfing >> localrc -echo RABBIT_PASSWORD=golfing >> localrc -echo SERVICE_TOKEN=123124123124 >> localrc -echo FLAT_INTERFACE=br100 >> localrc +cat > localrc < Date: Sun, 6 Nov 2011 00:22:41 -0700 Subject: [PATCH 087/967] use the right devstack sha --- tools/build_uec.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 240ea78e..cb1ce2d1 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -173,6 +173,9 @@ apt-get update apt-get install git -y git clone https://github.com/cloudbuilders/devstack.git cd devstack +git remote set-url origin `cd $TOP_DIR; git remote show origin | grep Fetch | awk '{print $3}'` +git fetch +git checkout $GIT_COMMIT cat > localrc < Date: Sun, 6 Nov 2011 00:26:29 -0700 Subject: [PATCH 088/967] accidentally running stack.sh ... --- tools/build_uec.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index cb1ce2d1..98b39b91 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -176,9 +176,9 @@ cd devstack git remote set-url origin `cd $TOP_DIR; git remote show origin | grep Fetch | awk '{print $3}'` git fetch git checkout $GIT_COMMIT -cat > localrc < localrc < Date: Sun, 6 Nov 2011 00:32:21 -0700 Subject: [PATCH 089/967] use the right revision --- tools/build_uec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 98b39b91..2a699523 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -175,7 +175,7 @@ git clone https://github.com/cloudbuilders/devstack.git cd devstack git remote set-url origin `cd $TOP_DIR; git remote show origin | grep Fetch | awk '{print $3}'` git fetch -git checkout $GIT_COMMIT +git checkout `git rev-parse HEAD` cat > localrc < Date: Sun, 6 Nov 2011 00:42:11 -0700 Subject: [PATCH 090/967] sleep half a second to allow bash to start in screen --- stack.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/stack.sh b/stack.sh index 32b4539e..bb26e44d 100755 --- a/stack.sh +++ b/stack.sh @@ -909,6 +909,7 @@ function screen_it { NL=`echo -ne '\015'` if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then screen -S stack -X screen -t $1 + sleep 0.5 screen -S stack -p $1 -X stuff "$2$NL" fi } From 5f4ae107efbc481db5d3c30c90a63934a2664d51 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sun, 6 Nov 2011 07:47:09 -0800 Subject: [PATCH 091/967] chown should be to stack user, not root --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index bb26e44d..7b0c9009 
100755 --- a/stack.sh +++ b/stack.sh @@ -121,7 +121,7 @@ if [[ $EUID -eq 0 ]]; then echo "Copying files to stack user" STACK_DIR="$DEST/${PWD##*/}" cp -r -f "$PWD" "$STACK_DIR" - chown -R $USER "$STACK_DIR" + chown -R stack "$STACK_DIR" if [[ "$SHELL_AFTER_RUN" != "no" ]]; then exec su -c "set -e; cd $STACK_DIR; bash stack.sh; bash" stack else From 53d7533d1570a7fe536126c3b4e84ae4928931a1 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sun, 6 Nov 2011 07:54:11 -0800 Subject: [PATCH 092/967] pull DIST_NAME from source --- tools/build_uec.sh | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 2a699523..a0c27882 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -1,8 +1,5 @@ #!/usr/bin/env bash -# Ubuntu distro to install -DIST_NAME=${DIST_NAME:-oneiric} - # Make sure that we have the proper version of ubuntu (only works on natty/oneiric) UBUNTU_VERSION=`cat /etc/lsb-release | grep CODENAME | sed 's/.*=//g'` if [ ! "oneiric" = "$UBUNTU_VERSION" ]; then @@ -16,6 +13,14 @@ fi TOOLS_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=`cd $TOOLS_DIR/..; pwd` +cd $TOP_DIR + +# Source params +source ./stackrc + +# Ubuntu distro to install +DIST_NAME=${DIST_NAME:-oneiric} + # exit on error to stop unexpected errors set -o errexit set -o xtrace @@ -49,10 +54,6 @@ if [ ! -f $tarball ]; then cp $image_dir/*-vmlinuz-virtual $image_dir/kernel fi -cd $TOP_DIR - -# Source params -source ./stackrc # Configure the root password of the vm to be the same as ``ADMIN_PASSWORD`` ROOT_PASSWORD=${ADMIN_PASSWORD:-password} From 00d6bc6529899326568f37375db77ac3cea008e1 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sun, 6 Nov 2011 07:56:18 -0800 Subject: [PATCH 093/967] Don't forget to echo so we can find it --- tools/build_uec.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index a0c27882..bccddaf0 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -181,6 +181,7 @@ cat > localrc < Date: Sun, 6 Nov 2011 08:00:28 -0800 Subject: [PATCH 094/967] should speed up by 20 seconds - sudo and no sleep --- stack.sh | 3 +-- tools/build_uec.sh | 3 ++- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 7b0c9009..33eb2079 100755 --- a/stack.sh +++ b/stack.sh @@ -103,8 +103,7 @@ if [[ $EUID -eq 0 ]]; then # since this script runs as a normal user, we need to give that user # ability to run sudo - apt_get update - apt_get install sudo + dpkg -l sudo || apt_get update && apt_get install sudo if ! getent passwd stack >/dev/null; then echo "Creating a user called stack" diff --git a/tools/build_uec.sh b/tools/build_uec.sh index bccddaf0..266356b9 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -171,13 +171,14 @@ EOF cat > $vm_dir/uec/user-data< localrc < Date: Sun, 6 Nov 2011 08:09:03 -0800 Subject: [PATCH 095/967] Switch the way we check for completion --- tools/build_uec.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 266356b9..44c8c0fc 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -182,7 +182,6 @@ ROOTSLEEP=0 `cat $TOP_DIR/localrc` LOCAL_EOF ./stack.sh -echo "All done" EOF # (re)start a metadata service @@ -228,7 +227,7 @@ if [ "$WAIT_TILL_LAUNCH" = "1" ]; then trap kill_tail SIGINT echo "Waiting stack.sh to finish..." - while ! cat $vm_dir/console.log | grep -q 'All done' ; do + while ! 
cat $vm_dir/console.log | grep -q '^stack.sh (completed|failed)' ; do sleep 1 done From d55a5159128a213789d9a7a6db3ed6225206eec3 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sun, 6 Nov 2011 08:16:42 -0800 Subject: [PATCH 096/967] egrep needed for parens --- tools/build_uec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 44c8c0fc..ada65969 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -227,7 +227,7 @@ if [ "$WAIT_TILL_LAUNCH" = "1" ]; then trap kill_tail SIGINT echo "Waiting stack.sh to finish..." - while ! cat $vm_dir/console.log | grep -q '^stack.sh (completed|failed)' ; do + while ! egrep -q '^stack.sh (completed|failed)' $vm_dir/console.log ; do sleep 1 done From b17c4f30eb6038f58e0186d8621406b68bf54914 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sun, 6 Nov 2011 09:25:55 -0800 Subject: [PATCH 097/967] make sure hostname resolves --- tools/build_uec.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index ada65969..ee78192a 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -170,6 +170,8 @@ EOF # set metadata cat > $vm_dir/uec/user-data< Date: Sun, 6 Nov 2011 09:35:13 -0800 Subject: [PATCH 098/967] run hostname on remote server --- tools/build_uec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index ee78192a..cbcdad65 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -171,7 +171,7 @@ EOF cat > $vm_dir/uec/user-data< Date: Sun, 6 Nov 2011 10:29:10 -0800 Subject: [PATCH 099/967] increase the dhcp range --- tools/build_uec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index cbcdad65..a0997e02 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -96,7 +96,7 @@ cat > $NET_XML < - + From dca89009f5f5468f13158eec3b080ecb0bb5545f Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sun, 6 Nov 2011 10:33:33 -0800 Subject: [PATCH 100/967] destroying the network isn't enough to delete the leases --- tools/build_uec.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index a0997e02..3ee1e2fb 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -104,6 +104,8 @@ EOF if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then virsh net-destroy devstack-$GUEST_NETWORK || true + # destroying the network isn't enough to delete the leases + rm -f /var/lib/libvirt/dnsmasq/devstack-$GUEST_NETWORK.leases virsh net-create $vm_dir/net.xml fi From 9812ffb9980a7ed7c3512873d522ac6ee5f52742 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 6 Nov 2011 11:18:26 -0800 Subject: [PATCH 101/967] clean up service token --- files/nova-api-paste.ini | 2 +- stack.sh | 15 ++++++--------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/files/nova-api-paste.ini b/files/nova-api-paste.ini index 0b56c9f4..2c642f8d 100644 --- a/files/nova-api-paste.ini +++ b/files/nova-api-paste.ini @@ -124,4 +124,4 @@ auth_host = 127.0.0.1 auth_port = 35357 auth_protocol = http auth_uri = http://127.0.0.1:5000/ -admin_token = 999888777666 +admin_token = %SERVICE_TOKEN% diff --git a/stack.sh b/stack.sh index da097bd3..fd305e2d 100755 --- a/stack.sh +++ b/stack.sh @@ -562,15 +562,12 @@ fi # ---- if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then - # We are going to use the sample http middleware configuration from the - # keystone project to launch nova. 
This paste config adds the configuration - # required for nova to validate keystone tokens - except we need to switch - # the config to use our service token instead (instead of the invalid token - # 999888777666). - if [ ! -e $NOVA_DIR/bin/nova-api-paste.ini ]; then - cp $FILES/nova-api-paste.ini $NOVA_DIR/bin - sed -e "s,999888777666,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini - fi + # We are going to use a sample http middleware configuration based on the + # one from the keystone project to launch nova. This paste config adds + # the configuration required for nova to validate keystone tokens. We add + # our own service token to the configuration. + cp $FILES/nova-api-paste.ini $NOVA_DIR/bin + sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini fi if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then From 6cbf2872544be0d81f0c9471ae5be7d8e319b7ea Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Mon, 7 Nov 2011 07:23:34 -0800 Subject: [PATCH 102/967] Fix typo in exercise.sh --- exercise.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercise.sh b/exercise.sh index 77d3a3b0..cca9a13a 100755 --- a/exercise.sh +++ b/exercise.sh @@ -165,7 +165,7 @@ ping -c1 -w1 $IP nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 # FIXME (anthony): make xs support security groups -if [ "$VIRT_DRIVER" != "xenserver"]; then +if [ "$VIRT_DRIVER" != "xenserver" ]; then # test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then print "Security group failure - ping should not be allowed!" From fda65b83f465ad372ac3d9088ebf17c97cff4a3a Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 2 Nov 2011 12:13:33 -0500 Subject: [PATCH 103/967] New build_ci_config.sh --- tools/build_ci_config.sh | 126 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 126 insertions(+) create mode 100755 tools/build_ci_config.sh diff --git a/tools/build_ci_config.sh b/tools/build_ci_config.sh new file mode 100755 index 00000000..c6e3cb4c --- /dev/null +++ b/tools/build_ci_config.sh @@ -0,0 +1,126 @@ +#!/usr/bin/env bash +# +# build_ci_config.sh - Build a config.ini for openstack-integration-tests +# (https://github.com/openstack/openstack-integration-tests) + +function usage { + echo "$0 - Build config.ini for openstack-integration-tests" + echo "" + echo "Usage: $0 configfile" + exit 1 +} + +if [ ! "$#" -eq "1" ]; then + usage +fi + +CONFIG_FILE=$1 + +# Clean up any resources that may be in use +cleanup() { + set +o errexit + + # Mop up temporary files + if [ -n "$CONFIG_FILE_TMP" -a -e "$CONFIG_FILE_TMP" ]; then + rm -f $CONFIG_FILE_TMP + fi + + # Kill ourselves to signal any calling process + trap 2; kill -2 $$ +} + +trap cleanup SIGHUP SIGINT SIGTERM + +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=`cd $TOOLS_DIR/..; pwd` + +# Abort if localrc is not set +if [ ! -e $TOP_DIR/localrc ]; then + echo "You must have a localrc with ALL necessary passwords and configuration defined before proceeding." + echo "See stack.sh for required passwords." 
+ exit 1 +fi + +# Source params +source ./stackrc + +# Where Openstack code lives +DEST=${DEST:-/opt/stack} + +# Process network configuration vars +GUEST_NETWORK=${GUEST_NETWORK:-1} +GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes} + +GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50} +GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24} +GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0} +GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.$GUEST_NETWORK.1} +GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $GUEST_NETWORK`"} +GUEST_RAM=${GUEST_RAM:-1524288} +GUEST_CORES=${GUEST_CORES:-1} + +# Use the GUEST_IP unless an explicit IP is set by ``HOST_IP`` +HOST_IP=${HOST_IP:-$GUEST_IP} +# Use the first IP if HOST_IP still is not set +if [ ! -n "$HOST_IP" ]; then + HOST_IP=`LC_ALL=C /sbin/ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` +fi + +RABBIT_HOST=${RABBIT_HOST:-localhost} + +# Glance connection info. Note the port must be specified. +GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$HOST_IP:9292} +set `echo $GLANCE_HOSTPORT | tr ':' ' '` +GLANCE_HOST=$1 +GLANCE_PORT=$2 + +CONFIG_FILE_TMP=$(mktemp $CONFIG_FILE.XXXXXX) +cat >$CONFIG_FILE_TMP < Date: Wed, 2 Nov 2011 21:21:36 -0500 Subject: [PATCH 104/967] Keystone uses api v2.0 --- tools/build_ci_config.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/build_ci_config.sh b/tools/build_ci_config.sh index c6e3cb4c..48f2a829 100755 --- a/tools/build_ci_config.sh +++ b/tools/build_ci_config.sh @@ -91,7 +91,7 @@ image_id = 1 [keystone] service_host = $HOST_IP service_port = 5000 -apiver = v1.1 +apiver = v2.0 user = admin password = $ADMIN_PASSWORD tenant_id = 1 @@ -123,4 +123,4 @@ username = root password = password EOF -mv $CONFIG_FILE_TMP $CONFIG_FILE \ No newline at end of file +mv $CONFIG_FILE_TMP $CONFIG_FILE From b0e57cf38f90a005f057fc88e7ee112cb54bba53 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 4 Nov 2011 12:13:43 -0500 Subject: [PATCH 105/967] Add CI tests --- stackrc | 4 ++++ tools/build_ci_config.sh | 10 ++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/stackrc b/stackrc index b541711f..ba98f157 100644 --- a/stackrc +++ b/stackrc @@ -39,6 +39,10 @@ OPENSTACKX_BRANCH=diablo QUANTUM_REPO=https://github.com/openstack/quantum QUANTUM_BRANCH=stable/diablo +# CI test suite +CITEST_REPO=https://github.com/openstack/openstack-integration-tests.git +CITEST_BRANCH=master + # Specify a comma-separated list of uec images to download and install into glance. 
IMAGE_URLS=http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz diff --git a/tools/build_ci_config.sh b/tools/build_ci_config.sh index 48f2a829..aecbd80b 100755 --- a/tools/build_ci_config.sh +++ b/tools/build_ci_config.sh @@ -48,6 +48,8 @@ source ./stackrc # Where Openstack code lives DEST=${DEST:-/opt/stack} +DIST_NAME=${DIST_NAME:-natty} + # Process network configuration vars GUEST_NETWORK=${GUEST_NETWORK:-1} GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes} @@ -78,9 +80,9 @@ GLANCE_PORT=$2 CONFIG_FILE_TMP=$(mktemp $CONFIG_FILE.XXXXXX) cat >$CONFIG_FILE_TMP < Date: Fri, 4 Nov 2011 13:22:09 -0500 Subject: [PATCH 106/967] Checkout tests and download image file --- tools/build_libvirt.sh | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tools/build_libvirt.sh b/tools/build_libvirt.sh index d1928794..f82399da 100755 --- a/tools/build_libvirt.sh +++ b/tools/build_libvirt.sh @@ -31,7 +31,7 @@ cleanup() { trap 2; kill -2 $$ } -trap cleanup SIGHUP SIGINT SIGTERM +trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT # Echo commands set -o xtrace @@ -165,6 +165,7 @@ git_clone $NOVACLIENT_REPO $COPY_DIR/$DEST/python-novaclient $NOVACLIENT_BRANCH git_clone $OPENSTACKX_REPO $COPY_DIR/$DEST/openstackx $OPENSTACKX_BRANCH git_clone $KEYSTONE_REPO $COPY_DIR/$DEST/keystone $KEYSTONE_BRANCH git_clone $NOVNC_REPO $COPY_DIR/$DEST/noVNC $NOVNC_BRANCH +git_clone $CITEST_REPO $COPY_DIR/$DEST/openstack-integration-tests $CITEST_BRANCH # Back to devstack cd $TOP_DIR @@ -409,6 +410,15 @@ umount $ROOTFS/dev chroot $ROOTFS dpkg-reconfigure openssh-server sed -e 's/^PasswordAuthentication.*$/PasswordAuthentication yes/' -i $ROOTFS/etc/ssh/sshd_config +# Pre-load an image for testing +UEC_NAME=$DIST_NAME-server-cloudimg-amd64 +CIVMDIR=${ROOTFS}${DEST}/openstack-integration-tests/include/sample_vm +if [ ! 
-e $CIVMDIR/$UEC_NAME.tar.gz ]; then + mkdir -p $CIVMDIR + (cd $CIVMDIR && wget -N http://uec-images.ubuntu.com/$DIST_NAME/current/$UEC_NAME.tar.gz; + tar xzf $UEC_NAME.tar.gz;) +fi + # Unmount umount $ROOTFS || echo 'ok' ROOTFS="" From a0e294852fd44fd2a1920319baf9f02a5c8c858a Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 4 Nov 2011 19:44:06 -0500 Subject: [PATCH 107/967] Change glance apiver --- tools/build_ci_config.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/build_ci_config.sh b/tools/build_ci_config.sh index aecbd80b..78ebcd68 100755 --- a/tools/build_ci_config.sh +++ b/tools/build_ci_config.sh @@ -86,9 +86,10 @@ ami_location = $DEST/openstack-integration-tests/include/sample_vm/$DIST_NAME-se [glance] host = $GLANCE_HOST -apiver = v1.0 +apiver = v1 port = $GLANCE_PORT image_id = 1 +tenant_id = 1 [keystone] service_host = $HOST_IP From ff0ed1db2732d88c056fb864b8e10fce20497ac8 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Sat, 5 Nov 2011 16:15:11 -0500 Subject: [PATCH 108/967] Support UPLOAD_LEGACY_TTY --- tools/build_ci_config.sh | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/tools/build_ci_config.sh b/tools/build_ci_config.sh index 78ebcd68..ca77ccf2 100755 --- a/tools/build_ci_config.sh +++ b/tools/build_ci_config.sh @@ -78,12 +78,27 @@ GLANCE_HOST=$1 GLANCE_PORT=$2 CONFIG_FILE_TMP=$(mktemp $CONFIG_FILE.XXXXXX) -cat >$CONFIG_FILE_TMP <$CONFIG_FILE_TMP <$CONFIG_FILE_TMP <>$CONFIG_FILE_TMP < Date: Sat, 5 Nov 2011 16:16:54 -0500 Subject: [PATCH 109/967] Cache UEC image earlier --- tools/build_libvirt.sh | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tools/build_libvirt.sh b/tools/build_libvirt.sh index f82399da..e2c33c66 100755 --- a/tools/build_libvirt.sh +++ b/tools/build_libvirt.sh @@ -167,6 +167,15 @@ git_clone $KEYSTONE_REPO $COPY_DIR/$DEST/keystone $KEYSTONE_BRANCH git_clone $NOVNC_REPO $COPY_DIR/$DEST/noVNC $NOVNC_BRANCH git_clone $CITEST_REPO $COPY_DIR/$DEST/openstack-integration-tests $CITEST_BRANCH +# Pre-load an image for testing +UEC_NAME=$DIST_NAME-server-cloudimg-amd64 +CIVMDIR=${COPY_DIR}${DEST}/openstack-integration-tests/include/sample_vm +if [ ! -e $CIVMDIR/$UEC_NAME.tar.gz ]; then + mkdir -p $CIVMDIR + (cd $CIVMDIR && wget -N http://uec-images.ubuntu.com/$DIST_NAME/current/$UEC_NAME.tar.gz; + tar xzf $UEC_NAME.tar.gz;) +fi + # Back to devstack cd $TOP_DIR @@ -410,15 +419,6 @@ umount $ROOTFS/dev chroot $ROOTFS dpkg-reconfigure openssh-server sed -e 's/^PasswordAuthentication.*$/PasswordAuthentication yes/' -i $ROOTFS/etc/ssh/sshd_config -# Pre-load an image for testing -UEC_NAME=$DIST_NAME-server-cloudimg-amd64 -CIVMDIR=${ROOTFS}${DEST}/openstack-integration-tests/include/sample_vm -if [ ! 
-e $CIVMDIR/$UEC_NAME.tar.gz ]; then - mkdir -p $CIVMDIR - (cd $CIVMDIR && wget -N http://uec-images.ubuntu.com/$DIST_NAME/current/$UEC_NAME.tar.gz; - tar xzf $UEC_NAME.tar.gz;) -fi - # Unmount umount $ROOTFS || echo 'ok' ROOTFS="" From f0b41f3fb7c8802cfbd1576e10f268e20d5e1e7b Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Mon, 7 Nov 2011 09:51:15 -0800 Subject: [PATCH 110/967] update for why we sleep --- stack.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 33eb2079..ca8ab411 100755 --- a/stack.sh +++ b/stack.sh @@ -908,7 +908,10 @@ function screen_it { NL=`echo -ne '\015'` if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then screen -S stack -X screen -t $1 - sleep 0.5 + # sleep to allow bash to be ready to be send the command - we are + # creating a new window in screen and then sends characters, so if + # bash isn't running by the time we send the command, nothing happens + sleep 1 screen -S stack -p $1 -X stuff "$2$NL" fi } From 955e5e73f2badc22532d8054bd0965e26510cab8 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Mon, 7 Nov 2011 10:29:05 -0800 Subject: [PATCH 111/967] don't need static uec meta/user data since we generate --- tools/uec/meta-data | 19 ------------------- tools/uec/user-data | 32 -------------------------------- 2 files changed, 51 deletions(-) delete mode 100644 tools/uec/meta-data delete mode 100644 tools/uec/user-data diff --git a/tools/uec/meta-data b/tools/uec/meta-data deleted file mode 100644 index d0681954..00000000 --- a/tools/uec/meta-data +++ /dev/null @@ -1,19 +0,0 @@ -#ami-id: ami-fd4aa494 -#ami-launch-index: '0' -#ami-manifest-path: ubuntu-images-us/ubuntu-lucid-10.04-amd64-server-20100427.1.manifest.xml -#block-device-mapping: {ami: sda1, ephemeral0: sdb, ephemeral1: sdc, root: /dev/sda1} -hostname: smoser-sys -#instance-action: none -instance-id: i-87018aed -instance-type: m1.large -#kernel-id: aki-c8b258a1 -local-hostname: smoser-sys.mosers.us -#local-ipv4: 10.223.26.178 -#placement: {availability-zone: us-east-1d} -#public-hostname: ec2-184-72-174-120.compute-1.amazonaws.com -#public-ipv4: 184.72.174.120 -#public-keys: -# ec2-keypair.us-east-1: [ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCD9dlT00vOUC8Ttq6YH8RzUCVqPQl6HaSfWSTKYnZiVCpTBj1CaRZPLRLmkSB9Nziy4aRJa/LZMbBHXytQKnB1psvNknqC2UNlrXXMk+Vx5S4vg21MXYYimK4uZEY0Qz29QUiTyNsx18jpAaF4ocUpTpRhxPEBCcSCDmMbc27MU2XuTbasM2NjW/w0bBF3ZFhdH68dZICXdTxS2jUrtrCnc1D/QXVZ5kQO3jsmSyJg8E0nE+6Onpx2YRoVRSwjpGzVZ+BlXPnN5xBREBG8XxzhNFHJbek+RgK5TfL+k4yD4XhnVZuZu53cBAFhj+xPKhtisSd+YmaEq+Jt9uS0Ekd5 -# ec2-keypair.us-east-1, ''] -#reservation-id: r-e2225889 -#security-groups: default diff --git a/tools/uec/user-data b/tools/uec/user-data deleted file mode 100644 index f9fa4775..00000000 --- a/tools/uec/user-data +++ /dev/null @@ -1,32 +0,0 @@ -#cloud-config -#apt_update: false -#apt_upgrade: true -#packages: [ bzr, pastebinit, ubuntu-dev-tools, ccache, bzr-builddeb, vim-nox, git-core, lftp ] - -apt_sources: - - source: ppa:smoser/ppa - -disable_root: True - -mounts: - - [ ephemeral0, None ] - - [ swap, None ] - -ssh_import_id: [smoser ] - -sm_misc: - - &user_setup | - set -x; exec > ~/user_setup.log 2>&1 - echo "starting at $(date -R)" - echo "set -o vi" >> ~/.bashrc - cat >> ~/.profile < ~/runcmd.log' ] - - [ sudo, -Hu, ubuntu, sh, -c, 'read up sleep < /proc/uptime; echo $(date): runcmd up at $up | tee -a ~/runcmd.log' ] - - [ sudo, -Hu, ubuntu, sh, -c, *user_setup ] - -password: passw0rd -chpasswd: { expire: False } From 
e3c47a351e869cd9026bc37879ccf7f9c709e285 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Mon, 7 Nov 2011 10:44:43 -0800 Subject: [PATCH 112/967] parameterize vm size, improve metadata, conditional for ubuntu version --- tools/build_uec.sh | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 3ee1e2fb..6bab526c 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -1,12 +1,9 @@ #!/usr/bin/env bash # Make sure that we have the proper version of ubuntu (only works on natty/oneiric) -UBUNTU_VERSION=`cat /etc/lsb-release | grep CODENAME | sed 's/.*=//g'` -if [ ! "oneiric" = "$UBUNTU_VERSION" ]; then - if [ ! "natty" = "$UBUNTU_VERSION" ]; then - echo "This script only works with oneiric and natty" - exit 1 - fi +if ! egrep -q "oneiric|natty" /etc/lsb-release; then + echo "This script only works with ubuntu oneiric and natty" + exit 1 fi # Keep track of the current directory @@ -21,6 +18,9 @@ source ./stackrc # Ubuntu distro to install DIST_NAME=${DIST_NAME:-oneiric} +# Configure how large the VM should be +GUEST_SIZE=${GUEST_SIZE:-10G} + # exit on error to stop unexpected errors set -o errexit set -o xtrace @@ -33,7 +33,8 @@ if [ ! -e $TOP_DIR/localrc ]; then fi # Install deps if needed -dpkg -l kvm libvirt-bin kpartx || apt-get install -y --force-yes kvm libvirt-bin kpartx +DEPS="kvm libvirt-bin kpartx" +dpkg -l $DEPS || apt-get install -y --force-yes $DEPS # Where to store files and instances WORK_DIR=${WORK_DIR:-/opt/kvmstack} @@ -50,7 +51,7 @@ tarball=$image_dir/$(basename $uec_url) if [ ! -f $tarball ]; then curl $uec_url -o $tarball (cd $image_dir && tar -Sxvzf $tarball) - resize-part-image $image_dir/*.img 10G $image_dir/disk + resize-part-image $image_dir/*.img $GUEST_SIZE $image_dir/disk cp $image_dir/*-vmlinuz-virtual $image_dir/kernel fi @@ -164,8 +165,8 @@ cp -r $TOOLS_DIR/uec $vm_dir/uec # set metadata cat > $vm_dir/uec/meta-data< Date: Mon, 7 Nov 2011 14:06:15 -0500 Subject: [PATCH 113/967] Comment out log_file options in glance configs Comment out log_file options in glance config files to make log output appear in g-api and g-reg screen windows, like the other server daemons... --- files/glance-api.conf | 2 +- files/glance-registry.conf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/files/glance-api.conf b/files/glance-api.conf index 3499ff75..bb758afb 100644 --- a/files/glance-api.conf +++ b/files/glance-api.conf @@ -24,7 +24,7 @@ registry_port = 9191 # Log to this file. Make sure you do not set the same log # file for both the API and registry servers! -log_file = %DEST%/glance/api.log +#log_file = %DEST%/glance/api.log # Send logs to syslog (/dev/log) instead of to file specified by `log_file` use_syslog = %SYSLOG% diff --git a/files/glance-registry.conf b/files/glance-registry.conf index 351b09fb..1e041860 100644 --- a/files/glance-registry.conf +++ b/files/glance-registry.conf @@ -13,7 +13,7 @@ bind_port = 9191 # Log to this file. Make sure you do not set the same log # file for both the API and registry servers! 
-log_file = %DEST%/glance/registry.log +#log_file = %DEST%/glance/registry.log # Where to store images filesystem_store_datadir = %DEST%/glance/images From 7c259cea84b474b76b811f63a4f5274214d96fb8 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 7 Nov 2011 13:18:28 -0600 Subject: [PATCH 114/967] Use eth0 to determine host ip address --- stack.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 841cbb45..dea75013 100755 --- a/stack.sh +++ b/stack.sh @@ -169,9 +169,13 @@ LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} # cases unless you are working on multi-zone mode. SCHEDULER=${SCHEDULER:-nova.scheduler.simple.SimpleScheduler} -# Use the first IP unless an explicit is set by ``HOST_IP`` environment variable +# Use the eth0 IP unless an explicit is set by ``HOST_IP`` environment variable if [ ! -n "$HOST_IP" ]; then - HOST_IP=`LC_ALL=C /sbin/ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` + HOST_IP=`LC_ALL=C /sbin/ifconfig eth0 | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` + if [ "$HOST_IP" = "" ]; + echo "Could not determine host ip address. Please specify HOST_IP in your localrc." + exit 1 + fi fi # Service startup timeout From a3475e53c1eba531e8ff718c33788abfb7c58b77 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 7 Nov 2011 13:24:00 -0600 Subject: [PATCH 115/967] fix typo --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index dea75013..9bebe840 100755 --- a/stack.sh +++ b/stack.sh @@ -172,7 +172,7 @@ SCHEDULER=${SCHEDULER:-nova.scheduler.simple.SimpleScheduler} # Use the eth0 IP unless an explicit is set by ``HOST_IP`` environment variable if [ ! -n "$HOST_IP" ]; then HOST_IP=`LC_ALL=C /sbin/ifconfig eth0 | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` - if [ "$HOST_IP" = "" ]; + if [ "$HOST_IP" = "" ]; then echo "Could not determine host ip address. Please specify HOST_IP in your localrc." exit 1 fi From 857035dc6728383579528aa1b203717279d75eaf Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 7 Nov 2011 14:02:13 -0600 Subject: [PATCH 116/967] improve comment --- stack.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 9bebe840..8d1ec8a0 100755 --- a/stack.sh +++ b/stack.sh @@ -173,7 +173,10 @@ SCHEDULER=${SCHEDULER:-nova.scheduler.simple.SimpleScheduler} if [ ! -n "$HOST_IP" ]; then HOST_IP=`LC_ALL=C /sbin/ifconfig eth0 | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` if [ "$HOST_IP" = "" ]; then - echo "Could not determine host ip address. Please specify HOST_IP in your localrc." + echo "Could not determine host ip address." + echo "If this is not your first run of stack.sh, it is " + echo "possible that nova moved your eth0 ip address to the FLAT_NETWORK_BRIDGE." + echo "Please specify your HOST_IP in your localrc." 
exit 1 fi fi From e7fa90934d3d0f1dc0a89fbf6f498e927f041d39 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 7 Nov 2011 16:10:59 -0600 Subject: [PATCH 117/967] script to warm apts/pips on a base image, to speed up performace of build_ scripts --- tools/warm_apts_and_pips.sh | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 tools/warm_apts_and_pips.sh diff --git a/tools/warm_apts_and_pips.sh b/tools/warm_apts_and_pips.sh new file mode 100644 index 00000000..c0b02b93 --- /dev/null +++ b/tools/warm_apts_and_pips.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=`cd $TOOLS_DIR/..; pwd` + +# cd to top of devstack +cd $TOP_DIR + +# Echo usage +usage() { + echo "Cache OpenStack dependencies on a uec image to speed up performance." + echo "" + echo "Usage: $0 [full path to raw uec base image]" +} + +# Make sure this is a raw image +if ! qemu-img info $1 | grep -q "file format: raw"; then + usage + exit 1 +fi + +# Mount the image +STAGING_DIR=`mktemp -d uec.XXXXXXXXXX` +mkdir -p $STAGING_DIR +mount -t ext4 -o loop $1 $STAGING_DIR + +# Make sure that base requirements are installed +cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf + +# Perform caching on the base image to speed up subsequent runs +chroot $STAGING_DIR apt-get update +chroot $STAGING_DIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1` +chroot $STAGING_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` +chroot $STAGING_DIR pip install `cat files/pips/*` +umount $STAGING_DIR && rm -rf $STAGING_DIR From 069f2f7a534b8ace5e9a6e68143c7951b65d4046 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 7 Nov 2011 16:13:03 -0600 Subject: [PATCH 118/967] +x --- tools/warm_apts_and_pips.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 tools/warm_apts_and_pips.sh diff --git a/tools/warm_apts_and_pips.sh b/tools/warm_apts_and_pips.sh old mode 100644 new mode 100755 From 8655bf0e6efdd3d87a5ed149be2ee5e3aa8473db Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 7 Nov 2011 16:37:00 -0600 Subject: [PATCH 119/967] more checks to make sure script is run as intended --- tools/warm_apts_and_pips.sh | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tools/warm_apts_and_pips.sh b/tools/warm_apts_and_pips.sh index c0b02b93..b20519fa 100755 --- a/tools/warm_apts_and_pips.sh +++ b/tools/warm_apts_and_pips.sh @@ -1,5 +1,11 @@ #!/usr/bin/env bash +# echo commands +set -o xtrace + +# exit on error to stop unexpected errors +set -o errexit + # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=`cd $TOOLS_DIR/..; pwd` @@ -20,6 +26,12 @@ if ! qemu-img info $1 | grep -q "file format: raw"; then exit 1 fi +# Make sure we are in the correct dir +if [ ! 
-d files/apts ]; then + echo "Please run this script from devstack/tools/" + exit 1 +fi + # Mount the image STAGING_DIR=`mktemp -d uec.XXXXXXXXXX` mkdir -p $STAGING_DIR From 208ae2f6aa0bf5dbc669c6fd4f2e4649c04ed039 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 7 Nov 2011 16:38:03 -0600 Subject: [PATCH 120/967] fix some comments --- tools/warm_apts_and_pips.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/warm_apts_and_pips.sh b/tools/warm_apts_and_pips.sh index b20519fa..10bd4af1 100755 --- a/tools/warm_apts_and_pips.sh +++ b/tools/warm_apts_and_pips.sh @@ -1,16 +1,16 @@ #!/usr/bin/env bash -# echo commands +# Echo commands set -o xtrace -# exit on error to stop unexpected errors +# Exit on error to stop unexpected errors set -o errexit # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=`cd $TOOLS_DIR/..; pwd` -# cd to top of devstack +# Change dir to top of devstack cd $TOP_DIR # Echo usage From 39c2efcd235b88d214bf65c93268ecd05df8f5da Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 7 Nov 2011 16:40:35 -0600 Subject: [PATCH 121/967] Change default build to oneiric --- tools/build_ci_config.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_ci_config.sh b/tools/build_ci_config.sh index ca77ccf2..91124712 100755 --- a/tools/build_ci_config.sh +++ b/tools/build_ci_config.sh @@ -48,7 +48,7 @@ source ./stackrc # Where Openstack code lives DEST=${DEST:-/opt/stack} -DIST_NAME=${DIST_NAME:-natty} +DIST_NAME=${DIST_NAME:-oneiric} # Process network configuration vars GUEST_NETWORK=${GUEST_NETWORK:-1} From 8e99829ae33aaec89dac141ed9102211a6323d87 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 7 Nov 2011 16:41:47 -0600 Subject: [PATCH 122/967] Only download UEC image if UPLOAD_LEGACY_TTY is unset; bind mount /dev for oneiric openssl --- tools/build_libvirt.sh | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/tools/build_libvirt.sh b/tools/build_libvirt.sh index e2c33c66..585f5afe 100755 --- a/tools/build_libvirt.sh +++ b/tools/build_libvirt.sh @@ -17,6 +17,11 @@ cleanup() { set +o errexit unmount_images + if [ -n "$COPY_DIR" ]; then + umount $COPY_DIR/dev + umount $COPY_DIR + fi + if [ -n "$ROOTFS" ]; then umount $ROOTFS/dev umount $ROOTFS @@ -31,7 +36,7 @@ cleanup() { trap 2; kill -2 $$ } -trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT +trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT # Echo commands set -o xtrace @@ -127,6 +132,7 @@ DEST=${DEST:-/opt/stack} # Mount the file system # For some reason, UEC-based images want 255 heads * 63 sectors * 512 byte sectors = 8225280 mount -t ext4 -o loop,offset=8225280 $VM_IMAGE $COPY_DIR +mount -o bind /dev $COPY_DIR/dev # git clone only if directory doesn't exist already. 
Since ``DEST`` might not # be owned by the installation user, we create the directory and change the @@ -149,6 +155,8 @@ chroot $COPY_DIR apt-get install -y --download-only `cat files/apts/* | grep NOP chroot $COPY_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` chroot $COPY_DIR pip install `cat files/pips/*` +umount $COPY_DIR/dev + # Clean out code repos if directed to do so if [ "$CLEAN" = "1" ]; then rm -rf $COPY_DIR/$DEST @@ -167,13 +175,15 @@ git_clone $KEYSTONE_REPO $COPY_DIR/$DEST/keystone $KEYSTONE_BRANCH git_clone $NOVNC_REPO $COPY_DIR/$DEST/noVNC $NOVNC_BRANCH git_clone $CITEST_REPO $COPY_DIR/$DEST/openstack-integration-tests $CITEST_BRANCH -# Pre-load an image for testing -UEC_NAME=$DIST_NAME-server-cloudimg-amd64 -CIVMDIR=${COPY_DIR}${DEST}/openstack-integration-tests/include/sample_vm -if [ ! -e $CIVMDIR/$UEC_NAME.tar.gz ]; then - mkdir -p $CIVMDIR - (cd $CIVMDIR && wget -N http://uec-images.ubuntu.com/$DIST_NAME/current/$UEC_NAME.tar.gz; - tar xzf $UEC_NAME.tar.gz;) +if [ -z "$UPLOAD_LEGACY_TTY" =; then + # Pre-load an image for testing + UEC_NAME=$DIST_NAME-server-cloudimg-amd64 + CIVMDIR=${COPY_DIR}${DEST}/openstack-integration-tests/include/sample_vm + if [ ! -e $CIVMDIR/$UEC_NAME.tar.gz ]; then + mkdir -p $CIVMDIR + (cd $CIVMDIR && wget -N http://uec-images.ubuntu.com/$DIST_NAME/current/$UEC_NAME.tar.gz; + tar xzf $UEC_NAME.tar.gz;) + fi fi # Back to devstack @@ -413,18 +423,20 @@ echo 'GRUB_DISABLE_OS_PROBER=true' >>$ROOTFS/etc/default/grub echo "GRUB_DEVICE_UUID=$G_DEV_UUID" >>$ROOTFS/etc/default/grub chroot $ROOTFS update-grub -umount $ROOTFS/dev # Pre-generate ssh host keys and allow password login chroot $ROOTFS dpkg-reconfigure openssh-server sed -e 's/^PasswordAuthentication.*$/PasswordAuthentication yes/' -i $ROOTFS/etc/ssh/sshd_config # Unmount +umount $ROOTFS/dev umount $ROOTFS || echo 'ok' ROOTFS="" qemu-nbd -d $NBD NBD="" +trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT + # Create the instance cd $VM_DIR && virsh create libvirt.xml From af95a4775ad01364b53a4fd6d2bcfda95dfcfa70 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 7 Nov 2011 17:01:05 -0600 Subject: [PATCH 123/967] Install pika for CI tests --- files/pips/openstack-integration-tests | 1 + 1 file changed, 1 insertion(+) create mode 100644 files/pips/openstack-integration-tests diff --git a/files/pips/openstack-integration-tests b/files/pips/openstack-integration-tests new file mode 100644 index 00000000..df7f4230 --- /dev/null +++ b/files/pips/openstack-integration-tests @@ -0,0 +1 @@ +pika From 43acae4aa51ab66d522d83568a7e9271be7e65f7 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 8 Nov 2011 14:23:56 -0800 Subject: [PATCH 124/967] add initial build_jenkins script --- tools/jenkins/build_jenkins.sh | 69 ++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100755 tools/jenkins/build_jenkins.sh diff --git a/tools/jenkins/build_jenkins.sh b/tools/jenkins/build_jenkins.sh new file mode 100755 index 00000000..a20872dc --- /dev/null +++ b/tools/jenkins/build_jenkins.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +# Echo commands, exit on error +set -o xtrace +set -o errexit + +# Make sure only root can run our script +if [[ $EUID -ne 0 ]]; then + echo "This script must be run as root" + exit 1 +fi + +# This directory +CUR_DIR=$(cd $(dirname "$0") && pwd) + +# Install software +DEPS="jenkins" +apt-get install -y --force-yes $DEPS + +# Install jenkins +if [ ! 
-e /var/lib/jenkins ]; then + echo "Jenkins installation failed" + exit 1 +fi + +# Setup sudo +JENKINS_SUDO=/etc/sudoers.d/jenkins +cat > $JENKINS_SUDO < $JENKINS_GITCONF < + + 4 + Jenkins + jenkins@rcb.me + +EOF + +# Set ownership to jenkins +chown -R jenkins $CUR_DIR + +# Set up jobs symlink +if [ ! -h /var/lib/jenkins/jobs ]; then + echo "Installing jobs symlink" + if [ -d /var/lib/jenkins/jobs ]; then + mv /var/lib/jenkins/jobs /var/lib/jenkins/jobs.old + fi + ln -s $CUR_DIR/jobs /var/lib/jenkins/jobs +fi + +# List of plugins +PLUGINS=http://hudson-ci.org/downloads/plugins/build-timeout/1.6/build-timeout.hpi,http://mirrors.jenkins-ci.org/plugins/git/1.1.12/git.hpi,http://hudson-ci.org/downloads/plugins/global-build-stats/1.2/global-build-stats.hpi,http://hudson-ci.org/downloads/plugins/greenballs/1.10/greenballs.hpi,http://download.hudson-labs.org/plugins/console-column-plugin/1.0/console-column-plugin.hpi + +# Configure plugins +for plugin in ${PLUGINS//,/ }; do + name=`basename $plugin` + dest=/var/lib/jenkins/plugins/$name + if [ ! -e $dest ]; then + curl -L $plugin -o $dest + fi +done + +# Restart jenkins +restart jenkins From ccd4ea37b80e9e6018ccf294d680a5d8e34100b5 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 8 Nov 2011 17:25:12 -0600 Subject: [PATCH 125/967] more work on jenkins stuff --- tools/jenkins/README.md | 17 +++++++++ tools/jenkins/adapters/euca.sh | 3 ++ tools/jenkins/adapters/floating.sh | 3 ++ tools/jenkins/build_configuration.sh | 19 ++++++++++ tools/jenkins/configurations/kvm.sh | 45 +++++++++++++++++++++++ tools/jenkins/{ => home}/build_jenkins.sh | 0 tools/jenkins/run_test.sh | 19 ++++++++++ 7 files changed, 106 insertions(+) create mode 100644 tools/jenkins/README.md create mode 100755 tools/jenkins/adapters/euca.sh create mode 100755 tools/jenkins/adapters/floating.sh create mode 100755 tools/jenkins/build_configuration.sh create mode 100755 tools/jenkins/configurations/kvm.sh rename tools/jenkins/{ => home}/build_jenkins.sh (100%) create mode 100755 tools/jenkins/run_test.sh diff --git a/tools/jenkins/README.md b/tools/jenkins/README.md new file mode 100644 index 00000000..74237f88 --- /dev/null +++ b/tools/jenkins/README.md @@ -0,0 +1,17 @@ +Getting Started With Jenkins and Devstack +========================================= +This little corner of devstack is to show how to get an Openstack jenkins +environment up and running quickly, using the rcb configuration methodology. + + +To manually set up a testing environment +---------------------------------------- + ./build_configuration.sh [EXECUTOR_NUMBER] [CONFIGURATION] + +For now, use "./build_configuration.sh $EXECUTOR_NUMBER kvm" + +To manually run a test +---------------------- + ./run_test.sh [EXECUTOR_NUMBER] [ADAPTER] + +For now, use "./run_test.sh $EXECUTOR_NUMBER [euca|floating]" diff --git a/tools/jenkins/adapters/euca.sh b/tools/jenkins/adapters/euca.sh new file mode 100755 index 00000000..3cd97101 --- /dev/null +++ b/tools/jenkins/adapters/euca.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +echo "$0 SUCCESS!" diff --git a/tools/jenkins/adapters/floating.sh b/tools/jenkins/adapters/floating.sh new file mode 100755 index 00000000..3cd97101 --- /dev/null +++ b/tools/jenkins/adapters/floating.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +echo "$0 SUCCESS!" 
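The euca.sh and floating.sh adapters added above are placeholders that only echo success; run_test.sh (added below) simply dispatches to ./$ADAPTER.sh with the executor number. As a rough, hypothetical sketch only — not taken from any patch in this series — a working adapter could ssh into the guest that the kvm configuration booted for this executor and run exercise.sh there. The 192.168.$EXECUTOR_NUMBER.50 address follows the GUEST_IP default used elsewhere in these tools with GUEST_NETWORK set to the executor number; the ideas that the guest is reachable over key-based ssh and has devstack checked out at /root/devstack are assumptions, not something these patches establish.

#!/bin/bash
# Hypothetical adapter sketch (e.g. adapters/euca.sh) -- assumptions noted above.
set -o errexit

EXECUTOR_NUMBER=$1

# Guest assumed to have been booted by configurations/kvm.sh for this executor
GUEST_IP=192.168.$EXECUTOR_NUMBER.50

# Run the exercise inside the guest; a non-zero exit code fails the jenkins build
ssh -o StrictHostKeyChecking=no root@$GUEST_IP "cd /root/devstack && ./exercise.sh"

echo "$0 SUCCESS!"

The same shape would work for both adapters; only the command run inside the guest would need to change.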
diff --git a/tools/jenkins/build_configuration.sh b/tools/jenkins/build_configuration.sh new file mode 100755 index 00000000..70babb62 --- /dev/null +++ b/tools/jenkins/build_configuration.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +EXECUTOR_NUMBER=$1 +CONFIGURATION=$2 + +function usage() { + echo "Usage: $0 - Build a configuration" + echo "" + echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION]" + exit 1 +} + +# Validate inputs +if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = "" ]]; then + usage +fi + +# Execute configuration script +cd configurations && ./$CONFIGURATION.sh $EXECUTOR_NUMBER $CONFIGURATION diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh new file mode 100755 index 00000000..af51d55f --- /dev/null +++ b/tools/jenkins/configurations/kvm.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +EXECUTOR_NUMBER=$1 +CONFIGURATION=$2 + +function usage() { + echo "Usage: $0 - Build a test configuration" + echo "" + echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION]" + exit 1 +} + +# Validate inputs +if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = "" ]]; then + usage +fi + +# This directory +CUR_DIR=$(cd $(dirname "$0") && pwd) + +# devstack directory +cd ../../.. +TOP_DIR=(pwd) + +# Name test instance based on executor +NAME=executor-$EXECUTOR_NUMBER +virsh destroy `virsh list | grep $NAME | cut -d " " -f1` || true + +# Configure localrc +cat <localrc +RECLONE=yes +GUEST_NETWORK=$EXECUTOR_NUMBER +GUEST_NAME=$NAME +FLOATING_RANGE=192.168.$EXECUTOR_NUMBER.128/27 +GUEST_CORES=4 +GUEST_RAM=10000000 +MYSQL_PASSWORD=chicken +RABBIT_PASSWORD=chicken +SERVICE_TOKEN=chicken +ADMIN_PASSWORD=chicken +USERNAME=admin +TENANT=admin +EOF +cd tools +sudo ./build_uec.sh diff --git a/tools/jenkins/build_jenkins.sh b/tools/jenkins/home/build_jenkins.sh similarity index 100% rename from tools/jenkins/build_jenkins.sh rename to tools/jenkins/home/build_jenkins.sh diff --git a/tools/jenkins/run_test.sh b/tools/jenkins/run_test.sh new file mode 100755 index 00000000..cf09cd55 --- /dev/null +++ b/tools/jenkins/run_test.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +EXECUTOR_NUMBER=$1 +ADAPTER=$2 + +function usage() { + echo "Usage: $0 - Run a test" + echo "" + echo "$0 [EXECUTOR_NUMBER] [ADAPTER]" + exit 1 +} + +# Validate inputs +if [[ "$EXECUTOR_NUMBER" = "" || "$ADAPTER" = "" ]]; then + usage +fi + +# Execute configuration script +cd adapters && ./$ADAPTER.sh $EXECUTOR_NUMBER $ADAPTER From 4aab9030f4c251382d2e9b8abd4da2276642d97d Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 8 Nov 2011 16:20:14 -0800 Subject: [PATCH 126/967] rename --- tools/jenkins/adapters/{floating.sh => floating_ips.sh} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tools/jenkins/adapters/{floating.sh => floating_ips.sh} (100%) diff --git a/tools/jenkins/adapters/floating.sh b/tools/jenkins/adapters/floating_ips.sh similarity index 100% rename from tools/jenkins/adapters/floating.sh rename to tools/jenkins/adapters/floating_ips.sh From 2b2c86ef34b335695198b49ca6450d8e78f7fe7a Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 8 Nov 2011 16:21:35 -0800 Subject: [PATCH 127/967] add in kvm job --- tools/jenkins/home/.gitignore | 3 + tools/jenkins/home/jobs/kvm/config.xml | 72 +++++++++++++++++++ .../axis-ADAPTER/euca/config.xml | 15 ++++ .../axis-ADAPTER/floatingips/config.xml | 15 ++++ tools/jenkins/home/jobs/kvm/nextBuildNumber | 1 + 5 files changed, 106 insertions(+) create mode 100644 tools/jenkins/home/.gitignore create mode 100644 tools/jenkins/home/jobs/kvm/config.xml create mode 100644 
tools/jenkins/home/jobs/kvm/configurations/axis-ADAPTER/euca/config.xml create mode 100644 tools/jenkins/home/jobs/kvm/configurations/axis-ADAPTER/floatingips/config.xml create mode 100644 tools/jenkins/home/jobs/kvm/nextBuildNumber diff --git a/tools/jenkins/home/.gitignore b/tools/jenkins/home/.gitignore new file mode 100644 index 00000000..d831d01c --- /dev/null +++ b/tools/jenkins/home/.gitignore @@ -0,0 +1,3 @@ +builds +workspace +*.sw* diff --git a/tools/jenkins/home/jobs/kvm/config.xml b/tools/jenkins/home/jobs/kvm/config.xml new file mode 100644 index 00000000..ccf0541b --- /dev/null +++ b/tools/jenkins/home/jobs/kvm/config.xml @@ -0,0 +1,72 @@ + + + + + false + + + 2 + + + origin + +refs/heads/*:refs/remotes/origin/* + git://github.com/cloudbuilders/devstack.git + + + + + jenkins + + + false + false + false + false + false + false + false + + Default + + + + + + + false + + + true + false + false + false + + false + + + ADAPTER + + euca + floatingips + + + + + + sed -i 's/) 2>&1 | tee "${LOGFILE}"/)/' stack.sh + + + set -o errexit +cd tools/jenkins +sudo ./build_configuration.sh $EXECUTOR_NUMBER kvm + + + set -o errexit +cd tools/jenkins +sudo ./run_test.sh $EXECUTOR_NUMBER $ADAPTER + + + + + false + \ No newline at end of file diff --git a/tools/jenkins/home/jobs/kvm/configurations/axis-ADAPTER/euca/config.xml b/tools/jenkins/home/jobs/kvm/configurations/axis-ADAPTER/euca/config.xml new file mode 100644 index 00000000..0be70a5c --- /dev/null +++ b/tools/jenkins/home/jobs/kvm/configurations/axis-ADAPTER/euca/config.xml @@ -0,0 +1,15 @@ + + + false + + + false + false + false + false + + false + + + + \ No newline at end of file diff --git a/tools/jenkins/home/jobs/kvm/configurations/axis-ADAPTER/floatingips/config.xml b/tools/jenkins/home/jobs/kvm/configurations/axis-ADAPTER/floatingips/config.xml new file mode 100644 index 00000000..0be70a5c --- /dev/null +++ b/tools/jenkins/home/jobs/kvm/configurations/axis-ADAPTER/floatingips/config.xml @@ -0,0 +1,15 @@ + + + false + + + false + false + false + false + + false + + + + \ No newline at end of file diff --git a/tools/jenkins/home/jobs/kvm/nextBuildNumber b/tools/jenkins/home/jobs/kvm/nextBuildNumber new file mode 100644 index 00000000..d00491fd --- /dev/null +++ b/tools/jenkins/home/jobs/kvm/nextBuildNumber @@ -0,0 +1 @@ +1 From 7e436c212ea2ac7fe60bddedf49548729675111e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 9 Nov 2011 00:12:00 -0800 Subject: [PATCH 128/967] use dhcp_release --- files/apts/nova | 1 + stack.sh | 1 + 2 files changed, 2 insertions(+) diff --git a/files/apts/nova b/files/apts/nova index 405d53ba..9eefed77 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -1,4 +1,5 @@ dnsmasq-base +dnsmasq-utils # for dhcp_release kpartx parted arping # used for send_arp_for_ha option in nova-network diff --git a/stack.sh b/stack.sh index 841cbb45..78851b96 100755 --- a/stack.sh +++ b/stack.sh @@ -826,6 +826,7 @@ add_nova_flag "--ec2_dmz_host=$EC2_DMZ_HOST" add_nova_flag "--rabbit_host=$RABBIT_HOST" add_nova_flag "--rabbit_password=$RABBIT_PASSWORD" add_nova_flag "--glance_api_servers=$GLANCE_HOSTPORT" +add_nova_flag "--force_dhcp_release" if [ -n "$INSTANCES_PATH" ]; then add_nova_flag "--instances_path=$INSTANCES_PATH" fi From 79a20a01df3f698530e868173e6791196b428ff7 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 9 Nov 2011 11:00:26 -0800 Subject: [PATCH 129/967] rename jenkins home --- tools/jenkins/{home => jenkins_home}/.gitignore | 0 tools/jenkins/{home => jenkins_home}/build_jenkins.sh | 0 
tools/jenkins/{home => jenkins_home}/jobs/kvm/config.xml | 0 .../jobs/kvm/configurations/axis-ADAPTER/euca/config.xml | 0 .../jobs/kvm/configurations/axis-ADAPTER/floatingips/config.xml | 0 tools/jenkins/{home => jenkins_home}/jobs/kvm/nextBuildNumber | 0 6 files changed, 0 insertions(+), 0 deletions(-) rename tools/jenkins/{home => jenkins_home}/.gitignore (100%) rename tools/jenkins/{home => jenkins_home}/build_jenkins.sh (100%) rename tools/jenkins/{home => jenkins_home}/jobs/kvm/config.xml (100%) rename tools/jenkins/{home => jenkins_home}/jobs/kvm/configurations/axis-ADAPTER/euca/config.xml (100%) rename tools/jenkins/{home => jenkins_home}/jobs/kvm/configurations/axis-ADAPTER/floatingips/config.xml (100%) rename tools/jenkins/{home => jenkins_home}/jobs/kvm/nextBuildNumber (100%) diff --git a/tools/jenkins/home/.gitignore b/tools/jenkins/jenkins_home/.gitignore similarity index 100% rename from tools/jenkins/home/.gitignore rename to tools/jenkins/jenkins_home/.gitignore diff --git a/tools/jenkins/home/build_jenkins.sh b/tools/jenkins/jenkins_home/build_jenkins.sh similarity index 100% rename from tools/jenkins/home/build_jenkins.sh rename to tools/jenkins/jenkins_home/build_jenkins.sh diff --git a/tools/jenkins/home/jobs/kvm/config.xml b/tools/jenkins/jenkins_home/jobs/kvm/config.xml similarity index 100% rename from tools/jenkins/home/jobs/kvm/config.xml rename to tools/jenkins/jenkins_home/jobs/kvm/config.xml diff --git a/tools/jenkins/home/jobs/kvm/configurations/axis-ADAPTER/euca/config.xml b/tools/jenkins/jenkins_home/jobs/kvm/configurations/axis-ADAPTER/euca/config.xml similarity index 100% rename from tools/jenkins/home/jobs/kvm/configurations/axis-ADAPTER/euca/config.xml rename to tools/jenkins/jenkins_home/jobs/kvm/configurations/axis-ADAPTER/euca/config.xml diff --git a/tools/jenkins/home/jobs/kvm/configurations/axis-ADAPTER/floatingips/config.xml b/tools/jenkins/jenkins_home/jobs/kvm/configurations/axis-ADAPTER/floatingips/config.xml similarity index 100% rename from tools/jenkins/home/jobs/kvm/configurations/axis-ADAPTER/floatingips/config.xml rename to tools/jenkins/jenkins_home/jobs/kvm/configurations/axis-ADAPTER/floatingips/config.xml diff --git a/tools/jenkins/home/jobs/kvm/nextBuildNumber b/tools/jenkins/jenkins_home/jobs/kvm/nextBuildNumber similarity index 100% rename from tools/jenkins/home/jobs/kvm/nextBuildNumber rename to tools/jenkins/jenkins_home/jobs/kvm/nextBuildNumber From 316117a5a2c4cd5af5487019d5dcc554c1ab7fb7 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 9 Nov 2011 11:10:26 -0800 Subject: [PATCH 130/967] more usability feedback --- tools/jenkins/jenkins_home/build_jenkins.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/jenkins/jenkins_home/build_jenkins.sh b/tools/jenkins/jenkins_home/build_jenkins.sh index a20872dc..e71e9e0e 100755 --- a/tools/jenkins/jenkins_home/build_jenkins.sh +++ b/tools/jenkins/jenkins_home/build_jenkins.sh @@ -44,6 +44,14 @@ EOF # Set ownership to jenkins chown -R jenkins $CUR_DIR +# Make sure this directory is accessible to jenkins +if ! su -c "ls $CUR_DIR" jenkins; then + echo "Your devstack directory is not accessible by jenkins." + echo "There is a decent chance you are trying to run this from a directory in /root." + echo "If so, try moving devstack elsewhere (eg. /opt/devstack)." + exit 1 +fi + # Set up jobs symlink if [ ! 
-h /var/lib/jenkins/jobs ]; then echo "Installing jobs symlink" From 48336d0e9f04d49879d669c30fc0b4255dd63387 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 9 Nov 2011 11:12:14 -0800 Subject: [PATCH 131/967] always update the jobs link --- tools/jenkins/jenkins_home/build_jenkins.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/jenkins/jenkins_home/build_jenkins.sh b/tools/jenkins/jenkins_home/build_jenkins.sh index e71e9e0e..fba2500b 100755 --- a/tools/jenkins/jenkins_home/build_jenkins.sh +++ b/tools/jenkins/jenkins_home/build_jenkins.sh @@ -52,15 +52,18 @@ if ! su -c "ls $CUR_DIR" jenkins; then exit 1 fi -# Set up jobs symlink +# Move aside old jobs, if present if [ ! -h /var/lib/jenkins/jobs ]; then echo "Installing jobs symlink" if [ -d /var/lib/jenkins/jobs ]; then mv /var/lib/jenkins/jobs /var/lib/jenkins/jobs.old fi - ln -s $CUR_DIR/jobs /var/lib/jenkins/jobs fi +# Set up jobs symlink +rm -f /var/lib/jenkins/jobs +ln -s $CUR_DIR/jobs /var/lib/jenkins/jobs + # List of plugins PLUGINS=http://hudson-ci.org/downloads/plugins/build-timeout/1.6/build-timeout.hpi,http://mirrors.jenkins-ci.org/plugins/git/1.1.12/git.hpi,http://hudson-ci.org/downloads/plugins/global-build-stats/1.2/global-build-stats.hpi,http://hudson-ci.org/downloads/plugins/greenballs/1.10/greenballs.hpi,http://download.hudson-labs.org/plugins/console-column-plugin/1.0/console-column-plugin.hpi From e28f77565d0dd214db5fa01bdea41c88e52dbafc Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 9 Nov 2011 11:48:09 -0800 Subject: [PATCH 132/967] install cloud-utils, so that we have resize-part-image --- tools/build_uec.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 6bab526c..a15a18ad 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash -# Make sure that we have the proper version of ubuntu (only works on natty/oneiric) -if ! egrep -q "oneiric|natty" /etc/lsb-release; then +# Make sure that we have the proper version of ubuntu (only works on oneiric) +if ! egrep -q "oneiric" /etc/lsb-release; then echo "This script only works with ubuntu oneiric and natty" exit 1 fi @@ -33,7 +33,7 @@ if [ ! 
-e $TOP_DIR/localrc ]; then fi # Install deps if needed -DEPS="kvm libvirt-bin kpartx" +DEPS="kvm libvirt-bin kpartx cloud-utils" dpkg -l $DEPS || apt-get install -y --force-yes $DEPS # Where to store files and instances From c18af14af886b2bf1d93c1595e41e734e5904006 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 9 Nov 2011 12:20:37 -0800 Subject: [PATCH 133/967] progress on out-of-box testing --- tools/jenkins/configurations/kvm.sh | 2 +- tools/jenkins/jenkins_home/build_jenkins.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh index af51d55f..0e239d2f 100755 --- a/tools/jenkins/configurations/kvm.sh +++ b/tools/jenkins/configurations/kvm.sh @@ -33,7 +33,7 @@ GUEST_NETWORK=$EXECUTOR_NUMBER GUEST_NAME=$NAME FLOATING_RANGE=192.168.$EXECUTOR_NUMBER.128/27 GUEST_CORES=4 -GUEST_RAM=10000000 +GUEST_RAM=1000000 MYSQL_PASSWORD=chicken RABBIT_PASSWORD=chicken SERVICE_TOKEN=chicken diff --git a/tools/jenkins/jenkins_home/build_jenkins.sh b/tools/jenkins/jenkins_home/build_jenkins.sh index fba2500b..bd17f1c8 100755 --- a/tools/jenkins/jenkins_home/build_jenkins.sh +++ b/tools/jenkins/jenkins_home/build_jenkins.sh @@ -14,7 +14,7 @@ fi CUR_DIR=$(cd $(dirname "$0") && pwd) # Install software -DEPS="jenkins" +DEPS="jenkins cloud-utils" apt-get install -y --force-yes $DEPS # Install jenkins From 5605ec11446d70e466f4ff0a4a50292b8c3f633f Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Wed, 9 Nov 2011 12:40:01 -0800 Subject: [PATCH 134/967] now that we are using apt-get download this file doesn't speed things up that much --- files/apts/preseed | 18 ------------------ 1 file changed, 18 deletions(-) delete mode 100644 files/apts/preseed diff --git a/files/apts/preseed b/files/apts/preseed deleted file mode 100644 index 8712d5d2..00000000 --- a/files/apts/preseed +++ /dev/null @@ -1,18 +0,0 @@ -# a collection of packages that speed up installation as they are dependencies -# of packages we can't install during bootstraping (rabbitmq-server, -# mysql-server, libvirt-bin) -# -# NOTE: only add packages to this file that aren't needed directly -mysql-common -mysql-client-5.1 -erlang-base -erlang-ssl -erlang-nox -erlang-inets -erlang-mnesia -libhtml-template-perl -gettext-base -libavahi-client3 -libxml2-utils -libpciaccess0 -libparted0debian1 From 593e9aa87a2f81b4e1ff03ca8b8ee1789164890b Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 9 Nov 2011 12:42:08 -0800 Subject: [PATCH 135/967] fix message - no natty support --- tools/build_uec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index a15a18ad..d57cb29e 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -2,7 +2,7 @@ # Make sure that we have the proper version of ubuntu (only works on oneiric) if ! egrep -q "oneiric" /etc/lsb-release; then - echo "This script only works with ubuntu oneiric and natty" + echo "This script only works with ubuntu oneiric." 
exit 1 fi From ec67f1e0644c74a6c6beb7bb42b56f8ce68ab24c Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 9 Nov 2011 17:46:28 -0800 Subject: [PATCH 136/967] more progress on jenkins on clean build --- tools/jenkins/jenkins_home/build_jenkins.sh | 7 +++++++ tools/jenkins/jenkins_home/jobs/kvm/config.xml | 4 ++-- tools/jenkins/jenkins_home/jobs/kvm/nextBuildNumber | 1 - 3 files changed, 9 insertions(+), 3 deletions(-) delete mode 100644 tools/jenkins/jenkins_home/jobs/kvm/nextBuildNumber diff --git a/tools/jenkins/jenkins_home/build_jenkins.sh b/tools/jenkins/jenkins_home/build_jenkins.sh index bd17f1c8..bbba8b07 100755 --- a/tools/jenkins/jenkins_home/build_jenkins.sh +++ b/tools/jenkins/jenkins_home/build_jenkins.sh @@ -41,6 +41,13 @@ cat > $JENKINS_GITCONF < EOF +# Add build numbers +for job in ${`ls jobs`// / }; do + if [ ! -e jobs/$job/nextBuildNumber ]; then + echo 1 > jobs/$job/nextBuildNumber + fi +done + # Set ownership to jenkins chown -R jenkins $CUR_DIR diff --git a/tools/jenkins/jenkins_home/jobs/kvm/config.xml b/tools/jenkins/jenkins_home/jobs/kvm/config.xml index ccf0541b..453044ba 100644 --- a/tools/jenkins/jenkins_home/jobs/kvm/config.xml +++ b/tools/jenkins/jenkins_home/jobs/kvm/config.xml @@ -47,7 +47,7 @@ ADAPTER euca - floatingips + floating_ips @@ -69,4 +69,4 @@ sudo ./run_test.sh $EXECUTOR_NUMBER $ADAPTER false - \ No newline at end of file + diff --git a/tools/jenkins/jenkins_home/jobs/kvm/nextBuildNumber b/tools/jenkins/jenkins_home/jobs/kvm/nextBuildNumber deleted file mode 100644 index d00491fd..00000000 --- a/tools/jenkins/jenkins_home/jobs/kvm/nextBuildNumber +++ /dev/null @@ -1 +0,0 @@ -1 From 4c012b207c3ee5318c1c1d8d798bbaa6b7ba5287 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 9 Nov 2011 18:49:47 -0800 Subject: [PATCH 137/967] remove newline --- tools/jenkins/jenkins_home/jobs/kvm/config.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/jenkins/jenkins_home/jobs/kvm/config.xml b/tools/jenkins/jenkins_home/jobs/kvm/config.xml index 453044ba..02efbc9a 100644 --- a/tools/jenkins/jenkins_home/jobs/kvm/config.xml +++ b/tools/jenkins/jenkins_home/jobs/kvm/config.xml @@ -69,4 +69,4 @@ sudo ./run_test.sh $EXECUTOR_NUMBER $ADAPTER false - + \ No newline at end of file From c1024d8987a9b35ef41f5a5615f9827d4b4e4468 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 9 Nov 2011 18:58:07 -0800 Subject: [PATCH 138/967] tweaks to warm script, add script to configure stack user --- tools/setup_stack_user.sh | 66 +++++++++++++++++++++++++++++++++++++ tools/warm_apts_and_pips.sh | 11 ++++--- 2 files changed, 73 insertions(+), 4 deletions(-) create mode 100755 tools/setup_stack_user.sh diff --git a/tools/setup_stack_user.sh b/tools/setup_stack_user.sh new file mode 100755 index 00000000..85d418ed --- /dev/null +++ b/tools/setup_stack_user.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash + +# Echo commands +set -o xtrace + +# Exit on error to stop unexpected errors +set -o errexit + +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=`cd $TOOLS_DIR/..; pwd` + +# Change dir to top of devstack +cd $TOP_DIR + +# Echo usage +usage() { + echo "Add stack user and keys" + echo "" + echo "Usage: $0 [full path to raw uec base image]" +} + +# Make sure this is a raw image +if ! 
qemu-img info $1 | grep -q "file format: raw"; then + usage + exit 1 +fi + +# Mount the image +DEST=/opt/stack +STAGING_DIR=/tmp/`echo $1 | sed "s/\//_/g"`.stage.user +mkdir -p $STAGING_DIR +umount $STAGING_DIR || true +sleep 1 +mount -t ext4 -o loop $1 $STAGING_DIR +mkdir -p $STAGING_DIR/$DEST + +# Create a stack user that is a member of the libvirtd group so that stack +# is able to interact with libvirt. +chroot $STAGING_DIR groupadd libvirtd || true +chroot $STAGING_DIR useradd stack -s /bin/bash -d $DEST -G libvirtd || true + +# a simple password - pass +echo stack:pass | chroot $STAGING_DIR chpasswd + +# and has sudo ability (in the future this should be limited to only what +# stack requires) +echo "stack ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers + +# Gracefully cp only if source file/dir exists +function cp_it { + if [ -e $1 ] || [ -d $1 ]; then + cp -pRL $1 $2 + fi +} + +# Copy over your ssh keys and env if desired +cp_it ~/.ssh $STAGING_DIR/$DEST/.ssh +cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/$DEST/.ssh/authorized_keys +cp_it ~/.gitconfig $STAGING_DIR/$DEST/.gitconfig +cp_it ~/.vimrc $STAGING_DIR/$DEST/.vimrc +cp_it ~/.bashrc $STAGING_DIR/$DEST/.bashrc + +# Give stack ownership over $DEST so it may do the work needed +chroot $STAGING_DIR chown -R stack $DEST + diff --git a/tools/warm_apts_and_pips.sh b/tools/warm_apts_and_pips.sh index 10bd4af1..854a938b 100755 --- a/tools/warm_apts_and_pips.sh +++ b/tools/warm_apts_and_pips.sh @@ -33,8 +33,10 @@ if [ ! -d files/apts ]; then fi # Mount the image -STAGING_DIR=`mktemp -d uec.XXXXXXXXXX` +STAGING_DIR=/tmp/`echo $1 | sed "s/\//_/g"`.stage mkdir -p $STAGING_DIR +umount $STAGING_DIR || true +sleep 1 mount -t ext4 -o loop $1 $STAGING_DIR # Make sure that base requirements are installed @@ -43,6 +45,7 @@ cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf # Perform caching on the base image to speed up subsequent runs chroot $STAGING_DIR apt-get update chroot $STAGING_DIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1` -chroot $STAGING_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` -chroot $STAGING_DIR pip install `cat files/pips/*` -umount $STAGING_DIR && rm -rf $STAGING_DIR +chroot $STAGING_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` || true +mkdir -p $STAGING_DIR/var/cache/pip +PIP_DOWNLOAD_CACHE=/var/cache/pip chroot $STAGING_DIR pip install `cat files/pips/*` || true +umount $STAGING_DIR From e228093ecda9300428a399600758580eff3b44fe Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 9 Nov 2011 22:29:22 -0800 Subject: [PATCH 139/967] some cleanup for utility scripts --- tools/setup_stack_user.sh | 10 +++++++++- tools/warm_apts_and_pips.sh | 2 ++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/tools/setup_stack_user.sh b/tools/setup_stack_user.sh index 85d418ed..231a20f3 100755 --- a/tools/setup_stack_user.sh +++ b/tools/setup_stack_user.sh @@ -40,9 +40,15 @@ mkdir -p $STAGING_DIR/$DEST chroot $STAGING_DIR groupadd libvirtd || true chroot $STAGING_DIR useradd stack -s /bin/bash -d $DEST -G libvirtd || true -# a simple password - pass +# Add a simple password - pass echo stack:pass | chroot $STAGING_DIR chpasswd +# Configure sudo +grep -q "^#includedir.*/etc/sudoers.d" $STAGING_DIR/etc/sudoers || + echo "#includedir /etc/sudoers.d" | sudo tee -a $STAGING_DIR/etc/sudoers +cp $TOP_DIR/files/sudo/* $STAGING_DIR/etc/sudoers.d/ +sed -e "s,%USER%,$USER,g" -i $STAGING_DIR/etc/sudoers.d/* + # and has 
sudo ability (in the future this should be limited to only what # stack requires) echo "stack ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers @@ -64,3 +70,5 @@ cp_it ~/.bashrc $STAGING_DIR/$DEST/.bashrc # Give stack ownership over $DEST so it may do the work needed chroot $STAGING_DIR chown -R stack $DEST +# Unmount +umount $STAGING_DIR diff --git a/tools/warm_apts_and_pips.sh b/tools/warm_apts_and_pips.sh index 854a938b..ec7e916c 100755 --- a/tools/warm_apts_and_pips.sh +++ b/tools/warm_apts_and_pips.sh @@ -48,4 +48,6 @@ chroot $STAGING_DIR apt-get install -y --download-only `cat files/apts/* | grep chroot $STAGING_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` || true mkdir -p $STAGING_DIR/var/cache/pip PIP_DOWNLOAD_CACHE=/var/cache/pip chroot $STAGING_DIR pip install `cat files/pips/*` || true + +# Unmount umount $STAGING_DIR From 6ecc4f2839583bae69c01c9b12cb585a7397f2d4 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 9 Nov 2011 22:38:48 -0800 Subject: [PATCH 140/967] changes to improve debugability - more legible instance name --- tools/jenkins/configurations/kvm.sh | 13 ++++++++----- tools/jenkins/jenkins_home/jobs/kvm/config.xml | 2 +- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh index 0e239d2f..7239e4db 100755 --- a/tools/jenkins/configurations/kvm.sh +++ b/tools/jenkins/configurations/kvm.sh @@ -2,16 +2,17 @@ EXECUTOR_NUMBER=$1 CONFIGURATION=$2 +ADAPTER=$3 function usage() { echo "Usage: $0 - Build a test configuration" echo "" - echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION]" + echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER]" exit 1 } # Validate inputs -if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = "" ]]; then +if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = "" || "$ADAPTER" = ""]]; then usage fi @@ -23,14 +24,16 @@ cd ../../.. 
TOP_DIR=(pwd) # Name test instance based on executor -NAME=executor-$EXECUTOR_NUMBER -virsh destroy `virsh list | grep $NAME | cut -d " " -f1` || true +BASE_NAME=executor-`printf "%02d" $EXECUTOR_NUMBER` +GUEST_NAME=$BASE_NAME.$ADAPTER +virsh destroy `virsh list | grep $BASE_NAME | cut -d " " -f1` || true +rm -f /var/lib/libvirt/dnsmasq/$BASE_NAME*.leases # Configure localrc cat <localrc RECLONE=yes GUEST_NETWORK=$EXECUTOR_NUMBER -GUEST_NAME=$NAME +GUEST_NAME=$GUEST_NAME FLOATING_RANGE=192.168.$EXECUTOR_NUMBER.128/27 GUEST_CORES=4 GUEST_RAM=1000000 diff --git a/tools/jenkins/jenkins_home/jobs/kvm/config.xml b/tools/jenkins/jenkins_home/jobs/kvm/config.xml index 02efbc9a..32ce7684 100644 --- a/tools/jenkins/jenkins_home/jobs/kvm/config.xml +++ b/tools/jenkins/jenkins_home/jobs/kvm/config.xml @@ -58,7 +58,7 @@ set -o errexit cd tools/jenkins -sudo ./build_configuration.sh $EXECUTOR_NUMBER kvm +sudo ./build_configuration.sh $EXECUTOR_NUMBER kvm $ADAPTER set -o errexit From 0ae5fd039b4504f37195c831d9dbf4dcbec436c3 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 9 Nov 2011 22:39:56 -0800 Subject: [PATCH 141/967] missing space --- tools/jenkins/configurations/kvm.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh index 7239e4db..2c52f64b 100755 --- a/tools/jenkins/configurations/kvm.sh +++ b/tools/jenkins/configurations/kvm.sh @@ -12,7 +12,7 @@ function usage() { } # Validate inputs -if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = "" || "$ADAPTER" = ""]]; then +if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = "" || "$ADAPTER" = "" ]]; then usage fi From 792b1165a4ce0d1851603e2a7b8b9126b15da8e8 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 9 Nov 2011 22:40:49 -0800 Subject: [PATCH 142/967] update builder interface --- tools/jenkins/build_configuration.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/jenkins/build_configuration.sh b/tools/jenkins/build_configuration.sh index 70babb62..cab133df 100755 --- a/tools/jenkins/build_configuration.sh +++ b/tools/jenkins/build_configuration.sh @@ -2,11 +2,12 @@ EXECUTOR_NUMBER=$1 CONFIGURATION=$2 +ADAPTER=$3 function usage() { echo "Usage: $0 - Build a configuration" echo "" - echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION]" + echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER]" exit 1 } @@ -16,4 +17,4 @@ if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = "" ]]; then fi # Execute configuration script -cd configurations && ./$CONFIGURATION.sh $EXECUTOR_NUMBER $CONFIGURATION +cd configurations && ./$CONFIGURATION.sh $EXECUTOR_NUMBER $CONFIGURATION $ADAPTER From 8b47cdf15ac0c0611503f688df376faf36172e43 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 9 Nov 2011 23:36:18 -0800 Subject: [PATCH 143/967] working to improve debugability --- tools/build_uec.sh | 7 ++++--- tools/jenkins/configurations/kvm.sh | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 6bab526c..33a6a27b 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -90,9 +90,10 @@ GUEST_CORES=${GUEST_CORES:-1} # libvirt.xml configuration NET_XML=$vm_dir/net.xml +NET_NAME=${NET_NAME:-devstack-$GUEST_NETWORK} cat > $NET_XML < - devstack-$GUEST_NETWORK + $NET_NAME @@ -104,9 +105,9 @@ cat > $NET_XML <localrc @@ -43,6 +42,7 @@ SERVICE_TOKEN=chicken ADMIN_PASSWORD=chicken USERNAME=admin TENANT=admin +NET_NAME=$GUEST_NAME EOF cd tools sudo ./build_uec.sh From 
72eab2276e264bc40417df7d1f16d8c0111cf8a2 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 9 Nov 2011 23:38:18 -0800 Subject: [PATCH 144/967] fix network name in libvirt.xml --- tools/build_uec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 33a6a27b..ad35b2ac 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -135,7 +135,7 @@ cat > $LIBVIRT_XML < - + From ec38c40a4dc70206e4cf7a4e43362e986555df42 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 10 Nov 2011 09:42:28 -0800 Subject: [PATCH 145/967] do an explicit stop/start --- tools/jenkins/jenkins_home/build_jenkins.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/jenkins/jenkins_home/build_jenkins.sh b/tools/jenkins/jenkins_home/build_jenkins.sh index bbba8b07..abbdfd19 100755 --- a/tools/jenkins/jenkins_home/build_jenkins.sh +++ b/tools/jenkins/jenkins_home/build_jenkins.sh @@ -84,4 +84,5 @@ for plugin in ${PLUGINS//,/ }; do done # Restart jenkins -restart jenkins +stop jenkins || true +start jenkins From 5df681819ab58413a7c65d147bb9e4e8c72adcf9 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 10 Nov 2011 09:56:12 -0800 Subject: [PATCH 146/967] fix typo in job iteration --- tools/jenkins/jenkins_home/build_jenkins.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/jenkins/jenkins_home/build_jenkins.sh b/tools/jenkins/jenkins_home/build_jenkins.sh index abbdfd19..567a0562 100755 --- a/tools/jenkins/jenkins_home/build_jenkins.sh +++ b/tools/jenkins/jenkins_home/build_jenkins.sh @@ -42,7 +42,8 @@ cat > $JENKINS_GITCONF < jobs/$job/nextBuildNumber fi From 484dd5512575069a22f81d48ef9ca7b55178943e Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 10 Nov 2011 10:24:18 -0800 Subject: [PATCH 147/967] switch around naming scheme --- tools/jenkins/jenkins_home/jobs/{kvm => diablo-kvm_ha}/config.xml | 0 .../configurations/axis-ADAPTER/euca/config.xml | 0 .../configurations/axis-ADAPTER/floatingips/config.xml | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename tools/jenkins/jenkins_home/jobs/{kvm => diablo-kvm_ha}/config.xml (100%) rename tools/jenkins/jenkins_home/jobs/{kvm => diablo-kvm_ha}/configurations/axis-ADAPTER/euca/config.xml (100%) rename tools/jenkins/jenkins_home/jobs/{kvm => diablo-kvm_ha}/configurations/axis-ADAPTER/floatingips/config.xml (100%) diff --git a/tools/jenkins/jenkins_home/jobs/kvm/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml similarity index 100% rename from tools/jenkins/jenkins_home/jobs/kvm/config.xml rename to tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml diff --git a/tools/jenkins/jenkins_home/jobs/kvm/configurations/axis-ADAPTER/euca/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/euca/config.xml similarity index 100% rename from tools/jenkins/jenkins_home/jobs/kvm/configurations/axis-ADAPTER/euca/config.xml rename to tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/euca/config.xml diff --git a/tools/jenkins/jenkins_home/jobs/kvm/configurations/axis-ADAPTER/floatingips/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/floatingips/config.xml similarity index 100% rename from tools/jenkins/jenkins_home/jobs/kvm/configurations/axis-ADAPTER/floatingips/config.xml rename to tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/floatingips/config.xml From ce1b4a24da7bf149e9a5551bde2e754565b945d2 Mon Sep 17 
00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 10 Nov 2011 19:36:14 +0100 Subject: [PATCH 148/967] Admin group is Member. This is not the admin group per-se but the group where users can create/delete containers. This is will be fixed properly when the swift-keystone2 middleware would be commited in keystone (should be sometime soon). --- files/swift/proxy-server.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf index fe7e39ba..2db6d322 100644 --- a/files/swift/proxy-server.conf +++ b/files/swift/proxy-server.conf @@ -16,6 +16,7 @@ account_autocreate = true use = egg:swiftkeystone2#keystone2 keystone_admin_token = %SERVICE_TOKEN% keystone_url = http://localhost:35357/v2.0 +keystone_admin_group = Member [filter:tempauth] use = egg:swift#tempauth From 904b0d7d1d3fa68b940c82da5cedd171408f71ae Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 10 Nov 2011 19:44:58 +0100 Subject: [PATCH 149/967] Install memcached with swift+keystone midleware. --- stack.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/stack.sh b/stack.sh index 78851b96..5e22be98 100755 --- a/stack.sh +++ b/stack.sh @@ -704,6 +704,11 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then # configured keystone it will checkout the directory. if [[ "$ENABLED_SERVICES" =~ "key" ]]; then swift_auth_server=keystone + + # We install the memcache server as this is will be used by the + # middleware to cache the tokens auths for a long this is needed. + apt_get install memcached + # We need a special version of bin/swift which understand the # OpenStack api 2.0, we download it until this is getting # integrated in swift. From dbcdf90df180f0505ed724fa6b6f3914d74875e5 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 10 Nov 2011 11:14:16 -0800 Subject: [PATCH 150/967] clean up for executor on each run --- tools/jenkins/configurations/kvm.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh index e4c4bc5b..9dfabfa7 100755 --- a/tools/jenkins/configurations/kvm.sh +++ b/tools/jenkins/configurations/kvm.sh @@ -26,7 +26,8 @@ TOP_DIR=(pwd) # Name test instance based on executor BASE_NAME=executor-`printf "%02d" $EXECUTOR_NUMBER` GUEST_NAME=$BASE_NAME.$ADAPTER -virsh destroy `virsh list | grep $BASE_NAME | cut -d " " -f1` || true +virsh list | grep $BASE_NAME | cut -d " " -f1 | xargs -n 1 virsh destroy || true +virsh net-list | grep $BASE_NAME | cut -d " " -f1 | xargs -n 1 virsh net-destroy || true # Configure localrc cat <localrc From ea1a5869fd6ef897ed92223e3d5cca5200a2cfd4 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 10 Nov 2011 11:27:51 -0800 Subject: [PATCH 151/967] net name should be base --- tools/jenkins/configurations/kvm.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh index 9dfabfa7..d7b12b44 100755 --- a/tools/jenkins/configurations/kvm.sh +++ b/tools/jenkins/configurations/kvm.sh @@ -43,7 +43,7 @@ SERVICE_TOKEN=chicken ADMIN_PASSWORD=chicken USERNAME=admin TENANT=admin -NET_NAME=$GUEST_NAME +NET_NAME=$BASE_NAME EOF cd tools sudo ./build_uec.sh From 053906d137822f8c3c7f7686879648ecdab0aee4 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 10 Nov 2011 11:38:09 -0800 Subject: [PATCH 152/967] add in rc --- tools/jenkins/build_configuration.sh | 7 ++++--- tools/jenkins/configurations/kvm.sh | 4 +++- 2 files 
changed, 7 insertions(+), 4 deletions(-) diff --git a/tools/jenkins/build_configuration.sh b/tools/jenkins/build_configuration.sh index cab133df..e295ef20 100755 --- a/tools/jenkins/build_configuration.sh +++ b/tools/jenkins/build_configuration.sh @@ -3,18 +3,19 @@ EXECUTOR_NUMBER=$1 CONFIGURATION=$2 ADAPTER=$3 +RC=$4 function usage() { echo "Usage: $0 - Build a configuration" echo "" - echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER]" + echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]" exit 1 } # Validate inputs -if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = "" ]]; then +if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = "" || "$ADAPTER" = "" ]]; then usage fi # Execute configuration script -cd configurations && ./$CONFIGURATION.sh $EXECUTOR_NUMBER $CONFIGURATION $ADAPTER +cd configurations && ./$CONFIGURATION.sh $EXECUTOR_NUMBER $CONFIGURATION $ADAPTER "$RC" diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh index d7b12b44..7c641d28 100755 --- a/tools/jenkins/configurations/kvm.sh +++ b/tools/jenkins/configurations/kvm.sh @@ -3,11 +3,12 @@ EXECUTOR_NUMBER=$1 CONFIGURATION=$2 ADAPTER=$3 +RC=$4 function usage() { echo "Usage: $0 - Build a test configuration" echo "" - echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER]" + echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]" exit 1 } @@ -44,6 +45,7 @@ ADMIN_PASSWORD=chicken USERNAME=admin TENANT=admin NET_NAME=$BASE_NAME +$RC EOF cd tools sudo ./build_uec.sh From 63f84bbfb60fca96a2412009f4e81c9c548afb0d Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 10 Nov 2011 11:43:41 -0800 Subject: [PATCH 153/967] pass rc to tests --- tools/jenkins/run_test.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/jenkins/run_test.sh b/tools/jenkins/run_test.sh index cf09cd55..46495637 100755 --- a/tools/jenkins/run_test.sh +++ b/tools/jenkins/run_test.sh @@ -2,11 +2,12 @@ EXECUTOR_NUMBER=$1 ADAPTER=$2 +RC=$3 function usage() { echo "Usage: $0 - Run a test" echo "" - echo "$0 [EXECUTOR_NUMBER] [ADAPTER]" + echo "$0 [EXECUTOR_NUMBER] [ADAPTER] [RC (optional)]" exit 1 } @@ -16,4 +17,4 @@ if [[ "$EXECUTOR_NUMBER" = "" || "$ADAPTER" = "" ]]; then fi # Execute configuration script -cd adapters && ./$ADAPTER.sh $EXECUTOR_NUMBER $ADAPTER +cd adapters && ./$ADAPTER.sh $EXECUTOR_NUMBER $ADAPTER "$RC" From b74b74a2b321adfc976590a58685b6a506e5db64 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Thu, 10 Nov 2011 11:47:34 -0800 Subject: [PATCH 154/967] permissions --- exercises/floating_ips.sh | 0 exercises/swift.sh | 0 2 files changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 exercises/floating_ips.sh mode change 100644 => 100755 exercises/swift.sh diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh old mode 100644 new mode 100755 diff --git a/exercises/swift.sh b/exercises/swift.sh old mode 100644 new mode 100755 From a4e6d13656dae54d419f7dd94763e7775f1572e3 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 10 Nov 2011 11:55:29 -0800 Subject: [PATCH 155/967] add clean script --- tools/jenkins/jenkins_home/clean.sh | 21 +++++++++++++++++++ .../jobs/diablo-kvm_ha/config.xml | 16 +++++++++++--- 2 files changed, 34 insertions(+), 3 deletions(-) create mode 100755 tools/jenkins/jenkins_home/clean.sh diff --git a/tools/jenkins/jenkins_home/clean.sh b/tools/jenkins/jenkins_home/clean.sh new file mode 100755 index 00000000..f92e0d23 --- /dev/null +++ b/tools/jenkins/jenkins_home/clean.sh @@ -0,0 +1,21 @@ 
+#!/bin/bash +# This script is not yet for general consumption. + +set -o errexit + +if [ ! "$FORCE" = 1 ]; then + echo "FORCE not set to 1. Make sure this is something you really want to do. Exiting." + exit 1 +fi + +exit +virsh list | cut -d " " -f1 | grep -v "-" | egrep -e "[0-9]" | xargs -n 1 virsh destroy || true +virsh net-list | grep active | cut -d " " -f1 | xargs -n 1 virsh net-destroy || true +killall dnsmasq +rm -rf jobs +rm /var/lib/jenkins/jobs +git checkout -f +git fetch +git merge origin/jenkins +./build_jenkins.sh + diff --git a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml index 32ce7684..820e9d6d 100644 --- a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml +++ b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml @@ -3,7 +3,17 @@ false - + + + + + RC + + + + + + 2 @@ -58,12 +68,12 @@ set -o errexit cd tools/jenkins -sudo ./build_configuration.sh $EXECUTOR_NUMBER kvm $ADAPTER +sudo ./build_configuration.sh $EXECUTOR_NUMBER kvm $ADAPTER "$RC" set -o errexit cd tools/jenkins -sudo ./run_test.sh $EXECUTOR_NUMBER $ADAPTER +sudo ./run_test.sh $EXECUTOR_NUMBER $ADAPTER $RC "$RC" From b225682189b872f490fa285a67e5d0d1e5351ed0 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 10 Nov 2011 12:57:59 -0800 Subject: [PATCH 156/967] progress on getting a working test configuration --- tools/build_uec.sh | 23 +++++++++++++++++++++ tools/jenkins/adapters/euca.sh | 7 ++++++- tools/jenkins/configurations/kvm.sh | 3 +++ tools/jenkins/jenkins_home/build_jenkins.sh | 7 +++++++ tools/setup_stack_user.sh | 4 ---- 5 files changed, 39 insertions(+), 5 deletions(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index bbb8b37c..2fb8b500 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -187,6 +187,29 @@ cat > localrc < $vm_dir/uec/user-data< /opt/stack/.ssh/authorized_keys +chown -R stack /opt/stack +chmod 700 /opt/stack/.ssh +chmod 600 /opt/stack/.ssh/authorized_keys + +grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || + echo "#includedir /etc/sudoers.d" >> /etc/sudoers +( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \ + > /etc/sudoers.d/50_stack_sh ) +EOF +fi + +# Run stack.sh +cat > $vm_dir/uec/user-data< $TOP_DIR/addresses diff --git a/tools/jenkins/jenkins_home/build_jenkins.sh b/tools/jenkins/jenkins_home/build_jenkins.sh index 567a0562..1a3407f2 100755 --- a/tools/jenkins/jenkins_home/build_jenkins.sh +++ b/tools/jenkins/jenkins_home/build_jenkins.sh @@ -10,6 +10,13 @@ if [[ $EUID -ne 0 ]]; then exit 1 fi +# Make sure user has configured an ssh pubkey +if [ ! -e /root/.ssh/id_rsa.pub ]; then + echo "Public key is missing. This is used to ssh into your instances." 
+ echo "Please run ssh-keygen before proceeding" + exit 1 +fi + # This directory CUR_DIR=$(cd $(dirname "$0") && pwd) diff --git a/tools/setup_stack_user.sh b/tools/setup_stack_user.sh index 231a20f3..fcb97333 100755 --- a/tools/setup_stack_user.sh +++ b/tools/setup_stack_user.sh @@ -49,10 +49,6 @@ grep -q "^#includedir.*/etc/sudoers.d" $STAGING_DIR/etc/sudoers || cp $TOP_DIR/files/sudo/* $STAGING_DIR/etc/sudoers.d/ sed -e "s,%USER%,$USER,g" -i $STAGING_DIR/etc/sudoers.d/* -# and has sudo ability (in the future this should be limited to only what -# stack requires) -echo "stack ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers - # Gracefully cp only if source file/dir exists function cp_it { if [ -e $1 ] || [ -d $1 ]; then From 2838f12e75d608249c9c2f2c3e2751ad7859e9d5 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 10 Nov 2011 13:04:40 -0800 Subject: [PATCH 157/967] fix appends on user data --- tools/build_uec.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 2fb8b500..4ffd02a2 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -171,7 +171,7 @@ instance-type: m1.ignore local-hostname: $GUEST_NAME.local EOF -# set metadata +# set user-data cat > $vm_dir/uec/user-data< $vm_dir/uec/user-data<> $vm_dir/uec/user-data< $vm_dir/uec/user-data<> $vm_dir/uec/user-data< Date: Thu, 10 Nov 2011 13:05:13 -0800 Subject: [PATCH 158/967] Don't clone or install disabled services. --- stack.sh | 94 ++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 64 insertions(+), 30 deletions(-) diff --git a/stack.sh b/stack.sh index 6b3e09c4..56d91358 100755 --- a/stack.sh +++ b/stack.sh @@ -405,25 +405,39 @@ function git_clone { # compute service git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH -# storage service -git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH -# swift + keystone middleware -git_clone $SWIFT_KEYSTONE_REPO $SWIFT_KEYSTONE_DIR $SWIFT_KEYSTONE_BRANCH -# image catalog service -git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH -# unified auth system (manages accounts/tokens) -git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH -# a websockets/html5 or flash powered VNC console for vm instances -git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH -# django powered web control panel for openstack -git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG # python client library to nova that horizon (and others) use git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH -# openstackx is a collection of extensions to openstack.compute & nova -# that is *deprecated*. The code is being moved into python-novaclient & nova. 
-git_clone $OPENSTACKX_REPO $OPENSTACKX_DIR $OPENSTACKX_BRANCH -# quantum -git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH +if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then + # storage service + git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH + # swift + keystone middleware + git_clone $SWIFT_KEYSTONE_REPO $SWIFT_KEYSTONE_DIR $SWIFT_KEYSTONE_BRANCH +fi +if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then + # image catalog service + git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH +fi +if [[ "$ENABLED_SERVICES" =~ "key" ]]; then + # unified auth system (manages accounts/tokens) + git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH +fi +if [[ "$ENABLED_SERVICES" =~ "n-vnc" ]]; then + # a websockets/html5 or flash powered VNC console for vm instances + git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH +fi +if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then + # django powered web control panel for openstack + git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG +fi +if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then + # openstackx is a collection of extensions to openstack.compute & nova + # that is *deprecated*. The code is being moved into python-novaclient & nova. + git_clone $OPENSTACKX_REPO $OPENSTACKX_DIR $OPENSTACKX_BRANCH +fi +if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then + # quantum + git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH +fi # Initialization # ============== @@ -431,16 +445,28 @@ git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH # setup our checkouts so they are installed into python path # allowing ``import nova`` or ``import glance.client`` -cd $KEYSTONE_DIR; sudo python setup.py develop -cd $SWIFT_DIR; sudo python setup.py develop -cd $SWIFT_KEYSTONE_DIR; sudo python setup.py develop -cd $GLANCE_DIR; sudo python setup.py develop +if [[ "$ENABLED_SERVICES" =~ "key" ]]; then + cd $KEYSTONE_DIR; sudo python setup.py develop +fi +if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then + cd $SWIFT_DIR; sudo python setup.py develop + cd $SWIFT_KEYSTONE_DIR; sudo python setup.py develop +fi +if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then + cd $GLANCE_DIR; sudo python setup.py develop +fi cd $NOVACLIENT_DIR; sudo python setup.py develop cd $NOVA_DIR; sudo python setup.py develop -cd $OPENSTACKX_DIR; sudo python setup.py develop -cd $HORIZON_DIR/django-openstack; sudo python setup.py develop -cd $HORIZON_DIR/openstack-dashboard; sudo python setup.py develop -cd $QUANTUM_DIR; sudo python setup.py develop +if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then + cd $OPENSTACKX_DIR; sudo python setup.py develop +fi +if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then + cd $HORIZON_DIR/django-openstack; sudo python setup.py develop + cd $HORIZON_DIR/openstack-dashboard; sudo python setup.py develop +fi +if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then + cd $QUANTUM_DIR; sudo python setup.py develop +fi # Add a useful screenrc. 
This isn't required to run openstack but is we do # it since we are going to run the services in screen for simple @@ -819,9 +845,13 @@ add_nova_flag "--public_interface=$PUBLIC_INTERFACE" add_nova_flag "--vlan_interface=$VLAN_INTERFACE" add_nova_flag "--sql_connection=$BASE_SQL_CONN/nova" add_nova_flag "--libvirt_type=$LIBVIRT_TYPE" -add_nova_flag "--osapi_extensions_path=$OPENSTACKX_DIR/extensions" -add_nova_flag "--vncproxy_url=http://$HOST_IP:6080" -add_nova_flag "--vncproxy_wwwroot=$NOVNC_DIR/" +if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then + add_nova_flag "--osapi_extensions_path=$OPENSTACKX_DIR/extensions" +fi +if [[ "$ENABLED_SERVICES" =~ "n-vnc" ]]; then + add_nova_flag "--vncproxy_url=http://$HOST_IP:6080" + add_nova_flag "--vncproxy_wwwroot=$NOVNC_DIR/" +fi add_nova_flag "--api_paste_config=$NOVA_DIR/bin/nova-api-paste.ini" add_nova_flag "--image_service=nova.image.glance.GlanceImageService" add_nova_flag "--ec2_dmz_host=$EC2_DMZ_HOST" @@ -1014,8 +1044,12 @@ screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_DIR/bin/nova-compute" screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume" screen_it n-net "cd $NOVA_DIR && $NOVA_DIR/bin/nova-network" screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler" -screen_it n-vnc "cd $NOVNC_DIR && ./utils/nova-wsproxy.py --flagfile $NOVA_DIR/bin/nova.conf --web . 6080" -screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log" +if [[ "$ENABLED_SERVICES" =~ "n-vnc" ]]; then + screen_it n-vnc "cd $NOVNC_DIR && ./utils/nova-wsproxy.py --flagfile $NOVA_DIR/bin/nova.conf --web . 6080" +fi +if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then + screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log" +fi # Install Images # ============== From ddb44b4e09709c4d084c65003034d02ed417c5bd Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 10 Nov 2011 13:06:44 -0800 Subject: [PATCH 159/967] missing quotes around pub key --- tools/build_uec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 4ffd02a2..174ac473 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -196,7 +196,7 @@ mkdir -p /opt/stack useradd stack -s /bin/bash -d /opt/stack -G libvirtd || true echo stack:pass | chpasswd mkdir -p /opt/stack/.ssh -echo `cat ~/.ssh/id_rsa.pub` > /opt/stack/.ssh/authorized_keys +echo "`cat ~/.ssh/id_rsa.pub`" > /opt/stack/.ssh/authorized_keys chown -R stack /opt/stack chmod 700 /opt/stack/.ssh chmod 600 /opt/stack/.ssh/authorized_keys From b7661282d10eefe9df260841286f49d04f248c7a Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 10 Nov 2011 13:09:25 -0800 Subject: [PATCH 160/967] still trying to get pub key working --- tools/build_uec.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 174ac473..eabc1e73 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -191,12 +191,13 @@ EOF # Setup stack user with our key if [ -e ~/.ssh/id_rsa.pub ]; then + PUB_KEY=`cat ~/.ssh/id_rsa.pub` cat >> $vm_dir/uec/user-data< /opt/stack/.ssh/authorized_keys +echo "$PUB_KEY" > /opt/stack/.ssh/authorized_keys chown -R stack /opt/stack chmod 700 /opt/stack/.ssh chmod 600 /opt/stack/.ssh/authorized_keys From 33d2a4e2d9318443c1e2b1b362ac02d127078a09 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 10 Nov 2011 13:26:16 -0800 Subject: [PATCH 161/967] fix typo --- tools/jenkins/configurations/kvm.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh index a6ac2976..65fec292 100755 --- a/tools/jenkins/configurations/kvm.sh +++ b/tools/jenkins/configurations/kvm.sh @@ -22,7 +22,7 @@ CUR_DIR=$(cd $(dirname "$0") && pwd) # devstack directory cd ../../.. -TOP_DIR=(pwd) +TOP_DIR=$(pwd) # Name test instance based on executor BASE_NAME=executor-`printf "%02d" $EXECUTOR_NUMBER` From ff7771e5fb8ac249480abc1e39a27b8abe310e6c Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 10 Nov 2011 13:33:31 -0800 Subject: [PATCH 162/967] fix useradd --- tools/build_uec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index eabc1e73..16ecd64d 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -194,7 +194,7 @@ if [ -e ~/.ssh/id_rsa.pub ]; then PUB_KEY=`cat ~/.ssh/id_rsa.pub` cat >> $vm_dir/uec/user-data< /opt/stack/.ssh/authorized_keys From 7faa17e3683e7749392497c913dbc516bc0a49f0 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 10 Nov 2011 13:43:01 -0800 Subject: [PATCH 163/967] byobu-disable --- tools/build_uec.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 16ecd64d..90c8e9cf 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -206,6 +206,8 @@ grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || echo "#includedir /etc/sudoers.d" >> /etc/sudoers ( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \ > /etc/sudoers.d/50_stack_sh ) +# Disable byobu +byobu-disable EOF fi From 760ddde15747261a1528e04880e69400b2144bee Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 10 Nov 2011 13:46:52 -0800 Subject: [PATCH 164/967] configuring stack user is optional --- tools/build_uec.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 90c8e9cf..7e91e22a 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -187,10 +187,13 @@ cat > localrc <> $vm_dir/uec/user-data<> /etc/sudoers ( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \ > /etc/sudoers.d/50_stack_sh ) -# Disable byobu -byobu-disable EOF fi From 1277eab04d0040ae02c7c1a4daabecb57e7cea53 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 10 Nov 2011 14:06:13 -0800 Subject: [PATCH 165/967] run tests for floaitng ips --- tools/jenkins/adapters/floating_ips.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/jenkins/adapters/floating_ips.sh b/tools/jenkins/adapters/floating_ips.sh index 3cd97101..a97f9357 100755 --- a/tools/jenkins/adapters/floating_ips.sh +++ b/tools/jenkins/adapters/floating_ips.sh @@ -1,3 +1,8 @@ #!/bin/bash +# Echo commands, exit on error +set -o xtrace +set -o errexit -echo "$0 SUCCESS!" +TOP_DIR=$(cd ../../.. && pwd) +HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2` +ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./floating_ips.sh' From 305e3f5524603f700eaeea31586f6a015b5aec29 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 10 Nov 2011 14:36:42 -0800 Subject: [PATCH 166/967] tweaks to the hacky clean script --- tools/jenkins/jenkins_home/clean.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/jenkins/jenkins_home/clean.sh b/tools/jenkins/jenkins_home/clean.sh index f92e0d23..9d17f976 100755 --- a/tools/jenkins/jenkins_home/clean.sh +++ b/tools/jenkins/jenkins_home/clean.sh @@ -3,19 +3,19 @@ set -o errexit -if [ ! "$FORCE" = 1 ]; then +if [ ! "$FORCE" = "yes" ]; then echo "FORCE not set to 1. 
Make sure this is something you really want to do. Exiting." exit 1 fi -exit virsh list | cut -d " " -f1 | grep -v "-" | egrep -e "[0-9]" | xargs -n 1 virsh destroy || true virsh net-list | grep active | cut -d " " -f1 | xargs -n 1 virsh net-destroy || true -killall dnsmasq -rm -rf jobs +killall dnsmasq || true +if [ "$CLEAN" = "yes" ]; then + rm -rf jobs +fi rm /var/lib/jenkins/jobs git checkout -f git fetch git merge origin/jenkins ./build_jenkins.sh - From 2f2160eac2be76d778abe1c8d85147799f7186cc Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 10 Nov 2011 23:46:08 +0100 Subject: [PATCH 167/967] Force creation of the symlink. It would allow to not fail when we run stack.sh again. --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 78851b96..2577d140 100755 --- a/stack.sh +++ b/stack.sh @@ -691,7 +691,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then # swift-init has a bug using /etc/swift until bug #885595 is fixed # we have to create a link - sudo ln -s ${SWIFT_CONFIG_LOCATION} /etc/swift + sudo ln -sf ${SWIFT_CONFIG_LOCATION} /etc/swift # Swift use rsync to syncronize between all the different # partitions (which make more sense when you have a multi-node From 674db1ab2c041cae7f2fb00c31a6d4370f1e76d6 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 10 Nov 2011 14:46:52 -0800 Subject: [PATCH 168/967] fix usage for clean.sh --- tools/jenkins/jenkins_home/clean.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/jenkins/jenkins_home/clean.sh b/tools/jenkins/jenkins_home/clean.sh index 9d17f976..eb03022a 100755 --- a/tools/jenkins/jenkins_home/clean.sh +++ b/tools/jenkins/jenkins_home/clean.sh @@ -4,7 +4,7 @@ set -o errexit if [ ! "$FORCE" = "yes" ]; then - echo "FORCE not set to 1. Make sure this is something you really want to do. Exiting." + echo "FORCE not set to 'yes'. Make sure this is something you really want to do. Exiting." exit 1 fi From c639ef0123b7c46eb79f3955a77469b955e2f95d Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Thu, 10 Nov 2011 15:11:28 -0800 Subject: [PATCH 169/967] Source stackrc in exercises. Exit with error if there are failed tests. 
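
In isolation, the failure handling this patch adds to exercise.sh amounts to: run each exercise script, remember the ones that fail, report them, and exit non-zero. A minimal standalone sketch of that pattern follows — the exercises/*.sh glob is simplified and the skip-list handling is omitted, so this is an illustration of the idea rather than the patch itself:

#!/usr/bin/env bash
# Pull in the shared settings the exercises expect, as this patch starts doing.
source ./stackrc

# Run every exercise script, collecting the names of the ones that fail.
failures=""
for script in exercises/*.sh; do
    if ! "$script"; then
        failures="$failures $script"
    fi
done

# Report the failures, then propagate a non-zero exit status.
for script in $failures; do
    echo "FAILED $script"
done
if [ -n "$failures" ]; then
    exit 1
fi

The exit 1 is the substantive addition: it lets a caller — for example the Jenkins jobs under tools/jenkins — distinguish a run with failed exercises from a clean one.
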
--- exercise.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/exercise.sh b/exercise.sh index 7703f401..dd45c5ac 100755 --- a/exercise.sh +++ b/exercise.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash +source ./stackrc # Run everything in the exercises/ directory that isn't explicitly disabled # comma separated list of script basenames to skip @@ -44,3 +45,7 @@ done for script in $failures; do echo FAILED $script done + +if [ -n "$failures" ] ; then + exit 1 +fi From 7f91d5e3115adeba4aad1c7ca9767d0173deb4d8 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 11 Nov 2011 09:49:47 -0800 Subject: [PATCH 170/967] add in xs configuration --- tools/jenkins/configurations/xs.sh | 53 ++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 tools/jenkins/configurations/xs.sh diff --git a/tools/jenkins/configurations/xs.sh b/tools/jenkins/configurations/xs.sh new file mode 100644 index 00000000..cfcf0f8c --- /dev/null +++ b/tools/jenkins/configurations/xs.sh @@ -0,0 +1,53 @@ +#!/bin/bash +set -o errexit +set -o xtrace + + +EXECUTOR_NUMBER=$1 +CONFIGURATION=$2 +ADAPTER=$3 +RC=$4 + +function usage() { + echo "Usage: $0 - Build a test configuration" + echo "" + echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]" + exit 1 +} + +# Validate inputs +if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = "" || "$ADAPTER" = "" ]]; then + usage +fi + +# Configuration of xenrc +XENRC=/var/lib/jenkins/xenrc +if [ ! -e $XENRC ]; then + echo "/var/lib/jenkins/xenrc is not present! See README.md" + exit 1 +fi + +# Move to top of devstack +cd ../.. + +# Use xenrc as the start of our localrc +cp $XENRC localrc + +# Set the PUB_IP +PUB_IP=192.168.1.1$EXECUTOR_NUMBER +echo "PUB_IP=$PUB_IP" >> localrc + +# Overrides +echo "$RC" >> localrc + +# Source localrc +. localrc + +# Make host ip available to tester +echo "HEAD=$PUB_IP" > addresses + +# Build configuration +REMOTE_DEVSTACK=/root/devstack +ssh root@$XEN_IP "rm -rf $REMOTE_DEVSTACK" +scp -pr . root@$XEN_IP:$REMOTE_DEVSTACK +ssh root@$XEN_IP "cd $REMOTE_DEVSTACK/tools/xen && ./build_domU.sh" From aae02c09a7ad202341adefa87eba50617726c972 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 11 Nov 2011 09:51:14 -0800 Subject: [PATCH 171/967] +x --- tools/jenkins/configurations/xs.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 tools/jenkins/configurations/xs.sh diff --git a/tools/jenkins/configurations/xs.sh b/tools/jenkins/configurations/xs.sh old mode 100644 new mode 100755 From ad0378613b44bea53db74fcb26909c2f815458ad Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 11 Nov 2011 09:53:22 -0800 Subject: [PATCH 172/967] fix path --- tools/jenkins/configurations/xs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/jenkins/configurations/xs.sh b/tools/jenkins/configurations/xs.sh index cfcf0f8c..864f9491 100755 --- a/tools/jenkins/configurations/xs.sh +++ b/tools/jenkins/configurations/xs.sh @@ -28,7 +28,7 @@ if [ ! -e $XENRC ]; then fi # Move to top of devstack -cd ../.. +cd ../../.. 
# Use xenrc as the start of our localrc cp $XENRC localrc From fa4ecc6e2be8b2e08b489cba4a15334c5db68ad8 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 11 Nov 2011 10:23:22 -0800 Subject: [PATCH 173/967] destroy orphaned vdis --- tools/xen/build_domU.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/xen/build_domU.sh b/tools/xen/build_domU.sh index 6362849e..d79d5c3e 100755 --- a/tools/xen/build_domU.sh +++ b/tools/xen/build_domU.sh @@ -240,6 +240,11 @@ if [ "$DO_SHUTDOWN" = "1" ]; then xe vm-shutdown uuid=$uuid xe vm-destroy uuid=$uuid done + + # Destroy orphaned vdis + for uuid in `xe vdi-list | grep -1 Glance | grep uuid | sed "s/.*\: //g"`; do + xe vdi-destroy uuid=$uuid + done fi # Path to head xva. By default keep overwriting the same one to save space From 3bff7b3d51aff27e215cb5b7fccbe632ecdd7aa7 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 11 Nov 2011 10:51:24 -0800 Subject: [PATCH 174/967] increase timeouts for kvm --- tools/jenkins/configurations/kvm.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh index 65fec292..ea308712 100755 --- a/tools/jenkins/configurations/kvm.sh +++ b/tools/jenkins/configurations/kvm.sh @@ -45,6 +45,8 @@ ADMIN_PASSWORD=chicken USERNAME=admin TENANT=admin NET_NAME=$BASE_NAME +ACTIVE_TIMEOUT=45 +BOOT_TIMEOUT=45 $RC EOF cd tools From c8af0c5d8980374610e8c5c5dac4f3af70144920 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 11 Nov 2011 11:17:47 -0800 Subject: [PATCH 175/967] tweak defaults for kvm config --- tools/jenkins/configurations/kvm.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh index ea308712..3e07113f 100755 --- a/tools/jenkins/configurations/kvm.sh +++ b/tools/jenkins/configurations/kvm.sh @@ -36,8 +36,8 @@ RECLONE=yes GUEST_NETWORK=$EXECUTOR_NUMBER GUEST_NAME=$GUEST_NAME FLOATING_RANGE=192.168.$EXECUTOR_NUMBER.128/27 -GUEST_CORES=4 -GUEST_RAM=1000000 +GUEST_CORES=1 +GUEST_RAM=12574720 MYSQL_PASSWORD=chicken RABBIT_PASSWORD=chicken SERVICE_TOKEN=chicken From 715944008accc1990f41573000f5c65284f9adc9 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 11 Nov 2011 11:19:57 -0800 Subject: [PATCH 176/967] byobu is nor getting disabled :/ --- tools/build_uec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 7e91e22a..3eafe5e0 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -188,7 +188,7 @@ ROOTSLEEP=0 `cat $TOP_DIR/localrc` LOCAL_EOF # Disable byobu -byobu-disable +/usr/bin/byobu-disable EOF # Setup stack user with our key From 723e2d2246b86ba9ca5c9cfe63c240dc98e25325 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 11 Nov 2011 13:42:09 -0800 Subject: [PATCH 177/967] trunk jenkins! --- tools/jenkins/jenkins_home/build_jenkins.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/jenkins/jenkins_home/build_jenkins.sh b/tools/jenkins/jenkins_home/build_jenkins.sh index 1a3407f2..2bc856d1 100755 --- a/tools/jenkins/jenkins_home/build_jenkins.sh +++ b/tools/jenkins/jenkins_home/build_jenkins.sh @@ -20,6 +20,11 @@ fi # This directory CUR_DIR=$(cd $(dirname "$0") && pwd) +# Configure trunk jenkins! 
+echo "deb http://pkg.jenkins-ci.org/debian binary/" > /etc/apt/sources.list.d/jenkins.list +wget -q -O - http://pkg.jenkins-ci.org/debian/jenkins-ci.org.key | sudo apt-key add - +apt-get update + # Install software DEPS="jenkins cloud-utils" apt-get install -y --force-yes $DEPS From c0d4e678d4bbbdcae1f09ef18319bf4f3c2950ff Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 11 Nov 2011 13:46:44 -0800 Subject: [PATCH 178/967] use /etc/init.d/jenkins to start/stop server --- tools/jenkins/jenkins_home/build_jenkins.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/jenkins/jenkins_home/build_jenkins.sh b/tools/jenkins/jenkins_home/build_jenkins.sh index 2bc856d1..9a32a0a1 100755 --- a/tools/jenkins/jenkins_home/build_jenkins.sh +++ b/tools/jenkins/jenkins_home/build_jenkins.sh @@ -97,5 +97,5 @@ for plugin in ${PLUGINS//,/ }; do done # Restart jenkins -stop jenkins || true -start jenkins +/etc/init.d/jenkins stop || true +/etc/init.d/jenkins start From ec74eef1870112478a2593bcaa622efbc260bd94 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Fri, 11 Nov 2011 13:51:55 -0800 Subject: [PATCH 179/967] build pxe env tweaks --- tools/{build_pxe_boot.sh => build_pxe_env.sh} | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) rename tools/{build_pxe_boot.sh => build_pxe_env.sh} (93%) diff --git a/tools/build_pxe_boot.sh b/tools/build_pxe_env.sh similarity index 93% rename from tools/build_pxe_boot.sh rename to tools/build_pxe_env.sh index ab640983..1ab51f89 100755 --- a/tools/build_pxe_boot.sh +++ b/tools/build_pxe_env.sh @@ -1,11 +1,14 @@ #!/bin/bash -e -# build_pxe_boot.sh - Create a PXE boot environment +# build_pxe_env.sh - Create a PXE boot environment # -# build_pxe_boot.sh destdir +# build_pxe_env.sh destdir +# +# Requires Ubuntu Oneiric # -# Assumes syslinux is installed # Only needs to run as root if the destdir permissions require it +dpkg -l syslinux || apt-get install -y syslinux + DEST_DIR=${1:-/tmp}/tftpboot PXEDIR=${PXEDIR:-/var/cache/devstack/pxe} OPWD=`pwd` From 57346b7fcd63fe746491d088864e5f8d2c0822c3 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 11 Nov 2011 13:53:56 -0800 Subject: [PATCH 180/967] show how to uninstall jenkins if there are issues upgrading --- tools/jenkins/jenkins_home/build_jenkins.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools/jenkins/jenkins_home/build_jenkins.sh b/tools/jenkins/jenkins_home/build_jenkins.sh index 9a32a0a1..7d68679e 100755 --- a/tools/jenkins/jenkins_home/build_jenkins.sh +++ b/tools/jenkins/jenkins_home/build_jenkins.sh @@ -25,6 +25,13 @@ echo "deb http://pkg.jenkins-ci.org/debian binary/" > /etc/apt/sources.list.d/je wget -q -O - http://pkg.jenkins-ci.org/debian/jenkins-ci.org.key | sudo apt-key add - apt-get update + +# Clean out old jenkins - useful if you are having issues upgrading +CLEAN_JENKINS=${CLEAN_JENKINS:-no} +if [ "$CLEAN_JENKINS" = "yes" ] then; + apt-get remove jenkins jenkins-common +fi + # Install software DEPS="jenkins cloud-utils" apt-get install -y --force-yes $DEPS From 2679303c89334cbd0c79cd143e22c807788fcde3 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Fri, 11 Nov 2011 13:56:29 -0800 Subject: [PATCH 181/967] build_uec requires libvirt --- tools/build_uec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 
d57cb29e..80373dc3 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -33,7 +33,7 @@ if [ ! -e $TOP_DIR/localrc ]; then fi # Install deps if needed -DEPS="kvm libvirt-bin kpartx cloud-utils" +DEPS="kvm libvirt-bin kpartx cloud-utils curl" dpkg -l $DEPS || apt-get install -y --force-yes $DEPS # Where to store files and instances From c20428241ad8f6d034436b1bc3507d5e56b28631 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Fri, 11 Nov 2011 13:59:05 -0800 Subject: [PATCH 182/967] always check deps --- tools/build_uec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 80373dc3..d95ba77d 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -34,7 +34,7 @@ fi # Install deps if needed DEPS="kvm libvirt-bin kpartx cloud-utils curl" -dpkg -l $DEPS || apt-get install -y --force-yes $DEPS +apt-get install -y --force-yes $DEPS # Where to store files and instances WORK_DIR=${WORK_DIR:-/opt/kvmstack} From 4a10d436c0b6ca57b29eece5334bf95edf4dba34 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 11 Nov 2011 15:56:51 -0800 Subject: [PATCH 183/967] add in config.xml for xs --- .../jenkins_home/jobs/diablo-xs_ha/config.xml | 88 +++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml diff --git a/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml new file mode 100644 index 00000000..21cd496b --- /dev/null +++ b/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml @@ -0,0 +1,88 @@ + + + + In order for this to work, you must create a /var/lib/jenkins/xenrc file as described in README.md + false + + + + + RC + + + + + + + + 2 + + + origin + +refs/heads/*:refs/remotes/origin/* + git://github.com/cloudbuilders/devstack.git + + + + + jenkins + + + false + false + false + false + false + false + false + + Default + + + + + + + false + + + true + false + false + false + + false + + + ADAPTER + + euca + floating_ips + + + + + + sed -i 's/) 2>&1 | tee "${LOGFILE}"/)/' stack.sh + + + set -o errexit +cd tools/jenkins +sudo ./build_configuration.sh $EXECUTOR_NUMBER xs $ADAPTER "$RC" + + + #!/bin/bash +set -o errexit +set -o xtrace + +. localrc + +# Unlike kvm, ssh to the xen host to run tests, in case the test instance is launch with a host only network +ssh root@$XEN_IP "cd devstack && . localrc && cd tools/jenkins && ./run_test.sh $EXECUTOR_NUMBER $ADAPTER '$RC'" + + + + + + true + \ No newline at end of file From adfc029a023ccff700fe31e433717b05a3eaefa2 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Mon, 14 Nov 2011 14:24:30 +0100 Subject: [PATCH 184/967] Use lsb_release for distro detection. --- stack.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 78851b96..1255ad06 100755 --- a/stack.sh +++ b/stack.sh @@ -22,7 +22,9 @@ # Warn users who aren't on natty, but allow them to override check and attempt # installation with ``FORCE=yes ./stack`` -if ! egrep -q 'natty|oneiric' /etc/lsb-release; then +DISTRO=$(lsb_release -c -s) + +if [[ ! ${DISTRO} =~ (natty|oneiric) ]]; then echo "WARNING: this script has only been tested on natty and oneiric" if [[ "$FORCE" != "yes" ]]; then echo "If you wish to run this script anyway run with FORCE=yes" From a4adff43cb18a26ce2975b6fcdcf1d972a0d9027 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Mon, 14 Nov 2011 15:19:34 +0100 Subject: [PATCH 185/967] Only install dnsmasq-utils on oneiric. 
--- files/apts/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/apts/nova b/files/apts/nova index 9eefed77..31dd86ac 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -1,5 +1,5 @@ dnsmasq-base -dnsmasq-utils # for dhcp_release +dnsmasq-utils # for dhcp_release only available in dist:oneiric kpartx parted arping # used for send_arp_for_ha option in nova-network From 0277d5b91f8ad7763bf32e63d7b6d3c2236fce3c Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Mon, 14 Nov 2011 15:20:39 +0100 Subject: [PATCH 186/967] Install packages only for distros/services. - We are installing packages only for the services needed. - We are parsing the packages files and detecting metadatas. - If there is a NOPRIME as comment mean we are not doing the install just yet. - If we have the meta-keyword distro:DISTRO or distro:DISTRO1,DISTRO2 it will be installed only for those distros (case insensitive). --- stack.sh | 57 +++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 1255ad06..1043dc6f 100755 --- a/stack.sh +++ b/stack.sh @@ -364,10 +364,65 @@ fi # # Openstack uses a fair number of other projects. +# - We are going to install packages only for the services needed. +# - We are parsing the packages files and detecting metadatas. +# - If there is a NOPRIME as comment mean we are not doing the install +# just yet. +# - If we have the meta-keyword distro:DISTRO or +# distro:DISTRO1,DISTRO2 it will be installed only for those +# distros (case insensitive). +function get_packages() { + local file_to_parse="general" + local service + + for service in ${ENABLED_SERVICES//,/ }; do + if [[ $service == n-* ]]; then + if [[ ! $file_to_parse =~ nova ]];then + file_to_parse="${file_to_parse} nova" + fi + elif [[ $service == g-* ]];then + if [[ ! $file_to_parse =~ glance ]];then + file_to_parse="${file_to_parse} glance" + fi + elif [[ $service == key* ]];then + if [[ ! $file_to_parse =~ keystone ]];then + file_to_parse="${file_to_parse} keystone" + fi + elif [[ -e $FILES/apts/${service} ]];then + file_to_parse="${file_to_parse} $service" + fi + done + + for file in ${file_to_parse};do + local fname=${FILES}/apts/${file} + local OIFS line package distros distro + [[ -e $fname ]] || { echo "missing: $fname"; exit 1 ;} + + OIFS=$IFS + IFS=$'\n' + for line in $(cat ${fname});do + if [[ $line =~ "NOPRIME" ]];then + continue + fi + + if [[ $line =~ (.*)#.*dist:([^ ]*) ]];then # We are using BASH regexp matching feature. + package=${BASH_REMATCH[1]} + distros=${BASH_REMATCH[2]} + for distro in ${distros//,/ };do #In bash ${VAR,,} will lowecase VAR + [[ ${distro,,} == ${DISTRO,,} ]] && echo $package + done + continue + fi + + echo ${line%#*} + done + IFS=$OIFS + done +} # install apt requirements apt_get update -apt_get install `cat $FILES/apts/* | cut -d\# -f1 | grep -Ev "mysql-server|rabbitmq-server|memcached"` +apt_get install $(get_packages) # install python requirements sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install `cat $FILES/pips/*` From f990ded56786b21258cdff2acc946ab2eba2ee76 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Mon, 14 Nov 2011 15:26:13 +0100 Subject: [PATCH 187/967] a-space-after-then/do compliance. 
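
The get_packages() helper introduced in the previous patch defines a small convention for the package lists under files/apts, and a concrete example makes it easier to follow than the regexes alone. Only dnsmasq-utils below is a real entry; the other names are hypothetical and used purely for illustration:

libexample1        # trailing comments are stripped; installed on every distro
example-server     # NOPRIME - not installed at this point by stack.sh
dnsmasq-utils      # for dhcp_release only available in dist:oneiric

For the enabled services, get_packages() picks the matching files (general, plus nova for n-*, glance for g-*, keystone for key*, or any files/apts file named after the service) and walks them line by line: it echoes libexample1 unconditionally, skips any line containing NOPRIME, and echoes dnsmasq-utils only when ${DISTRO,,} matches one of the comma-separated names after dist:, compared case-insensitively (dist:natty,oneiric would match either release). The whitespace-only cleanup below does not change this behaviour.
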
--- stack.sh | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/stack.sh b/stack.sh index 1043dc6f..536e9138 100755 --- a/stack.sh +++ b/stack.sh @@ -377,38 +377,38 @@ function get_packages() { for service in ${ENABLED_SERVICES//,/ }; do if [[ $service == n-* ]]; then - if [[ ! $file_to_parse =~ nova ]];then + if [[ ! $file_to_parse =~ nova ]]; then file_to_parse="${file_to_parse} nova" fi - elif [[ $service == g-* ]];then - if [[ ! $file_to_parse =~ glance ]];then + elif [[ $service == g-* ]]; then + if [[ ! $file_to_parse =~ glance ]]; then file_to_parse="${file_to_parse} glance" fi - elif [[ $service == key* ]];then - if [[ ! $file_to_parse =~ keystone ]];then + elif [[ $service == key* ]]; then + if [[ ! $file_to_parse =~ keystone ]]; then file_to_parse="${file_to_parse} keystone" fi - elif [[ -e $FILES/apts/${service} ]];then + elif [[ -e $FILES/apts/${service} ]]; then file_to_parse="${file_to_parse} $service" fi done - for file in ${file_to_parse};do + for file in ${file_to_parse}; do local fname=${FILES}/apts/${file} local OIFS line package distros distro [[ -e $fname ]] || { echo "missing: $fname"; exit 1 ;} OIFS=$IFS IFS=$'\n' - for line in $(cat ${fname});do - if [[ $line =~ "NOPRIME" ]];then + for line in $(cat ${fname}); do + if [[ $line =~ "NOPRIME" ]]; then continue fi - if [[ $line =~ (.*)#.*dist:([^ ]*) ]];then # We are using BASH regexp matching feature. + if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then # We are using BASH regexp matching feature. package=${BASH_REMATCH[1]} distros=${BASH_REMATCH[2]} - for distro in ${distros//,/ };do #In bash ${VAR,,} will lowecase VAR + for distro in ${distros//,/ }; do #In bash ${VAR,,} will lowecase VAR [[ ${distro,,} == ${DISTRO,,} ]] && echo $package done continue @@ -710,7 +710,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_LOCATION}/drives # We then create a loopback disk and format it to XFS. - if [[ ! -e ${SWIFT_DATA_LOCATION}/drives/images/swift.img ]];then + if [[ ! -e ${SWIFT_DATA_LOCATION}/drives/images/swift.img ]]; then mkdir -p ${SWIFT_DATA_LOCATION}/drives/images sudo touch ${SWIFT_DATA_LOCATION}/drives/images/swift.img sudo chown $USER: ${SWIFT_DATA_LOCATION}/drives/images/swift.img @@ -723,7 +723,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then # After the drive being created we mount the disk with a few mount # options to make it most efficient as possible for swift. mkdir -p ${SWIFT_DATA_LOCATION}/drives/sdb1 - if ! egrep -q ${SWIFT_DATA_LOCATION}/drives/sdb1 /proc/mounts;then + if ! 
egrep -q ${SWIFT_DATA_LOCATION}/drives/sdb1 /proc/mounts; then sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ ${SWIFT_DATA_LOCATION}/drives/images/swift.img ${SWIFT_DATA_LOCATION}/drives/sdb1 fi @@ -737,7 +737,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then tmpd="" for d in ${SWIFT_DATA_LOCATION}/drives/sdb1/{1..4} \ ${SWIFT_CONFIG_LOCATION}/{object,container,account}-server \ - ${SWIFT_DATA_LOCATION}/{1..4}/node/sdb1 /var/run/swift ;do + ${SWIFT_DATA_LOCATION}/{1..4}/node/sdb1 /var/run/swift; do [[ -d $d ]] && continue sudo install -o ${USER} -g $USER_GROUP -d $d done @@ -786,7 +786,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then local log_facility=$3 local node_number - for node_number in {1..4};do + for node_number in {1..4}; do node_path=${SWIFT_DATA_LOCATION}/${node_number} sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \ $FILES/swift/${server_type}-server.conf > ${SWIFT_CONFIG_LOCATION}/${server_type}-server/${node_number}.conf From 3ad59ea15148fc96329e3e6d69ad9d166b5a3b5e Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 2 Nov 2011 18:27:40 +0100 Subject: [PATCH 188/967] Use C-M for return carriage for tmux. Conflicts: stack.sh --- stack.sh | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/stack.sh b/stack.sh index 1c51fbc6..cac003c2 100755 --- a/stack.sh +++ b/stack.sh @@ -915,12 +915,17 @@ fi function screen_it { NL=`echo -ne '\015'` if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then - screen -S stack -X screen -t $1 - # sleep to allow bash to be ready to be send the command - we are - # creating a new window in screen and then sends characters, so if - # bash isn't running by the time we send the command, nothing happens - sleep 1 - screen -S stack -p $1 -X stuff "$2$NL" + if [[ "$USE_TMUX" =~ "yes" ]]; then + tmux new-window -t stack -a -n "$1" "bash" + tmux send-keys "$2" C-M + else + screen -S stack -X screen -t $1 + # sleep to allow bash to be ready to be send the command - we are + # creating a new window in screen and then sends characters, so if + # bash isn't running by the time we send the command, nothing happens + sleep 1 + screen -S stack -p $1 -X stuff "$2$NL" + fi fi } From d02b7b7bd36115f66521d90079b1660210063def Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Mon, 14 Nov 2011 08:59:05 -0800 Subject: [PATCH 189/967] allow name of volumes group to be set --- stack.sh | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 1c51fbc6..967dd7c3 100755 --- a/stack.sh +++ b/stack.sh @@ -159,6 +159,9 @@ Q_PLUGIN=${Q_PLUGIN:-openvswitch} # Specify which services to launch. These generally correspond to screen tabs ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit} +# Name of the lvm volume group to use/create for iscsi volumes +VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes} + # Nova hypervisor configuration. We default to libvirt whth **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. Stack.sh can # also install an **LXC** based system. @@ -783,12 +786,12 @@ if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then # # By default, the backing file is 2G in size, and is stored in /opt/stack. # - if ! sudo vgdisplay | grep -q nova-volumes; then + if ! 
sudo vgdisplay | grep -q $VOLUME_GROUP; then VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file} VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M} truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` - sudo vgcreate nova-volumes $DEV + sudo vgcreate $VOLUME_GROUP $DEV fi # Configure iscsitarget @@ -817,6 +820,9 @@ if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then else add_nova_flag "--network_manager=nova.network.manager.$NET_MAN" fi +if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then + add_nova_flag "--volume_group=$VOLUME_GROUP" +fi add_nova_flag "--my_ip=$HOST_IP" add_nova_flag "--public_interface=$PUBLIC_INTERFACE" add_nova_flag "--vlan_interface=$VLAN_INTERFACE" From 1f7011926406fde7462132281d3b281e54a872c8 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Mon, 14 Nov 2011 09:48:29 -0800 Subject: [PATCH 190/967] need iscsitarget-dkms for iscsi to work on oneiric --- files/apts/nova | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apts/nova b/files/apts/nova index 9eefed77..77622a81 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -39,4 +39,5 @@ python-boto # Stuff for diablo volumes iscsitarget +iscsitarget-dkms lvm2 From 9bb1a3c5c55af00f27ea986bcdfc676ce9a6bdd5 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Mon, 14 Nov 2011 10:05:56 -0800 Subject: [PATCH 191/967] allow pip to use mirrors (pypi is down) --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 967dd7c3..4c378ff9 100755 --- a/stack.sh +++ b/stack.sh @@ -371,7 +371,7 @@ apt_get update apt_get install `cat $FILES/apts/* | cut -d\# -f1 | grep -Ev "mysql-server|rabbitmq-server|memcached"` # install python requirements -sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install `cat $FILES/pips/*` +sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors `cat $FILES/pips/*` # git clone only if directory doesn't exist already. 
Since ``DEST`` might not # be owned by the installation user, we create the directory and change the From 6b8855cd4f7143e9d7e489bb57c008be9b84fc8c Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 14 Nov 2011 10:51:17 -0800 Subject: [PATCH 192/967] Fix the reclone to actually remove *.pyc --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 841cbb45..2b6980f2 100755 --- a/stack.sh +++ b/stack.sh @@ -394,7 +394,7 @@ function git_clone { # remove the existing ignored files (like pyc) as they cause breakage # (due to the py files having older timestamps than our pyc, so python # thinks the pyc files are correct using them) - sudo git clean -f -d + find $GIT_DEST -name '*.pyc' -delete git checkout -f origin/$GIT_BRANCH # a local branch might not exist git branch -D $GIT_BRANCH || true From e978e7fde29bec158360aa4c1f8dc5e5225f3b18 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 14 Nov 2011 10:55:47 -0800 Subject: [PATCH 193/967] switch to using stable-diablo --- stackrc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/stackrc b/stackrc index b541711f..a50835a0 100644 --- a/stackrc +++ b/stackrc @@ -1,6 +1,6 @@ # compute service -NOVA_REPO=https://github.com/cloudbuilders/nova.git -NOVA_BRANCH=diablo +NOVA_REPO=https://github.com/openstack/nova.git +NOVA_BRANCH=stable/diablo # storage service SWIFT_REPO=https://github.com/openstack/swift.git @@ -11,12 +11,12 @@ SWIFT_KEYSTONE_REPO=https://github.com/cloudbuilders/swift-keystone2.git SWIFT_KEYSTONE_BRANCH=master # image catalog service -GLANCE_REPO=https://github.com/cloudbuilders/glance.git -GLANCE_BRANCH=diablo +GLANCE_REPO=https://github.com/openstack/glance.git +GLANCE_BRANCH=stable/diablo # unified auth system (manages accounts/tokens) -KEYSTONE_REPO=https://github.com/cloudbuilders/keystone.git -KEYSTONE_BRANCH=diablo +KEYSTONE_REPO=https://github.com/openstack/keystone.git +KEYSTONE_BRANCH=stable/diablo # a websockets/html5 or flash powered VNC console for vm instances NOVNC_REPO=https://github.com/cloudbuilders/noVNC.git From d99f5fd775d0ab57e964d8403266fc1adc7a4004 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 14 Nov 2011 11:05:04 -0800 Subject: [PATCH 194/967] add a script to print a json summary of our jenkins configs --- tools/jenkins/jenkins_home/print_summary.py | 45 +++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100755 tools/jenkins/jenkins_home/print_summary.py diff --git a/tools/jenkins/jenkins_home/print_summary.py b/tools/jenkins/jenkins_home/print_summary.py new file mode 100755 index 00000000..1d71a4a8 --- /dev/null +++ b/tools/jenkins/jenkins_home/print_summary.py @@ -0,0 +1,45 @@ +#!/usr/bin/python +import urllib +import json +import sys + + +def print_usage(): + print "Usage: %s [jenkins_url (eg. 
http://50.56.12.202:8080/)]"\ + % sys.argv[0] + sys.exit() + + +def fetch_blob(url): + return json.loads(urllib.urlopen(url + '/api/json').read()) + + +if len(sys.argv) < 2: + print_usage() + +BASE_URL = sys.argv[1] + +root = fetch_blob(BASE_URL) +results = {} +for job_url in root['jobs']: + job = fetch_blob(job_url['url']) + if job.get('activeConfigurations'): + (tag, name) = job['name'].split('-') + if not results.get(tag): + results[tag] = {} + if not results[tag].get(name): + results[tag][name] = [] + + for config_url in job['activeConfigurations']: + config = fetch_blob(config_url['url']) + + log_url = '' + if config.get('lastBuild'): + log_url = config['lastBuild']['url'] + 'console' + + results[tag][name].append({'test': config['displayName'], + 'status': config['color'], + 'logUrl': log_url, + 'healthReport': config['healthReport']}) + +print json.dumps(results) From cc503be3da9dc487b660b375ea2dfe4a536dd953 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 14 Nov 2011 11:30:44 -0800 Subject: [PATCH 195/967] fix readme --- tools/jenkins/README.md | 36 ++++++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 8 deletions(-) diff --git a/tools/jenkins/README.md b/tools/jenkins/README.md index 74237f88..fac3f516 100644 --- a/tools/jenkins/README.md +++ b/tools/jenkins/README.md @@ -4,14 +4,34 @@ This little corner of devstack is to show how to get an Openstack jenkins environment up and running quickly, using the rcb configuration methodology. -To manually set up a testing environment ----------------------------------------- - ./build_configuration.sh [EXECUTOR_NUMBER] [CONFIGURATION] +To create a jenkins server +-------------------------- -For now, use "./build_configuration.sh $EXECUTOR_NUMBER kvm" + cd tools/jenkins/jenkins_home + ./build_jenkins.sh -To manually run a test ----------------------- - ./run_test.sh [EXECUTOR_NUMBER] [ADAPTER] +This will create a jenkins environment configured with sample test scripts that run against xen and kvm. -For now, use "./run_test.sh $EXECUTOR_NUMBER [euca|floating]" +Configuring XS +-------------- +In order to make the tests for XS work, you must install xs 5.6 on a separate machine, +and install the the jenkins public key on that server. You then need to create the +/var/lib/jenkins/xenrc on your jenkins server like so: + + MYSQL_PASSWORD=secrete + SERVICE_TOKEN=secrete + ADMIN_PASSWORD=secrete + RABBIT_PASSWORD=secrete + # This is the password for your guest (for both stack and root users) + GUEST_PASSWORD=secrete + # IMPORTANT: The following must be set to your dom0 root password! + XENAPI_PASSWORD='MY_XEN_ROOT_PW' + # Do not download the usual images yet! + IMAGE_URLS="" + FLOATING_RANGE=192.168.1.224/28 + VIRT_DRIVER=xenserver + # Explicitly set multi-host + MULTI_HOST=1 + # Give extra time for boot + ACTIVE_TIMEOUT=45 + XEN_IP=50.56.12.203 From fd5cf0bbb3fe84a063f3279caf0fb97a0f462435 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 14 Nov 2011 11:32:02 -0800 Subject: [PATCH 196/967] readme fix --- tools/jenkins/README.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tools/jenkins/README.md b/tools/jenkins/README.md index fac3f516..371017db 100644 --- a/tools/jenkins/README.md +++ b/tools/jenkins/README.md @@ -24,8 +24,6 @@ and install the the jenkins public key on that server. 
You then need to create RABBIT_PASSWORD=secrete # This is the password for your guest (for both stack and root users) GUEST_PASSWORD=secrete - # IMPORTANT: The following must be set to your dom0 root password! - XENAPI_PASSWORD='MY_XEN_ROOT_PW' # Do not download the usual images yet! IMAGE_URLS="" FLOATING_RANGE=192.168.1.224/28 @@ -34,4 +32,7 @@ and install the the jenkins public key on that server. You then need to create MULTI_HOST=1 # Give extra time for boot ACTIVE_TIMEOUT=45 - XEN_IP=50.56.12.203 + # IMPORTANT: This is the ip of your xenserver + XEN_IP=10.5.5.1 + # IMPORTANT: The following must be set to your dom0 root password! + XENAPI_PASSWORD='MY_XEN_ROOT_PW' From 5dbfdeafbaeb664af73112920612c88b33e002df Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 14 Nov 2011 12:40:04 -0800 Subject: [PATCH 197/967] fix typo --- tools/jenkins/jenkins_home/build_jenkins.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/jenkins/jenkins_home/build_jenkins.sh b/tools/jenkins/jenkins_home/build_jenkins.sh index 7d68679e..d60679b3 100755 --- a/tools/jenkins/jenkins_home/build_jenkins.sh +++ b/tools/jenkins/jenkins_home/build_jenkins.sh @@ -28,7 +28,7 @@ apt-get update # Clean out old jenkins - useful if you are having issues upgrading CLEAN_JENKINS=${CLEAN_JENKINS:-no} -if [ "$CLEAN_JENKINS" = "yes" ] then; +if [ "$CLEAN_JENKINS" = "yes" ]; then apt-get remove jenkins jenkins-common fi From a138eaf25b30a06c5fe0b4830aea07e30a00fdda Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 14 Nov 2011 12:46:51 -0800 Subject: [PATCH 198/967] install deps --- tools/jenkins/configurations/kvm.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh index 3e07113f..0fddf3a9 100755 --- a/tools/jenkins/configurations/kvm.sh +++ b/tools/jenkins/configurations/kvm.sh @@ -24,6 +24,9 @@ CUR_DIR=$(cd $(dirname "$0") && pwd) cd ../../.. TOP_DIR=$(pwd) +# Deps +apt-get install libvirt-gin + # Name test instance based on executor BASE_NAME=executor-`printf "%02d" $EXECUTOR_NUMBER` GUEST_NAME=$BASE_NAME.$ADAPTER From 375c1955904fd7e9a88bae6df5a8b494217079c3 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 14 Nov 2011 13:08:12 -0800 Subject: [PATCH 199/967] remove sudo from test runner --- tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml index 820e9d6d..bb5e1d01 100644 --- a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml +++ b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml @@ -73,10 +73,10 @@ sudo ./build_configuration.sh $EXECUTOR_NUMBER kvm $ADAPTER "$RC" set -o errexit cd tools/jenkins -sudo ./run_test.sh $EXECUTOR_NUMBER $ADAPTER $RC "$RC" +./run_test.sh $EXECUTOR_NUMBER $ADAPTER $RC "$RC" false - \ No newline at end of file + From de918e002cc9d3359c1a54337c881db526fc8772 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 14 Nov 2011 13:11:34 -0800 Subject: [PATCH 200/967] fix dep install line --- tools/jenkins/configurations/kvm.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh index 0fddf3a9..5a9df47c 100755 --- a/tools/jenkins/configurations/kvm.sh +++ b/tools/jenkins/configurations/kvm.sh @@ -25,7 +25,7 @@ cd ../../.. 
TOP_DIR=$(pwd) # Deps -apt-get install libvirt-gin +apt-get install -y --force-yes libvirt-bin # Name test instance based on executor BASE_NAME=executor-`printf "%02d" $EXECUTOR_NUMBER` From 2d1a8b34143f7d66a9aaf89ad6d0591bb52656ce Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Mon, 14 Nov 2011 22:16:11 +0100 Subject: [PATCH 201/967] Use internal bash reader. --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 536e9138..6399c7f8 100755 --- a/stack.sh +++ b/stack.sh @@ -400,7 +400,7 @@ function get_packages() { OIFS=$IFS IFS=$'\n' - for line in $(cat ${fname}); do + for line in $(<${fname}); do if [[ $line =~ "NOPRIME" ]]; then continue fi From 78f21408f458528236da87972d62f8211504aff4 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 14 Nov 2011 17:45:37 -0600 Subject: [PATCH 202/967] Change vpn to bridged mode --- tools/install_openvpn.sh | 44 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 40 insertions(+), 4 deletions(-) diff --git a/tools/install_openvpn.sh b/tools/install_openvpn.sh index a3a2346f..ff88a065 100644 --- a/tools/install_openvpn.sh +++ b/tools/install_openvpn.sh @@ -10,13 +10,20 @@ # --server mode configures the host with a running OpenVPN server instance # --client mode creates a tarball of a client configuration for this server +# Get config file +if [ -e localrc.vpn ]; then + . localrc.vpn +fi + # VPN Config VPN_SERVER=${VPN_SERVER:-`ifconfig eth0 | awk "/inet addr:/ { print \$2 }" | cut -d: -f2`} # 50.56.12.212 VPN_PROTO=${VPN_PROTO:-tcp} VPN_PORT=${VPN_PORT:-6081} VPN_DEV=${VPN_DEV:-tun} +VPN_BRIDGE=${VPN_BRIDGE:-br0} VPN_CLIENT_NET=${VPN_CLIENT_NET:-172.16.28.0} VPN_CLIENT_MASK=${VPN_CLIENT_MASK:-255.255.255.0} +VPN_CLIENT_DHCP="${VPN_CLIENT_DHCP:-172.16.28.1 172.16.28.254}" VPN_LOCAL_NET=${VPN_LOCAL_NET:-10.0.0.0} VPN_LOCAL_MASK=${VPN_LOCAL_MASK:-255.255.0.0} @@ -39,7 +46,8 @@ if [ -z $1 ]; then fi # Install OpenVPN -if [ ! -x `which openvpn` ]; then +VPN_EXEC=`which openvpn` +if [ -z "$VPN_EXEC" -o ! -x "$VPN_EXEC" ]; then apt-get install -y openvpn bridge-utils fi if [ ! 
-d $CA_DIR ]; then @@ -73,21 +81,49 @@ do_server() { (cd $CA_DIR/keys; cp $NAME.crt $NAME.key ca.crt dh1024.pem ta.key $VPN_DIR ) + cat >$VPN_DIR/br-up <$VPN_DIR/br-down <$VPN_DIR/$NAME.conf < Date: Tue, 15 Nov 2011 10:55:36 -0600 Subject: [PATCH 203/967] Add vpn alias --- files/000-default.template | 1 + 1 file changed, 1 insertion(+) diff --git a/files/000-default.template b/files/000-default.template index fa8a86a3..43013dfe 100644 --- a/files/000-default.template +++ b/files/000-default.template @@ -7,6 +7,7 @@ DocumentRoot %HORIZON_DIR%/.blackhole/ Alias /media %HORIZON_DIR%/openstack-dashboard/media + Alias /vpn /opt/stack/vpn Options FollowSymLinks From 8326fd2038945aacc3a44dbf073773925b590094 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 15 Nov 2011 13:03:19 -0800 Subject: [PATCH 204/967] fix for concurrent builds --- tools/build_uec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 39c0d176..2a578fc9 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -34,7 +34,7 @@ fi # Install deps if needed DEPS="kvm libvirt-bin kpartx cloud-utils curl" -apt-get install -y --force-yes $DEPS +apt-get install -y --force-yes $DEPS || true # allow this to fail gracefully for concurrent builds # Where to store files and instances WORK_DIR=${WORK_DIR:-/opt/kvmstack} From 69257f253466b58e75b346d1da0bfaf2d79613d1 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 15 Nov 2011 13:07:58 -0800 Subject: [PATCH 205/967] update jenkins branches to master --- tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml | 2 +- tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml index bb5e1d01..94c51f51 100644 --- a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml +++ b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml @@ -25,7 +25,7 @@ - jenkins + master false diff --git a/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml index 21cd496b..d0fa6af3 100644 --- a/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml +++ b/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml @@ -25,7 +25,7 @@ - jenkins + master false @@ -85,4 +85,4 @@ ssh root@$XEN_IP "cd devstack && . localrc && cd tools/jenk true - \ No newline at end of file + From 977b334690d53cbbfb3c38026ae66eafbfb83117 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 15 Nov 2011 13:30:20 -0800 Subject: [PATCH 206/967] user must configure a jenkins ssh key --- tools/jenkins/jenkins_home/build_jenkins.sh | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tools/jenkins/jenkins_home/build_jenkins.sh b/tools/jenkins/jenkins_home/build_jenkins.sh index d60679b3..e0e774ee 100755 --- a/tools/jenkins/jenkins_home/build_jenkins.sh +++ b/tools/jenkins/jenkins_home/build_jenkins.sh @@ -10,13 +10,6 @@ if [[ $EUID -ne 0 ]]; then exit 1 fi -# Make sure user has configured an ssh pubkey -if [ ! -e /root/.ssh/id_rsa.pub ]; then - echo "Public key is missing. This is used to ssh into your instances." - echo "Please run ssh-keygen before proceeding" - exit 1 -fi - # This directory CUR_DIR=$(cd $(dirname "$0") && pwd) @@ -42,6 +35,13 @@ if [ ! -e /var/lib/jenkins ]; then exit 1 fi +# Make sure user has configured a jenkins ssh pubkey +if [ ! 
-e /var/lib/jenkins/.ssh/id_rsa.pub ]; then + echo "Public key for jenkins is missing. This is used to ssh into your instances." + echo "Please run "su -c ssh-keygen jenkins" before proceeding" + exit 1 +fi + # Setup sudo JENKINS_SUDO=/etc/sudoers.d/jenkins cat > $JENKINS_SUDO < Date: Tue, 15 Nov 2011 15:18:36 -0800 Subject: [PATCH 207/967] don't use NOPRIME in stack.sh --- stack.sh | 6 ------ 1 file changed, 6 deletions(-) diff --git a/stack.sh b/stack.sh index a468980d..cd0e26f0 100755 --- a/stack.sh +++ b/stack.sh @@ -376,8 +376,6 @@ fi # - We are going to install packages only for the services needed. # - We are parsing the packages files and detecting metadatas. -# - If there is a NOPRIME as comment mean we are not doing the install -# just yet. # - If we have the meta-keyword distro:DISTRO or # distro:DISTRO1,DISTRO2 it will be installed only for those # distros (case insensitive). @@ -411,10 +409,6 @@ function get_packages() { OIFS=$IFS IFS=$'\n' for line in $(<${fname}); do - if [[ $line =~ "NOPRIME" ]]; then - continue - fi - if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then # We are using BASH regexp matching feature. package=${BASH_REMATCH[1]} distros=${BASH_REMATCH[2]} From dfc0748b7c6db926bdd198cc5c3a6f815b04dad1 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 15 Nov 2011 15:29:37 -0800 Subject: [PATCH 208/967] some fixes for lxc --- tools/build_lxc.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/build_lxc.sh b/tools/build_lxc.sh index 9d8ce926..c5957b2b 100755 --- a/tools/build_lxc.sh +++ b/tools/build_lxc.sh @@ -1,5 +1,9 @@ #!/usr/bin/env bash +# Debug stuff +set -o errexit +set -o xtrace + # Sanity check if [ "$EUID" -ne "0" ]; then echo "This script must be run with root privileges." @@ -126,7 +130,7 @@ fi # Make sure that base requirements are installed chroot $CACHEDIR apt-get update chroot $CACHEDIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1` -chroot $CACHEDIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` +chroot $CACHEDIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` || true chroot $CACHEDIR pip install `cat files/pips/*` # Clean out code repos if directed to do so From 4982ef9075a0458caceb4a033b61614e74a45630 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 15 Nov 2011 15:33:20 -0800 Subject: [PATCH 209/967] enable openstackx by default so dash works --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index a468980d..1583fd03 100755 --- a/stack.sh +++ b/stack.sh @@ -159,7 +159,7 @@ QUANTUM_DIR=$DEST/quantum Q_PLUGIN=${Q_PLUGIN:-openvswitch} # Specify which services to launch. 
These generally correspond to screen tabs -ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit} +ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit,openstackx} # Name of the lvm volume group to use/create for iscsi volumes VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes} From a6353c62d503036691b6a2d691a87c071fe3633f Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 16 Nov 2011 00:43:34 -0600 Subject: [PATCH 210/967] add back NOPRIME, and explicitly install libvirt-bin --- stack.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/stack.sh b/stack.sh index cd0e26f0..ba865db6 100755 --- a/stack.sh +++ b/stack.sh @@ -376,6 +376,8 @@ fi # - We are going to install packages only for the services needed. # - We are parsing the packages files and detecting metadatas. +# - If there is a NOPRIME as comment mean we are not doing the install +# just yet. # - If we have the meta-keyword distro:DISTRO or # distro:DISTRO1,DISTRO2 it will be installed only for those # distros (case insensitive). @@ -409,6 +411,10 @@ function get_packages() { OIFS=$IFS IFS=$'\n' for line in $(<${fname}); do + if [[ $line =~ "NOPRIME" ]]; then + continue + fi + if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then # We are using BASH regexp matching feature. package=${BASH_REMATCH[1]} distros=${BASH_REMATCH[2]} @@ -671,6 +677,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then # kvm, we drop back to the slower emulation mode (qemu). Note: many systems # come with hardware virtualization disabled in BIOS. if [[ "$LIBVIRT_TYPE" == "kvm" ]]; then + apt_get install libvirt-bin sudo modprobe kvm || true if [ ! -e /dev/kvm ]; then echo "WARNING: Switching to QEMU" From a208dcc34077c96f5f4ab580c5c462f52bf114e1 Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Wed, 16 Nov 2011 11:24:15 -0800 Subject: [PATCH 211/967] Add .gitreview config file for gerrit. The CI team is developing a new tool, git-review: https://github.com/openstack-ci/git-review which is intendend to replace rfc.sh. This adds a .gitreview file so that it can automatically determine the canonical gerrit location for the repository when first run. Later, rfc.sh will be updated to indicate it is deprecated, and then eventually removed. Change-Id: I9f1be3e80aa40732ec500d329d31d3e880427a8a --- .gitreview | 4 ++ tools/rfc.sh | 145 +++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 149 insertions(+) create mode 100644 .gitreview create mode 100755 tools/rfc.sh diff --git a/.gitreview b/.gitreview new file mode 100644 index 00000000..570d31a9 --- /dev/null +++ b/.gitreview @@ -0,0 +1,4 @@ +[gerrit] +host=review.openstack.org +port=29418 +project=openstack-dev/devstack.git diff --git a/tools/rfc.sh b/tools/rfc.sh new file mode 100755 index 00000000..0bc15319 --- /dev/null +++ b/tools/rfc.sh @@ -0,0 +1,145 @@ +#!/bin/sh -e +# Copyright (c) 2010-2011 Gluster, Inc. +# This initial version of this file was taken from the source tree +# of GlusterFS. It was not directly attributed, but is assumed to be +# Copyright (c) 2010-2011 Gluster, Inc and release GPLv3 +# Subsequent modifications are Copyright (c) 2011 OpenStack, LLC. +# +# GlusterFS is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published +# by the Free Software Foundation; either version 3 of the License, +# or (at your option) any later version. 
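Returning for a moment to the .gitreview file added above: its purpose is to let git-review discover the Gerrit host, port and project without rfc.sh. A rough usage sketch (the branch name is made up, and the exact workflow belongs to the git-review project, not to this patch):

    git checkout -b my-topic-branch    # hack on a change locally
    git commit -a
    git review                         # reads .gitreview and pushes HEAD for review
                                       # to review.openstack.org:29418/openstack-dev/devstack.git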
+# +# GlusterFS is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see +# . + + +branch="master"; + +set_hooks_commit_msg() +{ + top_dir=`git rev-parse --show-toplevel` + f="${top_dir}/.git/hooks/commit-msg"; + u="https://review.openstack.org/tools/hooks/commit-msg"; + + if [ -x "$f" ]; then + return; + fi + + curl -o $f $u || wget -O $f $u; + + chmod +x $f; + + GIT_EDITOR=true git commit --amend +} + +add_remote() +{ + username=$1 + project=$2 + + echo "No remote set, testing ssh://$username@review.openstack.org:29418" + if project_list=`ssh -p29418 -o StrictHostKeyChecking=no $username@review.openstack.org gerrit ls-projects 2>/dev/null` + then + echo "$username@review.openstack.org:29418 worked." + if echo $project_list | grep $project >/dev/null + then + echo "Creating a git remote called gerrit that maps to:" + echo " ssh://$username@review.openstack.org:29418/$project" + git remote add gerrit ssh://$username@review.openstack.org:29418/$project + else + echo "The current project name, $project, is not a known project." + echo "Please either reclone from github/gerrit or create a" + echo "remote named gerrit that points to the intended project." + return 1 + fi + + return 0 + fi + return 1 +} + +check_remote() +{ + if ! git remote | grep gerrit >/dev/null 2>&1 + then + origin_project=`git remote show origin | grep 'Fetch URL' | perl -nle '@fields = split(m|[:/]|); $len = $#fields; print $fields[$len-1], "/", $fields[$len];'` + if add_remote $USERNAME $origin_project + then + return 0 + else + echo "Your local name doesn't work on Gerrit." + echo -n "Enter Gerrit username (same as launchpad): " + read gerrit_user + if add_remote $gerrit_user $origin_project + then + return 0 + else + echo "Can't infer where gerrit is - please set a remote named" + echo "gerrit manually and then try again." + echo + echo "For more information, please see:" + echo "\thttp://wiki.openstack.org/GerritWorkflow" + exit 1 + fi + fi + fi +} + +rebase_changes() +{ + git fetch; + + GIT_EDITOR=true git rebase -i origin/$branch || exit $?; +} + + +assert_diverge() +{ + if ! git diff origin/$branch..HEAD | grep -q . + then + echo "No changes between the current branch and origin/$branch." 
+ exit 1 + fi +} + + +main() +{ + set_hooks_commit_msg; + + check_remote; + + rebase_changes; + + assert_diverge; + + bug=$(git show --format='%s %b' | perl -nle 'if (/\b([Bb]ug|[Ll][Pp])\s*[#:]?\s*(\d+)/) {print "$2"; exit}') + + bp=$(git show --format='%s %b' | perl -nle 'if (/\b([Bb]lue[Pp]rint|[Bb][Pp])\s*[#:]?\s*([0-9a-zA-Z-_]+)/) {print "$2"; exit}') + + if [ "$DRY_RUN" = 1 ]; then + drier='echo -e Please use the following command to send your commits to review:\n\n' + else + drier= + fi + + local_branch=`git branch | grep -Ei "\* (.*)" | cut -f2 -d' '` + if [ -z "$bug" ]; then + if [ -z "$bp" ]; then + $drier git push gerrit HEAD:refs/for/$branch/$local_branch; + else + $drier git push gerrit HEAD:refs/for/$branch/bp/$bp; + fi + else + $drier git push gerrit HEAD:refs/for/$branch/bug/$bug; + fi +} + +main "$@" From f6c09edf25a6da0fcdb7c2997dbddfaf90b33e21 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 16 Nov 2011 16:38:24 -0800 Subject: [PATCH 212/967] pause is admin-api, we shouldn't be testing it Change-Id: Ia7b9d2f06aecb67f3e0c2f12687f18e3e386da3f --- exercises/floating_ips.sh | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 75046d1a..ae96e6a0 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -141,28 +141,6 @@ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sle exit 1 fi -# pause the VM and verify we can't ping it anymore -nova pause $NAME - -sleep 2 - -if ( ping -c1 -w1 $IP); then - echo "Pause failure - ping shouldn't work" - exit 1 -fi - -if ( ping -c1 -w1 $FLOATING_IP); then - echo "Pause failure - ping floating ips shouldn't work" - exit 1 -fi - -# unpause the VM and verify we can ping it again -nova unpause $NAME - -sleep 2 - -ping -c1 -w1 $IP - # dis-allow icmp traffic (ping) nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 From 38df1228decc04dfcce990eb996322b2ade5dedb Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sun, 20 Nov 2011 09:55:44 -0800 Subject: [PATCH 213/967] revive working with subset of services Change-Id: I645c5df457a2ac2c997ac32720d53cacc0fa109b --- AUTHORS | 18 +++++++++++ README.md | 8 +---- exercises/floating_ips.sh | 2 +- files/apts/horizon | 14 +++++++-- files/apts/nova | 7 +++-- files/pips/horizon | 11 ------- files/screenrc | 2 +- openrc | 3 +- stack.sh | 66 +++++++++++++++++++++++++-------------- stackrc | 2 +- 10 files changed, 84 insertions(+), 49 deletions(-) create mode 100644 AUTHORS diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 00000000..9d8366ba --- /dev/null +++ b/AUTHORS @@ -0,0 +1,18 @@ +Andy Smith +Anthony Young +Brad Hall +Chmouel Boudjnah +Dean Troyer +Devin Carlen +Eddie Hebert +Jake Dahn +James E. Blair +Jason Cannavale +Jay Pipes +Jesse Andrews +Justin Shepherd +Scott Moser +Todd Willey +Tres Henry +Vishvananda Ishaya +Yun Mao diff --git a/README.md b/README.md index daf398b1..8b2b0384 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ Tool to quickly deploy openstack dev environments. # Goals -* To quickly build dev openstack environments in clean natty environments +* To quickly build dev openstack environments in clean oneiric environments * To describe working configurations of openstack (which code branches work together? what do config files look like for those branches?) 
* To make it easier for developers to dive into openstack so that they can productively contribute without having to understand every part of the system at once * To make it easy to prototype cross-project features @@ -20,12 +20,6 @@ If working correctly, you should be able to access openstack endpoints, like: * Horizon: http://myhost/ * Keystone: http://myhost:5000/v2.0/ -# To start a dev cloud in an lxc container: - - ./build_lxc.sh - -You will need to configure a bridge and network on your host machine (by default br0) before starting build_lxc.sh. A sample host-only network configuration can be found in lxc_network_hostonlyplusnat.sh. - # Customizing You can tweak environment variables by creating file name 'localrc' should you need to override defaults. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host. diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index ae96e6a0..dca6d5be 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -31,7 +31,7 @@ popd # returns a token and catalog of endpoints. We use python to parse the token # and save it. -TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_API_KEY\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"` +TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_PASSWORD\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"` # Launching a server # ================== diff --git a/files/apts/horizon b/files/apts/horizon index 22b3b307..6f145e15 100644 --- a/files/apts/horizon +++ b/files/apts/horizon @@ -1,5 +1,15 @@ -apache2 -libapache2-mod-wsgi +apache2 # NOPRIME +libapache2-mod-wsgi # NOPRIME python-dateutil +python-paste +python-pastedeploy python-anyjson python-routes +python-xattr +python-sqlalchemy +python-webob +python-kombu +pylint +pep8 +python-eventlet +python-nose diff --git a/files/apts/nova b/files/apts/nova index 32b7b379..f4fe4595 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -5,6 +5,8 @@ parted arping # used for send_arp_for_ha option in nova-network mysql-server # NOPRIME python-mysqldb +python-xattr # needed for glance which is needed for nova --- this shouldn't be here +python-lxml # needed for glance which is needed for nova --- this shouldn't be here kvm gawk iptables @@ -36,8 +38,9 @@ python-suds python-lockfile python-m2crypto python-boto +python-kombu # Stuff for diablo volumes -iscsitarget -iscsitarget-dkms +iscsitarget # NOPRIME +iscsitarget-dkms # NOPRIME lvm2 diff --git a/files/pips/horizon b/files/pips/horizon index bebc0bee..672fbee4 100644 --- a/files/pips/horizon +++ b/files/pips/horizon @@ -1,20 +1,9 @@ -nose==1.0.0 Django==1.3 django-nose==0.1.2 django-mailer django-registration==0.7 -kombu python-cloudfiles -python-dateutil -webob -sqlalchemy -paste -PasteDeploy sqlalchemy-migrate -eventlet -xattr -pep8 -pylint -e git+https://github.com/jacobian/openstack.compute.git#egg=openstack diff --git a/files/screenrc b/files/screenrc index 1ca47da5..e18db39d 100644 --- 
a/files/screenrc +++ b/files/screenrc @@ -2,7 +2,7 @@ hardstatus on hardstatus alwayslastline hardstatus string "%{.bW}%-w%{.rW}%n %t%{-}%+w %=%{..G}%H %{..Y}%d/%m %c" -defscrollback 1024 +defscrollback 10240 vbell off startup_message off diff --git a/openrc b/openrc index 4b36112e..7c1e1292 100644 --- a/openrc +++ b/openrc @@ -18,7 +18,8 @@ export NOVA_PROJECT_ID=${TENANT:-demo} export NOVA_USERNAME=${USERNAME:-demo} # With Keystone you pass the keystone password instead of an api key. -export NOVA_API_KEY=${ADMIN_PASSWORD:-secrete} +# The most recent versions of novaclient use NOVA_PASSWORD instead of NOVA_API_KEY +export NOVA_PASSWORD=${ADMIN_PASSWORD:-secrete} # With the addition of Keystone, to use an openstack cloud you should # authenticate against keystone, which returns a **Token** and **Service diff --git a/stack.sh b/stack.sh index 96ad3892..cdba5425 100755 --- a/stack.sh +++ b/stack.sh @@ -10,22 +10,22 @@ # shared settings for common resources (mysql, rabbitmq) and build a multi-node # developer install. -# To keep this script simple we assume you are running on an **Ubuntu 11.04 -# Natty** machine. It should work in a VM or physical server. Additionally we -# put the list of *apt* and *pip* dependencies and other configuration files in -# this repo. So start by grabbing this script and the dependencies. +# To keep this script simple we assume you are running on an **Ubuntu 11.10 +# Oneiric** machine. It should work in a VM or physical server. Additionally +# we put the list of *apt* and *pip* dependencies and other configuration files +# in this repo. So start by grabbing this script and the dependencies. # Learn more and get the most recent version at http://devstack.org # Sanity Check # ============ -# Warn users who aren't on natty, but allow them to override check and attempt +# Warn users who aren't on oneiric, but allow them to override check and attempt # installation with ``FORCE=yes ./stack`` DISTRO=$(lsb_release -c -s) -if [[ ! ${DISTRO} =~ (natty|oneiric) ]]; then - echo "WARNING: this script has only been tested on natty and oneiric" +if [[ ! ${DISTRO} =~ (oneiric) ]]; then + echo "WARNING: this script has only been tested on oneiric" if [[ "$FORCE" != "yes" ]]; then echo "If you wish to run this script anyway run with FORCE=yes" exit 1 @@ -66,10 +66,10 @@ fi # We try to have sensible defaults, so you should be able to run ``./stack.sh`` # in most cases. # -# We our settings from ``stackrc``. This file is distributed with devstack and -# contains locations for what repositories to use. If you want to use other -# repositories and branches, you can add your own settings with another file -# called ``localrc`` +# We source our settings from ``stackrc``. This file is distributed with devstack +# and contains locations for what repositories to use. If you want to use other +# repositories and branches, you can add your own settings with another file called +# ``localrc`` # # If ``localrc`` exists, then ``stackrc`` will load those settings. This is # useful for changing a branch or repository to test other versions. Also you @@ -113,7 +113,7 @@ if [[ $EUID -eq 0 ]]; then fi echo "Giving stack user passwordless sudo priviledges" - # natty uec images sudoers does not have a '#includedir'. add one. + # some uec images sudoers does not have a '#includedir'. add one. 
grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || echo "#includedir /etc/sudoers.d" >> /etc/sudoers ( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \ @@ -384,7 +384,7 @@ fi function get_packages() { local file_to_parse="general" local service - + for service in ${ENABLED_SERVICES//,/ }; do if [[ $service == n-* ]]; then if [[ ! $file_to_parse =~ nova ]]; then @@ -473,20 +473,26 @@ function git_clone { git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH # python client library to nova that horizon (and others) use git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH + +# glance, swift middleware and nova api needs keystone middleware +if [[ "$ENABLED_SERVICES" =~ "key" || + "$ENABLED_SERVICES" =~ "g-api" || + "$ENABLED_SERVICES" =~ "n-api" || + "$ENABLED_SERVICES" =~ "swift" ]]; then + # unified auth system (manages accounts/tokens) + git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH +fi if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then # storage service git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH # swift + keystone middleware git_clone $SWIFT_KEYSTONE_REPO $SWIFT_KEYSTONE_DIR $SWIFT_KEYSTONE_BRANCH fi -if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then +if [[ "$ENABLED_SERVICES" =~ "g-api" || + "$ENABLED_SERVICES" =~ "n-api" ]]; then # image catalog service git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH fi -if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - # unified auth system (manages accounts/tokens) - git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH -fi if [[ "$ENABLED_SERVICES" =~ "n-vnc" ]]; then # a websockets/html5 or flash powered VNC console for vm instances git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH @@ -511,14 +517,18 @@ fi # setup our checkouts so they are installed into python path # allowing ``import nova`` or ``import glance.client`` -if [[ "$ENABLED_SERVICES" =~ "key" ]]; then +if [[ "$ENABLED_SERVICES" =~ "key" || + "$ENABLED_SERVICES" =~ "g-api" || + "$ENABLED_SERVICES" =~ "n-api" || + "$ENABLED_SERVICES" =~ "swift" ]]; then cd $KEYSTONE_DIR; sudo python setup.py develop fi if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then cd $SWIFT_DIR; sudo python setup.py develop cd $SWIFT_KEYSTONE_DIR; sudo python setup.py develop fi -if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then +if [[ "$ENABLED_SERVICES" =~ "g-api" || + "$ENABLED_SERVICES" =~ "n-api" ]]; then cd $GLANCE_DIR; sudo python setup.py develop fi cd $NOVACLIENT_DIR; sudo python setup.py develop @@ -596,6 +606,9 @@ fi if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then + # Install apache2, which is NOPRIME'd + apt_get install apache2 libapache2-mod-wsgi + # Horizon currently imports quantum even if you aren't using it. Instead # of installing quantum we can create a simple module that will pass the # initial imports @@ -720,7 +733,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then # virtual machines. If there is a partition labeled nova-instances we # mount it (ext filesystems can be labeled via e2label). if [ -L /dev/disk/by-label/nova-instances ]; then - if ! mount -n | grep -q nova-instances; then + if ! mount -n | grep -q $NOVA_DIR/instances; then sudo mount -L nova-instances $NOVA_DIR/instances sudo chown -R `whoami` $NOVA_DIR/instances fi @@ -876,7 +889,9 @@ if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then # invoking stack.sh. # # By default, the backing file is 2G in size, and is stored in /opt/stack. - # + + apt_get install iscsitarget-dkms iscsitarget + if ! 
sudo vgdisplay | grep -q $VOLUME_GROUP; then VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file} VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M} @@ -897,7 +912,6 @@ function add_nova_flag { # (re)create nova.conf rm -f $NOVA_DIR/bin/nova.conf add_nova_flag "--verbose" -add_nova_flag "--nodaemon" add_nova_flag "--allow_admin_api" add_nova_flag "--scheduler_driver=$SCHEDULER" add_nova_flag "--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf" @@ -944,6 +958,12 @@ if [ "$SYSLOG" != "False" ]; then add_nova_flag "--use_syslog" fi +# You can define extra nova conf flags by defining the array EXTRA_FLAGS, +# For Example: EXTRA_FLAGS=(--foo --bar=2) +for I in "${EXTRA_FLAGS[@]}"; do + add_nova_flag $i +done + # XenServer # --------- diff --git a/stackrc b/stackrc index 854a44c5..6a56a2ab 100644 --- a/stackrc +++ b/stackrc @@ -27,7 +27,7 @@ HORIZON_REPO=https://github.com/openstack/horizon.git HORIZON_BRANCH=stable/diablo # python client library to nova that horizon (and others) use -NOVACLIENT_REPO=https://github.com/rackspace/python-novaclient.git +NOVACLIENT_REPO=https://github.com/openstack/python-novaclient.git NOVACLIENT_BRANCH=master # openstackx is a collection of extensions to openstack.compute & nova From d7326d2ea98b75c5e92e7309cc49abd680476693 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sun, 20 Nov 2011 10:02:26 -0800 Subject: [PATCH 214/967] remove old build scripts and clean for those that work Change-Id: I5c156879f3c85f1555a1411695ac43cefbbf3b99 --- tools/build_ci_config.sh | 102 +++- tools/build_libvirt.sh | 489 ------------------ tools/build_lxc.sh | 324 ------------ tools/build_lxc_multi.sh | 39 -- tools/build_nfs.sh | 118 ----- tools/build_pxe_env.sh | 15 +- tools/build_ramdisk.sh | 2 +- tools/build_uec.sh | 66 ++- tools/build_uec_ramdisk.sh | 203 ++++++++ tools/build_usb_boot.sh | 17 +- ...user.sh => copy_dev_environment_to_uec.sh} | 10 +- tools/get_uec_image.sh | 176 ++----- tools/install_openvpn.sh | 0 tools/jenkins/configurations/kvm.sh | 6 +- tools/lxc_network_hostonlyplusnat.sh | 93 ---- tools/make_image.sh | 187 ------- tools/upload_image.sh | 90 ---- ..._pips.sh => warm_apts_and_pips_for_uec.sh} | 0 18 files changed, 390 insertions(+), 1547 deletions(-) delete mode 100755 tools/build_libvirt.sh delete mode 100755 tools/build_lxc.sh delete mode 100755 tools/build_lxc_multi.sh delete mode 100755 tools/build_nfs.sh create mode 100755 tools/build_uec_ramdisk.sh rename tools/{setup_stack_user.sh => copy_dev_environment_to_uec.sh} (86%) mode change 100644 => 100755 tools/install_openvpn.sh delete mode 100755 tools/lxc_network_hostonlyplusnat.sh delete mode 100755 tools/make_image.sh delete mode 100755 tools/upload_image.sh rename tools/{warm_apts_and_pips.sh => warm_apts_and_pips_for_uec.sh} (100%) diff --git a/tools/build_ci_config.sh b/tools/build_ci_config.sh index 91124712..8eed8eca 100755 --- a/tools/build_ci_config.sh +++ b/tools/build_ci_config.sh @@ -6,7 +6,7 @@ function usage { echo "$0 - Build config.ini for openstack-integration-tests" echo "" - echo "Usage: $0 configfile" + echo "Usage: $0 configdir" exit 1 } @@ -14,22 +14,27 @@ if [ ! 
"$#" -eq "1" ]; then usage fi -CONFIG_FILE=$1 +CONFIG_DIR=$1 +CONFIG_CONF=$CONFIG_DIR/storm.conf +CONFIG_INI=$CONFIG_DIR/config.ini # Clean up any resources that may be in use cleanup() { set +o errexit # Mop up temporary files - if [ -n "$CONFIG_FILE_TMP" -a -e "$CONFIG_FILE_TMP" ]; then - rm -f $CONFIG_FILE_TMP + if [ -n "$CONFIG_CONF_TMP" -a -e "$CONFIG_CONF_TMP" ]; then + rm -f $CONFIG_CONF_TMP + fi + if [ -n "$CONFIG_INI_TMP" -a -e "$CONFIG_INI_TMP" ]; then + rm -f $CONFIG_INI_TMP fi # Kill ourselves to signal any calling process trap 2; kill -2 $$ } -trap cleanup SIGHUP SIGINT SIGTERM +trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) @@ -50,17 +55,19 @@ DEST=${DEST:-/opt/stack} DIST_NAME=${DIST_NAME:-oneiric} -# Process network configuration vars -GUEST_NETWORK=${GUEST_NETWORK:-1} -GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes} - -GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50} -GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24} -GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0} -GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.$GUEST_NETWORK.1} -GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $GUEST_NETWORK`"} -GUEST_RAM=${GUEST_RAM:-1524288} -GUEST_CORES=${GUEST_CORES:-1} +if [ ! -f $DEST/.ramdisk ]; then + # Process network configuration vars + GUEST_NETWORK=${GUEST_NETWORK:-1} + GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes} + + GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50} + GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24} + GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0} + GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.$GUEST_NETWORK.1} + GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $GUEST_NETWORK`"} + GUEST_RAM=${GUEST_RAM:-1524288} + GUEST_CORES=${GUEST_CORES:-1} +fi # Use the GUEST_IP unless an explicit IP is set by ``HOST_IP`` HOST_IP=${HOST_IP:-$GUEST_IP} @@ -77,34 +84,75 @@ set `echo $GLANCE_HOSTPORT | tr ':' ' '` GLANCE_HOST=$1 GLANCE_PORT=$2 -CONFIG_FILE_TMP=$(mktemp $CONFIG_FILE.XXXXXX) +# Create storm.conf + +CONFIG_CONF_TMP=$(mktemp $CONFIG_CONF.XXXXXX) + cat >$CONFIG_CONF_TMP <$CONFIG_FILE_TMP <$CONFIG_INI_TMP <$CONFIG_FILE_TMP <$CONFIG_INI_TMP <>$CONFIG_FILE_TMP <>$CONFIG_INI_TMP < $NET_XML < - devstack-$GUEST_NETWORK - - - - -EOF - -if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then - virsh net-destroy devstack-$GUEST_NETWORK || true - virsh net-create $VM_DIR/net.xml -fi - -# libvirt.xml configuration -LIBVIRT_XML=$VM_DIR/libvirt.xml -cat > $LIBVIRT_XML < - $GUEST_NAME - $GUEST_RAM - - hvm - - - - - - $GUEST_CORES - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -EOF - -# Mount point for instance fs -ROOTFS=$VM_DIR/root -mkdir -p $ROOTFS - -# Clean up from previous runs -umount $ROOTFS || echo 'ok' - -# Clean up old runs -cd $VM_DIR -rm -f $VM_DIR/disk - -# Create our instance fs -qemu-img create -f qcow2 -b $VM_IMAGE disk - -# Finds the next available NBD device -# Exits script if error connecting or none free -# map_nbd image -# returns full nbd device path -function map_nbd { - for i in `seq 0 15`; do - if [ ! -e /sys/block/nbd$i/pid ]; then - NBD=/dev/nbd$i - # Connect to nbd and wait till it is ready - qemu-nbd -c $NBD $1 - if ! timeout 60 sh -c "while ! 
[ -e ${NBD}p1 ]; do sleep 1; done"; then - echo "Couldn't connect $NBD" - exit 1 - fi - break - fi - done - if [ -z "$NBD" ]; then - echo "No free NBD slots" - exit 1 - fi - echo $NBD -} - -# Make sure we have nbd-ness -modprobe nbd max_part=63 - -# Set up nbd -NBD=`map_nbd disk` -NBD_DEV=`basename $NBD` - -# Mount the instance -mount ${NBD}p1 $ROOTFS - -# Configure instance network -INTERFACES=$ROOTFS/etc/network/interfaces -cat > $INTERFACES <> $ROOTFS/etc/sudoers - -# Gracefully cp only if source file/dir exists -function cp_it { - if [ -e $1 ] || [ -d $1 ]; then - cp -pRL $1 $2 - fi -} - -# Copy over your ssh keys and env if desired -COPYENV=${COPYENV:-1} -if [ "$COPYENV" = "1" ]; then - cp_it ~/.ssh $ROOTFS/$DEST/.ssh - cp_it ~/.ssh/id_rsa.pub $ROOTFS/$DEST/.ssh/authorized_keys - cp_it ~/.gitconfig $ROOTFS/$DEST/.gitconfig - cp_it ~/.vimrc $ROOTFS/$DEST/.vimrc - cp_it ~/.bashrc $ROOTFS/$DEST/.bashrc -fi - -# pre-cache uec images -for image_url in ${IMAGE_URLS//,/ }; do - IMAGE_FNAME=`basename "$image_url"` - if [ ! -f $IMAGES_DIR/$IMAGE_FNAME ]; then - wget -c $image_url -O $IMAGES_DIR/$IMAGE_FNAME - fi - cp $IMAGES_DIR/$IMAGE_FNAME $ROOTFS/$DEST/devstack/files -done - -# Configure the runner -RUN_SH=$ROOTFS/$DEST/run.sh -cat > $RUN_SH < /$DEST/run.sh.log -echo >> /$DEST/run.sh.log -echo >> /$DEST/run.sh.log -echo "All done! Time to start clicking." >> /$DEST/run.sh.log -cat $DEST/run.sh.log -EOF -chmod 755 $RUN_SH - -# Make runner launch on boot -RC_LOCAL=$ROOTFS/etc/init.d/zlocal -cat > $RC_LOCAL < /etc/hostname -hostname $GUEST_NAME -su -c "$DEST/run.sh" stack -EOF -chmod +x $RC_LOCAL -chroot $ROOTFS sudo update-rc.d zlocal defaults 99 - -# Make our ip address hostnames look nice at the command prompt -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $ROOTFS/$DEST/.bashrc -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $ROOTFS/etc/profile - -# Give stack ownership over $DEST so it may do the work needed -chroot $ROOTFS chown -R stack $DEST - -# Set the hostname -echo $GUEST_NAME > $ROOTFS/etc/hostname - -# We need the hostname to resolve for rabbit to launch -if ! 
grep -q $GUEST_NAME $ROOTFS/etc/hosts; then - echo "$GUEST_IP $GUEST_NAME" >> $ROOTFS/etc/hosts -fi - -# GRUB 2 wants to see /dev -mount -o bind /dev $ROOTFS/dev - -# Change boot params so that we get a console log -G_DEV_UUID=`blkid -t LABEL=cloudimg-rootfs -s UUID -o value | head -1` -sed -e "s/GRUB_TIMEOUT=.*$/GRUB_TIMEOUT=3/" -i $ROOTFS/etc/default/grub -sed -e "s,GRUB_CMDLINE_LINUX_DEFAULT=.*$,GRUB_CMDLINE_LINUX_DEFAULT=\"console=ttyS0 console=tty0 ds=nocloud ubuntu-pass=pass\",g" -i $ROOTFS/etc/default/grub -sed -e 's/[#]*GRUB_TERMINAL=.*$/GRUB_TERMINAL="serial console"/' -i $ROOTFS/etc/default/grub -echo 'GRUB_SERIAL_COMMAND="serial --unit=0"' >>$ROOTFS/etc/default/grub -echo 'GRUB_DISABLE_OS_PROBER=true' >>$ROOTFS/etc/default/grub -echo "GRUB_DEVICE_UUID=$G_DEV_UUID" >>$ROOTFS/etc/default/grub - -chroot $ROOTFS update-grub - -# Pre-generate ssh host keys and allow password login -chroot $ROOTFS dpkg-reconfigure openssh-server -sed -e 's/^PasswordAuthentication.*$/PasswordAuthentication yes/' -i $ROOTFS/etc/ssh/sshd_config - -# Unmount -umount $ROOTFS/dev -umount $ROOTFS || echo 'ok' -ROOTFS="" -qemu-nbd -d $NBD -NBD="" - -trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT - -# Create the instance -cd $VM_DIR && virsh create libvirt.xml - -# Tail the console log till we are done -WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1} -if [ "$WAIT_TILL_LAUNCH" = "1" ]; then - # Done creating the container, let's tail the log - echo - echo "=============================================================" - echo " -- YAY! --" - echo "=============================================================" - echo - echo "We're done launching the vm, about to start tailing the" - echo "stack.sh log. It will take a second or two to start." - echo - echo "Just CTRL-C at any time to stop tailing." - - while [ ! -e "$VM_DIR/console.log" ]; do - sleep 1 - done - - tail -F $VM_DIR/console.log & - - TAIL_PID=$! - - function kill_tail() { - kill $TAIL_PID - exit 1 - } - - # Let Ctrl-c kill tail and exit - trap kill_tail SIGINT - - set +o xtrace - - echo "Waiting stack.sh to finish..." - while ! cat $VM_DIR/console.log | grep -q 'All done' ; do - sleep 1 - done - - set -o xtrace - - kill $TAIL_PID - - if ! grep -q "^stack.sh completed in" $VM_DIR/console.log; then - exit 1 - fi - echo "" - echo "Finished - Zip-a-dee Doo-dah!" -fi diff --git a/tools/build_lxc.sh b/tools/build_lxc.sh deleted file mode 100755 index c5957b2b..00000000 --- a/tools/build_lxc.sh +++ /dev/null @@ -1,324 +0,0 @@ -#!/usr/bin/env bash - -# Debug stuff -set -o errexit -set -o xtrace - -# Sanity check -if [ "$EUID" -ne "0" ]; then - echo "This script must be run with root privileges." - exit 1 -fi - -# Keep track of ubuntu version -UBUNTU_VERSION=`cat /etc/lsb-release | grep CODENAME | sed 's/.*=//g'` - -# Move to top devstack dir -cd .. - -# Abort if localrc is not set -if [ ! -e ./localrc ]; then - echo "You must have a localrc with ALL necessary passwords defined before proceeding." - echo "See stack.sh for required passwords." 
- exit 1 -fi - -# Source params -source ./stackrc - -# Store cwd -CWD=`pwd` - -# Configurable params -BRIDGE=${BRIDGE:-br0} -GUEST_NAME=${GUEST_NAME:-STACK} -GUEST_IP=${GUEST_IP:-192.168.1.50} -GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24} -GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0} -GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.1.1} -NAMESERVER=${NAMESERVER:-`cat /etc/resolv.conf | grep nameserver | head -1 | cut -d " " -f2`} -COPYENV=${COPYENV:-1} -DEST=${DEST:-/opt/stack} -WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1} - -# Param string to pass to stack.sh. Like "EC2_DMZ_HOST=192.168.1.1 MYSQL_USER=nova" -# By default, n-vol is disabled for lxc, as iscsitarget doesn't work properly in lxc -STACKSH_PARAMS=${STACKSH_PARAMS:-"ENABLED_SERVICES=g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit"} - -# Option to use the version of devstack on which we are currently working -USE_CURRENT_DEVSTACK=${USE_CURRENT_DEVSTACK:-1} - - -# Install deps -apt-get install -y lxc debootstrap - -# Install cgroup-bin from source, since the packaging is buggy and possibly incompatible with our setup -if ! which cgdelete | grep -q cgdelete; then - apt-get install -y g++ bison flex libpam0g-dev make - wget http://sourceforge.net/projects/libcg/files/libcgroup/v0.37.1/libcgroup-0.37.1.tar.bz2/download -O /tmp/libcgroup-0.37.1.tar.bz2 - cd /tmp && bunzip2 libcgroup-0.37.1.tar.bz2 && tar xfv libcgroup-0.37.1.tar - cd libcgroup-0.37.1 - ./configure - make install - ldconfig -fi - -# Create lxc configuration -LXC_CONF=/tmp/$GUEST_NAME.conf -cat > $LXC_CONF <> $ROOTFS/etc/sudoers - -# Copy kernel modules -mkdir -p $ROOTFS/lib/modules/`uname -r`/kernel -cp -p /lib/modules/`uname -r`/modules.dep $ROOTFS/lib/modules/`uname -r`/ -cp -pR /lib/modules/`uname -r`/kernel/net $ROOTFS/lib/modules/`uname -r`/kernel/ - -# Gracefully cp only if source file/dir exists -function cp_it { - if [ -e $1 ] || [ -d $1 ]; then - cp -pRL $1 $2 - fi -} - -# Copy over your ssh keys and env if desired -if [ "$COPYENV" = "1" ]; then - cp_it ~/.ssh $ROOTFS/$DEST/.ssh - cp_it ~/.ssh/id_rsa.pub $ROOTFS/$DEST/.ssh/authorized_keys - cp_it ~/.gitconfig $ROOTFS/$DEST/.gitconfig - cp_it ~/.vimrc $ROOTFS/$DEST/.vimrc - cp_it ~/.bashrc $ROOTFS/$DEST/.bashrc -fi - -# Make our ip address hostnames look nice at the command prompt -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $ROOTFS/$DEST/.bashrc -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $ROOTFS/etc/profile - -# Give stack ownership over $DEST so it may do the work needed -chroot $ROOTFS chown -R stack $DEST - -# Configure instance network -INTERFACES=$ROOTFS/etc/network/interfaces -cat > $INTERFACES < $RUN_SH < /$DEST/run.sh.log -echo >> /$DEST/run.sh.log -echo >> /$DEST/run.sh.log -echo "All done! Time to start clicking." >> /$DEST/run.sh.log -EOF - -# Make the run.sh executable -chmod 755 $RUN_SH - -# Make runner launch on boot -RC_LOCAL=$ROOTFS/etc/init.d/local -cat > $RC_LOCAL <> $CHROOTCACHE/natty-dev/etc/sudoers -fi - -# clone git repositories onto the system -# ====================================== - -if [ ! -d $CHROOTCACHE/natty-stack ]; then - rsync -azH $CHROOTCACHE/natty-dev/ $CHROOTCACHE/natty-stack/ -fi - -# git clone only if directory doesn't exist already. Since ``DEST`` might not -# be owned by the installation user, we create the directory and change the -# ownership to the proper user. 
-function git_clone { - - # clone new copy or fetch latest changes - CHECKOUT=$CHROOTCACHE/natty-stack$2 - if [ ! -d $CHECKOUT ]; then - mkdir -p $CHECKOUT - git clone $1 $CHECKOUT - else - pushd $CHECKOUT - git fetch - popd - fi - - # FIXME(ja): checkout specified version (should works for branches and tags) - - pushd $CHECKOUT - # checkout the proper branch/tag - git checkout $3 - # force our local version to be the same as the remote version - git reset --hard origin/$3 - popd - - # give ownership to the stack user - chroot $CHROOTCACHE/natty-stack/ chown -R stack $2 -} - -git_clone $NOVA_REPO $DEST/nova $NOVA_BRANCH -git_clone $GLANCE_REPO $DEST/glance $GLANCE_BRANCH -git_clone $KEYSTONE_REPO $DEST/keystone $KEYSTONE_BRANCH -git_clone $NOVNC_REPO $DEST/novnc $NOVNC_BRANCH -git_clone $HORIZON_REPO $DEST/horizon $HORIZON_BRANCH $HORIZON_TAG -git_clone $NOVACLIENT_REPO $DEST/python-novaclient $NOVACLIENT_BRANCH -git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH - -chroot $CHROOTCACHE/natty-stack mkdir -p $DEST/files -wget -c http://images.ansolabs.com/tty.tgz -O $CHROOTCACHE/natty-stack$DEST/files/tty.tgz - -# Use this version of devstack? -if [ "$USE_CURRENT_DEVSTACK" = "1" ]; then - rm -rf $CHROOTCACHE/natty-stack/$DEST/devstack - cp -pr $CWD $CHROOTCACHE/natty-stack/$DEST/devstack -fi - -cp -pr $CHROOTCACHE/natty-stack $NFSDIR - -# set hostname -echo $NAME > $NFSDIR/etc/hostname -echo "127.0.0.1 localhost $NAME" > $NFSDIR/etc/hosts - -# injecting root's public ssh key if it exists -if [ -f /root/.ssh/id_rsa.pub ]; then - mkdir $NFSDIR/root/.ssh - chmod 700 $NFSDIR/root/.ssh - cp /root/.ssh/id_rsa.pub $NFSDIR/root/.ssh/authorized_keys -fi diff --git a/tools/build_pxe_env.sh b/tools/build_pxe_env.sh index 1ab51f89..d01dad0d 100755 --- a/tools/build_pxe_env.sh +++ b/tools/build_pxe_env.sh @@ -10,8 +10,7 @@ dpkg -l syslinux || apt-get install -y syslinux DEST_DIR=${1:-/tmp}/tftpboot -PXEDIR=${PXEDIR:-/var/cache/devstack/pxe} -OPWD=`pwd` +PXEDIR=${PXEDIR:-/opt/ramstack/pxe} PROGDIR=`dirname $0` # Clean up any resources that may be in use @@ -28,7 +27,11 @@ cleanup() { trap 2; kill -2 $$ } -trap cleanup SIGHUP SIGINT SIGTERM +trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT + +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=`cd $TOOLS_DIR/..; pwd` mkdir -p $DEST_DIR/pxelinux.cfg cd $DEST_DIR @@ -42,7 +45,7 @@ default menu.c32 prompt 0 timeout 0 -MENU TITLE PXE Boot Menu +MENU TITLE devstack PXE Boot Menu EOF @@ -54,7 +57,7 @@ fi # Get image into place if [ ! -r $PXEDIR/stack-initrd.img ]; then - cd $OPWD + cd $TOP_DIR $PROGDIR/build_ramdisk.sh $PXEDIR/stack-initrd.img fi if [ ! 
-r $PXEDIR/stack-initrd.gz ]; then @@ -110,3 +113,5 @@ LABEL local MENU LABEL ^Local disk LOCALBOOT 0 EOF + +trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh index 2c914dc7..feaa8a97 100755 --- a/tools/build_ramdisk.sh +++ b/tools/build_ramdisk.sh @@ -57,7 +57,7 @@ cd $TOP_DIR # Source params source ./stackrc -CACHEDIR=${CACHEDIR:-/var/cache/devstack} +CACHEDIR=${CACHEDIR:-/opt/stack/cache} DEST=${DEST:-/opt/stack} diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 2a578fc9..81671050 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -37,27 +37,34 @@ DEPS="kvm libvirt-bin kpartx cloud-utils curl" apt-get install -y --force-yes $DEPS || true # allow this to fail gracefully for concurrent builds # Where to store files and instances -WORK_DIR=${WORK_DIR:-/opt/kvmstack} +WORK_DIR=${WORK_DIR:-/opt/uecstack} # Where to store images image_dir=$WORK_DIR/images/$DIST_NAME mkdir -p $image_dir -# Original version of built image -uec_url=http://uec-images.ubuntu.com/$DIST_NAME/current/$DIST_NAME-server-cloudimg-amd64.tar.gz -tarball=$image_dir/$(basename $uec_url) +# Start over with a clean base image, if desired +if [ $CLEAN_BASE ]; then + rm -f $image_dir/disk +fi -# download the base uec image if we haven't already -if [ ! -f $tarball ]; then - curl $uec_url -o $tarball - (cd $image_dir && tar -Sxvzf $tarball) - resize-part-image $image_dir/*.img $GUEST_SIZE $image_dir/disk - cp $image_dir/*-vmlinuz-virtual $image_dir/kernel +# Get the base image if it does not yet exist +if [ ! -e $image_dir/disk ]; then + $TOOLS_DIR/get_uec_image.sh -r $GUEST_SIZE $DIST_NAME $image_dir/disk $image_dir/kernel fi +# Copy over dev environment if COPY_ENV is set. +# This will also copy over your current devstack. +if [ $COPY_ENV ]; then + cd $TOOLS_DIR + ./copy_dev_environment_to_uec.sh $image_dir/disk +fi -# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD`` -ROOT_PASSWORD=${ADMIN_PASSWORD:-password} +# Option to warm the base image with software requirements. +if [ $WARM_CACHE ]; then + cd $TOOLS_DIR + ./warm_apts_and_pips_for_uec.sh $image_dir/disk +fi # Name of our instance, used by libvirt GUEST_NAME=${GUEST_NAME:-devstack} @@ -178,22 +185,23 @@ cat > $vm_dir/uec/user-data< localrc < localrc <> $vm_dir/uec/user-data< $MNT_DIR/etc/network/interfaces <$MNT_DIR/etc/hostname +echo "127.0.0.1 localhost ramstack" >$MNT_DIR/etc/hosts + +# Configure the runner +RUN_SH=$MNT_DIR/$DEST/run.sh +cat > $RUN_SH < $DEST/run.sh.log +echo >> $DEST/run.sh.log +echo >> $DEST/run.sh.log +echo "All done! Time to start clicking." 
>> $DEST/run.sh.log +EOF + +# Make the run.sh executable +chmod 755 $RUN_SH +chroot $MNT_DIR chown stack $DEST/run.sh + +umount $MNT_DIR/dev +umount $MNT_DIR +rmdir $MNT_DIR +mv $DEST_FILE_TMP $DEST_FILE +rm -f $DEST_FILE_TMP + +trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT diff --git a/tools/build_usb_boot.sh b/tools/build_usb_boot.sh index e4dabc0e..cca2a681 100755 --- a/tools/build_usb_boot.sh +++ b/tools/build_usb_boot.sh @@ -7,8 +7,7 @@ # Needs to run as root DEST_DIR=${1:-/tmp/syslinux-boot} -PXEDIR=${PXEDIR:-/var/cache/devstack/pxe} -OPWD=`pwd` +PXEDIR=${PXEDIR:-/opt/ramstack/pxe} PROGDIR=`dirname $0` # Clean up any resources that may be in use @@ -29,7 +28,11 @@ cleanup() { trap 2; kill -2 $$ } -trap cleanup SIGHUP SIGINT SIGTERM +trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT + +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=`cd $TOOLS_DIR/..; pwd` if [ -b $DEST_DIR ]; then # We have a block device, install syslinux and mount it @@ -62,7 +65,7 @@ default /syslinux/menu.c32 prompt 0 timeout 0 -MENU TITLE Boot Menu +MENU TITLE devstack Boot Menu EOF @@ -74,8 +77,8 @@ fi # Get image into place if [ ! -r $PXEDIR/stack-initrd.img ]; then - cd $OPWD - $PROGDIR/build_ramdisk.sh $PXEDIR/stack-initrd.img + cd $TOP_DIR + $PROGDIR/build_uec_ramdisk.sh $PXEDIR/stack-initrd.img fi if [ ! -r $PXEDIR/stack-initrd.gz ]; then gzip -1 -c $PXEDIR/stack-initrd.img >$PXEDIR/stack-initrd.gz @@ -139,3 +142,5 @@ if [ -n "$DEST_DEV" ]; then umount $DEST_DIR rmdir $DEST_DIR fi + +trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT diff --git a/tools/setup_stack_user.sh b/tools/copy_dev_environment_to_uec.sh similarity index 86% rename from tools/setup_stack_user.sh rename to tools/copy_dev_environment_to_uec.sh index fcb97333..c949b329 100755 --- a/tools/setup_stack_user.sh +++ b/tools/copy_dev_environment_to_uec.sh @@ -44,10 +44,8 @@ chroot $STAGING_DIR useradd stack -s /bin/bash -d $DEST -G libvirtd || true echo stack:pass | chroot $STAGING_DIR chpasswd # Configure sudo -grep -q "^#includedir.*/etc/sudoers.d" $STAGING_DIR/etc/sudoers || - echo "#includedir /etc/sudoers.d" | sudo tee -a $STAGING_DIR/etc/sudoers -cp $TOP_DIR/files/sudo/* $STAGING_DIR/etc/sudoers.d/ -sed -e "s,%USER%,$USER,g" -i $STAGING_DIR/etc/sudoers.d/* +( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \ + > $STAGING_DIR/etc/sudoers.d/50_stack_sh ) # Gracefully cp only if source file/dir exists function cp_it { @@ -63,6 +61,10 @@ cp_it ~/.gitconfig $STAGING_DIR/$DEST/.gitconfig cp_it ~/.vimrc $STAGING_DIR/$DEST/.vimrc cp_it ~/.bashrc $STAGING_DIR/$DEST/.bashrc +# Copy devstack +rm -rf $STAGING_DIR/$DEST/devstack +cp_it . 
$STAGING_DIR/$DEST/devstack + # Give stack ownership over $DEST so it may do the work needed chroot $STAGING_DIR chown -R stack $DEST diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh index 7b95aab8..7fa920ec 100755 --- a/tools/get_uec_image.sh +++ b/tools/get_uec_image.sh @@ -1,14 +1,8 @@ #!/bin/bash -# get_uec_image.sh - Prepare Ubuntu images in various formats -# -# Supported formats: qcow (kvm), vmdk (vmserver), vdi (vbox), vhd (vpc), raw -# -# Required to run as root - -CACHEDIR=${CACHEDIR:-/var/cache/devstack} -FORMAT=${FORMAT:-qcow2} +# get_uec_image.sh - Prepare Ubuntu UEC images + +CACHEDIR=${CACHEDIR:-/opt/stack/cache} ROOTSIZE=${ROOTSIZE:-2000} -MIN_PKGS=${MIN_PKGS:-"apt-utils gpgv openssh-server"} # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) @@ -18,14 +12,14 @@ TOP_DIR=`cd $TOOLS_DIR/..; pwd` set -o errexit usage() { - echo "Usage: $0 - Prepare Ubuntu images" + echo "Usage: $0 - Fetch and prepare Ubuntu images" echo "" - echo "$0 [-f format] [-r rootsize] release imagefile" + echo "$0 [-r rootsize] release imagefile [kernel]" echo "" - echo "-f format - image format: qcow2 (default), vmdk, vdi, vhd, xen, raw, fs" - echo "-r size - root fs size in MB (min 2000MB)" + echo "-r size - root fs size (min 2000MB)" echo "release - Ubuntu release: jaunty - oneric" echo "imagefile - output image file" + echo "kernel - output kernel" exit 1 } @@ -38,42 +32,21 @@ cleanup() { rm -f $IMG_FILE_TMP fi - # Release NBD devices - if [ -n "$NBD" ]; then - qemu-nbd -d $NBD - fi - # Kill ourselves to signal any calling process trap 2; kill -2 $$ } -# apt-get wrapper to just get arguments set correctly -function apt_get() { - local sudo="sudo" - [ "$(id -u)" = "0" ] && sudo="env" - $sudo DEBIAN_FRONTEND=noninteractive apt-get \ - --option "Dpkg::Options::=--force-confold" --assume-yes "$@" -} - -while getopts f:hmr: c; do +while getopts hr: c; do case $c in - f) FORMAT=$OPTARG - ;; h) usage ;; - m) MINIMAL=1 - ;; r) ROOTSIZE=$OPTARG - if [[ $ROOTSIZE < 2000 ]]; then - echo "root size must be greater than 2000MB" - exit 1 - fi ;; esac done shift `expr $OPTIND - 1` -if [ ! "$#" -eq "2" ]; then +if [[ ! "$#" -eq "2" && ! "$#" -eq "3" ]]; then usage fi @@ -81,134 +54,49 @@ fi DIST_NAME=$1 IMG_FILE=$2 IMG_FILE_TMP=`mktemp $IMG_FILE.XXXXXX` - -case $FORMAT in - kvm|qcow2) FORMAT=qcow2 - QFORMAT=qcow2 - ;; - vmserver|vmdk) - FORMAT=vmdk - QFORMAT=vmdk - ;; - vbox|vdi) FORMAT=vdi - QFORMAT=vdi - ;; - vhd|vpc) FORMAT=vhd - QFORMAT=vpc - ;; - xen) FORMAT=raw - QFORMAT=raw - ;; - raw) FORMAT=raw - QFORMAT=raw - ;; - *) echo "Unknown format: $FORMAT" - usage -esac +KERNEL=$3 case $DIST_NAME in oneiric) ;; natty) ;; maverick) ;; lucid) ;; - karmic) ;; - jaunty) ;; *) echo "Unknown release: $DIST_NAME" usage ;; esac -trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT +trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT -# Check for dependencies - -if [ ! -x "`which qemu-img`" -o ! -x "`which qemu-nbd`" ]; then +# Check dependencies +if [ ! -x "`which qemu-img`" -o -z "`dpkg -l | grep cloud-utils`" ]; then # Missing KVM? - apt_get install qemu-kvm + apt_get install qemu-kvm cloud-utils fi -# Prepare the base image +# Find resize script +RESIZE=`which resize-part-image || which uec-resize-image` +if [ -z "$RESIZE" ]; then + echo "resize tool from cloud-utils not found" + exit 1 +fi # Get the UEC image UEC_NAME=$DIST_NAME-server-cloudimg-amd64 -if [ ! 
-e $CACHEDIR/$UEC_NAME-disk1.img ]; then - mkdir -p $CACHEDIR - (cd $CACHEDIR && wget -N http://uec-images.ubuntu.com/$DIST_NAME/current/$UEC_NAME-disk1.img) +if [ ! -d $CACHEDIR ]; then + mkdir -p $CACHEDIR/$DIST_NAME fi - -if [ "$FORMAT" = "qcow2" ]; then - # Just copy image - cp -p $CACHEDIR/$UEC_NAME-disk1.img $IMG_FILE_TMP -else - # Convert image - qemu-img convert -O $QFORMAT $CACHEDIR/$UEC_NAME-disk1.img $IMG_FILE_TMP +if [ ! -e $CACHEDIR/$DIST_NAME/$UEC_NAME.tar.gz ]; then + (cd $CACHEDIR/$DIST_NAME && wget -N http://uec-images.ubuntu.com/$DIST_NAME/current/$UEC_NAME.tar.gz) + (cd $CACHEDIR/$DIST_NAME && tar Sxvzf $UEC_NAME.tar.gz) fi -# Resize the image if necessary -if [ $ROOTSIZE -gt 2000 ]; then - # Resize the container - qemu-img resize $IMG_FILE_TMP +$((ROOTSIZE - 2000))M -fi - -# Finds the next available NBD device -# Exits script if error connecting or none free -# map_nbd image -# returns full nbd device path -function map_nbd { - for i in `seq 0 15`; do - if [ ! -e /sys/block/nbd$i/pid ]; then - NBD=/dev/nbd$i - # Connect to nbd and wait till it is ready - qemu-nbd -c $NBD $1 - if ! timeout 60 sh -c "while ! [ -e ${NBD}p1 ]; do sleep 1; done"; then - echo "Couldn't connect $NBD" - exit 1 - fi - break - fi - done - if [ -z "$NBD" ]; then - echo "No free NBD slots" - exit 1 - fi - echo $NBD -} +$RESIZE $CACHEDIR/$DIST_NAME/$UEC_NAME.img ${ROOTSIZE} $IMG_FILE_TMP +mv $IMG_FILE_TMP $IMG_FILE -# Set up nbd -modprobe nbd max_part=63 -NBD=`map_nbd $IMG_FILE_TMP` - -# Resize partition 1 to full size of the disk image -echo "d -n -p -1 -2 - -t -83 -a -1 -w -" | fdisk $NBD -e2fsck -f -p ${NBD}p1 -resize2fs ${NBD}p1 - -# Do some preliminary installs -MNTDIR=`mktemp -d mntXXXXXXXX` -mount -t ext4 ${NBD}p1 $MNTDIR - -# Install our required packages -cp -p files/sources.list $MNTDIR/etc/apt/sources.list -sed -e "s,%DIST%,$DIST_NAME,g" -i $MNTDIR/etc/apt/sources.list -cp -p /etc/resolv.conf $MNTDIR/etc/resolv.conf -chroot $MNTDIR apt-get update -chroot $MNTDIR apt-get install -y $MIN_PKGS -rm -f $MNTDIR/etc/resolv.conf - -umount $MNTDIR -rmdir $MNTDIR -qemu-nbd -d $NBD -NBD="" +# Copy kernel to destination +if [ -n "$KERNEL" ]; then + cp -p $CACHEDIR/$DIST_NAME/*-vmlinuz-virtual $KERNEL +fi -mv $IMG_FILE_TMP $IMG_FILE +trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT diff --git a/tools/install_openvpn.sh b/tools/install_openvpn.sh old mode 100644 new mode 100755 diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh index 5a9df47c..727b42a4 100755 --- a/tools/jenkins/configurations/kvm.sh +++ b/tools/jenkins/configurations/kvm.sh @@ -1,5 +1,9 @@ #!/bin/bash +# exit on error to stop unexpected errors +set -o errexit +set -o xtrace + EXECUTOR_NUMBER=$1 CONFIGURATION=$2 ADAPTER=$3 @@ -25,7 +29,7 @@ cd ../../.. TOP_DIR=$(pwd) # Deps -apt-get install -y --force-yes libvirt-bin +apt-get install -y --force-yes libvirt-bin || true # Name test instance based on executor BASE_NAME=executor-`printf "%02d" $EXECUTOR_NUMBER` diff --git a/tools/lxc_network_hostonlyplusnat.sh b/tools/lxc_network_hostonlyplusnat.sh deleted file mode 100755 index 4e29ed70..00000000 --- a/tools/lxc_network_hostonlyplusnat.sh +++ /dev/null @@ -1,93 +0,0 @@ -#!/bin/bash - -# Print some usage info -function usage { - echo "Usage: $0 [OPTION] [host_ip]" - echo "Set up temporary networking for LXC" - echo "" - echo " -n, --dry-run Just print the commands that would execute." 
- echo " -h, --help Print this usage message." - echo "" - exit -} - -# Allow passing the ip address on the command line. -function process_option { - case "$1" in - -h|--help) usage;; - -n|--dry-run) dry_run=1;; - *) host_ip="$1" - esac -} - -# Set up some defaults -host_ip= -dry_run=0 -bridge=br0 -DRIER= - -# Process the args -for arg in "$@"; do - process_option $arg -done - -if [ $dry_run ]; then - DRIER=echo -fi - -if [ "$UID" -ne "0" ]; then - echo "This script must be run with root privileges." - exit 1 -fi - -# Check for bridge-utils. -BRCTL=`which brctl` -if [ ! -x "$BRCTL" ]; then - echo "This script requires you to install bridge-utils." - echo "Try: sudo apt-get install bridge-utils." - exit 1 -fi - -# Scare off the nubs. -echo "=====================================================" -echo -echo "WARNING" -echo -echo "This script will modify your current network setup," -echo "this can be a scary thing and it is recommended that" -echo "you have something equivalent to physical access to" -echo "this machine before continuing in case your network" -echo "gets all funky." -echo -echo "If you don't want to continue, hit CTRL-C now." - -if [ -z "$host_ip" ]; -then - echo "Otherwise, please type in your host's ip address and" - echo "hit enter." - echo - echo "=====================================================" - read host_ip -else - echo "Otherwise hit enter." - echo - echo "=====================================================" - read accept -fi - - -# Add a bridge interface, this will choke if there is already -# a bridge named $bridge -$DRIER $BRCTL addbr $bridge -$DRIER ip addr add 192.168.1.1/24 dev $bridge -if [ $dry_run ]; then - echo "echo 1 > /proc/sys/net/ipv4/ip_forward" -else - echo 1 > /proc/sys/net/ipv4/ip_forward -fi -$DRIER ifconfig $bridge up - -# Set up the NAT for the instances -$DRIER iptables -t nat -A POSTROUTING -s 192.168.1.0/24 -j SNAT --to-source $host_ip -$DRIER iptables -I FORWARD -s 192.168.1.0/24 -j ACCEPT - diff --git a/tools/make_image.sh b/tools/make_image.sh deleted file mode 100755 index a69f5e33..00000000 --- a/tools/make_image.sh +++ /dev/null @@ -1,187 +0,0 @@ -#!/bin/bash -# make_image.sh - Create Ubuntu images in various formats -# -# Supported formats: qcow (kvm), vmdk (vmserver), vdi (vbox), vhd (vpc), raw -# -# Requires sudo to root - -ROOTSIZE=${ROOTSIZE:-8192} -SWAPSIZE=${SWAPSIZE:-1024} -MIN_PKGS=${MIN_PKGS:-"apt-utils gpgv openssh-server"} - -usage() { - echo "Usage: $0 - Create Ubuntu images" - echo "" - echo "$0 [-m] [-r rootsize] [-s swapsize] release format" - echo "$0 -C [-m] release chrootdir" - echo "$0 -I [-r rootsize] [-s swapsize] chrootdir format" - echo "" - echo "-C - Create the initial chroot dir" - echo "-I - Create the final image from a chroot" - echo "-m - minimal installation" - echo "-r size - root fs size in MB" - echo "-s size - swap fs size in MB" - echo "release - Ubuntu release: jaunty - oneric" - echo "format - image format: qcow2, vmdk, vdi, vhd, xen, raw, fs" - exit 1 -} - -while getopts CIhmr:s: c; do - case $c in - C) CHROOTONLY=1 - ;; - I) IMAGEONLY=1 - ;; - h) usage - ;; - m) MINIMAL=1 - ;; - r) ROOTSIZE=$OPTARG - ;; - s) SWAPSIZE=$OPTARG - ;; - esac -done -shift `expr $OPTIND - 1` - -if [ ! 
"$#" -eq "2" -o -n "$CHROOTONLY" -a -n "$IMAGEONLY" ]; then - usage -fi - -# Default args -RELEASE=$1 -FORMAT=$2 -CHROOTDIR="" - -if [ -n "$CHROOTONLY" ]; then - RELEASE=$1 - CHROOTDIR=$2 - FORMAT="pass" -fi - -if [ -n "$IMAGEONLY" ]; then - CHROOTDIR=$1 - FORMAT=$2 - RELEASE="pass" -fi - -# Make sure that we have the proper version of ubuntu -UBUNTU_VERSION=`cat /etc/lsb-release | grep CODENAME | sed 's/.*=//g'` -if [ "$UBUNTU_VERSION" = "natty" -a "$RELEASE" = "oneiric" ]; then - echo "natty installs can't build oneiric images" - exit 1 -fi - -case $FORMAT in - kvm|qcow2) FORMAT=qcow2 - QFORMAT=qcow2 - HYPER=kvm - ;; - vmserver|vmdk) - FORMAT=vmdk - QFORMAT=vmdk - HYPER=vmserver - ;; - vbox|vdi) FORMAT=vdi - QFORMAT=vdi - HYPER=kvm - ;; - vhd|vpc) FORMAT=vhd - QFORMAT=vpc - HYPER=kvm - ;; - xen) FORMAT=raw - QFORMAT=raw - HYPER=xen - ;; - raw) FORMAT=raw - QFORMAT=raw - HYPER=kvm - ;; - pass) ;; - *) echo "Unknown format: $FORMAT" - usage -esac - -case $RELEASE in - oneiric) ;; - natty) ;; - maverick) ;; - lucid) ;; - karmic) ;; - jaunty) ;; - pass) ;; - *) echo "Unknown release: $RELEASE" - usage - ;; -esac - -# Install stuff if necessary -if [ -z `which vmbuilder` ]; then - sudo apt-get install -y ubuntu-vm-builder -fi - -if [ -n "$CHROOTONLY" ]; then - # Build a chroot directory - HYPER=kvm - if [ "$MINIMAL" = 1 ]; then - ARGS="--variant=minbase" - for i in $MIN_PKGS; do - ARGS="$ARGS --addpkg=$i" - done - fi - sudo vmbuilder $HYPER ubuntu $ARGS \ - --suite $RELEASE \ - --only-chroot \ - --chroot-dir=$CHROOTDIR \ - --overwrite \ - --addpkg=$MIN_PKGS \ - - sudo cp -p files/sources.list $CHROOTDIR/etc/apt/sources.list - sed -e "s,%DIST%,$RELEASE,g" -i $CHROOTDIR/etc/apt/sources.list - sudo chroot $CHROOTDIR apt-get update - - exit 0 -fi - -# Build the image -TMPDIR=tmp -TMPDISK=`mktemp imgXXXXXXXX` -SIZE=$[$ROOTSIZE+$SWAPSIZE+1] -dd if=/dev/null of=$TMPDISK bs=1M seek=$SIZE count=1 - -if [ -n "$IMAGEONLY" ]; then - # Build image from chroot - sudo vmbuilder $HYPER ubuntu $ARGS \ - --existing-chroot=$CHROOTDIR \ - --overwrite \ - --rootsize=$ROOTSIZE \ - --swapsize=$SWAPSIZE \ - --tmpfs - \ - --raw=$TMPDISK \ - -else - # Do the whole shebang in one pass - ARGS="--variant=minbase" - for i in $MIN_PKGS; do - ARGS="$ARGS --addpkg=$i" - done - sudo vmbuilder $HYPER ubuntu $ARGS \ - --suite $RELEASE \ - --overwrite \ - --rootsize=$ROOTSIZE \ - --swapsize=$SWAPSIZE \ - --tmpfs - \ - --raw=$TMPDISK \ - -fi - -if [ "$FORMAT" = "raw" ]; then - # Get image - mv $TMPDISK $RELEASE.$FORMAT -else - # Convert image - qemu-img convert -O $QFORMAT $TMPDISK $RELEASE.$FORMAT - rm $TMPDISK -fi -rm -rf ubuntu-$HYPER diff --git a/tools/upload_image.sh b/tools/upload_image.sh deleted file mode 100755 index da73f16a..00000000 --- a/tools/upload_image.sh +++ /dev/null @@ -1,90 +0,0 @@ -#!/bin/bash -# upload_image.sh - Upload Ubuntu images (create if necessary) in various formats -# Supported formats: qcow (kvm), vmdk (vmserver), vdi (vbox), vhd (vpc) -# Requires sudo to root - -usage() { - echo "$0 - Upload images to OpenStack" - echo "" - echo "$0 [-h host] [-p port] release format" - exit 1 -} - -HOST=${HOST:-localhost} -PORT=${PORT:-9292} -DEST=${DEST:-/opt/stack} - -while getopts h:p: c; do - case $c in - h) HOST=$OPTARG - ;; - p) PORT=$OPTARG - ;; - esac -done -shift `expr $OPTIND - 1` - -RELEASE=$1 -FORMAT=$2 - -case $FORMAT in - kvm|qcow2) FORMAT=qcow2 - TARGET=kvm - ;; - vmserver|vmdk) - FORMAT=vmdk - TARGET=vmserver - ;; - vbox|vdi) TARGET=kvm - FORMAT=vdi - ;; - vhd|vpc) TARGET=kvm - FORMAT=vhd - ;; - 
*) echo "Unknown format: $FORMAT" - usage -esac - -case $RELEASE in - natty) ;; - maverick) ;; - lucid) ;; - karmic) ;; - jaunty) ;; - *) if [ ! -r $RELEASE.$FORMAT ]; then - echo "Unknown release: $RELEASE" - usage - fi - ;; -esac - -GLANCE=`which glance` -if [ -z "$GLANCE" ]; then - if [ -x "$DEST/glance/bin/glance" ]; then - # Look for stack.sh's install - GLANCE="$DEST/glance/bin/glance" - else - # Install Glance client in $DEST - echo "Glance not found, must install client" - OWD=`pwd` - cd $DEST - sudo apt-get install python-pip python-eventlet python-routes python-greenlet python-argparse python-sqlalchemy python-wsgiref python-pastedeploy python-xattr - sudo pip install kombu - sudo git clone https://github.com/cloudbuilders/glance.git - cd glance - sudo python setup.py develop - cd $OWD - GLANCE=`which glance` - fi -fi - -# Create image if it doesn't exist -if [ ! -r $RELEASE.$FORMAT ]; then - DIR=`dirname $0` - echo "$RELEASE.$FORMAT not found, creating..." - $DIR/make_image.sh $RELEASE $FORMAT -fi - -# Upload the image -echo "Uploading image $RELEASE.$FORMAT to $HOST" -$GLANCE add name=$RELEASE.$FORMAT is_public=true disk_format=$FORMAT --host $HOST --port $PORT <$RELEASE.$FORMAT diff --git a/tools/warm_apts_and_pips.sh b/tools/warm_apts_and_pips_for_uec.sh similarity index 100% rename from tools/warm_apts_and_pips.sh rename to tools/warm_apts_and_pips_for_uec.sh From b39ed2f496f984e0bff46d20f894a116fb7e441e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 21 Nov 2011 15:15:43 -0800 Subject: [PATCH 215/967] fix extra flags handling Change-Id: Ifecf908b1949e2afc8874f2fcccd741e34c50b8d --- stack.sh | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/stack.sh b/stack.sh index cdba5425..caa2d598 100755 --- a/stack.sh +++ b/stack.sh @@ -475,9 +475,9 @@ git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH # glance, swift middleware and nova api needs keystone middleware -if [[ "$ENABLED_SERVICES" =~ "key" || - "$ENABLED_SERVICES" =~ "g-api" || - "$ENABLED_SERVICES" =~ "n-api" || +if [[ "$ENABLED_SERVICES" =~ "key" || + "$ENABLED_SERVICES" =~ "g-api" || + "$ENABLED_SERVICES" =~ "n-api" || "$ENABLED_SERVICES" =~ "swift" ]]; then # unified auth system (manages accounts/tokens) git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH @@ -517,9 +517,9 @@ fi # setup our checkouts so they are installed into python path # allowing ``import nova`` or ``import glance.client`` -if [[ "$ENABLED_SERVICES" =~ "key" || - "$ENABLED_SERVICES" =~ "g-api" || - "$ENABLED_SERVICES" =~ "n-api" || +if [[ "$ENABLED_SERVICES" =~ "key" || + "$ENABLED_SERVICES" =~ "g-api" || + "$ENABLED_SERVICES" =~ "n-api" || "$ENABLED_SERVICES" =~ "swift" ]]; then cd $KEYSTONE_DIR; sudo python setup.py develop fi @@ -961,7 +961,7 @@ fi # You can define extra nova conf flags by defining the array EXTRA_FLAGS, # For Example: EXTRA_FLAGS=(--foo --bar=2) for I in "${EXTRA_FLAGS[@]}"; do - add_nova_flag $i + add_nova_flag $I done # XenServer From c9d9851bf333ae132e039d0e8dab3ddd8e5ea4f6 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 22 Nov 2011 11:13:56 +0100 Subject: [PATCH 216/967] Use -e to read Which use readline for interactive read command (allow to do control-a/e and such). 
Change-Id: I143d6f14dd7e853197f2e30909d4f7b33262351a --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index cdba5425..ecd0a68d 100755 --- a/stack.sh +++ b/stack.sh @@ -214,7 +214,7 @@ function read_password { echo "It is probably best to avoid spaces and weird characters." echo "If you leave this blank, a random default value will be used." echo "Enter a password now:" - read $var + read -e $var pw=${!var} if [ ! $pw ]; then pw=`openssl rand -hex 10` From 769eb1cf4592802409d9487de68ef3cd430f84b2 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 22 Nov 2011 13:04:40 +0100 Subject: [PATCH 217/967] Configure swift logging. Split logs by storage node proxy and don't fill up the /var/log/messages. Set proxy-server in debug. Only one worker by proxy-server since we are usually in debugging mode. Change-Id: If39f35b98ad821e8a62e36e4c22e723f83e01db8 --- files/swift/proxy-server.conf | 5 ++++- files/swift/rsyslog.conf | 26 ++++++++++++++++++++++++++ stack.sh | 14 +++++++++++++- 3 files changed, 43 insertions(+), 2 deletions(-) create mode 100644 files/swift/rsyslog.conf diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf index 2db6d322..1c567c30 100644 --- a/files/swift/proxy-server.conf +++ b/files/swift/proxy-server.conf @@ -1,8 +1,11 @@ [DEFAULT] bind_port = 8080 user = %USER% -log_facility = LOG_LOCAL1 swift_dir = %SWIFT_CONFIG_LOCATION% +workers = 1 +log_name = swift +log_facility = LOG_LOCAL1 +log_level = DEBUG [pipeline:main] pipeline = healthcheck cache %AUTH_SERVER% proxy-server diff --git a/files/swift/rsyslog.conf b/files/swift/rsyslog.conf new file mode 100644 index 00000000..011c893b --- /dev/null +++ b/files/swift/rsyslog.conf @@ -0,0 +1,26 @@ +# Uncomment the following to have a log containing all logs together +#local1,local2,local3,local4,local5.* %SWIFT_LOGDIR%/all.log + +# Uncomment the following to have hourly proxy logs for stats processing +#$template HourlyProxyLog,"%SWIFT_LOGDIR%/hourly/%$YEAR%%$MONTH%%$DAY%%$HOUR%" +#local1.*;local1.!notice ?HourlyProxyLog + +local1.*;local1.!notice %SWIFT_LOGDIR%/proxy.log +local1.notice %SWIFT_LOGDIR%/proxy.error +local1.* ~ + +local2.*;local2.!notice %SWIFT_LOGDIR%/storage1.log +local2.notice %SWIFT_LOGDIR%/storage1.error +local2.* ~ + +local3.*;local3.!notice %SWIFT_LOGDIR%/storage2.log +local3.notice %SWIFT_LOGDIR%/storage2.error +local3.* ~ + +local4.*;local4.!notice %SWIFT_LOGDIR%/storage3.log +local4.notice %SWIFT_LOGDIR%/storage3.error +local4.* ~ + +local5.*;local5.!notice %SWIFT_LOGDIR%/storage4.log +local5.notice %SWIFT_LOGDIR%/storage4.error +local5.* ~ diff --git a/stack.sh b/stack.sh index cdba5425..bc7af659 100755 --- a/stack.sh +++ b/stack.sh @@ -757,9 +757,10 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then USER_GROUP=$(id -g) sudo mkdir -p ${SWIFT_DATA_LOCATION}/drives - sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_LOCATION}/drives + sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_LOCATION} # We then create a loopback disk and format it to XFS. + # TODO: Reset disks on new pass. if [[ ! -e ${SWIFT_DATA_LOCATION}/drives/images/swift.img ]]; then mkdir -p ${SWIFT_DATA_LOCATION}/drives/images sudo touch ${SWIFT_DATA_LOCATION}/drives/images/swift.img @@ -853,6 +854,17 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then generate_swift_configuration container 6011 2 generate_swift_configuration account 6012 2 + + # We have some specific configuration for swift for rsyslog. See + # the file /etc/rsyslog.d/10-swift.conf for more info. 
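+    # (files/swift/rsyslog.conf above sends LOG_LOCAL1 -- the facility the
+    # proxy-server logs to -- to %SWIFT_LOGDIR%/proxy.log and proxy.error, and
+    # local2..local5 to %SWIFT_LOGDIR%/storage1..storage4 logs; %SWIFT_LOGDIR%
+    # is substituted below with ${SWIFT_DATA_LOCATION}/logs.)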
+ swift_log_dir=${SWIFT_DATA_LOCATION}/logs + rm -rf ${swift_log_dir} + mkdir -p ${swift_log_dir}/hourly + sudo chown -R syslog:adm ${swift_log_dir} + sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ + tee /etc/rsyslog.d/10-swift.conf + sudo restart rsyslog + # We create two helper scripts : # # - swift-remakerings From 9726215da3471c06a01d41a10143cf4dec3de23e Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 21 Nov 2011 18:11:18 -0600 Subject: [PATCH 218/967] Don't use NOVACMDS, as that conflicts with packages Change-Id: I7968177ceaac3a3e3c96e83b76763aa0f73e5154 --- files/sudo/nova | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/files/sudo/nova b/files/sudo/nova index 62685b31..0a79c210 100644 --- a/files/sudo/nova +++ b/files/sudo/nova @@ -1,4 +1,4 @@ -Cmnd_Alias NOVACMDS = /bin/chmod /var/lib/nova/tmp/*/root/.ssh, \ +Cmnd_Alias NOVADEVCMDS = /bin/chmod /var/lib/nova/tmp/*/root/.ssh, \ /bin/chown /var/lib/nova/tmp/*/root/.ssh, \ /bin/chown, \ /bin/chmod, \ @@ -43,5 +43,5 @@ Cmnd_Alias NOVACMDS = /bin/chmod /var/lib/nova/tmp/*/root/.ssh, \ /usr/sbin/dnsmasq, \ /usr/sbin/arping -%USER% ALL = (root) NOPASSWD: SETENV: NOVACMDS +%USER% ALL = (root) NOPASSWD: SETENV: NOVADEVCMDS From 9c7c9083a4866e4c225fe8dac7d17054c640ba03 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Wed, 23 Nov 2011 10:10:53 -0800 Subject: [PATCH 219/967] initial devstack + essex * update horizon apache config * updates apts for horizon * better logging for horizon * keystone conf settings * only add swift endpoint if it is enabled * new nova paste Change-Id: I1edacbe5d8adc4bd5265d36abcaf01ce5490aefd --- exercises/euca.sh | 4 +- files/000-default.template | 2 +- files/apts/horizon | 5 ++ files/apts/{novnc => n-vnc} | 0 files/horizon_settings.py | 74 +++++++++++++++++------------- files/keystone.conf | 40 ++++++++++++++-- files/keystone_data.sh | 8 +++- files/nova-api-paste.ini | 91 +++++++++++++++++++++---------------- files/pips/horizon | 12 +++-- stack.sh | 20 +++++--- stackrc | 14 +++--- tools/get_uec_image.sh | 1 + 12 files changed, 174 insertions(+), 97 deletions(-) rename files/apts/{novnc => n-vnc} (100%) diff --git a/exercises/euca.sh b/exercises/euca.sh index 9605ace2..f9996094 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# we will use the ``euca2ools`` cli tool that wraps the python boto +# we will use the ``euca2ools`` cli tool that wraps the python boto # library to test ec2 compatibility # @@ -22,7 +22,7 @@ source ./openrc popd # find a machine image to boot -IMAGE=`euca-describe-images | grep machine | cut -f2` +IMAGE=`euca-describe-images | grep machine | cut -f2 | head -n1` # launch it INSTANCE=`euca-run-instances $IMAGE | grep INSTANCE | cut -f2` diff --git a/files/000-default.template b/files/000-default.template index 43013dfe..1d7380d9 100644 --- a/files/000-default.template +++ b/files/000-default.template @@ -6,7 +6,7 @@ WSGIProcessGroup horizon DocumentRoot %HORIZON_DIR%/.blackhole/ - Alias /media %HORIZON_DIR%/openstack-dashboard/media + Alias /media %HORIZON_DIR%/openstack-dashboard/dashboard/static Alias /vpn /opt/stack/vpn diff --git a/files/apts/horizon b/files/apts/horizon index 6f145e15..aa08a316 100644 --- a/files/apts/horizon +++ b/files/apts/horizon @@ -13,3 +13,8 @@ pylint pep8 python-eventlet python-nose +python-sphinx +python-mox +python-kombu +python-coverage +python-cherrypy3 # why? 
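The per-service lists under files/apts/ (such as the horizon list just above) are plain text, one package per line, with optional trailing "# comment" notes. The helper that actually installs them is not part of this hunk, so the following is only an illustrative sketch of how such a list can be consumed:

    # strip inline "#" comments and blank lines, then hand the result to apt-get
    apt-get install -y $(cut -d'#' -f1 files/apts/horizon | grep -v '^[[:space:]]*$')
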
diff --git a/files/apts/novnc b/files/apts/n-vnc similarity index 100% rename from files/apts/novnc rename to files/apts/n-vnc diff --git a/files/horizon_settings.py b/files/horizon_settings.py index 3a17db2c..05ddfe7b 100644 --- a/files/horizon_settings.py +++ b/files/horizon_settings.py @@ -12,23 +12,13 @@ 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(LOCAL_PATH, 'dashboard_openstack.sqlite3'), + 'TEST_NAME': os.path.join(LOCAL_PATH, 'test.sqlite3'), }, } +# The default values for these two settings seem to cause issues with apache CACHE_BACKEND = 'dummy://' - -# Add apps to horizon installation. -INSTALLED_APPS = ( - 'dashboard', - 'django.contrib.contenttypes', - 'django.contrib.sessions', - 'django.contrib.messages', - 'django.contrib.staticfiles', - 'django_openstack', - 'django_openstack.templatetags', - 'mailer', -) - +SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db' # Send email to the console by default EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' @@ -44,32 +34,40 @@ # EMAIL_HOST_USER = 'djangomail' # EMAIL_HOST_PASSWORD = 'top-secret!' -# FIXME: This needs to be changed to allow for multi-node setup. -OPENSTACK_KEYSTONE_URL = "http://localhost:5000/v2.0/" -OPENSTACK_KEYSTONE_ADMIN_URL = "http://localhost:35357/v2.0" +HORIZON_CONFIG = { + 'dashboards': ('nova', 'syspanel', 'settings',), + 'default_dashboard': 'nova', + 'user_home': 'dashboard.views.user_home', +} + +OPENSTACK_HOST = "127.0.0.1" +OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST +# FIXME: this is only needed until keystone fixes its GET /tenants call +# so that it doesn't return everything for admins +OPENSTACK_KEYSTONE_ADMIN_URL = "http://%s:35357/v2.0" % OPENSTACK_HOST OPENSTACK_KEYSTONE_DEFAULT_ROLE = "Member" -# NOTE(tres): Available services should come from the service -# catalog in Keystone. -SWIFT_ENABLED = False +SWIFT_PAGINATE_LIMIT = 100 # Configure quantum connection details for networking QUANTUM_ENABLED = False -QUANTUM_URL = '127.0.0.1' +QUANTUM_URL = '%s' % OPENSTACK_HOST QUANTUM_PORT = '9696' QUANTUM_TENANT = '1234' QUANTUM_CLIENT_VERSION='0.1' -# No monitoring links currently -EXTERNAL_MONITORING = [] +# If you have external monitoring links, eg: +# EXTERNAL_MONITORING = [ +# ['Nagios','http://foo.com'], +# ['Ganglia','http://bar.com'], +# ] -# Uncomment the following segment to silence most logging -# django.db and boto DEBUG logging is extremely verbose. #LOGGING = { # 'version': 1, -# # set to True will disable all logging except that specified, unless -# # nothing is specified except that django.db.backends will still log, -# # even when set to True, so disable explicitly +# # When set to True this will disable all logging except +# # for loggers specified in this configuration dictionary. Note that +# # if nothing is specified here and disable_existing_loggers is True, +# # django.db.backends will still log unless it is disabled explicitly. # 'disable_existing_loggers': False, # 'handlers': { # 'null': { @@ -77,20 +75,34 @@ # 'class': 'django.utils.log.NullHandler', # }, # 'console': { -# 'level': 'DEBUG', +# # Set the level to "DEBUG" for verbose output logging. 
+# 'level': 'INFO', # 'class': 'logging.StreamHandler', # }, # }, # 'loggers': { -# # Comment or Uncomment these to turn on/off logging output +# # Logging from django.db.backends is VERY verbose, send to null +# # by default. # 'django.db.backends': { # 'handlers': ['null'], # 'propagate': False, # }, -# 'django_openstack': { -# 'handlers': ['null'], +# 'horizon': { +# 'handlers': ['console'], # 'propagate': False, # }, +# 'novaclient': { +# 'handlers': ['console'], +# 'propagate': False, +# }, +# 'keystoneclient': { +# 'handlers': ['console'], +# 'propagate': False, +# }, +# 'nose.plugins.manager': { +# 'handlers': ['console'], +# 'propagate': False, +# } # } #} diff --git a/files/keystone.conf b/files/keystone.conf index 687273b4..0c0d0e26 100644 --- a/files/keystone.conf +++ b/files/keystone.conf @@ -25,6 +25,9 @@ service-header-mappings = { 'swift' : 'X-Storage-Url', 'cdn' : 'X-CDN-Management-Url'} +#List of extensions currently supported +extensions= osksadm,oskscatalog + # Address to bind the API server # TODO Properties defined within app not available via pipeline. service_host = 0.0.0.0 @@ -32,23 +35,47 @@ service_host = 0.0.0.0 # Port the bind the API server to service_port = 5000 +# SSL for API server +service_ssl = False + # Address to bind the Admin API server admin_host = 0.0.0.0 # Port the bind the Admin API server to admin_port = 35357 +# SSL for API Admin server +admin_ssl = False + +# Keystone certificate file (modify as needed) +# Only required if *_ssl is set to True +certfile = /etc/keystone/ssl/certs/keystone.pem + +# Keystone private key file (modify as needed) +# Only required if *_ssl is set to True +keyfile = /etc/keystone/ssl/private/keystonekey.pem + +# Keystone trusted CA certificates (modify as needed) +# Only required if *_ssl is set to True +ca_certs = /etc/keystone/ssl/certs/ca.pem + +# Client certificate required +# Only relevant if *_ssl is set to True +cert_required = True + #Role that allows to perform admin operations. -keystone-admin-role = KeystoneAdmin +keystone-admin-role = Admin #Role that allows to perform service admin operations. keystone-service-admin-role = KeystoneServiceAdmin +#Tells whether password user need to be hashed in the backend +hash-password = True + [keystone.backends.sqlalchemy] # SQLAlchemy connection string for the reference implementation registry # server. Any valid SQLAlchemy connection string is fine. 
# See: http://bit.ly/ideIpI -#sql_connection = sqlite:///keystone.db sql_connection = %SQL_CONN% backend_entities = ['UserRoleAssociation', 'Endpoints', 'Role', 'Tenant', 'User', 'Credentials', 'EndpointTemplates', 'Token', @@ -60,12 +87,12 @@ sql_idle_timeout = 30 [pipeline:admin] pipeline = - urlrewritefilter - admin_api + urlrewritefilter + admin_api [pipeline:keystone-legacy-auth] pipeline = - urlrewritefilter + urlrewritefilter legacy_auth RAX-KEY-extension service_api @@ -84,3 +111,6 @@ paste.filter_factory = keystone.frontends.legacy_token_auth:filter_factory [filter:RAX-KEY-extension] paste.filter_factory = keystone.contrib.extensions.service.raxkey.frontend:filter_factory + +[filter:debug] +paste.filter_factory = keystone.common.wsgi:debug_filter_factory diff --git a/files/keystone_data.sh b/files/keystone_data.sh index d926c52d..be2d5767 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -30,13 +30,17 @@ $BIN_DIR/keystone-manage $* role grant KeystoneServiceAdmin admin $BIN_DIR/keystone-manage $* service add nova compute "Nova Compute Service" $BIN_DIR/keystone-manage $* service add glance image "Glance Image Service" $BIN_DIR/keystone-manage $* service add keystone identity "Keystone Identity Service" -$BIN_DIR/keystone-manage $* service add swift object-store "Swift Service" +if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then + $BIN_DIR/keystone-manage $* service add swift object-store "Swift Service" +fi #endpointTemplates $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne nova http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% 1 1 $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne glance http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% 1 1 $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%HOST_IP%:5000/v2.0 http://%HOST_IP%:35357/v2.0 http://%HOST_IP%:5000/v2.0 1 1 -$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%HOST_IP%:8080/v1/AUTH_%tenant_id% http://%HOST_IP%:8080/ http://%HOST_IP%:8080/v1/AUTH_%tenant_id% 1 1 +if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then + $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%HOST_IP%:8080/v1/AUTH_%tenant_id% http://%HOST_IP%:8080/ http://%HOST_IP%:8080/v1/AUTH_%tenant_id% 1 1 +fi # Tokens $BIN_DIR/keystone-manage $* token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00 diff --git a/files/nova-api-paste.ini b/files/nova-api-paste.ini index 2c642f8d..7f27fdcb 100644 --- a/files/nova-api-paste.ini +++ b/files/nova-api-paste.ini @@ -1,34 +1,54 @@ +############ +# Metadata # +############ +[composite:metadata] +use = egg:Paste#urlmap +/: metaversions +/latest: meta +/2007-01-19: meta +/2007-03-01: meta +/2007-08-29: meta +/2007-10-10: meta +/2007-12-15: meta +/2008-02-01: meta +/2008-09-01: meta +/2009-04-04: meta + +[pipeline:metaversions] +pipeline = ec2faultwrap logrequest metaverapp + +[pipeline:meta] +pipeline = ec2faultwrap logrequest metaapp + +[app:metaverapp] +paste.app_factory = nova.api.metadata.handler:Versions.factory + +[app:metaapp] +paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory + ####### # EC2 # ####### [composite:ec2] use = egg:Paste#urlmap -/: ec2versions /services/Cloud: ec2cloud /services/Admin: ec2admin -/latest: ec2metadata -/2007-01-19: ec2metadata -/2007-03-01: ec2metadata -/2007-08-29: ec2metadata 
-/2007-10-10: ec2metadata -/2007-12-15: ec2metadata -/2008-02-01: ec2metadata -/2008-09-01: ec2metadata -/2009-04-04: ec2metadata -/1.0: ec2metadata [pipeline:ec2cloud] -pipeline = logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor +pipeline = ec2faultwrap logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor [pipeline:ec2admin] -pipeline = logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor +pipeline = ec2faultwrap logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor [pipeline:ec2metadata] -pipeline = logrequest ec2md +pipeline = ec2faultwrap logrequest ec2md [pipeline:ec2versions] -pipeline = logrequest ec2ver +pipeline = ec2faultwrap logrequest ec2ver + +[filter:ec2faultwrap] +paste.filter_factory = nova.api.ec2:FaultWrapper.factory [filter:logrequest] paste.filter_factory = nova.api.ec2:RequestLogging.factory @@ -59,54 +79,45 @@ paste.filter_factory = nova.api.ec2:Authorizer.factory [app:ec2executor] paste.app_factory = nova.api.ec2:Executor.factory -[app:ec2ver] -paste.app_factory = nova.api.ec2:Versions.factory - -[app:ec2md] -paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.factory - ############# # Openstack # ############# [composite:osapi] -use = egg:Paste#urlmap +use = call:nova.api.openstack.v2.urlmap:urlmap_factory /: osversions -/v1.0: openstackapi10 -/v1.1: openstackapi11 +/v1.1: openstack_api_v2 +/v2: openstack_api_v2 -[pipeline:openstackapi10] -pipeline = faultwrap authtoken keystonecontext ratelimit osapiapp10 - -[pipeline:openstackapi11] -pipeline = faultwrap authtoken keystonecontext ratelimit extensions osapiapp11 +[pipeline:openstack_api_v2] +pipeline = faultwrap authtoken keystonecontext ratelimit serialize extensions osapi_app_v2 [filter:faultwrap] -paste.filter_factory = nova.api.openstack:FaultWrapper.factory +paste.filter_factory = nova.api.openstack.v2:FaultWrapper.factory [filter:auth] -paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory +paste.filter_factory = nova.api.openstack.v2.auth:AuthMiddleware.factory [filter:noauth] -paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory +paste.filter_factory = nova.api.openstack.v2.auth:NoAuthMiddleware.factory [filter:ratelimit] -paste.filter_factory = nova.api.openstack.limits:RateLimitingMiddleware.factory +paste.filter_factory = nova.api.openstack.v2.limits:RateLimitingMiddleware.factory -[filter:extensions] -paste.filter_factory = nova.api.openstack.extensions:ExtensionMiddleware.factory +[filter:serialize] +paste.filter_factory = nova.api.openstack.wsgi:LazySerializationMiddleware.factory -[app:osapiapp10] -paste.app_factory = nova.api.openstack:APIRouterV10.factory +[filter:extensions] +paste.filter_factory = nova.api.openstack.v2.extensions:ExtensionMiddleware.factory -[app:osapiapp11] -paste.app_factory = nova.api.openstack:APIRouterV11.factory +[app:osapi_app_v2] +paste.app_factory = nova.api.openstack.v2:APIRouter.factory [pipeline:osversions] pipeline = faultwrap osversionapp [app:osversionapp] -paste.app_factory = nova.api.openstack.versions:Versions.factory +paste.app_factory = nova.api.openstack.v2.versions:Versions.factory ########## # Shared # diff --git a/files/pips/horizon b/files/pips/horizon index 672fbee4..5a214de4 100644 --- a/files/pips/horizon +++ b/files/pips/horizon @@ -1,9 +1,15 @@ Django==1.3 -django-nose==0.1.2 django-mailer +django-nose==0.1.2 +django-nose-selenium django-registration==0.7 -python-cloudfiles 
+glance==2011.3 +pycrypto==2.3 +quantum sqlalchemy-migrate +python-cloudfiles +-e git+https://github.com/cloudbuilders/openstackx.git#egg=openstackx -e git+https://github.com/jacobian/openstack.compute.git#egg=openstack - +-e git+https://github.com/rackspace/python-novaclient.git#egg=python-novaclient +-e git+https://github.com/4P/python-keystoneclient.git#egg=python-keystoneclient diff --git a/stack.sh b/stack.sh index caa2d598..be34a86a 100755 --- a/stack.sh +++ b/stack.sh @@ -537,7 +537,7 @@ if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then cd $OPENSTACKX_DIR; sudo python setup.py develop fi if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then - cd $HORIZON_DIR/django-openstack; sudo python setup.py develop + cd $HORIZON_DIR/horizon; sudo python setup.py develop cd $HORIZON_DIR/openstack-dashboard; sudo python setup.py develop fi if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then @@ -618,7 +618,13 @@ if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then # ``local_settings.py`` is used to override horizon default settings. - cp $FILES/horizon_settings.py $HORIZON_DIR/openstack-dashboard/local/local_settings.py + local_settings=$HORIZON_DIR/openstack-dashboard/local/local_settings.py + cp $FILES/horizon_settings.py $local_settings + + # Enable quantum in dashboard, if requested + if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then + sudo sed -e "s,QUANTUM_ENABLED = False,QUANTUM_ENABLED = True,g" -i $local_settings + fi # Initialize the horizon database (it stores sessions and notices shown to # users). The user system is external (keystone). @@ -934,10 +940,12 @@ add_nova_flag "--vlan_interface=$VLAN_INTERFACE" add_nova_flag "--sql_connection=$BASE_SQL_CONN/nova" add_nova_flag "--libvirt_type=$LIBVIRT_TYPE" if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then - add_nova_flag "--osapi_extensions_path=$OPENSTACKX_DIR/extensions" + add_nova_flag "--osapi_extension=nova.api.openstack.v2.contrib.standard_extensions" + add_nova_flag "--osapi_extension=extensions.admin.Admin" fi if [[ "$ENABLED_SERVICES" =~ "n-vnc" ]]; then - add_nova_flag "--vncproxy_url=http://$HOST_IP:6080" + VNCPROXY_URL=${VNCPROXY_URL:-"http://$HOST_IP:6080"} + add_nova_flag "--vncproxy_url=$VNCPROXY_URL" add_nova_flag "--vncproxy_wwwroot=$NOVNC_DIR/" fi add_nova_flag "--api_paste_config=$NOVA_DIR/bin/nova-api-paste.ini" @@ -1008,7 +1016,7 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;' mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone;' - # FIXME (anthony) keystone should use keystone.conf.example + # Configure keystone.conf KEYSTONE_CONF=$KEYSTONE_DIR/etc/keystone.conf cp $FILES/keystone.conf $KEYSTONE_CONF sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/keystone,g" -i $KEYSTONE_CONF @@ -1021,7 +1029,7 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $KEYSTONE_DATA sudo sed -e "s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g" -i $KEYSTONE_DATA # initialize keystone with default users/endpoints - BIN_DIR=$KEYSTONE_DIR/bin bash $KEYSTONE_DATA + ENABLED_SERVICES=$ENABLED_SERVICES BIN_DIR=$KEYSTONE_DIR/bin bash $KEYSTONE_DATA fi diff --git a/stackrc b/stackrc index 6a56a2ab..7da7258c 100644 --- a/stackrc +++ b/stackrc @@ -1,10 +1,10 @@ # compute service NOVA_REPO=https://github.com/openstack/nova.git 
-NOVA_BRANCH=stable/diablo +NOVA_BRANCH=master # storage service SWIFT_REPO=https://github.com/openstack/swift.git -SWIFT_BRANCH=stable/diablo +SWIFT_BRANCH=master # swift and keystone integration SWIFT_KEYSTONE_REPO=https://github.com/cloudbuilders/swift-keystone2.git @@ -12,7 +12,7 @@ SWIFT_KEYSTONE_BRANCH=master # image catalog service GLANCE_REPO=https://github.com/openstack/glance.git -GLANCE_BRANCH=stable/diablo +GLANCE_BRANCH=master # unified auth system (manages accounts/tokens) KEYSTONE_REPO=https://github.com/openstack/keystone.git @@ -20,11 +20,11 @@ KEYSTONE_BRANCH=stable/diablo # a websockets/html5 or flash powered VNC console for vm instances NOVNC_REPO=https://github.com/cloudbuilders/noVNC.git -NOVNC_BRANCH=diablo +NOVNC_BRANCH=master # django powered web control panel for openstack HORIZON_REPO=https://github.com/openstack/horizon.git -HORIZON_BRANCH=stable/diablo +HORIZON_BRANCH=master # python client library to nova that horizon (and others) use NOVACLIENT_REPO=https://github.com/openstack/python-novaclient.git @@ -33,11 +33,11 @@ NOVACLIENT_BRANCH=master # openstackx is a collection of extensions to openstack.compute & nova # that is *deprecated*. The code is being moved into python-novaclient & nova. OPENSTACKX_REPO=https://github.com/cloudbuilders/openstackx.git -OPENSTACKX_BRANCH=diablo +OPENSTACKX_BRANCH=master # quantum service QUANTUM_REPO=https://github.com/openstack/quantum -QUANTUM_BRANCH=stable/diablo +QUANTUM_BRANCH=master # CI test suite CITEST_REPO=https://github.com/openstack/openstack-integration-tests.git diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh index 7fa920ec..f66f2bc2 100755 --- a/tools/get_uec_image.sh +++ b/tools/get_uec_image.sh @@ -10,6 +10,7 @@ TOP_DIR=`cd $TOOLS_DIR/..; pwd` # exit on error to stop unexpected errors set -o errexit +set -o xtrace usage() { echo "Usage: $0 - Fetch and prepare Ubuntu images" From 073d17d07273fcfac2a3c9e4abf811719bc37fe2 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 23 Nov 2011 12:50:46 -0800 Subject: [PATCH 220/967] Update README.md to let users know how to access stable/[milestone] versions of devstack. Other text cleanup as well. Change-Id: Ic40106e40f65289cd276cb680509e0ff62745aa0 --- README.md | 43 ++++++++++++++++++++++++------------------- 1 file changed, 24 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index 8b2b0384..a185f34f 100644 --- a/README.md +++ b/README.md @@ -1,37 +1,42 @@ -Tool to quickly deploy openstack dev environments. +Devstack is a set of scripts and utilities to quickly deploy an OpenStack cloud. # Goals -* To quickly build dev openstack environments in clean oneiric environments -* To describe working configurations of openstack (which code branches work together? what do config files look like for those branches?) 
-* To make it easier for developers to dive into openstack so that they can productively contribute without having to understand every part of the system at once +* To quickly build dev OpenStack environments in a clean oneiric environment +* To describe working configurations of OpenStack (which code branches work together? what do config files look like for those branches?) +* To make it easier for developers to dive into OpenStack so that they can productively contribute without having to understand every part of the system at once * To make it easy to prototype cross-project features Read more at http://devstack.org (built from the gh-pages branch) -Be sure to carefully read these scripts before you run them as they install software and may alter your networking configuration. +IMPORTANT: Be sure to carefully read stack.sh and any other scripts you execute before you run them, as they install software and may alter your networking configuration. We strongly recommend that you run stack.sh in a clean and disposable vm when you are first getting started. -# To start a dev cloud on your local machine (installing on a dedicated vm is safer!): +# Versions +The devstack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[mil +estone]. For example, you can do the following to create a diablo OpenStack cloud: + + git checkout stable/diablo ./stack.sh -If working correctly, you should be able to access openstack endpoints, like: +# To start a dev cloud (Installing in a dedicated, disposable vm is safer than installing on your dev machine!): -* Horizon: http://myhost/ -* Keystone: http://myhost:5000/v2.0/ + ./stack.sh -# Customizing +When the script finishes executing, you should be able to access OpenStack endpoints, like so: -You can tweak environment variables by creating file name 'localrc' should you need to override defaults. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host. +* Horizon: http://myhost/ +* Keystone: http://myhost:5000/v2.0/ -# Todo +We also provide an environment file that you can use to interact with your cloud via CLI: -* Add python-novaclient cli support -* syslog -* Add volume support -* Add quantum support + # source openrc file to load your environment with osapi and ec2 creds + . openrc + # list instances + nova list + # list instances using ec2 api + euca-describe-instances -# Future +# Customizing -* idea: move from screen to tmux? -* idea: create a live-cd / vmware preview image using this? +You can override environment variables used in stack.sh by creating file name 'localrc'. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host. 
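A minimal localrc of the kind the README describes -- using only variable names that appear elsewhere in these scripts, with placeholder values -- might look like:

    # credentials consumed by stack.sh and the keystone data script
    ADMIN_PASSWORD=supersecret
    MYSQL_PASSWORD=iheartdatabases
    SERVICE_TOKEN=tokentoken
    # the service list used elsewhere in these scripts; trim to taste
    ENABLED_SERVICES=g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit

Anything set here simply overrides the corresponding default in stack.sh, so the file can stay as small as the deployment allows.
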
From 44d8f8f826a962eabcd219316d9f5e9650a807c3 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 23 Nov 2011 23:21:06 -0600 Subject: [PATCH 221/967] Check out integration tests and install prereqs Change-Id: I2a70a91c69005051f149a247faef42cfa88856a0 --- tools/build_ci_config.sh | 51 +++++++++++++++++++++++++++++++++++----- 1 file changed, 45 insertions(+), 6 deletions(-) diff --git a/tools/build_ci_config.sh b/tools/build_ci_config.sh index 8eed8eca..f627f435 100755 --- a/tools/build_ci_config.sh +++ b/tools/build_ci_config.sh @@ -6,18 +6,14 @@ function usage { echo "$0 - Build config.ini for openstack-integration-tests" echo "" - echo "Usage: $0 configdir" + echo "Usage: $0 [configdir]" exit 1 } -if [ ! "$#" -eq "1" ]; then +if [ "$1" = "-h" ]; then usage fi -CONFIG_DIR=$1 -CONFIG_CONF=$CONFIG_DIR/storm.conf -CONFIG_INI=$CONFIG_DIR/config.ini - # Clean up any resources that may be in use cleanup() { set +o errexit @@ -53,8 +49,51 @@ source ./stackrc # Where Openstack code lives DEST=${DEST:-/opt/stack} +CITEST_DIR=$DEST/openstack-integration-tests + +CONFIG_DIR=${1:-$CITEST_DIR/etc} +CONFIG_CONF=$CONFIG_DIR/storm.conf +CONFIG_INI=$CONFIG_DIR/config.ini + DIST_NAME=${DIST_NAME:-oneiric} +# git clone only if directory doesn't exist already. Since ``DEST`` might not +# be owned by the installation user, we create the directory and change the +# ownership to the proper user. +function git_clone { + + GIT_REMOTE=$1 + GIT_DEST=$2 + GIT_BRANCH=$3 + + # do a full clone only if the directory doesn't exist + if [ ! -d $GIT_DEST ]; then + git clone $GIT_REMOTE $GIT_DEST + cd $2 + # This checkout syntax works for both branches and tags + git checkout $GIT_BRANCH + elif [[ "$RECLONE" == "yes" ]]; then + # if it does exist then simulate what clone does if asked to RECLONE + cd $GIT_DEST + # set the url to pull from and fetch + git remote set-url origin $GIT_REMOTE + git fetch origin + # remove the existing ignored files (like pyc) as they cause breakage + # (due to the py files having older timestamps than our pyc, so python + # thinks the pyc files are correct using them) + find $GIT_DEST -name '*.pyc' -delete + git checkout -f origin/$GIT_BRANCH + # a local branch might not exist + git branch -D $GIT_BRANCH || true + git checkout -b $GIT_BRANCH + fi +} + +# Install tests and prerequisites +sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors `cat $TOP_DIR/files/pips/openstack-integration-tests` + +git_clone $CITEST_REPO $CITEST_DIR $CITEST_BRANCH + if [ ! 
-f $DEST/.ramdisk ]; then # Process network configuration vars GUEST_NETWORK=${GUEST_NETWORK:-1} From f6811ee06e394dc2ab26e08b6a0857737f2981f7 Mon Sep 17 00:00:00 2001 From: Brad Hall Date: Tue, 29 Nov 2011 06:36:03 +0000 Subject: [PATCH 222/967] Fixes to work with Quantum trunk (also, added dhcp support if we're using ovs) Change-Id: I2f3ee8fb3b02551ca99bafaf9d1ea2b9af3aa164 --- stack.sh | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index df70a8d5..f4ed7dbc 100755 --- a/stack.sh +++ b/stack.sh @@ -927,6 +927,8 @@ if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then if [[ "$Q_PLUGIN" = "openvswitch" ]]; then add_nova_flag "--libvirt_vif_type=ethernet" add_nova_flag "--libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver" + add_nova_flag "--linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver" + add_nova_flag "--quantum-use-dhcp" fi else add_nova_flag "--network_manager=nova.network.manager.$NET_MAN" @@ -1113,10 +1115,10 @@ if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then fi fi - QUANTUM_PLUGIN_INI_FILE=$QUANTUM_DIR/quantum/plugins.ini + QUANTUM_PLUGIN_INI_FILE=$QUANTUM_DIR/etc/plugins.ini # Make sure we're using the openvswitch plugin sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE - screen_it q-svc "cd $QUANTUM_DIR && export PYTHONPATH=.:$PYTHONPATH; python $QUANTUM_DIR/bin/quantum $QUANTUM_DIR/etc/quantum.conf" + screen_it q-svc "cd $QUANTUM_DIR && PYTHONPATH=.:$PYTHONPATH python $QUANTUM_DIR/bin/quantum-server $QUANTUM_DIR/etc/quantum.conf" fi # Quantum agent (for compute nodes) @@ -1130,7 +1132,7 @@ if [[ "$ENABLED_SERVICES" =~ "q-agt" ]]; then fi # Start up the quantum <-> openvswitch agent - screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_DIR/quantum/plugins/openvswitch/ovs_quantum_plugin.ini -v" + screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini -v" fi # If we're using Quantum (i.e. 
q-svc is enabled), network creation has to From eacd755212309a4807dc6f2c60cffc096c0b17c3 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Tue, 29 Nov 2011 12:46:54 -0800 Subject: [PATCH 223/967] these deps should come via git_clone Change-Id: I98ab1dc6a25076be9653c0b79d4d903ea9a04e39 --- files/pips/horizon | 3 --- 1 file changed, 3 deletions(-) diff --git a/files/pips/horizon b/files/pips/horizon index 5a214de4..dddf0110 100644 --- a/files/pips/horizon +++ b/files/pips/horizon @@ -3,13 +3,10 @@ django-mailer django-nose==0.1.2 django-nose-selenium django-registration==0.7 -glance==2011.3 pycrypto==2.3 -quantum sqlalchemy-migrate python-cloudfiles -e git+https://github.com/cloudbuilders/openstackx.git#egg=openstackx -e git+https://github.com/jacobian/openstack.compute.git#egg=openstack --e git+https://github.com/rackspace/python-novaclient.git#egg=python-novaclient -e git+https://github.com/4P/python-keystoneclient.git#egg=python-keystoneclient From f44e98d1c7baf7dde9b56c559bb372cf63ffa267 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 29 Nov 2011 17:39:51 -0600 Subject: [PATCH 224/967] Set sane defaults, get config info from localrc Change-Id: If8f942723c5e796207f3caf15a65c8501cd63d83 --- tools/install_openvpn.sh | 58 +++++++++++++++++++++++++++++----------- 1 file changed, 43 insertions(+), 15 deletions(-) diff --git a/tools/install_openvpn.sh b/tools/install_openvpn.sh index ff88a065..44eee728 100755 --- a/tools/install_openvpn.sh +++ b/tools/install_openvpn.sh @@ -11,24 +11,41 @@ # --client mode creates a tarball of a client configuration for this server # Get config file -if [ -e localrc.vpn ]; then - . localrc.vpn +if [ -e localrc ]; then + . localrc fi +if [ -e vpnrc ]; then + . vpnrc +fi + +# Do some IP manipulation +function cidr2netmask() { + set -- $(( 5 - ($1 / 8) )) 255 255 255 255 $(( (255 << (8 - ($1 % 8))) & 255 )) 0 0 0 + if [[ $1 -gt 1 ]]; then + shift $1 + else + shift + fi + echo ${1-0}.${2-0}.${3-0}.${4-0} +} + +FIXED_NET=`echo $FIXED_RANGE | cut -d'/' -f1` +FIXED_CIDR=`echo $FIXED_RANGE | cut -d'/' -f2` +FIXED_MASK=`cidr2netmask $FIXED_CIDR` # VPN Config VPN_SERVER=${VPN_SERVER:-`ifconfig eth0 | awk "/inet addr:/ { print \$2 }" | cut -d: -f2`} # 50.56.12.212 VPN_PROTO=${VPN_PROTO:-tcp} VPN_PORT=${VPN_PORT:-6081} -VPN_DEV=${VPN_DEV:-tun} -VPN_BRIDGE=${VPN_BRIDGE:-br0} -VPN_CLIENT_NET=${VPN_CLIENT_NET:-172.16.28.0} -VPN_CLIENT_MASK=${VPN_CLIENT_MASK:-255.255.255.0} -VPN_CLIENT_DHCP="${VPN_CLIENT_DHCP:-172.16.28.1 172.16.28.254}" -VPN_LOCAL_NET=${VPN_LOCAL_NET:-10.0.0.0} -VPN_LOCAL_MASK=${VPN_LOCAL_MASK:-255.255.0.0} +VPN_DEV=${VPN_DEV:-tap0} +VPN_BRIDGE=${VPN_BRIDGE:-br100} +VPN_BRIDGE_IF=${VPN_BRIDGE_IF:-$FLAT_INTERFACE} +VPN_CLIENT_NET=${VPN_CLIENT_NET:-$FIXED_NET} +VPN_CLIENT_MASK=${VPN_CLIENT_MASK:-$FIXED_MASK} +VPN_CLIENT_DHCP="${VPN_CLIENT_DHCP:-net.1 net.254}" VPN_DIR=/etc/openvpn -CA_DIR=/etc/openvpn/easy-rsa +CA_DIR=$VPN_DIR/easy-rsa usage() { echo "$0 - OpenVPN install and certificate generation" @@ -54,7 +71,16 @@ if [ ! -d $CA_DIR ]; then cp -pR /usr/share/doc/openvpn/examples/easy-rsa/2.0/ $CA_DIR fi -OPWD=`pwd` +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $TOOLS_DIR/.. && pwd) + +WEB_DIR=$TOP_DIR/../vpn +if [[ ! 
-d $WEB_DIR ]]; then + mkdir -p $WEB_DIR +fi +WEB_DIR=$(cd $TOP_DIR/../vpn && pwd) + cd $CA_DIR source ./vars @@ -87,6 +113,10 @@ do_server() { BR="$VPN_BRIDGE" TAP="\$1" +if [[ ! -d /sys/class/net/\$BR ]]; then + brctl addbr \$BR +fi + for t in \$TAP; do openvpn --mktun --dev \$t brctl addif \$BR \$t @@ -117,10 +147,8 @@ key $NAME.key # This file should be kept secret ca ca.crt dh dh1024.pem duplicate-cn -#server $VPN_CLIENT_NET $VPN_CLIENT_MASK server-bridge $VPN_CLIENT_NET $VPN_CLIENT_MASK $VPN_CLIENT_DHCP ifconfig-pool-persist ipp.txt -push "route $VPN_LOCAL_NET $VPN_LOCAL_MASK" comp-lzo user nobody group nogroup @@ -163,9 +191,9 @@ persist-tun comp-lzo verb 3 EOF - (cd $TMP_DIR; tar cf $OPWD/$NAME.tar *) + (cd $TMP_DIR; tar cf $WEB_DIR/$NAME.tar *) rm -rf $TMP_DIR - echo "Client certificate and configuration is in $OPWD/$NAME.tar" + echo "Client certificate and configuration is in $WEB_DIR/$NAME.tar" } # Process command line args From 628f58f7a695e8a9bd67cbc4927b2ad6d1d9ee07 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 30 Nov 2011 17:44:10 -0600 Subject: [PATCH 225/967] Fix glance config A change to glance-api.conf hit master this morning, need to update our local copy to match. Change-Id: I2bd3d6fe6a268164aea12ea3a3b45a301f47ccc9 --- files/glance-api.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/glance-api.conf b/files/glance-api.conf index bb758afb..1bbd58e0 100644 --- a/files/glance-api.conf +++ b/files/glance-api.conf @@ -155,7 +155,7 @@ pipeline = versionsapp paste.app_factory = glance.api.versions:app_factory [app:apiv1app] -paste.app_factory = glance.api.v1:app_factory +paste.app_factory = glance.api.v1.router:app_factory [filter:versionnegotiation] paste.filter_factory = glance.api.middleware.version_negotiation:filter_factory From f79cc42d0ddda1369d1b7f2d28b327fb77c6c138 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 1 Dec 2011 10:21:42 -0600 Subject: [PATCH 226/967] Rename openstack-integration-tests to tempest Change-Id: Ib565edd1278c609796e1f8692a2f6ef276152328 --- files/pips/{openstack-integration-tests => tempest} | 0 stackrc | 2 +- tools/build_ci_config.sh | 10 +++++----- tools/build_uec_ramdisk.sh | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) rename files/pips/{openstack-integration-tests => tempest} (100%) diff --git a/files/pips/openstack-integration-tests b/files/pips/tempest similarity index 100% rename from files/pips/openstack-integration-tests rename to files/pips/tempest diff --git a/stackrc b/stackrc index 7da7258c..524cc992 100644 --- a/stackrc +++ b/stackrc @@ -40,7 +40,7 @@ QUANTUM_REPO=https://github.com/openstack/quantum QUANTUM_BRANCH=master # CI test suite -CITEST_REPO=https://github.com/openstack/openstack-integration-tests.git +CITEST_REPO=https://github.com/openstack/tempest.git CITEST_BRANCH=master # Specify a comma-separated list of uec images to download and install into glance. 
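The cidr2netmask() helper introduced in the install_openvpn.sh change above packs the whole conversion into a single `set --` line, so a few illustrative calls may help; this assumes the function has been sourced into the current shell and uses made-up addresses:

    FIXED_RANGE=10.0.0.0/24                          # illustrative value
    FIXED_NET=`echo $FIXED_RANGE | cut -d'/' -f1`    # -> 10.0.0.0
    FIXED_CIDR=`echo $FIXED_RANGE | cut -d'/' -f2`   # -> 24
    FIXED_MASK=`cidr2netmask $FIXED_CIDR`            # -> 255.255.255.0
    cidr2netmask 16                                  # prints 255.255.0.0
    cidr2netmask 25                                  # prints 255.255.255.128

The prefix length determines how many leading 255 octets survive the shift, the arithmetic expression computes the single partial octet, and the trailing zeros pad out the rest.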
diff --git a/tools/build_ci_config.sh b/tools/build_ci_config.sh index f627f435..0e43aa49 100755 --- a/tools/build_ci_config.sh +++ b/tools/build_ci_config.sh @@ -1,10 +1,10 @@ #!/usr/bin/env bash # -# build_ci_config.sh - Build a config.ini for openstack-integration-tests -# (https://github.com/openstack/openstack-integration-tests) +# build_ci_config.sh - Build a config.ini for tempest (openstack-integration-tests) +# (https://github.com/openstack/tempest.git) function usage { - echo "$0 - Build config.ini for openstack-integration-tests" + echo "$0 - Build config.ini for tempest" echo "" echo "Usage: $0 [configdir]" exit 1 @@ -49,7 +49,7 @@ source ./stackrc # Where Openstack code lives DEST=${DEST:-/opt/stack} -CITEST_DIR=$DEST/openstack-integration-tests +CITEST_DIR=$DEST/tempest CONFIG_DIR=${1:-$CITEST_DIR/etc} CONFIG_CONF=$CONFIG_DIR/storm.conf @@ -90,7 +90,7 @@ function git_clone { } # Install tests and prerequisites -sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors `cat $TOP_DIR/files/pips/openstack-integration-tests` +sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors `cat $TOP_DIR/files/pips/tempest` git_clone $CITEST_REPO $CITEST_DIR $CITEST_BRANCH diff --git a/tools/build_uec_ramdisk.sh b/tools/build_uec_ramdisk.sh index 0c277341..3bd704ba 100755 --- a/tools/build_uec_ramdisk.sh +++ b/tools/build_uec_ramdisk.sh @@ -149,7 +149,7 @@ git_clone $NOVNC_REPO $DEST/novnc $NOVNC_BRANCH git_clone $HORIZON_REPO $DEST/horizon $HORIZON_BRANCH git_clone $NOVACLIENT_REPO $DEST/python-novaclient $NOVACLIENT_BRANCH git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH -git_clone $CITEST_REPO $DEST/openstack-integration-tests $CITEST_BRANCH +git_clone $CITEST_REPO $DEST/tempest $CITEST_BRANCH # Use this version of devstack rm -rf $MNT_DIR/$DEST/devstack From ff603ef5c5eab962591af15930386d7c545ab8fb Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 22 Nov 2011 17:48:10 -0600 Subject: [PATCH 227/967] Support rsyslog and RELP protocol Configure rsyslog and services if SYSLOG=True in localrc. Support logging to head node if SYSLOG_HOST has head note IP. Use RELP for remote logging to prevent dropped records. Change-Id: I960a1b4d2a24cbd9a900e68c758f362ec3d8c78e --- stack.sh | 52 +++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index f4ed7dbc..5fb88017 100755 --- a/stack.sh +++ b/stack.sh @@ -80,9 +80,6 @@ source ./stackrc # Destination path for installation ``DEST`` DEST=${DEST:-/opt/stack} -# Configure services to syslog instead of writing to individual log files -SYSLOG=${SYSLOG:-False} - # apt-get wrapper to just get arguments set correctly function apt_get() { local sudo="sudo" @@ -186,6 +183,23 @@ if [ ! 
-n "$HOST_IP" ]; then fi fi +# Normalize config values to True or False +# VAR=`trueorfalse default-value test-value` +function trueorfalse() { + local default=$1 + local testval=$2 + + [[ -z "$testval" ]] && { echo "$default"; return; } + [[ "0 no false" =~ "$testval" ]] && { echo "False"; return; } + [[ "1 yes true" =~ "$testval" ]] && { echo "True"; return; } + echo "$default" +} + +# Configure services to syslog instead of writing to individual log files +SYSLOG=`trueorfalse False $SYSLOG` +SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP} +SYSLOG_PORT=${SYSLOG_PORT:-516} + # Service startup timeout SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} @@ -548,6 +562,28 @@ fi # it since we are going to run the services in screen for simple cp $FILES/screenrc ~/.screenrc +# Syslog +# --------- + +if [[ $SYSLOG != "False" ]]; then + apt_get install -y rsyslog-relp + if [[ "$SYSLOG_HOST" = "$HOST_IP" ]]; then + # Configure the master host to receive + cat </tmp/90-stack-m.conf +\$ModLoad imrelp +\$InputRELPServerRun $SYSLOG_PORT +EOF + sudo mv /tmp/90-stack-m.conf /etc/rsyslog.d + else + # Set rsyslog to send to remote host + cat </tmp/90-stack-s.conf +*.* :omrelp:$SYSLOG_HOST:$SYSLOG_PORT +EOF + sudo mv /tmp/90-stack-s.conf /etc/rsyslog.d + fi + sudo /usr/sbin/service rsyslog restart +fi + # Rabbit # --------- @@ -1032,6 +1068,16 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then sudo sed -e "s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g" -i $KEYSTONE_DATA # initialize keystone with default users/endpoints ENABLED_SERVICES=$ENABLED_SERVICES BIN_DIR=$KEYSTONE_DIR/bin bash $KEYSTONE_DATA + + if [ "$SYSLOG" != "False" ]; then + sed -i -e '/^handlers=devel$/s/=devel/=production/' \ + $KEYSTONE_DIR/etc/logging.cnf + sed -i -e " + /^log_file/s/log_file/\#log_file/; \ + /^log_config/d;/^\[DEFAULT\]/a\ + log_config=$KEYSTONE_DIR/etc/logging.cnf" \ + $KEYSTONE_DIR/etc/keystone.conf + fi fi From ef4e5367d20cfeadb77e706bf81cd8401e36feb4 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 1 Dec 2011 13:44:51 -0800 Subject: [PATCH 228/967] Allow individual services to specify dependencies. Also fixes lp897879: https://bugs.launchpad.net/bugs/897879 Change-Id: Ib9de4571501771f12b1aeb6550e94eea03643290 --- stack.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index f4ed7dbc..15ab5383 100755 --- a/stack.sh +++ b/stack.sh @@ -386,6 +386,10 @@ function get_packages() { local service for service in ${ENABLED_SERVICES//,/ }; do + # Allow individual services to specify dependencies + if [[ -e $FILES/apts/${service} ]]; then + file_to_parse="${file_to_parse} $service" + fi if [[ $service == n-* ]]; then if [[ ! $file_to_parse =~ nova ]]; then file_to_parse="${file_to_parse} nova" @@ -398,8 +402,6 @@ function get_packages() { if [[ ! $file_to_parse =~ keystone ]]; then file_to_parse="${file_to_parse} keystone" fi - elif [[ -e $FILES/apts/${service} ]]; then - file_to_parse="${file_to_parse} $service" fi done From 1a52a02f109b9c6a24a8ace789ce5f361b0c5bc7 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Mon, 5 Dec 2011 13:38:29 -0800 Subject: [PATCH 229/967] support gerrit style refs/changes/... for branch names To use a gerrit "branch" with devstack I would find the repo/remote and set it for the specific project. 
Example: https://review.openstack.org/2059 Would mean I update my localrc with the following settings: KEYSTONE_REPO=https://review.openstack.org/p/openstack/keystone KEYSTONE_BRANCH=refs/changes/59/2059/2 Change-Id: I0793415fb03cc08d1eb1a3faf1b8ec3e723beb31 --- stack.sh | 47 ++++++++++++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 19 deletions(-) diff --git a/stack.sh b/stack.sh index 15ab5383..7d66e319 100755 --- a/stack.sh +++ b/stack.sh @@ -448,26 +448,35 @@ function git_clone { GIT_DEST=$2 GIT_BRANCH=$3 - # do a full clone only if the directory doesn't exist - if [ ! -d $GIT_DEST ]; then - git clone $GIT_REMOTE $GIT_DEST - cd $2 - # This checkout syntax works for both branches and tags - git checkout $GIT_BRANCH - elif [[ "$RECLONE" == "yes" ]]; then - # if it does exist then simulate what clone does if asked to RECLONE + if echo $GIT_BRANCH | egrep -q "^refs"; then + # If our branch name is a gerrit style refs/changes/... + if [ ! -d $GIT_DEST ]; then + git clone $GIT_REMOTE $GIT_DEST + fi cd $GIT_DEST - # set the url to pull from and fetch - git remote set-url origin $GIT_REMOTE - git fetch origin - # remove the existing ignored files (like pyc) as they cause breakage - # (due to the py files having older timestamps than our pyc, so python - # thinks the pyc files are correct using them) - find $GIT_DEST -name '*.pyc' -delete - git checkout -f origin/$GIT_BRANCH - # a local branch might not exist - git branch -D $GIT_BRANCH || true - git checkout -b $GIT_BRANCH + git fetch $GIT_REMOTE $GIT_BRANCH && git checkout FETCH_HEAD + else + # do a full clone only if the directory doesn't exist + if [ ! -d $GIT_DEST ]; then + git clone $GIT_REMOTE $GIT_DEST + cd $GIT_DEST + # This checkout syntax works for both branches and tags + git checkout $GIT_BRANCH + elif [[ "$RECLONE" == "yes" ]]; then + # if it does exist then simulate what clone does if asked to RECLONE + cd $GIT_DEST + # set the url to pull from and fetch + git remote set-url origin $GIT_REMOTE + git fetch origin + # remove the existing ignored files (like pyc) as they cause breakage + # (due to the py files having older timestamps than our pyc, so python + # thinks the pyc files are correct using them) + find $GIT_DEST -name '*.pyc' -delete + git checkout -f origin/$GIT_BRANCH + # a local branch might not exist + git branch -D $GIT_BRANCH || true + git checkout -b $GIT_BRANCH + fi fi } From ee3fc417d5256956a6afbbb5dd659d8f09124d18 Mon Sep 17 00:00:00 2001 From: Don Dugger Date: Tue, 6 Dec 2011 12:07:20 -0700 Subject: [PATCH 230/967] Use iputils-arping package for Ubuntu Oneiric distribution Turns out the `arping' package, currently installed by the script, is incompatible with `network-manager', the default network configuration package for recent Ubuntu distributions. (Losing network connectivity on a reboot after running `stack.sh' is a little disconcerting.) Forturnately, the `iputils-apring' package provides the same functionality and is compatible with `network-manager' so install that one instead. 
Change-Id: Id6e89cdf3e590481f870127697baa453b34fbc24 Signed-off-by: Don Dugger --- files/apts/nova | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/files/apts/nova b/files/apts/nova index f4fe4595..b034509c 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -2,7 +2,8 @@ dnsmasq-base dnsmasq-utils # for dhcp_release only available in dist:oneiric kpartx parted -arping # used for send_arp_for_ha option in nova-network +arping # only available in dist:natty +iputils-arping # only available in dist:oneiric mysql-server # NOPRIME python-mysqldb python-xattr # needed for glance which is needed for nova --- this shouldn't be here From 3f5e1893e5603f3a353c4db4fe6e443813bd4823 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 8 Dec 2011 16:21:52 -0500 Subject: [PATCH 231/967] use cgroup-lite package Instead of managing cgroup via /etc/fstab ourselves, let the cgroup-lite package do it. Change-Id: I3c4b8e6583bcf644f3840819bdad972c3b52f088 --- stack.sh | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/stack.sh b/stack.sh index 7d66e319..5e3432d9 100755 --- a/stack.sh +++ b/stack.sh @@ -719,15 +719,17 @@ if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then # splitting a system into many smaller parts. LXC uses cgroups and chroot # to simulate multiple systems. if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then - apt_get install lxc - # lxc uses cgroups (a kernel interface via virtual filesystem) configured - # and mounted to ``/cgroup`` - sudo mkdir -p /cgroup - if ! grep -q cgroup /etc/fstab; then - echo none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0 | sudo tee -a /etc/fstab - fi - if ! mount -n | grep -q cgroup; then - sudo mount /cgroup + if [[ "$DISTRO" > natty ]]; then + apt_get install cgroup-lite + else + cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0" + sudo mkdir -p /cgroup + if ! grep -q cgroup /etc/fstab; then + echo "$cgline" | sudo tee -a /etc/fstab + fi + if ! mount -n | grep -q cgroup; then + sudo mount /cgroup + fi fi fi From 6b549fdb13f88d76a8dc948bc1b7316bb2c84206 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 8 Dec 2011 16:22:27 -0500 Subject: [PATCH 232/967] install libvirt as a dependency of nova nova compute depends on libvirt, but it was not being installed in the case of LIBVIRT_TYPE=lxc. Change-Id: Iedae29e476ad529daa7c7b1be39a58a1c86c3b7c --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 5e3432d9..c7a61025 100755 --- a/stack.sh +++ b/stack.sh @@ -699,6 +699,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then # Virtualization Configuration # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + apt_get install libvirt-bin # attempt to load modules: network block device - used to manage qcow images sudo modprobe nbd || true @@ -707,7 +708,6 @@ if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then # kvm, we drop back to the slower emulation mode (qemu). Note: many systems # come with hardware virtualization disabled in BIOS. if [[ "$LIBVIRT_TYPE" == "kvm" ]]; then - apt_get install libvirt-bin sudo modprobe kvm || true if [ ! -e /dev/kvm ]; then echo "WARNING: Switching to QEMU" From 4f6d7b61b38c380ff28f3d859414ca37c372e600 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 8 Dec 2011 16:22:51 -0500 Subject: [PATCH 233/967] support non "uec style" IMAGE_URLS This adds support for image urls that end in .img.gz or .img. The assumption is that they're a full disk image or a compressed disk image. 
Some examples: https://cloud-images.ubuntu.com/server/releases/11.10/release/ubuntu-11.10-server-cloudimg-i386-disk1.img http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img Change-Id: I79b67b461fb02f2403dad3e15f630fa9817eb0db --- stack.sh | 47 +++++++++++++++++++++++++++++++++++++++++------ stackrc | 12 ++++++++++++ 2 files changed, 53 insertions(+), 6 deletions(-) diff --git a/stack.sh b/stack.sh index c7a61025..08661578 100755 --- a/stack.sh +++ b/stack.sh @@ -1215,20 +1215,55 @@ if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then for image_url in ${IMAGE_URLS//,/ }; do # Downloads the image (uec ami+aki style), then extracts it. IMAGE_FNAME=`basename "$image_url"` - IMAGE_NAME=`basename "$IMAGE_FNAME" .tar.gz` if [ ! -f $FILES/$IMAGE_FNAME ]; then wget -c $image_url -O $FILES/$IMAGE_FNAME fi - # Extract ami and aki files - tar -zxf $FILES/$IMAGE_FNAME -C $FILES/images + KERNEL="" + RAMDISK="" + case "$IMAGE_FNAME" in + *.tar.gz|*.tgz) + # Extract ami and aki files + [ "${IMAGE_FNAME%.tar.gz}" != "$IMAGE_FNAME" ] && + IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" || + IMAGE_NAME="${IMAGE_FNAME%.tgz}" + xdir="$FILES/images/$IMAGE_NAME" + rm -Rf "$xdir"; + mkdir "$xdir" + tar -zxf $FILES/$IMAGE_FNAME -C "$xdir" + KERNEL=$(for f in "$xdir/"*-vmlinuz*; do + [ -f "$f" ] && echo "$f" && break; done; true) + RAMDISK=$(for f in "$xdir/"*-initrd*; do + [ -f "$f" ] && echo "$f" && break; done; true) + IMAGE=$(for f in "$xdir/"*.img; do + [ -f "$f" ] && echo "$f" && break; done; true) + [ -n "$IMAGE_NAME" ] + IMAGE_NAME=$(basename "$IMAGE" ".img") + ;; + *.img) + IMAGE="$FILES/$IMAGE_FNAME"; + IMAGE_NAME=$(basename "$IMAGE" ".img") + ;; + *.img.gz) + IMAGE="$FILES/${IMAGE_FNAME}" + IMAGE_NAME=$(basename "$IMAGE" ".img.gz") + ;; + *) echo "Do not know what to do with $IMAGE_FNAME"; false;; + esac # Use glance client to add the kernel the root filesystem. # We parse the results of the first upload to get the glance ID of the # kernel for use when uploading the root filesystem. 
- RVAL=`glance add -A $SERVICE_TOKEN name="$IMAGE_NAME-kernel" is_public=true container_format=aki disk_format=aki < $FILES/images/$IMAGE_NAME-vmlinuz*` - KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` - glance add -A $SERVICE_TOKEN name="$IMAGE_NAME" is_public=true container_format=ami disk_format=ami kernel_id=$KERNEL_ID < $FILES/images/$IMAGE_NAME.img + KERNEL_ID=""; RAMDISK_ID=""; + if [ -n "$KERNEL" ]; then + RVAL=`glance add -A $SERVICE_TOKEN name="$IMAGE_NAME-kernel" is_public=true container_format=aki disk_format=aki < "$KERNEL"` + KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` + fi + if [ -n "$RAMDISK" ]; then + RVAL=`glance add -A $SERVICE_TOKEN name="$IMAGE_NAME-ramdisk" is_public=true container_format=ari disk_format=ari < "$RAMDISK"` + RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` + fi + glance add -A $SERVICE_TOKEN name="${IMAGE_NAME%.img}" is_public=true container_format=ami disk_format=ami ${KERNEL_ID:+kernel_id=$KERNEL_ID} ${RAMDISK_ID:+ramdisk_id=$RAMDISK_ID} < <(zcat --force "${IMAGE}") done fi diff --git a/stackrc b/stackrc index 524cc992..c420fc87 100644 --- a/stackrc +++ b/stackrc @@ -44,6 +44,18 @@ CITEST_REPO=https://github.com/openstack/tempest.git CITEST_BRANCH=master # Specify a comma-separated list of uec images to download and install into glance. +# supported urls here are: +# * "uec-style" images: +# If the file ends in .tar.gz, uncompress the tarball and and select the first +# .img file inside it as the image. If present, use "*-vmlinuz*" as the kernel +# and "*-initrd*" as the ramdisk +# example: http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-amd64.tar.gz +# * disk image (*.img,*.img.gz) +# if file ends in .img, then it will be uploaded and registered as a to +# glance as a disk image. If it ends in .gz, it is uncompressed first. +# example: +# http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-armel-disk1.img +# http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz IMAGE_URLS=http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz # allow local overrides of env variables From d9eafd58913453b70b2664c272ac3063ae14280c Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 10 Dec 2011 13:55:44 -0800 Subject: [PATCH 234/967] move back to nova.sh method of configuring screen instead of overwriting screenrc (which is useful for new VMs), we manually set the screen status line - fixes bug 902297 Change-Id: I507dc36e85e2bc3635503cde426bab8a2e966f06 --- files/screenrc | 9 --------- stack.sh | 6 ++---- 2 files changed, 2 insertions(+), 13 deletions(-) delete mode 100644 files/screenrc diff --git a/files/screenrc b/files/screenrc deleted file mode 100644 index e18db39d..00000000 --- a/files/screenrc +++ /dev/null @@ -1,9 +0,0 @@ -hardstatus on -hardstatus alwayslastline -hardstatus string "%{.bW}%-w%{.rW}%n %t%{-}%+w %=%{..G}%H %{..Y}%d/%m %c" - -defscrollback 10240 - -vbell off -startup_message off - diff --git a/stack.sh b/stack.sh index 9d991f21..6cf5f4a0 100755 --- a/stack.sh +++ b/stack.sh @@ -569,10 +569,6 @@ if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then cd $QUANTUM_DIR; sudo python setup.py develop fi -# Add a useful screenrc. 
This isn't required to run openstack but is we do -# it since we are going to run the services in screen for simple -cp $FILES/screenrc ~/.screenrc - # Syslog # --------- @@ -1122,6 +1118,8 @@ function screen_it { # create a new named screen to run processes in screen -d -m -S stack -t stack sleep 1 +# set a reasonable statusbar +screen -r stack -X hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H" # launch the glance registry service if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then From 2229a6e34b0c31994bb1ae5525708c55cdc6d28a Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 1 Dec 2011 17:02:07 -0600 Subject: [PATCH 235/967] Clean out old volumes from an existing VOLUME_GROUP if present On subsequent runs of stack.sh clean out old volumes from $VOLUME_GROUP (if it exists) and remove the iSCSI targets. Also clean up the handling when using a backing file rather than a physical disk for the PV. VOLUME_NAME_PREFIX is used to determine which volumes to delete and to set volume_name_template in nova.conf. Change-Id: Iaf9effcc7e0ea5cdfad7bb180e67089e7f8b3583 --- stack.sh | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 9d991f21..c1eb411b 100755 --- a/stack.sh +++ b/stack.sh @@ -160,6 +160,7 @@ ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-v # Name of the lvm volume group to use/create for iscsi volumes VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes} +VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} # Nova hypervisor configuration. We default to libvirt whth **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. Stack.sh can @@ -947,12 +948,29 @@ if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then apt_get install iscsitarget-dkms iscsitarget - if ! sudo vgdisplay | grep -q $VOLUME_GROUP; then + if ! sudo vgs $VOLUME_GROUP; then VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file} VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M} - truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE + # Only create if the file doesn't already exists + [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` - sudo vgcreate $VOLUME_GROUP $DEV + # Only create if the loopback device doesn't contain $VOLUME_GROUP + if ! sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi + fi + + if sudo vgs $VOLUME_GROUP; then + # Clean out existing volumes + for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do + # VOLUME_NAME_PREFIX prefixes the LVs we want + if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then + tid=`egrep "^tid.+$lv" /proc/net/iet/volume | cut -f1 -d' ' | tr ':' '='` + if [[ -n "$tid" ]]; then + lun=`egrep "lun.+$lv" /proc/net/iet/volume | cut -f1 -d' ' | tr ':' '=' | tr -d '\t'` + sudo ietadm --op delete --$tid --$lun + fi + sudo lvremove -f $VOLUME_GROUP/$lv + fi + done fi # Configure iscsitarget @@ -984,6 +1002,7 @@ else fi if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then add_nova_flag "--volume_group=$VOLUME_GROUP" + add_nova_flag "--volume_name_template=${VOLUME_NAME_PREFIX}%08x" fi add_nova_flag "--my_ip=$HOST_IP" add_nova_flag "--public_interface=$PUBLIC_INTERFACE" From f1f3a8fbd88c8941fffd3964857c8bee69bbac6b Mon Sep 17 00:00:00 2001 From: Brad Hall Date: Mon, 12 Dec 2011 23:04:58 +0000 Subject: [PATCH 236/967] Add support for specifying an alternate Quantum host/port Also change the Quantum support around a bit.. 
now if you want Quantum networking you can specify "quantum" in enabled_services -- if you actually want to run the service then you need to add "q-svc" to enabled services. Finally, move some of the openvswitch plugin logic into the openvswitch block. Change-Id: I4b45e40f9670d2288d7714864a76e83972bec2c0 --- stack.sh | 37 +++++++++++++++++++++---------------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/stack.sh b/stack.sh index 6cf5f4a0..6d7faaf6 100755 --- a/stack.sh +++ b/stack.sh @@ -154,6 +154,10 @@ QUANTUM_DIR=$DEST/quantum # Default Quantum Plugin Q_PLUGIN=${Q_PLUGIN:-openvswitch} +# Default Quantum Port +Q_PORT=${Q_PORT:-9696} +# Default Quantum Host +Q_HOST=${Q_HOST:-localhost} # Specify which services to launch. These generally correspond to screen tabs ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit,openstackx} @@ -280,8 +284,9 @@ FLAT_INTERFACE=${FLAT_INTERFACE:-eth0} # Using Quantum networking: # -# Make sure that q-svc is enabled in ENABLED_SERVICES. If it is the network -# manager will be set to the QuantumManager. +# Make sure that quantum is enabled in ENABLED_SERVICES. If it is the network +# manager will be set to the QuantumManager. If you want to run Quantum on +# this host, make sure that q-svc is also in ENABLED_SERVICES. # # If you're planning to use the Quantum openvswitch plugin, set Q_PLUGIN to # "openvswitch" and make sure the q-agt service is enabled in @@ -531,7 +536,7 @@ if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then # that is *deprecated*. The code is being moved into python-novaclient & nova. git_clone $OPENSTACKX_REPO $OPENSTACKX_DIR $OPENSTACKX_BRANCH fi -if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then +if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then # quantum git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH fi @@ -565,7 +570,7 @@ if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then cd $HORIZON_DIR/horizon; sudo python setup.py develop cd $HORIZON_DIR/openstack-dashboard; sudo python setup.py develop fi -if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then +if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then cd $QUANTUM_DIR; sudo python setup.py develop fi @@ -967,9 +972,11 @@ add_nova_flag "--allow_admin_api" add_nova_flag "--scheduler_driver=$SCHEDULER" add_nova_flag "--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf" add_nova_flag "--fixed_range=$FIXED_RANGE" -if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then +if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then add_nova_flag "--network_manager=nova.network.quantum.manager.QuantumManager" - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + add_nova_flag "--quantum_connection_host=$Q_HOST" + add_nova_flag "--quantum_connection_port=$Q_PORT" + if [[ "$ENABLED_SERVICES" =~ "q-svc" && "$Q_PLUGIN" = "openvswitch" ]]; then add_nova_flag "--libvirt_vif_type=ethernet" add_nova_flag "--libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver" add_nova_flag "--linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver" @@ -1156,25 +1163,23 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then fi fi -# Quantum +# Quantum service if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - # Install deps - # FIXME add to files/apts/quantum, but don't install if not needed! - apt_get install openvswitch-switch openvswitch-datapath-dkms - - # Create database for the plugin/agent if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + # Install deps + # FIXME add to files/apts/quantum, but don't install if not needed! 
+ apt_get install openvswitch-switch openvswitch-datapath-dkms + # Create database for the plugin/agent if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum;' else echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." exit 1 fi + QUANTUM_PLUGIN_INI_FILE=$QUANTUM_DIR/etc/plugins.ini + # Make sure we're using the openvswitch plugin + sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE fi - - QUANTUM_PLUGIN_INI_FILE=$QUANTUM_DIR/etc/plugins.ini - # Make sure we're using the openvswitch plugin - sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE screen_it q-svc "cd $QUANTUM_DIR && PYTHONPATH=.:$PYTHONPATH python $QUANTUM_DIR/bin/quantum-server $QUANTUM_DIR/etc/quantum.conf" fi From 60e6e3468778a6738894f76f6de90972146989f6 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 12 Dec 2011 17:42:24 -0600 Subject: [PATCH 237/967] Add open-iscsi packages for Nova compute nodes. Begin splitting packages into service-specific files, n-cpu and n-vol are the first. Change-Id: I3e7c412ff125dbadd18b59af55fb7dea9ea17b07 --- files/apts/n-cpu | 4 ++++ files/apts/n-vol | 3 +++ files/apts/nova | 5 ----- 3 files changed, 7 insertions(+), 5 deletions(-) create mode 100644 files/apts/n-cpu create mode 100644 files/apts/n-vol diff --git a/files/apts/n-cpu b/files/apts/n-cpu new file mode 100644 index 00000000..06c21a23 --- /dev/null +++ b/files/apts/n-cpu @@ -0,0 +1,4 @@ +# Stuff for diablo volumes +lvm2 +open-iscsi +open-iscsi-utils diff --git a/files/apts/n-vol b/files/apts/n-vol new file mode 100644 index 00000000..edaee2c8 --- /dev/null +++ b/files/apts/n-vol @@ -0,0 +1,3 @@ +iscsitarget # NOPRIME +iscsitarget-dkms # NOPRIME +lvm2 diff --git a/files/apts/nova b/files/apts/nova index f4fe4595..2d881516 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -39,8 +39,3 @@ python-lockfile python-m2crypto python-boto python-kombu - -# Stuff for diablo volumes -iscsitarget # NOPRIME -iscsitarget-dkms # NOPRIME -lvm2 From 3584e555bc55cd2c62dce4120b326c7296f337dd Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 8 Dec 2011 16:23:27 -0500 Subject: [PATCH 238/967] stackrc: replace ttylinux-uec images with cirros http://launchpad.net/cirros is basically "ttylinux-uec 2.0". These images should function wherever ttylinux-uec did, and work well better under lxc. 
Change-Id: I5b8485a7bb3518e05172b297fccd16e497463525 --- stackrc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stackrc b/stackrc index c420fc87..0e700d55 100644 --- a/stackrc +++ b/stackrc @@ -56,7 +56,9 @@ CITEST_BRANCH=master # example: # http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-armel-disk1.img # http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz -IMAGE_URLS=http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz +#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image +#IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img" # cirros full disk image +IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz" # uec style cirros image # allow local overrides of env variables if [ -f ./localrc ]; then From e46f8891676898e9cfb7735c767719d5e69236b4 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Wed, 14 Dec 2011 08:44:52 -0800 Subject: [PATCH 239/967] move horizon pips to apts Change-Id: Ia855da34d6ae22ed88501cc3b6bcc74cc4ed2f3a --- files/apts/horizon | 6 ++++++ files/pips/horizon | 6 ------ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/files/apts/horizon b/files/apts/horizon index aa08a316..1e0b0e60 100644 --- a/files/apts/horizon +++ b/files/apts/horizon @@ -18,3 +18,9 @@ python-mox python-kombu python-coverage python-cherrypy3 # why? 
+python-django +python-django-mailer +python-django-nose +python-django-registration +python-cloudfiles +python-migrate diff --git a/files/pips/horizon b/files/pips/horizon index dddf0110..f35a01da 100644 --- a/files/pips/horizon +++ b/files/pips/horizon @@ -1,11 +1,5 @@ -Django==1.3 -django-mailer -django-nose==0.1.2 django-nose-selenium -django-registration==0.7 pycrypto==2.3 -sqlalchemy-migrate -python-cloudfiles -e git+https://github.com/cloudbuilders/openstackx.git#egg=openstackx -e git+https://github.com/jacobian/openstack.compute.git#egg=openstack From 4541e1497d0cf253ada25a5370c0240498fbb205 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Wed, 14 Dec 2011 13:55:55 -0800 Subject: [PATCH 240/967] nova needs python-feedparser Change-Id: I46b90d7643e043b1624874e6919e1acba9b2b3d8 --- files/apts/nova | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apts/nova b/files/apts/nova index 2d881516..bb0e0ceb 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -39,3 +39,4 @@ python-lockfile python-m2crypto python-boto python-kombu +python-feedparser From cebdd8fcff8da6919b94ac040d3b593bfb25e2c9 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 14 Dec 2011 15:52:02 -0600 Subject: [PATCH 241/967] Update glance config files for https://review.openstack.org/#change,2139 Change-Id: I61d10508282ebbc1a1910f1723b25f360068f366 --- files/glance-api.conf | 32 +++++++++++++++++++------------- files/glance-registry.conf | 14 +++++++++----- 2 files changed, 28 insertions(+), 18 deletions(-) diff --git a/files/glance-api.conf b/files/glance-api.conf index 1bbd58e0..6c670b56 100644 --- a/files/glance-api.conf +++ b/files/glance-api.conf @@ -141,30 +141,32 @@ scrubber_datadir = /var/lib/glance/scrubber [pipeline:glance-api] #pipeline = versionnegotiation context apiv1app # NOTE: use the following pipeline for keystone -pipeline = versionnegotiation authtoken context apiv1app +pipeline = versionnegotiation authtoken auth-context apiv1app # To enable Image Cache Management API replace pipeline with below: # pipeline = versionnegotiation context imagecache apiv1app # NOTE: use the following pipeline for keystone auth (with caching) -# pipeline = versionnegotiation authtoken context imagecache apiv1app - -[pipeline:versions] -pipeline = versionsapp - -[app:versionsapp] -paste.app_factory = glance.api.versions:app_factory +# pipeline = versionnegotiation authtoken auth-context imagecache apiv1app [app:apiv1app] -paste.app_factory = glance.api.v1.router:app_factory +paste.app_factory = glance.common.wsgi:app_factory +glance.app_factory = glance.api.v1.router:API [filter:versionnegotiation] -paste.filter_factory = glance.api.middleware.version_negotiation:filter_factory +paste.filter_factory = glance.common.wsgi:filter_factory +glance.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter -[filter:imagecache] -paste.filter_factory = glance.api.middleware.image_cache:filter_factory +[filter:cache] +paste.filter_factory = glance.common.wsgi:filter_factory +glance.filter_factory = glance.api.middleware.cache:CacheFilter + +[filter:cachemanage] +paste.filter_factory = glance.common.wsgi:filter_factory +glance.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter [filter:context] -paste.filter_factory = glance.common.context:filter_factory +paste.filter_factory = glance.common.wsgi:filter_factory +glance.filter_factory = 
glance.common.context:ContextMiddleware [filter:authtoken] paste.filter_factory = keystone.middleware.auth_token:filter_factory @@ -176,3 +178,7 @@ auth_port = 35357 auth_protocol = http auth_uri = http://127.0.0.1:5000/ admin_token = %SERVICE_TOKEN% + +[filter:auth-context] +paste.filter_factory = glance.common.wsgi:filter_factory +glance.filter_factory = keystone.middleware.glance_auth_token:KeystoneContextMiddleware diff --git a/files/glance-registry.conf b/files/glance-registry.conf index 1e041860..e732e869 100644 --- a/files/glance-registry.conf +++ b/files/glance-registry.conf @@ -46,14 +46,16 @@ limit_param_default = 25 [pipeline:glance-registry] #pipeline = context registryapp # NOTE: use the following pipeline for keystone -pipeline = authtoken keystone_shim context registryapp +pipeline = authtoken auth-context context registryapp [app:registryapp] -paste.app_factory = glance.registry.server:app_factory +paste.app_factory = glance.common.wsgi:app_factory +glance.app_factory = glance.registry.api.v1:API [filter:context] context_class = glance.registry.context.RequestContext -paste.filter_factory = glance.common.context:filter_factory +paste.filter_factory = glance.common.wsgi:filter_factory +glance.filter_factory = glance.common.context:ContextMiddleware [filter:authtoken] paste.filter_factory = keystone.middleware.auth_token:filter_factory @@ -66,5 +68,7 @@ auth_protocol = http auth_uri = http://127.0.0.1:5000/ admin_token = %SERVICE_TOKEN% -[filter:keystone_shim] -paste.filter_factory = keystone.middleware.glance_auth_token:filter_factory +[filter:auth-context] +context_class = glance.registry.context.RequestContext +paste.filter_factory = glance.common.wsgi:filter_factory +glance.filter_factory = keystone.middleware.glance_auth_token:KeystoneContextMiddleware From cb9fbd905b3a35d372a5eff1e2d15c4cdef85582 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 15 Dec 2011 18:02:15 +0000 Subject: [PATCH 242/967] Rename of keystone_admin_group variable. 
It has been moved to keystone_swift_operator_roles Change-Id: I74db43379fac55c75efcfbb5f7328c41661b5578 --- files/swift/proxy-server.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf index 2db6d322..d7ed4851 100644 --- a/files/swift/proxy-server.conf +++ b/files/swift/proxy-server.conf @@ -16,7 +16,7 @@ account_autocreate = true use = egg:swiftkeystone2#keystone2 keystone_admin_token = %SERVICE_TOKEN% keystone_url = http://localhost:35357/v2.0 -keystone_admin_group = Member +keystone_swift_operator_roles = Member [filter:tempauth] use = egg:swift#tempauth From 0c3db253b52949f3d14d955f3251c8424111f9f6 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 15 Dec 2011 12:00:31 -0600 Subject: [PATCH 243/967] Prevent nesting of devstack directories Fix a comment typo Change-Id: I8704c1a6d4c91d2e51a38b7aadc29d56d5eedd95 --- stack.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index c7c5293a..e125b9ad 100755 --- a/stack.sh +++ b/stack.sh @@ -118,7 +118,7 @@ if [[ $EUID -eq 0 ]]; then echo "Copying files to stack user" STACK_DIR="$DEST/${PWD##*/}" - cp -r -f "$PWD" "$STACK_DIR" + cp -r -f -T "$PWD" "$STACK_DIR" chown -R stack "$STACK_DIR" if [[ "$SHELL_AFTER_RUN" != "no" ]]; then exec su -c "set -e; cd $STACK_DIR; bash stack.sh; bash" stack @@ -398,8 +398,8 @@ fi # - We are parsing the packages files and detecting metadatas. # - If there is a NOPRIME as comment mean we are not doing the install # just yet. -# - If we have the meta-keyword distro:DISTRO or -# distro:DISTRO1,DISTRO2 it will be installed only for those +# - If we have the meta-keyword dist:DISTRO or +# dist:DISTRO1,DISTRO2 it will be installed only for those # distros (case insensitive). function get_packages() { local file_to_parse="general" From 22a0f5fdedf91cb1b4b6b35da6bedfc0404c398e Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Thu, 15 Dec 2011 13:42:56 -0800 Subject: [PATCH 244/967] Alternate capitalization schemes for True/False. 
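A few illustrative inputs make the intent of the change below easier to see (this assumes the updated trueorfalse helper is loaded in the shell; the values are examples, not an exhaustive list):

    SYSLOG=`trueorfalse False $SYSLOG`
    # SYSLOG=yes    -> True
    # SYSLOG=TRUE   -> True    (newly accepted capitalization)
    # SYSLOG=False  -> False   (newly accepted capitalization)
    # SYSLOG=0      -> False
    # SYSLOG unset  -> False   (the supplied default wins)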
Change-Id: I3c6a9443e9c80dbe7a13caefd5be9121d8335f69 --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index c7c5293a..a4543efc 100755 --- a/stack.sh +++ b/stack.sh @@ -195,8 +195,8 @@ function trueorfalse() { local testval=$2 [[ -z "$testval" ]] && { echo "$default"; return; } - [[ "0 no false" =~ "$testval" ]] && { echo "False"; return; } - [[ "1 yes true" =~ "$testval" ]] && { echo "True"; return; } + [[ "0 no false False FALSE" =~ "$testval" ]] && { echo "False"; return; } + [[ "1 yes true True TRUE" =~ "$testval" ]] && { echo "True"; return; } echo "$default" } From 50e3229c799c2ef3dce45146ba6229209350a3f8 Mon Sep 17 00:00:00 2001 From: Dolph Mathews Date: Thu, 15 Dec 2011 16:04:49 -0600 Subject: [PATCH 245/967] Keystone tenant ID's are no longer predictable (UUID's) Applicable to keystone master post essex-m2 Depends on: https://review.openstack.org/#change,2394 Change-Id: I1431f7177ff22e03cdf89bc7513473fdbbb1415d --- tools/build_ci_config.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_ci_config.sh b/tools/build_ci_config.sh index 0e43aa49..90b8abf8 100755 --- a/tools/build_ci_config.sh +++ b/tools/build_ci_config.sh @@ -199,7 +199,7 @@ service_port = 5000 apiver = v2.0 user = admin password = $ADMIN_PASSWORD -tenant_id = 1 +tenant_name = admin [nova] host = $HOST_IP From a368218a7e46037e6962290a386fc10efc41c9d0 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 16 Dec 2011 10:29:10 -0500 Subject: [PATCH 246/967] if using lxc, use cirros rootfs image The cirros 'uec' image contains a kernel, a ramdisk, and a rootfs. However, the rootfs is empty. cirros copies its ramdisk to it on its first boot. That means, if you try this with lxc, there is no filesystem for lxc to boot. So, in the case of lxc, import the rootfs image, which is a populated ext3 filesystem, which is what nova lxc expects. 
Change-Id: I3ada380c61044a08697b0a964a962b269ea5224c --- stackrc | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 0e700d55..5be96fc9 100644 --- a/stackrc +++ b/stackrc @@ -58,7 +58,12 @@ CITEST_BRANCH=master # http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz #IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image #IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img" # cirros full disk image -IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz" # uec style cirros image +case "$LIBVIRT_TYPE" in + lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc + IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz";; + *) # otherwise, use the uec style image (with kernel, ramdisk, disk) + IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz";; +esac # allow local overrides of env variables if [ -f ./localrc ]; then From a8dda1709f4cb6b64ea9664bf22aa88ae691a047 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 16 Dec 2011 12:22:02 -0600 Subject: [PATCH 247/967] Add volume.sh exercise Change-Id: Ic339c34c85493d21f9fbf5280bb5ff1660644f98 --- exercises/volumes.sh | 160 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 160 insertions(+) create mode 100755 exercises/volumes.sh diff --git a/exercises/volumes.sh b/exercises/volumes.sh new file mode 100755 index 00000000..fe06b6ed --- /dev/null +++ b/exercises/volumes.sh @@ -0,0 +1,160 @@ +#!/usr/bin/env bash + +# Test nova volumes with the nova command from python-novaclient + +# This script exits on an error so that errors don't compound and you see +# only the first error that occured. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. +set -o xtrace + + +# Settings +# ======== + +# Use openrc + stackrc + localrc for settings +pushd $(cd $(dirname "$0")/.. && pwd) +source ./openrc +popd + +# Get a token for clients that don't support service catalog +# ========================================================== + +# manually create a token by querying keystone (sending JSON data). Keystone +# returns a token and catalog of endpoints. We use python to parse the token +# and save it. + +TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_PASSWORD\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"` + +# Launching a server +# ================== + +# List servers for tenant: +nova list + +# Images +# ------ + +# Nova has a **deprecated** way of listing images. 
+nova image-list + +# But we recommend using glance directly +glance -A $TOKEN index + +# Let's grab the id of the first AMI image to launch +IMAGE=`glance -A $TOKEN index | egrep ami | head -1 | cut -d" " -f1` + +# determinine instance type +# ------------------------- + +# List of instance types: +nova flavor-list + +INSTANCE_NAME=${DEFAULT_INSTANCE_TYPE:-m1.tiny} +INSTANCE_TYPE=`nova flavor-list | grep $INSTANCE_NAME | cut -d"|" -f2` +if [[ -z "`nova flavor-list | grep $INSTANCE_NAME | cut -d"|" -f2`" ]]; then + # and grab the first flavor in the list to launch + INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2` +fi + +NAME="myserver" + +VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` + +# Testing +# ======= + +# First check if it spins up (becomes active and responds to ping on +# internal ip). If you run this script from a nova node, you should +# bypass security groups and have direct access to the server. + +# Waiting for boot +# ---------------- + +# Max time to wait while vm goes from build to active state +ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} + +# Max time till the vm is bootable +BOOT_TIMEOUT=${BOOT_TIMEOUT:-15} + +# Max time to wait for proper association and dis-association. +ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10} + +# check that the status is active within ACTIVE_TIMEOUT seconds +if ! timeout $BOOT_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then + echo "server didn't become active!" + exit 1 +fi + +# get the IP of the server +IP=`nova show $VM_UUID | grep "private network" | cut -d"|" -f3` +#VM_UUID=`nova list | grep $NAME | head -1 | cut -d'|' -f2 | sed 's/ //g'` + +# for single node deployments, we can ping private ips +MULTI_HOST=${MULTI_HOST:-0} +if [ "$MULTI_HOST" = "0" ]; then + # sometimes the first ping fails (10 seconds isn't enough time for the VM's + # network to respond?), so let's ping for a default of 15 seconds with a + # timeout of a second for each ping. + if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $IP; do sleep 1; done"; then + echo "Couldn't ping server" + exit 1 + fi +else + # On a multi-host system, without vm net access, do a sleep to wait for the boot + sleep $BOOT_TIMEOUT +fi + +# Volumes +# ------- + +VOL_NAME="myvol-$(openssl rand -hex 4)" + +# Verify it doesn't exist +if [[ -n "`nova volume-list | grep $VOL_NAME | head -1 | cut -d'|' -f3 | sed 's/ //g'`" ]]; then + echo "Volume $VOL_NAME already exists" + exit 1 +fi + +# Create a new volume +nova volume-create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" 1 +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then + echo "Volume $VOL_NAME not created" + exit 1 +fi + +# Get volume ID +VOL_ID=`nova volume-list | grep $VOL_NAME | head -1 | cut -d'|' -f2 | sed 's/ //g'` + +# Attach to server +DEVICE=/dev/vdb +nova volume-attach $VM_UUID $VOL_ID $DEVICE +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then + echo "Volume $VOL_NAME not attached to $NAME" + exit 1 +fi + +VOL_ATTACH=`nova volume-list | grep $VOL_NAME | head -1 | cut -d'|' -f6 | sed 's/ //g'` +if [[ "$VOL_ATTACH" != $VM_UUID ]]; then + echo "Volume not attached to correct instance" + exit 1 +fi + +# Detach volume +nova volume-detach $VM_UUID $VOL_ID +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then + echo "Volume $VOL_NAME not detached from $NAME" + exit 1 +fi + +# Delete volume +nova volume-delete $VOL_ID +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME; do sleep 1; done"; then + echo "Volume $VOL_NAME not deleted" + exit 1 +fi + +# shutdown the server +nova delete $NAME From abda427a10873a6d11026f767d53d2cecf967d1c Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 16 Dec 2011 20:16:20 +0000 Subject: [PATCH 248/967] Improve euca exercise to use floating ips and secgroups. This ensures that the full instance lifecycle gets 'worked.' Change-Id: Ibf22054ae3fb864242ff3df2b8066985a43803d7 --- exercises/euca.sh | 45 ++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 40 insertions(+), 5 deletions(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index f9996094..67150e44 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -12,7 +12,6 @@ set -o errexit # an error. It is also useful for following allowing as the install occurs. set -o xtrace - # Settings # ======== @@ -21,16 +20,52 @@ pushd $(cd $(dirname "$0")/.. && pwd) source ./openrc popd -# find a machine image to boot +# Find a machine image to boot IMAGE=`euca-describe-images | grep machine | cut -f2 | head -n1` -# launch it -INSTANCE=`euca-run-instances $IMAGE | grep INSTANCE | cut -f2` +# Define secgroup +SECGROUP=euca_secgroup + +# Add a secgroup +euca-add-group -d description $SECGROUP -# assure it has booted within a reasonable time +# Launch it +INSTANCE=`euca-run-instances -g $SECGROUP -t m1.tiny $IMAGE | grep INSTANCE | cut -f2` + +# Assure it has booted within a reasonable time if ! timeout $RUNNING_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then echo "server didn't become active within $RUNNING_TIMEOUT seconds" exit 1 fi +# Allocate floating address +FLOATING_IP=`euca-allocate-address | cut -f2` + +# Release floating address +euca-associate-address -i $INSTANCE $FLOATING_IP + + +# Authorize pinging +euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP + +# Max time till the vm is bootable +BOOT_TIMEOUT=${BOOT_TIMEOUT:-15} +if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then + echo "Couldn't ping server" + exit 1 +fi + +# Revoke pinging +euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP + +# Delete group +euca-delete-group $SECGROUP + +# Release floating address +euca-disassociate-address $FLOATING_IP + +# Release floating address +euca-release-address $FLOATING_IP + +# Terminate instance euca-terminate-instances $INSTANCE From 55458455c2f851297a66c9065f05d06c463b1542 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Sat, 17 Dec 2011 00:21:49 +0000 Subject: [PATCH 249/967] Clean out old instances and iptables rules Fixes bug 905344 Change-Id: I844a90246bace792d293ef2df9504fb654104e4b --- stack.sh | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index f978e444..838b5167 100755 --- a/stack.sh +++ b/stack.sh @@ -88,6 +88,13 @@ function apt_get() { --option "Dpkg::Options::=--force-confold" --assume-yes "$@" } +# Check to see if we are already running a stack.sh +if screen -ls | egrep -q "[0-9].stack"; then + echo "You are already running a stack.sh session." + echo "To rejoin this session type 'screen -x stack'." + echo "To destroy this session, kill the running screen." 
+ exit 1 +fi # OpenStack is designed to be run as a regular user (Horizon will fail to run # as root, since apache refused to startup serve content from root user). If @@ -165,6 +172,7 @@ ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-v # Name of the lvm volume group to use/create for iscsi volumes VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes} VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} +INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-} # Nova hypervisor configuration. We default to libvirt whth **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. Stack.sh can @@ -733,6 +741,18 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini fi +# Helper to clean iptables rules +function clean_iptables() { + # Delete rules + sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables",$0}' | bash + # Delete nat rules + sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables -t nat",$0}' | bash + # Delete chains + sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables",$0}' | bash + # Delete nat chains + sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables -t nat",$0}' | bash +} + if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then # Virtualization Configuration @@ -796,13 +816,24 @@ if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then fi fi + # Clean iptables from previous runs + clean_iptables + + # Destroy old instances + instances=`virsh list | grep $INSTANCE_NAME_PREFIX | cut -d " " -f3` + if [ ! $instances = "" ]; then + echo $instances | xargs -n1 virsh destroy + echo $instances | xargs -n1 virsh undefine + fi + # Clean out the instances directory. 
sudo rm -rf $NOVA_DIR/instances/* fi if [[ "$ENABLED_SERVICES" =~ "n-net" ]]; then - # delete traces of nova networks from prior runs + # Delete traces of nova networks from prior runs sudo killall dnsmasq || true + clean_iptables rm -rf $NOVA_DIR/networks mkdir -p $NOVA_DIR/networks fi @@ -1012,6 +1043,7 @@ add_nova_flag "--public_interface=$PUBLIC_INTERFACE" add_nova_flag "--vlan_interface=$VLAN_INTERFACE" add_nova_flag "--sql_connection=$BASE_SQL_CONN/nova" add_nova_flag "--libvirt_type=$LIBVIRT_TYPE" +add_nova_flag "--instance_name_template=${INSTANCE_NAME_PREFIX}%08x" if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then add_nova_flag "--osapi_extension=nova.api.openstack.v2.contrib.standard_extensions" add_nova_flag "--osapi_extension=extensions.admin.Admin" From e23f6de4d2d634278e92d31c40298cc79aeee866 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Wed, 21 Dec 2011 09:29:32 -0800 Subject: [PATCH 250/967] Add ec2 endpoint to service catalog Change-Id: Id779d4d818621db042c420e034083577a3d3ab0d blueprint: ec2-service-endpoint --- files/keystone_data.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index be2d5767..6a495518 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -28,6 +28,7 @@ $BIN_DIR/keystone-manage $* role grant KeystoneServiceAdmin admin # Services $BIN_DIR/keystone-manage $* service add nova compute "Nova Compute Service" +$BIN_DIR/keystone-manage $* service add ec2 ec2 "EC2 Compatability Layer" $BIN_DIR/keystone-manage $* service add glance image "Glance Image Service" $BIN_DIR/keystone-manage $* service add keystone identity "Keystone Identity Service" if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then @@ -36,6 +37,7 @@ fi #endpointTemplates $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne nova http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% 1 1 +$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne ec2 http://%HOST_IP%:8773/services/Cloud http://%HOST_IP%:8773/services/Admin http://%HOST_IP%:8773/services/Cloud 1 1 $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne glance http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% 1 1 $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%HOST_IP%:5000/v2.0 http://%HOST_IP%:35357/v2.0 http://%HOST_IP%:5000/v2.0 1 1 if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then From 331ae29e4ffc17f13eb9c73297ba9bc18d4998d9 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 21 Dec 2011 11:55:35 -0800 Subject: [PATCH 251/967] Fix build_uec to properly respect the devstack directory that is used when COPY_ENV=1 Change-Id: Ib9fd5d18d58349a145d50faa33228dd161965a9e --- tools/build_uec.sh | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 81671050..04e1a459 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -185,17 +185,6 @@ cat > $vm_dir/uec/user-data< localrc <> $vm_dir/uec/user-data< localrc <> $vm_dir/uec/user-data< Date: Fri, 16 Dec 2011 22:40:46 -0600 Subject: [PATCH 252/967] Add OFFLINE support to allow stack.sh to run cleanly without Internet access after having initialized /opt/stack with access. Good for those long flights on a Friday night. 
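For example, after one run with network access has populated /opt/stack and the apt/pip caches, a localrc fragment along these lines (values shown are illustrative, not defaults) keeps subsequent runs fully local:

    # Skip apt-get, pip install and git clone/pull on later runs
    OFFLINE=True

Any form accepted by the trueorfalse helper works here, e.g. OFFLINE=yes or OFFLINE=1.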
Change-Id: If97c22eef91bbd88aed644f05c56bf815036e78f --- stack.sh | 38 +++++++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/stack.sh b/stack.sh index 838b5167..5c978141 100755 --- a/stack.sh +++ b/stack.sh @@ -82,6 +82,7 @@ DEST=${DEST:-/opt/stack} # apt-get wrapper to just get arguments set correctly function apt_get() { + [[ "$OFFLINE" = "True" ]] && return local sudo="sudo" [ "$(id -u)" = "0" ] && sudo="env" $sudo DEBIAN_FRONTEND=noninteractive apt-get \ @@ -147,6 +148,23 @@ else sudo mv $TEMPFILE /etc/sudoers.d/stack_sh_nova fi +# Normalize config values to True or False +# VAR=`trueorfalse default-value test-value` +function trueorfalse() { + local default=$1 + local testval=$2 + + [[ -z "$testval" ]] && { echo "$default"; return; } + [[ "0 no false False FALSE" =~ "$testval" ]] && { echo "False"; return; } + [[ "1 yes true True TRUE" =~ "$testval" ]] && { echo "True"; return; } + echo "$default" +} + +# Set True to configure stack.sh to run cleanly without Internet access. +# stack.sh must have been previously run with Internet access to install +# prerequisites and initialize $DEST. +OFFLINE=`trueorfalse False $OFFLINE` + # Set the destination directories for openstack projects NOVA_DIR=$DEST/nova HORIZON_DIR=$DEST/horizon @@ -196,18 +214,6 @@ if [ ! -n "$HOST_IP" ]; then fi fi -# Normalize config values to True or False -# VAR=`trueorfalse default-value test-value` -function trueorfalse() { - local default=$1 - local testval=$2 - - [[ -z "$testval" ]] && { echo "$default"; return; } - [[ "0 no false False FALSE" =~ "$testval" ]] && { echo "False"; return; } - [[ "1 yes true True TRUE" =~ "$testval" ]] && { echo "True"; return; } - echo "$default" -} - # Configure services to syslog instead of writing to individual log files SYSLOG=`trueorfalse False $SYSLOG` SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP} @@ -460,17 +466,23 @@ function get_packages() { done } +function pip_install { + [[ "$OFFLINE" = "True" ]] && return + sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors $@ +} + # install apt requirements apt_get update apt_get install $(get_packages) # install python requirements -sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors `cat $FILES/pips/*` +pip_install `cat $FILES/pips/* | uniq` # git clone only if directory doesn't exist already. Since ``DEST`` might not # be owned by the installation user, we create the directory and change the # ownership to the proper user. function git_clone { + [[ "$OFFLINE" = "True" ]] && return GIT_REMOTE=$1 GIT_DEST=$2 From e8d970c20e0bf8b3556ef1a52adc74fe17a970d6 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 22 Dec 2011 02:27:00 +0000 Subject: [PATCH 253/967] Updated the horizon pips to use gerrit. 
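The entry keeps pip's editable-VCS requirement form, -e git+<repository URL>#egg=<package name>, so pointing it at a different source only means swapping the URL; for illustration (hypothetical mirror, not a real endpoint):

    -e git+https://git.example.org/mirrors/python-keystoneclient#egg=python-keystoneclient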
Change-Id: Ice5139a41b6f99e99e892a1d73c1453f7fcd554a --- files/pips/horizon | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/pips/horizon b/files/pips/horizon index f35a01da..62575b8b 100644 --- a/files/pips/horizon +++ b/files/pips/horizon @@ -3,4 +3,4 @@ pycrypto==2.3 -e git+https://github.com/cloudbuilders/openstackx.git#egg=openstackx -e git+https://github.com/jacobian/openstack.compute.git#egg=openstack --e git+https://github.com/4P/python-keystoneclient.git#egg=python-keystoneclient +-e git+https://review.openstack.org/p/openstack/python-keystoneclient#egg=python-keystoneclient From b89c75178b9d35d0c2ed562d955c0790faa94afe Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Wed, 21 Dec 2011 23:17:42 -0800 Subject: [PATCH 254/967] remove unused splats Change-Id: I7a321ba31c2b6cdcb9f60255055af71391934ae2 --- files/keystone_data.sh | 66 +++++++++++++++++++++--------------------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 6a495518..6d298d27 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -1,54 +1,54 @@ #!/bin/bash BIN_DIR=${BIN_DIR:-.} # Tenants -$BIN_DIR/keystone-manage $* tenant add admin -$BIN_DIR/keystone-manage $* tenant add demo -$BIN_DIR/keystone-manage $* tenant add invisible_to_admin +$BIN_DIR/keystone-manage tenant add admin +$BIN_DIR/keystone-manage tenant add demo +$BIN_DIR/keystone-manage tenant add invisible_to_admin # Users -$BIN_DIR/keystone-manage $* user add admin %ADMIN_PASSWORD% -$BIN_DIR/keystone-manage $* user add demo %ADMIN_PASSWORD% +$BIN_DIR/keystone-manage user add admin %ADMIN_PASSWORD% +$BIN_DIR/keystone-manage user add demo %ADMIN_PASSWORD% # Roles -$BIN_DIR/keystone-manage $* role add Admin -$BIN_DIR/keystone-manage $* role add Member -$BIN_DIR/keystone-manage $* role add KeystoneAdmin -$BIN_DIR/keystone-manage $* role add KeystoneServiceAdmin -$BIN_DIR/keystone-manage $* role add sysadmin -$BIN_DIR/keystone-manage $* role add netadmin -$BIN_DIR/keystone-manage $* role grant Admin admin admin -$BIN_DIR/keystone-manage $* role grant Member demo demo -$BIN_DIR/keystone-manage $* role grant sysadmin demo demo -$BIN_DIR/keystone-manage $* role grant netadmin demo demo -$BIN_DIR/keystone-manage $* role grant Member demo invisible_to_admin -$BIN_DIR/keystone-manage $* role grant Admin admin demo -$BIN_DIR/keystone-manage $* role grant Admin admin -$BIN_DIR/keystone-manage $* role grant KeystoneAdmin admin -$BIN_DIR/keystone-manage $* role grant KeystoneServiceAdmin admin +$BIN_DIR/keystone-manage role add Admin +$BIN_DIR/keystone-manage role add Member +$BIN_DIR/keystone-manage role add KeystoneAdmin +$BIN_DIR/keystone-manage role add KeystoneServiceAdmin +$BIN_DIR/keystone-manage role add sysadmin +$BIN_DIR/keystone-manage role add netadmin +$BIN_DIR/keystone-manage role grant Admin admin admin +$BIN_DIR/keystone-manage role grant Member demo demo +$BIN_DIR/keystone-manage role grant sysadmin demo demo +$BIN_DIR/keystone-manage role grant netadmin demo demo +$BIN_DIR/keystone-manage role grant Member demo invisible_to_admin +$BIN_DIR/keystone-manage role grant Admin admin demo +$BIN_DIR/keystone-manage role grant Admin admin +$BIN_DIR/keystone-manage role grant KeystoneAdmin admin +$BIN_DIR/keystone-manage role grant KeystoneServiceAdmin admin # Services -$BIN_DIR/keystone-manage 
$* service add nova compute "Nova Compute Service" -$BIN_DIR/keystone-manage $* service add ec2 ec2 "EC2 Compatability Layer" -$BIN_DIR/keystone-manage $* service add glance image "Glance Image Service" -$BIN_DIR/keystone-manage $* service add keystone identity "Keystone Identity Service" +$BIN_DIR/keystone-manage service add nova compute "Nova Compute Service" +$BIN_DIR/keystone-manage service add ec2 ec2 "EC2 Compatability Layer" +$BIN_DIR/keystone-manage service add glance image "Glance Image Service" +$BIN_DIR/keystone-manage service add keystone identity "Keystone Identity Service" if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then - $BIN_DIR/keystone-manage $* service add swift object-store "Swift Service" + $BIN_DIR/keystone-manage service add swift object-store "Swift Service" fi #endpointTemplates -$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne nova http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% 1 1 -$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne ec2 http://%HOST_IP%:8773/services/Cloud http://%HOST_IP%:8773/services/Admin http://%HOST_IP%:8773/services/Cloud 1 1 -$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne glance http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% 1 1 -$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%HOST_IP%:5000/v2.0 http://%HOST_IP%:35357/v2.0 http://%HOST_IP%:5000/v2.0 1 1 +$BIN_DIR/keystone-manage endpointTemplates add RegionOne nova http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% 1 1 +$BIN_DIR/keystone-manage endpointTemplates add RegionOne ec2 http://%HOST_IP%:8773/services/Cloud http://%HOST_IP%:8773/services/Admin http://%HOST_IP%:8773/services/Cloud 1 1 +$BIN_DIR/keystone-manage endpointTemplates add RegionOne glance http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% 1 1 +$BIN_DIR/keystone-manage endpointTemplates add RegionOne keystone http://%HOST_IP%:5000/v2.0 http://%HOST_IP%:35357/v2.0 http://%HOST_IP%:5000/v2.0 1 1 if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then - $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%HOST_IP%:8080/v1/AUTH_%tenant_id% http://%HOST_IP%:8080/ http://%HOST_IP%:8080/v1/AUTH_%tenant_id% 1 1 + $BIN_DIR/keystone-manage endpointTemplates add RegionOne swift http://%HOST_IP%:8080/v1/AUTH_%tenant_id% http://%HOST_IP%:8080/ http://%HOST_IP%:8080/v1/AUTH_%tenant_id% 1 1 fi # Tokens -$BIN_DIR/keystone-manage $* token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00 +$BIN_DIR/keystone-manage token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00 # EC2 related creds - note we are setting the secret key to ADMIN_PASSWORD # but keystone doesn't parse them - it is just a blob from keystone's # point of view -$BIN_DIR/keystone-manage $* credentials add admin EC2 'admin' '%ADMIN_PASSWORD%' admin || echo "no support for adding credentials" -$BIN_DIR/keystone-manage $* credentials add demo EC2 'demo' '%ADMIN_PASSWORD%' demo || echo "no support for adding credentials" +$BIN_DIR/keystone-manage credentials add admin EC2 'admin' '%ADMIN_PASSWORD%' admin || echo "no support for adding credentials" +$BIN_DIR/keystone-manage credentials add demo EC2 'demo' '%ADMIN_PASSWORD%' demo || echo "no support for adding credentials" From 2f15df8a2997adc8c2f4a9a97660f8930ad2d376 Mon Sep 17 00:00:00 2001 
From: Jesse Andrews Date: Fri, 23 Dec 2011 12:28:34 -0800 Subject: [PATCH 255/967] add adapters for exercises jenkins integration) Change-Id: If15570612e784d6a1b9fbd54c83f9cd3a9a36941 --- tools/jenkins/adapters/swift.sh | 8 ++++++++ tools/jenkins/adapters/volumes.sh | 8 ++++++++ 2 files changed, 16 insertions(+) create mode 100755 tools/jenkins/adapters/swift.sh create mode 100755 tools/jenkins/adapters/volumes.sh diff --git a/tools/jenkins/adapters/swift.sh b/tools/jenkins/adapters/swift.sh new file mode 100755 index 00000000..c1362ee4 --- /dev/null +++ b/tools/jenkins/adapters/swift.sh @@ -0,0 +1,8 @@ +#!/bin/bash +# Echo commands, exit on error +set -o xtrace +set -o errexit + +TOP_DIR=$(cd ../../.. && pwd) +HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2` +ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./swift.sh' diff --git a/tools/jenkins/adapters/volumes.sh b/tools/jenkins/adapters/volumes.sh new file mode 100755 index 00000000..ec292097 --- /dev/null +++ b/tools/jenkins/adapters/volumes.sh @@ -0,0 +1,8 @@ +#!/bin/bash +# Echo commands, exit on error +set -o xtrace +set -o errexit + +TOP_DIR=$(cd ../../.. && pwd) +HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2` +ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./volumes.sh' From c245fd8bdead54202c3347a4bcd379d1d4a301ad Mon Sep 17 00:00:00 2001 From: Ziad Sawalha Date: Fri, 23 Dec 2011 16:03:52 -0600 Subject: [PATCH 256/967] Remove deprecated Keystone middleware: RAX-KSKEY It is no longer supported and will fail in the Essex release Change-Id: I6520149f1ecbe4966c786af40db987557d9101f6 --- files/keystone.conf | 4 ---- 1 file changed, 4 deletions(-) diff --git a/files/keystone.conf b/files/keystone.conf index 0c0d0e26..a646513b 100644 --- a/files/keystone.conf +++ b/files/keystone.conf @@ -94,7 +94,6 @@ pipeline = pipeline = urlrewritefilter legacy_auth - RAX-KEY-extension service_api [app:service_api] @@ -109,8 +108,5 @@ paste.filter_factory = keystone.middleware.url:filter_factory [filter:legacy_auth] paste.filter_factory = keystone.frontends.legacy_token_auth:filter_factory -[filter:RAX-KEY-extension] -paste.filter_factory = keystone.contrib.extensions.service.raxkey.frontend:filter_factory - [filter:debug] paste.filter_factory = keystone.common.wsgi:debug_filter_factory From 1097c7ca9156c8bd40a015f00fdda3d757b7a1af Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 27 Dec 2011 23:22:14 -0800 Subject: [PATCH 257/967] Port work on SERVICE_HOST from stable/diablo. 
Allows user to specify service endpoint host separately from HOST_IP * Improve openrc comment Change-Id: Ib542b39350bae8d92b6c3c4cf6b5d9aabff7a61c --- files/keystone_data.sh | 10 +++++----- openrc | 8 +++++--- stack.sh | 20 +++++++++++++------- 3 files changed, 23 insertions(+), 15 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 6d298d27..e7e67dad 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -36,12 +36,12 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then fi #endpointTemplates -$BIN_DIR/keystone-manage endpointTemplates add RegionOne nova http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% 1 1 -$BIN_DIR/keystone-manage endpointTemplates add RegionOne ec2 http://%HOST_IP%:8773/services/Cloud http://%HOST_IP%:8773/services/Admin http://%HOST_IP%:8773/services/Cloud 1 1 -$BIN_DIR/keystone-manage endpointTemplates add RegionOne glance http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% 1 1 -$BIN_DIR/keystone-manage endpointTemplates add RegionOne keystone http://%HOST_IP%:5000/v2.0 http://%HOST_IP%:35357/v2.0 http://%HOST_IP%:5000/v2.0 1 1 +$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne nova http://%SERVICE_HOST%:8774/v1.1/%tenant_id% http://%SERVICE_HOST%:8774/v1.1/%tenant_id% http://%SERVICE_HOST%:8774/v1.1/%tenant_id% 1 1 +$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne ec2 http://%SERVICE_HOST%:8773/services/Cloud http://%SERVICE_HOST%:8773/services/Admin http://%SERVICE_HOST%:8773/services/Cloud 1 1 +$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne glance http://%SERVICE_HOST%:9292/v1.1/%tenant_id% http://%SERVICE_HOST%:9292/v1.1/%tenant_id% http://%SERVICE_HOST%:9292/v1.1/%tenant_id% 1 1 +$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%SERVICE_HOST%:5000/v2.0 http://%SERVICE_HOST%:35357/v2.0 http://%SERVICE_HOST%:5000/v2.0 1 1 if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then - $BIN_DIR/keystone-manage endpointTemplates add RegionOne swift http://%HOST_IP%:8080/v1/AUTH_%tenant_id% http://%HOST_IP%:8080/ http://%HOST_IP%:8080/v1/AUTH_%tenant_id% 1 1 + $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%SERVICE_HOST%:8080/v1/AUTH_%tenant_id% http://%SERVICE_HOST%:8080/ http://%SERVICE_HOST%:8080/v1/AUTH_%tenant_id% 1 1 fi # Tokens diff --git a/openrc b/openrc index 7c1e1292..43959755 100644 --- a/openrc +++ b/openrc @@ -3,8 +3,10 @@ # Load local configuration source ./stackrc -# Set api host endpoint +# Set api HOST_IP endpoint. SERVICE_HOST may also be used to specify the endpoint, +# which is convenient for some localrc configurations. HOST_IP=${HOST_IP:-127.0.0.1} +SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} # Nova original used project_id as the *account* that owned resources (servers, # ip address, ...) With the addition of Keystone we have standardized on the @@ -29,7 +31,7 @@ export NOVA_PASSWORD=${ADMIN_PASSWORD:-secrete} # # *NOTE*: Using the 2.0 *auth api* does not mean that compute api is 2.0. We # will use the 1.1 *compute api* -export NOVA_URL=${NOVA_URL:-http://$HOST_IP:5000/v2.0/} +export NOVA_URL=${NOVA_URL:-http://$SERVICE_HOST:5000/v2.0/} # Currently novaclient needs you to specify the *compute api* version. This # needs to match the config of your catalog returned by Keystone. 
@@ -39,7 +41,7 @@ export NOVA_VERSION=${NOVA_VERSION:-1.1} export NOVA_REGION_NAME=${NOVA_REGION_NAME:-RegionOne} # Set the ec2 url so euca2ools works -export EC2_URL=${EC2_URL:-http://$HOST_IP:8773/services/Cloud} +export EC2_URL=${EC2_URL:-http://$SERVICE_HOST:8773/services/Cloud} # Access key is set in the initial keystone data to be the same as username export EC2_ACCESS_KEY=${USERNAME:-demo} diff --git a/stack.sh b/stack.sh index 5c978141..15a52fc7 100755 --- a/stack.sh +++ b/stack.sh @@ -214,6 +214,9 @@ if [ ! -n "$HOST_IP" ]; then fi fi +# Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints. +SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} + # Configure services to syslog instead of writing to individual log files SYSLOG=`trueorfalse False $SYSLOG` SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP} @@ -270,7 +273,7 @@ FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28} NET_MAN=${NET_MAN:-FlatDHCPManager} -EC2_DMZ_HOST=${EC2_DMZ_HOST:-$HOST_IP} +EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST} FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-br100} VLAN_INTERFACE=${VLAN_INTERFACE:-$PUBLIC_INTERFACE} @@ -333,7 +336,7 @@ RABBIT_HOST=${RABBIT_HOST:-localhost} read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT." # Glance connection info. Note the port must be specified. -GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$HOST_IP:9292} +GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292} # SWIFT # ----- @@ -1061,7 +1064,7 @@ if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then add_nova_flag "--osapi_extension=extensions.admin.Admin" fi if [[ "$ENABLED_SERVICES" =~ "n-vnc" ]]; then - VNCPROXY_URL=${VNCPROXY_URL:-"http://$HOST_IP:6080"} + VNCPROXY_URL=${VNCPROXY_URL:-"http://$SERVICE_HOST:6080"} add_nova_flag "--vncproxy_url=$VNCPROXY_URL" add_nova_flag "--vncproxy_wwwroot=$NOVNC_DIR/" fi @@ -1142,7 +1145,7 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then # keystone_data.sh creates our admin user and our ``SERVICE_TOKEN``. KEYSTONE_DATA=$KEYSTONE_DIR/bin/keystone_data.sh cp $FILES/keystone_data.sh $KEYSTONE_DATA - sudo sed -e "s,%HOST_IP%,$HOST_IP,g" -i $KEYSTONE_DATA + sudo sed -e "s,%SERVICE_HOST%,$SERVICE_HOST,g" -i $KEYSTONE_DATA sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $KEYSTONE_DATA sudo sed -e "s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g" -i $KEYSTONE_DATA # initialize keystone with default users/endpoints @@ -1399,18 +1402,21 @@ echo "" # If you installed the horizon on this server, then you should be able # to access the site using your browser. if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then - echo "horizon is now available at http://$HOST_IP/" + echo "horizon is now available at http://$SERVICE_HOST/" fi # If keystone is present, you can point nova cli to this server if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - echo "keystone is serving at http://$HOST_IP:5000/v2.0/" + echo "keystone is serving at http://$SERVICE_HOST:5000/v2.0/" echo "examples on using novaclient command line is in exercise.sh" echo "the default users are: admin and demo" echo "the password: $ADMIN_PASSWORD" fi -# indicate how long this took to run (bash maintained variable 'SECONDS') +# Echo HOST_IP - useful for build_uec.sh, which uses dhcp to give the instance an address +echo "This is your host ip: $HOST_IP" + +# Indicate how long this took to run (bash maintained variable 'SECONDS') echo "stack.sh completed in $SECONDS seconds." 
) | tee -a "$LOGFILE" From 1d6e0e196e6f2ee5fef7c5cf2571e4b3ab947300 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 23 Dec 2011 12:45:13 -0600 Subject: [PATCH 258/967] Use DEFAULT_INSTANCE_TYPE in exercises Change-Id: I5a98a3a4e8057f613a94054b08ff5439f1ccf7cd --- exercises/euca.sh | 3 ++- exercises/floating_ips.sh | 10 +++++++--- exercises/volumes.sh | 8 ++++---- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index 67150e44..d0ca6c18 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -30,7 +30,8 @@ SECGROUP=euca_secgroup euca-add-group -d description $SECGROUP # Launch it -INSTANCE=`euca-run-instances -g $SECGROUP -t m1.tiny $IMAGE | grep INSTANCE | cut -f2` +DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} +INSTANCE=`euca-run-instances -g $SECGROUP -t $DEFAULT_INSTANCE_TYPE $IMAGE | grep INSTANCE | cut -f2` # Assure it has booted within a reasonable time if ! timeout $RUNNING_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index dca6d5be..135c8c1c 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -67,12 +67,16 @@ nova secgroup-create $SECGROUP "test_secgroup description" # List of flavors: nova flavor-list -# and grab the first flavor in the list to launch -FLAVOR=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2` +DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} +INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | cut -d"|" -f2` +if [[ -z "$INSTANCE_TYPE" ]]; then + # grab the first flavor in the list to launch if default doesn't exist + INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2` +fi NAME="myserver" -nova boot --flavor $FLAVOR --image $IMAGE $NAME --security_groups=$SECGROUP +nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP # Testing # ======= diff --git a/exercises/volumes.sh b/exercises/volumes.sh index fe06b6ed..6ea9a516 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -52,10 +52,10 @@ IMAGE=`glance -A $TOKEN index | egrep ami | head -1 | cut -d" " -f1` # List of instance types: nova flavor-list -INSTANCE_NAME=${DEFAULT_INSTANCE_TYPE:-m1.tiny} -INSTANCE_TYPE=`nova flavor-list | grep $INSTANCE_NAME | cut -d"|" -f2` -if [[ -z "`nova flavor-list | grep $INSTANCE_NAME | cut -d"|" -f2`" ]]; then - # and grab the first flavor in the list to launch +DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} +INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | cut -d"|" -f2` +if [[ -z "$INSTANCE_TYPE" ]]; then + # grab the first flavor in the list to launch if default doesn't exist INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2` fi From 52e631d8aace4bc50dba5f9d84dd4b0c1e2cbc44 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 27 Dec 2011 22:22:14 -0800 Subject: [PATCH 259/967] make python-keystoneclient configurable via devstack, remove pip requirement for horizon. 
* Update to use https://github.com/openstack/python-keystoneclient Change-Id: I54c1ba7fe11de9a6a8f53bdfe8b1c0bd2f5b8f58 --- files/pips/horizon | 2 -- stack.sh | 3 +++ stackrc | 4 ++++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/files/pips/horizon b/files/pips/horizon index 62575b8b..893efb77 100644 --- a/files/pips/horizon +++ b/files/pips/horizon @@ -1,6 +1,4 @@ django-nose-selenium pycrypto==2.3 --e git+https://github.com/cloudbuilders/openstackx.git#egg=openstackx -e git+https://github.com/jacobian/openstack.compute.git#egg=openstack --e git+https://review.openstack.org/p/openstack/python-keystoneclient#egg=python-keystoneclient diff --git a/stack.sh b/stack.sh index 5c978141..19543ca6 100755 --- a/stack.sh +++ b/stack.sh @@ -171,6 +171,7 @@ HORIZON_DIR=$DEST/horizon GLANCE_DIR=$DEST/glance KEYSTONE_DIR=$DEST/keystone NOVACLIENT_DIR=$DEST/python-novaclient +KEYSTONECLIENT_DIR=$DEST/python-keystoneclient OPENSTACKX_DIR=$DEST/openstackx NOVNC_DIR=$DEST/noVNC SWIFT_DIR=$DEST/swift @@ -551,6 +552,7 @@ fi if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then # django powered web control panel for openstack git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG + git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH fi if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then # openstackx is a collection of extensions to openstack.compute & nova @@ -588,6 +590,7 @@ if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then cd $OPENSTACKX_DIR; sudo python setup.py develop fi if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then + cd $KEYSTONECLIENT_DIR; sudo python setup.py develop cd $HORIZON_DIR/horizon; sudo python setup.py develop cd $HORIZON_DIR/openstack-dashboard; sudo python setup.py develop fi diff --git a/stackrc b/stackrc index 0e700d55..2d5079fd 100644 --- a/stackrc +++ b/stackrc @@ -30,6 +30,10 @@ HORIZON_BRANCH=master NOVACLIENT_REPO=https://github.com/openstack/python-novaclient.git NOVACLIENT_BRANCH=master +# python keystone client library to nova that horizon uses +KEYSTONECLIENT_REPO=https://github.com/openstack/python-keystoneclient +KEYSTONECLIENT_BRANCH=master + # openstackx is a collection of extensions to openstack.compute & nova # that is *deprecated*. The code is being moved into python-novaclient & nova. OPENSTACKX_REPO=https://github.com/cloudbuilders/openstackx.git From 98f4ce82f013b64f5f93034931d0bfd362573e5c Mon Sep 17 00:00:00 2001 From: Gabriel Hurley Date: Wed, 28 Dec 2011 16:15:41 -0800 Subject: [PATCH 260/967] Added "Admin" as an allowed role in swift proxy-server.conf. 
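keystone_swift_operator_roles is a comma-separated list, so further roles could be granted the same way; a purely illustrative example (SwiftOperator is not a role created by stack.sh):

    keystone_swift_operator_roles = Member,Admin,SwiftOperator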
Change-Id: Ia98ef5c5ff02a7a9bd24729ca37d87338087ab66 --- files/swift/proxy-server.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf index d7ed4851..5752d748 100644 --- a/files/swift/proxy-server.conf +++ b/files/swift/proxy-server.conf @@ -16,7 +16,7 @@ account_autocreate = true use = egg:swiftkeystone2#keystone2 keystone_admin_token = %SERVICE_TOKEN% keystone_url = http://localhost:35357/v2.0 -keystone_swift_operator_roles = Member +keystone_swift_operator_roles = Member,Admin [filter:tempauth] use = egg:swift#tempauth From 4e6a2b71e398d34267a3aec93fa2f261855ab774 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 29 Dec 2011 17:27:45 -0600 Subject: [PATCH 261/967] Allow only aphanum chars in user-entered passwords Fixes bug 885345 Change-Id: Ib41319676d2fd24144a1493bd58543ad71eb8d6c --- stack.sh | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/stack.sh b/stack.sh index 19543ca6..4307d1f8 100755 --- a/stack.sh +++ b/stack.sh @@ -244,12 +244,17 @@ function read_password { echo '################################################################################' echo $msg echo '################################################################################' - echo "This value will be written to your localrc file so you don't have to enter it again." - echo "It is probably best to avoid spaces and weird characters." + echo "This value will be written to your localrc file so you don't have to enter it " + echo "again. Use only alphanumeric characters." echo "If you leave this blank, a random default value will be used." - echo "Enter a password now:" - read -e $var - pw=${!var} + pw=" " + while true; do + echo "Enter a password now:" + read -e $var + pw=${!var} + [[ "$pw" = "`echo $pw | tr -cd [:alnum:]`" ]] && break + echo "Invalid chars in password. Try again:" + done if [ ! $pw ]; then pw=`openssl rand -hex 10` fi From 34694f156874e10199e29d4d92f35626b68d88d2 Mon Sep 17 00:00:00 2001 From: Kiall Mac Innes Date: Fri, 30 Dec 2011 18:53:26 +0000 Subject: [PATCH 262/967] Fix the glance endpoint template URL. Fixes bug 909075. 
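The corrected template drops the tenant id from the URL; after stack.sh substitutes %SERVICE_HOST%, all three glance endpoints (public, admin, internal) resolve to the same version-only form, e.g. for an illustrative host at 192.0.2.10:

    http://192.0.2.10:9292/v1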
Conflicts: files/keystone_data.sh Change-Id: I82a45ef1e653a7de9ab3b48e7675f1440172b3f9 --- AUTHORS | 1 + files/keystone_data.sh | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/AUTHORS b/AUTHORS index 9d8366ba..84a565ed 100644 --- a/AUTHORS +++ b/AUTHORS @@ -11,6 +11,7 @@ Jason Cannavale Jay Pipes Jesse Andrews Justin Shepherd +Kiall Mac Innes Scott Moser Todd Willey Tres Henry diff --git a/files/keystone_data.sh b/files/keystone_data.sh index e7e67dad..a25ba20b 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -38,7 +38,7 @@ fi #endpointTemplates $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne nova http://%SERVICE_HOST%:8774/v1.1/%tenant_id% http://%SERVICE_HOST%:8774/v1.1/%tenant_id% http://%SERVICE_HOST%:8774/v1.1/%tenant_id% 1 1 $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne ec2 http://%SERVICE_HOST%:8773/services/Cloud http://%SERVICE_HOST%:8773/services/Admin http://%SERVICE_HOST%:8773/services/Cloud 1 1 -$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne glance http://%SERVICE_HOST%:9292/v1.1/%tenant_id% http://%SERVICE_HOST%:9292/v1.1/%tenant_id% http://%SERVICE_HOST%:9292/v1.1/%tenant_id% 1 1 +$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne glance http://%SERVICE_HOST%:9292/v1 http://%SERVICE_HOST%:9292/v1 http://%SERVICE_HOST%:9292/v1 1 1 $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%SERVICE_HOST%:5000/v2.0 http://%SERVICE_HOST%:35357/v2.0 http://%SERVICE_HOST%:5000/v2.0 1 1 if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%SERVICE_HOST%:8080/v1/AUTH_%tenant_id% http://%SERVICE_HOST%:8080/ http://%SERVICE_HOST%:8080/v1/AUTH_%tenant_id% 1 1 From c384424e94f7d4a84670630860bf1848a0563342 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 30 Dec 2011 14:27:02 -0600 Subject: [PATCH 263/967] exercises/euca.sh timing fixes Adjust timing in the script for testing on slower systems, such as VMs on laptops. Change-Id: I657fe54c7a4b75169b84ae1af37c88e2941e918a --- exercises/euca.sh | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index 67150e44..dc2d9cac 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -33,7 +33,7 @@ euca-add-group -d description $SECGROUP INSTANCE=`euca-run-instances -g $SECGROUP -t m1.tiny $IMAGE | grep INSTANCE | cut -f2` # Assure it has booted within a reasonable time -if ! timeout $RUNNING_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then +if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then echo "server didn't become active within $RUNNING_TIMEOUT seconds" exit 1 fi @@ -48,10 +48,10 @@ euca-associate-address -i $INSTANCE $FLOATING_IP # Authorize pinging euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP -# Max time till the vm is bootable -BOOT_TIMEOUT=${BOOT_TIMEOUT:-15} -if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then - echo "Couldn't ping server" +# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds +ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10} +if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! 
ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then + echo "Couldn't ping server with floating ip" exit 1 fi @@ -67,5 +67,11 @@ euca-disassociate-address $FLOATING_IP # Release floating address euca-release-address $FLOATING_IP +# Wait just a tick for everything above to complete so terminate doesn't fail +if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep -q $FLOATING_IP; do sleep 1; done"; then + echo "Floating ip $FLOATING_IP not released within $ASSOCIATE_TIMEOUT seconds" + exit 1 +fi + # Terminate instance euca-terminate-instances $INSTANCE From 0a22780dddbd35ef1464b0cdbe95b98c36d755ee Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Sat, 31 Dec 2011 11:14:56 -0800 Subject: [PATCH 264/967] Use --log-config with keystone. Keystone does not support a log_config config file option. It only respects the command line argument. Fixes bug 910436. Change-Id: I276ad5da239d5d9b6a1dcdd1013972c7948c511b --- stack.sh | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/stack.sh b/stack.sh index 9f9bed9c..e1b005ed 100755 --- a/stack.sh +++ b/stack.sh @@ -1162,11 +1162,9 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then if [ "$SYSLOG" != "False" ]; then sed -i -e '/^handlers=devel$/s/=devel/=production/' \ $KEYSTONE_DIR/etc/logging.cnf - sed -i -e " - /^log_file/s/log_file/\#log_file/; \ - /^log_config/d;/^\[DEFAULT\]/a\ - log_config=$KEYSTONE_DIR/etc/logging.cnf" \ + sed -i -e "/^log_file/s/log_file/\#log_file/" \ $KEYSTONE_DIR/etc/keystone.conf + KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_DIR/etc/logging.cnf" fi fi @@ -1219,7 +1217,7 @@ fi # launch the keystone and wait for it to answer before continuing if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF -d" + screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d" echo "Waiting for keystone to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:5000; do sleep 1; done"; then echo "keystone did not start" From dec00f61fce0c2669f833a048fc49975548cf49e Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 30 Dec 2011 17:43:20 -0600 Subject: [PATCH 265/967] Add info.sh Change-Id: I4394482df2db4d4b251d97678d2692a2849715a1 --- tools/info.sh | 213 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 213 insertions(+) create mode 100755 tools/info.sh diff --git a/tools/info.sh b/tools/info.sh new file mode 100755 index 00000000..edff617f --- /dev/null +++ b/tools/info.sh @@ -0,0 +1,213 @@ +#!/usr/bin/env bash +# info.sh - Produce a report on the state of devstack installs +# +# Output fields are separated with '|' chars +# Output types are git,localrc,os,pip,pkg: +# git||[] +# localtc|= +# os|= +# pip|| +# pkg|| + +function usage { + echo "$0 - Report on the devstack configuration" + echo "" + echo "Usage: $0" + exit 1 +} + +if [ "$1" = "-h" ]; then + usage +fi + +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) +cd $TOP_DIR + +# Source params +source $TOP_DIR/stackrc + +DEST=${DEST:-/opt/stack} +FILES=$TOP_DIR/files +if [[ ! -d $FILES ]]; then + echo "ERROR: missing devstack/files - did you grab more than just stack.sh?" 
+ exit 1 +fi + +# Repos +# ----- + +# git_report +function git_report() { + local dir=$1 + local proj ref branch head + if [[ -d $dir/.git ]]; then + pushd $dir >/dev/null + proj=$(basename $dir) + ref=$(git symbolic-ref HEAD) + branch=${ref##refs/heads/} + head=$(git show-branch --sha1-name $branch | cut -d' ' -f1) + echo "git|${proj}|${branch}${head}" + popd >/dev/null + fi +} + +for i in $DEST/*; do + if [[ -d $i ]]; then + git_report $i + fi +done + +# OS +# -- + +GetOSInfo() { + # Figure out which vedor we are + if [ -r /etc/lsb-release ]; then + . /etc/lsb-release + VENDORNAME=$DISTRIB_ID + RELEASE=$DISTRIB_RELEASE + else + for r in RedHat CentOS Fedora; do + VENDORPKG="`echo $r | tr [:upper:] [:lower:]`-release" + VENDORNAME=$r + RELEASE=`rpm -q --queryformat '%{VERSION}' $VENDORPKG` + if [ $? = 0 ]; then + break + fi + VENDORNAME="" + done + # Get update level + if [ -n "`grep Update /etc/redhat-release`" ]; then + # Get update + UPDATE=`cat /etc/redhat-release | sed s/.*Update\ // | sed s/\)$//` + else + # Assume update 0 + UPDATE=0 + fi + fi + + echo "os|vendor=$VENDORNAME" + echo "os|release=$RELEASE" + if [ -n "$UPDATE" ]; then + echo "os|version=$UPDATE" + fi +} + +GetOSInfo + +# Packages +# -------- + +# - We are going to check packages only for the services needed. +# - We are parsing the packages files and detecting metadatas. +# - If we have the meta-keyword dist:DISTRO or +# dist:DISTRO1,DISTRO2 it will be installed only for those +# distros (case insensitive). +function get_packages() { + local file_to_parse="general" + local service + + for service in ${ENABLED_SERVICES//,/ }; do + # Allow individual services to specify dependencies + if [[ -e $FILES/apts/${service} ]]; then + file_to_parse="${file_to_parse} $service" + fi + if [[ $service == n-* ]]; then + if [[ ! $file_to_parse =~ nova ]]; then + file_to_parse="${file_to_parse} nova" + fi + elif [[ $service == g-* ]]; then + if [[ ! $file_to_parse =~ glance ]]; then + file_to_parse="${file_to_parse} glance" + fi + elif [[ $service == key* ]]; then + if [[ ! $file_to_parse =~ keystone ]]; then + file_to_parse="${file_to_parse} keystone" + fi + fi + done + + for file in ${file_to_parse}; do + local fname=${FILES}/apts/${file} + local OIFS line package distros distro + [[ -e $fname ]] || { echo "missing: $fname"; exit 1; } + + OIFS=$IFS + IFS=$'\n' + for line in $(<${fname}); do + if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then # We are using BASH regexp matching feature. + package=${BASH_REMATCH[1]} + distros=${BASH_REMATCH[2]} + for distro in ${distros//,/ }; do #In bash ${VAR,,} will lowecase VAR + [[ ${distro,,} == ${DISTRO,,} ]] && echo $package + done + continue + fi + + echo ${line%#*} + done + IFS=$OIFS + done +} + +for p in $(get_packages); do + ver=$(dpkg -s $p 2>/dev/null | grep '^Version: ' | cut -d' ' -f2) + echo "pkg|${p}|${ver}" +done + +# Pips +# ---- + +function get_pips() { + cat $FILES/pips/* | uniq +} + +# Pip tells us what is currently installed +FREEZE_FILE=$(mktemp --tmpdir freeze.XXXXXX) +pip freeze >$FREEZE_FILE 2>/dev/null + +# Loop through our requirements and look for matches +for p in $(get_pips); do + [[ "$p" = "-e" ]] && continue + if [[ "$p" =~ \+?([^#]*)#? 
]]; then + # Get the URL from a remote reference + p=${BASH_REMATCH[1]} + fi + line="`grep -i $p $FREEZE_FILE`" + if [[ -n "$line" ]]; then + if [[ "$line" =~ \+(.*)@(.*)#egg=(.*) ]]; then + # Handle URLs + p=${BASH_REMATCH[1]} + ver=${BASH_REMATCH[2]} + elif [[ "$line" =~ (.*)[=\<\>]=(.*) ]]; then + # Normal pip packages + p=${BASH_REMATCH[1]} + ver=${BASH_REMATCH[2]} + else + # Unhandled format in freeze file + #echo "unknown: $p" + continue + fi + echo "pip|${p}|${ver}" + else + # No match in freeze file + #echo "unknown: $p" + continue + fi +done + +rm $FREEZE_FILE + +# localrc +# ------- + +# Dump localrc with 'localrc|' prepended and comments and passwords left out +if [[ -r $TOP_DIR/localrc ]]; then + sed -e ' + /PASSWORD/d; + /^#/d; + s/^/localrc\|/; + ' $TOP_DIR/localrc | sort +fi From 33d5029041a3f5678f759ac7eda29b9aff99b13a Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 4 Jan 2012 09:32:48 -0800 Subject: [PATCH 266/967] Better domain parsing and improved instance cleanup * Fixes bug 911506 * Fix typo - logical or not pipe Change-Id: Id92a7e1d7e974710635899d43a7d428ce525227c --- stack.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index e1b005ed..f3477cd1 100755 --- a/stack.sh +++ b/stack.sh @@ -843,10 +843,10 @@ if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then clean_iptables # Destroy old instances - instances=`virsh list | grep $INSTANCE_NAME_PREFIX | cut -d " " -f3` + instances=`virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"` if [ ! $instances = "" ]; then - echo $instances | xargs -n1 virsh destroy - echo $instances | xargs -n1 virsh undefine + echo $instances | xargs -n1 virsh destroy || true + echo $instances | xargs -n1 virsh undefine || true fi # Clean out the instances directory. From 19b2f9b44fda7e8d0553373c9504aa586811a5db Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 5 Jan 2012 22:21:08 +0000 Subject: [PATCH 267/967] Use moved manage.py in horizion Change-Id: Idb769634fd7d7fc4efce974690b5f24cf632d83d --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index e1b005ed..3ecfc781 100755 --- a/stack.sh +++ b/stack.sh @@ -709,7 +709,7 @@ if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then # Initialize the horizon database (it stores sessions and notices shown to # users). The user system is external (keystone). cd $HORIZON_DIR/openstack-dashboard - dashboard/manage.py syncdb + python manage.py syncdb # create an empty directory that apache uses as docroot sudo mkdir -p $HORIZON_DIR/.blackhole From 471de7a34ba3a3842576fda56602884bf149a9fc Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 27 Dec 2011 11:45:55 -0600 Subject: [PATCH 268/967] Improve log file handling: * Elimiate subshells to produce logfiles (fixes bug 885091) * Clean up log files older than 7 days (default) * Append date/time to specified log file name * Default LOGFILE='', now must set to get logging This changes the default behaviour of stack.sh to not write a log file unless LOGFILE is set. 
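To get a log again, set LOGFILE (and optionally LOGDAYS) in localrc; a minimal sketch, with an illustrative path:

    LOGFILE=/opt/stack/logs/stack.sh.log   # actual file becomes stack.sh.log.<timestamp>
    LOGDAYS=3                              # prune copies older than 3 days (default is 7)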
Change-Id: I5d3fb65e12ccdb52fca5a41ee8f5777c046cd375 --- stack.sh | 36 ++++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/stack.sh b/stack.sh index 420fc254..a79f7949 100755 --- a/stack.sh +++ b/stack.sh @@ -390,8 +390,31 @@ read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN # Horizon currently truncates usernames and passwords at 20 characters read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)." -LOGFILE=${LOGFILE:-"$PWD/stack.sh.$$.log"} -( +# Log files +# --------- + +# Set up logging for stack.sh +# Set LOGFILE to turn on logging +# We append '.xxxxxxxx' to the given name to maintain history +# where xxxxxxxx is a representation of the date the file was created +if [[ -n "$LOGFILE" ]]; then + # First clean up old log files. Use the user-specified LOGFILE + # as the template to search for, appending '.*' to match the date + # we added on earlier runs. + LOGDAYS=${LOGDAYS:-7} + LOGDIR=$(dirname "$LOGFILE") + LOGNAME=$(basename "$LOGFILE") + find $LOGDIR -maxdepth 1 -name $LOGNAME.\* -mtime +$LOGDAYS -exec rm {} \; + + TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"} + LOGFILE=$LOGFILE.$(date "+$TIMESTAMP_FORMAT") + # Redirect stdout/stderr to tee to write the log file + exec 1> >( tee "${LOGFILE}" ) 2>&1 + echo "stack.sh log $LOGFILE" + # Specified logfile name always links to the most recent log + ln -sf $LOGFILE $LOGDIR/$LOGNAME +fi + # So that errors don't compound we exit on any errors so you see only the # first error that occurred. trap failed ERR @@ -1403,13 +1426,8 @@ fi # Fin # === +set +o xtrace -) 2>&1 | tee "${LOGFILE}" - -# Check that the left side of the above pipe succeeded -for ret in "${PIPESTATUS[@]}"; do [ $ret -eq 0 ] || exit $ret; done - -( # Using the cloud # =============== @@ -1436,5 +1454,3 @@ echo "This is your host ip: $HOST_IP" # Indicate how long this took to run (bash maintained variable 'SECONDS') echo "stack.sh completed in $SECONDS seconds." 
- -) | tee -a "$LOGFILE" From 3320c55d10b65d370f1b91bde1bcbd4d78a6a5ca Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 23 Nov 2011 23:19:10 -0600 Subject: [PATCH 269/967] Set up downloaded images for testing Change-Id: Ia08d314e26fcf4a443b567432d0c36202d151c9f --- tools/build_ci_config.sh | 43 ++++++++++++++++++++++++++++++++-------- 1 file changed, 35 insertions(+), 8 deletions(-) diff --git a/tools/build_ci_config.sh b/tools/build_ci_config.sh index 90b8abf8..79f6ead0 100755 --- a/tools/build_ci_config.sh +++ b/tools/build_ci_config.sh @@ -123,6 +123,33 @@ set `echo $GLANCE_HOSTPORT | tr ':' ' '` GLANCE_HOST=$1 GLANCE_PORT=$2 +# Set up downloaded images +# Defaults to use first image + +IMAGE_DIR="" +for imagedir in $TOP_DIR/files/images/*; do + KERNEL="" + RAMDISK="" + IMAGE="" + IMAGE_RAMDISK="" + KERNEL=$(for f in "$imagedir/"*-vmlinuz*; do + [ -f "$f" ] && echo "$f" && break; done; true) + [ -n "$KERNEL" ] && ln -sf $KERNEL $imagedir/kernel + RAMDISK=$(for f in "$imagedir/"*-initrd*; do + [ -f "$f" ] && echo "$f" && break; done; true) + [ -n "$RAMDISK" ] && ln -sf $RAMDISK $imagedir/ramdisk && \ + IMAGE_RAMDISK="ari_location = $imagedir/ramdisk" + IMAGE=$(for f in "$imagedir/"*.img; do + [ -f "$f" ] && echo "$f" && break; done; true) + if [ -n "$IMAGE" ]; then + ln -sf $IMAGE $imagedir/disk + # Save the first image directory that contains a disk image link + if [ -z "$IMAGE_DIR" ]; then + IMAGE_DIR=$imagedir + fi + fi +done + # Create storm.conf CONFIG_CONF_TMP=$(mktemp $CONFIG_CONF.XXXXXX) @@ -154,9 +181,9 @@ CONFIG_INI_TMP=$(mktemp $CONFIG_INI.XXXXXX) if [ "$UPLOAD_LEGACY_TTY" ]; then cat >$CONFIG_INI_TMP <$CONFIG_INI_TMP < Date: Wed, 11 Jan 2012 11:34:13 -0800 Subject: [PATCH 270/967] Stop devstack from exploding in paste config changes Change-Id: I842691479c7c1b46bab627a1f436d9cef3f5148d --- files/nova-api-paste.ini | 126 ++------------------------------------- stack.sh | 22 +++++-- 2 files changed, 21 insertions(+), 127 deletions(-) diff --git a/files/nova-api-paste.ini b/files/nova-api-paste.ini index 7f27fdcb..76c8aae3 100644 --- a/files/nova-api-paste.ini +++ b/files/nova-api-paste.ini @@ -1,131 +1,13 @@ -############ -# Metadata # -############ -[composite:metadata] -use = egg:Paste#urlmap -/: metaversions -/latest: meta -/2007-01-19: meta -/2007-03-01: meta -/2007-08-29: meta -/2007-10-10: meta -/2007-12-15: meta -/2008-02-01: meta -/2008-09-01: meta -/2009-04-04: meta - -[pipeline:metaversions] -pipeline = ec2faultwrap logrequest metaverapp - -[pipeline:meta] -pipeline = ec2faultwrap logrequest metaapp - -[app:metaverapp] -paste.app_factory = nova.api.metadata.handler:Versions.factory - -[app:metaapp] -paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory - -####### -# EC2 # -####### - -[composite:ec2] -use = egg:Paste#urlmap -/services/Cloud: ec2cloud -/services/Admin: ec2admin - -[pipeline:ec2cloud] -pipeline = ec2faultwrap logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor - -[pipeline:ec2admin] -pipeline = ec2faultwrap logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor - -[pipeline:ec2metadata] -pipeline = ec2faultwrap logrequest ec2md - -[pipeline:ec2versions] -pipeline = ec2faultwrap logrequest ec2ver - -[filter:ec2faultwrap] -paste.filter_factory = nova.api.ec2:FaultWrapper.factory - -[filter:logrequest] -paste.filter_factory = nova.api.ec2:RequestLogging.factory - -[filter:ec2lockout] -paste.filter_factory = nova.api.ec2:Lockout.factory - -[filter:totoken] -paste.filter_factory 
= keystone.middleware.ec2_token:EC2Token.factory - -[filter:ec2noauth] -paste.filter_factory = nova.api.ec2:NoAuth.factory - -[filter:authenticate] -paste.filter_factory = nova.api.ec2:Authenticate.factory - -[filter:cloudrequest] -controller = nova.api.ec2.cloud.CloudController -paste.filter_factory = nova.api.ec2:Requestify.factory - -[filter:adminrequest] -controller = nova.api.ec2.admin.AdminController -paste.filter_factory = nova.api.ec2:Requestify.factory - -[filter:authorizer] -paste.filter_factory = nova.api.ec2:Authorizer.factory - -[app:ec2executor] -paste.app_factory = nova.api.ec2:Executor.factory - -############# -# Openstack # -############# - -[composite:osapi] -use = call:nova.api.openstack.v2.urlmap:urlmap_factory -/: osversions -/v1.1: openstack_api_v2 -/v2: openstack_api_v2 - -[pipeline:openstack_api_v2] -pipeline = faultwrap authtoken keystonecontext ratelimit serialize extensions osapi_app_v2 - -[filter:faultwrap] -paste.filter_factory = nova.api.openstack.v2:FaultWrapper.factory - -[filter:auth] -paste.filter_factory = nova.api.openstack.v2.auth:AuthMiddleware.factory - -[filter:noauth] -paste.filter_factory = nova.api.openstack.v2.auth:NoAuthMiddleware.factory - -[filter:ratelimit] -paste.filter_factory = nova.api.openstack.v2.limits:RateLimitingMiddleware.factory - -[filter:serialize] -paste.filter_factory = nova.api.openstack.wsgi:LazySerializationMiddleware.factory - -[filter:extensions] -paste.filter_factory = nova.api.openstack.v2.extensions:ExtensionMiddleware.factory - -[app:osapi_app_v2] -paste.app_factory = nova.api.openstack.v2:APIRouter.factory - -[pipeline:osversions] -pipeline = faultwrap osversionapp - -[app:osversionapp] -paste.app_factory = nova.api.openstack.v2.versions:Versions.factory - ########## -# Shared # +# Extras # ########## [filter:keystonecontext] paste.filter_factory = keystone.middleware.nova_keystone_context:NovaKeystoneContext.factory +[filter:totoken] +paste.filter_factory = keystone.middleware.ec2_token:EC2Token.factory + [filter:authtoken] paste.filter_factory = keystone.middleware.auth_token:filter_factory service_protocol = http diff --git a/stack.sh b/stack.sh index a79f7949..c0763d46 100755 --- a/stack.sh +++ b/stack.sh @@ -777,14 +777,26 @@ fi # Nova # ---- - if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then # We are going to use a sample http middleware configuration based on the # one from the keystone project to launch nova. This paste config adds - # the configuration required for nova to validate keystone tokens. We add - # our own service token to the configuration. - cp $FILES/nova-api-paste.ini $NOVA_DIR/bin + # the configuration required for nova to validate keystone tokens. 
+ + # First we add a some extra data to the default paste config from nova + cat $NOVA_DIR/etc/nova/api-paste.ini $FILES/nova-api-paste.ini > $NOVA_DIR/bin/nova-api-paste.ini + + # Then we add our own service token to the configuration sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini + + # Finally, we change the pipelines in nova to use keystone + function replace_pipeline() { + sed "/\[pipeline:$1\]/,/\[/s/^pipeline = .*/pipeline = $2/" -i $NOVA_DIR/bin/nova-api-paste.ini + } + replace_pipeline "ec2cloud" "ec2faultwrap logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor" + replace_pipeline "ec2admin" "ec2faultwrap logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor" + replace_pipeline "openstack_api_v2" "faultwrap authtoken keystonecontext ratelimit serialize extensions osapi_app_v2" + replace_pipeline "openstack_compute_api_v2" "faultwrap authtoken keystonecontext ratelimit serialize compute_extensions osapi_compute_app_v2" + replace_pipeline "openstack_volume_api_v1" "faultwrap authtoken keystonecontext ratelimit serialize volume_extensions osapi_volume_app_v1" fi # Helper to clean iptables rules @@ -998,7 +1010,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ tee /etc/rsyslog.d/10-swift.conf sudo restart rsyslog - + # We create two helper scripts : # # - swift-remakerings From 419770faadd5390277d94098518cc439f83f7997 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 11 Jan 2012 17:35:40 -0800 Subject: [PATCH 271/967] Changes to make devstack work with the essex + xen Change-Id: If932d82ec72494d871ad65ae863947816e719624 --- stack.sh | 4 ++-- tools/xen/build_domU.sh | 9 +++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/stack.sh b/stack.sh index c0763d46..0e131822 100755 --- a/stack.sh +++ b/stack.sh @@ -1156,9 +1156,9 @@ if [ "$VIRT_DRIVER" = 'xenserver' ]; then add_nova_flag "--xenapi_connection_url=http://169.254.0.1" add_nova_flag "--xenapi_connection_username=root" add_nova_flag "--xenapi_connection_password=$XENAPI_PASSWORD" - add_nova_flag "--flat_injected=False" + add_nova_flag "--noflat_injected" add_nova_flag "--flat_interface=eth1" - add_nova_flag "--flat_network_bridge=xenbr1" + add_nova_flag "--flat_network_bridge=xapi1" add_nova_flag "--public_interface=eth3" else add_nova_flag "--flat_network_bridge=$FLAT_NETWORK_BRIDGE" diff --git a/tools/xen/build_domU.sh b/tools/xen/build_domU.sh index d79d5c3e..642b40f7 100755 --- a/tools/xen/build_domU.sh +++ b/tools/xen/build_domU.sh @@ -33,12 +33,12 @@ PUB_NETMASK=${PUB_NETMASK:-255.255.255.0} # VM network params VM_NETMASK=${VM_NETMASK:-255.255.255.0} -VM_BR=${VM_BR:-xenbr1} +VM_BR=${VM_BR:-xapi1} VM_VLAN=${VM_VLAN:-100} # MGMT network params MGT_NETMASK=${MGT_NETMASK:-255.255.255.0} -MGT_BR=${MGT_BR:-xenbr2} +MGT_BR=${MGT_BR:-xapi2} MGT_VLAN=${MGT_VLAN:-101} # VM Password @@ -205,8 +205,9 @@ fi # Checkout nova if [ ! 
-d $TOP_DIR/nova ]; then - git clone git://github.com/cloudbuilders/nova.git - git checkout diablo + git clone $NOVA_REPO + cd $TOP_DIR/nova + git checkout $NOVA_BRANCH fi # Run devstack on launch From 696ad331a4af3a0d259b1db7eed5ef6b691ba045 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 10 Jan 2012 15:34:34 -0600 Subject: [PATCH 272/967] Add some additional floating ip pool testing Change-Id: I62cd6b43e3250dc28d66dc84d3759f47e81ff134 --- exercises/floating_ips.sh | 28 ++++++++++++++++++++++++---- stack.sh | 8 ++++++++ 2 files changed, 32 insertions(+), 4 deletions(-) diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 135c8c1c..9c207cd8 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -24,6 +24,11 @@ pushd $(cd $(dirname "$0")/.. && pwd) source ./openrc popd +# Set some defaults + +DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova} +TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} + # Get a token for clients that don't support service catalog # ========================================================== @@ -130,11 +135,14 @@ nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 # List rules for a secgroup nova secgroup-list-rules $SECGROUP -# allocate a floating ip -nova floating-ip-create +# allocate a floating ip from default pool +FLOATING_IP=`nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | cut -d '|' -f2` -# store floating address -FLOATING_IP=`nova floating-ip-list | grep None | head -1 | cut -d '|' -f2 | sed 's/ //g'` +# list floating addresses +if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then + echo "Floating IP not allocated" + exit 1 +fi # add floating ip to our server nova add-floating-ip $NAME $FLOATING_IP @@ -145,6 +153,15 @@ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sle exit 1 fi +# Allocate an IP from it +TEST_FLOATING_IP=`nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | cut -d '|' -f2` + +# list floating addresses +if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then + echo "Floating IP not allocated" + exit 1 +fi + # dis-allow icmp traffic (ping) nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 @@ -161,6 +178,9 @@ fi # de-allocate the floating ip nova floating-ip-delete $FLOATING_IP +# Delete second floating IP +nova floating-ip-delete $TEST_FLOATING_IP + # shutdown the server nova delete $NAME diff --git a/stack.sh b/stack.sh index c0763d46..c3d84d24 100755 --- a/stack.sh +++ b/stack.sh @@ -283,6 +283,11 @@ EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST} FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-br100} VLAN_INTERFACE=${VLAN_INTERFACE:-$PUBLIC_INTERFACE} +# Test floating pool and range are used for testing. They are defined +# here until the admin APIs can replace nova-manage +TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} +TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29} + # Multi-host is a mode where each compute node runs its own network node. This # allows network operations and routing for a VM to occur on the server that is # running the VM - removing a SPOF and bandwidth bottleneck. 
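
The exercise changes above lean throughout on `timeout N sh -c "while ! cmd; do sleep 1; done"` to wait for asynchronous state (a floating IP appearing in the listing, a ping starting to succeed). A small wrapper for the same idiom — a sketch only; the helper name is not part of the patches:

    # Poll a command until it succeeds or the deadline expires; return
    # non-zero on timeout so callers can print a message and exit.
    wait_for() {
        local seconds=$1; shift
        if ! timeout "$seconds" sh -c "while ! $*; do sleep 1; done"; then
            echo "Timed out after ${seconds}s waiting for: $*" >&2
            return 1
        fi
    }

    # e.g. the floating-ip check above becomes:
    #   wait_for $ASSOCIATE_TIMEOUT "nova floating-ip-list | grep -q $FLOATING_IP"
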
@@ -1327,6 +1332,9 @@ if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then else # create some floating ips $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE + + # create a second pool + $NOVA_DIR/bin/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL fi fi From f5fb057a8d58fdf1316f54b7c528740f1dc0a907 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Mon, 16 Jan 2012 14:46:01 -0800 Subject: [PATCH 273/967] Fix several issues with devstack and Quantum + OVS plugin - flag telling nova quantum manager to use DHCP is incorrect - OVS quantum plugin no longer has default sql_connection string. - only run quantum OVS agent is openvswitch is enabled - add ovs-vsctl to sudoers file. Change-Id: I1e9b3fe987b3e29d0d4f670b2d964777d9cc3e1b --- files/sudo/nova | 1 + stack.sh | 9 ++++++--- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/files/sudo/nova b/files/sudo/nova index 0a79c210..bde15193 100644 --- a/files/sudo/nova +++ b/files/sudo/nova @@ -41,6 +41,7 @@ Cmnd_Alias NOVADEVCMDS = /bin/chmod /var/lib/nova/tmp/*/root/.ssh, \ /usr/bin/socat, \ /sbin/parted, \ /usr/sbin/dnsmasq, \ + /usr/bin/ovs-vsctl, \ /usr/sbin/arping %USER% ALL = (root) NOPASSWD: SETENV: NOVADEVCMDS diff --git a/stack.sh b/stack.sh index c3fe990f..c4655e75 100755 --- a/stack.sh +++ b/stack.sh @@ -1104,7 +1104,7 @@ if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then add_nova_flag "--libvirt_vif_type=ethernet" add_nova_flag "--libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver" add_nova_flag "--linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver" - add_nova_flag "--quantum-use-dhcp" + add_nova_flag "--quantum_use_dhcp" fi else add_nova_flag "--network_manager=nova.network.manager.$NET_MAN" @@ -1315,10 +1315,13 @@ if [[ "$ENABLED_SERVICES" =~ "q-agt" ]]; then sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int + + # Start up the quantum <-> openvswitch agent + QUANTUM_OVS_CONFIG_FILE=$QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini + sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/ovs_quantum/g" $QUANTUM_OVS_CONFIG_FILE + screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_OVS_CONFIG_FILE -v" fi - # Start up the quantum <-> openvswitch agent - screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini -v" fi # If we're using Quantum (i.e. q-svc is enabled), network creation has to From 524aa547e7900ceb3310fad23ecf34fe1ccddef9 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sat, 14 Jan 2012 01:08:34 +0000 Subject: [PATCH 274/967] Make volumes work again with oneiric Change-Id: I70ba7f8b1d5493e795262e433f8c5783e08482bc --- files/apts/n-vol | 3 +-- stack.sh | 12 ++++++++---- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/files/apts/n-vol b/files/apts/n-vol index edaee2c8..5db06eac 100644 --- a/files/apts/n-vol +++ b/files/apts/n-vol @@ -1,3 +1,2 @@ -iscsitarget # NOPRIME -iscsitarget-dkms # NOPRIME +tgt lvm2 diff --git a/stack.sh b/stack.sh index c4655e75..1fc49fac 100755 --- a/stack.sh +++ b/stack.sh @@ -1053,7 +1053,8 @@ if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then # # By default, the backing file is 2G in size, and is stored in /opt/stack. 
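
For reference, a backing-file volume group of the kind described in the comment above can be created roughly as follows (a sketch assuming a 2G sparse file; the actual commands in stack.sh may differ in detail):

    VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes}
    VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-/opt/stack/nova-volumes-backing-file}

    # Create a sparse 2G file, attach it to a free loop device, and
    # build an LVM volume group on top of it.
    truncate -s 2G "$VOLUME_BACKING_FILE"
    DEV=$(sudo losetup -f --show "$VOLUME_BACKING_FILE")
    sudo pvcreate "$DEV"
    sudo vgcreate "$VOLUME_GROUP" "$DEV"
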
- apt_get install iscsitarget-dkms iscsitarget + # install the package + apt_get install tgt if ! sudo vgs $VOLUME_GROUP; then VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file} @@ -1080,9 +1081,10 @@ if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then done fi - # Configure iscsitarget - sudo sed 's/ISCSITARGET_ENABLE=false/ISCSITARGET_ENABLE=true/' -i /etc/default/iscsitarget - sudo /etc/init.d/iscsitarget restart + # tgt in oneiric doesn't restart properly if tgtd isn't running + # do it in two steps + sudo stop tgt || true + sudo start tgt fi function add_nova_flag { @@ -1112,6 +1114,8 @@ fi if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then add_nova_flag "--volume_group=$VOLUME_GROUP" add_nova_flag "--volume_name_template=${VOLUME_NAME_PREFIX}%08x" + # oneiric no longer supports ietadm + add_nova_flag "--iscsi_helper=tgtadm" fi add_nova_flag "--my_ip=$HOST_IP" add_nova_flag "--public_interface=$PUBLIC_INTERFACE" From d38f942bc3819d0c896d313e8fdfef8409b1a27f Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 17 Jan 2012 15:57:04 -0800 Subject: [PATCH 275/967] Fixes dashboard for new nova api changes * Fixes bug 917457 Change-Id: I55babe184666b9a327086f915a9fa9c79af738ed --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 1fc49fac..41cf1fbe 100755 --- a/stack.sh +++ b/stack.sh @@ -1124,8 +1124,8 @@ add_nova_flag "--sql_connection=$BASE_SQL_CONN/nova" add_nova_flag "--libvirt_type=$LIBVIRT_TYPE" add_nova_flag "--instance_name_template=${INSTANCE_NAME_PREFIX}%08x" if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then - add_nova_flag "--osapi_extension=nova.api.openstack.v2.contrib.standard_extensions" - add_nova_flag "--osapi_extension=extensions.admin.Admin" + add_nova_flag "--osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions" + add_nova_flag "--osapi_compute_extension=extensions.admin.Admin" fi if [[ "$ENABLED_SERVICES" =~ "n-vnc" ]]; then VNCPROXY_URL=${VNCPROXY_URL:-"http://$SERVICE_HOST:6080"} From 6577b4685aebc9303eafff2b6ac3430c7c759c5c Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 16 Jan 2012 22:27:20 -0600 Subject: [PATCH 276/967] Glance changes to stack.sh for https://review.openstack.org/3092 * Handle before/after upcoming Glance configuration file changes * Add configuration vars for Keystone URIs to configure Glance and other services Change-Id: I465065592ade11fb86c40cd04d29c4ae3f1706d7 --- files/keystone_data.sh | 2 +- stack.sh | 72 ++++++++++++++++++++++++++++++++++-------- 2 files changed, 59 insertions(+), 15 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index a25ba20b..818919cf 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -39,7 +39,7 @@ fi $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne nova http://%SERVICE_HOST%:8774/v1.1/%tenant_id% http://%SERVICE_HOST%:8774/v1.1/%tenant_id% http://%SERVICE_HOST%:8774/v1.1/%tenant_id% 1 1 $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne ec2 http://%SERVICE_HOST%:8773/services/Cloud http://%SERVICE_HOST%:8773/services/Admin http://%SERVICE_HOST%:8773/services/Cloud 1 1 $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne glance http://%SERVICE_HOST%:9292/v1 http://%SERVICE_HOST%:9292/v1 http://%SERVICE_HOST%:9292/v1 1 1 -$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%SERVICE_HOST%:5000/v2.0 http://%SERVICE_HOST%:35357/v2.0 http://%SERVICE_HOST%:5000/v2.0 1 1 
+$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/v2.0 %KEYSTONE_AUTH_PROTOCOL%://%KEYSTONE_AUTH_HOST%:%KEYSTONE_AUTH_PORT%/v2.0 %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/v2.0 1 1 if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%SERVICE_HOST%:8080/v1/AUTH_%tenant_id% http://%SERVICE_HOST%:8080/ http://%SERVICE_HOST%:8080/v1/AUTH_%tenant_id% 1 1 fi diff --git a/stack.sh b/stack.sh index 41cf1fbe..8d65ad8f 100755 --- a/stack.sh +++ b/stack.sh @@ -395,6 +395,14 @@ read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN # Horizon currently truncates usernames and passwords at 20 characters read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)." +# Set Keystone interface configuration +KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} +KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357} +KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-http} +KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST} +KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} +KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-http} + # Log files # --------- @@ -765,19 +773,47 @@ if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS glance;' mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE glance;' + function glance_config { + sudo sed -e " + s,%KEYSTONE_AUTH_HOST%,$KEYSTONE_AUTH_HOST,g; + s,%KEYSTONE_AUTH_PORT%,$KEYSTONE_AUTH_PORT,g; + s,%KEYSTONE_AUTH_PROTOCOL%,$KEYSTONE_AUTH_PROTOCOL,g; + s,%KEYSTONE_SERVICE_HOST%,$KEYSTONE_SERVICE_HOST,g; + s,%KEYSTONE_SERVICE_PORT%,$KEYSTONE_SERVICE_PORT,g; + s,%KEYSTONE_SERVICE_PROTOCOL%,$KEYSTONE_SERVICE_PROTOCOL,g; + s,%SQL_CONN%,$BASE_SQL_CONN/glance,g; + s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g; + s,%DEST%,$DEST,g; + s,%SYSLOG%,$SYSLOG,g; + " -i $1 + } + # Copy over our glance configurations and update them - GLANCE_CONF=$GLANCE_DIR/etc/glance-registry.conf - cp $FILES/glance-registry.conf $GLANCE_CONF - sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/glance,g" -i $GLANCE_CONF - sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $GLANCE_CONF - sudo sed -e "s,%DEST%,$DEST,g" -i $GLANCE_CONF - sudo sed -e "s,%SYSLOG%,$SYSLOG,g" -i $GLANCE_CONF + GLANCE_REGISTRY_CONF=$GLANCE_DIR/etc/glance-registry.conf + cp $FILES/glance-registry.conf $GLANCE_REGISTRY_CONF + glance_config $GLANCE_REGISTRY_CONF + + if [[ -e $FILES/glance-registry-paste.ini ]]; then + GLANCE_REGISTRY_PASTE_INI=$GLANCE_DIR/etc/glance-registry-paste.ini + cp $FILES/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI + glance_config $GLANCE_REGISTRY_PASTE_INI + # During the transition for Glance to the split config files + # we cat them together to handle both pre- and post-merge + cat $GLANCE_REGISTRY_PASTE_INI >>$GLANCE_REGISTRY_CONF + fi GLANCE_API_CONF=$GLANCE_DIR/etc/glance-api.conf cp $FILES/glance-api.conf $GLANCE_API_CONF - sudo sed -e "s,%DEST%,$DEST,g" -i $GLANCE_API_CONF - sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $GLANCE_API_CONF - sudo sed -e "s,%SYSLOG%,$SYSLOG,g" -i $GLANCE_API_CONF + glance_config $GLANCE_API_CONF + + if [[ -e $FILES/glance-api-paste.ini ]]; then + GLANCE_API_PASTE_INI=$GLANCE_DIR/etc/glance-api-paste.ini + cp $FILES/glance-api-paste.ini $GLANCE_API_PASTE_INI + glance_config $GLANCE_API_PASTE_INI + # During the transition for Glance to the 
split config files + # we cat them together to handle both pre- and post-merge + cat $GLANCE_API_PASTE_INI >>$GLANCE_API_CONF + fi fi # Nova @@ -1209,9 +1245,17 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then # keystone_data.sh creates our admin user and our ``SERVICE_TOKEN``. KEYSTONE_DATA=$KEYSTONE_DIR/bin/keystone_data.sh cp $FILES/keystone_data.sh $KEYSTONE_DATA - sudo sed -e "s,%SERVICE_HOST%,$SERVICE_HOST,g" -i $KEYSTONE_DATA - sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $KEYSTONE_DATA - sudo sed -e "s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g" -i $KEYSTONE_DATA + sudo sed -e " + s,%KEYSTONE_AUTH_HOST%,$KEYSTONE_AUTH_HOST,g; + s,%KEYSTONE_AUTH_PORT%,$KEYSTONE_AUTH_PORT,g; + s,%KEYSTONE_AUTH_PROTOCOL%,$KEYSTONE_AUTH_PROTOCOL,g; + s,%KEYSTONE_SERVICE_HOST%,$KEYSTONE_SERVICE_HOST,g; + s,%KEYSTONE_SERVICE_PORT%,$KEYSTONE_SERVICE_PORT,g; + s,%KEYSTONE_SERVICE_PROTOCOL%,$KEYSTONE_SERVICE_PROTOCOL,g; + s,%SERVICE_HOST%,$SERVICE_HOST,g; + s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g; + s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g; + " -i $KEYSTONE_DATA # initialize keystone with default users/endpoints ENABLED_SERVICES=$ENABLED_SERVICES BIN_DIR=$KEYSTONE_DIR/bin bash $KEYSTONE_DATA @@ -1275,7 +1319,7 @@ fi if [[ "$ENABLED_SERVICES" =~ "key" ]]; then screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d" echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:5000; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT; do sleep 1; done"; then echo "keystone did not start" exit 1 fi @@ -1470,7 +1514,7 @@ fi # If keystone is present, you can point nova cli to this server if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - echo "keystone is serving at http://$SERVICE_HOST:5000/v2.0/" + echo "keystone is serving at $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/" echo "examples on using novaclient command line is in exercise.sh" echo "the default users are: admin and demo" echo "the password: $ADMIN_PASSWORD" From 8cafc80551a0c471e0b1ed55fff258daad3302e2 Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Mon, 16 Jan 2012 22:01:06 -0500 Subject: [PATCH 277/967] Corresponds to the Glance patch that splits paste The patchset https://review.openstack.org/#change,3036 splits paste.deploy application pipeline stuff from regular configuration file options. This is the change to devstack that will be needed to go along with that change in Glance. 
Change-Id: I606aa81c8ebdf50cdc554611ab3781c554991205 --- files/glance-api-paste.ini | 44 ++++++++++++++++++++++++++++++++ files/glance-api.conf | 45 --------------------------------- files/glance-registry-paste.ini | 29 +++++++++++++++++++++ files/glance-registry.conf | 30 ---------------------- 4 files changed, 73 insertions(+), 75 deletions(-) create mode 100644 files/glance-api-paste.ini create mode 100644 files/glance-registry-paste.ini diff --git a/files/glance-api-paste.ini b/files/glance-api-paste.ini new file mode 100644 index 00000000..b8832ad6 --- /dev/null +++ b/files/glance-api-paste.ini @@ -0,0 +1,44 @@ +[pipeline:glance-api] +#pipeline = versionnegotiation context apiv1app +# NOTE: use the following pipeline for keystone +pipeline = versionnegotiation authtoken auth-context apiv1app + +# To enable Image Cache Management API replace pipeline with below: +# pipeline = versionnegotiation context imagecache apiv1app +# NOTE: use the following pipeline for keystone auth (with caching) +# pipeline = versionnegotiation authtoken auth-context imagecache apiv1app + +[app:apiv1app] +paste.app_factory = glance.common.wsgi:app_factory +glance.app_factory = glance.api.v1.router:API + +[filter:versionnegotiation] +paste.filter_factory = glance.common.wsgi:filter_factory +glance.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter + +[filter:cache] +paste.filter_factory = glance.common.wsgi:filter_factory +glance.filter_factory = glance.api.middleware.cache:CacheFilter + +[filter:cachemanage] +paste.filter_factory = glance.common.wsgi:filter_factory +glance.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter + +[filter:context] +paste.filter_factory = glance.common.wsgi:filter_factory +glance.filter_factory = glance.common.context:ContextMiddleware + +[filter:authtoken] +paste.filter_factory = keystone.middleware.auth_token:filter_factory +service_host = %KEYSTONE_SERVICE_HOST% +service_port = %KEYSTONE_SERVICE_PORT% +service_protocol = %KEYSTONE_SERVICE_PROTOCOL% +auth_host = %KEYSTONE_AUTH_HOST% +auth_port = %KEYSTONE_AUTH_PORT% +auth_protocol = %KEYSTONE_AUTH_PROTOCOL% +auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/ +admin_token = %SERVICE_TOKEN% + +[filter:auth-context] +paste.filter_factory = glance.common.wsgi:filter_factory +glance.filter_factory = keystone.middleware.glance_auth_token:KeystoneContextMiddleware diff --git a/files/glance-api.conf b/files/glance-api.conf index 6c670b56..b4ba098a 100644 --- a/files/glance-api.conf +++ b/files/glance-api.conf @@ -137,48 +137,3 @@ scrub_time = 43200 # Directory that the scrubber will use to remind itself of what to delete # Make sure this is also set in glance-scrubber.conf scrubber_datadir = /var/lib/glance/scrubber - -[pipeline:glance-api] -#pipeline = versionnegotiation context apiv1app -# NOTE: use the following pipeline for keystone -pipeline = versionnegotiation authtoken auth-context apiv1app - -# To enable Image Cache Management API replace pipeline with below: -# pipeline = versionnegotiation context imagecache apiv1app -# NOTE: use the following pipeline for keystone auth (with caching) -# pipeline = versionnegotiation authtoken auth-context imagecache apiv1app - -[app:apiv1app] -paste.app_factory = glance.common.wsgi:app_factory -glance.app_factory = glance.api.v1.router:API - -[filter:versionnegotiation] -paste.filter_factory = glance.common.wsgi:filter_factory -glance.filter_factory = 
glance.api.middleware.version_negotiation:VersionNegotiationFilter - -[filter:cache] -paste.filter_factory = glance.common.wsgi:filter_factory -glance.filter_factory = glance.api.middleware.cache:CacheFilter - -[filter:cachemanage] -paste.filter_factory = glance.common.wsgi:filter_factory -glance.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter - -[filter:context] -paste.filter_factory = glance.common.wsgi:filter_factory -glance.filter_factory = glance.common.context:ContextMiddleware - -[filter:authtoken] -paste.filter_factory = keystone.middleware.auth_token:filter_factory -service_protocol = http -service_host = 127.0.0.1 -service_port = 5000 -auth_host = 127.0.0.1 -auth_port = 35357 -auth_protocol = http -auth_uri = http://127.0.0.1:5000/ -admin_token = %SERVICE_TOKEN% - -[filter:auth-context] -paste.filter_factory = glance.common.wsgi:filter_factory -glance.filter_factory = keystone.middleware.glance_auth_token:KeystoneContextMiddleware diff --git a/files/glance-registry-paste.ini b/files/glance-registry-paste.ini new file mode 100644 index 00000000..f4130ec9 --- /dev/null +++ b/files/glance-registry-paste.ini @@ -0,0 +1,29 @@ +[pipeline:glance-registry] +#pipeline = context registryapp +# NOTE: use the following pipeline for keystone +pipeline = authtoken auth-context context registryapp + +[app:registryapp] +paste.app_factory = glance.common.wsgi:app_factory +glance.app_factory = glance.registry.api.v1:API + +[filter:context] +context_class = glance.registry.context.RequestContext +paste.filter_factory = glance.common.wsgi:filter_factory +glance.filter_factory = glance.common.context:ContextMiddleware + +[filter:authtoken] +paste.filter_factory = keystone.middleware.auth_token:filter_factory +service_host = %KEYSTONE_SERVICE_HOST% +service_port = %KEYSTONE_SERVICE_PORT% +service_protocol = %KEYSTONE_SERVICE_PROTOCOL% +auth_host = %KEYSTONE_AUTH_HOST% +auth_port = %KEYSTONE_AUTH_PORT% +auth_protocol = %KEYSTONE_AUTH_PROTOCOL% +auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/ +admin_token = %SERVICE_TOKEN% + +[filter:auth-context] +context_class = glance.registry.context.RequestContext +paste.filter_factory = glance.common.wsgi:filter_factory +glance.filter_factory = keystone.middleware.glance_auth_token:KeystoneContextMiddleware diff --git a/files/glance-registry.conf b/files/glance-registry.conf index e732e869..2c327457 100644 --- a/files/glance-registry.conf +++ b/files/glance-registry.conf @@ -42,33 +42,3 @@ api_limit_max = 1000 # If a `limit` query param is not provided in an api request, it will # default to `limit_param_default` limit_param_default = 25 - -[pipeline:glance-registry] -#pipeline = context registryapp -# NOTE: use the following pipeline for keystone -pipeline = authtoken auth-context context registryapp - -[app:registryapp] -paste.app_factory = glance.common.wsgi:app_factory -glance.app_factory = glance.registry.api.v1:API - -[filter:context] -context_class = glance.registry.context.RequestContext -paste.filter_factory = glance.common.wsgi:filter_factory -glance.filter_factory = glance.common.context:ContextMiddleware - -[filter:authtoken] -paste.filter_factory = keystone.middleware.auth_token:filter_factory -service_protocol = http -service_host = 127.0.0.1 -service_port = 5000 -auth_host = 127.0.0.1 -auth_port = 35357 -auth_protocol = http -auth_uri = http://127.0.0.1:5000/ -admin_token = %SERVICE_TOKEN% - 
-[filter:auth-context] -context_class = glance.registry.context.RequestContext -paste.filter_factory = glance.common.wsgi:filter_factory -glance.filter_factory = keystone.middleware.glance_auth_token:KeystoneContextMiddleware From c727aa8992f6ab28e643d88e6d6ab8fc304f02b7 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 13 Jan 2012 12:13:59 -0600 Subject: [PATCH 278/967] Add support for proxy servers Set http_proxy and https_proxy as usual outside of devstack, or in localrc. All clients running under sudo need env vars passed explicitly (apt-get, pip, curl). Some tests using wget need proxy turned off since they point to our services (i.e. glance, keystone). Change-Id: Ie87aa2d3502ed5a1312f148db12bb61e5eaf1054 --- stack.sh | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/stack.sh b/stack.sh index 8d65ad8f..3aeef172 100755 --- a/stack.sh +++ b/stack.sh @@ -66,6 +66,12 @@ fi # We try to have sensible defaults, so you should be able to run ``./stack.sh`` # in most cases. # +# We support HTTP and HTTPS proxy servers via the usual environment variables +# http_proxy and https_proxy. They can be set in localrc if necessary or +# on the command line:: +# +# http_proxy=http://proxy.example.com:3128/ ./stack.sh +# # We source our settings from ``stackrc``. This file is distributed with devstack # and contains locations for what repositories to use. If you want to use other # repositories and branches, you can add your own settings with another file called @@ -85,8 +91,9 @@ function apt_get() { [[ "$OFFLINE" = "True" ]] && return local sudo="sudo" [ "$(id -u)" = "0" ] && sudo="env" - $sudo DEBIAN_FRONTEND=noninteractive apt-get \ - --option "Dpkg::Options::=--force-confold" --assume-yes "$@" + $sudo DEBIAN_FRONTEND=noninteractive \ + http_proxy=$http_proxy https_proxy=$https_proxy \ + apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@" } # Check to see if we are already running a stack.sh @@ -513,7 +520,10 @@ function get_packages() { function pip_install { [[ "$OFFLINE" = "True" ]] && return - sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors $@ + sudo PIP_DOWNLOAD_CACHE=/var/cache/pip \ + HTTP_PROXY=$http_proxy \ + HTTPS_PROXY=$https_proxy \ + pip install --use-mirrors $@ } # install apt requirements @@ -1007,7 +1017,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then # We need a special version of bin/swift which understand the # OpenStack api 2.0, we download it until this is getting # integrated in swift. - sudo curl -s -o/usr/local/bin/swift \ + sudo https_proxy=$https_proxy curl -s -o/usr/local/bin/swift \ 'https://review.openstack.org/gitweb?p=openstack/swift.git;a=blob_plain;f=bin/swift;hb=48bfda6e2fdf3886c98bd15649887d54b9a2574e' else swift_auth_server=tempauth @@ -1309,7 +1319,7 @@ fi if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=etc/glance-api.conf" echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
http_proxy= wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then echo "g-api did not start" exit 1 fi @@ -1319,7 +1329,7 @@ fi if [[ "$ENABLED_SERVICES" =~ "key" ]]; then screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d" echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT; do sleep 1; done"; then echo "keystone did not start" exit 1 fi @@ -1329,7 +1339,7 @@ fi if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api" echo "Waiting for nova-api to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then echo "nova-api did not start" exit 1 fi From 751c15243fff7a805031397e85d3dec6d3033aa3 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 10 Jan 2012 15:34:34 -0600 Subject: [PATCH 279/967] Increase exercise robustness * increase some timeouts * tolerate existing security groups and rules * add optional DEFAULT_IMAGE_NAME to select the image to boot * fix image lists via glance Change-Id: I31ae743e602f69a2c9f872273273f542fc4afda3 --- exercises/euca.sh | 31 +++++++++++++--- exercises/floating_ips.sh | 75 ++++++++++++++++++++++++--------------- exercises/volumes.sh | 34 ++++++++++-------- openrc | 6 ++-- 4 files changed, 95 insertions(+), 51 deletions(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index 2f7a17b0..e569196a 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -20,6 +20,18 @@ pushd $(cd $(dirname "$0")/.. && pwd) source ./openrc popd +# Max time to wait while vm goes from build to active state +ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} + +# Max time till the vm is bootable +BOOT_TIMEOUT=${BOOT_TIMEOUT:-30} + +# Max time to wait for proper association and dis-association. +ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15} + +# Instance type to create +DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} + # Find a machine image to boot IMAGE=`euca-describe-images | grep machine | cut -f2 | head -n1` @@ -27,10 +39,15 @@ IMAGE=`euca-describe-images | grep machine | cut -f2 | head -n1` SECGROUP=euca_secgroup # Add a secgroup -euca-add-group -d description $SECGROUP +if ! euca-describe-group | grep -q $SECGROUP; then + euca-add-group -d "$SECGROUP description" $SECGROUP + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! 
euca-describe-group | grep -q $SECGROUP; do sleep 1; done"; then + echo "Security group not created" + exit 1 + fi +fi # Launch it -DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} INSTANCE=`euca-run-instances -g $SECGROUP -t $DEFAULT_INSTANCE_TYPE $IMAGE | grep INSTANCE | cut -f2` # Assure it has booted within a reasonable time @@ -42,15 +59,13 @@ fi # Allocate floating address FLOATING_IP=`euca-allocate-address | cut -f2` -# Release floating address +# Associate floating address euca-associate-address -i $INSTANCE $FLOATING_IP - # Authorize pinging euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP # Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds -ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10} if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then echo "Couldn't ping server with floating ip" exit 1 @@ -65,6 +80,12 @@ euca-delete-group $SECGROUP # Release floating address euca-disassociate-address $FLOATING_IP +# Wait just a tick for everything above to complete so release doesn't fail +if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep $INSTANCE | grep -q $FLOATING_IP; do sleep 1; done"; then + echo "Floating ip $FLOATING_IP not disassociated within $ASSOCIATE_TIMEOUT seconds" + exit 1 +fi + # Release floating address euca-release-address $FLOATING_IP diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 9c207cd8..f7b5240b 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -24,9 +24,28 @@ pushd $(cd $(dirname "$0")/.. && pwd) source ./openrc popd -# Set some defaults +# Max time to wait while vm goes from build to active state +ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} + +# Max time till the vm is bootable +BOOT_TIMEOUT=${BOOT_TIMEOUT:-30} + +# Max time to wait for proper association and dis-association. +ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15} + +# Instance type to create +DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} +# Boot this image, use first AMi image if unset +DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} + +# Security group name +SECGROUP=${SECGROUP:-test_secgroup} + +# Default floating IP pool name DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova} + +# Additional floating IP pool and range TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} # Get a token for clients that don't support service catalog @@ -51,28 +70,32 @@ nova list nova image-list # But we recommend using glance directly -glance -A $TOKEN index +glance -f -A $TOKEN index -# Let's grab the id of the first AMI image to launch -IMAGE=`glance -A $TOKEN index | egrep ami | cut -d" " -f1` +# Grab the id of the image to launch +IMAGE=`glance -f -A $TOKEN index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` # Security Groups # --------------- -SECGROUP=test_secgroup # List of secgroups: nova secgroup-list # Create a secgroup -nova secgroup-create $SECGROUP "test_secgroup description" +if ! nova secgroup-list | grep -q $SECGROUP; then + nova secgroup-create $SECGROUP "$SECGROUP description" + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! 
nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then + echo "Security group not created" + exit 1 + fi +fi -# determine flavor -# ---------------- +# determinine instance type +# ------------------------- -# List of flavors: +# List of instance types: nova flavor-list -DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | cut -d"|" -f2` if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist @@ -81,7 +104,7 @@ fi NAME="myserver" -nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP +VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` # Testing # ======= @@ -93,23 +116,14 @@ nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGRO # Waiting for boot # ---------------- -# Max time to wait while vm goes from build to active state -ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10} - -# Max time till the vm is bootable -BOOT_TIMEOUT=${BOOT_TIMEOUT:-15} - -# Max time to wait for proper association and dis-association. -ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10} - # check that the status is active within ACTIVE_TIMEOUT seconds -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $NAME | grep status | grep -q ACTIVE; do sleep 1; done"; then +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then echo "server didn't become active!" exit 1 fi # get the IP of the server -IP=`nova show $NAME | grep "private network" | cut -d"|" -f3` +IP=`nova show $VM_UUID | grep "private network" | cut -d"|" -f3` # for single node deployments, we can ping private ips MULTI_HOST=${MULTI_HOST:-0} @@ -129,8 +143,14 @@ fi # Security Groups & Floating IPs # ------------------------------ -# allow icmp traffic (ping) -nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 +if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then + # allow icmp traffic (ping) + nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list-rules $SECGROUP | grep -q icmp; do sleep 1; done"; then + echo "Security group rule not created" + exit 1 + fi +fi # List rules for a secgroup nova secgroup-list-rules $SECGROUP @@ -145,7 +165,7 @@ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $ fi # add floating ip to our server -nova add-floating-ip $NAME $FLOATING_IP +nova add-floating-ip $VM_UUID $FLOATING_IP # test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then @@ -153,7 +173,7 @@ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sle exit 1 fi -# Allocate an IP from it +# Allocate an IP from second floating pool TEST_FLOATING_IP=`nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | cut -d '|' -f2` # list floating addresses @@ -182,11 +202,10 @@ nova floating-ip-delete $FLOATING_IP nova floating-ip-delete $TEST_FLOATING_IP # shutdown the server -nova delete $NAME +nova delete $VM_UUID # Delete a secgroup nova secgroup-delete $SECGROUP # FIXME: validate shutdown within 5 seconds # (nova show $NAME returns 1 or status != ACTIVE)? 
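
The FIXME above could be addressed with the same timeout/poll pattern the exercise already uses — a sketch, not part of the patch:

    # After "nova delete", wait until the server drops out of the listing.
    if ! timeout $ACTIVE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
        echo "Server $VM_UUID was not deleted within $ACTIVE_TIMEOUT seconds"
        exit 1
    fi
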
- diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 6ea9a516..c2288de2 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -19,6 +19,21 @@ pushd $(cd $(dirname "$0")/.. && pwd) source ./openrc popd +# Max time to wait while vm goes from build to active state +ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} + +# Max time till the vm is bootable +BOOT_TIMEOUT=${BOOT_TIMEOUT:-30} + +# Max time to wait for proper association and dis-association. +ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15} + +# Instance type to create +DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} + +# Boot this image, use first AMi image if unset +DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} + # Get a token for clients that don't support service catalog # ========================================================== @@ -41,10 +56,10 @@ nova list nova image-list # But we recommend using glance directly -glance -A $TOKEN index +glance -f -A $TOKEN index -# Let's grab the id of the first AMI image to launch -IMAGE=`glance -A $TOKEN index | egrep ami | head -1 | cut -d" " -f1` +# Grab the id of the image to launch +IMAGE=`glance -f -A $TOKEN index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` # determinine instance type # ------------------------- @@ -52,7 +67,6 @@ IMAGE=`glance -A $TOKEN index | egrep ami | head -1 | cut -d" " -f1` # List of instance types: nova flavor-list -DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | cut -d"|" -f2` if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist @@ -73,24 +87,14 @@ VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_group # Waiting for boot # ---------------- -# Max time to wait while vm goes from build to active state -ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} - -# Max time till the vm is bootable -BOOT_TIMEOUT=${BOOT_TIMEOUT:-15} - -# Max time to wait for proper association and dis-association. -ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10} - # check that the status is active within ACTIVE_TIMEOUT seconds -if ! timeout $BOOT_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then echo "server didn't become active!" exit 1 fi # get the IP of the server IP=`nova show $VM_UUID | grep "private network" | cut -d"|" -f3` -#VM_UUID=`nova list | grep $NAME | head -1 | cut -d'|' -f2 | sed 's/ //g'` # for single node deployments, we can ping private ips MULTI_HOST=${MULTI_HOST:-0} diff --git a/openrc b/openrc index 43959755..0f327d26 100644 --- a/openrc +++ b/openrc @@ -53,13 +53,13 @@ export EC2_SECRET_KEY=${ADMIN_PASSWORD:-secrete} # export NOVACLIENT_DEBUG=1 # Max time till the vm is bootable -export BOOT_TIMEOUT=${BOOT_TIMEOUT:-15} +export BOOT_TIMEOUT=${BOOT_TIMEOUT:-30} # Max time to wait while vm goes from build to active state -export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10} +export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} # Max time from run instance command until it is running export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))} # Max time to wait for proper IP association and dis-association. 
-export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10} +export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15} From ce116914884694a04a8692b350df456ba582fe47 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 17 Jan 2012 15:46:53 -0800 Subject: [PATCH 280/967] Add nova's new vnc support to devstack * Fixes 917933 * Use cloudbuilders/master for noVNC * s/n-nvnc/n-novnc/g Change-Id: I45361489a584325ed6a7fbcf3026e8ff0b25e667 --- files/apts/{n-vnc => n-novnc} | 0 stack.sh | 36 +++++++++++++++++++++++++--------- tools/xen/templates/ova.xml.in | 2 +- 3 files changed, 28 insertions(+), 10 deletions(-) rename files/apts/{n-vnc => n-novnc} (100%) diff --git a/files/apts/n-vnc b/files/apts/n-novnc similarity index 100% rename from files/apts/n-vnc rename to files/apts/n-novnc diff --git a/stack.sh b/stack.sh index 8d65ad8f..758cc21f 100755 --- a/stack.sh +++ b/stack.sh @@ -186,7 +186,7 @@ Q_PORT=${Q_PORT:-9696} Q_HOST=${Q_HOST:-localhost} # Specify which services to launch. These generally correspond to screen tabs -ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit,openstackx} +ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit,openstackx} # Name of the lvm volume group to use/create for iscsi volumes VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes} @@ -589,7 +589,7 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" || # image catalog service git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH fi -if [[ "$ENABLED_SERVICES" =~ "n-vnc" ]]; then +if [[ "$ENABLED_SERVICES" =~ "n-novnc" ]]; then # a websockets/html5 or flash powered VNC console for vm instances git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH fi @@ -1163,11 +1163,20 @@ if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then add_nova_flag "--osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions" add_nova_flag "--osapi_compute_extension=extensions.admin.Admin" fi -if [[ "$ENABLED_SERVICES" =~ "n-vnc" ]]; then - VNCPROXY_URL=${VNCPROXY_URL:-"http://$SERVICE_HOST:6080"} - add_nova_flag "--vncproxy_url=$VNCPROXY_URL" - add_nova_flag "--vncproxy_wwwroot=$NOVNC_DIR/" +if [[ "$ENABLED_SERVICES" =~ "n-novnc" ]]; then + NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} + add_nova_flag "--novncproxy_base_url=$NOVNCPROXY_URL" fi +if [[ "$ENABLED_SERVICES" =~ "n-xvnc" ]]; then + XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"} + add_nova_flag "--xvpvncproxy_base_url=$XVPVNCPROXY_URL" +fi +if [ "$VIRT_DRIVER" = 'xenserver' ]; then + VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1} +else + VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} +fi +add_nova_flag "--vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS" add_nova_flag "--api_paste_config=$NOVA_DIR/bin/nova-api-paste.ini" add_nova_flag "--image_service=nova.image.glance.GlanceImageService" add_nova_flag "--ec2_dmz_host=$EC2_DMZ_HOST" @@ -1205,6 +1214,9 @@ if [ "$VIRT_DRIVER" = 'xenserver' ]; then add_nova_flag "--flat_interface=eth1" add_nova_flag "--flat_network_bridge=xapi1" add_nova_flag "--public_interface=eth3" + # Need to avoid crash due to new firewall support + XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} + add_nova_flag "--firewall_driver=$XEN_FIREWALL_DRIVER" else add_nova_flag "--flat_network_bridge=$FLAT_NETWORK_BRIDGE" if [ -n "$FLAT_INTERFACE" ]; then @@ -1288,7 +1300,7 @@ function screen_it { # sleep to allow bash to 
be ready to be send the command - we are # creating a new window in screen and then sends characters, so if # bash isn't running by the time we send the command, nothing happens - sleep 1 + sleep 1.5 screen -S stack -p $1 -X stuff "$2$NL" fi fi @@ -1398,8 +1410,14 @@ screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_DIR/bin/nova-compute" screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume" screen_it n-net "cd $NOVA_DIR && $NOVA_DIR/bin/nova-network" screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler" -if [[ "$ENABLED_SERVICES" =~ "n-vnc" ]]; then - screen_it n-vnc "cd $NOVNC_DIR && ./utils/nova-wsproxy.py --flagfile $NOVA_DIR/bin/nova.conf --web . 6080" +if [[ "$ENABLED_SERVICES" =~ "n-novnc" ]]; then + screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --flagfile $NOVA_DIR/bin/nova.conf --web ." +fi +if [[ "$ENABLED_SERVICES" =~ "n-xvnc" ]]; then + screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --flagfile $NOVA_DIR/bin/nova.conf" +fi +if [[ "$ENABLED_SERVICES" =~ "n-cauth" ]]; then + screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" fi if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log" diff --git a/tools/xen/templates/ova.xml.in b/tools/xen/templates/ova.xml.in index 8443dcb8..01041e20 100644 --- a/tools/xen/templates/ova.xml.in +++ b/tools/xen/templates/ova.xml.in @@ -5,7 +5,7 @@ @PRODUCT_BRAND@ @PRODUCT_VERSION@-@BUILD_NUMBER@ - + From af0f7cadb93c484207a707d4108369f9e3262f08 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 20 Jan 2012 12:10:39 -0800 Subject: [PATCH 281/967] Downcase Admin user role to support nova's new policy engine * Fixes bug 919373 Change-Id: If786f59995327f15fe72420950c7109e2eb0a307 --- files/keystone.conf | 2 +- files/keystone_data.sh | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/files/keystone.conf b/files/keystone.conf index a646513b..6d0fd7e1 100644 --- a/files/keystone.conf +++ b/files/keystone.conf @@ -64,7 +64,7 @@ ca_certs = /etc/keystone/ssl/certs/ca.pem cert_required = True #Role that allows to perform admin operations. -keystone-admin-role = Admin +keystone-admin-role = admin #Role that allows to perform service admin operations. 
keystone-service-admin-role = KeystoneServiceAdmin diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 818919cf..77f6b933 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -10,19 +10,19 @@ $BIN_DIR/keystone-manage user add admin %ADMIN_PASSWORD% $BIN_DIR/keystone-manage user add demo %ADMIN_PASSWORD% # Roles -$BIN_DIR/keystone-manage role add Admin +$BIN_DIR/keystone-manage role add admin $BIN_DIR/keystone-manage role add Member $BIN_DIR/keystone-manage role add KeystoneAdmin $BIN_DIR/keystone-manage role add KeystoneServiceAdmin $BIN_DIR/keystone-manage role add sysadmin $BIN_DIR/keystone-manage role add netadmin -$BIN_DIR/keystone-manage role grant Admin admin admin +$BIN_DIR/keystone-manage role grant admin admin admin $BIN_DIR/keystone-manage role grant Member demo demo $BIN_DIR/keystone-manage role grant sysadmin demo demo $BIN_DIR/keystone-manage role grant netadmin demo demo $BIN_DIR/keystone-manage role grant Member demo invisible_to_admin -$BIN_DIR/keystone-manage role grant Admin admin demo -$BIN_DIR/keystone-manage role grant Admin admin +$BIN_DIR/keystone-manage role grant admin admin demo +$BIN_DIR/keystone-manage role grant admin admin $BIN_DIR/keystone-manage role grant KeystoneAdmin admin $BIN_DIR/keystone-manage role grant KeystoneServiceAdmin admin From edef244e4019696c2e31474eb4ba4ba23b712709 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 20 Jan 2012 12:45:32 -0800 Subject: [PATCH 282/967] Fix test that checks whether insances from previous runs need to be removed * Fixes bug 919389 Change-Id: I11e0c46426e627448562802e56fca186a5a365ef --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index cf121af0..5e05956f 100755 --- a/stack.sh +++ b/stack.sh @@ -930,7 +930,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then # Destroy old instances instances=`virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"` - if [ ! $instances = "" ]; then + if [ ! "$instances" = "" ]; then echo $instances | xargs -n1 virsh destroy || true echo $instances | xargs -n1 virsh undefine || true fi From 94c67aafa7f14f33a09f753c3f36c09430f9c268 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Sun, 22 Jan 2012 11:56:47 -0800 Subject: [PATCH 283/967] Remove openstack.compute dep for horizon * Fixes bug 920142 Change-Id: I4c522dfdca2ddb0b46d0377d60485976ef61f916 --- files/pips/horizon | 2 -- 1 file changed, 2 deletions(-) diff --git a/files/pips/horizon b/files/pips/horizon index 893efb77..44bf6db3 100644 --- a/files/pips/horizon +++ b/files/pips/horizon @@ -1,4 +1,2 @@ django-nose-selenium pycrypto==2.3 - --e git+https://github.com/jacobian/openstack.compute.git#egg=openstack From de37dbd14f452cefdca1d126000c2f98463dd15c Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Mon, 23 Jan 2012 01:56:22 -0800 Subject: [PATCH 284/967] bug 920364: clean-up ovs_quantum DB between runs. 
Change-Id: Ibf2417c2ceade2ae04d1f08da0f61fbb1bd7dc5a --- stack.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/stack.sh b/stack.sh index cf121af0..c6d78334 100755 --- a/stack.sh +++ b/stack.sh @@ -1365,6 +1365,7 @@ if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then apt_get install openvswitch-switch openvswitch-datapath-dkms # Create database for the plugin/agent if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS ovs_quantum;' mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum;' else echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." From 5d1b965fe6dcfd59c46ba5cb891101f988a577cd Mon Sep 17 00:00:00 2001 From: "Kevin L. Mitchell" Date: Mon, 23 Jan 2012 14:41:50 -0600 Subject: [PATCH 285/967] Update pipelines The change https://review.openstack.org/#change,3190 updates the pipelines in api-paste.ini to remove deprecated middleware, but devstack was still using the old pipelines and thus barfed. This updates the pipelines in devstack. Change-Id: I3088d3cd6c9e1bbf5762e74cbc8eaf8d06c44405 --- stack.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index cf121af0..99a08287 100755 --- a/stack.sh +++ b/stack.sh @@ -845,9 +845,9 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then } replace_pipeline "ec2cloud" "ec2faultwrap logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor" replace_pipeline "ec2admin" "ec2faultwrap logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor" - replace_pipeline "openstack_api_v2" "faultwrap authtoken keystonecontext ratelimit serialize extensions osapi_app_v2" - replace_pipeline "openstack_compute_api_v2" "faultwrap authtoken keystonecontext ratelimit serialize compute_extensions osapi_compute_app_v2" - replace_pipeline "openstack_volume_api_v1" "faultwrap authtoken keystonecontext ratelimit serialize volume_extensions osapi_volume_app_v1" + replace_pipeline "openstack_api_v2" "faultwrap authtoken keystonecontext ratelimit osapi_app_v2" + replace_pipeline "openstack_compute_api_v2" "faultwrap authtoken keystonecontext ratelimit osapi_compute_app_v2" + replace_pipeline "openstack_volume_api_v1" "faultwrap authtoken keystonecontext ratelimit osapi_volume_app_v1" fi # Helper to clean iptables rules From 965716e16841b28e34ef296ab861d9e31e9d0cdf Mon Sep 17 00:00:00 2001 From: Johannes Erdfelt Date: Mon, 23 Jan 2012 14:45:21 -0800 Subject: [PATCH 286/967] Explicitly set connection_type to libvirt A future merge to nova (http://review.openstack.org/2918) will require this to be set Change-Id: I92c28336a6022c325a84d6594764779d8fe5e655 --- AUTHORS | 1 + stack.sh | 1 + 2 files changed, 2 insertions(+) diff --git a/AUTHORS b/AUTHORS index 84a565ed..34e04742 100644 --- a/AUTHORS +++ b/AUTHORS @@ -10,6 +10,7 @@ James E. 
Blair Jason Cannavale Jay Pipes Jesse Andrews +Johannes Erdfelt Justin Shepherd Kiall Mac Innes Scott Moser diff --git a/stack.sh b/stack.sh index 99a08287..307be944 100755 --- a/stack.sh +++ b/stack.sh @@ -1228,6 +1228,7 @@ if [ "$VIRT_DRIVER" = 'xenserver' ]; then XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} add_nova_flag "--firewall_driver=$XEN_FIREWALL_DRIVER" else + add_nova_flag "--connection_type=libvirt" add_nova_flag "--flat_network_bridge=$FLAT_NETWORK_BRIDGE" if [ -n "$FLAT_INTERFACE" ]; then add_nova_flag "--flat_interface=$FLAT_INTERFACE" From cb5dd679190e8a637f0d5131e0641a1b78fa6dfd Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 23 Jan 2012 17:36:42 -0800 Subject: [PATCH 287/967] Removes the additions to api-paste.conf * Keystone middleware moved back into nova in b160d731 * Removaes old pipeline replace * Adds validator in to ec2 pipeline Change-Id: Idf7a274e7961bf18b9dd7533f7a0f0a606a291d8 --- files/nova-api-paste.ini | 20 -------------------- stack.sh | 5 ++--- 2 files changed, 2 insertions(+), 23 deletions(-) delete mode 100644 files/nova-api-paste.ini diff --git a/files/nova-api-paste.ini b/files/nova-api-paste.ini deleted file mode 100644 index 76c8aae3..00000000 --- a/files/nova-api-paste.ini +++ /dev/null @@ -1,20 +0,0 @@ -########## -# Extras # -########## - -[filter:keystonecontext] -paste.filter_factory = keystone.middleware.nova_keystone_context:NovaKeystoneContext.factory - -[filter:totoken] -paste.filter_factory = keystone.middleware.ec2_token:EC2Token.factory - -[filter:authtoken] -paste.filter_factory = keystone.middleware.auth_token:filter_factory -service_protocol = http -service_host = 127.0.0.1 -service_port = 5000 -auth_host = 127.0.0.1 -auth_port = 35357 -auth_protocol = http -auth_uri = http://127.0.0.1:5000/ -admin_token = %SERVICE_TOKEN% diff --git a/stack.sh b/stack.sh index 307be944..608e7400 100755 --- a/stack.sh +++ b/stack.sh @@ -834,7 +834,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then # the configuration required for nova to validate keystone tokens. 
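
With keystone's middleware shipping inside nova again, the hunk below simply copies nova's stock api-paste.ini, substitutes the service token, and rewrites the pipelines (adding the validator filter to ec2cloud). One way to sanity-check the result after stack.sh has run — a sketch that reuses the copy destination from the hunk:

    # Print the effective ec2cloud pipeline from the generated config
    # and confirm the validator filter made it in.
    sed -n '/\[pipeline:ec2cloud\]/,/^\[/p' $NOVA_DIR/bin/nova-api-paste.ini | grep '^pipeline'
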
# First we add a some extra data to the default paste config from nova - cat $NOVA_DIR/etc/nova/api-paste.ini $FILES/nova-api-paste.ini > $NOVA_DIR/bin/nova-api-paste.ini + cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_DIR/bin/nova-api-paste.ini # Then we add our own service token to the configuration sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini @@ -843,9 +843,8 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then function replace_pipeline() { sed "/\[pipeline:$1\]/,/\[/s/^pipeline = .*/pipeline = $2/" -i $NOVA_DIR/bin/nova-api-paste.ini } - replace_pipeline "ec2cloud" "ec2faultwrap logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor" + replace_pipeline "ec2cloud" "ec2faultwrap logrequest totoken authtoken keystonecontext cloudrequest authorizer validator ec2executor" replace_pipeline "ec2admin" "ec2faultwrap logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor" - replace_pipeline "openstack_api_v2" "faultwrap authtoken keystonecontext ratelimit osapi_app_v2" replace_pipeline "openstack_compute_api_v2" "faultwrap authtoken keystonecontext ratelimit osapi_compute_app_v2" replace_pipeline "openstack_volume_api_v1" "faultwrap authtoken keystonecontext ratelimit osapi_volume_app_v1" fi From 9bb84f0a706ddf18a550c75ecaf18e25a087a273 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 24 Jan 2012 11:45:52 -0600 Subject: [PATCH 288/967] Allow configuration of Apache user and group * Fixes bug 915064 * APACHE_USER defaults to $USER * APACHE_GROUP defaults to $APACHE_USER Change-Id: I06cf39cfd884d8f858ab98b84b3f40c3f6ff3a40 --- files/000-default.template | 4 ++-- stack.sh | 15 +++++++++++++-- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/files/000-default.template b/files/000-default.template index 1d7380d9..d97f3659 100644 --- a/files/000-default.template +++ b/files/000-default.template @@ -1,8 +1,8 @@ WSGIScriptAlias / %HORIZON_DIR%/openstack-dashboard/dashboard/wsgi/django.wsgi - WSGIDaemonProcess horizon user=%USER% group=%USER% processes=3 threads=10 + WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 SetEnv APACHE_RUN_USER %USER% - SetEnv APACHE_RUN_GROUP %USER% + SetEnv APACHE_RUN_GROUP %GROUP% WSGIProcessGroup horizon DocumentRoot %HORIZON_DIR%/.blackhole/ diff --git a/stack.sh b/stack.sh index 307be944..a35cbba6 100755 --- a/stack.sh +++ b/stack.sh @@ -410,6 +410,14 @@ KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST} KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-http} +# Horizon +# ------- + +# Allow overriding the default Apache user and group, default both to +# current user. 
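
The lines that follow introduce the Apache user/group knobs with bash's `${VAR:-default}` expansion; note the chained default, where the group falls back to whatever the user resolved to. A brief illustration of the precedence (values here are hypothetical):

    APACHE_USER=${APACHE_USER:-$USER}
    APACHE_GROUP=${APACHE_GROUP:-$APACHE_USER}

    # Nothing set, USER=stack        ->  APACHE_USER=stack     APACHE_GROUP=stack
    # APACHE_USER=www-data exported  ->  APACHE_USER=www-data  APACHE_GROUP=www-data
    # Both exported                  ->  values pass through unchanged
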
+APACHE_USER=${APACHE_USER:-$USER} +APACHE_GROUP=${APACHE_GROUP:-$APACHE_USER} + # Log files # --------- @@ -762,8 +770,11 @@ if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then ## Configure apache's 000-default to run horizon sudo cp $FILES/000-default.template /etc/apache2/sites-enabled/000-default - sudo sed -e "s,%USER%,$USER,g" -i /etc/apache2/sites-enabled/000-default - sudo sed -e "s,%HORIZON_DIR%,$HORIZON_DIR,g" -i /etc/apache2/sites-enabled/000-default + sudo sed -e " + s,%USER%,$APACHE_USER,g; + s,%GROUP%,$APACHE_GROUP,g; + s,%HORIZON_DIR%,$HORIZON_DIR,g; + " -i /etc/apache2/sites-enabled/000-default sudo service apache2 restart fi From 75bbd75d9189a380b254ffd2f89cbf5108e2efb0 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 19 Jan 2012 23:28:46 +0000 Subject: [PATCH 289/967] Allow euca-upload-bundle to work with devstack * modifies stack.sh to run nova-cert and nova-objectstore * adds exercises/bundle.sh to test for bundling * requires https://review.openstack.org/#change,3200 Change-Id: I850891948fbdfdf5890225f94df755e5dbc733d0 --- exercises/bundle.sh | 48 +++++++++++++++++++++++++++++++++++++++++++++ openrc | 14 +++++++++++++ stack.sh | 7 ++++++- 3 files changed, 68 insertions(+), 1 deletion(-) create mode 100755 exercises/bundle.sh diff --git a/exercises/bundle.sh b/exercises/bundle.sh new file mode 100755 index 00000000..ca35c9af --- /dev/null +++ b/exercises/bundle.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash + +# we will use the ``euca2ools`` cli tool that wraps the python boto +# library to test ec2 compatibility +# + +# This script exits on an error so that errors don't compound and you see +# only the first error that occured. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. +set -o xtrace + +# Settings +# ======== + +# Use openrc + stackrc + localrc for settings +pushd $(cd $(dirname "$0")/.. && pwd) +source ./openrc + +# Remove old certificates +rm -f cacert.pem +rm -f cert.pem +rm -f pk.pem + +# Get Certificates +nova x509-get-root-cert +nova x509-create-cert +popd + +# Max time to wait for image to be registered +REGISTER_TIMEOUT=${REGISTER_TIMEOUT:-15} + +BUCKET=testbucket +IMAGE=bundle.img +truncate -s 5M /tmp/$IMAGE +euca-bundle-image -i /tmp/$IMAGE + + +euca-upload-bundle -b $BUCKET -m /tmp/$IMAGE.manifest.xml +AMI=`euca-register $BUCKET/$IMAGE.manifest.xml | cut -f2` + +# Wait for the image to become available +if ! 
timeout $REGISTER_TIMEOUT sh -c "while euca-describe-images | grep '$AMI' | grep 'available'; do sleep 1; done"; then + echo "Image $AMI not available within $REGISTER_TIMEOUT seconds" + exit 1 +fi diff --git a/openrc b/openrc index 0f327d26..9c941413 100644 --- a/openrc +++ b/openrc @@ -49,6 +49,20 @@ export EC2_ACCESS_KEY=${USERNAME:-demo} # Secret key is set in the initial keystone data to the admin password export EC2_SECRET_KEY=${ADMIN_PASSWORD:-secrete} +# Euca2ools Certificate stuff for uploading bundles +# You can get your certs using ./tools/get_certs.sh +NOVARC=$(readlink -f "${BASH_SOURCE:-${0}}" 2>/dev/null) || + NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' "${BASH_SOURCE:-${0}}") +NOVA_KEY_DIR=${NOVARC%/*} +export S3_URL=http://$SERVICE_HOST:3333 +export EC2_USER_ID=42 # nova does not use user id, but bundling requires it +export EC2_PRIVATE_KEY=${NOVA_KEY_DIR}/pk.pem +export EC2_CERT=${NOVA_KEY_DIR}/cert.pem +export NOVA_CERT=${NOVA_KEY_DIR}/cacert.pem +export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set +alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}" +alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}" + # set log level to DEBUG (helps debug issues) # export NOVACLIENT_DEBUG=1 diff --git a/stack.sh b/stack.sh index d6a78696..32813a9b 100755 --- a/stack.sh +++ b/stack.sh @@ -193,7 +193,7 @@ Q_PORT=${Q_PORT:-9696} Q_HOST=${Q_HOST:-localhost} # Specify which services to launch. These generally correspond to screen tabs -ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit,openstackx} +ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit,openstackx} # Name of the lvm volume group to use/create for iscsi volumes VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes} @@ -1143,6 +1143,9 @@ add_nova_flag "--allow_admin_api" add_nova_flag "--scheduler_driver=$SCHEDULER" add_nova_flag "--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf" add_nova_flag "--fixed_range=$FIXED_RANGE" +if [[ "$ENABLED_SERVICES" =~ "n-obj" ]]; then + add_nova_flag "--s3_host=$SERVICE_HOST" +fi if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then add_nova_flag "--network_manager=nova.network.quantum.manager.QuantumManager" add_nova_flag "--quantum_connection_host=$Q_HOST" @@ -1418,6 +1421,8 @@ fi # within the context of our original shell (so our groups won't be updated). # Use 'sg' to execute nova-compute as a member of the libvirtd group. screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_DIR/bin/nova-compute" +screen_it n-crt "cd $NOVA_DIR && $NOVA_DIR/bin/nova-cert" +screen_it n-obj "cd $NOVA_DIR && $NOVA_DIR/bin/nova-objectstore" screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume" screen_it n-net "cd $NOVA_DIR && $NOVA_DIR/bin/nova-network" screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler" From 5a09c9220b7be5101c123487fb9330a616f25fae Mon Sep 17 00:00:00 2001 From: Dave Lapsley Date: Wed, 25 Jan 2012 17:22:15 -0500 Subject: [PATCH 290/967] Fix bug 921845 Update stack.sh and stackrc to support new Quantum server/client repo split. 
Change-Id: Iea8df5270f765a635a444c0e39a22809473a605f --- stack.sh | 13 ++++++------- stackrc | 4 ++++ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/stack.sh b/stack.sh index f2ca9d51..aafeaacd 100755 --- a/stack.sh +++ b/stack.sh @@ -184,6 +184,7 @@ NOVNC_DIR=$DEST/noVNC SWIFT_DIR=$DEST/swift SWIFT_KEYSTONE_DIR=$DEST/swift-keystone2 QUANTUM_DIR=$DEST/quantum +QUANTUM_CLIENT_DIR=$DEST/python-quantumclient # Default Quantum Plugin Q_PLUGIN=${Q_PLUGIN:-openvswitch} @@ -624,6 +625,7 @@ fi if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then # quantum git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH + git_clone $QUANTUM_CLIENT_REPO $QUANTUM_CLIENT_DIR $QUANTUM_CLIENT_BRANCH fi # Initialization @@ -743,12 +745,9 @@ if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then # Install apache2, which is NOPRIME'd apt_get install apache2 libapache2-mod-wsgi - # Horizon currently imports quantum even if you aren't using it. Instead - # of installing quantum we can create a simple module that will pass the - # initial imports - mkdir -p $HORIZON_DIR/openstack-dashboard/quantum || true - touch $HORIZON_DIR/openstack-dashboard/quantum/__init__.py - touch $HORIZON_DIR/openstack-dashboard/quantum/client.py + # Link to quantum client directory. + rm -fr ${HORIZON_DIR}/openstack-dashboard/quantum + ln -s ${QUANTUM_CLIENT_DIR}/quantum ${HORIZON_DIR}/openstack-dashboard/quantum # ``local_settings.py`` is used to override horizon default settings. @@ -1386,7 +1385,7 @@ if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then # Make sure we're using the openvswitch plugin sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE fi - screen_it q-svc "cd $QUANTUM_DIR && PYTHONPATH=.:$PYTHONPATH python $QUANTUM_DIR/bin/quantum-server $QUANTUM_DIR/etc/quantum.conf" + screen_it q-svc "cd $QUANTUM_DIR && PYTHONPATH=.:$QUANTUM_CLIENT_DIR:$PYTHONPATH python $QUANTUM_DIR/bin/quantum-server $QUANTUM_DIR/etc/quantum.conf" fi # Quantum agent (for compute nodes) diff --git a/stackrc b/stackrc index 9bc3be6f..f782ff3c 100644 --- a/stackrc +++ b/stackrc @@ -43,6 +43,10 @@ OPENSTACKX_BRANCH=master QUANTUM_REPO=https://github.com/openstack/quantum QUANTUM_BRANCH=master +# quantum client +QUANTUM_CLIENT_REPO=https://github.com/openstack/python-quantumclient +QUANTUM_CLIENT_BRANCH=master + # CI test suite CITEST_REPO=https://github.com/openstack/tempest.git CITEST_BRANCH=master From 7d9543075ac9533749e53e808bc3ad179a74b2b6 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 25 Jan 2012 16:35:57 -0600 Subject: [PATCH 291/967] Keystone updates * use master branch (was stable/diablo) * add database sync Change-Id: Ie3bd383e2fca14ec37379bd21d93d92d62451f3b --- stack.sh | 4 ++++ stackrc | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index f2ca9d51..21195fc2 100755 --- a/stack.sh +++ b/stack.sh @@ -1289,6 +1289,10 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g; s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g; " -i $KEYSTONE_DATA + + # Prepare up the database + $KEYSTONE_DIR/bin/keystone-manage sync_database + # initialize keystone with default users/endpoints ENABLED_SERVICES=$ENABLED_SERVICES BIN_DIR=$KEYSTONE_DIR/bin bash $KEYSTONE_DATA diff --git a/stackrc b/stackrc index 9bc3be6f..6a844698 100644 --- a/stackrc +++ b/stackrc @@ -16,7 +16,7 @@ GLANCE_BRANCH=master # unified 
auth system (manages accounts/tokens) KEYSTONE_REPO=https://github.com/openstack/keystone.git -KEYSTONE_BRANCH=stable/diablo +KEYSTONE_BRANCH=master # a websockets/html5 or flash powered VNC console for vm instances NOVNC_REPO=https://github.com/cloudbuilders/noVNC.git From d4f44b49d944ae0498108f33c5ff2a9e9ad24376 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 26 Jan 2012 11:09:25 -0600 Subject: [PATCH 292/967] Add python-dateutil for nova api Change-Id: I6926fec02e432cbbe49312aa51b0a931d8a091f3 --- files/apts/n-api | 1 + 1 file changed, 1 insertion(+) create mode 100644 files/apts/n-api diff --git a/files/apts/n-api b/files/apts/n-api new file mode 100644 index 00000000..0f08daac --- /dev/null +++ b/files/apts/n-api @@ -0,0 +1 @@ +python-dateutil From 50fc5c64ec496cba6dfa494b9cbc1faae95d0775 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 26 Jan 2012 09:38:33 -0800 Subject: [PATCH 293/967] Make --vnc_listen configurable Change-Id: I3499306f5fd56f602657794632dddb0bb11f2958 --- stack.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stack.sh b/stack.sh index 776279a9..1dadb235 100755 --- a/stack.sh +++ b/stack.sh @@ -1198,6 +1198,10 @@ if [ "$VIRT_DRIVER" = 'xenserver' ]; then else VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} fi +# Address on which instance vncservers will listen on compute hosts. +# For multi-host, this should be the management ip of the compute host. +VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} +add_nova_flag "--vncserver_listen=$VNCSERVER_LISTEN" add_nova_flag "--vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS" add_nova_flag "--api_paste_config=$NOVA_DIR/bin/nova-api-paste.ini" add_nova_flag "--image_service=nova.image.glance.GlanceImageService" From fc3c62d17cff9e5b7b54d60f6dd135bde8e8bd91 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 26 Jan 2012 12:29:51 -0800 Subject: [PATCH 294/967] Remove openstackx from devstack * Fixes bug 917457 Change-Id: I590eb54195b2783adb78f64486c300f6db650ac2 --- stack.sh | 16 ++-------------- stackrc | 5 ----- 2 files changed, 2 insertions(+), 19 deletions(-) diff --git a/stack.sh b/stack.sh index 1dadb235..84bf9b57 100755 --- a/stack.sh +++ b/stack.sh @@ -179,7 +179,6 @@ GLANCE_DIR=$DEST/glance KEYSTONE_DIR=$DEST/keystone NOVACLIENT_DIR=$DEST/python-novaclient KEYSTONECLIENT_DIR=$DEST/python-keystoneclient -OPENSTACKX_DIR=$DEST/openstackx NOVNC_DIR=$DEST/noVNC SWIFT_DIR=$DEST/swift SWIFT_KEYSTONE_DIR=$DEST/swift-keystone2 @@ -194,7 +193,7 @@ Q_PORT=${Q_PORT:-9696} Q_HOST=${Q_HOST:-localhost} # Specify which services to launch. These generally correspond to screen tabs -ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit,openstackx} +ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit} # Name of the lvm volume group to use/create for iscsi volumes VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes} @@ -617,11 +616,6 @@ if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH fi -if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then - # openstackx is a collection of extensions to openstack.compute & nova - # that is *deprecated*. The code is being moved into python-novaclient & nova. 
- git_clone $OPENSTACKX_REPO $OPENSTACKX_DIR $OPENSTACKX_BRANCH -fi if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then # quantum git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH @@ -650,9 +644,6 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" || fi cd $NOVACLIENT_DIR; sudo python setup.py develop cd $NOVA_DIR; sudo python setup.py develop -if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then - cd $OPENSTACKX_DIR; sudo python setup.py develop -fi if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then cd $KEYSTONECLIENT_DIR; sudo python setup.py develop cd $HORIZON_DIR/horizon; sudo python setup.py develop @@ -1175,16 +1166,13 @@ if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then # oneiric no longer supports ietadm add_nova_flag "--iscsi_helper=tgtadm" fi +add_nova_flag "--osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions" add_nova_flag "--my_ip=$HOST_IP" add_nova_flag "--public_interface=$PUBLIC_INTERFACE" add_nova_flag "--vlan_interface=$VLAN_INTERFACE" add_nova_flag "--sql_connection=$BASE_SQL_CONN/nova" add_nova_flag "--libvirt_type=$LIBVIRT_TYPE" add_nova_flag "--instance_name_template=${INSTANCE_NAME_PREFIX}%08x" -if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then - add_nova_flag "--osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions" - add_nova_flag "--osapi_compute_extension=extensions.admin.Admin" -fi if [[ "$ENABLED_SERVICES" =~ "n-novnc" ]]; then NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} add_nova_flag "--novncproxy_base_url=$NOVNCPROXY_URL" diff --git a/stackrc b/stackrc index 9cc93ff5..ce349716 100644 --- a/stackrc +++ b/stackrc @@ -34,11 +34,6 @@ NOVACLIENT_BRANCH=master KEYSTONECLIENT_REPO=https://github.com/openstack/python-keystoneclient KEYSTONECLIENT_BRANCH=master -# openstackx is a collection of extensions to openstack.compute & nova -# that is *deprecated*. The code is being moved into python-novaclient & nova. -OPENSTACKX_REPO=https://github.com/cloudbuilders/openstackx.git -OPENSTACKX_BRANCH=master - # quantum service QUANTUM_REPO=https://github.com/openstack/quantum QUANTUM_BRANCH=master From a575d500bae42646471cafef72d39a4f61c1c56f Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 26 Jan 2012 12:59:26 -0800 Subject: [PATCH 295/967] Remove horizon session db. * Removes stale sessions * Forcefully logs out the user between runs Change-Id: I5374076283b8bc63826004c455b86afc86009b00 --- stack.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stack.sh b/stack.sh index 1dadb235..86764f04 100755 --- a/stack.sh +++ b/stack.sh @@ -749,6 +749,8 @@ if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then rm -fr ${HORIZON_DIR}/openstack-dashboard/quantum ln -s ${QUANTUM_CLIENT_DIR}/quantum ${HORIZON_DIR}/openstack-dashboard/quantum + # Remove stale session database. + rm -f $HORIZON_DIR/openstack-dashboard/local/dashboard_openstack.sqlite3 # ``local_settings.py`` is used to override horizon default settings. local_settings=$HORIZON_DIR/openstack-dashboard/local/local_settings.py From ce05e03213847da1220356e5d95adfde1bd3ae32 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Mon, 30 Jan 2012 14:43:14 -0800 Subject: [PATCH 296/967] exercises: Multihost support for Glance client floating_ips and volumes exercises both access Glance directly, but assume it is running locally. 
To better accomodate exercising a multi-host cloud, specify glance host via GLANCE_HOST setting which defaults to HOST_IP to maintain current single-node functionality. Change-Id: Iad06044af031083afa477204d446ada5161ca521 --- AUTHORS | 1 + exercises/floating_ips.sh | 4 ++-- exercises/volumes.sh | 4 ++-- openrc | 4 ++++ 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/AUTHORS b/AUTHORS index 34e04742..ec71326f 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,3 +1,4 @@ +Adam Gandelman Andy Smith Anthony Young Brad Hall diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index f7b5240b..8afa3ccd 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -70,10 +70,10 @@ nova list nova image-list # But we recommend using glance directly -glance -f -A $TOKEN index +glance -f -A $TOKEN -H $GLANCE_HOST index # Grab the id of the image to launch -IMAGE=`glance -f -A $TOKEN index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` +IMAGE=`glance -f -A $TOKEN -H $GLANCE_HOST index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` # Security Groups # --------------- diff --git a/exercises/volumes.sh b/exercises/volumes.sh index c2288de2..3f754839 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -56,10 +56,10 @@ nova list nova image-list # But we recommend using glance directly -glance -f -A $TOKEN index +glance -f -A $TOKEN -H $GLANCE_HOST index # Grab the id of the image to launch -IMAGE=`glance -f -A $TOKEN index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` +IMAGE=`glance -f -A $TOKEN -H $GLANCE_HOST index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` # determinine instance type # ------------------------- diff --git a/openrc b/openrc index 9c941413..195a8fe8 100644 --- a/openrc +++ b/openrc @@ -8,6 +8,10 @@ source ./stackrc HOST_IP=${HOST_IP:-127.0.0.1} SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} +# Some exercises call glance directly. On a single-node installation, Glance +# should be listening on HOST_IP. If its running elsewhere, it can be set here +GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} + # Nova original used project_id as the *account* that owned resources (servers, # ip address, ...) With the addition of Keystone we have standardized on the # term **tenant** as the entity that owns the resources. 
**novaclient** still From 608bb12ae5fe22f3e9984706a2484d6839975e96 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 10 Jan 2012 14:43:17 -0600 Subject: [PATCH 297/967] Update devstack's Tempest integration Split build_ci_config.sh into: * tools/build_tempest.sh - checks out Tempest and installs prereqs * tools/configure_tempest.sh - creates tempest.conf from devstack Change-Id: I22f40226afb617a5a6e32f2631b07d5509f10b13 --- stackrc | 6 +- tools/build_tempest.sh | 85 ++++++++++++++ tools/build_uec_ramdisk.sh | 2 +- ...uild_ci_config.sh => configure_tempest.sh} | 105 ++++++------------ 4 files changed, 124 insertions(+), 74 deletions(-) create mode 100755 tools/build_tempest.sh rename tools/{build_ci_config.sh => configure_tempest.sh} (65%) diff --git a/stackrc b/stackrc index ce349716..acc5ac06 100644 --- a/stackrc +++ b/stackrc @@ -42,9 +42,9 @@ QUANTUM_BRANCH=master QUANTUM_CLIENT_REPO=https://github.com/openstack/python-quantumclient QUANTUM_CLIENT_BRANCH=master -# CI test suite -CITEST_REPO=https://github.com/openstack/tempest.git -CITEST_BRANCH=master +# Tempest test suite +TEMPEST_REPO=https://github.com/openstack/tempest.git +TEMPEST_BRANCH=master # Specify a comma-separated list of uec images to download and install into glance. # supported urls here are: diff --git a/tools/build_tempest.sh b/tools/build_tempest.sh new file mode 100755 index 00000000..aa44766a --- /dev/null +++ b/tools/build_tempest.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash +# +# build_tempest.sh - Checkout and prepare a Tempest repo +# (https://github.com/openstack/tempest.git) + +function usage { + echo "$0 - Check out and prepare a Tempest repo" + echo "" + echo "Usage: $0" + exit 1 +} + +if [ "$1" = "-h" ]; then + usage +fi + +# Clean up any resources that may be in use +cleanup() { + set +o errexit + + # Kill ourselves to signal any calling process + trap 2; kill -2 $$ +} + +trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT + +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=`cd $TOOLS_DIR/..; pwd` + +# Abort if localrc is not set +if [ ! -e $TOP_DIR/localrc ]; then + echo "You must have a localrc with ALL necessary passwords and configuration defined before proceeding." + echo "See stack.sh for required passwords." + exit 1 +fi + +# Source params +source ./stackrc + +# Where Openstack code lives +DEST=${DEST:-/opt/stack} + +TEMPEST_DIR=$DEST/tempest + +DIST_NAME=${DIST_NAME:-oneiric} + +# git clone only if directory doesn't exist already. Since ``DEST`` might not +# be owned by the installation user, we create the directory and change the +# ownership to the proper user. +function git_clone { + + GIT_REMOTE=$1 + GIT_DEST=$2 + GIT_BRANCH=$3 + + # do a full clone only if the directory doesn't exist + if [ ! 
-d $GIT_DEST ]; then + git clone $GIT_REMOTE $GIT_DEST + cd $2 + # This checkout syntax works for both branches and tags + git checkout $GIT_BRANCH + elif [[ "$RECLONE" == "yes" ]]; then + # if it does exist then simulate what clone does if asked to RECLONE + cd $GIT_DEST + # set the url to pull from and fetch + git remote set-url origin $GIT_REMOTE + git fetch origin + # remove the existing ignored files (like pyc) as they cause breakage + # (due to the py files having older timestamps than our pyc, so python + # thinks the pyc files are correct using them) + find $GIT_DEST -name '*.pyc' -delete + git checkout -f origin/$GIT_BRANCH + # a local branch might not exist + git branch -D $GIT_BRANCH || true + git checkout -b $GIT_BRANCH + fi +} + +# Install tests and prerequisites +sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors `cat $TOP_DIR/files/pips/tempest` + +git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH + +trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT diff --git a/tools/build_uec_ramdisk.sh b/tools/build_uec_ramdisk.sh index 3bd704ba..174eaac7 100755 --- a/tools/build_uec_ramdisk.sh +++ b/tools/build_uec_ramdisk.sh @@ -149,7 +149,7 @@ git_clone $NOVNC_REPO $DEST/novnc $NOVNC_BRANCH git_clone $HORIZON_REPO $DEST/horizon $HORIZON_BRANCH git_clone $NOVACLIENT_REPO $DEST/python-novaclient $NOVACLIENT_BRANCH git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH -git_clone $CITEST_REPO $DEST/tempest $CITEST_BRANCH +git_clone $TEMPEST_REPO $DEST/tempest $TEMPEST_BRANCH # Use this version of devstack rm -rf $MNT_DIR/$DEST/devstack diff --git a/tools/build_ci_config.sh b/tools/configure_tempest.sh similarity index 65% rename from tools/build_ci_config.sh rename to tools/configure_tempest.sh index 79f6ead0..00add9a3 100755 --- a/tools/build_ci_config.sh +++ b/tools/configure_tempest.sh @@ -1,10 +1,9 @@ #!/usr/bin/env bash # -# build_ci_config.sh - Build a config.ini for tempest (openstack-integration-tests) -# (https://github.com/openstack/tempest.git) +# configure_tempest.sh - Build a tempest configuration file from devstack function usage { - echo "$0 - Build config.ini for tempest" + echo "$0 - Build tempest.conf" echo "" echo "Usage: $0 [configdir]" exit 1 @@ -19,9 +18,6 @@ cleanup() { set +o errexit # Mop up temporary files - if [ -n "$CONFIG_CONF_TMP" -a -e "$CONFIG_CONF_TMP" ]; then - rm -f $CONFIG_CONF_TMP - fi if [ -n "$CONFIG_INI_TMP" -a -e "$CONFIG_INI_TMP" ]; then rm -f $CONFIG_INI_TMP fi @@ -46,53 +42,21 @@ fi # Source params source ./stackrc +# Set defaults not configured by stackrc +TENANT=${TENANT:-admin} +USERNAME=${USERNAME:-admin} +IDENTITY_HOST=${IDENTITY_HOST:-$HOST_IP} +IDENTITY_PORT=${IDENTITY_PORT:-5000} +IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0} + # Where Openstack code lives DEST=${DEST:-/opt/stack} -CITEST_DIR=$DEST/tempest +TEMPEST_DIR=$DEST/tempest -CONFIG_DIR=${1:-$CITEST_DIR/etc} -CONFIG_CONF=$CONFIG_DIR/storm.conf +CONFIG_DIR=${1:-$TEMPEST_DIR/etc} CONFIG_INI=$CONFIG_DIR/config.ini - -DIST_NAME=${DIST_NAME:-oneiric} - -# git clone only if directory doesn't exist already. Since ``DEST`` might not -# be owned by the installation user, we create the directory and change the -# ownership to the proper user. -function git_clone { - - GIT_REMOTE=$1 - GIT_DEST=$2 - GIT_BRANCH=$3 - - # do a full clone only if the directory doesn't exist - if [ ! 
-d $GIT_DEST ]; then - git clone $GIT_REMOTE $GIT_DEST - cd $2 - # This checkout syntax works for both branches and tags - git checkout $GIT_BRANCH - elif [[ "$RECLONE" == "yes" ]]; then - # if it does exist then simulate what clone does if asked to RECLONE - cd $GIT_DEST - # set the url to pull from and fetch - git remote set-url origin $GIT_REMOTE - git fetch origin - # remove the existing ignored files (like pyc) as they cause breakage - # (due to the py files having older timestamps than our pyc, so python - # thinks the pyc files are correct using them) - find $GIT_DEST -name '*.pyc' -delete - git checkout -f origin/$GIT_BRANCH - # a local branch might not exist - git branch -D $GIT_BRANCH || true - git checkout -b $GIT_BRANCH - fi -} - -# Install tests and prerequisites -sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors `cat $TOP_DIR/files/pips/tempest` - -git_clone $CITEST_REPO $CITEST_DIR $CITEST_BRANCH +TEMPEST_CONF=$CONFIG_DIR/tempest.conf if [ ! -f $DEST/.ramdisk ]; then # Process network configuration vars @@ -127,6 +91,7 @@ GLANCE_PORT=$2 # Defaults to use first image IMAGE_DIR="" +IMAGE_NAME="" for imagedir in $TOP_DIR/files/images/*; do KERNEL="" RAMDISK="" @@ -146,34 +111,34 @@ for imagedir in $TOP_DIR/files/images/*; do # Save the first image directory that contains a disk image link if [ -z "$IMAGE_DIR" ]; then IMAGE_DIR=$imagedir + IMAGE_NAME=$(basename ${IMAGE%.img}) fi fi done +if [[ -n "$IMAGE_NAME" ]]; then + # Get the image UUID + IMAGE_UUID=$(nova image-list | grep " $IMAGE_NAME " | cut -d'|' -f2) + # Strip spaces off + IMAGE_UUID=$(echo $IMAGE_UUID) +fi -# Create storm.conf +# Create tempest.conf from tempest.conf.sample -CONFIG_CONF_TMP=$(mktemp $CONFIG_CONF.XXXXXX) - cat >$CONFIG_CONF_TMP < Date: Tue, 31 Jan 2012 14:33:19 +0000 Subject: [PATCH 298/967] bug 924267: Explicitly set firewall_driver to nova.virt.libvirt.firewall.IptablesFirewallDriver Because of https://bugs.launchpad.net/nova/+bug/924266, the default firewall_driver should not be the libvirt's one. So set the driver explicitly. Change-Id: I1e6a0d824e857bb082eef2a684445241d4c31aae --- AUTHORS | 1 + stack.sh | 2 ++ 2 files changed, 3 insertions(+) diff --git a/AUTHORS b/AUTHORS index ec71326f..dc12105f 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,6 +1,7 @@ Adam Gandelman Andy Smith Anthony Young +Armando Migliaccio Brad Hall Chmouel Boudjnah Dean Troyer diff --git a/stack.sh b/stack.sh index f0830a17..17972c9d 100755 --- a/stack.sh +++ b/stack.sh @@ -1235,6 +1235,8 @@ if [ "$VIRT_DRIVER" = 'xenserver' ]; then add_nova_flag "--firewall_driver=$XEN_FIREWALL_DRIVER" else add_nova_flag "--connection_type=libvirt" + LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} + add_nova_flag "--firewall_driver=$LIBVIRT_FIREWALL_DRIVER" add_nova_flag "--flat_network_bridge=$FLAT_NETWORK_BRIDGE" if [ -n "$FLAT_INTERFACE" ]; then add_nova_flag "--flat_interface=$FLAT_INTERFACE" From 7f9aa71bcea98a27d144a71841842069106bb3bc Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 31 Jan 2012 12:11:56 -0600 Subject: [PATCH 299/967] Extract common functions into a separate file This is the start of an effort to organize devstack's code to better document the requirements for configuring the individual components. 
Change-Id: I3476b76b9d1f9ee63687fb9898a98729118cbd84 --- functions | 91 ++++++++++++++++++++++++++++ stack.sh | 75 +---------------------- tools/build_ramdisk.sh | 34 ++--------- tools/build_tempest.sh | 41 ++----------- tools/build_uec.sh | 7 ++- tools/build_uec_ramdisk.sh | 36 ++--------- tools/configure_tempest.sh | 5 +- tools/copy_dev_environment_to_uec.sh | 12 ++-- tools/get_uec_image.sh | 5 +- 9 files changed, 125 insertions(+), 181 deletions(-) create mode 100644 functions diff --git a/functions b/functions new file mode 100644 index 00000000..01c4758f --- /dev/null +++ b/functions @@ -0,0 +1,91 @@ +# functions - Common functions used by DevStack components + + +# apt-get wrapper to set arguments correctly +# apt_get package [package ...] +function apt_get() { + [[ "$OFFLINE" = "True" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + $sudo DEBIAN_FRONTEND=noninteractive \ + http_proxy=$http_proxy https_proxy=$https_proxy \ + apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@" +} + + +# Gracefully cp only if source file/dir exists +# cp_it source destination +function cp_it { + if [ -e $1 ] || [ -d $1 ]; then + cp -pRL $1 $2 + fi +} + + +# git clone only if directory doesn't exist already. Since ``DEST`` might not +# be owned by the installation user, we create the directory and change the +# ownership to the proper user. +# Set global RECLONE=yes to simulate a clone when dest-dir exists +# git_clone remote dest-dir branch +function git_clone { + [[ "$OFFLINE" = "True" ]] && return + + GIT_REMOTE=$1 + GIT_DEST=$2 + GIT_BRANCH=$3 + + if echo $GIT_BRANCH | egrep -q "^refs"; then + # If our branch name is a gerrit style refs/changes/... + if [[ ! -d $GIT_DEST ]]; then + git clone $GIT_REMOTE $GIT_DEST + fi + cd $GIT_DEST + git fetch $GIT_REMOTE $GIT_BRANCH && git checkout FETCH_HEAD + else + # do a full clone only if the directory doesn't exist + if [[ ! -d $GIT_DEST ]]; then + git clone $GIT_REMOTE $GIT_DEST + cd $GIT_DEST + # This checkout syntax works for both branches and tags + git checkout $GIT_BRANCH + elif [[ "$RECLONE" == "yes" ]]; then + # if it does exist then simulate what clone does if asked to RECLONE + cd $GIT_DEST + # set the url to pull from and fetch + git remote set-url origin $GIT_REMOTE + git fetch origin + # remove the existing ignored files (like pyc) as they cause breakage + # (due to the py files having older timestamps than our pyc, so python + # thinks the pyc files are correct using them) + find $GIT_DEST -name '*.pyc' -delete + git checkout -f origin/$GIT_BRANCH + # a local branch might not exist + git branch -D $GIT_BRANCH || true + git checkout -b $GIT_BRANCH + fi + fi +} + + +# pip install wrapper to set cache and proxy environment variables +# pip_install package [package ...] +function pip_install { + [[ "$OFFLINE" = "True" ]] && return + sudo PIP_DOWNLOAD_CACHE=/var/cache/pip \ + HTTP_PROXY=$http_proxy \ + HTTPS_PROXY=$https_proxy \ + pip install --use-mirrors $@ +} + + +# Normalize config values to True or False +# VAR=`trueorfalse default-value test-value` +function trueorfalse() { + local default=$1 + local testval=$2 + + [[ -z "$testval" ]] && { echo "$default"; return; } + [[ "0 no false False FALSE" =~ "$testval" ]] && { echo "False"; return; } + [[ "1 yes true True TRUE" =~ "$testval" ]] && { echo "True"; return; } + echo "$default" +} diff --git a/stack.sh b/stack.sh index 17972c9d..706e7ef6 100755 --- a/stack.sh +++ b/stack.sh @@ -35,6 +35,9 @@ fi # Keep track of the current devstack directory. 
TOP_DIR=$(cd $(dirname "$0") && pwd) +# Import common functions +. $TOP_DIR/functions + # stack.sh keeps the list of **apt** and **pip** dependencies in external # files, along with config templates and other useful files. You can find these # in the ``files`` directory (next to this script). We will reference this @@ -86,16 +89,6 @@ source ./stackrc # Destination path for installation ``DEST`` DEST=${DEST:-/opt/stack} -# apt-get wrapper to just get arguments set correctly -function apt_get() { - [[ "$OFFLINE" = "True" ]] && return - local sudo="sudo" - [ "$(id -u)" = "0" ] && sudo="env" - $sudo DEBIAN_FRONTEND=noninteractive \ - http_proxy=$http_proxy https_proxy=$https_proxy \ - apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@" -} - # Check to see if we are already running a stack.sh if screen -ls | egrep -q "[0-9].stack"; then echo "You are already running a stack.sh session." @@ -155,18 +148,6 @@ else sudo mv $TEMPFILE /etc/sudoers.d/stack_sh_nova fi -# Normalize config values to True or False -# VAR=`trueorfalse default-value test-value` -function trueorfalse() { - local default=$1 - local testval=$2 - - [[ -z "$testval" ]] && { echo "$default"; return; } - [[ "0 no false False FALSE" =~ "$testval" ]] && { echo "False"; return; } - [[ "1 yes true True TRUE" =~ "$testval" ]] && { echo "True"; return; } - echo "$default" -} - # Set True to configure stack.sh to run cleanly without Internet access. # stack.sh must have been previously run with Internet access to install # prerequisites and initialize $DEST. @@ -526,14 +507,6 @@ function get_packages() { done } -function pip_install { - [[ "$OFFLINE" = "True" ]] && return - sudo PIP_DOWNLOAD_CACHE=/var/cache/pip \ - HTTP_PROXY=$http_proxy \ - HTTPS_PROXY=$https_proxy \ - pip install --use-mirrors $@ -} - # install apt requirements apt_get update apt_get install $(get_packages) @@ -541,48 +514,6 @@ apt_get install $(get_packages) # install python requirements pip_install `cat $FILES/pips/* | uniq` -# git clone only if directory doesn't exist already. Since ``DEST`` might not -# be owned by the installation user, we create the directory and change the -# ownership to the proper user. -function git_clone { - [[ "$OFFLINE" = "True" ]] && return - - GIT_REMOTE=$1 - GIT_DEST=$2 - GIT_BRANCH=$3 - - if echo $GIT_BRANCH | egrep -q "^refs"; then - # If our branch name is a gerrit style refs/changes/... - if [ ! -d $GIT_DEST ]; then - git clone $GIT_REMOTE $GIT_DEST - fi - cd $GIT_DEST - git fetch $GIT_REMOTE $GIT_BRANCH && git checkout FETCH_HEAD - else - # do a full clone only if the directory doesn't exist - if [ ! 
-d $GIT_DEST ]; then - git clone $GIT_REMOTE $GIT_DEST - cd $GIT_DEST - # This checkout syntax works for both branches and tags - git checkout $GIT_BRANCH - elif [[ "$RECLONE" == "yes" ]]; then - # if it does exist then simulate what clone does if asked to RECLONE - cd $GIT_DEST - # set the url to pull from and fetch - git remote set-url origin $GIT_REMOTE - git fetch origin - # remove the existing ignored files (like pyc) as they cause breakage - # (due to the py files having older timestamps than our pyc, so python - # thinks the pyc files are correct using them) - find $GIT_DEST -name '*.pyc' -delete - git checkout -f origin/$GIT_BRANCH - # a local branch might not exist - git branch -D $GIT_BRANCH || true - git checkout -b $GIT_BRANCH - fi - fi -} - # compute service git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH # python client library to nova that horizon (and others) use diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh index feaa8a97..7c1600b1 100755 --- a/tools/build_ramdisk.sh +++ b/tools/build_ramdisk.sh @@ -47,7 +47,10 @@ IMG_FILE=$1 # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +. $TOP_DIR/functions # Store cwd CWD=`pwd` @@ -170,35 +173,6 @@ if [ ! -r "`ls $MNTDIR/boot/vmlinuz-*-generic | head -1`" ]; then chroot $MNTDIR apt-get install -y linux-generic fi -# git clone only if directory doesn't exist already. Since ``DEST`` might not -# be owned by the installation user, we create the directory and change the -# ownership to the proper user. -function git_clone { - - # clone new copy or fetch latest changes - CHECKOUT=${MNTDIR}$2 - if [ ! -d $CHECKOUT ]; then - mkdir -p $CHECKOUT - git clone $1 $CHECKOUT - else - pushd $CHECKOUT - git fetch - popd - fi - - # FIXME(ja): checkout specified version (should works for branches and tags) - - pushd $CHECKOUT - # checkout the proper branch/tag - git checkout $3 - # force our local version to be the same as the remote version - git reset --hard origin/$3 - popd - - # give ownership to the stack user - chroot $MNTDIR chown -R stack $2 -} - git_clone $NOVA_REPO $DEST/nova $NOVA_BRANCH git_clone $GLANCE_REPO $DEST/glance $GLANCE_BRANCH git_clone $KEYSTONE_REPO $DEST/keystone $KEYSTONE_BRANCH diff --git a/tools/build_tempest.sh b/tools/build_tempest.sh index aa44766a..230e8f9b 100755 --- a/tools/build_tempest.sh +++ b/tools/build_tempest.sh @@ -26,7 +26,10 @@ trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +. $TOP_DIR/functions # Abort if localrc is not set if [ ! -e $TOP_DIR/localrc ]; then @@ -43,42 +46,8 @@ DEST=${DEST:-/opt/stack} TEMPEST_DIR=$DEST/tempest -DIST_NAME=${DIST_NAME:-oneiric} - -# git clone only if directory doesn't exist already. Since ``DEST`` might not -# be owned by the installation user, we create the directory and change the -# ownership to the proper user. -function git_clone { - - GIT_REMOTE=$1 - GIT_DEST=$2 - GIT_BRANCH=$3 - - # do a full clone only if the directory doesn't exist - if [ ! 
-d $GIT_DEST ]; then - git clone $GIT_REMOTE $GIT_DEST - cd $2 - # This checkout syntax works for both branches and tags - git checkout $GIT_BRANCH - elif [[ "$RECLONE" == "yes" ]]; then - # if it does exist then simulate what clone does if asked to RECLONE - cd $GIT_DEST - # set the url to pull from and fetch - git remote set-url origin $GIT_REMOTE - git fetch origin - # remove the existing ignored files (like pyc) as they cause breakage - # (due to the py files having older timestamps than our pyc, so python - # thinks the pyc files are correct using them) - find $GIT_DEST -name '*.pyc' -delete - git checkout -f origin/$GIT_BRANCH - # a local branch might not exist - git branch -D $GIT_BRANCH || true - git checkout -b $GIT_BRANCH - fi -} - # Install tests and prerequisites -sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors `cat $TOP_DIR/files/pips/tempest` +pip_install `cat $TOP_DIR/files/pips/tempest` git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 04e1a459..ed5a0171 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -8,7 +8,10 @@ fi # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +. $TOP_DIR/functions cd $TOP_DIR @@ -34,7 +37,7 @@ fi # Install deps if needed DEPS="kvm libvirt-bin kpartx cloud-utils curl" -apt-get install -y --force-yes $DEPS || true # allow this to fail gracefully for concurrent builds +apt_get install -y --force-yes $DEPS || true # allow this to fail gracefully for concurrent builds # Where to store files and instances WORK_DIR=${WORK_DIR:-/opt/uecstack} diff --git a/tools/build_uec_ramdisk.sh b/tools/build_uec_ramdisk.sh index 174eaac7..32f90c05 100755 --- a/tools/build_uec_ramdisk.sh +++ b/tools/build_uec_ramdisk.sh @@ -40,7 +40,10 @@ DEST_FILE=$1 # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +. $TOP_DIR/functions cd $TOP_DIR @@ -68,7 +71,7 @@ fi # Install deps if needed DEPS="kvm libvirt-bin kpartx cloud-utils curl" -apt-get install -y --force-yes $DEPS +apt_get install -y --force-yes $DEPS # Where to store files and instances CACHEDIR=${CACHEDIR:-/opt/stack/cache} @@ -113,35 +116,6 @@ if [ ! -r "`ls $MNT_DIR/boot/vmlinuz-*-generic | head -1`" ]; then chroot $MNT_DIR apt-get install -y linux-generic fi -# git clone only if directory doesn't exist already. Since ``DEST`` might not -# be owned by the installation user, we create the directory and change the -# ownership to the proper user. -function git_clone { - - # clone new copy or fetch latest changes - CHECKOUT=${MNT_DIR}$2 - if [ ! 
-d $CHECKOUT ]; then - mkdir -p $CHECKOUT - git clone $1 $CHECKOUT - else - pushd $CHECKOUT - git fetch - popd - fi - - # FIXME(ja): checkout specified version (should works for branches and tags) - - pushd $CHECKOUT - # checkout the proper branch/tag - git checkout $3 - # force our local version to be the same as the remote version - git reset --hard origin/$3 - popd - - # give ownership to the stack user - chroot $MNT_DIR chown -R stack $2 -} - git_clone $NOVA_REPO $DEST/nova $NOVA_BRANCH git_clone $GLANCE_REPO $DEST/glance $GLANCE_BRANCH git_clone $KEYSTONE_REPO $DEST/keystone $KEYSTONE_BRANCH diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 00add9a3..f6ef0d3b 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -30,7 +30,10 @@ trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +. $TOP_DIR/functions # Abort if localrc is not set if [ ! -e $TOP_DIR/localrc ]; then diff --git a/tools/copy_dev_environment_to_uec.sh b/tools/copy_dev_environment_to_uec.sh index c949b329..d5687dc1 100755 --- a/tools/copy_dev_environment_to_uec.sh +++ b/tools/copy_dev_environment_to_uec.sh @@ -8,7 +8,10 @@ set -o errexit # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +. $TOP_DIR/functions # Change dir to top of devstack cd $TOP_DIR @@ -47,13 +50,6 @@ echo stack:pass | chroot $STAGING_DIR chpasswd ( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \ > $STAGING_DIR/etc/sudoers.d/50_stack_sh ) -# Gracefully cp only if source file/dir exists -function cp_it { - if [ -e $1 ] || [ -d $1 ]; then - cp -pRL $1 $2 - fi -} - # Copy over your ssh keys and env if desired cp_it ~/.ssh $STAGING_DIR/$DEST/.ssh cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/$DEST/.ssh/authorized_keys diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh index f66f2bc2..09630740 100755 --- a/tools/get_uec_image.sh +++ b/tools/get_uec_image.sh @@ -6,7 +6,10 @@ ROOTSIZE=${ROOTSIZE:-2000} # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +. $TOP_DIR/functions # exit on error to stop unexpected errors set -o errexit From 10670d1bad6b4afd3dce9af61d22c8cd5eeded59 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 24 Jan 2012 11:26:15 -0600 Subject: [PATCH 300/967] Add support for OS_* environment vars Add the OS_* env variables to mirror the NOVA_* vars; example: setting OS_USERNAME will override NOVA_USERNAME in the clients and tools, but if left unset it defaults to NOVA_USERNAME. 
Adds exercises/client-env.sh to test operation of command-line clients with only the OS_* variables set Addresses bug 897304, http://wiki.openstack.org/CLIAuth Change-Id: I72450153541072fe8026a82748cfcd1cf5ed31d8 --- exercises/client-env.sh | 166 ++++++++++++++++++++++++++++++++++++++ exercises/floating_ips.sh | 2 +- exercises/volumes.sh | 2 +- openrc | 10 ++- 4 files changed, 177 insertions(+), 3 deletions(-) create mode 100755 exercises/client-env.sh diff --git a/exercises/client-env.sh b/exercises/client-env.sh new file mode 100755 index 00000000..f4172bcb --- /dev/null +++ b/exercises/client-env.sh @@ -0,0 +1,166 @@ +#!/usr/bin/env bash + +# Test OpenStack client enviroment variable handling + +# Verify client workage +VERIFY=${1:-""} + +# Settings +# ======== + +# Use openrc + stackrc + localrc for settings +pushd $(cd $(dirname "$0")/.. && pwd) >/dev/null +source ./openrc +popd >/dev/null + +# Unset all of the known NOVA_ vars +unset NOVA_API_KEY +unset NOVA_ENDPOINT_NAME +unset NOVA_PASSWORD +unset NOVA_PROJECT_ID +unset NOVA_REGION_NAME +unset NOVA_URL +unset NOVA_USERNAME +unset NOVA_VERSION + +# Make sure we have the vars we are expecting +function is_set() { + local var=\$"$1" + eval echo $1=$var + if eval "[ -z $var ]"; then + return 1 + fi + return 0 +} + +for i in OS_TENANT_NAME OS_USERNAME OS_PASSWORD OS_AUTH_URL; do + is_set $i + if [[ $? -ne 0 ]]; then + ABORT=1 + fi +done +if [[ -n "$ABORT" ]]; then + exit 1 +fi + +# Set global return +RETURN=0 + +# Keystone client +# --------------- +if [[ "$ENABLED_SERVICES" =~ "key" ]]; then + if [[ "$SKIP_EXERCISES" =~ "key" ]] ; then + STATUS_KEYSTONE="Skipped" + else + if [[ -n "$VERIFY" ]]; then + # Save original environment + xOS_AUTH_URL=$OS_AUTH_URL + xOS_TENANT_NAME=$OS_TENANT_NAME + xOS_USERNAME=$OS_USERNAME + xOS_PASSWORD=$OS_PASSWORD + # keystone can't handle a trailing '/' + export OS_AUTH_URL=${OS_AUTH_URL%/} + # does any non-admin request work? 
+ export OS_USERNAME=admin + export OS_TENANT_NAME=admin + fi + + echo -e "\nTest Keystone" + if keystone service-list; then + STATUS_KEYSTONE="Succeeded" + else + STATUS_KEYSTONE="Failed" + RETURN=1 + fi + if [[ -n "$VERIFY" ]]; then + # Save original environment + OS_AUTH_URL=$xOS_AUTH_URL + OS_TENANT_NAME=$xOS_TENANT_NAME + OS_USERNAME=$xOS_USERNAME + OS_PASSWORD=$xOS_PASSWORD + fi + fi +fi + +# Nova client +# ----------- + +if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then + if [[ "$SKIP_EXERCISES" =~ "n-api" ]] ; then + STATUS_NOVA="Skipped" + else + if [[ -n "$VERIFY" ]]; then + # Known novaclient breakage: + # NOVA_VERSION must be set or nova silently fails + export NOVA_VERSION=2 + fi + + echo -e "\nTest Nova" + if nova flavor-list; then + STATUS_NOVA="Succeeded" + else + STATUS_NOVA="Failed" + RETURN=1 + fi + fi +fi + +# Glance client +# ------------- + +if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then + if [[ "$SKIP_EXERCISES" =~ "g-api" ]] ; then + STATUS_GLANCE="Skipped" + else + if [[ -n "$VERIFY" ]]; then + # Known glance client differage: + export OS_AUTH_TENANT=$OS_TENANT_NAME + export OS_AUTH_USER=$OS_USERNAME + export OS_AUTH_KEY=$OS_PASSWORD + export OS_AUTH_STRATEGY=keystone + fi + + echo -e "\nTest Glance" + if glance index; then + STATUS_GLANCE="Succeeded" + else + STATUS_GLANCE="Failed" + RETURN=1 + fi + fi +fi + +# Swift client +# ------------ + +if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then + if [[ "$SKIP_EXERCISES" =~ "swift" ]] ; then + STATUS_SWIFT="Skipped" + else + echo -e "\nTest Swift" + # FIXME(dtroyer): implement swift test + if true; then + STATUS_SWIFT="Succeeded" + else + STATUS_SWIFT="Failed" + RETURN=1 + fi + fi +fi + +# Results +# ------- + +function report() { + if [[ -n "$2" ]]; then + echo "$1: $2" + fi +} + +echo -e "\n" +report "Keystone" $STATUS_KEYSTONE +report "Nova" $STATUS_NOVA +report "Glance" $STATUS_GLANCE +report "Swift" $STATUS_SWIFT + +exit $RETURN diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 8afa3ccd..c1cffa4a 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -55,7 +55,7 @@ TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} # returns a token and catalog of endpoints. We use python to parse the token # and save it. -TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_PASSWORD\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"` +TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OS_USERNAME\", \"password\": \"$OS_PASSWORD\"}}}" -H "Content-type: application/json" ${OS_AUTH_URL%/}/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"` # Launching a server # ================== diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 3f754839..1f7c25f5 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -41,7 +41,7 @@ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} # returns a token and catalog of endpoints. We use python to parse the token # and save it. 
-TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_PASSWORD\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"` +TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OS_USERNAME\", \"password\": \"$OS_PASSWORD\"}}}" -H "Content-type: application/json" ${OS_AUTH_URL%/}/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"` # Launching a server # ================== diff --git a/openrc b/openrc index 195a8fe8..4c4b1d31 100644 --- a/openrc +++ b/openrc @@ -12,20 +12,27 @@ SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} # should be listening on HOST_IP. If its running elsewhere, it can be set here GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} +# novaclient now supports the new OS_* configuration variables in addition to +# the older NOVA_* variables. Set them both for now... + # Nova original used project_id as the *account* that owned resources (servers, # ip address, ...) With the addition of Keystone we have standardized on the # term **tenant** as the entity that owns the resources. **novaclient** still # uses the old deprecated terms project_id. Note that this field should now be # set to tenant_name, not tenant_id. export NOVA_PROJECT_ID=${TENANT:-demo} +export OS_TENANT_NAME=${NOVA_PROJECT_ID} # In addition to the owning entity (tenant), nova stores the entity performing # the action as the **user**. export NOVA_USERNAME=${USERNAME:-demo} +export OS_USERNAME=${NOVA_USERNAME} # With Keystone you pass the keystone password instead of an api key. -# The most recent versions of novaclient use NOVA_PASSWORD instead of NOVA_API_KEY +# Recent versions of novaclient use NOVA_PASSWORD instead of NOVA_API_KEY +# The most recent versions of novaclient use OS_PASSWORD in addition to NOVA_PASSWORD export NOVA_PASSWORD=${ADMIN_PASSWORD:-secrete} +export OS_PASSWORD=${NOVA_PASSWORD} # With the addition of Keystone, to use an openstack cloud you should # authenticate against keystone, which returns a **Token** and **Service @@ -36,6 +43,7 @@ export NOVA_PASSWORD=${ADMIN_PASSWORD:-secrete} # *NOTE*: Using the 2.0 *auth api* does not mean that compute api is 2.0. We # will use the 1.1 *compute api* export NOVA_URL=${NOVA_URL:-http://$SERVICE_HOST:5000/v2.0/} +export OS_AUTH_URL=${NOVA_URL} # Currently novaclient needs you to specify the *compute api* version. This # needs to match the config of your catalog returned by Keystone. 
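The openrc hunk above adds the OS_* variables alongside the legacy NOVA_* ones, and exercises/client-env.sh drives a full verification pass. As a minimal stand-alone sketch of the same idea (not part of the patch series; it assumes a devstack checkout with this openrc, a configured localrc, and the nova client on the PATH), the OS_* values alone should be enough for novaclient to authenticate:

#!/usr/bin/env bash
# Sketch: confirm novaclient works with only the OS_* variables set.
set -o errexit

# Pull in both the legacy NOVA_* and the new OS_* settings
source ./openrc

# Drop the legacy variables; NOVA_VERSION stays because novaclient
# silently fails without it (see exercises/client-env.sh)
unset NOVA_API_KEY NOVA_ENDPOINT_NAME NOVA_PASSWORD NOVA_PROJECT_ID
unset NOVA_REGION_NAME NOVA_URL NOVA_USERNAME

# keystone cannot handle a trailing '/' on the auth URL
export OS_AUTH_URL=${OS_AUTH_URL%/}

# Fail early if any required OS_* value is missing
for var in OS_TENANT_NAME OS_USERNAME OS_PASSWORD OS_AUTH_URL; do
    if [ -z "${!var}" ]; then
        echo "$var is not set"
        exit 1
    fi
done

# This should authenticate against keystone using only the OS_* values
nova flavor-list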
From 64a9066018d21626cff7389766c89de83db1e0ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jason=20K=C3=B6lker?= Date: Mon, 23 Jan 2012 11:17:27 -0600 Subject: [PATCH 301/967] Add Melange Support Change-Id: I3961a007b10aed5ef47422bcf0eedd43d35a2cff --- stack.sh | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ stackrc | 10 +++++++++- 2 files changed, 70 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 17972c9d..18d1695c 100755 --- a/stack.sh +++ b/stack.sh @@ -184,6 +184,8 @@ SWIFT_DIR=$DEST/swift SWIFT_KEYSTONE_DIR=$DEST/swift-keystone2 QUANTUM_DIR=$DEST/quantum QUANTUM_CLIENT_DIR=$DEST/python-quantumclient +MELANGE_DIR=$DEST/melange +MELANGECLIENT_DIR=$DEST/python-melangeclient # Default Quantum Plugin Q_PLUGIN=${Q_PLUGIN:-openvswitch} @@ -192,6 +194,13 @@ Q_PORT=${Q_PORT:-9696} # Default Quantum Host Q_HOST=${Q_HOST:-localhost} +# Default Melange Port +M_PORT=${M_PORT:-9898} +# Default Melange Host +M_HOST=${M_HOST:-localhost} +# Melange MAC Address Range +M_MAC_RANGE=${M_MAC_RANGE:-404040/24} + # Specify which services to launch. These generally correspond to screen tabs ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit} @@ -330,6 +339,13 @@ FLAT_INTERFACE=${FLAT_INTERFACE:-eth0} # # With Quantum networking the NET_MAN variable is ignored. +# Using Melange IPAM: +# +# Make sure that quantum and melange are enabled in ENABLED_SERVICES. +# If they are then the melange IPAM lib will be set in the QuantumManager. +# Adding m-svc to ENABLED_SERVICES will start the melange service on this +# host. + # MySQL & RabbitMQ # ---------------- @@ -622,6 +638,15 @@ if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then git_clone $QUANTUM_CLIENT_REPO $QUANTUM_CLIENT_DIR $QUANTUM_CLIENT_BRANCH fi +if [[ "$ENABLED_SERVICES" =~ "m-svc" ]]; then + # melange + git_clone $MELANGE_REPO $MELANGE_DIR $MELANGE_BRANCH +fi + +if [[ "$ENABLED_SERVICES" =~ "melange" ]]; then + git_clone $MELANGECLIENT_REPO $MELANGECLIENT_DIR $MELANGECLIENT_BRANCH +fi + # Initialization # ============== @@ -652,6 +677,12 @@ fi if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then cd $QUANTUM_DIR; sudo python setup.py develop fi +if [[ "$ENABLED_SERVICES" =~ "m-svc" ]]; then + cd $MELANGE_DIR; sudo python setup.py develop +fi +if [[ "$ENABLED_SERVICES" =~ "melange" ]]; then + cd $MELANGECLIENT_DIR; sudo python setup.py develop +fi # Syslog # --------- @@ -1153,6 +1184,13 @@ if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then add_nova_flag "--network_manager=nova.network.quantum.manager.QuantumManager" add_nova_flag "--quantum_connection_host=$Q_HOST" add_nova_flag "--quantum_connection_port=$Q_PORT" + + if [[ "$ENABLED_SERVICES" =~ "melange" ]]; then + add_nova_flag "--quantum_ipam_lib=nova.network.quantum.melange_ipam_lib" + add_nova_flag "--use_melange_mac_generation" + add_nova_flag "--melange_host=$M_HOST" + add_nova_flag "--melange_port=$M_PORT" + fi if [[ "$ENABLED_SERVICES" =~ "q-svc" && "$Q_PLUGIN" = "openvswitch" ]]; then add_nova_flag "--libvirt_vif_type=ethernet" add_nova_flag "--libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver" @@ -1408,6 +1446,28 @@ if [[ "$ENABLED_SERVICES" =~ "q-agt" ]]; then fi +# Melange service +if [[ "$ENABLED_SERVICES" =~ "m-svc" ]]; then + if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS melange;' + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE melange;' + else + echo "mysql must be enabled in order to use the 
$Q_PLUGIN Quantum plugin." + exit 1 + fi + MELANGE_CONFIG_FILE=$MELANGE_DIR/etc/melange/melange.conf + cp $MELANGE_CONFIG_FILE.sample $MELANGE_CONFIG_FILE + sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/melange/g" $MELANGE_CONFIG_FILE + cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-manage --config-file=$MELANGE_CONFIG_FILE db_sync + screen_it m-svc "cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-server --config-file=$MELANGE_CONFIG_FILE" + echo "Waiting for melange to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9898; do sleep 1; done"; then + echo "melange-server did not start" + exit 1 + fi + melange mac_address_range create cidr=$M_MAC_RANGE +fi + # If we're using Quantum (i.e. q-svc is enabled), network creation has to # happen after we've started the Quantum service. if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then @@ -1425,6 +1485,7 @@ if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then fi fi + # Launching nova-compute should be as simple as running ``nova-compute`` but # have to do a little more than that in our script. Since we add the group # ``libvirtd`` to our user in this script, when nova-compute is run it is diff --git a/stackrc b/stackrc index acc5ac06..e87b3cde 100644 --- a/stackrc +++ b/stackrc @@ -46,6 +46,14 @@ QUANTUM_CLIENT_BRANCH=master TEMPEST_REPO=https://github.com/openstack/tempest.git TEMPEST_BRANCH=master +# melange service +MELANGE_REPO=https://github.com/openstack/melange.git +MELANGE_BRANCH=master + +# python melange client library +MELANGECLIENT_REPO=https://github.com/openstack/python-melangeclient.git +MELANGECLIENT_BRANCH=master + # Specify a comma-separated list of uec images to download and install into glance. # supported urls here are: # * "uec-style" images: @@ -56,7 +64,7 @@ TEMPEST_BRANCH=master # * disk image (*.img,*.img.gz) # if file ends in .img, then it will be uploaded and registered as a to # glance as a disk image. If it ends in .gz, it is uncompressed first. 
-# example: +# example: # http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-armel-disk1.img # http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz #IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image From e60c9d03594b27368c3b8bb5bdeac799a97735eb Mon Sep 17 00:00:00 2001 From: Dolph Mathews Date: Thu, 2 Feb 2012 21:02:23 +0000 Subject: [PATCH 302/967] Hyphens in var names produce warnings (bug 925718) Change-Id: Ib1b81273e2b2ec5e56dead920252684a997ce12d --- files/keystone.conf | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/files/keystone.conf b/files/keystone.conf index 6d0fd7e1..056f16b6 100644 --- a/files/keystone.conf +++ b/files/keystone.conf @@ -20,7 +20,7 @@ backends = keystone.backends.sqlalchemy # Dictionary Maps every service to a header.Missing services would get header # X_(SERVICE_NAME) Key => Service Name, Value => Header Name -service-header-mappings = { +service_header_mappings = { 'nova' : 'X-Server-Management-Url', 'swift' : 'X-Storage-Url', 'cdn' : 'X-CDN-Management-Url'} @@ -64,13 +64,13 @@ ca_certs = /etc/keystone/ssl/certs/ca.pem cert_required = True #Role that allows to perform admin operations. -keystone-admin-role = admin +keystone_admin_role = admin #Role that allows to perform service admin operations. -keystone-service-admin-role = KeystoneServiceAdmin +keystone_service_admin_role = KeystoneServiceAdmin #Tells whether password user need to be hashed in the backend -hash-password = True +hash_password = True [keystone.backends.sqlalchemy] # SQLAlchemy connection string for the reference implementation registry From a96a418171f8762e55938186b85443f47c215ad3 Mon Sep 17 00:00:00 2001 From: termie Date: Mon, 9 Jan 2012 22:13:29 -0800 Subject: [PATCH 303/967] new keystone support --- files/default_catalog.templates | 30 +++++++ files/keystone.conf | 135 +++++++++------------------- files/keystone_data.sh | 150 +++++++++++++++++++++++++------- stack.sh | 5 ++ stackrc | 2 +- 5 files changed, 195 insertions(+), 127 deletions(-) create mode 100644 files/default_catalog.templates diff --git a/files/default_catalog.templates b/files/default_catalog.templates new file mode 100644 index 00000000..b527ae50 --- /dev/null +++ b/files/default_catalog.templates @@ -0,0 +1,30 @@ +# config for TemplatedCatalog, using camelCase because I don't want to do +# translations for legacy compat +catalog.RegionOne.identity.publicURL = http://%SERVICE_HOST%:$(public_port)s/v2.0 +catalog.RegionOne.identity.adminURL = http://%SERVICE_HOST%:$(admin_port)s/v2.0 +catalog.RegionOne.identity.internalURL = http://%SERVICE_HOST%:$(public_port)s/v2.0 +catalog.RegionOne.identity.name = 'Identity Service' + + +catalog.RegionOne.compute.publicURL = http://%SERVICE_HOST%:8774/v1.1/$(tenant_id)s +catalog.RegionOne.compute.adminURL = http://%SERVICE_HOST%:8774/v1.1/$(tenant_id)s +catalog.RegionOne.compute.internalURL = http://%SERVICE_HOST%:8774/v1.1/$(tenant_id)s +catalog.RegionOne.compute.name = 'Compute Service' + + +catalog.RegionOne.ec2.publicURL = http://%SERVICE_HOST%:8773/services/Cloud +catalog.RegionOne.ec2.adminURL = http://%SERVICE_HOST%:8773/services/Admin +catalog.RegionOne.ec2.internalURL = http://%SERVICE_HOST%:8773/services/Cloud +catalog.RegionOne.ec2.name = 'EC2 Service' + + 
+catalog.RegionOne.image.publicURL = http://%SERVICE_HOST%:9292/v1 +catalog.RegionOne.image.adminURL = http://%SERVICE_HOST%:9292/v1 +catalog.RegionOne.image.internalURL = http://%SERVICE_HOST%:9292/v1 +catalog.RegionOne.image.name = 'Image Service' + + +catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s +catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/ +catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s +catalog.RegionOne.object_store.name = 'Swift Service' diff --git a/files/keystone.conf b/files/keystone.conf index 6d0fd7e1..0649e907 100644 --- a/files/keystone.conf +++ b/files/keystone.conf @@ -1,112 +1,61 @@ [DEFAULT] -# Show more verbose log output (sets INFO log level output) -verbose = False - -# Show debugging output in logs (sets DEBUG log level output) -debug = False - -# Which backend store should Keystone use by default. -# Default: 'sqlite' -# Available choices are 'sqlite' [future will include LDAP, PAM, etc] -default_store = sqlite - -# Log to this file. Make sure you do not set the same log -# file for both the API and registry servers! -log_file = %DEST%/keystone/keystone.log - -# List of backends to be configured -backends = keystone.backends.sqlalchemy -#For LDAP support, add: ,keystone.backends.ldap - -# Dictionary Maps every service to a header.Missing services would get header -# X_(SERVICE_NAME) Key => Service Name, Value => Header Name -service-header-mappings = { - 'nova' : 'X-Server-Management-Url', - 'swift' : 'X-Storage-Url', - 'cdn' : 'X-CDN-Management-Url'} - -#List of extensions currently supported -extensions= osksadm,oskscatalog - -# Address to bind the API server -# TODO Properties defined within app not available via pipeline. -service_host = 0.0.0.0 - -# Port the bind the API server to -service_port = 5000 - -# SSL for API server -service_ssl = False - -# Address to bind the Admin API server -admin_host = 0.0.0.0 - -# Port the bind the Admin API server to +public_port = 5000 admin_port = 35357 +admin_token = %SERVICE_TOKEN% +log_file = %DEST%/keystone/keystone.log -# SSL for API Admin server -admin_ssl = False +[sql] +connection = %SQL_CONN% +idle_timeout = 30 +min_pool_size = 5 +max_pool_size = 10 +pool_timeout = 200 -# Keystone certificate file (modify as needed) -# Only required if *_ssl is set to True -certfile = /etc/keystone/ssl/certs/keystone.pem +[identity] +driver = keystone.backends.sql.SqlIdentity -# Keystone private key file (modify as needed) -# Only required if *_ssl is set to True -keyfile = /etc/keystone/ssl/private/keystonekey.pem +[catalog] +driver = keystone.backends.templated.TemplatedCatalog +template_file = ./etc/default_catalog.templates -# Keystone trusted CA certificates (modify as needed) -# Only required if *_ssl is set to True -ca_certs = /etc/keystone/ssl/certs/ca.pem +[token] +driver = keystone.backends.kvs.KvsToken -# Client certificate required -# Only relevant if *_ssl is set to True -cert_required = True +[policy] +driver = keystone.backends.policy.SimpleMatch -#Role that allows to perform admin operations. -keystone-admin-role = admin +[filter:debug] +paste.filter_factory = keystone.wsgi:Debug.factory -#Role that allows to perform service admin operations. 
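# The catalog template above is flat key/value text of the form
# catalog.<region>.<service>.<field>; %SERVICE_HOST% is filled in later by
# stack.sh and $(tenant_id)s is expanded when keystone returns the catalog.
# Registering a further endpoint is just more lines; the volume entries and
# port below are illustrative, not part of this patch:
cat >> default_catalog.templates <<'EOF'
catalog.RegionOne.volume.publicURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s
catalog.RegionOne.volume.adminURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s
catalog.RegionOne.volume.internalURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s
catalog.RegionOne.volume.name = 'Volume Service'
EOF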
-keystone-service-admin-role = KeystoneServiceAdmin +[filter:token_auth] +paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory -#Tells whether password user need to be hashed in the backend -hash-password = True +[filter:admin_token_auth] +paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory -[keystone.backends.sqlalchemy] -# SQLAlchemy connection string for the reference implementation registry -# server. Any valid SQLAlchemy connection string is fine. -# See: http://bit.ly/ideIpI -sql_connection = %SQL_CONN% -backend_entities = ['UserRoleAssociation', 'Endpoints', 'Role', 'Tenant', - 'User', 'Credentials', 'EndpointTemplates', 'Token', - 'Service'] +[filter:json_body] +paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory -# Period in seconds after which SQLAlchemy should reestablish its connection -# to the database. -sql_idle_timeout = 30 +[filter:crud_extension] +paste.filter_factory = keystone.service:AdminCrudExtension.factory -[pipeline:admin] -pipeline = - urlrewritefilter - admin_api -[pipeline:keystone-legacy-auth] -pipeline = - urlrewritefilter - legacy_auth - service_api +[app:public_service] +paste.app_factory = keystone.service:public_app_factory -[app:service_api] -paste.app_factory = keystone.server:service_app_factory +[app:admin_service] +paste.app_factory = keystone.service:admin_app_factory -[app:admin_api] -paste.app_factory = keystone.server:admin_app_factory +[pipeline:public_api] +pipeline = token_auth admin_token_auth json_body debug public_service -[filter:urlrewritefilter] -paste.filter_factory = keystone.middleware.url:filter_factory +[pipeline:admin_api] +pipeline = token_auth admin_token_auth json_body debug crud_extension admin_service -[filter:legacy_auth] -paste.filter_factory = keystone.frontends.legacy_token_auth:filter_factory +[composite:main] +use = egg:Paste#urlmap +/v2.0 = public_api -[filter:debug] -paste.filter_factory = keystone.common.wsgi:debug_filter_factory +[composite:admin] +use = egg:Paste#urlmap +/v2.0 = admin_api diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 77f6b933..8ec529a5 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -1,54 +1,138 @@ #!/bin/bash BIN_DIR=${BIN_DIR:-.} # Tenants -$BIN_DIR/keystone-manage tenant add admin -$BIN_DIR/keystone-manage tenant add demo -$BIN_DIR/keystone-manage tenant add invisible_to_admin +ADMIN_TENANT=`$BIN_DIR/keystone-manage tenant --ks-id-only + create \ + tenant_name=admin` +DEMO_TENANT=`$BIN_DIR/keystone-manage tenant --ks-id-only create \ + tenant_name=demo` +INVIS_TENANT=`$BIN_DIR/keystone-manage tenant --ks-id-only create \ + tenant_name=invisible_to_admin` + # Users -$BIN_DIR/keystone-manage user add admin %ADMIN_PASSWORD% -$BIN_DIR/keystone-manage user add demo %ADMIN_PASSWORD% +ADMIN_USER=`$BIN_DIR/keystone-manage user --ks-id-only create \ + name=admin \ + "password=%ADMIN_PASSWORD%" \ + email=admin@example.com` +DEMO_USER=`$BIN_DIR/keystone-manage user --ks-id-only create \ + name=demo \ + "password=%ADMIN_PASSWORD%" \ + email=demo@example.com` # Roles -$BIN_DIR/keystone-manage role add admin -$BIN_DIR/keystone-manage role add Member -$BIN_DIR/keystone-manage role add KeystoneAdmin -$BIN_DIR/keystone-manage role add KeystoneServiceAdmin -$BIN_DIR/keystone-manage role add sysadmin -$BIN_DIR/keystone-manage role add netadmin -$BIN_DIR/keystone-manage role grant admin admin admin -$BIN_DIR/keystone-manage role grant Member demo demo 
-$BIN_DIR/keystone-manage role grant sysadmin demo demo -$BIN_DIR/keystone-manage role grant netadmin demo demo -$BIN_DIR/keystone-manage role grant Member demo invisible_to_admin -$BIN_DIR/keystone-manage role grant admin admin demo -$BIN_DIR/keystone-manage role grant admin admin -$BIN_DIR/keystone-manage role grant KeystoneAdmin admin -$BIN_DIR/keystone-manage role grant KeystoneServiceAdmin admin +ADMIN_ROLE=`$BIN_DIR/keystone-manage role --ks-id-only create \ + name=Admin` +MEMBER_ROLE=`$BIN_DIR/keystone-manage role --ks-id-only create \ + name=Member` +KEYSTONEADMIN_ROLE=`$BIN_DIR/keystone-manage role --ks-id-only create \ + name=KeystoneAdmin` +KEYSTONESERVICE_ROLE=`$BIN_DIR/keystone-manage role --ks-id-only create \ + name=KeystoneServiceAdmin` +SYSADMIN_ROLE=`$BIN_DIR/keystone-manage role --ks-id-only create \ + name=sysadmin` +NETADMIN_ROLE=`$BIN_DIR/keystone-manage role --ks-id-only create \ + name=netadmin` + + +# Add Roles to Users in Tenants + +$BIN_DIR/keystone-manage role add_user_to_tenant \ + role_id=$ADMIN_ROLE \ + user_id=$ADMIN_USER \ + tenant_id=$ADMIN_TENANT +$BIN_DIR/keystone-manage role add_user_to_tenant \ + role_id=$MEMBER_ROLE \ + user_id=$DEMO_USER \ + tenant_id=$DEMO_TENANT +$BIN_DIR/keystone-manage role add_user_to_tenant \ + role_id=$SYSADMIN_ROLE \ + user_id=$DEMO_USER \ + tenant_id=$DEMO_TENANT +$BIN_DIR/keystone-manage role add_user_to_tenant \ + role_id=$NETADMIN_ROLE \ + user_id=$DEMO_USER \ + tenant_id=$DEMO_TENANT +$BIN_DIR/keystone-manage role add_user_to_tenant \ + role_id=$MEMBER_ROLE \ + user_id=$DEMO_USER \ + tenant_id=$INVIS_TENANT +$BIN_DIR/keystone-manage role add_user_to_tenant \ + role_id=$ADMIN_ROLE \ + user_id=$ADMIN_USER \ + tenant_id=$DEMO_TENANT + +# TODO(termie): these two might be dubious +$BIN_DIR/keystone-manage role add_user_to_tenant \ + role_id=$KEYSTONEADMIN_ROLE \ + user_id=$ADMIN_USER \ + tenant_id=$ADMIN_TENANT +$BIN_DIR/keystone-manage role add_user_to_tenant \ + role_id=$KEYSTONESERVICE_ROLE \ + user_id=$ADMIN_USER \ + tenant_id=$ADMIN_TENANT # Services -$BIN_DIR/keystone-manage service add nova compute "Nova Compute Service" -$BIN_DIR/keystone-manage service add ec2 ec2 "EC2 Compatability Layer" -$BIN_DIR/keystone-manage service add glance image "Glance Image Service" -$BIN_DIR/keystone-manage service add keystone identity "Keystone Identity Service" +$BIN_DIR/keystone-manage service create \ + name=nova \ + service_type=compute \ + "description=Nova Compute Service" + +$BIN_DIR/keystone-manage service create \ + name=ec2 \ + service_type=ec2 \ + "description=EC2 Compatibility Layer" + +$BIN_DIR/keystone-manage service create \ + name=glance \ + service_type=image \ + "description=Glance Image Service" + +$BIN_DIR/keystone-manage service create \ + name=keystone \ + service_type=identity \ + "description=Keystone Identity Service" if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then - $BIN_DIR/keystone-manage service add swift object-store "Swift Service" + $BIN_DIR/keystone-manage service create \ + name=swift \ + service_type=object-store \ + "description=Swift Service" fi #endpointTemplates -$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne nova http://%SERVICE_HOST%:8774/v1.1/%tenant_id% http://%SERVICE_HOST%:8774/v1.1/%tenant_id% http://%SERVICE_HOST%:8774/v1.1/%tenant_id% 1 1 -$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne ec2 http://%SERVICE_HOST%:8773/services/Cloud http://%SERVICE_HOST%:8773/services/Admin http://%SERVICE_HOST%:8773/services/Cloud 1 1 -$BIN_DIR/keystone-manage $* endpointTemplates 
add RegionOne glance http://%SERVICE_HOST%:9292/v1 http://%SERVICE_HOST%:9292/v1 http://%SERVICE_HOST%:9292/v1 1 1 -$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/v2.0 %KEYSTONE_AUTH_PROTOCOL%://%KEYSTONE_AUTH_HOST%:%KEYSTONE_AUTH_PORT%/v2.0 %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/v2.0 1 1 +$BIN_DIR/keystone-manage $* endpointTemplates add \ + RegionOne nova + http://%SERVICE_HOST%:8774/v1.1/%tenant_id% + http://%SERVICE_HOST%:8774/v1.1/%tenant_id% + http://%SERVICE_HOST%:8774/v1.1/%tenant_id% 1 1 +$BIN_DIR/keystone-manage $* endpointTemplates add + RegionOne ec2 + http://%SERVICE_HOST%:8773/services/Cloud + http://%SERVICE_HOST%:8773/services/Admin + http://%SERVICE_HOST%:8773/services/Cloud 1 1 +$BIN_DIR/keystone-manage $* endpointTemplates add + RegionOne glance + http://%SERVICE_HOST%:9292/v1 + http://%SERVICE_HOST%:9292/v1 + http://%SERVICE_HOST%:9292/v1 1 1 +$BIN_DIR/keystone-manage $* endpointTemplates add + RegionOne keystone + http://%SERVICE_HOST%:5000/v2.0 + http://%SERVICE_HOST%:35357/v2.0 + http://%SERVICE_HOST%:5000/v2.0 1 1 if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then - $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%SERVICE_HOST%:8080/v1/AUTH_%tenant_id% http://%SERVICE_HOST%:8080/ http://%SERVICE_HOST%:8080/v1/AUTH_%tenant_id% 1 1 + $BIN_DIR/keystone-manage $* endpointTemplates add + RegionOne swift + http://%SERVICE_HOST%:8080/v1/AUTH_%tenant_id% + http://%SERVICE_HOST%:8080/ + http://%SERVICE_HOST%:8080/v1/AUTH_%tenant_id% 1 1 fi # Tokens -$BIN_DIR/keystone-manage token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00 +#$BIN_DIR/keystone-manage token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00 # EC2 related creds - note we are setting the secret key to ADMIN_PASSWORD # but keystone doesn't parse them - it is just a blob from keystone's # point of view -$BIN_DIR/keystone-manage credentials add admin EC2 'admin' '%ADMIN_PASSWORD%' admin || echo "no support for adding credentials" -$BIN_DIR/keystone-manage credentials add demo EC2 'demo' '%ADMIN_PASSWORD%' demo || echo "no support for adding credentials" +#$BIN_DIR/keystone-manage credentials add admin EC2 'admin' '%ADMIN_PASSWORD%' admin || echo "no support for adding credentials" +#$BIN_DIR/keystone-manage credentials add demo EC2 'demo' '%ADMIN_PASSWORD%' demo || echo "no support for adding credentials" diff --git a/stack.sh b/stack.sh index 18d1695c..d54d7711 100755 --- a/stack.sh +++ b/stack.sh @@ -1310,6 +1310,11 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then cp $FILES/keystone.conf $KEYSTONE_CONF sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/keystone,g" -i $KEYSTONE_CONF sudo sed -e "s,%DEST%,$DEST,g" -i $KEYSTONE_CONF + sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $KEYSTONE_CONF + + KEYSTONE_CATALOG=$KEYSTONE_DIR/etc/default_catalog.template + cp $FILES/default_catalog.template $KEYSTONE_CATALOG + sudo sed -e "s,%SERVICE_HOST%,$SERVICE_HOST,g" -i $KEYSTONE_CATALOG # keystone_data.sh creates our admin user and our ``SERVICE_TOKEN``. 
KEYSTONE_DATA=$KEYSTONE_DIR/bin/keystone_data.sh diff --git a/stackrc b/stackrc index e87b3cde..c9acdbee 100644 --- a/stackrc +++ b/stackrc @@ -15,7 +15,7 @@ GLANCE_REPO=https://github.com/openstack/glance.git GLANCE_BRANCH=master # unified auth system (manages accounts/tokens) -KEYSTONE_REPO=https://github.com/openstack/keystone.git +KEYSTONE_REPO=https://github.com/termie/keystonelight.git KEYSTONE_BRANCH=master # a websockets/html5 or flash powered VNC console for vm instances From 82aa41a2bd92d693309a325492df2e4ae3ef88a2 Mon Sep 17 00:00:00 2001 From: termie Date: Mon, 9 Jan 2012 22:15:40 -0800 Subject: [PATCH 304/967] oh, comment that out --- files/keystone_data.sh | 54 +++++++++++++++++++++--------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 8ec529a5..edc89258 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -100,33 +100,33 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then fi #endpointTemplates -$BIN_DIR/keystone-manage $* endpointTemplates add \ - RegionOne nova - http://%SERVICE_HOST%:8774/v1.1/%tenant_id% - http://%SERVICE_HOST%:8774/v1.1/%tenant_id% - http://%SERVICE_HOST%:8774/v1.1/%tenant_id% 1 1 -$BIN_DIR/keystone-manage $* endpointTemplates add - RegionOne ec2 - http://%SERVICE_HOST%:8773/services/Cloud - http://%SERVICE_HOST%:8773/services/Admin - http://%SERVICE_HOST%:8773/services/Cloud 1 1 -$BIN_DIR/keystone-manage $* endpointTemplates add - RegionOne glance - http://%SERVICE_HOST%:9292/v1 - http://%SERVICE_HOST%:9292/v1 - http://%SERVICE_HOST%:9292/v1 1 1 -$BIN_DIR/keystone-manage $* endpointTemplates add - RegionOne keystone - http://%SERVICE_HOST%:5000/v2.0 - http://%SERVICE_HOST%:35357/v2.0 - http://%SERVICE_HOST%:5000/v2.0 1 1 -if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then - $BIN_DIR/keystone-manage $* endpointTemplates add - RegionOne swift - http://%SERVICE_HOST%:8080/v1/AUTH_%tenant_id% - http://%SERVICE_HOST%:8080/ - http://%SERVICE_HOST%:8080/v1/AUTH_%tenant_id% 1 1 -fi +#$BIN_DIR/keystone-manage $* endpointTemplates add \ +# RegionOne nova +# http://%SERVICE_HOST%:8774/v1.1/%tenant_id% +# http://%SERVICE_HOST%:8774/v1.1/%tenant_id% +# http://%SERVICE_HOST%:8774/v1.1/%tenant_id% 1 1 +#$BIN_DIR/keystone-manage $* endpointTemplates add +# RegionOne ec2 +# http://%SERVICE_HOST%:8773/services/Cloud +# http://%SERVICE_HOST%:8773/services/Admin +# http://%SERVICE_HOST%:8773/services/Cloud 1 1 +#$BIN_DIR/keystone-manage $* endpointTemplates add +# RegionOne glance +# http://%SERVICE_HOST%:9292/v1 +# http://%SERVICE_HOST%:9292/v1 +# http://%SERVICE_HOST%:9292/v1 1 1 +#$BIN_DIR/keystone-manage $* endpointTemplates add +# RegionOne keystone +# http://%SERVICE_HOST%:5000/v2.0 +# http://%SERVICE_HOST%:35357/v2.0 +# http://%SERVICE_HOST%:5000/v2.0 1 1 +#if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then +# $BIN_DIR/keystone-manage $* endpointTemplates add +# RegionOne swift +# http://%SERVICE_HOST%:8080/v1/AUTH_%tenant_id% +# http://%SERVICE_HOST%:8080/ +# http://%SERVICE_HOST%:8080/v1/AUTH_%tenant_id% 1 1 +#fi # Tokens #$BIN_DIR/keystone-manage token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00 From eacc5953d26a8d1c4b0db4fb4a9cd9b6e201e10b Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 11 Jan 2012 01:59:00 +0000 Subject: [PATCH 305/967] bunch of fixes --- files/keystone_data.sh | 3 +- files/pips/keystone | 1 + stack.sh | 104 
++++++++++++++++++++--------------------- 3 files changed, 52 insertions(+), 56 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index edc89258..4738e8d6 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -1,8 +1,7 @@ #!/bin/bash BIN_DIR=${BIN_DIR:-.} # Tenants -ADMIN_TENANT=`$BIN_DIR/keystone-manage tenant --ks-id-only - create \ +ADMIN_TENANT=`$BIN_DIR/keystone-manage tenant --ks-id-only create \ tenant_name=admin` DEMO_TENANT=`$BIN_DIR/keystone-manage tenant --ks-id-only create \ tenant_name=demo` diff --git a/files/pips/keystone b/files/pips/keystone index 09636e49..fef9f8b0 100644 --- a/files/pips/keystone +++ b/files/pips/keystone @@ -1 +1,2 @@ PassLib +pycli diff --git a/stack.sh b/stack.sh index d54d7711..6ce86706 100755 --- a/stack.sh +++ b/stack.sh @@ -1297,56 +1297,6 @@ if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then fi -# Keystone -# -------- - -if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - # (re)create keystone database - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone;' - - # Configure keystone.conf - KEYSTONE_CONF=$KEYSTONE_DIR/etc/keystone.conf - cp $FILES/keystone.conf $KEYSTONE_CONF - sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/keystone,g" -i $KEYSTONE_CONF - sudo sed -e "s,%DEST%,$DEST,g" -i $KEYSTONE_CONF - sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $KEYSTONE_CONF - - KEYSTONE_CATALOG=$KEYSTONE_DIR/etc/default_catalog.template - cp $FILES/default_catalog.template $KEYSTONE_CATALOG - sudo sed -e "s,%SERVICE_HOST%,$SERVICE_HOST,g" -i $KEYSTONE_CATALOG - - # keystone_data.sh creates our admin user and our ``SERVICE_TOKEN``. - KEYSTONE_DATA=$KEYSTONE_DIR/bin/keystone_data.sh - cp $FILES/keystone_data.sh $KEYSTONE_DATA - sudo sed -e " - s,%KEYSTONE_AUTH_HOST%,$KEYSTONE_AUTH_HOST,g; - s,%KEYSTONE_AUTH_PORT%,$KEYSTONE_AUTH_PORT,g; - s,%KEYSTONE_AUTH_PROTOCOL%,$KEYSTONE_AUTH_PROTOCOL,g; - s,%KEYSTONE_SERVICE_HOST%,$KEYSTONE_SERVICE_HOST,g; - s,%KEYSTONE_SERVICE_PORT%,$KEYSTONE_SERVICE_PORT,g; - s,%KEYSTONE_SERVICE_PROTOCOL%,$KEYSTONE_SERVICE_PROTOCOL,g; - s,%SERVICE_HOST%,$SERVICE_HOST,g; - s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g; - s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g; - " -i $KEYSTONE_DATA - - # Prepare up the database - $KEYSTONE_DIR/bin/keystone-manage sync_database - - # initialize keystone with default users/endpoints - ENABLED_SERVICES=$ENABLED_SERVICES BIN_DIR=$KEYSTONE_DIR/bin bash $KEYSTONE_DATA - - if [ "$SYSLOG" != "False" ]; then - sed -i -e '/^handlers=devel$/s/=devel/=production/' \ - $KEYSTONE_DIR/etc/logging.cnf - sed -i -e "/^log_file/s/log_file/\#log_file/" \ - $KEYSTONE_DIR/etc/keystone.conf - KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_DIR/etc/logging.cnf" - fi -fi - - # Launch Services # =============== @@ -1362,18 +1312,18 @@ function screen_it { tmux new-window -t stack -a -n "$1" "bash" tmux send-keys "$2" C-M else - screen -S stack -X screen -t $1 + screen -L -S stack -X screen -t $1 # sleep to allow bash to be ready to be send the command - we are # creating a new window in screen and then sends characters, so if # bash isn't running by the time we send the command, nothing happens sleep 1.5 - screen -S stack -p $1 -X stuff "$2$NL" + screen -L -S stack -p $1 -X stuff "$2$NL" fi fi } # create a new named screen to run processes in -screen -d -m -S stack -t stack +screen -L -d -m -S stack -t stack sleep 1 # set a reasonable statusbar screen -r stack -X hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= 
%H" @@ -1393,16 +1343,62 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then fi fi +if [[ "$ENABLED_SERVICES" =~ "key" ]]; then + # (re)create keystone database + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;' + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone;' + + # Configure keystone.conf + KEYSTONE_CONF=$KEYSTONE_DIR/etc/keystone.conf + cp $FILES/keystone.conf $KEYSTONE_CONF + sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/keystone,g" -i $KEYSTONE_CONF + sudo sed -e "s,%DEST%,$DEST,g" -i $KEYSTONE_CONF + sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $KEYSTONE_CONF + + KEYSTONE_CATALOG=$KEYSTONE_DIR/etc/default_catalog.templates + cp $FILES/default_catalog.templates $KEYSTONE_CATALOG + sudo sed -e "s,%SERVICE_HOST%,$SERVICE_HOST,g" -i $KEYSTONE_CATALOG + + + if [ "$SYSLOG" != "False" ]; then + sed -i -e '/^handlers=devel$/s/=devel/=production/' \ + $KEYSTONE_DIR/etc/logging.cnf + sed -i -e "/^log_file/s/log_file/\#log_file/" \ + $KEYSTONE_DIR/etc/keystone.conf + KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_DIR/etc/logging.cnf" + fi +fi + # launch the keystone and wait for it to answer before continuing if [[ "$ENABLED_SERVICES" =~ "key" ]]; then screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d" echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/; do sleep 1; done"; then echo "keystone did not start" exit 1 fi + + # keystone_data.sh creates our admin user and our ``SERVICE_TOKEN``. 
+ KEYSTONE_DATA=$KEYSTONE_DIR/bin/keystone_data.sh + cp $FILES/keystone_data.sh $KEYSTONE_DATA + sudo sed -e " + s,%KEYSTONE_AUTH_HOST%,$KEYSTONE_AUTH_HOST,g; + s,%KEYSTONE_AUTH_PORT%,$KEYSTONE_AUTH_PORT,g; + s,%KEYSTONE_AUTH_PROTOCOL%,$KEYSTONE_AUTH_PROTOCOL,g; + s,%KEYSTONE_SERVICE_HOST%,$KEYSTONE_SERVICE_HOST,g; + s,%KEYSTONE_SERVICE_PORT%,$KEYSTONE_SERVICE_PORT,g; + s,%KEYSTONE_SERVICE_PROTOCOL%,$KEYSTONE_SERVICE_PROTOCOL,g; + s,%SERVICE_HOST%,$SERVICE_HOST,g; + s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g; + s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g; + " -i $KEYSTONE_DATA + + # initialize keystone with default users/endpoints + $KEYSTONE_DIR/bin/keystone-manage db_sync + ENABLED_SERVICES=$ENABLED_SERVICES BIN_DIR=$KEYSTONE_DIR/bin bash $KEYSTONE_DATA fi + # launch the nova-api and wait for it to answer before continuing if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api" From 50edca6dd4fa226643e0f714c73c145845d3a6af Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 11 Jan 2012 02:04:39 +0000 Subject: [PATCH 306/967] proper conf --- files/keystone.conf | 2 +- stack.sh | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/files/keystone.conf b/files/keystone.conf index 0649e907..1f5c4453 100644 --- a/files/keystone.conf +++ b/files/keystone.conf @@ -16,7 +16,7 @@ driver = keystone.backends.sql.SqlIdentity [catalog] driver = keystone.backends.templated.TemplatedCatalog -template_file = ./etc/default_catalog.templates +template_file = %KEYSTONE_DIR%/etc/default_catalog.templates [token] driver = keystone.backends.kvs.KvsToken diff --git a/stack.sh b/stack.sh index 6ce86706..a342a8b5 100755 --- a/stack.sh +++ b/stack.sh @@ -1354,6 +1354,7 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/keystone,g" -i $KEYSTONE_CONF sudo sed -e "s,%DEST%,$DEST,g" -i $KEYSTONE_CONF sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $KEYSTONE_CONF + sudo sed -e "s,%KEYSTONE_DIR%,$KEYSTONE_DIR,g" -i $KEYSTONE_CONF KEYSTONE_CATALOG=$KEYSTONE_DIR/etc/default_catalog.templates cp $FILES/default_catalog.templates $KEYSTONE_CATALOG From 747ee33efd9c4142cc46c4e0eb4772f0ca3e149a Mon Sep 17 00:00:00 2001 From: termie Date: Wed, 11 Jan 2012 22:31:59 +0000 Subject: [PATCH 307/967] working now --- stack.sh | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/stack.sh b/stack.sh index a342a8b5..c7eb2aa2 100755 --- a/stack.sh +++ b/stack.sh @@ -1372,7 +1372,7 @@ fi # launch the keystone and wait for it to answer before continuing if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d" + screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" echo "Waiting for keystone to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/; do sleep 1; done"; then echo "keystone did not start" @@ -1532,6 +1532,10 @@ if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then # Create a directory for the downloaded image tarballs. 
mkdir -p $FILES/images + ADMIN_USER=admin + ADMIN_TENANT=admin + TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$ADMIN_USER\", \"password\": \"$ADMIN_PASSWORD\"}, \"tenantName\": \"$ADMIN_TENANT\"}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"` + # Option to upload legacy ami-tty, which works with xenserver if [ $UPLOAD_LEGACY_TTY ]; then if [ ! -f $FILES/tty.tgz ]; then @@ -1539,11 +1543,11 @@ if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then fi tar -zxf $FILES/tty.tgz -C $FILES/images - RVAL=`glance add -A $SERVICE_TOKEN name="tty-kernel" is_public=true container_format=aki disk_format=aki < $FILES/images/aki-tty/image` + RVAL=`glance add -A $TOKEN name="tty-kernel" is_public=true container_format=aki disk_format=aki < $FILES/images/aki-tty/image` KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` - RVAL=`glance add -A $SERVICE_TOKEN name="tty-ramdisk" is_public=true container_format=ari disk_format=ari < $FILES/images/ari-tty/image` + RVAL=`glance add -A $TOKEN name="tty-ramdisk" is_public=true container_format=ari disk_format=ari < $FILES/images/ari-tty/image` RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` - glance add -A $SERVICE_TOKEN name="tty" is_public=true container_format=ami disk_format=ami kernel_id=$KERNEL_ID ramdisk_id=$RAMDISK_ID < $FILES/images/ami-tty/image + glance add -A $TOKEN name="tty" is_public=true container_format=ami disk_format=ami kernel_id=$KERNEL_ID ramdisk_id=$RAMDISK_ID < $FILES/images/ami-tty/image fi for image_url in ${IMAGE_URLS//,/ }; do @@ -1590,14 +1594,14 @@ if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then # kernel for use when uploading the root filesystem. 
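# The glance registrations above reuse a token fetched straight from the
# Keystone v2.0 tokens API.  The same request as a stand-alone sketch
# (HOST_IP and the admin credentials are assumed to come from localrc; the
# fallback values below are illustrative):
HOST_IP=${HOST_IP:-127.0.0.1}
ADMIN_USER=admin
ADMIN_TENANT=admin
ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete}
TOKEN=$(curl -s -H "Content-type: application/json" \
    -d "{\"auth\": {\"passwordCredentials\": {\"username\": \"$ADMIN_USER\", \"password\": \"$ADMIN_PASSWORD\"}, \"tenantName\": \"$ADMIN_TENANT\"}}" \
    http://$HOST_IP:5000/v2.0/tokens |
    python -c "import sys, json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id']")
# python 2 syntax above, matching the snippet used in stack.sh
echo "keystone token: $TOKEN"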
KERNEL_ID=""; RAMDISK_ID=""; if [ -n "$KERNEL" ]; then - RVAL=`glance add -A $SERVICE_TOKEN name="$IMAGE_NAME-kernel" is_public=true container_format=aki disk_format=aki < "$KERNEL"` + RVAL=`glance add -A $TOKEN name="$IMAGE_NAME-kernel" is_public=true container_format=aki disk_format=aki < "$KERNEL"` KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` fi if [ -n "$RAMDISK" ]; then - RVAL=`glance add -A $SERVICE_TOKEN name="$IMAGE_NAME-ramdisk" is_public=true container_format=ari disk_format=ari < "$RAMDISK"` + RVAL=`glance add -A $TOKEN name="$IMAGE_NAME-ramdisk" is_public=true container_format=ari disk_format=ari < "$RAMDISK"` RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` fi - glance add -A $SERVICE_TOKEN name="${IMAGE_NAME%.img}" is_public=true container_format=ami disk_format=ami ${KERNEL_ID:+kernel_id=$KERNEL_ID} ${RAMDISK_ID:+ramdisk_id=$RAMDISK_ID} < <(zcat --force "${IMAGE}") + glance add -A $TOKEN name="${IMAGE_NAME%.img}" is_public=true container_format=ami disk_format=ami ${KERNEL_ID:+kernel_id=$KERNEL_ID} ${RAMDISK_ID:+ramdisk_id=$RAMDISK_ID} < <(zcat --force "${IMAGE}") done fi From 708a2ad10d1cab4a928b38c7455e0b853faae73e Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Wed, 18 Jan 2012 10:19:15 -0800 Subject: [PATCH 308/967] logging tweaks for devstack/keystone and cd for keystone db_sync --- stack.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index c7eb2aa2..33665e21 100755 --- a/stack.sh +++ b/stack.sh @@ -1362,11 +1362,12 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then if [ "$SYSLOG" != "False" ]; then + cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_DIR/etc/logging.conf sed -i -e '/^handlers=devel$/s/=devel/=production/' \ - $KEYSTONE_DIR/etc/logging.cnf + $KEYSTONE_DIR/etc/logging.conf sed -i -e "/^log_file/s/log_file/\#log_file/" \ $KEYSTONE_DIR/etc/keystone.conf - KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_DIR/etc/logging.cnf" + KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_DIR/etc/logging.conf" fi fi @@ -1395,8 +1396,10 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then " -i $KEYSTONE_DATA # initialize keystone with default users/endpoints + pushd $KEYSTONE_DIR $KEYSTONE_DIR/bin/keystone-manage db_sync ENABLED_SERVICES=$ENABLED_SERVICES BIN_DIR=$KEYSTONE_DIR/bin bash $KEYSTONE_DATA + popd fi From 950bb4555aad563fc91ba2d09382fd5a0d66f03f Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Sun, 29 Jan 2012 12:54:57 -0800 Subject: [PATCH 309/967] updating to match master ksl CLI options --- files/keystone_data.sh | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 4738e8d6..d6ddcb34 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -1,36 +1,36 @@ #!/bin/bash BIN_DIR=${BIN_DIR:-.} # Tenants -ADMIN_TENANT=`$BIN_DIR/keystone-manage tenant --ks-id-only create \ +ADMIN_TENANT=`$BIN_DIR/keystone-manage tenant --id-only create \ tenant_name=admin` -DEMO_TENANT=`$BIN_DIR/keystone-manage tenant --ks-id-only create \ +DEMO_TENANT=`$BIN_DIR/keystone-manage tenant --id-only create \ tenant_name=demo` -INVIS_TENANT=`$BIN_DIR/keystone-manage tenant --ks-id-only create \ +INVIS_TENANT=`$BIN_DIR/keystone-manage tenant --id-only create \ tenant_name=invisible_to_admin` # Users -ADMIN_USER=`$BIN_DIR/keystone-manage user --ks-id-only create \ +ADMIN_USER=`$BIN_DIR/keystone-manage user --id-only create \ name=admin \ "password=%ADMIN_PASSWORD%" \ email=admin@example.com` -DEMO_USER=`$BIN_DIR/keystone-manage user --ks-id-only create \ 
+DEMO_USER=`$BIN_DIR/keystone-manage user --id-only create \ name=demo \ "password=%ADMIN_PASSWORD%" \ email=demo@example.com` # Roles -ADMIN_ROLE=`$BIN_DIR/keystone-manage role --ks-id-only create \ +ADMIN_ROLE=`$BIN_DIR/keystone-manage role --id-only create \ name=Admin` -MEMBER_ROLE=`$BIN_DIR/keystone-manage role --ks-id-only create \ +MEMBER_ROLE=`$BIN_DIR/keystone-manage role --id-only create \ name=Member` -KEYSTONEADMIN_ROLE=`$BIN_DIR/keystone-manage role --ks-id-only create \ +KEYSTONEADMIN_ROLE=`$BIN_DIR/keystone-manage role --id-only create \ name=KeystoneAdmin` -KEYSTONESERVICE_ROLE=`$BIN_DIR/keystone-manage role --ks-id-only create \ +KEYSTONESERVICE_ROLE=`$BIN_DIR/keystone-manage role --id-only create \ name=KeystoneServiceAdmin` -SYSADMIN_ROLE=`$BIN_DIR/keystone-manage role --ks-id-only create \ +SYSADMIN_ROLE=`$BIN_DIR/keystone-manage role --id-only create \ name=sysadmin` -NETADMIN_ROLE=`$BIN_DIR/keystone-manage role --ks-id-only create \ +NETADMIN_ROLE=`$BIN_DIR/keystone-manage role --id-only create \ name=netadmin` From 3b15b0566435ba4a11fe948b4c8c74306406da34 Mon Sep 17 00:00:00 2001 From: termie Date: Thu, 2 Feb 2012 16:45:48 -0800 Subject: [PATCH 310/967] update keystone.conf --- files/keystone.conf | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/files/keystone.conf b/files/keystone.conf index 1f5c4453..31620329 100644 --- a/files/keystone.conf +++ b/files/keystone.conf @@ -2,7 +2,18 @@ public_port = 5000 admin_port = 35357 admin_token = %SERVICE_TOKEN% -log_file = %DEST%/keystone/keystone.log +compute_port = 3000 +verbose = True +debug = True +log_config = %DEST%/keystone/keystone.log + +# ================= Syslog Options ============================ +# Send logs to syslog (/dev/log) instead of to file specified +# by `log-file` +use_syslog = False + +# Facility to use. If unset defaults to LOG_USER. 
+# syslog_log_facility = LOG_LOCAL0 [sql] connection = %SQL_CONN% @@ -12,20 +23,23 @@ max_pool_size = 10 pool_timeout = 200 [identity] -driver = keystone.backends.sql.SqlIdentity +driver = keystone.identity.backends.kvs.Identity [catalog] -driver = keystone.backends.templated.TemplatedCatalog +driver = keystone.catalog.backends.templated.TemplatedCatalog template_file = %KEYSTONE_DIR%/etc/default_catalog.templates [token] -driver = keystone.backends.kvs.KvsToken +driver = keystone.token.backends.kvs.Token [policy] -driver = keystone.backends.policy.SimpleMatch +driver = keystone.policy.backends.simple.SimpleMatch + +[ec2] +driver = keystone.contrib.ec2.backends.kvs.Ec2 [filter:debug] -paste.filter_factory = keystone.wsgi:Debug.factory +paste.filter_factory = keystone.common.wsgi:Debug.factory [filter:token_auth] paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory @@ -37,8 +51,10 @@ paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory [filter:crud_extension] -paste.filter_factory = keystone.service:AdminCrudExtension.factory +paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory +[filter:ec2_extension] +paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory [app:public_service] paste.app_factory = keystone.service:public_app_factory @@ -47,10 +63,10 @@ paste.app_factory = keystone.service:public_app_factory paste.app_factory = keystone.service:admin_app_factory [pipeline:public_api] -pipeline = token_auth admin_token_auth json_body debug public_service +pipeline = token_auth admin_token_auth json_body debug ec2_extension public_service [pipeline:admin_api] -pipeline = token_auth admin_token_auth json_body debug crud_extension admin_service +pipeline = token_auth admin_token_auth json_body debug ec2_extension crud_extension admin_service [composite:main] use = egg:Paste#urlmap From 221367c0a3f278b923f32881b585787a826ea9da Mon Sep 17 00:00:00 2001 From: termie Date: Thu, 2 Feb 2012 16:53:42 -0800 Subject: [PATCH 311/967] yup --- files/keystone.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/keystone.conf b/files/keystone.conf index 31620329..745abeea 100644 --- a/files/keystone.conf +++ b/files/keystone.conf @@ -5,7 +5,7 @@ admin_token = %SERVICE_TOKEN% compute_port = 3000 verbose = True debug = True -log_config = %DEST%/keystone/keystone.log +log_file = %DEST%/keystone/keystone.log # ================= Syslog Options ============================ # Send logs to syslog (/dev/log) instead of to file specified From 8a41c9dad483c1da89f8763e76d76a0c9a9984b4 Mon Sep 17 00:00:00 2001 From: termie Date: Thu, 2 Feb 2012 17:31:19 -0800 Subject: [PATCH 312/967] update to get keystone working --- files/apts/keystone | 2 +- files/keystone.conf | 2 +- files/keystone_data.sh | 64 +++++++++++++++++++++--------------------- stack.sh | 2 +- 4 files changed, 35 insertions(+), 35 deletions(-) diff --git a/files/apts/keystone b/files/apts/keystone index 6e6d3d53..94479c92 100644 --- a/files/apts/keystone +++ b/files/apts/keystone @@ -12,4 +12,4 @@ python-greenlet python-routes libldap2-dev libsasl2-dev - +python-bcrypt diff --git a/files/keystone.conf b/files/keystone.conf index 745abeea..1155d925 100644 --- a/files/keystone.conf +++ b/files/keystone.conf @@ -23,7 +23,7 @@ max_pool_size = 10 pool_timeout = 200 [identity] -driver = keystone.identity.backends.kvs.Identity +driver = keystone.identity.backends.sql.Identity [catalog] driver = 
keystone.catalog.backends.templated.TemplatedCatalog diff --git a/files/keystone_data.sh b/files/keystone_data.sh index d6ddcb34..75260841 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -36,40 +36,40 @@ NETADMIN_ROLE=`$BIN_DIR/keystone-manage role --id-only create \ # Add Roles to Users in Tenants -$BIN_DIR/keystone-manage role add_user_to_tenant \ - role_id=$ADMIN_ROLE \ - user_id=$ADMIN_USER \ - tenant_id=$ADMIN_TENANT -$BIN_DIR/keystone-manage role add_user_to_tenant \ - role_id=$MEMBER_ROLE \ - user_id=$DEMO_USER \ - tenant_id=$DEMO_TENANT -$BIN_DIR/keystone-manage role add_user_to_tenant \ - role_id=$SYSADMIN_ROLE \ - user_id=$DEMO_USER \ - tenant_id=$DEMO_TENANT -$BIN_DIR/keystone-manage role add_user_to_tenant \ - role_id=$NETADMIN_ROLE \ - user_id=$DEMO_USER \ - tenant_id=$DEMO_TENANT -$BIN_DIR/keystone-manage role add_user_to_tenant \ - role_id=$MEMBER_ROLE \ - user_id=$DEMO_USER \ - tenant_id=$INVIS_TENANT -$BIN_DIR/keystone-manage role add_user_to_tenant \ - role_id=$ADMIN_ROLE \ - user_id=$ADMIN_USER \ - tenant_id=$DEMO_TENANT +$BIN_DIR/keystone-manage role add_user_role \ + role=$ADMIN_ROLE \ + user=$ADMIN_USER \ + tenant=$ADMIN_TENANT +$BIN_DIR/keystone-manage role add_user_role \ + role=$MEMBER_ROLE \ + user=$DEMO_USER \ + tenant=$DEMO_TENANT +$BIN_DIR/keystone-manage role add_user_role \ + role=$SYSADMIN_ROLE \ + user=$DEMO_USER \ + tenant=$DEMO_TENANT +$BIN_DIR/keystone-manage role add_user_role \ + role=$NETADMIN_ROLE \ + user=$DEMO_USER \ + tenant=$DEMO_TENANT +$BIN_DIR/keystone-manage role add_user_role \ + role=$MEMBER_ROLE \ + user=$DEMO_USER \ + tenant=$INVIS_TENANT +$BIN_DIR/keystone-manage role add_user_role \ + role=$ADMIN_ROLE \ + user=$ADMIN_USER \ + tenant=$DEMO_TENANT # TODO(termie): these two might be dubious -$BIN_DIR/keystone-manage role add_user_to_tenant \ - role_id=$KEYSTONEADMIN_ROLE \ - user_id=$ADMIN_USER \ - tenant_id=$ADMIN_TENANT -$BIN_DIR/keystone-manage role add_user_to_tenant \ - role_id=$KEYSTONESERVICE_ROLE \ - user_id=$ADMIN_USER \ - tenant_id=$ADMIN_TENANT +$BIN_DIR/keystone-manage role add_user_role \ + role=$KEYSTONEADMIN_ROLE \ + user=$ADMIN_USER \ + tenant=$ADMIN_TENANT +$BIN_DIR/keystone-manage role add_user_role \ + role=$KEYSTONESERVICE_ROLE \ + user=$ADMIN_USER \ + tenant=$ADMIN_TENANT # Services $BIN_DIR/keystone-manage service create \ diff --git a/stack.sh b/stack.sh index 33665e21..2bac11cd 100755 --- a/stack.sh +++ b/stack.sh @@ -1373,7 +1373,7 @@ fi # launch the keystone and wait for it to answer before continuing if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" + screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" echo "Waiting for keystone to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
http_proxy= wget -q -O- $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/; do sleep 1; done"; then echo "keystone did not start" From 2e8bb8851fcb633edb0acbb00bd8b7b228638041 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 3 Feb 2012 11:23:32 -0600 Subject: [PATCH 313/967] Fixes bug 920762: change Admin to admin in swift/proxy-server.conf Change-Id: I6cb564ed2a6eaaf325a9573b4a6d372ae8a73c8d --- files/swift/proxy-server.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf index da6b1fa2..3ef02769 100644 --- a/files/swift/proxy-server.conf +++ b/files/swift/proxy-server.conf @@ -19,7 +19,7 @@ account_autocreate = true use = egg:swiftkeystone2#keystone2 keystone_admin_token = %SERVICE_TOKEN% keystone_url = http://localhost:35357/v2.0 -keystone_swift_operator_roles = Member,Admin +keystone_swift_operator_roles = Member,admin [filter:tempauth] use = egg:swift#tempauth From b0b6d315bd12f952d95c9dc02ca9f5e6c4e53a01 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 3 Feb 2012 21:40:32 -0800 Subject: [PATCH 314/967] Changes to get devstack working again with ksl + trunk Change-Id: I7261b2d88325d4f9cb3337478dbcbd04c25e52ed --- files/keystone.conf | 1 + files/keystone_data.sh | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/files/keystone.conf b/files/keystone.conf index 1155d925..ca8e31ea 100644 --- a/files/keystone.conf +++ b/files/keystone.conf @@ -5,6 +5,7 @@ admin_token = %SERVICE_TOKEN% compute_port = 3000 verbose = True debug = True +# should use stdout for devstack, but leaving this for now log_file = %DEST%/keystone/keystone.log # ================= Syslog Options ============================ diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 75260841..35eaa5dd 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -21,7 +21,7 @@ DEMO_USER=`$BIN_DIR/keystone-manage user --id-only create \ # Roles ADMIN_ROLE=`$BIN_DIR/keystone-manage role --id-only create \ - name=Admin` + name=admin` MEMBER_ROLE=`$BIN_DIR/keystone-manage role --id-only create \ name=Member` KEYSTONEADMIN_ROLE=`$BIN_DIR/keystone-manage role --id-only create \ From d281376de8059d0f29bb2df725c0924800ab06db Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 6 Feb 2012 21:21:52 +0000 Subject: [PATCH 315/967] fix logging and move keystone client earlier in the install chain --- files/keystone.conf | 4 ++-- stack.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/files/keystone.conf b/files/keystone.conf index ca8e31ea..0ee08279 100644 --- a/files/keystone.conf +++ b/files/keystone.conf @@ -5,8 +5,8 @@ admin_token = %SERVICE_TOKEN% compute_port = 3000 verbose = True debug = True -# should use stdout for devstack, but leaving this for now -log_file = %DEST%/keystone/keystone.log +# commented out so devstack logs to stdout +# log_file = %DEST%/keystone/keystone.log # ================= Syslog Options ============================ # Send logs to syslog (/dev/log) instead of to file specified diff --git a/stack.sh b/stack.sh index 2bac11cd..cedf597f 100755 --- a/stack.sh +++ b/stack.sh @@ -653,6 +653,8 @@ fi # setup our checkouts so they are installed into python path # allowing ``import nova`` or ``import glance.client`` +cd $KEYSTONECLIENT_DIR; sudo python setup.py develop +cd $NOVACLIENT_DIR; sudo python setup.py develop if [[ "$ENABLED_SERVICES" =~ "key" || "$ENABLED_SERVICES" =~ "g-api" || 
"$ENABLED_SERVICES" =~ "n-api" || @@ -667,10 +669,8 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" || "$ENABLED_SERVICES" =~ "n-api" ]]; then cd $GLANCE_DIR; sudo python setup.py develop fi -cd $NOVACLIENT_DIR; sudo python setup.py develop cd $NOVA_DIR; sudo python setup.py develop if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then - cd $KEYSTONECLIENT_DIR; sudo python setup.py develop cd $HORIZON_DIR/horizon; sudo python setup.py develop cd $HORIZON_DIR/openstack-dashboard; sudo python setup.py develop fi From 1188904eee9a1473b154850e5ae77a48a71ef9f4 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 12 Jan 2012 17:11:56 -0800 Subject: [PATCH 316/967] Separate out build_xva process so that it can be run on a machine besides dom0 Change-Id: I3a6e1ef874c83d79d09a8df1f086ec06d39db2df --- tools/xen/README.md | 13 ++- tools/xen/build_domU.sh | 200 +++------------------------------------- tools/xen/build_xva.sh | 164 ++++++++++++++++++++++++++++++++ tools/xen/xenrc | 38 ++++++++ 4 files changed, 227 insertions(+), 188 deletions(-) create mode 100755 tools/xen/build_xva.sh create mode 100644 tools/xen/xenrc diff --git a/tools/xen/README.md b/tools/xen/README.md index 63350ea7..a3398a78 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -54,7 +54,16 @@ Of course, use real passwords if this machine is exposed. ACTIVE_TIMEOUT=45 EOF -Step 4: Run ./build_domU.sh +Step 4: Run ./build_xva.sh +-------------------------- +This script prpares your nova xva image. This script can be run on a separate machine +and copied to dom0. If you run this on a different machine, copy the resulting xva +file to tools/xen/xvas/[GUEST_NAME].xva (by default tools/xen/xvas/ALLINONE.xva) + +It is likely that for XS6 you will need to build_xva.sh on a separate machine due +to dom0 space constraints. + +Step 5: Run ./build_domU.sh -------------------------- This script does a lot of stuff, it is probably best to read it in its entirety. But in a nutshell, it performs the following: @@ -63,7 +72,7 @@ But in a nutshell, it performs the following: * Creates and installs a OpenStack all-in-one domU in an HA-FlatDHCP configuration * A script to create a multi-domU (ie. head node separated from compute) configuration is coming soon! -Step 5: Do cloudy stuff! +Step 6: Do cloudy stuff! -------------------------- * Play with horizon * Play with the CLI diff --git a/tools/xen/build_domU.sh b/tools/xen/build_domU.sh index 642b40f7..cd28f155 100755 --- a/tools/xen/build_domU.sh +++ b/tools/xen/build_domU.sh @@ -10,42 +10,18 @@ fi # This directory TOP_DIR=$(cd $(dirname "$0") && pwd) -# Source params -cd ../.. 
&& source ./stackrc && cd $TOP_DIR +# Source params - override xenrc params in your localrc to suite your taste +source xenrc # Echo commands set -o xtrace -# Name of this guest -GUEST_NAME=${GUEST_NAME:-ALLINONE} - -# dom0 ip -HOST_IP=${HOST_IP:-`ifconfig xenbr0 | grep "inet addr" | cut -d ":" -f2 | sed "s/ .*//"`} - -# Our nova host's network info -VM_IP=${VM_IP:-10.255.255.255} # A host-only ip that let's the interface come up, otherwise unused -MGT_IP=${MGT_IP:-172.16.100.55} -PUB_IP=${PUB_IP:-192.168.1.55} - -# Public network -PUB_BR=${PUB_BR:-xenbr0} -PUB_NETMASK=${PUB_NETMASK:-255.255.255.0} - -# VM network params -VM_NETMASK=${VM_NETMASK:-255.255.255.0} -VM_BR=${VM_BR:-xapi1} -VM_VLAN=${VM_VLAN:-100} - -# MGMT network params -MGT_NETMASK=${MGT_NETMASK:-255.255.255.0} -MGT_BR=${MGT_BR:-xapi2} -MGT_VLAN=${MGT_VLAN:-101} - -# VM Password -GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} - -# Size of image -VDI_MB=${VDI_MB:-2500} +# Check for xva file +if [ ! -e $XVA ]; then + echo "Missing xva file. Please run build_xva.sh (ideally on a non dom0 host since the build can require lots of space)." + echo "Place the resulting xva file in $XVA" + exit 1 +fi # Make sure we have git if ! which git; then @@ -95,6 +71,9 @@ function create_vlan() { create_vlan $PIF $VM_VLAN $VM_NET create_vlan $PIF $MGT_VLAN $MGT_NET +# dom0 ip +HOST_IP=${HOST_IP:-`ifconfig xenbr0 | grep "inet addr" | cut -d ":" -f2 | sed "s/ .*//"`} + # Setup host-only nat rules HOST_NET=169.254.0.0/16 if ! iptables -L -v -t nat | grep -q $HOST_NET; then @@ -117,86 +96,9 @@ fi # Enable ip forwarding at runtime as well echo 1 > /proc/sys/net/ipv4/ip_forward -# Directory where we stage the build -STAGING_DIR=$TOP_DIR/stage - -# Option to clean out old stuff -CLEAN=${CLEAN:-0} -if [ "$CLEAN" = "1" ]; then - rm -rf $STAGING_DIR -fi - -# Download our base image. This image is made using prepare_guest.sh -BASE_IMAGE_URL=${BASE_IMAGE_URL:-http://images.ansolabs.com/xen/stage.tgz} -if [ ! -e $STAGING_DIR ]; then - if [ ! -e /tmp/stage.tgz ]; then - wget $BASE_IMAGE_URL -O /tmp/stage.tgz - fi - tar xfz /tmp/stage.tgz - cd $TOP_DIR -fi - -# Free up precious disk space -rm -f /tmp/stage.tgz - -# Make sure we have a stage -if [ ! -d $STAGING_DIR/etc ]; then - echo "Stage is not properly set up!" - exit 1 -fi - -# Directory where our conf files are stored -FILES_DIR=$TOP_DIR/files -TEMPLATES_DIR=$TOP_DIR/templates - -# Directory for supporting script files -SCRIPT_DIR=$TOP_DIR/scripts - -# Version of ubuntu with which we are working -UBUNTU_VERSION=`cat $STAGING_DIR/etc/lsb-release | grep "DISTRIB_CODENAME=" | sed "s/DISTRIB_CODENAME=//"` -KERNEL_VERSION=`ls $STAGING_DIR/boot/vmlinuz* | head -1 | sed "s/.*vmlinuz-//"` - -# Setup fake grub -rm -rf $STAGING_DIR/boot/grub/ -mkdir -p $STAGING_DIR/boot/grub/ -cp $TEMPLATES_DIR/menu.lst.in $STAGING_DIR/boot/grub/menu.lst -sed -e "s,@KERNEL_VERSION@,$KERNEL_VERSION,g" -i $STAGING_DIR/boot/grub/menu.lst - -# Setup fstab, tty, and other system stuff -cp $FILES_DIR/fstab $STAGING_DIR/etc/fstab -cp $FILES_DIR/hvc0.conf $STAGING_DIR/etc/init/ - -# Put the VPX into UTC. 
-rm -f $STAGING_DIR/etc/localtime - -# Configure dns (use same dns as dom0) -cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf - -# Copy over devstack -rm -f /tmp/devstack.tar -tar --exclude='stage' --exclude='xen/xvas' --exclude='xen/nova' -cvf /tmp/devstack.tar $TOP_DIR/../../../devstack -cd $STAGING_DIR/opt/stack/ -tar xf /tmp/devstack.tar -cd $TOP_DIR - -# Configure OVA -VDI_SIZE=$(($VDI_MB*1024*1024)) -PRODUCT_BRAND=${PRODUCT_BRAND:-openstack} -PRODUCT_VERSION=${PRODUCT_VERSION:-001} -BUILD_NUMBER=${BUILD_NUMBER:-001} -LABEL="$PRODUCT_BRAND $PRODUCT_VERSION-$BUILD_NUMBER" -OVA=$STAGING_DIR/tmp/ova.xml -cp $TEMPLATES_DIR/ova.xml.in $OVA -sed -e "s,@VDI_SIZE@,$VDI_SIZE,g" -i $OVA -sed -e "s,@PRODUCT_BRAND@,$PRODUCT_BRAND,g" -i $OVA -sed -e "s,@PRODUCT_VERSION@,$PRODUCT_VERSION,g" -i $OVA -sed -e "s,@BUILD_NUMBER@,$BUILD_NUMBER,g" -i $OVA - -# Directory for xvas -XVA_DIR=$TOP_DIR/xvas - -# Create xva dir -mkdir -p $XVA_DIR +# Set local storage il8n +SR_UUID=`xe sr-list --minimal name-label="Local storage"` +xe sr-param-set uuid=$SR_UUID other-config:i18n-key=local-storage # Clean nova if desired if [ "$CLEAN" = "1" ]; then @@ -210,24 +112,12 @@ if [ ! -d $TOP_DIR/nova ]; then git checkout $NOVA_BRANCH fi -# Run devstack on launch -cat <$STAGING_DIR/etc/rc.local -GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ DO_TGZ=0 bash /opt/stack/devstack/tools/xen/prepare_guest.sh -su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" stack -exit 0 -EOF - # Install plugins cp -pr $TOP_DIR/nova/plugins/xenserver/xenapi/etc/xapi.d /etc/ chmod a+x /etc/xapi.d/plugins/* yum --enablerepo=base install -y parted mkdir -p /boot/guest -# Set local storage il8n -SR_UUID=`xe sr-list --minimal name-label="Local storage"` -xe sr-param-set uuid=$SR_UUID other-config:i18n-key=local-storage - - # Shutdown previous runs DO_SHUTDOWN=${DO_SHUTDOWN:-1} if [ "$DO_SHUTDOWN" = "1" ]; then @@ -248,68 +138,6 @@ if [ "$DO_SHUTDOWN" = "1" ]; then done fi -# Path to head xva. By default keep overwriting the same one to save space -USE_SEPARATE_XVAS=${USE_SEPARATE_XVAS:-0} -if [ "$USE_SEPARATE_XVAS" = "0" ]; then - XVA=$XVA_DIR/$UBUNTU_VERSION.xva -else - XVA=$XVA_DIR/$UBUNTU_VERSION.$GUEST_NAME.xva -fi - -# Clean old xva. In the future may not do this every time. 
-rm -f $XVA - -# Configure the hostname -echo $GUEST_NAME > $STAGING_DIR/etc/hostname - -# Hostname must resolve for rabbit -cat <$STAGING_DIR/etc/hosts -$MGT_IP $GUEST_NAME -127.0.0.1 localhost localhost.localdomain -EOF - -# Configure the network -INTERFACES=$STAGING_DIR/etc/network/interfaces -cp $TEMPLATES_DIR/interfaces.in $INTERFACES -sed -e "s,@ETH1_IP@,$VM_IP,g" -i $INTERFACES -sed -e "s,@ETH1_NETMASK@,$VM_NETMASK,g" -i $INTERFACES -sed -e "s,@ETH2_IP@,$MGT_IP,g" -i $INTERFACES -sed -e "s,@ETH2_NETMASK@,$MGT_NETMASK,g" -i $INTERFACES -sed -e "s,@ETH3_IP@,$PUB_IP,g" -i $INTERFACES -sed -e "s,@ETH3_NETMASK@,$PUB_NETMASK,g" -i $INTERFACES - -# Gracefully cp only if source file/dir exists -function cp_it { - if [ -e $1 ] || [ -d $1 ]; then - cp -pRL $1 $2 - fi -} - -# Copy over your ssh keys and env if desired -COPYENV=${COPYENV:-1} -if [ "$COPYENV" = "1" ]; then - cp_it ~/.ssh $STAGING_DIR/opt/stack/.ssh - cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/opt/stack/.ssh/authorized_keys - cp_it ~/.gitconfig $STAGING_DIR/opt/stack/.gitconfig - cp_it ~/.vimrc $STAGING_DIR/opt/stack/.vimrc - cp_it ~/.bashrc $STAGING_DIR/opt/stack/.bashrc -fi - -# Configure run.sh -cat <$STAGING_DIR/opt/stack/run.sh -#!/bin/bash -cd /opt/stack/devstack -killall screen -UPLOAD_LEGACY_TTY=yes HOST_IP=$PUB_IP VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=1 $STACKSH_PARAMS ./stack.sh -EOF -chmod 755 $STAGING_DIR/opt/stack/run.sh - -# Create xva -if [ ! -e $XVA ]; then - rm -rf /tmp/mkxva* - UID=0 $SCRIPT_DIR/mkxva -o $XVA -t xva -x $OVA $STAGING_DIR $VDI_MB /tmp/ -fi - # Start guest $TOP_DIR/scripts/install-os-vpx.sh -f $XVA -v $VM_BR -m $MGT_BR -p $PUB_BR diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh new file mode 100755 index 00000000..e4de2a1a --- /dev/null +++ b/tools/xen/build_xva.sh @@ -0,0 +1,164 @@ +#!/bin/bash + +# Abort if localrc is not set +if [ ! -e ../../localrc ]; then + echo "You must have a localrc with ALL necessary passwords defined before proceeding." + echo "See the xen README for required passwords." + exit 1 +fi + +# This directory +TOP_DIR=$(cd $(dirname "$0") && pwd) + +# Source params - override xenrc params in your localrc to suite your taste +source xenrc + +# Echo commands +set -o xtrace + +# Directory where we stage the build +STAGING_DIR=$TOP_DIR/stage + +# Option to clean out old stuff +CLEAN=${CLEAN:-0} +if [ "$CLEAN" = "1" ]; then + rm -rf $STAGING_DIR +fi + +# Download our base image. This image is made using prepare_guest.sh +BASE_IMAGE_URL=${BASE_IMAGE_URL:-http://images.ansolabs.com/xen/stage.tgz} +if [ ! -e $STAGING_DIR ]; then + if [ ! -e /tmp/stage.tgz ]; then + wget $BASE_IMAGE_URL -O /tmp/stage.tgz + fi + tar xfz /tmp/stage.tgz + cd $TOP_DIR +fi + +# Free up precious disk space +rm -f /tmp/stage.tgz + +# Make sure we have a stage +if [ ! -d $STAGING_DIR/etc ]; then + echo "Stage is not properly set up!" 
+ exit 1 +fi + +# Directory where our conf files are stored +FILES_DIR=$TOP_DIR/files +TEMPLATES_DIR=$TOP_DIR/templates + +# Directory for supporting script files +SCRIPT_DIR=$TOP_DIR/scripts + +# Version of ubuntu with which we are working +UBUNTU_VERSION=`cat $STAGING_DIR/etc/lsb-release | grep "DISTRIB_CODENAME=" | sed "s/DISTRIB_CODENAME=//"` +KERNEL_VERSION=`ls $STAGING_DIR/boot/vmlinuz* | head -1 | sed "s/.*vmlinuz-//"` + +# Directory for xvas +XVA_DIR=$TOP_DIR/xvas + +# Create xva dir +mkdir -p $XVA_DIR + +# Path to xva +XVA=$XVA_DIR/$GUEST_NAME.xva + +# Setup fake grub +rm -rf $STAGING_DIR/boot/grub/ +mkdir -p $STAGING_DIR/boot/grub/ +cp $TEMPLATES_DIR/menu.lst.in $STAGING_DIR/boot/grub/menu.lst +sed -e "s,@KERNEL_VERSION@,$KERNEL_VERSION,g" -i $STAGING_DIR/boot/grub/menu.lst + +# Setup fstab, tty, and other system stuff +cp $FILES_DIR/fstab $STAGING_DIR/etc/fstab +cp $FILES_DIR/hvc0.conf $STAGING_DIR/etc/init/ + +# Put the VPX into UTC. +rm -f $STAGING_DIR/etc/localtime + +# Configure dns (use same dns as dom0) +cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf + +# Copy over devstack +rm -f /tmp/devstack.tar +tar --exclude='stage' --exclude='xen/xvas' --exclude='xen/nova' -cvf /tmp/devstack.tar $TOP_DIR/../../../devstack +cd $STAGING_DIR/opt/stack/ +tar xf /tmp/devstack.tar +cd $TOP_DIR + +# Configure OVA +VDI_SIZE=$(($VDI_MB*1024*1024)) +PRODUCT_BRAND=${PRODUCT_BRAND:-openstack} +PRODUCT_VERSION=${PRODUCT_VERSION:-001} +BUILD_NUMBER=${BUILD_NUMBER:-001} +LABEL="$PRODUCT_BRAND $PRODUCT_VERSION-$BUILD_NUMBER" +OVA=$STAGING_DIR/tmp/ova.xml +cp $TEMPLATES_DIR/ova.xml.in $OVA +sed -e "s,@VDI_SIZE@,$VDI_SIZE,g" -i $OVA +sed -e "s,@PRODUCT_BRAND@,$PRODUCT_BRAND,g" -i $OVA +sed -e "s,@PRODUCT_VERSION@,$PRODUCT_VERSION,g" -i $OVA +sed -e "s,@BUILD_NUMBER@,$BUILD_NUMBER,g" -i $OVA + +# Run devstack on launch +cat <$STAGING_DIR/etc/rc.local +GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ DO_TGZ=0 bash /opt/stack/devstack/tools/xen/prepare_guest.sh +su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" stack +exit 0 +EOF + +# Clean old xva. In the future may not do this every time. +rm -f $XVA + +# Configure the hostname +echo $GUEST_NAME > $STAGING_DIR/etc/hostname + +# Hostname must resolve for rabbit +cat <$STAGING_DIR/etc/hosts +$MGT_IP $GUEST_NAME +127.0.0.1 localhost localhost.localdomain +EOF + +# Configure the network +INTERFACES=$STAGING_DIR/etc/network/interfaces +cp $TEMPLATES_DIR/interfaces.in $INTERFACES +sed -e "s,@ETH1_IP@,$VM_IP,g" -i $INTERFACES +sed -e "s,@ETH1_NETMASK@,$VM_NETMASK,g" -i $INTERFACES +sed -e "s,@ETH2_IP@,$MGT_IP,g" -i $INTERFACES +sed -e "s,@ETH2_NETMASK@,$MGT_NETMASK,g" -i $INTERFACES +sed -e "s,@ETH3_IP@,$PUB_IP,g" -i $INTERFACES +sed -e "s,@ETH3_NETMASK@,$PUB_NETMASK,g" -i $INTERFACES + +# Gracefully cp only if source file/dir exists +function cp_it { + if [ -e $1 ] || [ -d $1 ]; then + cp -pRL $1 $2 + fi +} + +# Copy over your ssh keys and env if desired +COPYENV=${COPYENV:-1} +if [ "$COPYENV" = "1" ]; then + cp_it ~/.ssh $STAGING_DIR/opt/stack/.ssh + cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/opt/stack/.ssh/authorized_keys + cp_it ~/.gitconfig $STAGING_DIR/opt/stack/.gitconfig + cp_it ~/.vimrc $STAGING_DIR/opt/stack/.vimrc + cp_it ~/.bashrc $STAGING_DIR/opt/stack/.bashrc +fi + +# Configure run.sh +cat <$STAGING_DIR/opt/stack/run.sh +#!/bin/bash +cd /opt/stack/devstack +killall screen +UPLOAD_LEGACY_TTY=yes HOST_IP=$PUB_IP VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=1 $STACKSH_PARAMS ./stack.sh +EOF +chmod 755 $STAGING_DIR/opt/stack/run.sh + +# Create xva +if [ ! 
-e $XVA ]; then + rm -rf /tmp/mkxva* + UID=0 $SCRIPT_DIR/mkxva -o $XVA -t xva -x $OVA $STAGING_DIR $VDI_MB /tmp/ +fi + +echo "Built $XVA. If your dom0 is on a different machine, copy this to [devstackdir]/tools/xen/$XVA" diff --git a/tools/xen/xenrc b/tools/xen/xenrc new file mode 100644 index 00000000..246ac16b --- /dev/null +++ b/tools/xen/xenrc @@ -0,0 +1,38 @@ +#!/bin/bash + +# Name of this guest +GUEST_NAME=${GUEST_NAME:-ALLINONE} + +# Size of image +VDI_MB=${VDI_MB:-2500} + +# VM Password +GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} + +# Our nova host's network info +VM_IP=${VM_IP:-10.255.255.255} # A host-only ip that let's the interface come up, otherwise unused +MGT_IP=${MGT_IP:-172.16.100.55} +PUB_IP=${PUB_IP:-192.168.1.55} + +# Public network +PUB_BR=${PUB_BR:-xenbr0} +PUB_NETMASK=${PUB_NETMASK:-255.255.255.0} + +# VM network params +VM_NETMASK=${VM_NETMASK:-255.255.255.0} +VM_BR=${VM_BR:-xapi1} +VM_VLAN=${VM_VLAN:-100} + +# MGMT network params +MGT_NETMASK=${MGT_NETMASK:-255.255.255.0} +MGT_BR=${MGT_BR:-xapi2} +MGT_VLAN=${MGT_VLAN:-101} + +# XVA Directory +XVA_DIR=${XVA_DIR:-xvas} + +# Path to xva file +XVA=${XVA:-$XVA_DIR/$GUEST_NAME.xva } + +# Source params +cd ../.. && source ./stackrc && cd $TOP_DIR From 658ac7a4cc0f54ce64e9704dcabd2d4a262bb24f Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 6 Feb 2012 22:56:37 +0000 Subject: [PATCH 317/967] create and store secret and access keys --- files/keystone.conf | 2 +- files/keystone_data.sh | 24 +++++++++++++++++++----- openrc | 4 ++-- stack.sh | 2 +- stackrc | 5 +++++ 5 files changed, 28 insertions(+), 9 deletions(-) diff --git a/files/keystone.conf b/files/keystone.conf index 0ee08279..3167c0f4 100644 --- a/files/keystone.conf +++ b/files/keystone.conf @@ -37,7 +37,7 @@ driver = keystone.token.backends.kvs.Token driver = keystone.policy.backends.simple.SimpleMatch [ec2] -driver = keystone.contrib.ec2.backends.kvs.Ec2 +driver = keystone.contrib.ec2.backends.sql.Ec2 [filter:debug] paste.filter_factory = keystone.common.wsgi:Debug.factory diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 35eaa5dd..39952b16 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -98,6 +98,25 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then "description=Swift Service" fi +# create ec2 creds and parse the secret and access key returned +RESULT=`$BIN_DIR/keystone-manage ec2 create user_id=$ADMIN_USER tenant_id=$ADMIN_TENANT` +ADMIN_ACCESS=`echo $RESULT | python -c "import sys; import json; result = json.loads(sys.stdin.read()); print result['access'];"` +ADMIN_SECRET=`echo $RESULT | python -c "import sys; import json; result = json.loads(sys.stdin.read()); print result['secret'];"` + + +RESULT=`$BIN_DIR/keystone-manage ec2 create user_id=$DEMO_USER tenant_id=$DEMO_TENANT` +DEMO_ACCESS=`echo $RESULT | python -c "import sys; import json; result = json.loads(sys.stdin.read()); print result['access'];"` +DEMO_SECRET=`echo $RESULT | python -c "import sys; import json; result = json.loads(sys.stdin.read()); print result['secret'];"` + +# write the secret and access to ec2rc +cat > $DEVSTACK_DIR/ec2rc < Date: Fri, 3 Feb 2012 21:40:32 -0800 Subject: [PATCH 318/967] Move the clone of keystoneclient Change-Id: I82eb457e36e9f43636bc27242fb4a1b4b1ecad36 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 8421c3c2..8dc54ce2 100755 --- a/stack.sh +++ b/stack.sh @@ -602,6 +602,7 @@ function git_clone { # compute service git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH # python 
client library to nova that horizon (and others) use +git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH # glance, swift middleware and nova api needs keystone middleware @@ -630,7 +631,6 @@ fi if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then # django powered web control panel for openstack git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG - git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH fi if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then # quantum From 06980ea0aeaec4d4f6b7df756ca1a9238efc7b92 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 7 Feb 2012 18:13:44 +0100 Subject: [PATCH 319/967] Check if screen is installed before using it. Fixes 928377 Change-Id: Ie34860e6d08a457dd52a6c3106d63b54c284f6f3 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 3ec0cb8e..c0c30c28 100755 --- a/stack.sh +++ b/stack.sh @@ -90,7 +90,7 @@ source ./stackrc DEST=${DEST:-/opt/stack} # Check to see if we are already running a stack.sh -if screen -ls | egrep -q "[0-9].stack"; then +if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].stack"; then echo "You are already running a stack.sh session." echo "To rejoin this session type 'screen -x stack'." echo "To destroy this session, kill the running screen." From 2f9cdef933f6ae96e1a660fac773a07657f0d694 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 8 Feb 2012 00:54:20 +0000 Subject: [PATCH 320/967] Update code to remove iscsi targets * Fixes bug 928475 Change-Id: I9b54436522422d865c7add750d612c371945817a --- stack.sh | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/stack.sh b/stack.sh index c0c30c28..10aafeaa 100755 --- a/stack.sh +++ b/stack.sh @@ -1077,15 +1077,12 @@ if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then fi if sudo vgs $VOLUME_GROUP; then + # Remove nova iscsi targets + sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true # Clean out existing volumes for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do # VOLUME_NAME_PREFIX prefixes the LVs we want if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then - tid=`egrep "^tid.+$lv" /proc/net/iet/volume | cut -f1 -d' ' | tr ':' '='` - if [[ -n "$tid" ]]; then - lun=`egrep "lun.+$lv" /proc/net/iet/volume | cut -f1 -d' ' | tr ':' '=' | tr -d '\t'` - sudo ietadm --op delete --$tid --$lun - fi sudo lvremove -f $VOLUME_GROUP/$lv fi done From a94784135e5c420e967e14ad535926ddc2580a9c Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 8 Feb 2012 11:49:28 -0600 Subject: [PATCH 321/967] Not all distros include a symlink from euca-describe-group to euca-describe-groups Change-Id: Iaba71c36b405d2891aebdb45c1b9fd84853988f5 --- exercises/euca.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index e569196a..834e4ecf 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -39,9 +39,9 @@ IMAGE=`euca-describe-images | grep machine | cut -f2 | head -n1` SECGROUP=euca_secgroup # Add a secgroup -if ! euca-describe-group | grep -q $SECGROUP; then +if ! euca-describe-groups | grep -q $SECGROUP; then euca-add-group -d "$SECGROUP description" $SECGROUP - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-group | grep -q $SECGROUP; do sleep 1; done"; then + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! 
euca-describe-groups | grep -q $SECGROUP; do sleep 1; done"; then echo "Security group not created" exit 1 fi From 6563a3ce76c5ea8e20056350d68c19a6366bca5d Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 31 Jan 2012 12:11:56 -0600 Subject: [PATCH 322/967] Extract common functions into a separate file This is the start of an effort to organize devstack's code to better document the requirements for configuring the individual components. Change-Id: I3476b76b9d1f9ee63687fb9898a98729118cbd84 --- functions | 91 ++++++++++++++++++++++++++++ stack.sh | 75 +---------------------- tools/build_ramdisk.sh | 34 ++--------- tools/build_tempest.sh | 41 ++----------- tools/build_uec.sh | 7 ++- tools/build_uec_ramdisk.sh | 36 ++--------- tools/configure_tempest.sh | 5 +- tools/copy_dev_environment_to_uec.sh | 12 ++-- tools/get_uec_image.sh | 5 +- 9 files changed, 125 insertions(+), 181 deletions(-) create mode 100644 functions diff --git a/functions b/functions new file mode 100644 index 00000000..01c4758f --- /dev/null +++ b/functions @@ -0,0 +1,91 @@ +# functions - Common functions used by DevStack components + + +# apt-get wrapper to set arguments correctly +# apt_get package [package ...] +function apt_get() { + [[ "$OFFLINE" = "True" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + $sudo DEBIAN_FRONTEND=noninteractive \ + http_proxy=$http_proxy https_proxy=$https_proxy \ + apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@" +} + + +# Gracefully cp only if source file/dir exists +# cp_it source destination +function cp_it { + if [ -e $1 ] || [ -d $1 ]; then + cp -pRL $1 $2 + fi +} + + +# git clone only if directory doesn't exist already. Since ``DEST`` might not +# be owned by the installation user, we create the directory and change the +# ownership to the proper user. +# Set global RECLONE=yes to simulate a clone when dest-dir exists +# git_clone remote dest-dir branch +function git_clone { + [[ "$OFFLINE" = "True" ]] && return + + GIT_REMOTE=$1 + GIT_DEST=$2 + GIT_BRANCH=$3 + + if echo $GIT_BRANCH | egrep -q "^refs"; then + # If our branch name is a gerrit style refs/changes/... + if [[ ! -d $GIT_DEST ]]; then + git clone $GIT_REMOTE $GIT_DEST + fi + cd $GIT_DEST + git fetch $GIT_REMOTE $GIT_BRANCH && git checkout FETCH_HEAD + else + # do a full clone only if the directory doesn't exist + if [[ ! -d $GIT_DEST ]]; then + git clone $GIT_REMOTE $GIT_DEST + cd $GIT_DEST + # This checkout syntax works for both branches and tags + git checkout $GIT_BRANCH + elif [[ "$RECLONE" == "yes" ]]; then + # if it does exist then simulate what clone does if asked to RECLONE + cd $GIT_DEST + # set the url to pull from and fetch + git remote set-url origin $GIT_REMOTE + git fetch origin + # remove the existing ignored files (like pyc) as they cause breakage + # (due to the py files having older timestamps than our pyc, so python + # thinks the pyc files are correct using them) + find $GIT_DEST -name '*.pyc' -delete + git checkout -f origin/$GIT_BRANCH + # a local branch might not exist + git branch -D $GIT_BRANCH || true + git checkout -b $GIT_BRANCH + fi + fi +} + + +# pip install wrapper to set cache and proxy environment variables +# pip_install package [package ...] 
+function pip_install { + [[ "$OFFLINE" = "True" ]] && return + sudo PIP_DOWNLOAD_CACHE=/var/cache/pip \ + HTTP_PROXY=$http_proxy \ + HTTPS_PROXY=$https_proxy \ + pip install --use-mirrors $@ +} + + +# Normalize config values to True or False +# VAR=`trueorfalse default-value test-value` +function trueorfalse() { + local default=$1 + local testval=$2 + + [[ -z "$testval" ]] && { echo "$default"; return; } + [[ "0 no false False FALSE" =~ "$testval" ]] && { echo "False"; return; } + [[ "1 yes true True TRUE" =~ "$testval" ]] && { echo "True"; return; } + echo "$default" +} diff --git a/stack.sh b/stack.sh index 8dc54ce2..c40b24c9 100755 --- a/stack.sh +++ b/stack.sh @@ -35,6 +35,9 @@ fi # Keep track of the current devstack directory. TOP_DIR=$(cd $(dirname "$0") && pwd) +# Import common functions +. $TOP_DIR/functions + # stack.sh keeps the list of **apt** and **pip** dependencies in external # files, along with config templates and other useful files. You can find these # in the ``files`` directory (next to this script). We will reference this @@ -86,16 +89,6 @@ source ./stackrc # Destination path for installation ``DEST`` DEST=${DEST:-/opt/stack} -# apt-get wrapper to just get arguments set correctly -function apt_get() { - [[ "$OFFLINE" = "True" ]] && return - local sudo="sudo" - [ "$(id -u)" = "0" ] && sudo="env" - $sudo DEBIAN_FRONTEND=noninteractive \ - http_proxy=$http_proxy https_proxy=$https_proxy \ - apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@" -} - # Check to see if we are already running a stack.sh if screen -ls | egrep -q "[0-9].stack"; then echo "You are already running a stack.sh session." @@ -155,18 +148,6 @@ else sudo mv $TEMPFILE /etc/sudoers.d/stack_sh_nova fi -# Normalize config values to True or False -# VAR=`trueorfalse default-value test-value` -function trueorfalse() { - local default=$1 - local testval=$2 - - [[ -z "$testval" ]] && { echo "$default"; return; } - [[ "0 no false False FALSE" =~ "$testval" ]] && { echo "False"; return; } - [[ "1 yes true True TRUE" =~ "$testval" ]] && { echo "True"; return; } - echo "$default" -} - # Set True to configure stack.sh to run cleanly without Internet access. # stack.sh must have been previously run with Internet access to install # prerequisites and initialize $DEST. @@ -542,14 +523,6 @@ function get_packages() { done } -function pip_install { - [[ "$OFFLINE" = "True" ]] && return - sudo PIP_DOWNLOAD_CACHE=/var/cache/pip \ - HTTP_PROXY=$http_proxy \ - HTTPS_PROXY=$https_proxy \ - pip install --use-mirrors $@ -} - # install apt requirements apt_get update apt_get install $(get_packages) @@ -557,48 +530,6 @@ apt_get install $(get_packages) # install python requirements pip_install `cat $FILES/pips/* | uniq` -# git clone only if directory doesn't exist already. Since ``DEST`` might not -# be owned by the installation user, we create the directory and change the -# ownership to the proper user. -function git_clone { - [[ "$OFFLINE" = "True" ]] && return - - GIT_REMOTE=$1 - GIT_DEST=$2 - GIT_BRANCH=$3 - - if echo $GIT_BRANCH | egrep -q "^refs"; then - # If our branch name is a gerrit style refs/changes/... - if [ ! -d $GIT_DEST ]; then - git clone $GIT_REMOTE $GIT_DEST - fi - cd $GIT_DEST - git fetch $GIT_REMOTE $GIT_BRANCH && git checkout FETCH_HEAD - else - # do a full clone only if the directory doesn't exist - if [ ! 
-d $GIT_DEST ]; then - git clone $GIT_REMOTE $GIT_DEST - cd $GIT_DEST - # This checkout syntax works for both branches and tags - git checkout $GIT_BRANCH - elif [[ "$RECLONE" == "yes" ]]; then - # if it does exist then simulate what clone does if asked to RECLONE - cd $GIT_DEST - # set the url to pull from and fetch - git remote set-url origin $GIT_REMOTE - git fetch origin - # remove the existing ignored files (like pyc) as they cause breakage - # (due to the py files having older timestamps than our pyc, so python - # thinks the pyc files are correct using them) - find $GIT_DEST -name '*.pyc' -delete - git checkout -f origin/$GIT_BRANCH - # a local branch might not exist - git branch -D $GIT_BRANCH || true - git checkout -b $GIT_BRANCH - fi - fi -} - # compute service git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH # python client library to nova that horizon (and others) use diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh index feaa8a97..7c1600b1 100755 --- a/tools/build_ramdisk.sh +++ b/tools/build_ramdisk.sh @@ -47,7 +47,10 @@ IMG_FILE=$1 # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +. $TOP_DIR/functions # Store cwd CWD=`pwd` @@ -170,35 +173,6 @@ if [ ! -r "`ls $MNTDIR/boot/vmlinuz-*-generic | head -1`" ]; then chroot $MNTDIR apt-get install -y linux-generic fi -# git clone only if directory doesn't exist already. Since ``DEST`` might not -# be owned by the installation user, we create the directory and change the -# ownership to the proper user. -function git_clone { - - # clone new copy or fetch latest changes - CHECKOUT=${MNTDIR}$2 - if [ ! -d $CHECKOUT ]; then - mkdir -p $CHECKOUT - git clone $1 $CHECKOUT - else - pushd $CHECKOUT - git fetch - popd - fi - - # FIXME(ja): checkout specified version (should works for branches and tags) - - pushd $CHECKOUT - # checkout the proper branch/tag - git checkout $3 - # force our local version to be the same as the remote version - git reset --hard origin/$3 - popd - - # give ownership to the stack user - chroot $MNTDIR chown -R stack $2 -} - git_clone $NOVA_REPO $DEST/nova $NOVA_BRANCH git_clone $GLANCE_REPO $DEST/glance $GLANCE_BRANCH git_clone $KEYSTONE_REPO $DEST/keystone $KEYSTONE_BRANCH diff --git a/tools/build_tempest.sh b/tools/build_tempest.sh index aa44766a..230e8f9b 100755 --- a/tools/build_tempest.sh +++ b/tools/build_tempest.sh @@ -26,7 +26,10 @@ trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +. $TOP_DIR/functions # Abort if localrc is not set if [ ! -e $TOP_DIR/localrc ]; then @@ -43,42 +46,8 @@ DEST=${DEST:-/opt/stack} TEMPEST_DIR=$DEST/tempest -DIST_NAME=${DIST_NAME:-oneiric} - -# git clone only if directory doesn't exist already. Since ``DEST`` might not -# be owned by the installation user, we create the directory and change the -# ownership to the proper user. -function git_clone { - - GIT_REMOTE=$1 - GIT_DEST=$2 - GIT_BRANCH=$3 - - # do a full clone only if the directory doesn't exist - if [ ! 
-d $GIT_DEST ]; then - git clone $GIT_REMOTE $GIT_DEST - cd $2 - # This checkout syntax works for both branches and tags - git checkout $GIT_BRANCH - elif [[ "$RECLONE" == "yes" ]]; then - # if it does exist then simulate what clone does if asked to RECLONE - cd $GIT_DEST - # set the url to pull from and fetch - git remote set-url origin $GIT_REMOTE - git fetch origin - # remove the existing ignored files (like pyc) as they cause breakage - # (due to the py files having older timestamps than our pyc, so python - # thinks the pyc files are correct using them) - find $GIT_DEST -name '*.pyc' -delete - git checkout -f origin/$GIT_BRANCH - # a local branch might not exist - git branch -D $GIT_BRANCH || true - git checkout -b $GIT_BRANCH - fi -} - # Install tests and prerequisites -sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors `cat $TOP_DIR/files/pips/tempest` +pip_install `cat $TOP_DIR/files/pips/tempest` git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 04e1a459..ed5a0171 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -8,7 +8,10 @@ fi # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +. $TOP_DIR/functions cd $TOP_DIR @@ -34,7 +37,7 @@ fi # Install deps if needed DEPS="kvm libvirt-bin kpartx cloud-utils curl" -apt-get install -y --force-yes $DEPS || true # allow this to fail gracefully for concurrent builds +apt_get install -y --force-yes $DEPS || true # allow this to fail gracefully for concurrent builds # Where to store files and instances WORK_DIR=${WORK_DIR:-/opt/uecstack} diff --git a/tools/build_uec_ramdisk.sh b/tools/build_uec_ramdisk.sh index 174eaac7..32f90c05 100755 --- a/tools/build_uec_ramdisk.sh +++ b/tools/build_uec_ramdisk.sh @@ -40,7 +40,10 @@ DEST_FILE=$1 # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +. $TOP_DIR/functions cd $TOP_DIR @@ -68,7 +71,7 @@ fi # Install deps if needed DEPS="kvm libvirt-bin kpartx cloud-utils curl" -apt-get install -y --force-yes $DEPS +apt_get install -y --force-yes $DEPS # Where to store files and instances CACHEDIR=${CACHEDIR:-/opt/stack/cache} @@ -113,35 +116,6 @@ if [ ! -r "`ls $MNT_DIR/boot/vmlinuz-*-generic | head -1`" ]; then chroot $MNT_DIR apt-get install -y linux-generic fi -# git clone only if directory doesn't exist already. Since ``DEST`` might not -# be owned by the installation user, we create the directory and change the -# ownership to the proper user. -function git_clone { - - # clone new copy or fetch latest changes - CHECKOUT=${MNT_DIR}$2 - if [ ! 
-d $CHECKOUT ]; then - mkdir -p $CHECKOUT - git clone $1 $CHECKOUT - else - pushd $CHECKOUT - git fetch - popd - fi - - # FIXME(ja): checkout specified version (should works for branches and tags) - - pushd $CHECKOUT - # checkout the proper branch/tag - git checkout $3 - # force our local version to be the same as the remote version - git reset --hard origin/$3 - popd - - # give ownership to the stack user - chroot $MNT_DIR chown -R stack $2 -} - git_clone $NOVA_REPO $DEST/nova $NOVA_BRANCH git_clone $GLANCE_REPO $DEST/glance $GLANCE_BRANCH git_clone $KEYSTONE_REPO $DEST/keystone $KEYSTONE_BRANCH diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 00add9a3..f6ef0d3b 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -30,7 +30,10 @@ trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +. $TOP_DIR/functions # Abort if localrc is not set if [ ! -e $TOP_DIR/localrc ]; then diff --git a/tools/copy_dev_environment_to_uec.sh b/tools/copy_dev_environment_to_uec.sh index c949b329..d5687dc1 100755 --- a/tools/copy_dev_environment_to_uec.sh +++ b/tools/copy_dev_environment_to_uec.sh @@ -8,7 +8,10 @@ set -o errexit # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +. $TOP_DIR/functions # Change dir to top of devstack cd $TOP_DIR @@ -47,13 +50,6 @@ echo stack:pass | chroot $STAGING_DIR chpasswd ( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \ > $STAGING_DIR/etc/sudoers.d/50_stack_sh ) -# Gracefully cp only if source file/dir exists -function cp_it { - if [ -e $1 ] || [ -d $1 ]; then - cp -pRL $1 $2 - fi -} - # Copy over your ssh keys and env if desired cp_it ~/.ssh $STAGING_DIR/$DEST/.ssh cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/$DEST/.ssh/authorized_keys diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh index f66f2bc2..09630740 100755 --- a/tools/get_uec_image.sh +++ b/tools/get_uec_image.sh @@ -6,7 +6,10 @@ ROOTSIZE=${ROOTSIZE:-2000} # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +. 
$TOP_DIR/functions # exit on error to stop unexpected errors set -o errexit From efa578aabe8a2fa3b1d432856186b0e193fcad89 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 3 Feb 2012 11:23:32 -0600 Subject: [PATCH 323/967] Fixes bug 920762: change Admin to admin in swift/proxy-server.conf Change-Id: I6cb564ed2a6eaaf325a9573b4a6d372ae8a73c8d --- files/swift/proxy-server.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf index da6b1fa2..3ef02769 100644 --- a/files/swift/proxy-server.conf +++ b/files/swift/proxy-server.conf @@ -19,7 +19,7 @@ account_autocreate = true use = egg:swiftkeystone2#keystone2 keystone_admin_token = %SERVICE_TOKEN% keystone_url = http://localhost:35357/v2.0 -keystone_swift_operator_roles = Member,Admin +keystone_swift_operator_roles = Member,admin [filter:tempauth] use = egg:swift#tempauth From af6d47cb3da92df0a49adbad1d4c9d4f50124147 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 12 Jan 2012 17:11:56 -0800 Subject: [PATCH 324/967] Separate out build_xva process so that it can be run on a machine besides dom0 Change-Id: I3a6e1ef874c83d79d09a8df1f086ec06d39db2df --- tools/xen/README.md | 13 ++- tools/xen/build_domU.sh | 200 +++------------------------------------- tools/xen/build_xva.sh | 164 ++++++++++++++++++++++++++++++++ tools/xen/xenrc | 38 ++++++++ 4 files changed, 227 insertions(+), 188 deletions(-) create mode 100755 tools/xen/build_xva.sh create mode 100644 tools/xen/xenrc diff --git a/tools/xen/README.md b/tools/xen/README.md index 63350ea7..a3398a78 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -54,7 +54,16 @@ Of course, use real passwords if this machine is exposed. ACTIVE_TIMEOUT=45 EOF -Step 4: Run ./build_domU.sh +Step 4: Run ./build_xva.sh +-------------------------- +This script prpares your nova xva image. This script can be run on a separate machine +and copied to dom0. If you run this on a different machine, copy the resulting xva +file to tools/xen/xvas/[GUEST_NAME].xva (by default tools/xen/xvas/ALLINONE.xva) + +It is likely that for XS6 you will need to build_xva.sh on a separate machine due +to dom0 space constraints. + +Step 5: Run ./build_domU.sh -------------------------- This script does a lot of stuff, it is probably best to read it in its entirety. But in a nutshell, it performs the following: @@ -63,7 +72,7 @@ But in a nutshell, it performs the following: * Creates and installs a OpenStack all-in-one domU in an HA-FlatDHCP configuration * A script to create a multi-domU (ie. head node separated from compute) configuration is coming soon! -Step 5: Do cloudy stuff! +Step 6: Do cloudy stuff! -------------------------- * Play with horizon * Play with the CLI diff --git a/tools/xen/build_domU.sh b/tools/xen/build_domU.sh index 642b40f7..cd28f155 100755 --- a/tools/xen/build_domU.sh +++ b/tools/xen/build_domU.sh @@ -10,42 +10,18 @@ fi # This directory TOP_DIR=$(cd $(dirname "$0") && pwd) -# Source params -cd ../.. 
&& source ./stackrc && cd $TOP_DIR +# Source params - override xenrc params in your localrc to suite your taste +source xenrc # Echo commands set -o xtrace -# Name of this guest -GUEST_NAME=${GUEST_NAME:-ALLINONE} - -# dom0 ip -HOST_IP=${HOST_IP:-`ifconfig xenbr0 | grep "inet addr" | cut -d ":" -f2 | sed "s/ .*//"`} - -# Our nova host's network info -VM_IP=${VM_IP:-10.255.255.255} # A host-only ip that let's the interface come up, otherwise unused -MGT_IP=${MGT_IP:-172.16.100.55} -PUB_IP=${PUB_IP:-192.168.1.55} - -# Public network -PUB_BR=${PUB_BR:-xenbr0} -PUB_NETMASK=${PUB_NETMASK:-255.255.255.0} - -# VM network params -VM_NETMASK=${VM_NETMASK:-255.255.255.0} -VM_BR=${VM_BR:-xapi1} -VM_VLAN=${VM_VLAN:-100} - -# MGMT network params -MGT_NETMASK=${MGT_NETMASK:-255.255.255.0} -MGT_BR=${MGT_BR:-xapi2} -MGT_VLAN=${MGT_VLAN:-101} - -# VM Password -GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} - -# Size of image -VDI_MB=${VDI_MB:-2500} +# Check for xva file +if [ ! -e $XVA ]; then + echo "Missing xva file. Please run build_xva.sh (ideally on a non dom0 host since the build can require lots of space)." + echo "Place the resulting xva file in $XVA" + exit 1 +fi # Make sure we have git if ! which git; then @@ -95,6 +71,9 @@ function create_vlan() { create_vlan $PIF $VM_VLAN $VM_NET create_vlan $PIF $MGT_VLAN $MGT_NET +# dom0 ip +HOST_IP=${HOST_IP:-`ifconfig xenbr0 | grep "inet addr" | cut -d ":" -f2 | sed "s/ .*//"`} + # Setup host-only nat rules HOST_NET=169.254.0.0/16 if ! iptables -L -v -t nat | grep -q $HOST_NET; then @@ -117,86 +96,9 @@ fi # Enable ip forwarding at runtime as well echo 1 > /proc/sys/net/ipv4/ip_forward -# Directory where we stage the build -STAGING_DIR=$TOP_DIR/stage - -# Option to clean out old stuff -CLEAN=${CLEAN:-0} -if [ "$CLEAN" = "1" ]; then - rm -rf $STAGING_DIR -fi - -# Download our base image. This image is made using prepare_guest.sh -BASE_IMAGE_URL=${BASE_IMAGE_URL:-http://images.ansolabs.com/xen/stage.tgz} -if [ ! -e $STAGING_DIR ]; then - if [ ! -e /tmp/stage.tgz ]; then - wget $BASE_IMAGE_URL -O /tmp/stage.tgz - fi - tar xfz /tmp/stage.tgz - cd $TOP_DIR -fi - -# Free up precious disk space -rm -f /tmp/stage.tgz - -# Make sure we have a stage -if [ ! -d $STAGING_DIR/etc ]; then - echo "Stage is not properly set up!" - exit 1 -fi - -# Directory where our conf files are stored -FILES_DIR=$TOP_DIR/files -TEMPLATES_DIR=$TOP_DIR/templates - -# Directory for supporting script files -SCRIPT_DIR=$TOP_DIR/scripts - -# Version of ubuntu with which we are working -UBUNTU_VERSION=`cat $STAGING_DIR/etc/lsb-release | grep "DISTRIB_CODENAME=" | sed "s/DISTRIB_CODENAME=//"` -KERNEL_VERSION=`ls $STAGING_DIR/boot/vmlinuz* | head -1 | sed "s/.*vmlinuz-//"` - -# Setup fake grub -rm -rf $STAGING_DIR/boot/grub/ -mkdir -p $STAGING_DIR/boot/grub/ -cp $TEMPLATES_DIR/menu.lst.in $STAGING_DIR/boot/grub/menu.lst -sed -e "s,@KERNEL_VERSION@,$KERNEL_VERSION,g" -i $STAGING_DIR/boot/grub/menu.lst - -# Setup fstab, tty, and other system stuff -cp $FILES_DIR/fstab $STAGING_DIR/etc/fstab -cp $FILES_DIR/hvc0.conf $STAGING_DIR/etc/init/ - -# Put the VPX into UTC. 
-rm -f $STAGING_DIR/etc/localtime - -# Configure dns (use same dns as dom0) -cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf - -# Copy over devstack -rm -f /tmp/devstack.tar -tar --exclude='stage' --exclude='xen/xvas' --exclude='xen/nova' -cvf /tmp/devstack.tar $TOP_DIR/../../../devstack -cd $STAGING_DIR/opt/stack/ -tar xf /tmp/devstack.tar -cd $TOP_DIR - -# Configure OVA -VDI_SIZE=$(($VDI_MB*1024*1024)) -PRODUCT_BRAND=${PRODUCT_BRAND:-openstack} -PRODUCT_VERSION=${PRODUCT_VERSION:-001} -BUILD_NUMBER=${BUILD_NUMBER:-001} -LABEL="$PRODUCT_BRAND $PRODUCT_VERSION-$BUILD_NUMBER" -OVA=$STAGING_DIR/tmp/ova.xml -cp $TEMPLATES_DIR/ova.xml.in $OVA -sed -e "s,@VDI_SIZE@,$VDI_SIZE,g" -i $OVA -sed -e "s,@PRODUCT_BRAND@,$PRODUCT_BRAND,g" -i $OVA -sed -e "s,@PRODUCT_VERSION@,$PRODUCT_VERSION,g" -i $OVA -sed -e "s,@BUILD_NUMBER@,$BUILD_NUMBER,g" -i $OVA - -# Directory for xvas -XVA_DIR=$TOP_DIR/xvas - -# Create xva dir -mkdir -p $XVA_DIR +# Set local storage il8n +SR_UUID=`xe sr-list --minimal name-label="Local storage"` +xe sr-param-set uuid=$SR_UUID other-config:i18n-key=local-storage # Clean nova if desired if [ "$CLEAN" = "1" ]; then @@ -210,24 +112,12 @@ if [ ! -d $TOP_DIR/nova ]; then git checkout $NOVA_BRANCH fi -# Run devstack on launch -cat <$STAGING_DIR/etc/rc.local -GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ DO_TGZ=0 bash /opt/stack/devstack/tools/xen/prepare_guest.sh -su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" stack -exit 0 -EOF - # Install plugins cp -pr $TOP_DIR/nova/plugins/xenserver/xenapi/etc/xapi.d /etc/ chmod a+x /etc/xapi.d/plugins/* yum --enablerepo=base install -y parted mkdir -p /boot/guest -# Set local storage il8n -SR_UUID=`xe sr-list --minimal name-label="Local storage"` -xe sr-param-set uuid=$SR_UUID other-config:i18n-key=local-storage - - # Shutdown previous runs DO_SHUTDOWN=${DO_SHUTDOWN:-1} if [ "$DO_SHUTDOWN" = "1" ]; then @@ -248,68 +138,6 @@ if [ "$DO_SHUTDOWN" = "1" ]; then done fi -# Path to head xva. By default keep overwriting the same one to save space -USE_SEPARATE_XVAS=${USE_SEPARATE_XVAS:-0} -if [ "$USE_SEPARATE_XVAS" = "0" ]; then - XVA=$XVA_DIR/$UBUNTU_VERSION.xva -else - XVA=$XVA_DIR/$UBUNTU_VERSION.$GUEST_NAME.xva -fi - -# Clean old xva. In the future may not do this every time. 
-rm -f $XVA - -# Configure the hostname -echo $GUEST_NAME > $STAGING_DIR/etc/hostname - -# Hostname must resolve for rabbit -cat <$STAGING_DIR/etc/hosts -$MGT_IP $GUEST_NAME -127.0.0.1 localhost localhost.localdomain -EOF - -# Configure the network -INTERFACES=$STAGING_DIR/etc/network/interfaces -cp $TEMPLATES_DIR/interfaces.in $INTERFACES -sed -e "s,@ETH1_IP@,$VM_IP,g" -i $INTERFACES -sed -e "s,@ETH1_NETMASK@,$VM_NETMASK,g" -i $INTERFACES -sed -e "s,@ETH2_IP@,$MGT_IP,g" -i $INTERFACES -sed -e "s,@ETH2_NETMASK@,$MGT_NETMASK,g" -i $INTERFACES -sed -e "s,@ETH3_IP@,$PUB_IP,g" -i $INTERFACES -sed -e "s,@ETH3_NETMASK@,$PUB_NETMASK,g" -i $INTERFACES - -# Gracefully cp only if source file/dir exists -function cp_it { - if [ -e $1 ] || [ -d $1 ]; then - cp -pRL $1 $2 - fi -} - -# Copy over your ssh keys and env if desired -COPYENV=${COPYENV:-1} -if [ "$COPYENV" = "1" ]; then - cp_it ~/.ssh $STAGING_DIR/opt/stack/.ssh - cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/opt/stack/.ssh/authorized_keys - cp_it ~/.gitconfig $STAGING_DIR/opt/stack/.gitconfig - cp_it ~/.vimrc $STAGING_DIR/opt/stack/.vimrc - cp_it ~/.bashrc $STAGING_DIR/opt/stack/.bashrc -fi - -# Configure run.sh -cat <$STAGING_DIR/opt/stack/run.sh -#!/bin/bash -cd /opt/stack/devstack -killall screen -UPLOAD_LEGACY_TTY=yes HOST_IP=$PUB_IP VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=1 $STACKSH_PARAMS ./stack.sh -EOF -chmod 755 $STAGING_DIR/opt/stack/run.sh - -# Create xva -if [ ! -e $XVA ]; then - rm -rf /tmp/mkxva* - UID=0 $SCRIPT_DIR/mkxva -o $XVA -t xva -x $OVA $STAGING_DIR $VDI_MB /tmp/ -fi - # Start guest $TOP_DIR/scripts/install-os-vpx.sh -f $XVA -v $VM_BR -m $MGT_BR -p $PUB_BR diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh new file mode 100755 index 00000000..e4de2a1a --- /dev/null +++ b/tools/xen/build_xva.sh @@ -0,0 +1,164 @@ +#!/bin/bash + +# Abort if localrc is not set +if [ ! -e ../../localrc ]; then + echo "You must have a localrc with ALL necessary passwords defined before proceeding." + echo "See the xen README for required passwords." + exit 1 +fi + +# This directory +TOP_DIR=$(cd $(dirname "$0") && pwd) + +# Source params - override xenrc params in your localrc to suite your taste +source xenrc + +# Echo commands +set -o xtrace + +# Directory where we stage the build +STAGING_DIR=$TOP_DIR/stage + +# Option to clean out old stuff +CLEAN=${CLEAN:-0} +if [ "$CLEAN" = "1" ]; then + rm -rf $STAGING_DIR +fi + +# Download our base image. This image is made using prepare_guest.sh +BASE_IMAGE_URL=${BASE_IMAGE_URL:-http://images.ansolabs.com/xen/stage.tgz} +if [ ! -e $STAGING_DIR ]; then + if [ ! -e /tmp/stage.tgz ]; then + wget $BASE_IMAGE_URL -O /tmp/stage.tgz + fi + tar xfz /tmp/stage.tgz + cd $TOP_DIR +fi + +# Free up precious disk space +rm -f /tmp/stage.tgz + +# Make sure we have a stage +if [ ! -d $STAGING_DIR/etc ]; then + echo "Stage is not properly set up!" 
+ exit 1 +fi + +# Directory where our conf files are stored +FILES_DIR=$TOP_DIR/files +TEMPLATES_DIR=$TOP_DIR/templates + +# Directory for supporting script files +SCRIPT_DIR=$TOP_DIR/scripts + +# Version of ubuntu with which we are working +UBUNTU_VERSION=`cat $STAGING_DIR/etc/lsb-release | grep "DISTRIB_CODENAME=" | sed "s/DISTRIB_CODENAME=//"` +KERNEL_VERSION=`ls $STAGING_DIR/boot/vmlinuz* | head -1 | sed "s/.*vmlinuz-//"` + +# Directory for xvas +XVA_DIR=$TOP_DIR/xvas + +# Create xva dir +mkdir -p $XVA_DIR + +# Path to xva +XVA=$XVA_DIR/$GUEST_NAME.xva + +# Setup fake grub +rm -rf $STAGING_DIR/boot/grub/ +mkdir -p $STAGING_DIR/boot/grub/ +cp $TEMPLATES_DIR/menu.lst.in $STAGING_DIR/boot/grub/menu.lst +sed -e "s,@KERNEL_VERSION@,$KERNEL_VERSION,g" -i $STAGING_DIR/boot/grub/menu.lst + +# Setup fstab, tty, and other system stuff +cp $FILES_DIR/fstab $STAGING_DIR/etc/fstab +cp $FILES_DIR/hvc0.conf $STAGING_DIR/etc/init/ + +# Put the VPX into UTC. +rm -f $STAGING_DIR/etc/localtime + +# Configure dns (use same dns as dom0) +cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf + +# Copy over devstack +rm -f /tmp/devstack.tar +tar --exclude='stage' --exclude='xen/xvas' --exclude='xen/nova' -cvf /tmp/devstack.tar $TOP_DIR/../../../devstack +cd $STAGING_DIR/opt/stack/ +tar xf /tmp/devstack.tar +cd $TOP_DIR + +# Configure OVA +VDI_SIZE=$(($VDI_MB*1024*1024)) +PRODUCT_BRAND=${PRODUCT_BRAND:-openstack} +PRODUCT_VERSION=${PRODUCT_VERSION:-001} +BUILD_NUMBER=${BUILD_NUMBER:-001} +LABEL="$PRODUCT_BRAND $PRODUCT_VERSION-$BUILD_NUMBER" +OVA=$STAGING_DIR/tmp/ova.xml +cp $TEMPLATES_DIR/ova.xml.in $OVA +sed -e "s,@VDI_SIZE@,$VDI_SIZE,g" -i $OVA +sed -e "s,@PRODUCT_BRAND@,$PRODUCT_BRAND,g" -i $OVA +sed -e "s,@PRODUCT_VERSION@,$PRODUCT_VERSION,g" -i $OVA +sed -e "s,@BUILD_NUMBER@,$BUILD_NUMBER,g" -i $OVA + +# Run devstack on launch +cat <$STAGING_DIR/etc/rc.local +GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ DO_TGZ=0 bash /opt/stack/devstack/tools/xen/prepare_guest.sh +su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" stack +exit 0 +EOF + +# Clean old xva. In the future may not do this every time. +rm -f $XVA + +# Configure the hostname +echo $GUEST_NAME > $STAGING_DIR/etc/hostname + +# Hostname must resolve for rabbit +cat <$STAGING_DIR/etc/hosts +$MGT_IP $GUEST_NAME +127.0.0.1 localhost localhost.localdomain +EOF + +# Configure the network +INTERFACES=$STAGING_DIR/etc/network/interfaces +cp $TEMPLATES_DIR/interfaces.in $INTERFACES +sed -e "s,@ETH1_IP@,$VM_IP,g" -i $INTERFACES +sed -e "s,@ETH1_NETMASK@,$VM_NETMASK,g" -i $INTERFACES +sed -e "s,@ETH2_IP@,$MGT_IP,g" -i $INTERFACES +sed -e "s,@ETH2_NETMASK@,$MGT_NETMASK,g" -i $INTERFACES +sed -e "s,@ETH3_IP@,$PUB_IP,g" -i $INTERFACES +sed -e "s,@ETH3_NETMASK@,$PUB_NETMASK,g" -i $INTERFACES + +# Gracefully cp only if source file/dir exists +function cp_it { + if [ -e $1 ] || [ -d $1 ]; then + cp -pRL $1 $2 + fi +} + +# Copy over your ssh keys and env if desired +COPYENV=${COPYENV:-1} +if [ "$COPYENV" = "1" ]; then + cp_it ~/.ssh $STAGING_DIR/opt/stack/.ssh + cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/opt/stack/.ssh/authorized_keys + cp_it ~/.gitconfig $STAGING_DIR/opt/stack/.gitconfig + cp_it ~/.vimrc $STAGING_DIR/opt/stack/.vimrc + cp_it ~/.bashrc $STAGING_DIR/opt/stack/.bashrc +fi + +# Configure run.sh +cat <$STAGING_DIR/opt/stack/run.sh +#!/bin/bash +cd /opt/stack/devstack +killall screen +UPLOAD_LEGACY_TTY=yes HOST_IP=$PUB_IP VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=1 $STACKSH_PARAMS ./stack.sh +EOF +chmod 755 $STAGING_DIR/opt/stack/run.sh + +# Create xva +if [ ! 
-e $XVA ]; then + rm -rf /tmp/mkxva* + UID=0 $SCRIPT_DIR/mkxva -o $XVA -t xva -x $OVA $STAGING_DIR $VDI_MB /tmp/ +fi + +echo "Built $XVA. If your dom0 is on a different machine, copy this to [devstackdir]/tools/xen/$XVA" diff --git a/tools/xen/xenrc b/tools/xen/xenrc new file mode 100644 index 00000000..246ac16b --- /dev/null +++ b/tools/xen/xenrc @@ -0,0 +1,38 @@ +#!/bin/bash + +# Name of this guest +GUEST_NAME=${GUEST_NAME:-ALLINONE} + +# Size of image +VDI_MB=${VDI_MB:-2500} + +# VM Password +GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} + +# Our nova host's network info +VM_IP=${VM_IP:-10.255.255.255} # A host-only ip that let's the interface come up, otherwise unused +MGT_IP=${MGT_IP:-172.16.100.55} +PUB_IP=${PUB_IP:-192.168.1.55} + +# Public network +PUB_BR=${PUB_BR:-xenbr0} +PUB_NETMASK=${PUB_NETMASK:-255.255.255.0} + +# VM network params +VM_NETMASK=${VM_NETMASK:-255.255.255.0} +VM_BR=${VM_BR:-xapi1} +VM_VLAN=${VM_VLAN:-100} + +# MGMT network params +MGT_NETMASK=${MGT_NETMASK:-255.255.255.0} +MGT_BR=${MGT_BR:-xapi2} +MGT_VLAN=${MGT_VLAN:-101} + +# XVA Directory +XVA_DIR=${XVA_DIR:-xvas} + +# Path to xva file +XVA=${XVA:-$XVA_DIR/$GUEST_NAME.xva } + +# Source params +cd ../.. && source ./stackrc && cd $TOP_DIR From 44b5736d23aa544ea4ac1c9feadd4724b18102be Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 7 Feb 2012 18:13:44 +0100 Subject: [PATCH 325/967] Check if screen is installed before using it. Fixes 928377 Change-Id: Ie34860e6d08a457dd52a6c3106d63b54c284f6f3 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index c40b24c9..093d5d83 100755 --- a/stack.sh +++ b/stack.sh @@ -90,7 +90,7 @@ source ./stackrc DEST=${DEST:-/opt/stack} # Check to see if we are already running a stack.sh -if screen -ls | egrep -q "[0-9].stack"; then +if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].stack"; then echo "You are already running a stack.sh session." echo "To rejoin this session type 'screen -x stack'." echo "To destroy this session, kill the running screen." 
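Note on the hunk just above: it guards the running-session check with `type -p screen` so stack.sh no longer errors out on hosts where screen is not installed yet. A minimal sketch of the same guard pattern, using only names that already appear in the patch (`type -p` is a bash builtin that exits non-zero when the command is missing):

    # Probe for screen before calling it; the right-hand side of && only
    # runs when `type -p` finds the binary, so a bare host skips the check.
    if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].stack"; then
        echo "An existing stack.sh screen session was found."
    fi
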
From d1f5243d91de2756fecd52c3b3a15a4f0a488fdf Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 9 Feb 2012 03:50:57 +0000 Subject: [PATCH 326/967] Uses keystone client instead of keystone-manage * Depends on https://review.openstack.org/#change,3948 * Depends on https://review.openstack.org/#change,3945 * Fixes bug 928558 Change-Id: I83c337e3b92a9ab0dac254afe83673185867b7b0 --- files/keystone_data.sh | 184 ++++++++++++++--------------------------- openrc | 2 +- stack.sh | 20 +---- 3 files changed, 67 insertions(+), 139 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 39952b16..408e36d3 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -1,112 +1,86 @@ #!/bin/bash -BIN_DIR=${BIN_DIR:-.} # Tenants -ADMIN_TENANT=`$BIN_DIR/keystone-manage tenant --id-only create \ - tenant_name=admin` -DEMO_TENANT=`$BIN_DIR/keystone-manage tenant --id-only create \ - tenant_name=demo` -INVIS_TENANT=`$BIN_DIR/keystone-manage tenant --id-only create \ - tenant_name=invisible_to_admin` +export SERVICE_TOKEN=$SERVICE_TOKEN +export SERVICE_ENDPOINT=$SERVICE_ENDPOINT + +function get_id () { + echo `$@ | grep id | awk '{print $4}'` +} + +ADMIN_TENANT=`get_id keystone tenant-create --name=admin` +DEMO_TENANT=`get_id keystone tenant-create --name=demo` +INVIS_TENANT=`get_id keystone tenant-create --name=invisible_to_admin` # Users -ADMIN_USER=`$BIN_DIR/keystone-manage user --id-only create \ - name=admin \ - "password=%ADMIN_PASSWORD%" \ - email=admin@example.com` -DEMO_USER=`$BIN_DIR/keystone-manage user --id-only create \ - name=demo \ - "password=%ADMIN_PASSWORD%" \ - email=demo@example.com` +ADMIN_USER=`get_id keystone user-create \ + --name=admin \ + --pass="$ADMIN_PASSWORD" \ + --email=admin@example.com` +DEMO_USER=`get_id keystone user-create \ + --name=demo \ + --pass="$ADMIN_PASSWORD" \ + --email=admin@example.com` # Roles -ADMIN_ROLE=`$BIN_DIR/keystone-manage role --id-only create \ - name=admin` -MEMBER_ROLE=`$BIN_DIR/keystone-manage role --id-only create \ - name=Member` -KEYSTONEADMIN_ROLE=`$BIN_DIR/keystone-manage role --id-only create \ - name=KeystoneAdmin` -KEYSTONESERVICE_ROLE=`$BIN_DIR/keystone-manage role --id-only create \ - name=KeystoneServiceAdmin` -SYSADMIN_ROLE=`$BIN_DIR/keystone-manage role --id-only create \ - name=sysadmin` -NETADMIN_ROLE=`$BIN_DIR/keystone-manage role --id-only create \ - name=netadmin` +ADMIN_ROLE=`get_id keystone role-create --name=admin` +MEMBER_ROLE=`get_id keystone role-create --name=Member` +KEYSTONEADMIN_ROLE=`get_id keystone role-create --name=KeystoneAdmin` +KEYSTONESERVICE_ROLE=`get_id keystone role-create --name=KeystoneServiceAdmin` +SYSADMIN_ROLE=`get_id keystone role-create --name=sysadmin` +NETADMIN_ROLE=`get_id keystone role-create --name=netadmin` # Add Roles to Users in Tenants -$BIN_DIR/keystone-manage role add_user_role \ - role=$ADMIN_ROLE \ - user=$ADMIN_USER \ - tenant=$ADMIN_TENANT -$BIN_DIR/keystone-manage role add_user_role \ - role=$MEMBER_ROLE \ - user=$DEMO_USER \ - tenant=$DEMO_TENANT -$BIN_DIR/keystone-manage role add_user_role \ - role=$SYSADMIN_ROLE \ - user=$DEMO_USER \ - tenant=$DEMO_TENANT -$BIN_DIR/keystone-manage role add_user_role \ - role=$NETADMIN_ROLE \ - user=$DEMO_USER \ - tenant=$DEMO_TENANT -$BIN_DIR/keystone-manage role add_user_role \ - role=$MEMBER_ROLE \ - user=$DEMO_USER \ - tenant=$INVIS_TENANT -$BIN_DIR/keystone-manage role add_user_role \ - role=$ADMIN_ROLE \ - user=$ADMIN_USER 
\ - tenant=$DEMO_TENANT +keystone add-user-role $ADMIN_USER $ADMIN_ROLE $ADMIN_TENANT +keystone add-user-role $DEMO_USER $MEMBER_ROLE $DEMO_TENANT +keystone add-user-role $DEMO_USER $SYSADMIN_ROLE $DEMO_TENANT +keystone add-user-role $DEMO_USER $NETADMIN_ROLE $DEMO_TENANT +keystone add-user-role $DEMO_USER $MEMBER_ROLE $INVIS_TENANT +keystone add-user-role $ADMIN_USER $ADMIN_ROLE $DEMO_TENANT # TODO(termie): these two might be dubious -$BIN_DIR/keystone-manage role add_user_role \ - role=$KEYSTONEADMIN_ROLE \ - user=$ADMIN_USER \ - tenant=$ADMIN_TENANT -$BIN_DIR/keystone-manage role add_user_role \ - role=$KEYSTONESERVICE_ROLE \ - user=$ADMIN_USER \ - tenant=$ADMIN_TENANT +keystone add-user-role $ADMIN_USER $KEYSTONEADMIN_ROLE $ADMIN_TENANT +keystone add-user-role $ADMIN_USER $KEYSTONESERVICE_ROLE $ADMIN_TENANT # Services -$BIN_DIR/keystone-manage service create \ - name=nova \ - service_type=compute \ - "description=Nova Compute Service" - -$BIN_DIR/keystone-manage service create \ - name=ec2 \ - service_type=ec2 \ - "description=EC2 Compatibility Layer" - -$BIN_DIR/keystone-manage service create \ - name=glance \ - service_type=image \ - "description=Glance Image Service" - -$BIN_DIR/keystone-manage service create \ - name=keystone \ - service_type=identity \ - "description=Keystone Identity Service" +keystone service-create \ + --name=nova \ + --type=compute \ + --description="Nova Compute Service" + +keystone service-create \ + --name=ec2 \ + --type=ec2 \ + --description="EC2 Compatibility Layer" + +keystone service-create \ + --name=glance \ + --type=image \ + --description="Glance Image Service" + +keystone service-create \ + --name=keystone \ + --type=identity \ + --description="Keystone Identity Service" if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then - $BIN_DIR/keystone-manage service create \ - name=swift \ - service_type=object-store \ - "description=Swift Service" + keystone service-create \ + --name=swift \ + --type="object-store" \ + --description="Swift Service" fi # create ec2 creds and parse the secret and access key returned -RESULT=`$BIN_DIR/keystone-manage ec2 create user_id=$ADMIN_USER tenant_id=$ADMIN_TENANT` -ADMIN_ACCESS=`echo $RESULT | python -c "import sys; import json; result = json.loads(sys.stdin.read()); print result['access'];"` -ADMIN_SECRET=`echo $RESULT | python -c "import sys; import json; result = json.loads(sys.stdin.read()); print result['secret'];"` +RESULT=`keystone ec2-create-credentials --tenant_id=$ADMIN_TENANT --user_id=$ADMIN_USER` + echo `$@ | grep id | awk '{print $4}'` +ADMIN_ACCESS=`echo "$RESULT" | grep access | awk '{print $4}'` +ADMIN_SECRET=`echo "$RESULT" | grep secret | awk '{print $4}'` -RESULT=`$BIN_DIR/keystone-manage ec2 create user_id=$DEMO_USER tenant_id=$DEMO_TENANT` -DEMO_ACCESS=`echo $RESULT | python -c "import sys; import json; result = json.loads(sys.stdin.read()); print result['access'];"` -DEMO_SECRET=`echo $RESULT | python -c "import sys; import json; result = json.loads(sys.stdin.read()); print result['secret'];"` +RESULT=`keystone ec2-create-credentials --tenant_id=$DEMO_TENANT --user_id=$DEMO_USER` +DEMO_ACCESS=`echo "$RESULT" | grep access | awk '{print $4}'` +DEMO_SECRET=`echo "$RESULT" | grep secret | awk '{print $4}'` # write the secret and access to ec2rc cat > $DEVSTACK_DIR/ec2rc < Date: Thu, 9 Feb 2012 16:27:58 +0100 Subject: [PATCH 327/967] Remove TMUX support. After discussion on the mailing list tmux is not used much and since it was broken for a while remove that support. Fixes bug 928967. 
Change-Id: Iff1eea45190f4ef873c60b3563fe94359702446d --- stack.sh | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/stack.sh b/stack.sh index c0c30c28..6968c909 100755 --- a/stack.sh +++ b/stack.sh @@ -1284,17 +1284,12 @@ fi function screen_it { NL=`echo -ne '\015'` if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then - if [[ "$USE_TMUX" =~ "yes" ]]; then - tmux new-window -t stack -a -n "$1" "bash" - tmux send-keys "$2" C-M - else - screen -S stack -X screen -t $1 - # sleep to allow bash to be ready to be send the command - we are - # creating a new window in screen and then sends characters, so if - # bash isn't running by the time we send the command, nothing happens - sleep 1.5 - screen -S stack -p $1 -X stuff "$2$NL" - fi + screen -S stack -X screen -t $1 + # sleep to allow bash to be ready to be send the command - we are + # creating a new window in screen and then sends characters, so if + # bash isn't running by the time we send the command, nothing happens + sleep 1.5 + screen -S stack -p $1 -X stuff "$2$NL" fi } From 12c7d4b30327c50d7fa35595e9854ee9d600eb5f Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 9 Feb 2012 16:44:32 +0100 Subject: [PATCH 328/967] Add object-expirer. Fixes bug 928198. Change-Id: I0f3bbe323cdb285714d1c80fe7e7b7d9842ece06 --- files/swift/object-server.conf | 2 ++ 1 file changed, 2 insertions(+) diff --git a/files/swift/object-server.conf b/files/swift/object-server.conf index 06fbffea..2f888a27 100644 --- a/files/swift/object-server.conf +++ b/files/swift/object-server.conf @@ -18,3 +18,5 @@ vm_test_mode = yes [object-updater] [object-auditor] + +[object-expirer] From 0cbe851b294a114e7ffd7c669f88cad41d019292 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 9 Feb 2012 16:36:15 +0100 Subject: [PATCH 329/967] Launch screen with bash as shell Since screen commands are bash specifics make sure we launch screen with bash login shell instead of other shells. Fixes bug928883. 
Change-Id: I62b4c7182682deb4ef16ed0cf1a3b5a130c1c4ee --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index c0c30c28..f54ef349 100755 --- a/stack.sh +++ b/stack.sh @@ -1299,7 +1299,7 @@ function screen_it { } # create a new named screen to run processes in -screen -d -m -S stack -t stack +screen -d -m -S stack -t stack -s /bin/bash sleep 1 # set a reasonable statusbar screen -r stack -X hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H" From febded24995fc10c7890e84c6df71bc48b5f2a8c Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 3 Feb 2012 20:17:22 +0000 Subject: [PATCH 330/967] Configure vnc url endpoints on compute hosts * The compute manager constructs access urls, and so needs this info Change-Id: Ifa5f515a071d5006f6fc5cc6103eff91eafdb2b7 --- stack.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index c4d08d9d..beb94688 100755 --- a/stack.sh +++ b/stack.sh @@ -1141,11 +1141,11 @@ add_nova_flag "--vlan_interface=$VLAN_INTERFACE" add_nova_flag "--sql_connection=$BASE_SQL_CONN/nova" add_nova_flag "--libvirt_type=$LIBVIRT_TYPE" add_nova_flag "--instance_name_template=${INSTANCE_NAME_PREFIX}%08x" -if [[ "$ENABLED_SERVICES" =~ "n-novnc" ]]; then +# All nova-compute workers need to know the vnc configuration options +# These settings don't hurt anything if n-xvnc and n-novnc are disabled +if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} add_nova_flag "--novncproxy_base_url=$NOVNCPROXY_URL" -fi -if [[ "$ENABLED_SERVICES" =~ "n-xvnc" ]]; then XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"} add_nova_flag "--xvpvncproxy_base_url=$XVPVNCPROXY_URL" fi From 53284c8ed04e249e81c07532bdeb6876f2404bb7 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 8 Feb 2012 23:33:41 +0000 Subject: [PATCH 331/967] Change to use the right keystonelight repo Change-Id: Ie216b9eade59f09c76d193a6d0fd353a02dd575f --- stackrc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackrc b/stackrc index d30bf667..2274e624 100644 --- a/stackrc +++ b/stackrc @@ -15,8 +15,8 @@ GLANCE_REPO=https://github.com/openstack/glance.git GLANCE_BRANCH=master # unified auth system (manages accounts/tokens) -KEYSTONE_REPO=https://github.com/termie/keystonelight.git -KEYSTONE_BRANCH=master +KEYSTONE_REPO=https://github.com/openstack/keystone.git +KEYSTONE_BRANCH=redux # a websockets/html5 or flash powered VNC console for vm instances NOVNC_REPO=https://github.com/cloudbuilders/noVNC.git From 53fca56da2e257dff90ba5400945bb1e227e7f38 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Sat, 11 Feb 2012 00:17:31 -0800 Subject: [PATCH 332/967] Logout and delete iscsi sessions Change-Id: I4906f943adc226c2dba9faf5e1595f47c4466432 --- stack.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stack.sh b/stack.sh index beb94688..3d77a6fb 100755 --- a/stack.sh +++ b/stack.sh @@ -899,6 +899,10 @@ if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then echo $instances | xargs -n1 virsh undefine || true fi + # Logout and delete iscsi sessions + sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | xargs sudo iscsiadm --mode node --logout || true + sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | sudo iscsiadm 
--mode node --op delete || true + # Clean out the instances directory. sudo rm -rf $NOVA_DIR/instances/* fi From 689b2ac1b925e3957bb5af7a5efa1cc33e8070d7 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 16 Dec 2011 20:23:07 +0000 Subject: [PATCH 333/967] switch public_interface to br100 so that floating ips don't get bumped between interfaces * Set VLAN_INTERFACE to eth0 by default Change-Id: I8a85465fa389af3af04d91775a99df58b2da575a --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index beb94688..9c91f031 100755 --- a/stack.sh +++ b/stack.sh @@ -271,14 +271,14 @@ function read_password { # FIXME: more documentation about why these are important flags. Also # we should make sure we use the same variable names as the flag names. -PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-eth0} +PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-br100} FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28} NET_MAN=${NET_MAN:-FlatDHCPManager} EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST} FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-br100} -VLAN_INTERFACE=${VLAN_INTERFACE:-$PUBLIC_INTERFACE} +VLAN_INTERFACE=${VLAN_INTERFACE:-eth0} # Test floating pool and range are used for testing. They are defined # here until the admin APIs can replace nova-manage From d1dd25e6a60d7987f94cb39f2150d8083f562032 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 13 Feb 2012 11:22:36 -0600 Subject: [PATCH 334/967] Fixes bug 885087: /etc/nova/nova.conf Move Nova config files (nova.conf, api-paste.ini) to /etc/nova so all binaries can find them, particularly /usr/local/bin/nova-manage. Change-Id: I2fb71159c16dffc610dcfda4158593dbd6423c5e --- stack.sh | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/stack.sh b/stack.sh index f023f036..79c61163 100755 --- a/stack.sh +++ b/stack.sh @@ -793,20 +793,28 @@ fi # Nova # ---- + +# Put config files in /etc/nova for everyone to find +NOVA_CONF=/etc/nova +if [[ ! -d $NOVA_CONF ]]; then + sudo mkdir -p $NOVA_CONF +fi +sudo chown `whoami` $NOVA_CONF + if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then # We are going to use a sample http middleware configuration based on the # one from the keystone project to launch nova. This paste config adds # the configuration required for nova to validate keystone tokens. 
# First we add a some extra data to the default paste config from nova - cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_DIR/bin/nova-api-paste.ini + cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF # Then we add our own service token to the configuration - sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini + sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_CONF/api-paste.ini # Finally, we change the pipelines in nova to use keystone function replace_pipeline() { - sed "/\[pipeline:$1\]/,/\[/s/^pipeline = .*/pipeline = $2/" -i $NOVA_DIR/bin/nova-api-paste.ini + sed "/\[pipeline:$1\]/,/\[/s/^pipeline = .*/pipeline = $2/" -i $NOVA_CONF/api-paste.ini } replace_pipeline "ec2cloud" "ec2faultwrap logrequest totoken authtoken keystonecontext cloudrequest authorizer validator ec2executor" replace_pipeline "ec2admin" "ec2faultwrap logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor" @@ -1099,11 +1107,11 @@ if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then fi function add_nova_flag { - echo "$1" >> $NOVA_DIR/bin/nova.conf + echo "$1" >> $NOVA_CONF/nova.conf } # (re)create nova.conf -rm -f $NOVA_DIR/bin/nova.conf +rm -f $NOVA_CONF/nova.conf add_nova_flag "--verbose" add_nova_flag "--allow_admin_api" add_nova_flag "--scheduler_driver=$SCHEDULER" @@ -1163,7 +1171,7 @@ fi VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} add_nova_flag "--vncserver_listen=$VNCSERVER_LISTEN" add_nova_flag "--vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS" -add_nova_flag "--api_paste_config=$NOVA_DIR/bin/nova-api-paste.ini" +add_nova_flag "--api_paste_config=$NOVA_CONF/api-paste.ini" add_nova_flag "--image_service=nova.image.glance.GlanceImageService" add_nova_flag "--ec2_dmz_host=$EC2_DMZ_HOST" add_nova_flag "--rabbit_host=$RABBIT_HOST" From 80756ea71a86e91b2ec45ac25875a2e928b183be Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 1 Feb 2012 18:01:01 -0600 Subject: [PATCH 335/967] Blueprint cli-auth * Add proper test for swift in client-env.sh * Remove all VERIFY variables; nothing beyond the 4 OS_ vars is set now * Update exercises for Keystone 2.0 auth * swift understands Keystone 2.0 auth now, don't download custom binary * cleanup uploaded bundle in bundle.sh Change-Id: I99c818d81534bbf93c0e142513acb80e5fe613ea --- exercises/bundle.sh | 3 +++ exercises/client-env.sh | 46 ++++++++++----------------------------- exercises/floating_ips.sh | 13 ++--------- exercises/volumes.sh | 13 ++--------- stack.sh | 6 ----- 5 files changed, 18 insertions(+), 63 deletions(-) diff --git a/exercises/bundle.sh b/exercises/bundle.sh index ca35c9af..d5c78af3 100755 --- a/exercises/bundle.sh +++ b/exercises/bundle.sh @@ -46,3 +46,6 @@ if ! timeout $REGISTER_TIMEOUT sh -c "while euca-describe-images | grep '$AMI' | echo "Image $AMI not available within $REGISTER_TIMEOUT seconds" exit 1 fi + +# Clean up +euca-deregister $AMI diff --git a/exercises/client-env.sh b/exercises/client-env.sh index f4172bcb..a15a5c04 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -52,18 +52,13 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then if [[ "$SKIP_EXERCISES" =~ "key" ]] ; then STATUS_KEYSTONE="Skipped" else - if [[ -n "$VERIFY" ]]; then - # Save original environment - xOS_AUTH_URL=$OS_AUTH_URL - xOS_TENANT_NAME=$OS_TENANT_NAME - xOS_USERNAME=$OS_USERNAME - xOS_PASSWORD=$OS_PASSWORD - # keystone can't handle a trailing '/' - export OS_AUTH_URL=${OS_AUTH_URL%/} - # does any non-admin request work? 
- export OS_USERNAME=admin - export OS_TENANT_NAME=admin - fi + # We need to run the keystone test as admin since there doesn't + # seem to be anything to test the cli vars that runs as a user + # tenant-list should do that, it isn't implemented (yet) + xOS_TENANT_NAME=$OS_TENANT_NAME + xOS_USERNAME=$OS_USERNAME + export OS_USERNAME=admin + export OS_TENANT_NAME=admin echo -e "\nTest Keystone" if keystone service-list; then @@ -72,13 +67,9 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then STATUS_KEYSTONE="Failed" RETURN=1 fi - if [[ -n "$VERIFY" ]]; then - # Save original environment - OS_AUTH_URL=$xOS_AUTH_URL - OS_TENANT_NAME=$xOS_TENANT_NAME - OS_USERNAME=$xOS_USERNAME - OS_PASSWORD=$xOS_PASSWORD - fi + + OS_TENANT_NAME=$xOS_TENANT_NAME + OS_USERNAME=$xOS_USERNAME fi fi @@ -89,12 +80,6 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then if [[ "$SKIP_EXERCISES" =~ "n-api" ]] ; then STATUS_NOVA="Skipped" else - if [[ -n "$VERIFY" ]]; then - # Known novaclient breakage: - # NOVA_VERSION must be set or nova silently fails - export NOVA_VERSION=2 - fi - echo -e "\nTest Nova" if nova flavor-list; then STATUS_NOVA="Succeeded" @@ -112,14 +97,6 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then if [[ "$SKIP_EXERCISES" =~ "g-api" ]] ; then STATUS_GLANCE="Skipped" else - if [[ -n "$VERIFY" ]]; then - # Known glance client differage: - export OS_AUTH_TENANT=$OS_TENANT_NAME - export OS_AUTH_USER=$OS_USERNAME - export OS_AUTH_KEY=$OS_PASSWORD - export OS_AUTH_STRATEGY=keystone - fi - echo -e "\nTest Glance" if glance index; then STATUS_GLANCE="Succeeded" @@ -138,8 +115,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then STATUS_SWIFT="Skipped" else echo -e "\nTest Swift" - # FIXME(dtroyer): implement swift test - if true; then + if swift stat; then STATUS_SWIFT="Succeeded" else STATUS_SWIFT="Failed" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index c1cffa4a..233313e8 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -48,15 +48,6 @@ DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova} # Additional floating IP pool and range TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} -# Get a token for clients that don't support service catalog -# ========================================================== - -# manually create a token by querying keystone (sending JSON data). Keystone -# returns a token and catalog of endpoints. We use python to parse the token -# and save it. 
- -TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OS_USERNAME\", \"password\": \"$OS_PASSWORD\"}}}" -H "Content-type: application/json" ${OS_AUTH_URL%/}/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"` - # Launching a server # ================== @@ -70,10 +61,10 @@ nova list nova image-list # But we recommend using glance directly -glance -f -A $TOKEN -H $GLANCE_HOST index +glance -f index # Grab the id of the image to launch -IMAGE=`glance -f -A $TOKEN -H $GLANCE_HOST index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` +IMAGE=`glance -f index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` # Security Groups # --------------- diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 1f7c25f5..1fcc034b 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -34,15 +34,6 @@ DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} # Boot this image, use first AMi image if unset DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} -# Get a token for clients that don't support service catalog -# ========================================================== - -# manually create a token by querying keystone (sending JSON data). Keystone -# returns a token and catalog of endpoints. We use python to parse the token -# and save it. - -TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OS_USERNAME\", \"password\": \"$OS_PASSWORD\"}}}" -H "Content-type: application/json" ${OS_AUTH_URL%/}/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"` - # Launching a server # ================== @@ -56,10 +47,10 @@ nova list nova image-list # But we recommend using glance directly -glance -f -A $TOKEN -H $GLANCE_HOST index +glance -f index # Grab the id of the image to launch -IMAGE=`glance -f -A $TOKEN -H $GLANCE_HOST index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` +IMAGE=`glance -f index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` # determinine instance type # ------------------------- diff --git a/stack.sh b/stack.sh index f023f036..2d5e0b8a 100755 --- a/stack.sh +++ b/stack.sh @@ -981,12 +981,6 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then # We install the memcache server as this is will be used by the # middleware to cache the tokens auths for a long this is needed. apt_get install memcached - - # We need a special version of bin/swift which understand the - # OpenStack api 2.0, we download it until this is getting - # integrated in swift. 
- sudo https_proxy=$https_proxy curl -s -o/usr/local/bin/swift \ - 'https://review.openstack.org/gitweb?p=openstack/swift.git;a=blob_plain;f=bin/swift;hb=48bfda6e2fdf3886c98bd15649887d54b9a2574e' else swift_auth_server=tempauth fi From 6325216cde36c3af89161c0be0d7b652fdc0fccd Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 8 Feb 2012 00:54:20 +0000 Subject: [PATCH 336/967] Update code to remove iscsi targets * Fixes bug 928475 Change-Id: I9b54436522422d865c7add750d612c371945817a --- stack.sh | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/stack.sh b/stack.sh index cccde295..0ecbdf8a 100755 --- a/stack.sh +++ b/stack.sh @@ -1077,15 +1077,12 @@ if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then fi if sudo vgs $VOLUME_GROUP; then + # Remove nova iscsi targets + sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true # Clean out existing volumes for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do # VOLUME_NAME_PREFIX prefixes the LVs we want if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then - tid=`egrep "^tid.+$lv" /proc/net/iet/volume | cut -f1 -d' ' | tr ':' '='` - if [[ -n "$tid" ]]; then - lun=`egrep "lun.+$lv" /proc/net/iet/volume | cut -f1 -d' ' | tr ':' '=' | tr -d '\t'` - sudo ietadm --op delete --$tid --$lun - fi sudo lvremove -f $VOLUME_GROUP/$lv fi done From e830a780f03753b18e423c0e66abc5813e84d0cf Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 8 Feb 2012 11:49:28 -0600 Subject: [PATCH 337/967] Not all distros include a symlink from euca-describe-group to euca-describe-groups Change-Id: Iaba71c36b405d2891aebdb45c1b9fd84853988f5 --- exercises/euca.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index e569196a..834e4ecf 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -39,9 +39,9 @@ IMAGE=`euca-describe-images | grep machine | cut -f2 | head -n1` SECGROUP=euca_secgroup # Add a secgroup -if ! euca-describe-group | grep -q $SECGROUP; then +if ! euca-describe-groups | grep -q $SECGROUP; then euca-add-group -d "$SECGROUP description" $SECGROUP - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-group | grep -q $SECGROUP; do sleep 1; done"; then + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-groups | grep -q $SECGROUP; do sleep 1; done"; then echo "Security group not created" exit 1 fi From 36867add0ea38dc88f58a1b77bf28f762f384a91 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 9 Feb 2012 16:27:58 +0100 Subject: [PATCH 338/967] Remove TMUX support. After discussion on the mailing list tmux is not used much and since it was broken for a while remove that support. Fixes bug 928967. 
Change-Id: Iff1eea45190f4ef873c60b3563fe94359702446d --- stack.sh | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/stack.sh b/stack.sh index 0ecbdf8a..00cc1dd2 100755 --- a/stack.sh +++ b/stack.sh @@ -1236,17 +1236,12 @@ fi function screen_it { NL=`echo -ne '\015'` if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then - if [[ "$USE_TMUX" =~ "yes" ]]; then - tmux new-window -t stack -a -n "$1" "bash" - tmux send-keys "$2" C-M - else - screen -L -S stack -X screen -t $1 - # sleep to allow bash to be ready to be send the command - we are - # creating a new window in screen and then sends characters, so if - # bash isn't running by the time we send the command, nothing happens - sleep 1.5 - screen -L -S stack -p $1 -X stuff "$2$NL" - fi + screen -S stack -X screen -t $1 + # sleep to allow bash to be ready to be send the command - we are + # creating a new window in screen and then sends characters, so if + # bash isn't running by the time we send the command, nothing happens + sleep 1.5 + screen -S stack -p $1 -X stuff "$2$NL" fi } From 6869d1ab7812c378ae65cab92ed6b84b699ec019 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 9 Feb 2012 16:44:32 +0100 Subject: [PATCH 339/967] Add object-expirer. Fixes bug 928198. Change-Id: I0f3bbe323cdb285714d1c80fe7e7b7d9842ece06 --- files/swift/object-server.conf | 2 ++ 1 file changed, 2 insertions(+) diff --git a/files/swift/object-server.conf b/files/swift/object-server.conf index 06fbffea..2f888a27 100644 --- a/files/swift/object-server.conf +++ b/files/swift/object-server.conf @@ -18,3 +18,5 @@ vm_test_mode = yes [object-updater] [object-auditor] + +[object-expirer] From 73c70890785e2c9e7312fe5ad579bf9a9d400de1 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 9 Feb 2012 16:36:15 +0100 Subject: [PATCH 340/967] Launch screen with bash as shell Since screen commands are bash specifics make sure we launch screen with bash login shell instead of other shells. Fixes bug928883. 
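The following is a minimal sketch of the behaviour this change relies on, assuming a placeholder window name and service command; the screen invocations mirror the ones shown in the diffs above. A command "stuffed" into a window is only interpreted as intended if that window is running bash, hence the explicit -s /bin/bash when the master session is created.

    #!/bin/bash
    # Start the master session with an explicit bash shell so every window
    # created inside it runs bash rather than the user's login shell.
    screen -d -m -S stack -t stack -s /bin/bash
    sleep 1

    # Create a window and send it a bash command line terminated by a
    # carriage return, the same trick screen_it uses in stack.sh.
    # "n-api" and the nova-api path are placeholders for illustration only.
    NL=`echo -ne '\015'`
    screen -S stack -X screen -t n-api
    sleep 1.5
    screen -S stack -p n-api -X stuff "cd /opt/stack/nova && ./bin/nova-api$NL"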
Change-Id: I62b4c7182682deb4ef16ed0cf1a3b5a130c1c4ee --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 00cc1dd2..f9ac5bab 100755 --- a/stack.sh +++ b/stack.sh @@ -1246,7 +1246,7 @@ function screen_it { } # create a new named screen to run processes in -screen -L -d -m -S stack -t stack +screen -d -m -S stack -t stack -s /bin/bash sleep 1 # set a reasonable statusbar screen -r stack -X hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H" From 2fcb6661cc13d77add8133e769b3c1650e093272 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 3 Feb 2012 20:17:22 +0000 Subject: [PATCH 341/967] Configure vnc url endpoints on compute hosts * The compute manager constructs access urls, and so needs this info Change-Id: Ifa5f515a071d5006f6fc5cc6103eff91eafdb2b7 --- stack.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index f9ac5bab..596ebd6f 100755 --- a/stack.sh +++ b/stack.sh @@ -1141,11 +1141,11 @@ add_nova_flag "--vlan_interface=$VLAN_INTERFACE" add_nova_flag "--sql_connection=$BASE_SQL_CONN/nova" add_nova_flag "--libvirt_type=$LIBVIRT_TYPE" add_nova_flag "--instance_name_template=${INSTANCE_NAME_PREFIX}%08x" -if [[ "$ENABLED_SERVICES" =~ "n-novnc" ]]; then +# All nova-compute workers need to know the vnc configuration options +# These settings don't hurt anything if n-xvnc and n-novnc are disabled +if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} add_nova_flag "--novncproxy_base_url=$NOVNCPROXY_URL" -fi -if [[ "$ENABLED_SERVICES" =~ "n-xvnc" ]]; then XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"} add_nova_flag "--xvpvncproxy_base_url=$XVPVNCPROXY_URL" fi From 4f830e11f3e2497693243a423913d5799af71012 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Sat, 11 Feb 2012 00:17:31 -0800 Subject: [PATCH 342/967] Logout and delete iscsi sessions Change-Id: I4906f943adc226c2dba9faf5e1595f47c4466432 --- stack.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stack.sh b/stack.sh index 596ebd6f..5cc1424b 100755 --- a/stack.sh +++ b/stack.sh @@ -899,6 +899,10 @@ if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then echo $instances | xargs -n1 virsh undefine || true fi + # Logout and delete iscsi sessions + sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | xargs sudo iscsiadm --mode node --logout || true + sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | sudo iscsiadm --mode node --op delete || true + # Clean out the instances directory. sudo rm -rf $NOVA_DIR/instances/* fi From 00596bb5f227732045c0aa0fab38f2e0c6054aca Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 16 Dec 2011 20:23:07 +0000 Subject: [PATCH 343/967] switch public_interface to br100 so that floating ips don't get bumped between interfaces * Set VLAN_INTERFACE to eth0 by default Change-Id: I8a85465fa389af3af04d91775a99df58b2da575a --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 5cc1424b..788c1604 100755 --- a/stack.sh +++ b/stack.sh @@ -271,14 +271,14 @@ function read_password { # FIXME: more documentation about why these are important flags. Also # we should make sure we use the same variable names as the flag names. 
-PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-eth0} +PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-br100} FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28} NET_MAN=${NET_MAN:-FlatDHCPManager} EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST} FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-br100} -VLAN_INTERFACE=${VLAN_INTERFACE:-$PUBLIC_INTERFACE} +VLAN_INTERFACE=${VLAN_INTERFACE:-eth0} # Test floating pool and range are used for testing. They are defined # here until the admin APIs can replace nova-manage From 31cce21808d259037703cc3386ad99b50482d5f5 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 1 Feb 2012 18:01:01 -0600 Subject: [PATCH 344/967] Blueprint cli-auth * Add proper test for swift in client-env.sh * Remove all VERIFY variables; nothing beyond the 4 OS_ vars is set now * Update exercises for Keystone 2.0 auth * swift understands Keystone 2.0 auth now, don't download custom binary * cleanup uploaded bundle in bundle.sh Change-Id: I99c818d81534bbf93c0e142513acb80e5fe613ea --- exercises/bundle.sh | 3 +++ exercises/client-env.sh | 46 ++++++++++----------------------------- exercises/floating_ips.sh | 13 ++--------- exercises/volumes.sh | 13 ++--------- stack.sh | 6 ----- 5 files changed, 18 insertions(+), 63 deletions(-) diff --git a/exercises/bundle.sh b/exercises/bundle.sh index ca35c9af..d5c78af3 100755 --- a/exercises/bundle.sh +++ b/exercises/bundle.sh @@ -46,3 +46,6 @@ if ! timeout $REGISTER_TIMEOUT sh -c "while euca-describe-images | grep '$AMI' | echo "Image $AMI not available within $REGISTER_TIMEOUT seconds" exit 1 fi + +# Clean up +euca-deregister $AMI diff --git a/exercises/client-env.sh b/exercises/client-env.sh index f4172bcb..a15a5c04 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -52,18 +52,13 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then if [[ "$SKIP_EXERCISES" =~ "key" ]] ; then STATUS_KEYSTONE="Skipped" else - if [[ -n "$VERIFY" ]]; then - # Save original environment - xOS_AUTH_URL=$OS_AUTH_URL - xOS_TENANT_NAME=$OS_TENANT_NAME - xOS_USERNAME=$OS_USERNAME - xOS_PASSWORD=$OS_PASSWORD - # keystone can't handle a trailing '/' - export OS_AUTH_URL=${OS_AUTH_URL%/} - # does any non-admin request work? 
- export OS_USERNAME=admin - export OS_TENANT_NAME=admin - fi + # We need to run the keystone test as admin since there doesn't + # seem to be anything to test the cli vars that runs as a user + # tenant-list should do that, it isn't implemented (yet) + xOS_TENANT_NAME=$OS_TENANT_NAME + xOS_USERNAME=$OS_USERNAME + export OS_USERNAME=admin + export OS_TENANT_NAME=admin echo -e "\nTest Keystone" if keystone service-list; then @@ -72,13 +67,9 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then STATUS_KEYSTONE="Failed" RETURN=1 fi - if [[ -n "$VERIFY" ]]; then - # Save original environment - OS_AUTH_URL=$xOS_AUTH_URL - OS_TENANT_NAME=$xOS_TENANT_NAME - OS_USERNAME=$xOS_USERNAME - OS_PASSWORD=$xOS_PASSWORD - fi + + OS_TENANT_NAME=$xOS_TENANT_NAME + OS_USERNAME=$xOS_USERNAME fi fi @@ -89,12 +80,6 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then if [[ "$SKIP_EXERCISES" =~ "n-api" ]] ; then STATUS_NOVA="Skipped" else - if [[ -n "$VERIFY" ]]; then - # Known novaclient breakage: - # NOVA_VERSION must be set or nova silently fails - export NOVA_VERSION=2 - fi - echo -e "\nTest Nova" if nova flavor-list; then STATUS_NOVA="Succeeded" @@ -112,14 +97,6 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then if [[ "$SKIP_EXERCISES" =~ "g-api" ]] ; then STATUS_GLANCE="Skipped" else - if [[ -n "$VERIFY" ]]; then - # Known glance client differage: - export OS_AUTH_TENANT=$OS_TENANT_NAME - export OS_AUTH_USER=$OS_USERNAME - export OS_AUTH_KEY=$OS_PASSWORD - export OS_AUTH_STRATEGY=keystone - fi - echo -e "\nTest Glance" if glance index; then STATUS_GLANCE="Succeeded" @@ -138,8 +115,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then STATUS_SWIFT="Skipped" else echo -e "\nTest Swift" - # FIXME(dtroyer): implement swift test - if true; then + if swift stat; then STATUS_SWIFT="Succeeded" else STATUS_SWIFT="Failed" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index c1cffa4a..233313e8 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -48,15 +48,6 @@ DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova} # Additional floating IP pool and range TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} -# Get a token for clients that don't support service catalog -# ========================================================== - -# manually create a token by querying keystone (sending JSON data). Keystone -# returns a token and catalog of endpoints. We use python to parse the token -# and save it. 
- -TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OS_USERNAME\", \"password\": \"$OS_PASSWORD\"}}}" -H "Content-type: application/json" ${OS_AUTH_URL%/}/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"` - # Launching a server # ================== @@ -70,10 +61,10 @@ nova list nova image-list # But we recommend using glance directly -glance -f -A $TOKEN -H $GLANCE_HOST index +glance -f index # Grab the id of the image to launch -IMAGE=`glance -f -A $TOKEN -H $GLANCE_HOST index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` +IMAGE=`glance -f index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` # Security Groups # --------------- diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 1f7c25f5..1fcc034b 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -34,15 +34,6 @@ DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} # Boot this image, use first AMi image if unset DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} -# Get a token for clients that don't support service catalog -# ========================================================== - -# manually create a token by querying keystone (sending JSON data). Keystone -# returns a token and catalog of endpoints. We use python to parse the token -# and save it. - -TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$OS_USERNAME\", \"password\": \"$OS_PASSWORD\"}}}" -H "Content-type: application/json" ${OS_AUTH_URL%/}/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"` - # Launching a server # ================== @@ -56,10 +47,10 @@ nova list nova image-list # But we recommend using glance directly -glance -f -A $TOKEN -H $GLANCE_HOST index +glance -f index # Grab the id of the image to launch -IMAGE=`glance -f -A $TOKEN -H $GLANCE_HOST index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` +IMAGE=`glance -f index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` # determinine instance type # ------------------------- diff --git a/stack.sh b/stack.sh index 788c1604..d803816c 100755 --- a/stack.sh +++ b/stack.sh @@ -981,12 +981,6 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then # We install the memcache server as this is will be used by the # middleware to cache the tokens auths for a long this is needed. apt_get install memcached - - # We need a special version of bin/swift which understand the - # OpenStack api 2.0, we download it until this is getting - # integrated in swift. 
- sudo https_proxy=$https_proxy curl -s -o/usr/local/bin/swift \ - 'https://review.openstack.org/gitweb?p=openstack/swift.git;a=blob_plain;f=bin/swift;hb=48bfda6e2fdf3886c98bd15649887d54b9a2574e' else swift_auth_server=tempauth fi From 258c95de099309bd6b26f441d004ca3f67e9c25a Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 14 Feb 2012 14:20:36 -0800 Subject: [PATCH 345/967] Update paste config to return version info Change-Id: I67edb5c9aab6899c105666a4541918bad9116386 --- files/keystone.conf | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/files/keystone.conf b/files/keystone.conf index 3167c0f4..45044f76 100644 --- a/files/keystone.conf +++ b/files/keystone.conf @@ -69,10 +69,24 @@ pipeline = token_auth admin_token_auth json_body debug ec2_extension public_serv [pipeline:admin_api] pipeline = token_auth admin_token_auth json_body debug ec2_extension crud_extension admin_service +[app:public_version_service] +paste.app_factory = keystone.service:public_version_app_factory + +[app:admin_version_service] +paste.app_factory = keystone.service:admin_version_app_factory + +[pipeline:public_version_api] +pipeline = public_version_service + +[pipeline:admin_version_api] +pipeline = admin_version_service + [composite:main] use = egg:Paste#urlmap /v2.0 = public_api +/ = public_version_api [composite:admin] use = egg:Paste#urlmap /v2.0 = admin_api +/ = admin_version_service From b115341253b30fd51b5ac2fa763c701737eaae6c Mon Sep 17 00:00:00 2001 From: root Date: Thu, 19 Jan 2012 13:28:21 -0800 Subject: [PATCH 346/967] Generalize xen network config Allow dhcp for IP addresses. dhclient3 bug workaround. Refactor code to improve network creation logic. Change-Id: Ia3e2e65bbe8b68cf4832595cb7c283c3dc84db19 --- stack.sh | 9 ++- tools/xen/build_domU.sh | 97 ++++++++++++++++++++++--------- tools/xen/build_xva.sh | 42 ++++++++++--- tools/xen/templates/interfaces.in | 17 ++++-- tools/xen/xenrc | 15 +++-- 5 files changed, 130 insertions(+), 50 deletions(-) diff --git a/stack.sh b/stack.sh index 08fcafe4..e1f3083a 100755 --- a/stack.sh +++ b/stack.sh @@ -200,14 +200,13 @@ LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} # cases unless you are working on multi-zone mode. SCHEDULER=${SCHEDULER:-nova.scheduler.simple.SimpleScheduler} +HOST_IP_IFACE=${HOST_IP_IFACE:-eth0} # Use the eth0 IP unless an explicit is set by ``HOST_IP`` environment variable -if [ ! -n "$HOST_IP" ]; then - HOST_IP=`LC_ALL=C /sbin/ifconfig eth0 | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` +if [ -z "$HOST_IP" -o "$HOST_IP" == "dhcp" ]; then + HOST_IP=`LC_ALL=C /sbin/ifconfig ${HOST_IP_IFACE} | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` if [ "$HOST_IP" = "" ]; then echo "Could not determine host ip address." - echo "If this is not your first run of stack.sh, it is " - echo "possible that nova moved your eth0 ip address to the FLAT_NETWORK_BRIDGE." - echo "Please specify your HOST_IP in your localrc." + echo "Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted to eth0" exit 1 fi fi diff --git a/tools/xen/build_domU.sh b/tools/xen/build_domU.sh index cd28f155..5ea03dad 100755 --- a/tools/xen/build_domU.sh +++ b/tools/xen/build_domU.sh @@ -37,51 +37,85 @@ if ! which git; then fi # Helper to create networks +# Uses echo trickery to return network uuid function create_network() { - if ! 
xe network-list | grep bridge | grep -q $1; then - echo "Creating bridge $1" - xe network-create name-label=$1 + br=$1 + dev=$2 + vlan=$3 + netname=$4 + if [ -z $br ] + then + pif=$(xe pif-list --minimal device=$dev VLAN=$vlan) + if [ -z $pif ] + then + net=$(xe network-create name-label=$netname) + else + net=$(xe network-list --minimal PIF-uuids=$pif) + fi + echo $net + return 0 + fi + if [ ! $(xe network-list --minimal params=bridge | grep -w --only-matching $br) ] + then + echo "Specified bridge $br does not exist" + echo "If you wish to use defaults, please keep the bridge name empty" + exit 1 + else + net=$(xe network-list --minimal bridge=$br) + echo $net fi } -# Create host, vm, mgmt, pub networks -create_network xapi0 -create_network $VM_BR -create_network $MGT_BR -create_network $PUB_BR - -# Get the uuid for our physical (public) interface -PIF=`xe pif-list --minimal device=eth0` +function errorcheck() { + rc=$? + if [ $rc -ne 0 ] + then + exit $rc + fi +} -# Create networks/bridges for vm and management -VM_NET=`xe network-list --minimal bridge=$VM_BR` -MGT_NET=`xe network-list --minimal bridge=$MGT_BR` +# Create host, vm, mgmt, pub networks +VM_NET=$(create_network "$VM_BR" "$VM_DEV" "$VM_VLAN" "vmbr") +errorcheck +MGT_NET=$(create_network "$MGT_BR" "$MGT_DEV" "$MGT_VLAN" "mgtbr") +errorcheck +PUB_NET=$(create_network "$PUB_BR" "$PUB_DEV" "$PUB_VLAN" "pubbr") +errorcheck # Helper to create vlans function create_vlan() { - pif=$1 + dev=$1 vlan=$2 net=$3 - if ! xe vlan-list | grep tag | grep -q $vlan; then - xe vlan-create pif-uuid=$pif vlan=$vlan network-uuid=$net + # VLAN -1 refers to no VLAN (physical network) + if [ $vlan -eq -1 ] + then + return + fi + if [ -z $(xe vlan-list --minimal tag=$vlan) ] + then + pif=$(xe pif-list --minimal network-uuid=$net) + # We created a brand new network this time + if [ -z $pif ] + then + pif=$(xe pif-list --minimal device=$dev VLAN=-1) + xe vlan-create pif-uuid=$pif vlan=$vlan network-uuid=$net + else + echo "VLAN does not exist but PIF attached to this network" + echo "How did we reach here?" + exit 1 + fi fi } # Create vlans for vm and management -create_vlan $PIF $VM_VLAN $VM_NET -create_vlan $PIF $MGT_VLAN $MGT_NET +create_vlan $PUB_DEV $PUB_VLAN $PUB_NET +create_vlan $VM_DEV $VM_VLAN $VM_NET +create_vlan $MGT_DEV $MGT_VLAN $MGT_NET # dom0 ip HOST_IP=${HOST_IP:-`ifconfig xenbr0 | grep "inet addr" | cut -d ":" -f2 | sed "s/ .*//"`} -# Setup host-only nat rules -HOST_NET=169.254.0.0/16 -if ! iptables -L -v -t nat | grep -q $HOST_NET; then - iptables -t nat -A POSTROUTING -s $HOST_NET -j SNAT --to-source $HOST_IP - iptables -I FORWARD 1 -s $HOST_NET -j ACCEPT - /etc/init.d/iptables save -fi - # Set up ip forwarding if ! grep -q "FORWARD_IPV4=YES" /etc/sysconfig/network; then # FIXME: This doesn't work on reboot! 
@@ -139,6 +173,15 @@ if [ "$DO_SHUTDOWN" = "1" ]; then fi # Start guest +if [ -z $VM_BR ]; then + VM_BR=$(xe network-list --minimal uuid=$VM_NET params=bridge) +fi +if [ -z $MGT_BR ]; then + MGT_BR=$(xe network-list --minimal uuid=$MGT_NET params=bridge) +fi +if [ -z $PUB_BR ]; then + PUB_BR=$(xe network-list --minimal uuid=$PUB_NET params=bridge) +fi $TOP_DIR/scripts/install-os-vpx.sh -f $XVA -v $VM_BR -m $MGT_BR -p $PUB_BR # If we have copied our ssh credentials, use ssh to monitor while the installation runs diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index e4de2a1a..c8721aa9 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -62,7 +62,7 @@ XVA_DIR=$TOP_DIR/xvas mkdir -p $XVA_DIR # Path to xva -XVA=$XVA_DIR/$GUEST_NAME.xva +XVA=$XVA_DIR/$GUEST_NAME.xva # Setup fake grub rm -rf $STAGING_DIR/boot/grub/ @@ -102,6 +102,8 @@ sed -e "s,@BUILD_NUMBER@,$BUILD_NUMBER,g" -i $OVA # Run devstack on launch cat <$STAGING_DIR/etc/rc.local +# network restart required for getting the right gateway +/etc/init.d/networking restart GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ DO_TGZ=0 bash /opt/stack/devstack/tools/xen/prepare_guest.sh su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" stack exit 0 @@ -122,12 +124,36 @@ EOF # Configure the network INTERFACES=$STAGING_DIR/etc/network/interfaces cp $TEMPLATES_DIR/interfaces.in $INTERFACES -sed -e "s,@ETH1_IP@,$VM_IP,g" -i $INTERFACES -sed -e "s,@ETH1_NETMASK@,$VM_NETMASK,g" -i $INTERFACES -sed -e "s,@ETH2_IP@,$MGT_IP,g" -i $INTERFACES -sed -e "s,@ETH2_NETMASK@,$MGT_NETMASK,g" -i $INTERFACES -sed -e "s,@ETH3_IP@,$PUB_IP,g" -i $INTERFACES -sed -e "s,@ETH3_NETMASK@,$PUB_NETMASK,g" -i $INTERFACES +if [ $VM_IP == "dhcp" ]; then + echo 'eth1 on dhcp' + sed -e "s,iface eth1 inet static,iface eth1 inet dhcp,g" -i $INTERFACES + sed -e '/@ETH1_/d' -i $INTERFACES +else + sed -e "s,@ETH1_IP@,$VM_IP,g" -i $INTERFACES + sed -e "s,@ETH1_NETMASK@,$VM_NETMASK,g" -i $INTERFACES +fi + +if [ $MGT_IP == "dhcp" ]; then + echo 'eth2 on dhcp' + sed -e "s,iface eth2 inet static,iface eth2 inet dhcp,g" -i $INTERFACES + sed -e '/@ETH2_/d' -i $INTERFACES +else + sed -e "s,@ETH2_IP@,$MGT_IP,g" -i $INTERFACES + sed -e "s,@ETH2_NETMASK@,$MGT_NETMASK,g" -i $INTERFACES +fi + +if [ $PUB_IP == "dhcp" ]; then + echo 'eth3 on dhcp' + sed -e "s,iface eth3 inet static,iface eth3 inet dhcp,g" -i $INTERFACES + sed -e '/@ETH3_/d' -i $INTERFACES +else + sed -e "s,@ETH3_IP@,$PUB_IP,g" -i $INTERFACES + sed -e "s,@ETH3_NETMASK@,$PUB_NETMASK,g" -i $INTERFACES +fi + +if [ -h $STAGING_DIR/sbin/dhclient3 ]; then + rm -f $STAGING_DIR/sbin/dhclient3 +fi # Gracefully cp only if source file/dir exists function cp_it { @@ -151,7 +177,7 @@ cat <$STAGING_DIR/opt/stack/run.sh #!/bin/bash cd /opt/stack/devstack killall screen -UPLOAD_LEGACY_TTY=yes HOST_IP=$PUB_IP VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=1 $STACKSH_PARAMS ./stack.sh +UPLOAD_LEGACY_TTY=yes HOST_IP=$PUB_IP VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=1 HOST_IP_IFACE=$HOST_IP_IFACE $STACKSH_PARAMS ./stack.sh EOF chmod 755 $STAGING_DIR/opt/stack/run.sh diff --git a/tools/xen/templates/interfaces.in b/tools/xen/templates/interfaces.in index 49c3d681..e315a8c3 100644 --- a/tools/xen/templates/interfaces.in +++ b/tools/xen/templates/interfaces.in @@ -1,8 +1,15 @@ auto lo iface lo inet loopback -auto eth0 -iface eth0 inet dhcp +# If eth3 is static, the order should not matter +# and eth0 will have the default gateway. If not, +# we probably want the default gateway to be +# what is on the public interface. 
Hence changed +# the order here. +auto eth3 +iface eth3 inet static + address @ETH3_IP@ + netmask @ETH3_NETMASK@ auto eth1 iface eth1 inet static @@ -15,7 +22,5 @@ iface eth2 inet static address @ETH2_IP@ netmask @ETH2_NETMASK@ -auto eth3 -iface eth3 inet static - address @ETH3_IP@ - netmask @ETH3_NETMASK@ +auto eth0 +iface eth0 inet dhcp diff --git a/tools/xen/xenrc b/tools/xen/xenrc index 246ac16b..73f9c025 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -9,24 +9,31 @@ VDI_MB=${VDI_MB:-2500} # VM Password GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} -# Our nova host's network info +# Host Interface, i.e. the public facing interface on the nova vm +HOST_IP_IFACE=${HOST_IP_IFACE:-eth0} + +# Our nova host's network info VM_IP=${VM_IP:-10.255.255.255} # A host-only ip that let's the interface come up, otherwise unused MGT_IP=${MGT_IP:-172.16.100.55} PUB_IP=${PUB_IP:-192.168.1.55} # Public network -PUB_BR=${PUB_BR:-xenbr0} +PUB_BR=${PUB_BR:-"xenbr0"} +PUB_DEV=${PUB_DEV:-eth0} +PUB_VLAN=${PUB_VLAN:--1} PUB_NETMASK=${PUB_NETMASK:-255.255.255.0} # VM network params VM_NETMASK=${VM_NETMASK:-255.255.255.0} -VM_BR=${VM_BR:-xapi1} +VM_BR=${VM_BR:-""} VM_VLAN=${VM_VLAN:-100} +VM_DEV=${VM_DEV:-eth0} # MGMT network params MGT_NETMASK=${MGT_NETMASK:-255.255.255.0} -MGT_BR=${MGT_BR:-xapi2} +MGT_BR=${MGT_BR:-""} MGT_VLAN=${MGT_VLAN:-101} +MGT_DEV=${MGT_DEV:-eth0} # XVA Directory XVA_DIR=${XVA_DIR:-xvas} From ce59d643f5d7771be25e285da1efc6372847b9d1 Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Thu, 2 Feb 2012 16:09:23 -0800 Subject: [PATCH 347/967] Add logging for prepare_guest Change-Id: I2dc31acb56ee64c0c69e614a2fc96ae6b31b2ffa --- tools/xen/build_xva.sh | 2 +- tools/xen/prepare_guest.sh | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index c8721aa9..4eb4b911 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -104,7 +104,7 @@ sed -e "s,@BUILD_NUMBER@,$BUILD_NUMBER,g" -i $OVA cat <$STAGING_DIR/etc/rc.local # network restart required for getting the right gateway /etc/init.d/networking restart -GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ DO_TGZ=0 bash /opt/stack/devstack/tools/xen/prepare_guest.sh +GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ DO_TGZ=0 bash /opt/stack/devstack/tools/xen/prepare_guest.sh > /opt/stack/prepare_guest.log 2>&1 su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" stack exit 0 EOF diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh index 2f19b9d0..77ce54a8 100644 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -1,5 +1,7 @@ #!/bin/bash +set -x + # Configurable nuggets GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} STAGING_DIR=${STAGING_DIR:-stage} From e98cc1220e0c70898bebec357f8f20ff5647d397 Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Thu, 26 Jan 2012 11:58:56 -0800 Subject: [PATCH 348/967] Multiple vpx for xen, post splitting of script Ensure building, installing multiple nova vms works after we split the build script as build nova vm, install nova vm. 
Change-Id: Iadb6e181caec511325a30727bf9e9c79e8afea5a --- tools/xen/build_domU.sh | 2 +- tools/xen/build_domU_multi.sh | 16 ++++++------ tools/xen/install_domU_multi.sh | 40 +++++++++++++++++++++++++++++ tools/xen/scripts/install-os-vpx.sh | 10 +++++--- 4 files changed, 56 insertions(+), 12 deletions(-) create mode 100755 tools/xen/install_domU_multi.sh diff --git a/tools/xen/build_domU.sh b/tools/xen/build_domU.sh index 5ea03dad..ce11b0a8 100755 --- a/tools/xen/build_domU.sh +++ b/tools/xen/build_domU.sh @@ -182,7 +182,7 @@ fi if [ -z $PUB_BR ]; then PUB_BR=$(xe network-list --minimal uuid=$PUB_NET params=bridge) fi -$TOP_DIR/scripts/install-os-vpx.sh -f $XVA -v $VM_BR -m $MGT_BR -p $PUB_BR +$TOP_DIR/scripts/install-os-vpx.sh -f $XVA -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -w # If we have copied our ssh credentials, use ssh to monitor while the installation runs WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1} diff --git a/tools/xen/build_domU_multi.sh b/tools/xen/build_domU_multi.sh index 130bec5b..0285f42e 100755 --- a/tools/xen/build_domU_multi.sh +++ b/tools/xen/build_domU_multi.sh @@ -17,19 +17,19 @@ FLOATING_RANGE=${FLOATING_RANGE:-192.168.1.196/30} COMMON_VARS="$STACKSH_PARAMS MYSQL_HOST=$HEAD_MGT_IP RABBIT_HOST=$HEAD_MGT_IP GLANCE_HOSTPORT=$HEAD_MGT_IP:9292 FLOATING_RANGE=$FLOATING_RANGE" # Helper to launch containers -function build_domU { - GUEST_NAME=$1 PUB_IP=$2 MGT_IP=$3 DO_SHUTDOWN=$4 TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $5" ./build_domU.sh +function build_xva { + GUEST_NAME=$1 PUB_IP=$2 MGT_IP=$3 DO_SHUTDOWN=$4 TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $5" ./build_xva.sh } # Launch the head node - headnode uses a non-ip domain name, # because rabbit won't launch with an ip addr hostname :( -build_domU HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit" +build_xva HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit" # Wait till the head node is up -while ! curl -L http://$HEAD_PUB_IP | grep -q username; do - echo "Waiting for head node ($HEAD_PUB_IP) to start..." - sleep 5 -done +#while ! curl -L http://$HEAD_PUB_IP | grep -q username; do +# echo "Waiting for head node ($HEAD_PUB_IP) to start..." 
+# sleep 5 +#done # Build the HA compute host -build_domU $COMPUTE_PUB_IP $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api" +build_xva COMPUTENODE $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api" diff --git a/tools/xen/install_domU_multi.sh b/tools/xen/install_domU_multi.sh new file mode 100755 index 00000000..91129c57 --- /dev/null +++ b/tools/xen/install_domU_multi.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +# Echo commands +set -o xtrace + +# Head node host, which runs glance, api, keystone +HEAD_PUB_IP=${HEAD_PUB_IP:-192.168.1.57} +HEAD_MGT_IP=${HEAD_MGT_IP:-172.16.100.57} + +COMPUTE_PUB_IP=${COMPUTE_PUB_IP:-192.168.1.58} +COMPUTE_MGT_IP=${COMPUTE_MGT_IP:-172.16.100.58} + +# Networking params +FLOATING_RANGE=${FLOATING_RANGE:-192.168.1.196/30} + +# Variables common amongst all hosts in the cluster +COMMON_VARS="$STACKSH_PARAMS MYSQL_HOST=$HEAD_MGT_IP RABBIT_HOST=$HEAD_MGT_IP GLANCE_HOSTPORT=$HEAD_MGT_IP:9292 FLOATING_RANGE=$FLOATING_RANGE" + +# Helper to launch containers +function install_domU { + GUEST_NAME=$1 PUB_IP=$2 MGT_IP=$3 DO_SHUTDOWN=$4 TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $5" ./build_domU.sh +} + +# Launch the head node - headnode uses a non-ip domain name, +# because rabbit won't launch with an ip addr hostname :( +install_domU HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit" + +if [ $HEAD_PUB_IP == "dhcp" ] +then + guestnet=$(xe vm-list --minimal name-label=HEADNODE params=networks) + HEAD_PUB_IP=$(echo $guestnet | grep -w -o --only-matching "3/ip: [0-9,.]*;" | cut -d ':' -f2 | cut -d ';' -f 1) +fi +# Wait till the head node is up +while ! curl -L http://$HEAD_PUB_IP | grep -q username; do + echo "Waiting for head node ($HEAD_PUB_IP) to start..." + sleep 5 +done + +# Build the HA compute host +install_domU COMPUTENODE $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api" diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index 9aebb138..f0dc3c2d 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -38,7 +38,7 @@ usage() cat << EOF Usage: $0 [-f FILE_PATH] [-d DISK_SIZE] [-v BRIDGE_NAME] [-m BRIDGE_NAME] [-p BRIDGE_NAME] - [-k PARAMS] [-r RAM] [-i|-c] [-w] [-b] + [-k PARAMS] [-r RAM] [-i|-c] [-w] [-b] [-l NAME_LABEL] Installs XenServer OpenStack VPX. @@ -60,6 +60,7 @@ cat << EOF -k params Specifies kernel parameters. -r MiB Specifies RAM used by the VPX, in MiB. By default it will take the value from the XVA. + -l name Specifies the name label for the VM. EXAMPLES: @@ -87,7 +88,7 @@ EOF get_params() { - while getopts "hicwbf:d:v:m:p:k:r:" OPTION; + while getopts "hicwbf:d:v:m:p:k:r:l:" OPTION; do case $OPTION in h) usage @@ -126,6 +127,9 @@ get_params() v) BRIDGE_V=$OPTARG ;; + l) + NAME_LABEL=$OPTARG + ;; ?) usage exit @@ -443,7 +447,7 @@ else renumber_system_disk "$vm_uuid" - nl=$(xe_min vm-list params=name-label uuid=$vm_uuid) + nl=${NAME_LABEL:-$(xe_min vm-list params=name-label uuid=$vm_uuid)} xe vm-param-set \ "name-label=${nl/ import/}" \ other-config:os-vpx=true \ From 0d44b8bd638bb63d34689b5ecc8f7be98c815b67 Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Thu, 2 Feb 2012 18:13:27 -0800 Subject: [PATCH 349/967] Remove cloning of devstack Now that we have split the scripts, we should not clone devstack. We now require users to copy the xva over. 
Change-Id: Ie7cb4858c3e5860ab5c990c5ed1f88462c6953dd --- tools/xen/prepare_dom0.sh | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tools/xen/prepare_dom0.sh b/tools/xen/prepare_dom0.sh index ce16ada4..a8381aba 100755 --- a/tools/xen/prepare_dom0.sh +++ b/tools/xen/prepare_dom0.sh @@ -34,8 +34,3 @@ if ! which git; then make install fi -# Clone devstack -DEVSTACK=/root/devstack -if [ ! -d $DEVSTACK ]; then - git clone git://github.com/cloudbuilders/devstack.git $DEVSTACK -fi From 7bf87af343688036fc5701a5b3f3da413d2c5dae Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Thu, 2 Feb 2012 18:25:35 -0800 Subject: [PATCH 350/967] Fix troubles with git cloning https Some xenservers get errors because git is not configured with curl and expat, and because of invalid SSL certs. Change-Id: Ibe7062255b90559b0d6ca5f52e33ec56451505df --- tools/xen/build_domU.sh | 4 ++-- tools/xen/prepare_dom0.sh | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/xen/build_domU.sh b/tools/xen/build_domU.sh index ce11b0a8..455ad264 100755 --- a/tools/xen/build_domU.sh +++ b/tools/xen/build_domU.sh @@ -31,7 +31,7 @@ if ! which git; then wget http://git-core.googlecode.com/files/git-1.7.7.tar.gz tar xfv git-1.7.7.tar.gz cd $GITDIR - ./configure + ./configure --with-curl --with-expat make install cd $TOP_DIR fi @@ -141,7 +141,7 @@ fi # Checkout nova if [ ! -d $TOP_DIR/nova ]; then - git clone $NOVA_REPO + env GIT_SSL_NO_VERIFY=true git clone $NOVA_REPO cd $TOP_DIR/nova git checkout $NOVA_BRANCH fi diff --git a/tools/xen/prepare_dom0.sh b/tools/xen/prepare_dom0.sh index a8381aba..d28a07f1 100755 --- a/tools/xen/prepare_dom0.sh +++ b/tools/xen/prepare_dom0.sh @@ -1,9 +1,9 @@ -#!/bin/sh +#i!/bin/sh set -o xtrace set -o errexit # Install basics for vi and git -yum -y --enablerepo=base install gcc make vim-enhanced zlib-devel openssl-devel +yum -y --enablerepo=base install gcc make vim-enhanced zlib-devel openssl-devel curl-devel.i386 # Simple but usable vimrc if [ ! -e /root/.vimrc ]; then @@ -30,7 +30,7 @@ if ! which git; then wget http://git-core.googlecode.com/files/git-1.7.7.tar.gz tar xfv git-1.7.7.tar.gz cd $GITDIR - ./configure + ./configure --with-curl --with-expat make install fi From bd13b708f2895a1e6e49096231ce3c60c3bbfa8e Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 13 Feb 2012 11:22:36 -0600 Subject: [PATCH 351/967] Fixes bug 885087: /etc/nova/nova.conf (This commit didn't get backported to redux, so needs to be re-applied now.) Move Nova config files (nova.conf, api-paste.ini) to /etc/nova so all binaries can find them, particularly /usr/local/bin/nova-manage. Change-Id: I002a9ae1c601894c943a49057cdcf0ab9e9d6c61 --- stack.sh | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/stack.sh b/stack.sh index d803816c..f2c47cf3 100755 --- a/stack.sh +++ b/stack.sh @@ -793,20 +793,28 @@ fi # Nova # ---- + +# Put config files in /etc/nova for everyone to find +NOVA_CONF=/etc/nova +if [[ ! -d $NOVA_CONF ]]; then + sudo mkdir -p $NOVA_CONF +fi +sudo chown `whoami` $NOVA_CONF + if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then # We are going to use a sample http middleware configuration based on the # one from the keystone project to launch nova. This paste config adds # the configuration required for nova to validate keystone tokens. 
# First we add a some extra data to the default paste config from nova - cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_DIR/bin/nova-api-paste.ini + cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF # Then we add our own service token to the configuration - sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini + sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_CONF/api-paste.ini # Finally, we change the pipelines in nova to use keystone function replace_pipeline() { - sed "/\[pipeline:$1\]/,/\[/s/^pipeline = .*/pipeline = $2/" -i $NOVA_DIR/bin/nova-api-paste.ini + sed "/\[pipeline:$1\]/,/\[/s/^pipeline = .*/pipeline = $2/" -i $NOVA_CONF/api-paste.ini } replace_pipeline "ec2cloud" "ec2faultwrap logrequest totoken authtoken keystonecontext cloudrequest authorizer validator ec2executor" replace_pipeline "ec2admin" "ec2faultwrap logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor" @@ -1093,11 +1101,11 @@ if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then fi function add_nova_flag { - echo "$1" >> $NOVA_DIR/bin/nova.conf + echo "$1" >> $NOVA_CONF/nova.conf } # (re)create nova.conf -rm -f $NOVA_DIR/bin/nova.conf +rm -f $NOVA_CONF/nova.conf add_nova_flag "--verbose" add_nova_flag "--allow_admin_api" add_nova_flag "--scheduler_driver=$SCHEDULER" @@ -1157,7 +1165,7 @@ fi VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} add_nova_flag "--vncserver_listen=$VNCSERVER_LISTEN" add_nova_flag "--vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS" -add_nova_flag "--api_paste_config=$NOVA_DIR/bin/nova-api-paste.ini" +add_nova_flag "--api_paste_config=$NOVA_CONF/api-paste.ini" add_nova_flag "--image_service=nova.image.glance.GlanceImageService" add_nova_flag "--ec2_dmz_host=$EC2_DMZ_HOST" add_nova_flag "--rabbit_host=$RABBIT_HOST" From a6651e94001a2f024902c3dc772a8be9a3218118 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 16 Feb 2012 10:16:52 +0000 Subject: [PATCH 352/967] Add is_service_enabled function. Add a function to detect if one of the feature/server is enabled. Some catch all for nova glance quantum Thanks for dtroyer reviews and suggestions. Change-Id: I082be08ebad666de16f0710627696516b3b3c48b --- stack.sh | 137 ++++++++++++++++++++++++++++++------------------------- 1 file changed, 75 insertions(+), 62 deletions(-) diff --git a/stack.sh b/stack.sh index f2c47cf3..1757b345 100755 --- a/stack.sh +++ b/stack.sh @@ -264,6 +264,27 @@ function read_password { set -o xtrace } +# This function will check if the service(s) specified in argument is +# enabled by the user in ENABLED_SERVICES. +# +# If there is multiple services specified as argument it will act as a +# boolean OR or if any of the services specified on the command line +# return true. 
+# +# There is a special cases for some 'catch-all' services : +# nova would catch if any service enabled start by n- +# glance would catch if any service enabled start by g- +# quantum would catch if any service enabled start by q- +function is_service_enabled() { + services=$@ + for service in ${services}; do + [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 + [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0 + [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0 + [[ ${service} == "quantum" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 + done + return 1 +} # Nova Network Configuration # -------------------------- @@ -384,7 +405,7 @@ SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} # We only ask for Swift Hash if we have enabled swift service. -if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then +if is_service_enabled swift; then # SWIFT_HASH is a random unique string for a swift cluster that # can never change. read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH." @@ -537,44 +558,40 @@ git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH # glance, swift middleware and nova api needs keystone middleware -if [[ "$ENABLED_SERVICES" =~ "key" || - "$ENABLED_SERVICES" =~ "g-api" || - "$ENABLED_SERVICES" =~ "n-api" || - "$ENABLED_SERVICES" =~ "swift" ]]; then +if is_service_enabled key g-api n-api swift; then # unified auth system (manages accounts/tokens) git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH fi -if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then +if is_service_enabled swift; then # storage service git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH # swift + keystone middleware git_clone $SWIFT_KEYSTONE_REPO $SWIFT_KEYSTONE_DIR $SWIFT_KEYSTONE_BRANCH fi -if [[ "$ENABLED_SERVICES" =~ "g-api" || - "$ENABLED_SERVICES" =~ "n-api" ]]; then +if is_service_enabled g-api n-api; then # image catalog service git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH fi -if [[ "$ENABLED_SERVICES" =~ "n-novnc" ]]; then +if is_service_enabled n-novnc; then # a websockets/html5 or flash powered VNC console for vm instances git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH fi -if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then +if is_service_enabled horizon; then # django powered web control panel for openstack git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG fi -if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then +if is_service_enabled q-svc; then # quantum git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH git_clone $QUANTUM_CLIENT_REPO $QUANTUM_CLIENT_DIR $QUANTUM_CLIENT_BRANCH fi -if [[ "$ENABLED_SERVICES" =~ "m-svc" ]]; then +if is_service_enabled m-svc; then # melange git_clone $MELANGE_REPO $MELANGE_DIR $MELANGE_BRANCH fi -if [[ "$ENABLED_SERVICES" =~ "melange" ]]; then +if is_service_enabled melange; then git_clone $MELANGECLIENT_REPO $MELANGECLIENT_DIR $MELANGECLIENT_BRANCH fi @@ -586,32 +603,28 @@ fi # allowing ``import nova`` or ``import glance.client`` cd $KEYSTONECLIENT_DIR; sudo python setup.py develop cd $NOVACLIENT_DIR; sudo python setup.py develop -if [[ "$ENABLED_SERVICES" =~ "key" || - "$ENABLED_SERVICES" =~ "g-api" || - "$ENABLED_SERVICES" =~ "n-api" || - "$ENABLED_SERVICES" =~ "swift" ]]; then +if is_service_enabled key g-api n-api swift; then cd $KEYSTONE_DIR; sudo python setup.py develop fi -if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then +if is_service_enabled swift; then cd 
$SWIFT_DIR; sudo python setup.py develop cd $SWIFT_KEYSTONE_DIR; sudo python setup.py develop fi -if [[ "$ENABLED_SERVICES" =~ "g-api" || - "$ENABLED_SERVICES" =~ "n-api" ]]; then +if is_service_enabled g-api n-api; then cd $GLANCE_DIR; sudo python setup.py develop fi cd $NOVA_DIR; sudo python setup.py develop -if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then +if is_service_enabled horizon; then cd $HORIZON_DIR/horizon; sudo python setup.py develop cd $HORIZON_DIR/openstack-dashboard; sudo python setup.py develop fi -if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then +if is_service_enabled q-svc; then cd $QUANTUM_DIR; sudo python setup.py develop fi -if [[ "$ENABLED_SERVICES" =~ "m-svc" ]]; then +if is_service_enabled m-svc; then cd $MELANGE_DIR; sudo python setup.py develop fi -if [[ "$ENABLED_SERVICES" =~ "melange" ]]; then +if is_service_enabled melange; then cd $MELANGECLIENT_DIR; sudo python setup.py develop fi @@ -640,7 +653,7 @@ fi # Rabbit # --------- -if [[ "$ENABLED_SERVICES" =~ "rabbit" ]]; then +if is_service_enabled rabbit; then # Install and start rabbitmq-server # the temp file is necessary due to LP: #878600 tfile=$(mktemp) @@ -654,7 +667,7 @@ fi # Mysql # --------- -if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then +if is_service_enabled mysql; then # Seed configuration with mysql password so that apt-get install doesn't # prompt us for a password upon install. @@ -693,7 +706,7 @@ fi # Setup the django horizon application to serve via apache/wsgi -if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then +if is_service_enabled horizon; then # Install apache2, which is NOPRIME'd apt_get install apache2 libapache2-mod-wsgi @@ -710,7 +723,7 @@ if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then cp $FILES/horizon_settings.py $local_settings # Enable quantum in dashboard, if requested - if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then + if is_service_enabled quantum; then sudo sed -e "s,QUANTUM_ENABLED = False,QUANTUM_ENABLED = True,g" -i $local_settings fi @@ -736,7 +749,7 @@ fi # Glance # ------ -if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then +if is_service_enabled g-reg; then GLANCE_IMAGE_DIR=$DEST/glance/images # Delete existing images rm -rf $GLANCE_IMAGE_DIR @@ -801,7 +814,7 @@ if [[ ! -d $NOVA_CONF ]]; then fi sudo chown `whoami` $NOVA_CONF -if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then +if is_service_enabled n-api; then # We are going to use a sample http middleware configuration based on the # one from the keystone project to launch nova. This paste config adds # the configuration required for nova to validate keystone tokens. @@ -834,7 +847,7 @@ function clean_iptables() { sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables -t nat",$0}' | bash } -if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then +if is_service_enabled n-cpu; then # Virtualization Configuration # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -915,7 +928,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then sudo rm -rf $NOVA_DIR/instances/* fi -if [[ "$ENABLED_SERVICES" =~ "n-net" ]]; then +if is_service_enabled n-net; then # Delete traces of nova networks from prior runs sudo killall dnsmasq || true clean_iptables @@ -924,7 +937,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-net" ]]; then fi # Storage Service -if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then +if is_service_enabled swift; then # We first do a bit of setup by creating the directories and # changing the permissions so we can run it as our user. 
@@ -983,7 +996,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then # By default Swift will be installed with the tempauth middleware # which has some default username and password if you have # configured keystone it will checkout the directory. - if [[ "$ENABLED_SERVICES" =~ "key" ]]; then + if is_service_enabled key; then swift_auth_server=keystone # We install the memcache server as this is will be used by the @@ -1060,7 +1073,7 @@ fi # Volume Service # -------------- -if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then +if is_service_enabled n-vol; then # # Configure a default volume group called 'nova-volumes' for the nova-volume # service if it does not yet exist. If you don't wish to use a file backed @@ -1111,21 +1124,21 @@ add_nova_flag "--allow_admin_api" add_nova_flag "--scheduler_driver=$SCHEDULER" add_nova_flag "--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf" add_nova_flag "--fixed_range=$FIXED_RANGE" -if [[ "$ENABLED_SERVICES" =~ "n-obj" ]]; then +if is_service_enabled n-obj; then add_nova_flag "--s3_host=$SERVICE_HOST" fi -if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then +if is_service_enabled quantum; then add_nova_flag "--network_manager=nova.network.quantum.manager.QuantumManager" add_nova_flag "--quantum_connection_host=$Q_HOST" add_nova_flag "--quantum_connection_port=$Q_PORT" - if [[ "$ENABLED_SERVICES" =~ "melange" ]]; then + if is_service_enabled melange; then add_nova_flag "--quantum_ipam_lib=nova.network.quantum.melange_ipam_lib" add_nova_flag "--use_melange_mac_generation" add_nova_flag "--melange_host=$M_HOST" add_nova_flag "--melange_port=$M_PORT" fi - if [[ "$ENABLED_SERVICES" =~ "q-svc" && "$Q_PLUGIN" = "openvswitch" ]]; then + if is_service_enabled q-svc && [[ "$Q_PLUGIN" = "openvswitch" ]]; then add_nova_flag "--libvirt_vif_type=ethernet" add_nova_flag "--libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver" add_nova_flag "--linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver" @@ -1134,7 +1147,7 @@ if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then else add_nova_flag "--network_manager=nova.network.manager.$NET_MAN" fi -if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then +if is_service_enabled n-vol; then add_nova_flag "--volume_group=$VOLUME_GROUP" add_nova_flag "--volume_name_template=${VOLUME_NAME_PREFIX}%08x" # oneiric no longer supports ietadm @@ -1149,7 +1162,7 @@ add_nova_flag "--libvirt_type=$LIBVIRT_TYPE" add_nova_flag "--instance_name_template=${INSTANCE_NAME_PREFIX}%08x" # All nova-compute workers need to know the vnc configuration options # These settings don't hurt anything if n-xvnc and n-novnc are disabled -if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then +if is_service_enabled n-cpu; then NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} add_nova_flag "--novncproxy_base_url=$NOVNCPROXY_URL" XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"} @@ -1221,7 +1234,7 @@ fi # All nova components talk to a central database. We will need to do this step # only once for an entire cluster. 
-if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then +if is_service_enabled mysql; then # (re)create nova database mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS nova;' mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE nova;' @@ -1241,7 +1254,7 @@ fi # our screen helper to launch a service in a hidden named screen function screen_it { NL=`echo -ne '\015'` - if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then + if is_service_enabled $1; then screen -S stack -X screen -t $1 # sleep to allow bash to be ready to be send the command - we are # creating a new window in screen and then sends characters, so if @@ -1258,12 +1271,12 @@ sleep 1 screen -r stack -X hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H" # launch the glance registry service -if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then +if is_service_enabled g-reg; then screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=etc/glance-registry.conf" fi # launch the glance api and wait for it to answer before continuing -if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then +if is_service_enabled g-api; then screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=etc/glance-api.conf" echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then @@ -1272,7 +1285,7 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then fi fi -if [[ "$ENABLED_SERVICES" =~ "key" ]]; then +if is_service_enabled key; then # (re)create keystone database mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;' mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone;' @@ -1301,7 +1314,7 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then fi # launch the keystone and wait for it to answer before continuing -if [[ "$ENABLED_SERVICES" =~ "key" ]]; then +if is_service_enabled key; then screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" echo "Waiting for keystone to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/; do sleep 1; done"; then @@ -1321,7 +1334,7 @@ fi # launch the nova-api and wait for it to answer before continuing -if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then +if is_service_enabled n-api; then screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api" echo "Waiting for nova-api to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then @@ -1331,13 +1344,13 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then fi # Quantum service -if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then +if is_service_enabled q-svc; then if [[ "$Q_PLUGIN" = "openvswitch" ]]; then # Install deps # FIXME add to files/apts/quantum, but don't install if not needed! 
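# After the package install just below, it is worth confirming that the DKMS
# datapath module actually built and loaded before relying on the agent
# (illustrative checks; module naming differs slightly across Open vSwitch
# releases):

    dpkg -l openvswitch-datapath-dkms | grep ^ii   # package installed?
    sudo dkms status | grep -i openvswitch         # module built for this kernel?
    lsmod | grep -i openvswitch                    # module loaded?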
apt_get install openvswitch-switch openvswitch-datapath-dkms # Create database for the plugin/agent - if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then + if is_service_enabled mysql; then mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS ovs_quantum;' mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum;' else @@ -1352,7 +1365,7 @@ if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then fi # Quantum agent (for compute nodes) -if [[ "$ENABLED_SERVICES" =~ "q-agt" ]]; then +if is_service_enabled q-agt; then if [[ "$Q_PLUGIN" = "openvswitch" ]]; then # Set up integration bridge OVS_BRIDGE=${OVS_BRIDGE:-br-int} @@ -1369,8 +1382,8 @@ if [[ "$ENABLED_SERVICES" =~ "q-agt" ]]; then fi # Melange service -if [[ "$ENABLED_SERVICES" =~ "m-svc" ]]; then - if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then +if is_service_enabled m-svc; then + if is_service_enabled mysql; then mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS melange;' mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE melange;' else @@ -1392,11 +1405,11 @@ fi # If we're using Quantum (i.e. q-svc is enabled), network creation has to # happen after we've started the Quantum service. -if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then +if is_service_enabled mysql; then # create a small network $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE - if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then + if is_service_enabled q-svc; then echo "Not creating floating IPs (not supported by QuantumManager)" else # create some floating ips @@ -1419,16 +1432,16 @@ screen_it n-obj "cd $NOVA_DIR && $NOVA_DIR/bin/nova-objectstore" screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume" screen_it n-net "cd $NOVA_DIR && $NOVA_DIR/bin/nova-network" screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler" -if [[ "$ENABLED_SERVICES" =~ "n-novnc" ]]; then +if is_service_enabled n-novnc; then screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --flagfile $NOVA_DIR/bin/nova.conf --web ." fi -if [[ "$ENABLED_SERVICES" =~ "n-xvnc" ]]; then +if is_service_enabled n-xvnc; then screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --flagfile $NOVA_DIR/bin/nova.conf" fi -if [[ "$ENABLED_SERVICES" =~ "n-cauth" ]]; then +if is_service_enabled n-cauth; then screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" fi -if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then +if is_service_enabled horizon; then screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log" fi @@ -1448,7 +1461,7 @@ fi # * **natty**: http://uec-images.ubuntu.com/natty/current/natty-server-cloudimg-amd64.tar.gz # * **oneiric**: http://uec-images.ubuntu.com/oneiric/current/oneiric-server-cloudimg-amd64.tar.gz -if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then +if is_service_enabled g-reg; then # Create a directory for the downloaded image tarballs. mkdir -p $FILES/images @@ -1539,12 +1552,12 @@ echo "" # If you installed the horizon on this server, then you should be able # to access the site using your browser. 
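# For a quick headless check that Apache is actually serving the dashboard,
# the URL printed below can be probed directly (illustrative; expect an
# HTTP 200 once the wsgi application has come up):

    curl -sI "http://$SERVICE_HOST/" | head -n 1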
-if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then +if is_service_enabled horizon; then echo "horizon is now available at http://$SERVICE_HOST/" fi # If keystone is present, you can point nova cli to this server -if [[ "$ENABLED_SERVICES" =~ "key" ]]; then +if is_service_enabled key; then echo "keystone is serving at $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/" echo "examples on using novaclient command line is in exercise.sh" echo "the default users are: admin and demo" From a95efab1c3031b5060bb02d2409237772419bab8 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 16 Feb 2012 10:35:26 +0000 Subject: [PATCH 353/967] Allow to configure a number of swift replicas. Remove the scripts swift-remakerings and swift-startmain along the way. Change-Id: I7c65303791689523f02e5ae44483a6c50b2eed1e --- files/swift/swift-remakerings | 26 ----------- files/swift/swift-startmain | 3 -- stack.sh | 84 +++++++++++++++++++++++------------ 3 files changed, 56 insertions(+), 57 deletions(-) delete mode 100755 files/swift/swift-remakerings delete mode 100755 files/swift/swift-startmain diff --git a/files/swift/swift-remakerings b/files/swift/swift-remakerings deleted file mode 100755 index c65353ce..00000000 --- a/files/swift/swift-remakerings +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -cd %SWIFT_CONFIG_LOCATION% - -rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz - -swift-ring-builder object.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1 -swift-ring-builder object.builder add z1-127.0.0.1:6010/sdb1 1 -swift-ring-builder object.builder add z2-127.0.0.1:6020/sdb2 1 -swift-ring-builder object.builder add z3-127.0.0.1:6030/sdb3 1 -swift-ring-builder object.builder add z4-127.0.0.1:6040/sdb4 1 -swift-ring-builder object.builder rebalance - -swift-ring-builder container.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1 -swift-ring-builder container.builder add z1-127.0.0.1:6011/sdb1 1 -swift-ring-builder container.builder add z2-127.0.0.1:6021/sdb2 1 -swift-ring-builder container.builder add z3-127.0.0.1:6031/sdb3 1 -swift-ring-builder container.builder add z4-127.0.0.1:6041/sdb4 1 -swift-ring-builder container.builder rebalance - -swift-ring-builder account.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1 -swift-ring-builder account.builder add z1-127.0.0.1:6012/sdb1 1 -swift-ring-builder account.builder add z2-127.0.0.1:6022/sdb2 1 -swift-ring-builder account.builder add z3-127.0.0.1:6032/sdb3 1 -swift-ring-builder account.builder add z4-127.0.0.1:6042/sdb4 1 -swift-ring-builder account.builder rebalance diff --git a/files/swift/swift-startmain b/files/swift/swift-startmain deleted file mode 100755 index 69efebd9..00000000 --- a/files/swift/swift-startmain +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -swift-init all restart diff --git a/stack.sh b/stack.sh index 1757b345..26719279 100755 --- a/stack.sh +++ b/stack.sh @@ -404,6 +404,12 @@ SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} # By default we define 9 for the partition count (which mean 512). SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} +# This variable allows you to configure how many replicas you want to be +# configured for your Swift cluster. By default the three replicas would need a +# bit of IO and Memory on a VM you may want to lower that to 1 if you want to do +# only some quick testing. +SWIFT_REPLICAS=${SWIFT_REPLICAS:-3} + # We only ask for Swift Hash if we have enabled swift service. 
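# The SWIFT_REPLICAS default added above is intended to be overridden from the
# environment on small test VMs, where three full replicas cost noticeable I/O
# and memory; for example:

    SWIFT_REPLICAS=1 ./stack.sh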
if is_service_enabled swift; then # SWIFT_HASH is a random unique string for a swift cluster that @@ -967,21 +973,24 @@ if is_service_enabled swift; then # We then create link to that mounted location so swift would know # where to go. - for x in {1..4}; do sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done + for x in $(seq ${SWIFT_REPLICAS}); do + sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done # We now have to emulate a few different servers into one we # create all the directories needed for swift - tmpd="" - for d in ${SWIFT_DATA_LOCATION}/drives/sdb1/{1..4} \ - ${SWIFT_CONFIG_LOCATION}/{object,container,account}-server \ - ${SWIFT_DATA_LOCATION}/{1..4}/node/sdb1 /var/run/swift; do - [[ -d $d ]] && continue - sudo install -o ${USER} -g $USER_GROUP -d $d + for x in $(seq ${SWIFT_REPLICAS}); do + drive=${SWIFT_DATA_LOCATION}/drives/sdb1/${x} + node=${SWIFT_DATA_LOCATION}/${x}/node + node_device=${node}/sdb1 + [[ -d $node ]] && continue + [[ -d $drive ]] && continue + sudo install -o ${USER} -g $USER_GROUP -d $drive + sudo install -o ${USER} -g $USER_GROUP -d $node_device + sudo chown -R $USER: ${node} done - # We do want to make sure this is all owned by our user. - sudo chown -R $USER: ${SWIFT_DATA_LOCATION}/{1..4}/node - sudo chown -R $USER: ${SWIFT_CONFIG_LOCATION} + sudo mkdir -p ${SWIFT_CONFIG_LOCATION}/{object,container,account}-server /var/run/swift + sudo chown -R $USER: ${SWIFT_CONFIG_LOCATION} /var/run/swift # swift-init has a bug using /etc/swift until bug #885595 is fixed # we have to create a link @@ -1022,7 +1031,7 @@ if is_service_enabled swift; then local log_facility=$3 local node_number - for node_number in {1..4}; do + for node_number in $(seq ${SWIFT_REPLICAS}); do node_path=${SWIFT_DATA_LOCATION}/${node_number} sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \ $FILES/swift/${server_type}-server.conf > ${SWIFT_CONFIG_LOCATION}/${server_type}-server/${node_number}.conf @@ -1045,29 +1054,48 @@ if is_service_enabled swift; then tee /etc/rsyslog.d/10-swift.conf sudo restart rsyslog - # We create two helper scripts : - # - # - swift-remakerings - # Allow to recreate rings from scratch. - # - swift-startmain - # Restart your full cluster. - # - sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s/%SWIFT_PARTITION_POWER_SIZE%/$SWIFT_PARTITION_POWER_SIZE/" $FILES/swift/swift-remakerings | \ - sudo tee /usr/local/bin/swift-remakerings - sudo install -m755 $FILES/swift/swift-startmain /usr/local/bin/ + # This is where we create three different rings for swift with + # different object servers binding on different ports. 
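# For reference, the swift-ring-builder calls that follow take their
# positional arguments as
#     create <part_power> <replicas> <min_part_hours>
#     add z<zone>-<ip>:<port>/<device> <weight>
# so SWIFT_PARTITION_POWER_SIZE and SWIFT_REPLICAS feed straight into the
# rings. Once built, a ring can be inspected by naming just the builder file:

    cd ${SWIFT_CONFIG_LOCATION} && swift-ring-builder object.builder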
+ pushd ${SWIFT_CONFIG_LOCATION} >/dev/null && { + + rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz + + port_number=6010 + swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + for x in $(seq ${SWIFT_REPLICAS}); do + swift-ring-builder object.builder add z${x}-127.0.0.1:${port_number}/sdb1 1 + port_number=$[port_number + 10] + done + swift-ring-builder object.builder rebalance + + port_number=6011 + swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + for x in $(seq ${SWIFT_REPLICAS}); do + swift-ring-builder container.builder add z${x}-127.0.0.1:${port_number}/sdb1 1 + port_number=$[port_number + 10] + done + swift-ring-builder container.builder rebalance + + port_number=6012 + swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + for x in $(seq ${SWIFT_REPLICAS}); do + swift-ring-builder account.builder add z${x}-127.0.0.1:${port_number}/sdb1 1 + port_number=$[port_number + 10] + done + swift-ring-builder account.builder rebalance + + } && popd >/dev/null + sudo chmod +x /usr/local/bin/swift-* # We then can start rsync. sudo /etc/init.d/rsync restart || : - # Create our ring for the object/container/account. - /usr/local/bin/swift-remakerings - - # And now we launch swift-startmain to get our cluster running - # ready to be tested. - /usr/local/bin/swift-startmain || : + # TODO: Bring some services in foreground. + # Launch all services. + swift-init all start - unset s swift_hash swift_auth_server tmpd + unset s swift_hash swift_auth_server fi # Volume Service From 6a3f6072729b4405d4201cf5f8340e3f4f40416d Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 16 Feb 2012 16:31:24 -0600 Subject: [PATCH 354/967] Use keystone master branch by default Change-Id: I25cf3908dcdde87e79d9a55ff3f7aef5610d7102 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 2274e624..a20426b3 100644 --- a/stackrc +++ b/stackrc @@ -16,7 +16,7 @@ GLANCE_BRANCH=master # unified auth system (manages accounts/tokens) KEYSTONE_REPO=https://github.com/openstack/keystone.git -KEYSTONE_BRANCH=redux +KEYSTONE_BRANCH=master # a websockets/html5 or flash powered VNC console for vm instances NOVNC_REPO=https://github.com/cloudbuilders/noVNC.git From 398de1b0da14d35fffa5c946e3d6ca1553b120b3 Mon Sep 17 00:00:00 2001 From: Tomoe Sugihara Date: Fri, 17 Feb 2012 11:28:06 +0900 Subject: [PATCH 355/967] Add ovs-ofctl to the sudo command list Change-Id: I0a7de0fb80aeb71b1ab67fd64e95376d2aa2afa6 --- files/sudo/nova | 1 + 1 file changed, 1 insertion(+) diff --git a/files/sudo/nova b/files/sudo/nova index bde15193..3231e2da 100644 --- a/files/sudo/nova +++ b/files/sudo/nova @@ -42,6 +42,7 @@ Cmnd_Alias NOVADEVCMDS = /bin/chmod /var/lib/nova/tmp/*/root/.ssh, \ /sbin/parted, \ /usr/sbin/dnsmasq, \ /usr/bin/ovs-vsctl, \ + /usr/bin/ovs-ofctl, \ /usr/sbin/arping %USER% ALL = (root) NOPASSWD: SETENV: NOVADEVCMDS From 18ee4eaf582d27012173ed017d57283f662d7cb5 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 17 Feb 2012 15:22:30 -0800 Subject: [PATCH 356/967] Fix admin version pipeline. 
* ala https://review.openstack.org/#change,4282 Change-Id: I1cb3ad4b3ff2c8f339c18db15e44a862bbc07282 --- files/keystone.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/keystone.conf b/files/keystone.conf index 45044f76..b26e70d6 100644 --- a/files/keystone.conf +++ b/files/keystone.conf @@ -89,4 +89,4 @@ use = egg:Paste#urlmap [composite:admin] use = egg:Paste#urlmap /v2.0 = admin_api -/ = admin_version_service +/ = admin_version_api From f62caf6db401b8d29e431081afd8e8817e5217e5 Mon Sep 17 00:00:00 2001 From: Eoghan Glynn Date: Sat, 18 Feb 2012 23:55:04 +0000 Subject: [PATCH 357/967] Add python-iso8601 to nova and glance dependencies Nova and glance will shortly require the python-iso8601 package in order to support non-UTC timestamps in the changes-since filter. In particular, the following nova patch is blocked by devstack failing due to the missing dependency: https://review.openstack.org/#change,4211 Change-Id: Iaae8812cf19c9ae1ad237b7fda7d99827aca1485 --- AUTHORS | 1 + files/apts/glance | 1 + files/apts/nova | 1 + 3 files changed, 3 insertions(+) diff --git a/AUTHORS b/AUTHORS index dc12105f..b1478273 100644 --- a/AUTHORS +++ b/AUTHORS @@ -7,6 +7,7 @@ Chmouel Boudjnah Dean Troyer Devin Carlen Eddie Hebert +Eoghan Glynn Jake Dahn James E. Blair Jason Cannavale diff --git a/files/apts/glance b/files/apts/glance index 1e87d589..71230c49 100644 --- a/files/apts/glance +++ b/files/apts/glance @@ -6,3 +6,4 @@ python-sqlalchemy python-wsgiref python-pastedeploy python-xattr +python-iso8601 diff --git a/files/apts/nova b/files/apts/nova index bc0c23b7..f2059ba7 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -41,3 +41,4 @@ python-m2crypto python-boto python-kombu python-feedparser +python-iso8601 From f5f72a15f8cdd142e1a769859af60140b9e2b097 Mon Sep 17 00:00:00 2001 From: Eoghan Glynn Date: Tue, 21 Feb 2012 14:49:39 +0000 Subject: [PATCH 358/967] Remove duplicated glance paste config. When glance paste config was split out from the core config, devstack duplicated the former in both files in order to work around the chicken-and-egg problem with the glance change being gated on devstack passing. This transitionary arrangement is no longer required so may now be removed. 
Change-Id: I42ff9cd8ea9cae62a2a0cb72490f1c8a6c17056c --- stack.sh | 6 ------ 1 file changed, 6 deletions(-) diff --git a/stack.sh b/stack.sh index 6e92a84f..6da985da 100755 --- a/stack.sh +++ b/stack.sh @@ -784,9 +784,6 @@ if is_service_enabled g-reg; then GLANCE_REGISTRY_PASTE_INI=$GLANCE_DIR/etc/glance-registry-paste.ini cp $FILES/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI glance_config $GLANCE_REGISTRY_PASTE_INI - # During the transition for Glance to the split config files - # we cat them together to handle both pre- and post-merge - cat $GLANCE_REGISTRY_PASTE_INI >>$GLANCE_REGISTRY_CONF fi GLANCE_API_CONF=$GLANCE_DIR/etc/glance-api.conf @@ -797,9 +794,6 @@ if is_service_enabled g-reg; then GLANCE_API_PASTE_INI=$GLANCE_DIR/etc/glance-api-paste.ini cp $FILES/glance-api-paste.ini $GLANCE_API_PASTE_INI glance_config $GLANCE_API_PASTE_INI - # During the transition for Glance to the split config files - # we cat them together to handle both pre- and post-merge - cat $GLANCE_API_PASTE_INI >>$GLANCE_API_CONF fi fi From a3a496f9c2de8ab161bbe1e5723e66659bb1072e Mon Sep 17 00:00:00 2001 From: Gabriel Hurley Date: Mon, 13 Feb 2012 12:29:23 -0800 Subject: [PATCH 359/967] Adds quantum service and endpoint to keystone if quantum is enabled. Change-Id: I41eac84a48e8e716b77b7c874244c626b5df3006 --- files/default_catalog.templates | 6 ++++++ files/keystone_data.sh | 7 ++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/files/default_catalog.templates b/files/default_catalog.templates index b527ae50..a5b45d73 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -28,3 +28,9 @@ catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_$( catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/ catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s catalog.RegionOne.object_store.name = 'Swift Service' + + +catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:9696/ +catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:9696/ +catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:9696/ +catalog.RegionOne.network.name = 'Quantum Service' diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 408e36d3..cc2421c9 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -32,7 +32,6 @@ NETADMIN_ROLE=`get_id keystone role-create --name=netadmin` # Add Roles to Users in Tenants - keystone add-user-role $ADMIN_USER $ADMIN_ROLE $ADMIN_TENANT keystone add-user-role $DEMO_USER $MEMBER_ROLE $DEMO_TENANT keystone add-user-role $DEMO_USER $SYSADMIN_ROLE $DEMO_TENANT @@ -70,6 +69,12 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then --type="object-store" \ --description="Swift Service" fi +if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then + keystone service-create \ + --name=quantum \ + --type=network \ + --description="Quantum Service" +fi # create ec2 creds and parse the secret and access key returned RESULT=`keystone ec2-create-credentials --tenant_id=$ADMIN_TENANT --user_id=$ADMIN_USER` From e7ed17eea4975931401d80e360cbd66e797ef258 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Tue, 21 Feb 2012 17:43:33 -0500 Subject: [PATCH 360/967] Delete security group after terminating instance. This patch slightly modifies the order of operations in the euca exercise script. It moves the deletion of the security group to the end, after the instance has been terminated. 
The reason this change must be made is because of this change proposed to nova: https://review.openstack.org/#change,4154 Without this change, when exercise.sh is run against this patch, the euca exercises will fail when the security group is deleted. Moving it to the end should make it pass, as the security group will no longer be in use. Change-Id: I9095a02c42173ed1837ec20b38d5ef00fe4474ec --- AUTHORS | 1 + exercises/euca.sh | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/AUTHORS b/AUTHORS index b1478273..5769f4f7 100644 --- a/AUTHORS +++ b/AUTHORS @@ -16,6 +16,7 @@ Jesse Andrews Johannes Erdfelt Justin Shepherd Kiall Mac Innes +Russell Bryant Scott Moser Todd Willey Tres Henry diff --git a/exercises/euca.sh b/exercises/euca.sh index 834e4ecf..7f486957 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -74,9 +74,6 @@ fi # Revoke pinging euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP -# Delete group -euca-delete-group $SECGROUP - # Release floating address euca-disassociate-address $FLOATING_IP @@ -97,3 +94,6 @@ fi # Terminate instance euca-terminate-instances $INSTANCE + +# Delete group +euca-delete-group $SECGROUP From 243b26a84e0328eaa5e3051a17416b3323c2c703 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Wed, 22 Feb 2012 11:19:32 -0500 Subject: [PATCH 361/967] Wait for instance termination to complete. This patch waits for instance termination to complete before trying to delete the security group. The last change to simply move the security group deletion to after euca-terminate-instance was not sufficient, as it has to wait until the termination is complete. Change-Id: Icba579534f324afb4d44abd42d8c755834dd2a57 --- exercises/euca.sh | 6 ++++++ openrc | 3 +++ 2 files changed, 9 insertions(+) diff --git a/exercises/euca.sh b/exercises/euca.sh index 7f486957..86cd6732 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -95,5 +95,11 @@ fi # Terminate instance euca-terminate-instances $INSTANCE +# Assure it has terminated within a reasonable time +if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then + echo "server didn't terminate within $TERMINATE_TIMEOUT seconds" + exit 1 +fi + # Delete group euca-delete-group $SECGROUP diff --git a/openrc b/openrc index df011a32..d742ced4 100644 --- a/openrc +++ b/openrc @@ -89,3 +89,6 @@ export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))} # Max time to wait for proper IP association and dis-association. export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15} + +# Max time to wait for a vm to terminate +export TERMINATE_TIMEOUT=${TERMINATE_TIMEOUT:-30} From 55707079af9e711ac226dc1e47754f008ee40868 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 22 Feb 2012 10:18:31 -0600 Subject: [PATCH 362/967] Fix nova.conf paths https://review.openstack.org/4227 missed a couple of paths to nova.conf that need to be corrected. 
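A simple way to confirm that no stale references of this kind remain is to grep the script directly (illustrative check, run from the devstack checkout):

    grep -n 'NOVA_DIR/bin/nova\.conf' stack.sh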
Fixes bug 938692 Change-Id: I083baacc18792088a2a3ab032768c895a4feae76 --- stack.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 6da985da..89ea76e8 100755 --- a/stack.sh +++ b/stack.sh @@ -1115,7 +1115,7 @@ rm -f $NOVA_CONF/nova.conf add_nova_flag "--verbose" add_nova_flag "--allow_admin_api" add_nova_flag "--scheduler_driver=$SCHEDULER" -add_nova_flag "--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf" +add_nova_flag "--dhcpbridge_flagfile=$NOVA_CONF/nova.conf" add_nova_flag "--fixed_range=$FIXED_RANGE" if is_service_enabled n-obj; then add_nova_flag "--s3_host=$SERVICE_HOST" @@ -1426,10 +1426,10 @@ screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume" screen_it n-net "cd $NOVA_DIR && $NOVA_DIR/bin/nova-network" screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler" if is_service_enabled n-novnc; then - screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --flagfile $NOVA_DIR/bin/nova.conf --web ." + screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --flagfile $NOVA_CONF/nova.conf --web ." fi if is_service_enabled n-xvnc; then - screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --flagfile $NOVA_DIR/bin/nova.conf" + screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --flagfile $NOVA_CONF/nova.conf" fi if is_service_enabled n-cauth; then screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" From 1e98bdc5b4c3b49b00ba5471c86426a5fb24055b Mon Sep 17 00:00:00 2001 From: Deepak Garg Date: Wed, 22 Feb 2012 12:15:26 +0530 Subject: [PATCH 363/967] Bug #938403. Added the line for installing python-quantumclient. Manually Tested. Note: Fixes added regarding horizon's dependency on quantum-client Change-Id: I1d739f81ce2118363cc05ec4a858ac2e732e9d0d --- stack.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/stack.sh b/stack.sh index 89ea76e8..8e717604 100755 --- a/stack.sh +++ b/stack.sh @@ -582,6 +582,8 @@ fi if is_service_enabled q-svc; then # quantum git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH +fi +if is_service_enabled q-svc horizon; then git_clone $QUANTUM_CLIENT_REPO $QUANTUM_CLIENT_DIR $QUANTUM_CLIENT_BRANCH fi @@ -620,6 +622,9 @@ fi if is_service_enabled q-svc; then cd $QUANTUM_DIR; sudo python setup.py develop fi +if is_service_enabled q-svc horizon; then + cd $QUANTUM_CLIENT_DIR; sudo python setup.py develop +fi if is_service_enabled m-svc; then cd $MELANGE_DIR; sudo python setup.py develop fi From aaa0dbb2e7aab9a1b8ebeb2cc6c3d373a0f81d5c Mon Sep 17 00:00:00 2001 From: "Derrick J. Wippler" Date: Tue, 21 Feb 2012 09:53:53 -0600 Subject: [PATCH 364/967] Restart openstack services after running stack.sh * Added screen_rc function in stack.sh to save started services to stack-screenrc file * Added rejoin-stack.sh to rejoin the current screen session or start a new session Change-Id: I381a7832bc8a107bfbd51c5ecfbd2e9134cf0a0a blueprint: restart-services --- rejoin-stack.sh | 18 ++++++++++++++++++ stack.sh | 23 ++++++++++++++++++++++- 2 files changed, 40 insertions(+), 1 deletion(-) create mode 100755 rejoin-stack.sh diff --git a/rejoin-stack.sh b/rejoin-stack.sh new file mode 100755 index 00000000..a82c73cb --- /dev/null +++ b/rejoin-stack.sh @@ -0,0 +1,18 @@ +#! /usr/bin/env bash + +# This script rejoins an existing screen, or re-creates a +# screen session from a previous run of stack.sh. 
+ +TOP_DIR=`dirname $0` + +# if screenrc exists, run screen +if [[ -e $TOP_DIR/stack-screenrc ]]; then + if screen -ls | egrep -q "[0-9].stack"; then + echo "Attaching to already started screen session.." + exec screen -r stack + fi + exec screen -c $TOP_DIR/stack-screenrc +fi + +echo "Couldn't find $TOP_DIR/stack-screenrc file; have you run stack.sh yet?" +exit 1 diff --git a/stack.sh b/stack.sh index 8e717604..e67520de 100755 --- a/stack.sh +++ b/stack.sh @@ -1249,10 +1249,31 @@ fi # so send the start command by forcing text into the window. # Only run the services specified in ``ENABLED_SERVICES`` -# our screen helper to launch a service in a hidden named screen +# Our screenrc file builder +function screen_rc { + SCREENRC=$TOP_DIR/stack-screenrc + if [[ ! -e $SCREENRC ]]; then + # Name the screen session + echo "sessionname stack" > $SCREENRC + # Set a reasonable statusbar + echo 'hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H"' >> $SCREENRC + echo "screen -t stack bash" >> $SCREENRC + fi + # If this service doesn't already exist in the screenrc file + if ! grep $1 $SCREENRC 2>&1 > /dev/null; then + NL=`echo -ne '\015'` + echo "screen -t $1 bash" >> $SCREENRC + echo "stuff \"$2$NL\"" >> $SCREENRC + fi +} + +# Our screen helper to launch a service in a hidden named screen function screen_it { NL=`echo -ne '\015'` if is_service_enabled $1; then + # Append the service to the screen rc file + screen_rc "$1" "$2" + screen -S stack -X screen -t $1 # sleep to allow bash to be ready to be send the command - we are # creating a new window in screen and then sends characters, so if From 7bd3087e73c0683acf5df40d1eed51709d865c06 Mon Sep 17 00:00:00 2001 From: Gabriel Hurley Date: Thu, 23 Feb 2012 13:20:03 -0800 Subject: [PATCH 365/967] Only add Quantum and Swift to Keystone catalog if enabled. Incidentally removes some outdated (unused) settings from the Horizon settings file. Fixes bug 939820. 
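A quick post-run sanity check for this change is to look at the generated catalog template and confirm that the network and object-store entries appear only when the corresponding services are enabled (illustrative; the template path matches the KEYSTONE_CATALOG variable in the diff below):

    grep -E 'network|object_store' $KEYSTONE_DIR/etc/default_catalog.templates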
Change-Id: I61e97c194070e46d21a6c0eb66eef88bd14efeee --- files/default_catalog.templates | 12 ------------ files/horizon_settings.py | 7 ------- stack.sh | 17 +++++++++++++++++ 3 files changed, 17 insertions(+), 19 deletions(-) diff --git a/files/default_catalog.templates b/files/default_catalog.templates index a5b45d73..f6125bb9 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -22,15 +22,3 @@ catalog.RegionOne.image.publicURL = http://%SERVICE_HOST%:9292/v1 catalog.RegionOne.image.adminURL = http://%SERVICE_HOST%:9292/v1 catalog.RegionOne.image.internalURL = http://%SERVICE_HOST%:9292/v1 catalog.RegionOne.image.name = 'Image Service' - - -catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s -catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/ -catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s -catalog.RegionOne.object_store.name = 'Swift Service' - - -catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:9696/ -catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:9696/ -catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:9696/ -catalog.RegionOne.network.name = 'Quantum Service' diff --git a/files/horizon_settings.py b/files/horizon_settings.py index 05ddfe7b..bbff08fa 100644 --- a/files/horizon_settings.py +++ b/files/horizon_settings.py @@ -49,13 +49,6 @@ SWIFT_PAGINATE_LIMIT = 100 -# Configure quantum connection details for networking -QUANTUM_ENABLED = False -QUANTUM_URL = '%s' % OPENSTACK_HOST -QUANTUM_PORT = '9696' -QUANTUM_TENANT = '1234' -QUANTUM_CLIENT_VERSION='0.1' - # If you have external monitoring links, eg: # EXTERNAL_MONITORING = [ # ['Nagios','http://foo.com'], diff --git a/stack.sh b/stack.sh index 8e717604..6789778f 100755 --- a/stack.sh +++ b/stack.sh @@ -1298,6 +1298,23 @@ if is_service_enabled key; then KEYSTONE_CATALOG=$KEYSTONE_DIR/etc/default_catalog.templates cp $FILES/default_catalog.templates $KEYSTONE_CATALOG + + # Add swift endpoints to service catalog if swift is enabled + if is_service_enabled swift; then + echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.object_store.name = 'Swift Service'" >> $KEYSTONE_CATALOG + fi + + # Add quantum endpoints to service catalog if quantum is enabled + if is_service_enabled quantum; then + echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.network.name = 'Quantum Service'" >> $KEYSTONE_CATALOG + fi + sudo sed -e "s,%SERVICE_HOST%,$SERVICE_HOST,g" -i $KEYSTONE_CATALOG From 7a103dda1c7c40529d5eee4a2fdc9bf9aae883c9 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 23 Feb 2012 23:35:43 +0000 Subject: [PATCH 366/967] Remove legacy paste config and nova.conf * Fixes bug 939907 Change-Id: Icef50b51bca86f78214f68f027283f2fa00807e1 --- stack.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/stack.sh b/stack.sh index 6789778f..4e094cd0 100755 
--- a/stack.sh +++ b/stack.sh @@ -817,6 +817,9 @@ if is_service_enabled n-api; then # one from the keystone project to launch nova. This paste config adds # the configuration required for nova to validate keystone tokens. + # Remove legacy paste config + rm -f $NOVA_DIR/bin/nova-api-paste.ini + # First we add a some extra data to the default paste config from nova cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF @@ -1115,6 +1118,9 @@ function add_nova_flag { echo "$1" >> $NOVA_CONF/nova.conf } +# remove legacy nova.conf +rm -f $NOVA_DIR/bin/nova.conf + # (re)create nova.conf rm -f $NOVA_CONF/nova.conf add_nova_flag "--verbose" From 155266b94713da74065e5555cd42485f6e8915e7 Mon Sep 17 00:00:00 2001 From: Gabriel Hurley Date: Thu, 23 Feb 2012 16:54:01 -0800 Subject: [PATCH 367/967] Preserve swift tenant_id variable when writing into keystone catalog. Change-Id: Ib01e520f554dbf3f3a07e120e28c95a005172dfc --- AUTHORS | 1 + stack.sh | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/AUTHORS b/AUTHORS index 5769f4f7..561826cf 100644 --- a/AUTHORS +++ b/AUTHORS @@ -8,6 +8,7 @@ Dean Troyer Devin Carlen Eddie Hebert Eoghan Glynn +Gabriel Hurley Jake Dahn James E. Blair Jason Cannavale diff --git a/stack.sh b/stack.sh index 4b8208e2..61e73568 100755 --- a/stack.sh +++ b/stack.sh @@ -1328,9 +1328,9 @@ if is_service_enabled key; then # Add swift endpoints to service catalog if swift is enabled if is_service_enabled swift; then - echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG - echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG echo "catalog.RegionOne.object_store.name = 'Swift Service'" >> $KEYSTONE_CATALOG fi From f4565c46940c1e2e5ab62b2fb05fc59633a15fc0 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 23 Feb 2012 11:21:10 -0600 Subject: [PATCH 368/967] Make keystone_data.sh handle existing and updated keystone client Keystone client commands and options have been normalized and keystone_data.sh needs to support both versions for a transition period. The merge prop for the updated keystone client is https://review.openstack.org/4375 Necessary for bug 396422 Also fix an intermittent problem extacting IDs from command output. Change-Id: Ib13445a0bd3029fb02b0b7a86e8e0b8278717b57 --- files/keystone_data.sh | 61 ++++++++++++++++++++++++++++++++---------- 1 file changed, 47 insertions(+), 14 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index cc2421c9..ed85aca0 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -4,9 +4,18 @@ export SERVICE_TOKEN=$SERVICE_TOKEN export SERVICE_ENDPOINT=$SERVICE_ENDPOINT function get_id () { - echo `$@ | grep id | awk '{print $4}'` + echo `$@ | grep ' id ' | awk '{print $4}'` } +# Detect if the keystone cli binary has the command names changed +# in https://review.openstack.org/4375 +# FIXME(dtroyer): Remove the keystone client command checking +# after a suitable transition period. 
add-user-role +# and ec2-create-credentials were renamed +if keystone help | grep -q user-role-add; then + KEYSTONE_COMMAND_4375=1 +fi + ADMIN_TENANT=`get_id keystone tenant-create --name=admin` DEMO_TENANT=`get_id keystone tenant-create --name=demo` INVIS_TENANT=`get_id keystone tenant-create --name=invisible_to_admin` @@ -31,17 +40,33 @@ SYSADMIN_ROLE=`get_id keystone role-create --name=sysadmin` NETADMIN_ROLE=`get_id keystone role-create --name=netadmin` -# Add Roles to Users in Tenants -keystone add-user-role $ADMIN_USER $ADMIN_ROLE $ADMIN_TENANT -keystone add-user-role $DEMO_USER $MEMBER_ROLE $DEMO_TENANT -keystone add-user-role $DEMO_USER $SYSADMIN_ROLE $DEMO_TENANT -keystone add-user-role $DEMO_USER $NETADMIN_ROLE $DEMO_TENANT -keystone add-user-role $DEMO_USER $MEMBER_ROLE $INVIS_TENANT -keystone add-user-role $ADMIN_USER $ADMIN_ROLE $DEMO_TENANT - -# TODO(termie): these two might be dubious -keystone add-user-role $ADMIN_USER $KEYSTONEADMIN_ROLE $ADMIN_TENANT -keystone add-user-role $ADMIN_USER $KEYSTONESERVICE_ROLE $ADMIN_TENANT +if [[ -n "$KEYSTONE_COMMAND_4375" ]]; then + # Add Roles to Users in Tenants + keystone user-role-add --user $ADMIN_USER --role $ADMIN_ROLE --tenant_id $ADMIN_TENANT + keystone user-role-add --user $DEMO_USER --role $MEMBER_ROLE --tenant_id $DEMO_TENANT + keystone user-role-add --user $DEMO_USER --role $SYSADMIN_ROLE --tenant_id $DEMO_TENANT + keystone user-role-add --user $DEMO_USER --role $NETADMIN_ROLE --tenant_id $DEMO_TENANT + keystone user-role-add --user $DEMO_USER --role $MEMBER_ROLE --tenant_id $INVIS_TENANT + keystone user-role-add --user $ADMIN_USER --role $ADMIN_ROLE --tenant_id $DEMO_TENANT + + # TODO(termie): these two might be dubious + keystone user-role-add --user $ADMIN_USER --role $KEYSTONEADMIN_ROLE --tenant_id $ADMIN_TENANT + keystone user-role-add --user $ADMIN_USER --role $KEYSTONESERVICE_ROLE --tenant_id $ADMIN_TENANT +else + ### compat + # Add Roles to Users in Tenants + keystone add-user-role $ADMIN_USER $ADMIN_ROLE $ADMIN_TENANT + keystone add-user-role $DEMO_USER $MEMBER_ROLE $DEMO_TENANT + keystone add-user-role $DEMO_USER $SYSADMIN_ROLE $DEMO_TENANT + keystone add-user-role $DEMO_USER $NETADMIN_ROLE $DEMO_TENANT + keystone add-user-role $DEMO_USER $MEMBER_ROLE $INVIS_TENANT + keystone add-user-role $ADMIN_USER $ADMIN_ROLE $DEMO_TENANT + + # TODO(termie): these two might be dubious + keystone add-user-role $ADMIN_USER $KEYSTONEADMIN_ROLE $ADMIN_TENANT + keystone add-user-role $ADMIN_USER $KEYSTONESERVICE_ROLE $ADMIN_TENANT + ### +fi # Services keystone service-create \ @@ -77,13 +102,21 @@ if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then fi # create ec2 creds and parse the secret and access key returned -RESULT=`keystone ec2-create-credentials --tenant_id=$ADMIN_TENANT --user_id=$ADMIN_USER` +if [[ -n "$KEYSTONE_COMMAND_4375" ]]; then + RESULT=`keystone ec2-credentials-create --tenant_id=$ADMIN_TENANT --user=$ADMIN_USER` +else + RESULT=`keystone ec2-create-credentials --tenant_id=$ADMIN_TENANT --user_id=$ADMIN_USER` +fi echo `$@ | grep id | awk '{print $4}'` ADMIN_ACCESS=`echo "$RESULT" | grep access | awk '{print $4}'` ADMIN_SECRET=`echo "$RESULT" | grep secret | awk '{print $4}'` -RESULT=`keystone ec2-create-credentials --tenant_id=$DEMO_TENANT --user_id=$DEMO_USER` +if [[ -n "$KEYSTONE_COMMAND_4375" ]]; then + RESULT=`keystone ec2-credentials-create --tenant_id=$DEMO_TENANT --user=$DEMO_USER` +else + RESULT=`keystone ec2-create-credentials --tenant_id=$DEMO_TENANT --user_id=$DEMO_USER` +fi DEMO_ACCESS=`echo "$RESULT" | grep 
access | awk '{print $4}'` DEMO_SECRET=`echo "$RESULT" | grep secret | awk '{print $4}'` From 231ce63a146003a38c336b2f1f539882c9380266 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 24 Feb 2012 02:01:31 +0000 Subject: [PATCH 369/967] Enable nova-volume by default Change-Id: I624eaab54619ef0ea6aec980df4a26e019b9674a --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 61e73568..c7448537 100755 --- a/stack.sh +++ b/stack.sh @@ -183,7 +183,7 @@ M_HOST=${M_HOST:-localhost} M_MAC_RANGE=${M_MAC_RANGE:-404040/24} # Specify which services to launch. These generally correspond to screen tabs -ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit} +ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit} # Name of the lvm volume group to use/create for iscsi volumes VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes} From 5f9473e8b9bdc15f42db597d5d1e766b760f764e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 24 Feb 2012 01:57:07 +0000 Subject: [PATCH 370/967] Add nova-volume endpoint to service catalog Change-Id: Id04568d7f8eecc8c8e7c1a92990d37a46923caf7 --- files/default_catalog.templates | 12 +++++++++--- files/keystone_data.sh | 8 ++++++++ 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/files/default_catalog.templates b/files/default_catalog.templates index f6125bb9..b9b18441 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -6,12 +6,18 @@ catalog.RegionOne.identity.internalURL = http://%SERVICE_HOST%:$(public_port)s/v catalog.RegionOne.identity.name = 'Identity Service' -catalog.RegionOne.compute.publicURL = http://%SERVICE_HOST%:8774/v1.1/$(tenant_id)s -catalog.RegionOne.compute.adminURL = http://%SERVICE_HOST%:8774/v1.1/$(tenant_id)s -catalog.RegionOne.compute.internalURL = http://%SERVICE_HOST%:8774/v1.1/$(tenant_id)s +catalog.RegionOne.compute.publicURL = http://%SERVICE_HOST%:8774/v2/$(tenant_id)s +catalog.RegionOne.compute.adminURL = http://%SERVICE_HOST%:8774/v2/$(tenant_id)s +catalog.RegionOne.compute.internalURL = http://%SERVICE_HOST%:8774/v2/$(tenant_id)s catalog.RegionOne.compute.name = 'Compute Service' +catalog.RegionOne.volume.publicURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s +catalog.RegionOne.volume.adminURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s +catalog.RegionOne.volume.internalURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s +catalog.RegionOne.volume.name = 'Volume Service' + + catalog.RegionOne.ec2.publicURL = http://%SERVICE_HOST%:8773/services/Cloud catalog.RegionOne.ec2.adminURL = http://%SERVICE_HOST%:8773/services/Admin catalog.RegionOne.ec2.internalURL = http://%SERVICE_HOST%:8773/services/Cloud diff --git a/files/keystone_data.sh b/files/keystone_data.sh index ed85aca0..3f4841f9 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -88,6 +88,14 @@ keystone service-create \ --name=keystone \ --type=identity \ --description="Keystone Identity Service" + +if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then + keystone service-create \ + --name="nova-volume" \ + --type=volume \ + --description="Nova Volume Service" +fi + if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then keystone service-create \ --name=swift \ From 31986b247c9182e37d497588cfe26ff753ef50dc Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 23 Feb 2012 21:34:29 -0600 Subject: [PATCH 371/967] Remove pycli from 
keystone pips Removed from keystone pip-requires in https://review.openstack.org/4261 Change-Id: I629881f47fa198019e60fc81ca38917fa0b60dd8 --- files/pips/keystone | 1 - 1 file changed, 1 deletion(-) diff --git a/files/pips/keystone b/files/pips/keystone index fef9f8b0..09636e49 100644 --- a/files/pips/keystone +++ b/files/pips/keystone @@ -1,2 +1 @@ PassLib -pycli From ce043c4dbf38a4947274cbfcbee79bd8e6283a6c Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 3 Feb 2012 22:56:38 -0600 Subject: [PATCH 372/967] README updates Change-Id: I0732a26a50b736e89d35b03a9ff79fa068804677 --- README.md | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index a185f34f..0fb85811 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,7 @@ Devstack is a set of scripts and utilities to quickly deploy an OpenStack cloud. * To describe working configurations of OpenStack (which code branches work together? what do config files look like for those branches?) * To make it easier for developers to dive into OpenStack so that they can productively contribute without having to understand every part of the system at once * To make it easy to prototype cross-project features +* To sanity-check OpenStack builds (used in gating commits to the primary repos) Read more at http://devstack.org (built from the gh-pages branch) @@ -13,13 +14,19 @@ IMPORTANT: Be sure to carefully read stack.sh and any other scripts you execute # Versions -The devstack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[mil -estone]. For example, you can do the following to create a diablo OpenStack cloud: +The devstack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[release]. For example, you can do the following to create a diablo OpenStack cloud: git checkout stable/diablo ./stack.sh -# To start a dev cloud (Installing in a dedicated, disposable vm is safer than installing on your dev machine!): +Milestone builds are also available in this manner: + + git checkout essex-3 + ./stack.sh + +# Start A Dev Cloud + +Installing in a dedicated disposable vm is safer than installing on your dev machine! To start a dev cloud: ./stack.sh From 5836b1533a8d3cbe4dac7ad90c862948dadb1fbc Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Fri, 24 Feb 2012 10:23:33 -0500 Subject: [PATCH 373/967] Wait for VM to stop before deleting security group. Related to bug 938853. The patch for this bug prevents deleting security groups through the OpenStack API if they are still in use. This patch for devstack updates the floating_ips exercise script to wait until the VM has stopped before deleting the security group. Change-Id: If42f85934c2b92d4d001c419cabb09e2e3dc1aae --- exercises/floating_ips.sh | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 233313e8..b559965f 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -195,8 +195,11 @@ nova floating-ip-delete $TEST_FLOATING_IP # shutdown the server nova delete $VM_UUID +# make sure the VM shuts down within a reasonable time +if ! timeout $TERMINATE_TIMEOUT sh -c "while nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then + echo "server didn't shut down!" 
+ exit 1 +fi + # Delete a secgroup nova secgroup-delete $SECGROUP - -# FIXME: validate shutdown within 5 seconds -# (nova show $NAME returns 1 or status != ACTIVE)? From 2e9158ebc61b349d4f238d2254bc1b9899d3f6fa Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Sat, 25 Feb 2012 08:02:18 +0000 Subject: [PATCH 374/967] Add S3 extension to keystone.conf Change-Id: I52bae49a8071cc0087a9ddc6b9857245ffdae556 --- files/keystone.conf | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/files/keystone.conf b/files/keystone.conf index b26e70d6..76c618a0 100644 --- a/files/keystone.conf +++ b/files/keystone.conf @@ -57,6 +57,9 @@ paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory [filter:ec2_extension] paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory +[filter:s3_extension] +paste.filter_factory = keystone.contrib.s3:S3Extension.factory + [app:public_service] paste.app_factory = keystone.service:public_app_factory @@ -64,7 +67,7 @@ paste.app_factory = keystone.service:public_app_factory paste.app_factory = keystone.service:admin_app_factory [pipeline:public_api] -pipeline = token_auth admin_token_auth json_body debug ec2_extension public_service +pipeline = token_auth admin_token_auth json_body debug ec2_extension s3_extension public_service [pipeline:admin_api] pipeline = token_auth admin_token_auth json_body debug ec2_extension crud_extension admin_service From 8534bcb6bb15db3d781228880040fd684a7cf228 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Sat, 25 Feb 2012 08:04:48 +0000 Subject: [PATCH 375/967] Update Swift configuration with keystone latest. - Update Swift to use the new tokenauth and swift_auth from keystone. - Drop swift-keystone2. - Add swift3 / s3token in swift proxy pipeline (TODO: figure out testing). - Fix exercises/swift.sh (workaround until review #3712 get merged). Change-Id: Ie85d30e14cee21c6f80043fccde92dfb229f0e80 --- exercises/swift.sh | 14 +++++++++----- files/swift/proxy-server.conf | 32 +++++++++++++++++++++++++++----- stack.sh | 23 ++++++++++++----------- 3 files changed, 48 insertions(+), 21 deletions(-) diff --git a/exercises/swift.sh b/exercises/swift.sh index f7be0994..3a577443 100755 --- a/exercises/swift.sh +++ b/exercises/swift.sh @@ -23,18 +23,22 @@ popd # Testing Swift # ============= +# FIXME(chmou): when review https://review.openstack.org/#change,3712 +# is merged we would be able to use the common openstack options and +# remove the trailing slash to v2.0 auth url. +# # Check if we have to swift via keystone -swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD stat +swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0/ -U admin -K $ADMIN_PASSWORD stat # We start by creating a test container -swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD post testcontainer +swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0/ -U admin -K $ADMIN_PASSWORD post testcontainer # add some files into it. 
-swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue +swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0/ -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue # list them -swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD list testcontainer +swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0/ -U admin -K $ADMIN_PASSWORD list testcontainer # And we may want to delete them now that we have tested that # everything works. -swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete testcontainer +swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0/ -U admin -K $ADMIN_PASSWORD delete testcontainer diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf index 3ef02769..d6db117c 100644 --- a/files/swift/proxy-server.conf +++ b/files/swift/proxy-server.conf @@ -8,7 +8,7 @@ log_facility = LOG_LOCAL1 log_level = DEBUG [pipeline:main] -pipeline = healthcheck cache %AUTH_SERVER% proxy-server +pipeline = healthcheck cache swift3 %AUTH_SERVER% proxy-server [app:proxy-server] use = egg:swift#proxy @@ -16,10 +16,32 @@ allow_account_management = true account_autocreate = true [filter:keystone] -use = egg:swiftkeystone2#keystone2 -keystone_admin_token = %SERVICE_TOKEN% -keystone_url = http://localhost:35357/v2.0 -keystone_swift_operator_roles = Member,admin +paste.filter_factory = keystone.middleware.swift_auth:filter_factory +operator_roles = Member,admin + +[filter:s3token] +paste.filter_factory = keystone.middleware.s3_token:filter_factory +service_port = %KEYSTONE_SERVICE_PORT% +service_host = %KEYSTONE_SERVICE_HOST% +auth_port = %KEYSTONE_AUTH_PORT% +auth_host = %KEYSTONE_AUTH_HOST% +auth_protocol = %KEYSTONE_AUTH_PROTOCOL% +auth_token = %SERVICE_TOKEN% +admin_token = %SERVICE_TOKEN% + +[filter:tokenauth] +paste.filter_factory = keystone.middleware.auth_token:filter_factory +service_port = %KEYSTONE_SERVICE_PORT% +service_host = %KEYSTONE_SERVICE_HOST% +auth_port = %KEYSTONE_AUTH_PORT% +auth_host = %KEYSTONE_AUTH_HOST% +auth_protocol = %KEYSTONE_AUTH_PROTOCOL% +auth_token = %SERVICE_TOKEN% +admin_token = %SERVICE_TOKEN% +cache = swift.cache + +[filter:swift3] +use = egg:swift#swift3 [filter:tempauth] use = egg:swift#tempauth diff --git a/stack.sh b/stack.sh index fef0127c..067ea7eb 100755 --- a/stack.sh +++ b/stack.sh @@ -162,7 +162,6 @@ NOVACLIENT_DIR=$DEST/python-novaclient KEYSTONECLIENT_DIR=$DEST/python-keystoneclient NOVNC_DIR=$DEST/noVNC SWIFT_DIR=$DEST/swift -SWIFT_KEYSTONE_DIR=$DEST/swift-keystone2 QUANTUM_DIR=$DEST/quantum QUANTUM_CLIENT_DIR=$DEST/python-quantumclient MELANGE_DIR=$DEST/melange @@ -570,8 +569,6 @@ fi if is_service_enabled swift; then # storage service git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH - # swift + keystone middleware - git_clone $SWIFT_KEYSTONE_REPO $SWIFT_KEYSTONE_DIR $SWIFT_KEYSTONE_BRANCH fi if is_service_enabled g-api n-api; then # image catalog service @@ -615,7 +612,6 @@ if is_service_enabled key g-api n-api swift; then fi if is_service_enabled swift; then cd $SWIFT_DIR; sudo python setup.py develop - cd $SWIFT_KEYSTONE_DIR; sudo python setup.py develop fi if is_service_enabled g-api n-api; then cd $GLANCE_DIR; sudo python setup.py develop @@ -1007,19 +1003,24 @@ if is_service_enabled swift; then # which has some default username and password if you have # configured keystone it will checkout the directory. 
if is_service_enabled key; then - swift_auth_server=keystone - - # We install the memcache server as this is will be used by the - # middleware to cache the tokens auths for a long this is needed. - apt_get install memcached + swift_auth_server="s3token tokenauth keystone" else swift_auth_server=tempauth fi # We do the install of the proxy-server and swift configuration # replacing a few directives to match our configuration. - sed "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s/%USER%/$USER/;s/%SERVICE_TOKEN%/${SERVICE_TOKEN}/;s/%AUTH_SERVER%/${swift_auth_server}/" \ - $FILES/swift/proxy-server.conf|sudo tee ${SWIFT_CONFIG_LOCATION}/proxy-server.conf + sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},g; + s,%USER%,$USER,g; + s,%SERVICE_TOKEN%,${SERVICE_TOKEN},g; + s,%KEYSTONE_SERVICE_PORT%,${KEYSTONE_SERVICE_PORT},g; + s,%KEYSTONE_SERVICE_HOST%,${KEYSTONE_SERVICE_HOST},g; + s,%KEYSTONE_AUTH_PORT%,${KEYSTONE_AUTH_PORT},g; + s,%KEYSTONE_AUTH_HOST%,${KEYSTONE_AUTH_HOST},g; + s,%KEYSTONE_AUTH_PROTOCOL%,${KEYSTONE_AUTH_PROTOCOL},g; + s/%AUTH_SERVER%/${swift_auth_server}/g;" \ + $FILES/swift/proxy-server.conf | \ + sudo tee ${SWIFT_CONFIG_LOCATION}/proxy-server.conf sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > ${SWIFT_CONFIG_LOCATION}/swift.conf From 112a360128a216829f30bb99fabfbc73303b44ca Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 26 Feb 2012 17:14:58 -0800 Subject: [PATCH 376/967] Update incorrect comment about certs in openrc Change-Id: I1efdf9f43572b169a5d35406195c2228bddc90e3 --- openrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openrc b/openrc index d742ced4..9b3d7ba8 100644 --- a/openrc +++ b/openrc @@ -62,7 +62,7 @@ export EC2_ACCESS_KEY=${DEMO_ACCESS} export EC2_SECRET_KEY=${DEMO_SECRET} # Euca2ools Certificate stuff for uploading bundles -# You can get your certs using ./tools/get_certs.sh +# See exercises/bundle.sh to see how to get certs using nova cli NOVARC=$(readlink -f "${BASH_SOURCE:-${0}}" 2>/dev/null) || NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' "${BASH_SOURCE:-${0}}") NOVA_KEY_DIR=${NOVARC%/*} From 854d8c93b81667b04a3ad38720bcc02acf3d15b0 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 27 Feb 2012 22:41:54 +0000 Subject: [PATCH 377/967] Add a helper method to volumes test to get fields * The way we retrieve data from cli output is janky, this improves it slightly by creating a helper method. Change-Id: Ib0889fd56f6a78bed85dad4c5e9e6e34bac9fb0d --- exercises/volumes.sh | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 1fcc034b..622fb185 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -55,18 +55,33 @@ IMAGE=`glance -f index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` # determinine instance type # ------------------------- +# Helper function to grab a numbered field from python novaclient cli result +# Fields are numbered starting with 1 +# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc. 
+function get_field () { + while read data + do + if [ "$1" -lt 0 ]; then + field="(\$(NF$1))" + else + field="\$$(($1 + 1))" + fi + echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}" + done +} + # List of instance types: nova flavor-list -INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | cut -d"|" -f2` +INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1` if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2` + INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | get_field 1` fi NAME="myserver" -VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` +VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | get_field 2` # Testing # ======= @@ -85,7 +100,7 @@ if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | g fi # get the IP of the server -IP=`nova show $VM_UUID | grep "private network" | cut -d"|" -f3` +IP=`nova show $VM_UUID | grep "private network" | get_field 2` # for single node deployments, we can ping private ips MULTI_HOST=${MULTI_HOST:-0} @@ -108,7 +123,7 @@ fi VOL_NAME="myvol-$(openssl rand -hex 4)" # Verify it doesn't exist -if [[ -n "`nova volume-list | grep $VOL_NAME | head -1 | cut -d'|' -f3 | sed 's/ //g'`" ]]; then +if [[ -n "`nova volume-list | grep $VOL_NAME | head -1 | get_field 2`" ]]; then echo "Volume $VOL_NAME already exists" exit 1 fi @@ -121,7 +136,7 @@ if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | fi # Get volume ID -VOL_ID=`nova volume-list | grep $VOL_NAME | head -1 | cut -d'|' -f2 | sed 's/ //g'` +VOL_ID=`nova volume-list | grep $VOL_NAME | head -1 | get_field 1` # Attach to server DEVICE=/dev/vdb @@ -131,7 +146,7 @@ if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | exit 1 fi -VOL_ATTACH=`nova volume-list | grep $VOL_NAME | head -1 | cut -d'|' -f6 | sed 's/ //g'` +VOL_ATTACH=`nova volume-list | grep $VOL_NAME | head -1 | get_field -1` if [[ "$VOL_ATTACH" != $VM_UUID ]]; then echo "Volume not attached to correct instance" exit 1 From 890061cc2a5b65d5db2aba4040db4db3d725b09c Mon Sep 17 00:00:00 2001 From: John Garbutt Date: Fri, 24 Feb 2012 14:39:17 +0000 Subject: [PATCH 378/967] Allow ratelimiting to be turned off. Useful when trying to run tempest, or similar tests. 
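For example, since stack.sh reads overrides from a localrc file, a single entry there (the value is a bare space, as the inline comment in the diff below notes) drops the ratelimit filter from both paste pipelines:

    # localrc (illustrative)
    OSAPI_RATE_LIMIT=" "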
Change-Id: Iaf951e2f647f884421b21199522b5fcf86c0dd4e --- stack.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 067ea7eb..8b694286 100755 --- a/stack.sh +++ b/stack.sh @@ -834,8 +834,10 @@ if is_service_enabled n-api; then } replace_pipeline "ec2cloud" "ec2faultwrap logrequest totoken authtoken keystonecontext cloudrequest authorizer validator ec2executor" replace_pipeline "ec2admin" "ec2faultwrap logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor" - replace_pipeline "openstack_compute_api_v2" "faultwrap authtoken keystonecontext ratelimit osapi_compute_app_v2" - replace_pipeline "openstack_volume_api_v1" "faultwrap authtoken keystonecontext ratelimit osapi_volume_app_v1" + # allow people to turn off rate limiting for testing, like when using tempest, by setting OSAPI_RATE_LIMIT=" " + OSAPI_RATE_LIMIT=${OSAPI_RATE_LIMIT:-"ratelimit"} + replace_pipeline "openstack_compute_api_v2" "faultwrap authtoken keystonecontext $OSAPI_RATE_LIMIT osapi_compute_app_v2" + replace_pipeline "openstack_volume_api_v1" "faultwrap authtoken keystonecontext $OSAPI_RATE_LIMIT osapi_volume_app_v1" fi # Helper to clean iptables rules From 0d7b5a4035a2d9db10f3209d543dbddddc8cb983 Mon Sep 17 00:00:00 2001 From: Zhongyue Luo Date: Sat, 11 Feb 2012 00:56:18 +0800 Subject: [PATCH 379/967] Backslash continuations (DevStack) Fixes bug #943087 Backslash continuations removal for DevStack Change-Id: I3e3e6d35b939ce8d0e3bc2b529c9d172286ba2c0 --- AUTHORS | 1 + tools/jenkins/jenkins_home/print_summary.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/AUTHORS b/AUTHORS index 561826cf..a3a4b6b9 100644 --- a/AUTHORS +++ b/AUTHORS @@ -23,3 +23,4 @@ Todd Willey Tres Henry Vishvananda Ishaya Yun Mao +Zhongyue Luo diff --git a/tools/jenkins/jenkins_home/print_summary.py b/tools/jenkins/jenkins_home/print_summary.py index 1d71a4a8..ea943e1c 100755 --- a/tools/jenkins/jenkins_home/print_summary.py +++ b/tools/jenkins/jenkins_home/print_summary.py @@ -5,8 +5,8 @@ def print_usage(): - print "Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]"\ - % sys.argv[0] + print ("Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]" + % sys.argv[0]) sys.exit() From f1a11adf2bc308c386cb2c818088fe040019f114 Mon Sep 17 00:00:00 2001 From: Gabriel Hurley Date: Wed, 29 Feb 2012 01:36:53 -0800 Subject: [PATCH 380/967] Updates devstack to E4 final horizon package structure. 
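For orientation, the main path changes applied by the diff below are (relative to the Horizon checkout, reconstructed from the hunks that follow):

    openstack-dashboard/dashboard/wsgi/django.wsgi  ->  openstack_dashboard/wsgi/django.wsgi
    openstack-dashboard/local/local_settings.py     ->  openstack_dashboard/local/local_settings.py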
Change-Id: Iedc30eb532b93e5755310b1037a2ecbb3552aab0 --- files/000-default.template | 4 ++-- files/horizon_settings.py | 5 +---- stack.sh | 13 ++++++------- 3 files changed, 9 insertions(+), 13 deletions(-) diff --git a/files/000-default.template b/files/000-default.template index d97f3659..f499ea07 100644 --- a/files/000-default.template +++ b/files/000-default.template @@ -1,12 +1,12 @@ - WSGIScriptAlias / %HORIZON_DIR%/openstack-dashboard/dashboard/wsgi/django.wsgi + WSGIScriptAlias / %HORIZON_DIR%/openstack_dashboard/wsgi/django.wsgi WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 SetEnv APACHE_RUN_USER %USER% SetEnv APACHE_RUN_GROUP %GROUP% WSGIProcessGroup horizon DocumentRoot %HORIZON_DIR%/.blackhole/ - Alias /media %HORIZON_DIR%/openstack-dashboard/dashboard/static + Alias /media %HORIZON_DIR%/openstack_dashboard/static Alias /vpn /opt/stack/vpn diff --git a/files/horizon_settings.py b/files/horizon_settings.py index bbff08fa..2d1d1f86 100644 --- a/files/horizon_settings.py +++ b/files/horizon_settings.py @@ -37,7 +37,7 @@ HORIZON_CONFIG = { 'dashboards': ('nova', 'syspanel', 'settings',), 'default_dashboard': 'nova', - 'user_home': 'dashboard.views.user_home', + 'user_home': 'openstack_dashboard.views.user_home', } OPENSTACK_HOST = "127.0.0.1" @@ -98,6 +98,3 @@ # } # } #} - -# How much ram on each compute host? -COMPUTE_HOST_RAM_GB = 16 diff --git a/stack.sh b/stack.sh index 8b694286..20c44e2e 100755 --- a/stack.sh +++ b/stack.sh @@ -618,8 +618,7 @@ if is_service_enabled g-api n-api; then fi cd $NOVA_DIR; sudo python setup.py develop if is_service_enabled horizon; then - cd $HORIZON_DIR/horizon; sudo python setup.py develop - cd $HORIZON_DIR/openstack-dashboard; sudo python setup.py develop + cd $HORIZON_DIR; sudo python setup.py develop fi if is_service_enabled q-svc; then cd $QUANTUM_DIR; sudo python setup.py develop @@ -718,14 +717,14 @@ if is_service_enabled horizon; then apt_get install apache2 libapache2-mod-wsgi # Link to quantum client directory. - rm -fr ${HORIZON_DIR}/openstack-dashboard/quantum - ln -s ${QUANTUM_CLIENT_DIR}/quantum ${HORIZON_DIR}/openstack-dashboard/quantum + rm -fr ${HORIZON_DIR}/openstack_dashboard/quantum + ln -s ${QUANTUM_CLIENT_DIR}/quantum ${HORIZON_DIR}/openstack_dashboard/quantum # Remove stale session database. - rm -f $HORIZON_DIR/openstack-dashboard/local/dashboard_openstack.sqlite3 + rm -f $HORIZON_DIR/openstack_dashboard/local/dashboard_openstack.sqlite3 # ``local_settings.py`` is used to override horizon default settings. - local_settings=$HORIZON_DIR/openstack-dashboard/local/local_settings.py + local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py cp $FILES/horizon_settings.py $local_settings # Enable quantum in dashboard, if requested @@ -735,7 +734,7 @@ if is_service_enabled horizon; then # Initialize the horizon database (it stores sessions and notices shown to # users). The user system is external (keystone). - cd $HORIZON_DIR/openstack-dashboard + cd $HORIZON_DIR python manage.py syncdb # create an empty directory that apache uses as docroot From 782f24ee39a1c9cc201956b726c95140cb73dc83 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 29 Feb 2012 13:42:44 +0000 Subject: [PATCH 381/967] Add small note about Swift. 
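For example, the README text added below translates into a localrc along these lines (values illustrative; the service list shown is a placeholder for whatever is already enabled):

    # localrc (illustrative)
    ENABLED_SERVICES="<existing services>,swift"   # add the swift keyword
    SWIFT_REPLICAS=1                               # one replica is enough for quick API testing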
Change-Id: Id1c014e6fcdf8e52a5e01e7bacf01a567d25e70a --- README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/README.md b/README.md index a185f34f..4a7aaa97 100644 --- a/README.md +++ b/README.md @@ -40,3 +40,13 @@ We also provide an environment file that you can use to interact with your cloud # Customizing You can override environment variables used in stack.sh by creating file name 'localrc'. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host. + +# Swift + +Swift is not installed by default, you need to add the **swift** keyword in the ENABLED_SERVICES variable to get it installed. + +If you have keystone enabled, Swift will authenticate against it, make sure to use the keystone URL to auth against. + +At this time Swift is not started in a screen session but as daemon you need to use the **swift-init** CLI to manage the swift daemons. + +By default Swift will configure 3 replicas (and one spare) which could be IO intensive on a small vm, if you only want to do some quick testing of the API you can choose to only have one replica by customizing the variable SWIFT_REPLICAS in your localrc. From a6bdfddd2b9ac89ea5ceba7cde5f37eddaf936df Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 29 Feb 2012 14:11:01 +0000 Subject: [PATCH 382/967] Use OS common cli auth arguments. Change-Id: I80c9e42abb7060622d82cffc83d0108654a36562 --- exercises/swift.sh | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/exercises/swift.sh b/exercises/swift.sh index 3a577443..95443df3 100755 --- a/exercises/swift.sh +++ b/exercises/swift.sh @@ -23,22 +23,18 @@ popd # Testing Swift # ============= -# FIXME(chmou): when review https://review.openstack.org/#change,3712 -# is merged we would be able to use the common openstack options and -# remove the trailing slash to v2.0 auth url. -# # Check if we have to swift via keystone -swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0/ -U admin -K $ADMIN_PASSWORD stat +swift stat # We start by creating a test container -swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0/ -U admin -K $ADMIN_PASSWORD post testcontainer +swift post testcontainer # add some files into it. -swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0/ -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue +swift upload testcontainer /etc/issue # list them -swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0/ -U admin -K $ADMIN_PASSWORD list testcontainer +swift list testcontainer # And we may want to delete them now that we have tested that # everything works. -swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0/ -U admin -K $ADMIN_PASSWORD delete testcontainer +swift delete testcontainer From cc86a9e43a23a1e7f86d978fbea3eb56824c705a Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Mon, 6 Feb 2012 12:15:59 -0800 Subject: [PATCH 383/967] Support passing extra args to network create if NETWORK_CREATE_ARGS is defined Change-Id: I17b96b74bc7c137589e9af85379c9c6c1bed76c8 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 20c44e2e..eea283fd 100755 --- a/stack.sh +++ b/stack.sh @@ -1479,7 +1479,7 @@ fi # happen after we've started the Quantum service. 
if is_service_enabled mysql; then # create a small network - $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE + $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS if is_service_enabled q-svc; then echo "Not creating floating IPs (not supported by QuantumManager)" From 2144ea23bb8621647dfb92b114718c2e3a401f41 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Wed, 29 Feb 2012 11:11:01 -0800 Subject: [PATCH 384/967] Add xml filter to keystone.conf Bug #943499 Change-Id: If636705839606a28c6a6905cd11deaa1fff76438 --- files/keystone.conf | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/files/keystone.conf b/files/keystone.conf index 76c618a0..d9e639fe 100644 --- a/files/keystone.conf +++ b/files/keystone.conf @@ -48,6 +48,9 @@ paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory [filter:admin_token_auth] paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory +[filter:xml_body] +paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory + [filter:json_body] paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory @@ -67,10 +70,10 @@ paste.app_factory = keystone.service:public_app_factory paste.app_factory = keystone.service:admin_app_factory [pipeline:public_api] -pipeline = token_auth admin_token_auth json_body debug ec2_extension s3_extension public_service +pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension s3_extension public_service [pipeline:admin_api] -pipeline = token_auth admin_token_auth json_body debug ec2_extension crud_extension admin_service +pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension crud_extension admin_service [app:public_version_service] paste.app_factory = keystone.service:public_version_app_factory @@ -79,10 +82,10 @@ paste.app_factory = keystone.service:public_version_app_factory paste.app_factory = keystone.service:admin_version_app_factory [pipeline:public_version_api] -pipeline = public_version_service +pipeline = xml_body public_version_service [pipeline:admin_version_api] -pipeline = admin_version_service +pipeline = xml_body admin_version_service [composite:main] use = egg:Paste#urlmap From b3288381047690510845209cc372d07e5b11e396 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 28 Feb 2012 16:41:10 -0600 Subject: [PATCH 385/967] Add service account configuration * Use username/password instead of service token for service auth to Keystone * Updates files/glance-*-paste.ini and files/swift/proxy-server.conf * keystone_data.sh creates 'service' tenant, 'nova' and 'glance' users ('swift' and 'quantum' if those services are enabled) * Uses $SERVICE_PASSWORD for the service auth password. There is no default; to default to $ADMIN_PASSWORD, place the assignment in localrc. 
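For example (illustrative localrc fragment, assuming ADMIN_PASSWORD is also set there), the following keeps the service accounts on the same password as the admin user:

    # localrc (illustrative)
    ADMIN_PASSWORD=secrete
    SERVICE_PASSWORD=$ADMIN_PASSWORD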
Fixes bug 942983 Change-Id: If78eed1b509a9c1e8441bb4cfa095da9052f9395 --- files/glance-api-paste.ini | 5 ++++ files/glance-registry-paste.ini | 5 ++++ files/keystone_data.sh | 33 +++++++++++++++++++++ files/swift/proxy-server.conf | 5 ++++ stack.sh | 51 ++++++++++++++++++++++++--------- 5 files changed, 85 insertions(+), 14 deletions(-) diff --git a/files/glance-api-paste.ini b/files/glance-api-paste.ini index b8832ad6..583b70a8 100644 --- a/files/glance-api-paste.ini +++ b/files/glance-api-paste.ini @@ -30,6 +30,7 @@ glance.filter_factory = glance.common.context:ContextMiddleware [filter:authtoken] paste.filter_factory = keystone.middleware.auth_token:filter_factory +# FIXME(dtroyer): remove these service_* entries after auth_token is updated service_host = %KEYSTONE_SERVICE_HOST% service_port = %KEYSTONE_SERVICE_PORT% service_protocol = %KEYSTONE_SERVICE_PROTOCOL% @@ -37,7 +38,11 @@ auth_host = %KEYSTONE_AUTH_HOST% auth_port = %KEYSTONE_AUTH_PORT% auth_protocol = %KEYSTONE_AUTH_PROTOCOL% auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/ +# FIXME(dtroyer): remove admin_token after auth_token is updated admin_token = %SERVICE_TOKEN% +admin_tenant_name = %SERVICE_TENANT_NAME% +admin_user = %SERVICE_USERNAME% +admin_password = %SERVICE_PASSWORD% [filter:auth-context] paste.filter_factory = glance.common.wsgi:filter_factory diff --git a/files/glance-registry-paste.ini b/files/glance-registry-paste.ini index f4130ec9..fe460d9e 100644 --- a/files/glance-registry-paste.ini +++ b/files/glance-registry-paste.ini @@ -14,6 +14,7 @@ glance.filter_factory = glance.common.context:ContextMiddleware [filter:authtoken] paste.filter_factory = keystone.middleware.auth_token:filter_factory +# FIXME(dtroyer): remove these service_* entries after auth_token is updated service_host = %KEYSTONE_SERVICE_HOST% service_port = %KEYSTONE_SERVICE_PORT% service_protocol = %KEYSTONE_SERVICE_PROTOCOL% @@ -21,7 +22,11 @@ auth_host = %KEYSTONE_AUTH_HOST% auth_port = %KEYSTONE_AUTH_PORT% auth_protocol = %KEYSTONE_AUTH_PROTOCOL% auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/ +# FIXME(dtroyer): remove admin_token after auth_token is updated admin_token = %SERVICE_TOKEN% +admin_tenant_name = %SERVICE_TENANT_NAME% +admin_user = %SERVICE_USERNAME% +admin_password = %SERVICE_PASSWORD% [filter:auth-context] context_class = glance.registry.context.RequestContext diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 3f4841f9..e2928111 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -17,6 +17,7 @@ if keystone help | grep -q user-role-add; then fi ADMIN_TENANT=`get_id keystone tenant-create --name=admin` +SERVICE_TENANT=`get_id keystone tenant-create --name=$SERVICE_TENANT_NAME` DEMO_TENANT=`get_id keystone tenant-create --name=demo` INVIS_TENANT=`get_id keystone tenant-create --name=invisible_to_admin` @@ -73,6 +74,14 @@ keystone service-create \ --name=nova \ --type=compute \ --description="Nova Compute Service" +NOVA_USER=`get_id keystone user-create \ + --name=nova \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=nova@example.com` +keystone user-role-add --tenant_id $SERVICE_TENANT \ + --user $NOVA_USER \ + --role $ADMIN_ROLE keystone service-create \ --name=ec2 \ @@ -83,6 +92,14 @@ keystone service-create \ --name=glance \ --type=image \ --description="Glance Image Service" +GLANCE_USER=`get_id keystone user-create \ + --name=glance \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id 
$SERVICE_TENANT \ + --email=glance@example.com` +keystone user-role-add --tenant_id $SERVICE_TENANT \ + --user $GLANCE_USER \ + --role $ADMIN_ROLE keystone service-create \ --name=keystone \ @@ -101,12 +118,28 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then --name=swift \ --type="object-store" \ --description="Swift Service" + SWIFT_USER=`get_id keystone user-create \ + --name=swift \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=swift@example.com` + keystone user-role-add --tenant_id $SERVICE_TENANT \ + --user $SWIFT_USER \ + --role $ADMIN_ROLE fi if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then keystone service-create \ --name=quantum \ --type=network \ --description="Quantum Service" + QUANTUM_USER=`get_id keystone user-create \ + --name=quantum \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=quantum@example.com` + keystone user-role-add --tenant_id $SERVICE_TENANT \ + --user $QUANTUM_USER \ + --role $ADMIN_ROLE fi # create ec2 creds and parse the secret and access key returned diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf index d6db117c..e80c1d5e 100644 --- a/files/swift/proxy-server.conf +++ b/files/swift/proxy-server.conf @@ -31,13 +31,18 @@ admin_token = %SERVICE_TOKEN% [filter:tokenauth] paste.filter_factory = keystone.middleware.auth_token:filter_factory +# FIXME(dtroyer): remove these service_* entries after auth_token is updated service_port = %KEYSTONE_SERVICE_PORT% service_host = %KEYSTONE_SERVICE_HOST% auth_port = %KEYSTONE_AUTH_PORT% auth_host = %KEYSTONE_AUTH_HOST% auth_protocol = %KEYSTONE_AUTH_PROTOCOL% auth_token = %SERVICE_TOKEN% +# FIXME(dtroyer): remove admin_token after auth_token is updated admin_token = %SERVICE_TOKEN% +admin_tenant_name = %SERVICE_TENANT_NAME% +admin_user = %SERVICE_USERNAME% +admin_password = %SERVICE_PASSWORD% cache = swift.cache [filter:swift3] diff --git a/stack.sh b/stack.sh index 20c44e2e..92421821 100755 --- a/stack.sh +++ b/stack.sh @@ -421,10 +421,16 @@ fi # Service Token - Openstack components need to have an admin token # to validate user tokens. read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN." +# Services authenticate to Identity with servicename/SERVICE_PASSWORD +read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION." # Horizon currently truncates usernames and passwords at 20 characters read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)." 
+# Set the tenant for service accounts in Keystone +SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} + # Set Keystone interface configuration +KEYSTONE_API_PORT=${KEYSTONE_API_PORT:-5000} KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357} KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-http} @@ -768,6 +774,7 @@ if is_service_enabled g-reg; then function glance_config { sudo sed -e " + s,%KEYSTONE_API_PORT%,$KEYSTONE_API_PORT,g; s,%KEYSTONE_AUTH_HOST%,$KEYSTONE_AUTH_HOST,g; s,%KEYSTONE_AUTH_PORT%,$KEYSTONE_AUTH_PORT,g; s,%KEYSTONE_AUTH_PROTOCOL%,$KEYSTONE_AUTH_PROTOCOL,g; @@ -775,6 +782,9 @@ if is_service_enabled g-reg; then s,%KEYSTONE_SERVICE_PORT%,$KEYSTONE_SERVICE_PORT,g; s,%KEYSTONE_SERVICE_PROTOCOL%,$KEYSTONE_SERVICE_PROTOCOL,g; s,%SQL_CONN%,$BASE_SQL_CONN/glance,g; + s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g; + s,%SERVICE_USERNAME%,glance,g; + s,%SERVICE_PASSWORD%,$SERVICE_PASSWORD,g; s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g; s,%DEST%,$DEST,g; s,%SYSLOG%,$SYSLOG,g; @@ -825,7 +835,14 @@ if is_service_enabled n-api; then cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF # Then we add our own service token to the configuration - sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_CONF/api-paste.ini + sed -e " + /^admin_token/i admin_tenant_name = $SERVICE_TENANT_NAME + /admin_tenant_name/s/^.*$/admin_tenant_name = $SERVICE_TENANT_NAME/; + /admin_user/s/^.*$/admin_user = nova/; + /admin_password/s/^.*$/admin_password = $SERVICE_PASSWORD/; + s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g; + s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g; + " -i $NOVA_CONF/api-paste.ini # Finally, we change the pipelines in nova to use keystone function replace_pipeline() { @@ -1011,16 +1028,21 @@ if is_service_enabled swift; then # We do the install of the proxy-server and swift configuration # replacing a few directives to match our configuration. - sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},g; - s,%USER%,$USER,g; - s,%SERVICE_TOKEN%,${SERVICE_TOKEN},g; - s,%KEYSTONE_SERVICE_PORT%,${KEYSTONE_SERVICE_PORT},g; - s,%KEYSTONE_SERVICE_HOST%,${KEYSTONE_SERVICE_HOST},g; - s,%KEYSTONE_AUTH_PORT%,${KEYSTONE_AUTH_PORT},g; - s,%KEYSTONE_AUTH_HOST%,${KEYSTONE_AUTH_HOST},g; - s,%KEYSTONE_AUTH_PROTOCOL%,${KEYSTONE_AUTH_PROTOCOL},g; - s/%AUTH_SERVER%/${swift_auth_server}/g;" \ - $FILES/swift/proxy-server.conf | \ + sed -e " + s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},g; + s,%USER%,$USER,g; + s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g; + s,%SERVICE_USERNAME%,swift,g; + s,%SERVICE_PASSWORD%,$SERVICE_PASSWORD,g; + s,%SERVICE_TOKEN%,${SERVICE_TOKEN},g; + s,%KEYSTONE_SERVICE_PORT%,${KEYSTONE_SERVICE_PORT},g; + s,%KEYSTONE_SERVICE_HOST%,${KEYSTONE_SERVICE_HOST},g; + s,%KEYSTONE_API_PORT%,${KEYSTONE_API_PORT},g; + s,%KEYSTONE_AUTH_HOST%,${KEYSTONE_AUTH_HOST},g; + s,%KEYSTONE_AUTH_PORT%,${KEYSTONE_AUTH_PORT},g; + s,%KEYSTONE_AUTH_PROTOCOL%,${KEYSTONE_AUTH_PROTOCOL},g; + s/%AUTH_SERVER%/${swift_auth_server}/g; + " $FILES/swift/proxy-server.conf | \ sudo tee ${SWIFT_CONFIG_LOCATION}/proxy-server.conf sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > ${SWIFT_CONFIG_LOCATION}/swift.conf @@ -1389,7 +1411,7 @@ fi if is_service_enabled key; then screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
http_proxy= wget -q -O- $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/; do sleep 1; done"; then echo "keystone did not start" exit 1 fi @@ -1401,7 +1423,8 @@ if is_service_enabled key; then # keystone_data.sh creates services, admin and demo users, and roles. SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 - ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES bash $FILES/keystone_data.sh + ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \ + bash $FILES/keystone_data.sh fi @@ -1630,7 +1653,7 @@ fi # If keystone is present, you can point nova cli to this server if is_service_enabled key; then - echo "keystone is serving at $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/" + echo "keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/" echo "examples on using novaclient command line is in exercise.sh" echo "the default users are: admin and demo" echo "the password: $ADMIN_PASSWORD" From f2829479fe9053ed9b76e7ef984fa45e67040668 Mon Sep 17 00:00:00 2001 From: Andrew Bogott Date: Thu, 1 Mar 2012 11:44:11 -0600 Subject: [PATCH 386/967] Added tgtadm to sudo cmd list. Fixes bug 927924. We need this if we are ever to create or delete volumes. Change-Id: If451b936a04d064feba2eefec499e1669e1837d6 --- files/sudo/nova | 1 + 1 file changed, 1 insertion(+) diff --git a/files/sudo/nova b/files/sudo/nova index 3231e2da..60dca2ba 100644 --- a/files/sudo/nova +++ b/files/sudo/nova @@ -41,6 +41,7 @@ Cmnd_Alias NOVADEVCMDS = /bin/chmod /var/lib/nova/tmp/*/root/.ssh, \ /usr/bin/socat, \ /sbin/parted, \ /usr/sbin/dnsmasq, \ + /usr/sbin/tgtadm, \ /usr/bin/ovs-vsctl, \ /usr/bin/ovs-ofctl, \ /usr/sbin/arping From 3993816fe6fb92c609043b765fd07d08d469a2bf Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Thu, 1 Mar 2012 15:43:36 -0800 Subject: [PATCH 387/967] Remove hardcoding of flat network bridge Change-Id: I66210ba438c1d8a2c12223b8895581127130ad5f --- stack.sh | 6 ++++-- tools/xen/build_domU.sh | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 92421821..35a7b3dd 100755 --- a/stack.sh +++ b/stack.sh @@ -1262,6 +1262,8 @@ done # --------- if [ "$VIRT_DRIVER" = 'xenserver' ]; then + # Get the VM bridge + FLAT_NETWORK_BRIDGE=$(grep -o 'flat_network_bridge=[^.]*' /proc/cmdline | cut -d= -f 2) read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." 
add_nova_flag "--connection_type=xenapi" add_nova_flag "--xenapi_connection_url=http://169.254.0.1" @@ -1269,8 +1271,8 @@ if [ "$VIRT_DRIVER" = 'xenserver' ]; then add_nova_flag "--xenapi_connection_password=$XENAPI_PASSWORD" add_nova_flag "--noflat_injected" add_nova_flag "--flat_interface=eth1" - add_nova_flag "--flat_network_bridge=xapi1" - add_nova_flag "--public_interface=eth3" + add_nova_flag "--flat_network_bridge=${FLAT_NETWORK_BRIDGE}" + add_nova_flag "--public_interface=${HOST_IP_IFACE}" # Need to avoid crash due to new firewall support XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} add_nova_flag "--firewall_driver=$XEN_FIREWALL_DRIVER" diff --git a/tools/xen/build_domU.sh b/tools/xen/build_domU.sh index 455ad264..a0fd316a 100755 --- a/tools/xen/build_domU.sh +++ b/tools/xen/build_domU.sh @@ -182,7 +182,7 @@ fi if [ -z $PUB_BR ]; then PUB_BR=$(xe network-list --minimal uuid=$PUB_NET params=bridge) fi -$TOP_DIR/scripts/install-os-vpx.sh -f $XVA -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -w +$TOP_DIR/scripts/install-os-vpx.sh -f $XVA -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -w -k "flat_network_bridge=${VM_BR}" # If we have copied our ssh credentials, use ssh to monitor while the installation runs WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1} From 8e2cffdf853467395b29bad891deba26e7b4483e Mon Sep 17 00:00:00 2001 From: John Garbutt Date: Fri, 2 Mar 2012 16:22:07 +0000 Subject: [PATCH 388/967] Unify the way devstack configures networking for libvirt and xenserver Change-Id: I839ccabef6556415653c6f957279585ef2ef7d8d --- stack.sh | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/stack.sh b/stack.sh index 35a7b3dd..481e8564 100755 --- a/stack.sh +++ b/stack.sh @@ -290,14 +290,26 @@ function is_service_enabled() { # FIXME: more documentation about why these are important flags. Also # we should make sure we use the same variable names as the flag names. +if [ "$VIRT_DRIVER" = 'xenserver' ]; then + PUBLIC_INTERFACE_DEFAULT=eth3 + # allow build_domU.sh to specify the flat network bridge via kernel args + FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[^.]*' /proc/cmdline | cut -d= -f 2) + GUEST_INTERFACE_DEFAULT=eth1 +else + PUBLIC_INTERFACE_DEFAULT=br100 + FLAT_NETWORK_BRIDGE_DEFAULT=br100 + GUEST_INTERFACE_DEFAULT=eth0 +fi + +PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT} PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-br100} FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28} NET_MAN=${NET_MAN:-FlatDHCPManager} EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST} -FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-br100} -VLAN_INTERFACE=${VLAN_INTERFACE:-eth0} +FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT} +VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT} # Test floating pool and range are used for testing. They are defined # here until the admin APIs can replace nova-manage @@ -323,7 +335,7 @@ MULTI_HOST=${MULTI_HOST:-False} # devices other than that node, you can set the flat interface to the same # value as ``FLAT_NETWORK_BRIDGE``. This will stop the network hiccup from # occurring. -FLAT_INTERFACE=${FLAT_INTERFACE:-eth0} +FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT} ## FIXME(ja): should/can we check that FLAT_INTERFACE is sane? 
@@ -1213,6 +1225,10 @@ add_nova_flag "--osapi_compute_extension=nova.api.openstack.compute.contrib.stan add_nova_flag "--my_ip=$HOST_IP" add_nova_flag "--public_interface=$PUBLIC_INTERFACE" add_nova_flag "--vlan_interface=$VLAN_INTERFACE" +add_nova_flag "--flat_network_bridge=$FLAT_NETWORK_BRIDGE" +if [ -n "$FLAT_INTERFACE" ]; then + add_nova_flag "--flat_interface=$FLAT_INTERFACE" +fi add_nova_flag "--sql_connection=$BASE_SQL_CONN/nova" add_nova_flag "--libvirt_type=$LIBVIRT_TYPE" add_nova_flag "--instance_name_template=${INSTANCE_NAME_PREFIX}%08x" @@ -1262,17 +1278,12 @@ done # --------- if [ "$VIRT_DRIVER" = 'xenserver' ]; then - # Get the VM bridge - FLAT_NETWORK_BRIDGE=$(grep -o 'flat_network_bridge=[^.]*' /proc/cmdline | cut -d= -f 2) read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." add_nova_flag "--connection_type=xenapi" add_nova_flag "--xenapi_connection_url=http://169.254.0.1" add_nova_flag "--xenapi_connection_username=root" add_nova_flag "--xenapi_connection_password=$XENAPI_PASSWORD" add_nova_flag "--noflat_injected" - add_nova_flag "--flat_interface=eth1" - add_nova_flag "--flat_network_bridge=${FLAT_NETWORK_BRIDGE}" - add_nova_flag "--public_interface=${HOST_IP_IFACE}" # Need to avoid crash due to new firewall support XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} add_nova_flag "--firewall_driver=$XEN_FIREWALL_DRIVER" @@ -1280,10 +1291,6 @@ else add_nova_flag "--connection_type=libvirt" LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} add_nova_flag "--firewall_driver=$LIBVIRT_FIREWALL_DRIVER" - add_nova_flag "--flat_network_bridge=$FLAT_NETWORK_BRIDGE" - if [ -n "$FLAT_INTERFACE" ]; then - add_nova_flag "--flat_interface=$FLAT_INTERFACE" - fi fi # Nova Database From 92e85601f47163fe18c55f63d46cd362654440a2 Mon Sep 17 00:00:00 2001 From: John Garbutt Date: Fri, 2 Mar 2012 16:08:37 +0000 Subject: [PATCH 389/967] Allow people to specifiy the XENAPI_CONNECTION_URL This is useful when testing host aggregates and not wanting to use the host local management network Change-Id: I0d5d13632c74be8d8b8e7de918e7ff28478af9a6 --- stack.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 35a7b3dd..3e59b614 100755 --- a/stack.sh +++ b/stack.sh @@ -1266,7 +1266,8 @@ if [ "$VIRT_DRIVER" = 'xenserver' ]; then FLAT_NETWORK_BRIDGE=$(grep -o 'flat_network_bridge=[^.]*' /proc/cmdline | cut -d= -f 2) read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." 
add_nova_flag "--connection_type=xenapi" - add_nova_flag "--xenapi_connection_url=http://169.254.0.1" + XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"} + add_nova_flag "--xenapi_connection_url=$XENAPI_CONNECTION_URL" add_nova_flag "--xenapi_connection_username=root" add_nova_flag "--xenapi_connection_password=$XENAPI_PASSWORD" add_nova_flag "--noflat_injected" From 4807df8e5bc0e5e78536ecea37a0ca7621720524 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 24 Feb 2012 10:44:18 -0600 Subject: [PATCH 390/967] Update openrc to focus on current OS_* environment variables * Support for NOVA_* variables removed * Support for username and tenant on command line added Change-Id: Icd50e8bd06eaeedbc4bfd10a67ad0329d72d5756 --- openrc | 73 ++++++++++++++++++++++++++++++++-------------------------- 1 file changed, 40 insertions(+), 33 deletions(-) diff --git a/openrc b/openrc index 9b3d7ba8..d9e7c92e 100644 --- a/openrc +++ b/openrc @@ -1,8 +1,40 @@ #!/usr/bin/env bash +# +# source openrc [username] [tenantname] +# +# Configure a set of credentials for $TENANT/$USERNAME: +# Set TENANT to override the default tenant 'demo' +# Set USERNAME to override the default user name 'demo' +# Set ADMIN_PASSWORD to set the password for 'admin' and 'demo' + +# NOTE: support for the old NOVA_* novaclient environment variables has +# been removed. + +if [[ -n "$1" ]]; then + USERNAME=$1 +fi +if [[ -n "$2" ]]; then + TENANT=$2 +fi # Load local configuration source ./stackrc +# The introduction of Keystone to the OpenStack ecosystem has standardized the +# term **tenant** as the entity that owns resources. In some places references +# still exist to the original Nova term **project** for this use. Also, +# **tenant_name** is prefered to **tenant_id**. +export OS_TENANT_NAME=${TENANT:-demo} + +# In addition to the owning entity (tenant), nova stores the entity performing +# the action as the **user**. +export OS_USERNAME=${USERNAME:-demo} + +# With Keystone you pass the keystone password instead of an api key. +# Recent versions of novaclient use OS_PASSWORD instead of NOVA_API_KEYs +# or NOVA_PASSWORD. +export OS_PASSWORD=${ADMIN_PASSWORD:-secrete} + # Set api HOST_IP endpoint. SERVICE_HOST may also be used to specify the endpoint, # which is convenient for some localrc configurations. HOST_IP=${HOST_IP:-127.0.0.1} @@ -12,45 +44,20 @@ SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} # should be listening on HOST_IP. If its running elsewhere, it can be set here GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} -# novaclient now supports the new OS_* configuration variables in addition to -# the older NOVA_* variables. Set them both for now... - -# Nova original used project_id as the *account* that owned resources (servers, -# ip address, ...) With the addition of Keystone we have standardized on the -# term **tenant** as the entity that owns the resources. **novaclient** still -# uses the old deprecated terms project_id. Note that this field should now be -# set to tenant_name, not tenant_id. -export NOVA_PROJECT_ID=${TENANT:-demo} -export OS_TENANT_NAME=${NOVA_PROJECT_ID} - -# In addition to the owning entity (tenant), nova stores the entity performing -# the action as the **user**. -export NOVA_USERNAME=${USERNAME:-demo} -export OS_USERNAME=${NOVA_USERNAME} - -# With Keystone you pass the keystone password instead of an api key. 
-# Recent versions of novaclient use NOVA_PASSWORD instead of NOVA_API_KEY -# The most recent versions of novaclient use OS_PASSWORD in addition to NOVA_PASSWORD -export NOVA_PASSWORD=${ADMIN_PASSWORD:-secrete} -export OS_PASSWORD=${NOVA_PASSWORD} - -# With the addition of Keystone, to use an openstack cloud you should -# authenticate against keystone, which returns a **Token** and **Service -# Catalog**. The catalog contains the endpoint for all services the user/tenant -# has access to - including nova, glance, keystone, swift, ... We currently -# recommend using the 2.0 *auth api*. +# Authenticating against an Openstack cloud using Keystone returns a **Token** +# and **Service Catalog**. The catalog contains the endpoints for all services +# the user/tenant has access to - including nova, glance, keystone, swift, ... +# We currently recommend using the 2.0 *identity api*. # -# *NOTE*: Using the 2.0 *auth api* does not mean that compute api is 2.0. We +# *NOTE*: Using the 2.0 *identity api* does not mean that compute api is 2.0. We # will use the 1.1 *compute api* -export NOVA_URL=${NOVA_URL:-http://$SERVICE_HOST:5000/v2.0} -export OS_AUTH_URL=${NOVA_URL} +export OS_AUTH_URL=http://$SERVICE_HOST:5000/v2.0 # Currently novaclient needs you to specify the *compute api* version. This # needs to match the config of your catalog returned by Keystone. export NOVA_VERSION=${NOVA_VERSION:-1.1} - -# FIXME - why does this need to be specified? -export NOVA_REGION_NAME=${NOVA_REGION_NAME:-RegionOne} +# In the future this will change names: +export COMPUTE_API_VERSION=${COMPUTE_API_VERSION:-$NOVA_VERSION} # Set the ec2 url so euca2ools works export EC2_URL=${EC2_URL:-http://$SERVICE_HOST:8773/services/Cloud} From 489bd2a62b5949665bc7c4a05a52d27a987e0489 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 2 Mar 2012 10:44:29 -0600 Subject: [PATCH 391/967] Improve exercise robustness * Test returns and exit codes on most command invocations * Add start and end banners to make output easier to find in long log files * Adds die_if_error(), die_if_not_set() and is_set() to functions * Add some function tests Fixes bug 944593 Change-Id: I55e2962c5fec9aad237b674732b1e922ad37a62e --- exercises/bundle.sh | 24 ++++++++++++++--- exercises/client-env.sh | 35 ++++++++++--------------- exercises/euca.sh | 28 +++++++++++++++++--- exercises/floating_ips.sh | 42 +++++++++++++++++++++++------- exercises/swift.sh | 34 +++++++++++++++++++----- exercises/volumes.sh | 48 +++++++++++++++++++++------------- functions | 54 +++++++++++++++++++++++++++++++++++++++ tests/functions.sh | 54 +++++++++++++++++++++++++++++++++++++++ 8 files changed, 259 insertions(+), 60 deletions(-) create mode 100755 tests/functions.sh diff --git a/exercises/bundle.sh b/exercises/bundle.sh index d5c78af3..e1c949cf 100755 --- a/exercises/bundle.sh +++ b/exercises/bundle.sh @@ -2,7 +2,10 @@ # we will use the ``euca2ools`` cli tool that wraps the python boto # library to test ec2 compatibility -# + +echo "**************************************************" +echo "Begin DevStack Exercise: $0" +echo "**************************************************" # This script exits on an error so that errors don't compound and you see # only the first error that occured. @@ -16,7 +19,12 @@ set -o xtrace # ======== # Use openrc + stackrc + localrc for settings -pushd $(cd $(dirname "$0")/.. && pwd) +pushd $(cd $(dirname "$0")/.. 
&& pwd) >/dev/null + +# Import common functions +source ./functions + +# Import configuration source ./openrc # Remove old certificates @@ -27,7 +35,7 @@ rm -f pk.pem # Get Certificates nova x509-get-root-cert nova x509-create-cert -popd +popd >/dev/null # Max time to wait for image to be registered REGISTER_TIMEOUT=${REGISTER_TIMEOUT:-15} @@ -36,10 +44,14 @@ BUCKET=testbucket IMAGE=bundle.img truncate -s 5M /tmp/$IMAGE euca-bundle-image -i /tmp/$IMAGE +die_if_error "Failure bundling image $IMAGE" euca-upload-bundle -b $BUCKET -m /tmp/$IMAGE.manifest.xml +die_if_error "Failure uploading bundle $IMAGE to $BUCKET" + AMI=`euca-register $BUCKET/$IMAGE.manifest.xml | cut -f2` +die_if_not_set AMI "Failure registering $BUCKET/$IMAGE" # Wait for the image to become available if ! timeout $REGISTER_TIMEOUT sh -c "while euca-describe-images | grep '$AMI' | grep 'available'; do sleep 1; done"; then @@ -49,3 +61,9 @@ fi # Clean up euca-deregister $AMI +die_if_error "Failure deregistering $AMI" + +set +o xtrace +echo "**************************************************" +echo "End DevStack Exercise: $0" +echo "**************************************************" diff --git a/exercises/client-env.sh b/exercises/client-env.sh index a15a5c04..28c4d95e 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -2,6 +2,10 @@ # Test OpenStack client enviroment variable handling +echo "**************************************************" +echo "Begin DevStack Exercise: $0" +echo "**************************************************" + # Verify client workage VERIFY=${1:-""} @@ -10,6 +14,11 @@ VERIFY=${1:-""} # Use openrc + stackrc + localrc for settings pushd $(cd $(dirname "$0")/.. && pwd) >/dev/null + +# Import common functions +source ./functions + +# Import configuration source ./openrc popd >/dev/null @@ -23,19 +32,10 @@ unset NOVA_URL unset NOVA_USERNAME unset NOVA_VERSION -# Make sure we have the vars we are expecting -function is_set() { - local var=\$"$1" - eval echo $1=$var - if eval "[ -z $var ]"; then - return 1 - fi - return 0 -} - for i in OS_TENANT_NAME OS_USERNAME OS_PASSWORD OS_AUTH_URL; do is_set $i if [[ $? 
-ne 0 ]]; then + echo "$i expected to be set" ABORT=1 fi done @@ -52,14 +52,6 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then if [[ "$SKIP_EXERCISES" =~ "key" ]] ; then STATUS_KEYSTONE="Skipped" else - # We need to run the keystone test as admin since there doesn't - # seem to be anything to test the cli vars that runs as a user - # tenant-list should do that, it isn't implemented (yet) - xOS_TENANT_NAME=$OS_TENANT_NAME - xOS_USERNAME=$OS_USERNAME - export OS_USERNAME=admin - export OS_TENANT_NAME=admin - echo -e "\nTest Keystone" if keystone service-list; then STATUS_KEYSTONE="Succeeded" @@ -67,9 +59,6 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then STATUS_KEYSTONE="Failed" RETURN=1 fi - - OS_TENANT_NAME=$xOS_TENANT_NAME - OS_USERNAME=$xOS_USERNAME fi fi @@ -139,4 +128,8 @@ report "Nova" $STATUS_NOVA report "Glance" $STATUS_GLANCE report "Swift" $STATUS_SWIFT +echo "**************************************************" +echo "End DevStack Exercise: $0" +echo "**************************************************" + exit $RETURN diff --git a/exercises/euca.sh b/exercises/euca.sh index 86cd6732..b766bab8 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -2,7 +2,10 @@ # we will use the ``euca2ools`` cli tool that wraps the python boto # library to test ec2 compatibility -# + +echo "**************************************************" +echo "Begin DevStack Exercise: $0" +echo "**************************************************" # This script exits on an error so that errors don't compound and you see # only the first error that occured. @@ -16,9 +19,14 @@ set -o xtrace # ======== # Use openrc + stackrc + localrc for settings -pushd $(cd $(dirname "$0")/.. && pwd) +pushd $(cd $(dirname "$0")/.. && pwd) >/dev/null + +# Import common functions +source ./functions + +# Import configuration source ./openrc -popd +popd >/dev/null # Max time to wait while vm goes from build to active state ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} @@ -49,6 +57,7 @@ fi # Launch it INSTANCE=`euca-run-instances -g $SECGROUP -t $DEFAULT_INSTANCE_TYPE $IMAGE | grep INSTANCE | cut -f2` +die_if_not_set INSTANCE "Failure launching instance" # Assure it has booted within a reasonable time if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then @@ -58,12 +67,15 @@ fi # Allocate floating address FLOATING_IP=`euca-allocate-address | cut -f2` +die_if_not_set FLOATING_IP "Failure allocating floating IP" # Associate floating address euca-associate-address -i $INSTANCE $FLOATING_IP +die_if_error "Failure associating address $FLOATING_IP to $INSTANCE" # Authorize pinging euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP +die_if_error "Failure authorizing rule in $SECGROUP" # Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then @@ -73,9 +85,11 @@ fi # Revoke pinging euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP +die_if_error "Failure revoking rule in $SECGROUP" # Release floating address euca-disassociate-address $FLOATING_IP +die_if_error "Failure disassociating address $FLOATING_IP" # Wait just a tick for everything above to complete so release doesn't fail if ! 
timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep $INSTANCE | grep -q $FLOATING_IP; do sleep 1; done"; then @@ -85,6 +99,7 @@ fi # Release floating address euca-release-address $FLOATING_IP +die_if_error "Failure releasing address $FLOATING_IP" # Wait just a tick for everything above to complete so terminate doesn't fail if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep -q $FLOATING_IP; do sleep 1; done"; then @@ -94,6 +109,7 @@ fi # Terminate instance euca-terminate-instances $INSTANCE +die_if_error "Failure terminating instance $INSTANCE" # Assure it has terminated within a reasonable time if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then @@ -103,3 +119,9 @@ fi # Delete group euca-delete-group $SECGROUP +die_if_error "Failure deleting security group $SECGROUP" + +set +o xtrace +echo "**************************************************" +echo "End DevStack Exercise: $0" +echo "**************************************************" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index b559965f..a47f1ffc 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -7,6 +7,10 @@ # +echo "**************************************************" +echo "Begin DevStack Exercise: $0" +echo "**************************************************" + # This script exits on an error so that errors don't compound and you see # only the first error that occured. set -o errexit @@ -20,9 +24,14 @@ set -o xtrace # ======== # Use openrc + stackrc + localrc for settings -pushd $(cd $(dirname "$0")/.. && pwd) +pushd $(cd $(dirname "$0")/.. && pwd) >/dev/null + +# Import common functions +source ./functions + +# Import configuration source ./openrc -popd +popd >/dev/null # Max time to wait while vm goes from build to active state ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} @@ -87,15 +96,16 @@ fi # List of instance types: nova flavor-list -INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | cut -d"|" -f2` +INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1` if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2` + INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | get_field 1` fi -NAME="myserver" +NAME="ex-float" -VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` +VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | get_field 2` +die_if_not_set VM_UUID "Failure launching $NAME" # Testing # ======= @@ -114,7 +124,8 @@ if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | g fi # get the IP of the server -IP=`nova show $VM_UUID | grep "private network" | cut -d"|" -f3` +IP=`nova show $VM_UUID | grep "private network" | get_field 2` +die_if_not_set IP "Failure retrieving IP address" # for single node deployments, we can ping private ips MULTI_HOST=${MULTI_HOST:-0} @@ -147,7 +158,8 @@ fi nova secgroup-list-rules $SECGROUP # allocate a floating ip from default pool -FLOATING_IP=`nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | cut -d '|' -f2` +FLOATING_IP=`nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1` +die_if_not_set FLOATING_IP "Failure creating floating IP" # list floating addresses if ! 
timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then @@ -157,6 +169,7 @@ fi # add floating ip to our server nova add-floating-ip $VM_UUID $FLOATING_IP +die_if_error "Failure adding floating IP $FLOATING_IP to $NAME" # test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then @@ -165,7 +178,8 @@ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sle fi # Allocate an IP from second floating pool -TEST_FLOATING_IP=`nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | cut -d '|' -f2` +TEST_FLOATING_IP=`nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1` +die_if_not_set TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL" # list floating addresses if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then @@ -175,6 +189,7 @@ fi # dis-allow icmp traffic (ping) nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 +die_if_error "Failure deleting security group rule from $SECGROUP" # FIXME (anthony): make xs support security groups if [ "$VIRT_DRIVER" != "xenserver" ]; then @@ -188,12 +203,15 @@ fi # de-allocate the floating ip nova floating-ip-delete $FLOATING_IP +die_if_error "Failure deleting floating IP $FLOATING_IP" # Delete second floating IP nova floating-ip-delete $TEST_FLOATING_IP +die_if_error "Failure deleting floating IP $TEST_FLOATING_IP" # shutdown the server nova delete $VM_UUID +die_if_error "Failure deleting instance $NAME" # make sure the VM shuts down within a reasonable time if ! timeout $TERMINATE_TIMEOUT sh -c "while nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then @@ -203,3 +221,9 @@ fi # Delete a secgroup nova secgroup-delete $SECGROUP +die_if_error "Failure deleting security group $SECGROUP" + +set +o xtrace +echo "**************************************************" +echo "End DevStack Exercise: $0" +echo "**************************************************" diff --git a/exercises/swift.sh b/exercises/swift.sh index 95443df3..76096379 100755 --- a/exercises/swift.sh +++ b/exercises/swift.sh @@ -2,6 +2,10 @@ # Test swift via the command line tools that ship with it. +echo "**************************************************" +echo "Begin DevStack Exercise: $0" +echo "**************************************************" + # This script exits on an error so that errors don't compound and you see # only the first error that occured. set -o errexit @@ -15,9 +19,17 @@ set -o xtrace # ======== # Use openrc + stackrc + localrc for settings -pushd $(cd $(dirname "$0")/.. && pwd) +pushd $(cd $(dirname "$0")/.. && pwd) >/dev/null + +# Import common functions +source ./functions + +# Import configuration source ./openrc -popd +popd >/dev/null + +# Container name +CONTAINER=ex-swift # Testing Swift @@ -25,16 +37,26 @@ popd # Check if we have to swift via keystone swift stat +die_if_error "Failure geting status" # We start by creating a test container -swift post testcontainer +swift post $CONTAINER +die_if_error "Failure creating container $CONTAINER" # add some files into it. 
-swift upload testcontainer /etc/issue +swift upload $CONTAINER /etc/issue +die_if_error "Failure uploading file to container $CONTAINER" # list them -swift list testcontainer +swift list $CONTAINER +die_if_error "Failure listing contents of container $CONTAINER" # And we may want to delete them now that we have tested that # everything works. -swift delete testcontainer +swift delete $CONTAINER +die_if_error "Failure deleting container $CONTAINER" + +set +o xtrace +echo "**************************************************" +echo "End DevStack Exercise: $0" +echo "**************************************************" diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 622fb185..a812401a 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -2,6 +2,10 @@ # Test nova volumes with the nova command from python-novaclient +echo "**************************************************" +echo "Begin DevStack Exercise: $0" +echo "**************************************************" + # This script exits on an error so that errors don't compound and you see # only the first error that occured. set -o errexit @@ -15,9 +19,14 @@ set -o xtrace # ======== # Use openrc + stackrc + localrc for settings -pushd $(cd $(dirname "$0")/.. && pwd) +pushd $(cd $(dirname "$0")/.. && pwd) >/dev/null + +# Import common functions +source ./functions + +# Import configuration source ./openrc -popd +popd >/dev/null # Max time to wait while vm goes from build to active state ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} @@ -55,21 +64,6 @@ IMAGE=`glance -f index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` # determinine instance type # ------------------------- -# Helper function to grab a numbered field from python novaclient cli result -# Fields are numbered starting with 1 -# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc. -function get_field () { - while read data - do - if [ "$1" -lt 0 ]; then - field="(\$(NF$1))" - else - field="\$$(($1 + 1))" - fi - echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}" - done -} - # List of instance types: nova flavor-list @@ -79,9 +73,11 @@ if [[ -z "$INSTANCE_TYPE" ]]; then INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | get_field 1` fi -NAME="myserver" +NAME="ex-vol" VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | get_field 2` +die_if_not_set VM_UUID "Failure launching $NAME" + # Testing # ======= @@ -101,6 +97,7 @@ fi # get the IP of the server IP=`nova show $VM_UUID | grep "private network" | get_field 2` +die_if_not_set IP "Failure retrieving IP address" # for single node deployments, we can ping private ips MULTI_HOST=${MULTI_HOST:-0} @@ -130,6 +127,10 @@ fi # Create a new volume nova volume-create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" 1 +if [[ $? != 0 ]]; then + echo "Failure creating volume $VOL_NAME" + exit 1 +fi if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then echo "Volume $VOL_NAME not created" exit 1 @@ -137,16 +138,19 @@ fi # Get volume ID VOL_ID=`nova volume-list | grep $VOL_NAME | head -1 | get_field 1` +die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME" # Attach to server DEVICE=/dev/vdb nova volume-attach $VM_UUID $VOL_ID $DEVICE +die_if_error "Failure attaching volume $VOL_NAME to $NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
nova volume-list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then echo "Volume $VOL_NAME not attached to $NAME" exit 1 fi VOL_ATTACH=`nova volume-list | grep $VOL_NAME | head -1 | get_field -1` +die_if_not_set VOL_ATTACH "Failure retrieving $VOL_NAME status" if [[ "$VOL_ATTACH" != $VM_UUID ]]; then echo "Volume not attached to correct instance" exit 1 @@ -154,6 +158,7 @@ fi # Detach volume nova volume-detach $VM_UUID $VOL_ID +die_if_error "Failure detaching volume $VOL_NAME from $NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then echo "Volume $VOL_NAME not detached from $NAME" exit 1 @@ -161,6 +166,7 @@ fi # Delete volume nova volume-delete $VOL_ID +die_if_error "Failure deleting volume $VOL_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME; do sleep 1; done"; then echo "Volume $VOL_NAME not deleted" exit 1 @@ -168,3 +174,9 @@ fi # shutdown the server nova delete $NAME +die_if_error "Failure deleting instance $NAME" + +set +o xtrace +echo "**************************************************" +echo "End DevStack Exercise: $0" +echo "**************************************************" diff --git a/functions b/functions index 01c4758f..adcf5bda 100644 --- a/functions +++ b/functions @@ -22,6 +22,48 @@ function cp_it { } +# Checks the exit code of the last command and prints "message" +# if it is non-zero and exits +# die_if_error "message" +function die_if_error() { + local exitcode=$? + if [ $exitcode != 0 ]; then + echo $@ + exit $exitcode + fi +} + + +# Checks an environment variable is not set or has length 0 OR if the +# exit code is non-zero and prints "message" and exits +# NOTE: env-var is the variable name without a '$' +# die_if_not_set env-var "message" +function die_if_not_set() { + local exitcode=$? + local evar=$1; shift + if ! is_set $evar || [ $exitcode != 0 ]; then + echo $@ + exit 99 + fi +} + + +# Grab a numbered field from python prettytable output +# Fields are numbered starting with 1 +# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc. +# get_field field-number +function get_field() { + while read data; do + if [ "$1" -lt 0 ]; then + field="(\$(NF$1))" + else + field="\$$(($1 + 1))" + fi + echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}" + done +} + + # git clone only if directory doesn't exist already. Since ``DEST`` might not # be owned by the installation user, we create the directory and change the # ownership to the proper user. @@ -67,6 +109,18 @@ function git_clone { } + +# Test if the named environment variable is set and not zero length +# is_set env-var +function is_set() { + local var=\$"$1" + if eval "[ -z $var ]"; then + return 1 + fi + return 0 +} + + # pip install wrapper to set cache and proxy environment variables # pip_install package [package ...] function pip_install { diff --git a/tests/functions.sh b/tests/functions.sh new file mode 100755 index 00000000..0fd76cca --- /dev/null +++ b/tests/functions.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +# Tests for DevStack functions + +TOP=$(cd $(dirname "$0")/.. && pwd) + +# Import common functions +source $TOP/functions + +# Import configuration +source $TOP/openrc + + +echo "Testing die_if_error()" + +bash -c "source $TOP/functions; true; die_if_error 'not OK'" +if [[ $? != 0 ]]; then + echo "die_if_error [true] Failed" +fi + +bash -c "source $TOP/functions; false; die_if_error 'OK'" +if [[ $? 
= 0 ]]; then + echo "die_if_error [false] Failed" +else + echo 'OK' +fi + + +echo "Testing die_if_not_set()" + +bash -c "source $TOP/functions; X=`echo Y && true`; die_if_not_set X 'not OK'" +if [[ $? != 0 ]]; then + echo "die_if_not_set [X='Y' true] Failed" +else + echo 'OK' +fi + +bash -c "source $TOP/functions; X=`true`; die_if_not_set X 'OK'" +if [[ $? = 0 ]]; then + echo "die_if_not_set [X='' true] Failed" +fi + +bash -c "source $TOP/functions; X=`echo Y && false`; die_if_not_set X 'not OK'" +if [[ $? != 0 ]]; then + echo "die_if_not_set [X='Y' false] Failed" +else + echo 'OK' +fi + +bash -c "source $TOP/functions; X=`false`; die_if_not_set X 'OK'" +if [[ $? = 0 ]]; then + echo "die_if_not_set [X='' false] Failed" +fi + From 7052f7299065650dccb69dd12b6e5414aac32005 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sat, 3 Mar 2012 00:51:46 -0800 Subject: [PATCH 392/967] use --silent-upload for glance - bug 945454 Change-Id: Ib96b38223e21644e3bfbbb3836eb24cbd5d33b49 --- stack.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index 35a7b3dd..e4bbd0e1 100755 --- a/stack.sh +++ b/stack.sh @@ -1573,9 +1573,9 @@ if is_service_enabled g-reg; then fi tar -zxf $FILES/tty.tgz -C $FILES/images - RVAL=`glance add -A $TOKEN name="tty-kernel" is_public=true container_format=aki disk_format=aki < $FILES/images/aki-tty/image` + RVAL=`glance add --silent-upload -A $TOKEN name="tty-kernel" is_public=true container_format=aki disk_format=aki < $FILES/images/aki-tty/image` KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` - RVAL=`glance add -A $TOKEN name="tty-ramdisk" is_public=true container_format=ari disk_format=ari < $FILES/images/ari-tty/image` + RVAL=`glance add --silent-upload -A $TOKEN name="tty-ramdisk" is_public=true container_format=ari disk_format=ari < $FILES/images/ari-tty/image` RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` glance add -A $TOKEN name="tty" is_public=true container_format=ami disk_format=ami kernel_id=$KERNEL_ID ramdisk_id=$RAMDISK_ID < $FILES/images/ami-tty/image fi @@ -1624,11 +1624,11 @@ if is_service_enabled g-reg; then # kernel for use when uploading the root filesystem. 
KERNEL_ID=""; RAMDISK_ID=""; if [ -n "$KERNEL" ]; then - RVAL=`glance add -A $TOKEN name="$IMAGE_NAME-kernel" is_public=true container_format=aki disk_format=aki < "$KERNEL"` + RVAL=`glance add --silent-upload -A $TOKEN name="$IMAGE_NAME-kernel" is_public=true container_format=aki disk_format=aki < "$KERNEL"` KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` fi if [ -n "$RAMDISK" ]; then - RVAL=`glance add -A $TOKEN name="$IMAGE_NAME-ramdisk" is_public=true container_format=ari disk_format=ari < "$RAMDISK"` + RVAL=`glance add --silent-upload -A $TOKEN name="$IMAGE_NAME-ramdisk" is_public=true container_format=ari disk_format=ari < "$RAMDISK"` RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` fi glance add -A $TOKEN name="${IMAGE_NAME%.img}" is_public=true container_format=ami disk_format=ami ${KERNEL_ID:+kernel_id=$KERNEL_ID} ${RAMDISK_ID:+ramdisk_id=$RAMDISK_ID} < <(zcat --force "${IMAGE}") From 9f1891e668fa23c9097197f2a3c6e3047cae5d51 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Sun, 4 Mar 2012 23:40:05 -0800 Subject: [PATCH 393/967] fix kvm configuration for jenkins Change-Id: I46cd63f2ff1eaf24872486f1b0bcb16f2e6f1dd6 --- tools/jenkins/configurations/kvm.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh index 727b42a4..d9a160ad 100755 --- a/tools/jenkins/configurations/kvm.sh +++ b/tools/jenkins/configurations/kvm.sh @@ -48,6 +48,7 @@ GUEST_RAM=12574720 MYSQL_PASSWORD=chicken RABBIT_PASSWORD=chicken SERVICE_TOKEN=chicken +SERVICE_PASSWORD=chicken ADMIN_PASSWORD=chicken USERNAME=admin TENANT=admin From 0a8b3222a8858ad7d4a2e092f83997f059b850c6 Mon Sep 17 00:00:00 2001 From: Eoghan Glynn Date: Mon, 5 Mar 2012 13:15:56 +0000 Subject: [PATCH 394/967] Avoid duplicate "/tokens" path in auth_url. The "/tokens" path is explicitly appended to the OS_AUTH_URL by the configure_tempest.sh tool, but this is also appended internally by glance: https://github.com/openstack/glance/blob/master/glance/common/auth.py#L111 leading to a duplicated "/tokens/tokens" path. Change-Id: I2ea49289f7e1976346eff220dc3d1664bdad4fb0 --- tools/configure_tempest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index f6ef0d3b..9b25b7e8 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -133,7 +133,7 @@ fi sed -e " /^api_key=/s|=.*\$|=$ADMIN_PASSWORD|; - /^auth_url=/s|=.*\$|=${OS_AUTH_URL%/}/tokens/|; + /^auth_url=/s|=.*\$|=${OS_AUTH_URL%/}/|; /^host=/s|=.*\$|=$HOST_IP|; /^image_ref=/s|=.*\$|=$IMAGE_UUID|; /^password=/s|=.*\$|=$ADMIN_PASSWORD|; From 3e20503ef05b3d1d482d050dbf4f318bcd79fbb6 Mon Sep 17 00:00:00 2001 From: Eoghan Glynn Date: Mon, 5 Mar 2012 14:30:02 +0000 Subject: [PATCH 395/967] Set nova config allow_resize_on_same_host Include --allow_resize_on_same_host in nova.conf to allow the tempest resize tests to proceed against devstack. 
Change-Id: Ifd030ed591d63146c21e640240398a81cee194cd --- stack.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/stack.sh b/stack.sh index 6bbf3025..34936d42 100755 --- a/stack.sh +++ b/stack.sh @@ -1189,6 +1189,7 @@ rm -f $NOVA_DIR/bin/nova.conf rm -f $NOVA_CONF/nova.conf add_nova_flag "--verbose" add_nova_flag "--allow_admin_api" +add_nova_flag "--allow_resize_to_same_host" add_nova_flag "--scheduler_driver=$SCHEDULER" add_nova_flag "--dhcpbridge_flagfile=$NOVA_CONF/nova.conf" add_nova_flag "--fixed_range=$FIXED_RANGE" From b7d1fbbe20ce8ef60607d937c22293dfff90e964 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 2 Mar 2012 08:43:09 -0600 Subject: [PATCH 396/967] Cleanup keystone_data.sh * Remove the compatibility code for older keystone client * Reformat commands similar to keystone sample_data.sh * Improve documentation Change-Id: I2fc544555a1b936d28f11c3c4eaaf885b2cb6d17 --- files/keystone_data.sh | 250 +++++++++++++++++++---------------------- 1 file changed, 117 insertions(+), 133 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index e2928111..958d2af4 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -1,165 +1,149 @@ #!/bin/bash -# Tenants +# +# Initial data for Keystone using python-keystoneclient +# +# A set of EC2-compatible credentials is created for both admin and demo +# users and placed in $DEVSTACK_DIR/ec2rc. +# +# Tenant User Roles +# ------------------------------------------------------- +# admin admin admin +# service glance admin +# service nova admin +# service quantum admin # if enabled +# service swift admin # if enabled +# demo admin admin +# demo demo Member,sysadmin,netadmin +# invisible_to_admin demo Member +# +# Variables set before calling this script: +# SERVICE_TOKEN - aka admin_token in keystone.conf +# SERVICE_ENDPOINT - local Keystone admin endpoint +# SERVICE_TENANT_NAME - name of tenant containing service accounts +# ENABLED_SERVICES - stack.sh's list of services to start +# DEVSTACK_DIR - Top-level DevStack directory + +ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete} +SERVICE_PASSWORD=${SERVICE_PASSWORD:-$ADMIN_PASSWORD} export SERVICE_TOKEN=$SERVICE_TOKEN export SERVICE_ENDPOINT=$SERVICE_ENDPOINT +SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} function get_id () { - echo `$@ | grep ' id ' | awk '{print $4}'` + echo `$@ | awk '/ id / { print $4 }'` } -# Detect if the keystone cli binary has the command names changed -# in https://review.openstack.org/4375 -# FIXME(dtroyer): Remove the keystone client command checking -# after a suitable transition period. 
add-user-role -# and ec2-create-credentials were renamed -if keystone help | grep -q user-role-add; then - KEYSTONE_COMMAND_4375=1 -fi - -ADMIN_TENANT=`get_id keystone tenant-create --name=admin` -SERVICE_TENANT=`get_id keystone tenant-create --name=$SERVICE_TENANT_NAME` -DEMO_TENANT=`get_id keystone tenant-create --name=demo` -INVIS_TENANT=`get_id keystone tenant-create --name=invisible_to_admin` +# Tenants +ADMIN_TENANT=$(get_id keystone tenant-create --name=admin) +SERVICE_TENANT=$(get_id keystone tenant-create --name=$SERVICE_TENANT_NAME) +DEMO_TENANT=$(get_id keystone tenant-create --name=demo) +INVIS_TENANT=$(get_id keystone tenant-create --name=invisible_to_admin) # Users -ADMIN_USER=`get_id keystone user-create \ - --name=admin \ - --pass="$ADMIN_PASSWORD" \ - --email=admin@example.com` -DEMO_USER=`get_id keystone user-create \ - --name=demo \ - --pass="$ADMIN_PASSWORD" \ - --email=admin@example.com` +ADMIN_USER=$(get_id keystone user-create --name=admin \ + --pass="$ADMIN_PASSWORD" \ + --email=admin@example.com) +DEMO_USER=$(get_id keystone user-create --name=demo \ + --pass="$ADMIN_PASSWORD" \ + --email=demo@example.com) # Roles -ADMIN_ROLE=`get_id keystone role-create --name=admin` -MEMBER_ROLE=`get_id keystone role-create --name=Member` -KEYSTONEADMIN_ROLE=`get_id keystone role-create --name=KeystoneAdmin` -KEYSTONESERVICE_ROLE=`get_id keystone role-create --name=KeystoneServiceAdmin` -SYSADMIN_ROLE=`get_id keystone role-create --name=sysadmin` -NETADMIN_ROLE=`get_id keystone role-create --name=netadmin` - - -if [[ -n "$KEYSTONE_COMMAND_4375" ]]; then - # Add Roles to Users in Tenants - keystone user-role-add --user $ADMIN_USER --role $ADMIN_ROLE --tenant_id $ADMIN_TENANT - keystone user-role-add --user $DEMO_USER --role $MEMBER_ROLE --tenant_id $DEMO_TENANT - keystone user-role-add --user $DEMO_USER --role $SYSADMIN_ROLE --tenant_id $DEMO_TENANT - keystone user-role-add --user $DEMO_USER --role $NETADMIN_ROLE --tenant_id $DEMO_TENANT - keystone user-role-add --user $DEMO_USER --role $MEMBER_ROLE --tenant_id $INVIS_TENANT - keystone user-role-add --user $ADMIN_USER --role $ADMIN_ROLE --tenant_id $DEMO_TENANT - - # TODO(termie): these two might be dubious - keystone user-role-add --user $ADMIN_USER --role $KEYSTONEADMIN_ROLE --tenant_id $ADMIN_TENANT - keystone user-role-add --user $ADMIN_USER --role $KEYSTONESERVICE_ROLE --tenant_id $ADMIN_TENANT -else - ### compat - # Add Roles to Users in Tenants - keystone add-user-role $ADMIN_USER $ADMIN_ROLE $ADMIN_TENANT - keystone add-user-role $DEMO_USER $MEMBER_ROLE $DEMO_TENANT - keystone add-user-role $DEMO_USER $SYSADMIN_ROLE $DEMO_TENANT - keystone add-user-role $DEMO_USER $NETADMIN_ROLE $DEMO_TENANT - keystone add-user-role $DEMO_USER $MEMBER_ROLE $INVIS_TENANT - keystone add-user-role $ADMIN_USER $ADMIN_ROLE $DEMO_TENANT - - # TODO(termie): these two might be dubious - keystone add-user-role $ADMIN_USER $KEYSTONEADMIN_ROLE $ADMIN_TENANT - keystone add-user-role $ADMIN_USER $KEYSTONESERVICE_ROLE $ADMIN_TENANT - ### -fi +ADMIN_ROLE=$(get_id keystone role-create --name=admin) +KEYSTONEADMIN_ROLE=$(get_id keystone role-create --name=KeystoneAdmin) +KEYSTONESERVICE_ROLE=$(get_id keystone role-create --name=KeystoneServiceAdmin) +SYSADMIN_ROLE=$(get_id keystone role-create --name=sysadmin) +NETADMIN_ROLE=$(get_id keystone role-create --name=netadmin) + + +# Add Roles to Users in Tenants +keystone user-role-add --user $ADMIN_USER --role $ADMIN_ROLE --tenant_id $ADMIN_TENANT +keystone user-role-add --user $ADMIN_USER --role $ADMIN_ROLE 
--tenant_id $DEMO_TENANT +keystone user-role-add --user $DEMO_USER --role $SYSADMIN_ROLE --tenant_id $DEMO_TENANT +keystone user-role-add --user $DEMO_USER --role $NETADMIN_ROLE --tenant_id $DEMO_TENANT + +# TODO(termie): these two might be dubious +keystone user-role-add --user $ADMIN_USER --role $KEYSTONEADMIN_ROLE --tenant_id $ADMIN_TENANT +keystone user-role-add --user $ADMIN_USER --role $KEYSTONESERVICE_ROLE --tenant_id $ADMIN_TENANT + + +# The Member role is used by Horizon and Swift so we need to keep it: +MEMBER_ROLE=$(get_id keystone role-create --name=Member) +keystone user-role-add --user $DEMO_USER --role $MEMBER_ROLE --tenant_id $DEMO_TENANT +keystone user-role-add --user $DEMO_USER --role $MEMBER_ROLE --tenant_id $INVIS_TENANT + # Services -keystone service-create \ - --name=nova \ - --type=compute \ - --description="Nova Compute Service" -NOVA_USER=`get_id keystone user-create \ - --name=nova \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=nova@example.com` +keystone service-create --name=keystone \ + --type=identity \ + --description="Keystone Identity Service" + +keystone service-create --name=nova \ + --type=compute \ + --description="Nova Compute Service" +NOVA_USER=$(get_id keystone user-create --name=nova \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=nova@example.com) keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user $NOVA_USER \ - --role $ADMIN_ROLE - -keystone service-create \ - --name=ec2 \ - --type=ec2 \ - --description="EC2 Compatibility Layer" - -keystone service-create \ - --name=glance \ - --type=image \ - --description="Glance Image Service" -GLANCE_USER=`get_id keystone user-create \ - --name=glance \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=glance@example.com` + --user $NOVA_USER \ + --role $ADMIN_ROLE + +keystone service-create --name=ec2 \ + --type=ec2 \ + --description="EC2 Compatibility Layer" + +keystone service-create --name=glance \ + --type=image \ + --description="Glance Image Service" +GLANCE_USER=$(get_id keystone user-create --name=glance \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=glance@example.com) keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user $GLANCE_USER \ - --role $ADMIN_ROLE - -keystone service-create \ - --name=keystone \ - --type=identity \ - --description="Keystone Identity Service" + --user $GLANCE_USER \ + --role $ADMIN_ROLE if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then - keystone service-create \ - --name="nova-volume" \ - --type=volume \ - --description="Nova Volume Service" + keystone service-create --name="nova-volume" \ + --type=volume \ + --description="Nova Volume Service" fi if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then - keystone service-create \ - --name=swift \ - --type="object-store" \ - --description="Swift Service" - SWIFT_USER=`get_id keystone user-create \ - --name=swift \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=swift@example.com` + keystone service-create --name=swift \ + --type="object-store" \ + --description="Swift Service" + SWIFT_USER=$(get_id keystone user-create --name=swift \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=swift@example.com) keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user $SWIFT_USER \ - --role $ADMIN_ROLE + --user $SWIFT_USER \ + --role $ADMIN_ROLE fi + if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then - keystone service-create \ - --name=quantum \ - --type=network \ - 
--description="Quantum Service" - QUANTUM_USER=`get_id keystone user-create \ - --name=quantum \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=quantum@example.com` + keystone service-create --name=quantum \ + --type=network \ + --description="Quantum Service" + QUANTUM_USER=$(get_id keystone user-create --name=quantum \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=quantum@example.com) keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user $QUANTUM_USER \ - --role $ADMIN_ROLE + --user $QUANTUM_USER \ + --role $ADMIN_ROLE fi # create ec2 creds and parse the secret and access key returned -if [[ -n "$KEYSTONE_COMMAND_4375" ]]; then - RESULT=`keystone ec2-credentials-create --tenant_id=$ADMIN_TENANT --user=$ADMIN_USER` -else - RESULT=`keystone ec2-create-credentials --tenant_id=$ADMIN_TENANT --user_id=$ADMIN_USER` -fi - echo `$@ | grep id | awk '{print $4}'` -ADMIN_ACCESS=`echo "$RESULT" | grep access | awk '{print $4}'` -ADMIN_SECRET=`echo "$RESULT" | grep secret | awk '{print $4}'` +RESULT=$(keystone ec2-credentials-create --tenant_id=$ADMIN_TENANT --user=$ADMIN_USER) +ADMIN_ACCESS=$(echo "$RESULT" | awk '/ access / { print $4 }') +ADMIN_SECRET=$(echo "$RESULT" | awk '/ secret / { print $4 }') - -if [[ -n "$KEYSTONE_COMMAND_4375" ]]; then - RESULT=`keystone ec2-credentials-create --tenant_id=$DEMO_TENANT --user=$DEMO_USER` -else - RESULT=`keystone ec2-create-credentials --tenant_id=$DEMO_TENANT --user_id=$DEMO_USER` -fi -DEMO_ACCESS=`echo "$RESULT" | grep access | awk '{print $4}'` -DEMO_SECRET=`echo "$RESULT" | grep secret | awk '{print $4}'` +RESULT=$(keystone ec2-credentials-create --tenant_id=$DEMO_TENANT --user=$DEMO_USER) +DEMO_ACCESS=$(echo "$RESULT" | awk '/ access / { print $4 }') +DEMO_SECRET=$(echo "$RESULT" | awk '/ secret / { print $4 }') # write the secret and access to ec2rc cat > $DEVSTACK_DIR/ec2rc < Date: Fri, 2 Mar 2012 16:36:16 -0600 Subject: [PATCH 397/967] Create nova.conf in INI format (rather than flagfile) * Rename EXTRA_FLAGS to EXTRA_OPTS * Is dependant on https://review.openstack.org/4946 (MERGED) Fixes bug 938924 Change-Id: Idee4f86e7846cff3529e173526d3ccb2289472f0 --- stack.sh | 152 ++++++++++++++++++++++++++++++------------------------- 1 file changed, 82 insertions(+), 70 deletions(-) diff --git a/stack.sh b/stack.sh index 34936d42..6bf83a6d 100755 --- a/stack.sh +++ b/stack.sh @@ -829,11 +829,11 @@ fi # ---- # Put config files in /etc/nova for everyone to find -NOVA_CONF=/etc/nova -if [[ ! -d $NOVA_CONF ]]; then - sudo mkdir -p $NOVA_CONF +NOVA_CONF_DIR=/etc/nova +if [[ ! 
-d $NOVA_CONF_DIR ]]; then + sudo mkdir -p $NOVA_CONF_DIR fi -sudo chown `whoami` $NOVA_CONF +sudo chown `whoami` $NOVA_CONF_DIR if is_service_enabled n-api; then # We are going to use a sample http middleware configuration based on the @@ -844,7 +844,7 @@ if is_service_enabled n-api; then rm -f $NOVA_DIR/bin/nova-api-paste.ini # First we add a some extra data to the default paste config from nova - cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF + cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR # Then we add our own service token to the configuration sed -e " @@ -854,11 +854,11 @@ if is_service_enabled n-api; then /admin_password/s/^.*$/admin_password = $SERVICE_PASSWORD/; s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g; s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g; - " -i $NOVA_CONF/api-paste.ini + " -i $NOVA_CONF_DIR/api-paste.ini # Finally, we change the pipelines in nova to use keystone function replace_pipeline() { - sed "/\[pipeline:$1\]/,/\[/s/^pipeline = .*/pipeline = $2/" -i $NOVA_CONF/api-paste.ini + sed "/\[pipeline:$1\]/,/\[/s/^pipeline = .*/pipeline = $2/" -i $NOVA_CONF_DIR/api-paste.ini } replace_pipeline "ec2cloud" "ec2faultwrap logrequest totoken authtoken keystonecontext cloudrequest authorizer validator ec2executor" replace_pipeline "ec2admin" "ec2faultwrap logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor" @@ -1178,68 +1178,69 @@ if is_service_enabled n-vol; then sudo start tgt fi -function add_nova_flag { - echo "$1" >> $NOVA_CONF/nova.conf +NOVA_CONF=nova.conf +function add_nova_opt { + echo "$1" >> $NOVA_CONF_DIR/$NOVA_CONF } # remove legacy nova.conf rm -f $NOVA_DIR/bin/nova.conf # (re)create nova.conf -rm -f $NOVA_CONF/nova.conf -add_nova_flag "--verbose" -add_nova_flag "--allow_admin_api" -add_nova_flag "--allow_resize_to_same_host" -add_nova_flag "--scheduler_driver=$SCHEDULER" -add_nova_flag "--dhcpbridge_flagfile=$NOVA_CONF/nova.conf" -add_nova_flag "--fixed_range=$FIXED_RANGE" +rm -f $NOVA_CONF_DIR/$NOVA_CONF +add_nova_opt "[DEFAULT]" +add_nova_opt "verbose=True" +add_nova_opt "allow_resize_to_same_host=True" +add_nova_opt "scheduler_driver=$SCHEDULER" +add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF_DIR/$NOVA_CONF" +add_nova_opt "fixed_range=$FIXED_RANGE" if is_service_enabled n-obj; then - add_nova_flag "--s3_host=$SERVICE_HOST" + add_nova_opt "s3_host=$SERVICE_HOST" fi if is_service_enabled quantum; then - add_nova_flag "--network_manager=nova.network.quantum.manager.QuantumManager" - add_nova_flag "--quantum_connection_host=$Q_HOST" - add_nova_flag "--quantum_connection_port=$Q_PORT" + add_nova_opt "network_manager=nova.network.quantum.manager.QuantumManager" + add_nova_opt "quantum_connection_host=$Q_HOST" + add_nova_opt "quantum_connection_port=$Q_PORT" if is_service_enabled melange; then - add_nova_flag "--quantum_ipam_lib=nova.network.quantum.melange_ipam_lib" - add_nova_flag "--use_melange_mac_generation" - add_nova_flag "--melange_host=$M_HOST" - add_nova_flag "--melange_port=$M_PORT" + add_nova_opt "quantum_ipam_lib=nova.network.quantum.melange_ipam_lib" + add_nova_opt "use_melange_mac_generation=True" + add_nova_opt "melange_host=$M_HOST" + add_nova_opt "melange_port=$M_PORT" fi if is_service_enabled q-svc && [[ "$Q_PLUGIN" = "openvswitch" ]]; then - add_nova_flag "--libvirt_vif_type=ethernet" - add_nova_flag "--libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver" - add_nova_flag "--linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver" - add_nova_flag "--quantum_use_dhcp" + add_nova_opt 
"libvirt_vif_type=ethernet" + add_nova_opt "libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver" + add_nova_opt "linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver" + add_nova_opt "quantum_use_dhcp=True" fi else - add_nova_flag "--network_manager=nova.network.manager.$NET_MAN" + add_nova_opt "network_manager=nova.network.manager.$NET_MAN" fi if is_service_enabled n-vol; then - add_nova_flag "--volume_group=$VOLUME_GROUP" - add_nova_flag "--volume_name_template=${VOLUME_NAME_PREFIX}%08x" + add_nova_opt "volume_group=$VOLUME_GROUP" + add_nova_opt "volume_name_template=${VOLUME_NAME_PREFIX}%08x" # oneiric no longer supports ietadm - add_nova_flag "--iscsi_helper=tgtadm" + add_nova_opt "iscsi_helper=tgtadm" fi -add_nova_flag "--osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions" -add_nova_flag "--my_ip=$HOST_IP" -add_nova_flag "--public_interface=$PUBLIC_INTERFACE" -add_nova_flag "--vlan_interface=$VLAN_INTERFACE" -add_nova_flag "--flat_network_bridge=$FLAT_NETWORK_BRIDGE" +add_nova_opt "osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions" +add_nova_opt "my_ip=$HOST_IP" +add_nova_opt "public_interface=$PUBLIC_INTERFACE" +add_nova_opt "vlan_interface=$VLAN_INTERFACE" +add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE" if [ -n "$FLAT_INTERFACE" ]; then - add_nova_flag "--flat_interface=$FLAT_INTERFACE" + add_nova_opt "flat_interface=$FLAT_INTERFACE" fi -add_nova_flag "--sql_connection=$BASE_SQL_CONN/nova" -add_nova_flag "--libvirt_type=$LIBVIRT_TYPE" -add_nova_flag "--instance_name_template=${INSTANCE_NAME_PREFIX}%08x" +add_nova_opt "sql_connection=$BASE_SQL_CONN/nova" +add_nova_opt "libvirt_type=$LIBVIRT_TYPE" +add_nova_opt "instance_name_template=${INSTANCE_NAME_PREFIX}%08x" # All nova-compute workers need to know the vnc configuration options # These settings don't hurt anything if n-xvnc and n-novnc are disabled if is_service_enabled n-cpu; then NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} - add_nova_flag "--novncproxy_base_url=$NOVNCPROXY_URL" + add_nova_opt "novncproxy_base_url=$NOVNCPROXY_URL" XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"} - add_nova_flag "--xvpvncproxy_base_url=$XVPVNCPROXY_URL" + add_nova_opt "xvpvncproxy_base_url=$XVPVNCPROXY_URL" fi if [ "$VIRT_DRIVER" = 'xenserver' ]; then VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1} @@ -1249,30 +1250,36 @@ fi # Address on which instance vncservers will listen on compute hosts. # For multi-host, this should be the management ip of the compute host. 
VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} -add_nova_flag "--vncserver_listen=$VNCSERVER_LISTEN" -add_nova_flag "--vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS" -add_nova_flag "--api_paste_config=$NOVA_CONF/api-paste.ini" -add_nova_flag "--image_service=nova.image.glance.GlanceImageService" -add_nova_flag "--ec2_dmz_host=$EC2_DMZ_HOST" -add_nova_flag "--rabbit_host=$RABBIT_HOST" -add_nova_flag "--rabbit_password=$RABBIT_PASSWORD" -add_nova_flag "--glance_api_servers=$GLANCE_HOSTPORT" -add_nova_flag "--force_dhcp_release" +add_nova_opt "vncserver_listen=$VNCSERVER_LISTEN" +add_nova_opt "vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS" +add_nova_opt "api_paste_config=$NOVA_CONF_DIR/api-paste.ini" +add_nova_opt "image_service=nova.image.glance.GlanceImageService" +add_nova_opt "ec2_dmz_host=$EC2_DMZ_HOST" +add_nova_opt "rabbit_host=$RABBIT_HOST" +add_nova_opt "rabbit_password=$RABBIT_PASSWORD" +add_nova_opt "glance_api_servers=$GLANCE_HOSTPORT" +add_nova_opt "force_dhcp_release=True" if [ -n "$INSTANCES_PATH" ]; then - add_nova_flag "--instances_path=$INSTANCES_PATH" + add_nova_opt "instances_path=$INSTANCES_PATH" fi if [ "$MULTI_HOST" != "False" ]; then - add_nova_flag "--multi_host" - add_nova_flag "--send_arp_for_ha" + add_nova_opt "multi_host=True" + add_nova_opt "send_arp_for_ha=True" fi if [ "$SYSLOG" != "False" ]; then - add_nova_flag "--use_syslog" + add_nova_opt "use_syslog=True" fi -# You can define extra nova conf flags by defining the array EXTRA_FLAGS, -# For Example: EXTRA_FLAGS=(--foo --bar=2) -for I in "${EXTRA_FLAGS[@]}"; do - add_nova_flag $I +# Provide some transition from EXTRA_FLAGS to EXTRA_OPTS +if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; then + EXTRA_OPTS=$EXTRA_FLAGS +fi + +# You can define extra nova conf flags by defining the array EXTRA_OPTS, +# For Example: EXTRA_OPTS=(foo=true bar=2) +for I in "${EXTRA_OPTS[@]}"; do + # Attempt to convert flags to options + add_nova_opt ${I//-} done # XenServer @@ -1280,19 +1287,19 @@ done if [ "$VIRT_DRIVER" = 'xenserver' ]; then read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." 
- add_nova_flag "--connection_type=xenapi" + add_nova_opt "connection_type=xenapi" XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"} - add_nova_flag "--xenapi_connection_url=$XENAPI_CONNECTION_URL" - add_nova_flag "--xenapi_connection_username=root" - add_nova_flag "--xenapi_connection_password=$XENAPI_PASSWORD" - add_nova_flag "--noflat_injected" + add_nova_opt "xenapi_connection_url=$XENAPI_CONNECTION_URL" + add_nova_opt "xenapi_connection_username=root" + add_nova_opt "xenapi_connection_password=$XENAPI_PASSWORD" + add_nova_opt "flat_injected=False" # Need to avoid crash due to new firewall support XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} - add_nova_flag "--firewall_driver=$XEN_FIREWALL_DRIVER" + add_nova_opt "firewall_driver=$XEN_FIREWALL_DRIVER" else - add_nova_flag "--connection_type=libvirt" + add_nova_opt "connection_type=libvirt" LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} - add_nova_flag "--firewall_driver=$LIBVIRT_FIREWALL_DRIVER" + add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER" fi # Nova Database @@ -1539,10 +1546,10 @@ screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume" screen_it n-net "cd $NOVA_DIR && $NOVA_DIR/bin/nova-network" screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler" if is_service_enabled n-novnc; then - screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --flagfile $NOVA_CONF/nova.conf --web ." + screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF --web ." fi if is_service_enabled n-xvnc; then - screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --flagfile $NOVA_CONF/nova.conf" + screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF" fi if is_service_enabled n-cauth; then screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" @@ -1673,5 +1680,10 @@ fi # Echo HOST_IP - useful for build_uec.sh, which uses dhcp to give the instance an address echo "This is your host ip: $HOST_IP" +# Warn that EXTRA_FLAGS needs to be converted to EXTRA_OPTS +if [[ -n "$EXTRA_FLAGS" ]]; then + echo "WARNING: EXTRA_FLAGS is defined and may need to be converted to EXTRA_OPTS" +fi + # Indicate how long this took to run (bash maintained variable 'SECONDS') echo "stack.sh completed in $SECONDS seconds." 
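With the flagfile replaced by an INI-style file, extra nova.conf entries are now supplied through EXTRA_OPTS as plain key=value strings; old EXTRA_FLAGS values are only carried over with their dashes stripped by the transition loop above, and stack.sh warns that they may need manual conversion. A minimal, illustrative localrc fragment (the option names below are the placeholders used in the patch's own comment, not real nova options):

    # localrc fragment -- illustrative only
    # Deprecated style; during the transition stack.sh just strips the dashes:
    #   EXTRA_FLAGS=(--foo --bar=2)
    # Current style; each element is appended verbatim under [DEFAULT] in
    # $NOVA_CONF_DIR/$NOVA_CONF by add_nova_opt:
    EXTRA_OPTS=(foo=true bar=2)

As the diff above shows, nova-novncproxy and nova-xvpvncproxy are then started with --config-file $NOVA_CONF_DIR/$NOVA_CONF in place of the old --flagfile argument.
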
From 50aef3ba305ecf1855999997e05b8aba6f2000de Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 5 Mar 2012 23:12:04 -0800 Subject: [PATCH 398/967] Sets nova auth strategy to keystone * Prepares for https://review.openstack.org/#change,4955 Change-Id: I9bacd36d88dc434b1799336e7b2bd8fbf4354bf3 --- stack.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/stack.sh b/stack.sh index 6bf83a6d..c7529b8c 100755 --- a/stack.sh +++ b/stack.sh @@ -1190,6 +1190,7 @@ rm -f $NOVA_DIR/bin/nova.conf rm -f $NOVA_CONF_DIR/$NOVA_CONF add_nova_opt "[DEFAULT]" add_nova_opt "verbose=True" +add_nova_opt "auth_strategy=keystone" add_nova_opt "allow_resize_to_same_host=True" add_nova_opt "scheduler_driver=$SCHEDULER" add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF_DIR/$NOVA_CONF" From 51aa401ff1bc5a217c53dcdfe74c3d13a6a991f6 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 6 Mar 2012 12:45:19 -0800 Subject: [PATCH 399/967] Use the suggested default scheduler Change-Id: I99de26ab653e670576e0825ad35b0a67eacda6f7 --- stack.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index c7529b8c..eead8a19 100755 --- a/stack.sh +++ b/stack.sh @@ -195,9 +195,9 @@ INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-} VIRT_DRIVER=${VIRT_DRIVER:-libvirt} LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} -# nova supports pluggable schedulers. ``SimpleScheduler`` should work in most -# cases unless you are working on multi-zone mode. -SCHEDULER=${SCHEDULER:-nova.scheduler.simple.SimpleScheduler} +# Nova supports pluggable schedulers. ``FilterScheduler`` should work in most +# cases. +SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler} HOST_IP_IFACE=${HOST_IP_IFACE:-eth0} # Use the eth0 IP unless an explicit is set by ``HOST_IP`` environment variable @@ -1192,7 +1192,7 @@ add_nova_opt "[DEFAULT]" add_nova_opt "verbose=True" add_nova_opt "auth_strategy=keystone" add_nova_opt "allow_resize_to_same_host=True" -add_nova_opt "scheduler_driver=$SCHEDULER" +add_nova_opt "compute_scheduler_driver=$SCHEDULER" add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF_DIR/$NOVA_CONF" add_nova_opt "fixed_range=$FIXED_RANGE" if is_service_enabled n-obj; then From d0b21e2d1967f650ddd123c1ecc1659b1baff783 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 7 Mar 2012 14:52:25 -0600 Subject: [PATCH 400/967] Fix dependency list generation corner cases * Handle empty install lists in apt_get() and pip_install() * pip_install now uses get_packages() to get the dependency list Fixes bug 948714 Change-Id: I174a60976df18c670eab2067edcd1871c51d07d6 --- functions | 4 ++-- stack.sh | 20 ++++++++++++-------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/functions b/functions index adcf5bda..c4d56a23 100644 --- a/functions +++ b/functions @@ -4,7 +4,7 @@ # apt-get wrapper to set arguments correctly # apt_get package [package ...] function apt_get() { - [[ "$OFFLINE" = "True" ]] && return + [[ "$OFFLINE" = "True" || -z "$@" ]] && return local sudo="sudo" [[ "$(id -u)" = "0" ]] && sudo="env" $sudo DEBIAN_FRONTEND=noninteractive \ @@ -124,7 +124,7 @@ function is_set() { # pip install wrapper to set cache and proxy environment variables # pip_install package [package ...] 
function pip_install { - [[ "$OFFLINE" = "True" ]] && return + [[ "$OFFLINE" = "True" || -z "$@" ]] && return sudo PIP_DOWNLOAD_CACHE=/var/cache/pip \ HTTP_PROXY=$http_proxy \ HTTPS_PROXY=$https_proxy \ diff --git a/stack.sh b/stack.sh index eead8a19..fa5652f2 100755 --- a/stack.sh +++ b/stack.sh @@ -516,12 +516,16 @@ fi # dist:DISTRO1,DISTRO2 it will be installed only for those # distros (case insensitive). function get_packages() { - local file_to_parse="general" + local package_dir=$1 + local file_to_parse local service - for service in ${ENABLED_SERVICES//,/ }; do - # Allow individual services to specify dependencies - if [[ -e $FILES/apts/${service} ]]; then + if [[ -z "$package_dir" ]]; then + echo "No package directory supplied" + return 1 + fi + for service in general ${ENABLED_SERVICES//,/ }; do # Allow individual services to specify dependencies + if [[ -e ${package_dir}/${service} ]]; then file_to_parse="${file_to_parse} $service" fi if [[ $service == n-* ]]; then @@ -540,9 +544,9 @@ function get_packages() { done for file in ${file_to_parse}; do - local fname=${FILES}/apts/${file} + local fname=${package_dir}/${file} local OIFS line package distros distro - [[ -e $fname ]] || { echo "missing: $fname"; exit 1 ;} + [[ -e $fname ]] || continue OIFS=$IFS IFS=$'\n' @@ -568,10 +572,10 @@ function get_packages() { # install apt requirements apt_get update -apt_get install $(get_packages) +apt_get install $(get_packages $FILES/apts) # install python requirements -pip_install `cat $FILES/pips/* | uniq` +pip_install $(get_packages $FILES/pips | sort -u) # compute service git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH From 165121f7b69d9b8ad01d3256d068fdf77cebf840 Mon Sep 17 00:00:00 2001 From: Joe Heck Date: Wed, 7 Mar 2012 14:20:11 -0800 Subject: [PATCH 401/967] adding default bind_host to enable change 4880 Change-Id: I949effe56cad7ae466a458da48bc85fe1c110138 --- files/keystone.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/files/keystone.conf b/files/keystone.conf index d9e639fe..64f0a66f 100644 --- a/files/keystone.conf +++ b/files/keystone.conf @@ -1,4 +1,5 @@ [DEFAULT] +bind_host = 0.0.0.0 public_port = 5000 admin_port = 35357 admin_token = %SERVICE_TOKEN% From 0b31e8678e5817950a0aba37d5930601cb4a1d24 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 7 Mar 2012 16:47:56 -0600 Subject: [PATCH 402/967] Ensure that ip_forwarding is always enabled for compute and network Libvirt gets this in most cases, but it can be disabled. This is belts + suspenders + super glue... 
Fixes bug 885274 Change-Id: Ifb8d627689800b23cce02a623caa525cd17e507d --- stack.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/stack.sh b/stack.sh index eead8a19..b67bf16e 100755 --- a/stack.sh +++ b/stack.sh @@ -886,6 +886,9 @@ if is_service_enabled n-cpu; then # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ apt_get install libvirt-bin + # Force IP forwarding on, just on case + sudo sysctl -w net.ipv4.ip_forward=1 + # attempt to load modules: network block device - used to manage qcow images sudo modprobe nbd || true @@ -967,6 +970,9 @@ if is_service_enabled n-net; then clean_iptables rm -rf $NOVA_DIR/networks mkdir -p $NOVA_DIR/networks + + # Force IP forwarding on, just on case + sudo sysctl -w net.ipv4.ip_forward=1 fi # Storage Service From e0d677c71e30f2caefc1d1c117d3633a064de930 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 7 Mar 2012 14:11:33 -0600 Subject: [PATCH 403/967] Configure nova-rootwrap * Nova runs as the same user as the rest of devstack and due to the sudo configuration for devstack itself it may be possible that root subprocess execution that doesn't go through utils.execute() will not be caught by the expected sudo failure. Fixes bug 944373 Change-Id: I0554b3850a59d065059f8ad349c8ff8fb7cfa132 --- files/sudo/nova | 50 ------------------------------------------------- stack.sh | 26 +++++++++++++++++++------ 2 files changed, 20 insertions(+), 56 deletions(-) delete mode 100644 files/sudo/nova diff --git a/files/sudo/nova b/files/sudo/nova deleted file mode 100644 index 60dca2ba..00000000 --- a/files/sudo/nova +++ /dev/null @@ -1,50 +0,0 @@ -Cmnd_Alias NOVADEVCMDS = /bin/chmod /var/lib/nova/tmp/*/root/.ssh, \ - /bin/chown /var/lib/nova/tmp/*/root/.ssh, \ - /bin/chown, \ - /bin/chmod, \ - /bin/dd, \ - /sbin/ifconfig, \ - /sbin/ip, \ - /sbin/route, \ - /sbin/iptables, \ - /sbin/iptables-save, \ - /sbin/iptables-restore, \ - /sbin/ip6tables-save, \ - /sbin/ip6tables-restore, \ - /sbin/kpartx, \ - /sbin/losetup, \ - /sbin/lvcreate, \ - /sbin/lvdisplay, \ - /sbin/lvremove, \ - /bin/mkdir, \ - /bin/mount, \ - /sbin/pvcreate, \ - /usr/bin/tee, \ - /sbin/tune2fs, \ - /bin/umount, \ - /sbin/vgcreate, \ - /usr/bin/virsh, \ - /usr/bin/qemu-nbd, \ - /usr/sbin/brctl, \ - /sbin/brctl, \ - /usr/sbin/radvd, \ - /usr/sbin/vblade-persist, \ - /sbin/pvcreate, \ - /sbin/aoe-discover, \ - /sbin/vgcreate, \ - /bin/aoe-stat, \ - /bin/kill, \ - /sbin/vconfig, \ - /usr/sbin/ietadm, \ - /sbin/vgs, \ - /sbin/iscsiadm, \ - /usr/bin/socat, \ - /sbin/parted, \ - /usr/sbin/dnsmasq, \ - /usr/sbin/tgtadm, \ - /usr/bin/ovs-vsctl, \ - /usr/bin/ovs-ofctl, \ - /usr/sbin/arping - -%USER% ALL = (root) NOPASSWD: SETENV: NOVADEVCMDS - diff --git a/stack.sh b/stack.sh index eead8a19..680e2f5c 100755 --- a/stack.sh +++ b/stack.sh @@ -135,17 +135,30 @@ if [[ $EUID -eq 0 ]]; then fi exit 1 else - # Our user needs passwordless priviledges for certain commands which nova - # uses internally. - # Natty uec images sudoers does not have a '#includedir'. add one. + # We're not root, make sure sudo is available + dpkg -l sudo + die_if_error "Sudo is required. Re-run stack.sh as root ONE TIME ONLY to set up sudo." + + # UEC images /etc/sudoers does not have a '#includedir'. add one. 
sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers + + # Set up devstack sudoers TEMPFILE=`mktemp` - cat $FILES/sudo/nova > $TEMPFILE - sed -e "s,%USER%,$USER,g" -i $TEMPFILE + echo "`whoami` ALL=(root) NOPASSWD:ALL" >$TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE - sudo mv $TEMPFILE /etc/sudoers.d/stack_sh_nova + sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh + + # Set up the rootwrap sudoers + TEMPFILE=`mktemp` + echo "$USER ALL=(root) NOPASSWD: /usr/local/bin/nova-rootwrap" >$TEMPFILE + chmod 0440 $TEMPFILE + sudo chown root:root $TEMPFILE + sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap + + # Remove old file + sudo rm -f /etc/sudoers.d/stack_sh_nova fi # Set True to configure stack.sh to run cleanly without Internet access. @@ -1192,6 +1205,7 @@ add_nova_opt "[DEFAULT]" add_nova_opt "verbose=True" add_nova_opt "auth_strategy=keystone" add_nova_opt "allow_resize_to_same_host=True" +add_nova_opt "root_helper=sudo /usr/local/bin/nova-rootwrap" add_nova_opt "compute_scheduler_driver=$SCHEDULER" add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF_DIR/$NOVA_CONF" add_nova_opt "fixed_range=$FIXED_RANGE" From c56885ad317521951f054a82e0cc8fea4948329d Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Wed, 29 Feb 2012 16:09:26 -0800 Subject: [PATCH 404/967] Improve devstack docs for Xenserver Minor fixes to scripts Fixes bug 943508 Change-Id: Iaf12b9d67efa78623e8253599010f60192b4af64 --- README.md | 4 ++++ tools/xen/README.md | 43 ++++++++++++++++++++++++++++++--------- tools/xen/build_domU.sh | 4 ++++ tools/xen/build_xva.sh | 3 ++- tools/xen/prepare_dom0.sh | 5 +++++ 5 files changed, 48 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 34eb45f9..31af1ce6 100644 --- a/README.md +++ b/README.md @@ -12,6 +12,10 @@ Read more at http://devstack.org (built from the gh-pages branch) IMPORTANT: Be sure to carefully read stack.sh and any other scripts you execute before you run them, as they install software and may alter your networking configuration. We strongly recommend that you run stack.sh in a clean and disposable vm when you are first getting started. +# Devstack on Xenserver + +If you would like to use Xenserver as the hypervisor, please refer to the instructions in ./tools/xen/README.md. + # Versions The devstack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[release]. For example, you can do the following to create a diablo OpenStack cloud: diff --git a/tools/xen/README.md b/tools/xen/README.md index a3398a78..d487a996 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -4,12 +4,19 @@ The purpose of the code in this directory it to help developers bootstrap a XenServer 5.6 + Openstack development environment. This file gives some pointers on how to get started. +Xenserver is a Type 1 hypervisor, so it needs to be installed on bare metal. +The Openstack services are configured to run within a "privileged" virtual +machine on the Xenserver host (called OS domU). The VM uses the XAPI toolstack +to communicate with the host. + Step 1: Install Xenserver ------------------------ -Install XenServer 5.6 on a clean box. You can get XenServer by signing +Install XenServer 5.6+ on a clean box. 
You can get XenServer by signing up for an account on citrix.com, and then visiting: https://www.citrix.com/English/ss/downloads/details.asp?downloadId=2311504&productId=683148 +For details on installation, see: http://wiki.openstack.org/XenServer/Install + Here are some sample Xenserver network settings for when you are just getting started (I use settings like this with a lappy + cheap wifi router): @@ -18,16 +25,25 @@ getting started (I use settings like this with a lappy + cheap wifi router): * XenServer Gateway: 192.168.1.1 * XenServer DNS: 192.168.1.1 +Note: +------ +It is advisable (and necessary if you are using Xenserver 6.0, due to space +limitations), to create the above mentioned OS domU, on a separate dev machine. +To do this, you will need to run Steps 2 on the dev machine (if required) as +well as the Xenserver host. Steps 3 and 4 should be run on the dev machine. +This process requires you to be root on the dev machine. + Step 2: Prepare DOM0 ------------------- -At this point, your server is missing some critical software that you will +At this point, your host is missing some critical software that you will need to run devstack (like git). Do this to install required software: - wget --no-check-certificate https://github.com/cloudbuilders/devstack/raw/xen/tools/xen/prepare_dom0.sh + wget --no-check-certificate https://raw.github.com/openstack-dev/devstack/master/tools/xen/prepare_dom0.sh chmod 755 prepare_dom0.sh ./prepare_dom0.sh -This script will also clone devstack in /root/devstack +This step will also clone devstack in $DEVSTACKSRCROOT/devstack. +$DEVSTACKSRCROOT=/root by default. Step 3: Configure your localrc ----------------------------- @@ -35,7 +51,7 @@ Devstack uses a localrc for user-specific configuration. Note that the XENAPI_PASSWORD must be your dom0 root password. Of course, use real passwords if this machine is exposed. - cat > /root/devstack/localrc < $DEVSTACKSRCROOT/devstack/localrc < Date: Thu, 8 Mar 2012 13:40:29 -0800 Subject: [PATCH 405/967] Moves python-cloudfiles dependency back to PIP. For reasons currently unknown, apt-get seems to install an unusably old version of python-cloudfiles in some small subset of cases. To avoid that frustration for the users it does affect, this patch moves python-cloudfiles back to being installed by pip instead. Fixes bug 931269. Change-Id: I4c2e17ddcc0c0d327b3810b4fd4db117076df069 --- files/apts/horizon | 1 - files/pips/horizon | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/files/apts/horizon b/files/apts/horizon index 1e0b0e60..b00d8c04 100644 --- a/files/apts/horizon +++ b/files/apts/horizon @@ -22,5 +22,4 @@ python-django python-django-mailer python-django-nose python-django-registration -python-cloudfiles python-migrate diff --git a/files/pips/horizon b/files/pips/horizon index 44bf6db3..f15602e9 100644 --- a/files/pips/horizon +++ b/files/pips/horizon @@ -1,2 +1,3 @@ django-nose-selenium pycrypto==2.3 +python-cloudfiles From 690e1e32ea56d8666644017cb9bcfb4da8963efb Mon Sep 17 00:00:00 2001 From: Aaron Lee Date: Thu, 8 Mar 2012 09:57:30 -0800 Subject: [PATCH 406/967] Change the default mac range to not 0 This fixes some of the confusion caused by Bug 947382 zoinks! 
set the U/L bit Change-Id: I3a610e3aa3904fdb82ad314c3fd1d06c6017f46c --- AUTHORS | 1 + stack.sh | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/AUTHORS b/AUTHORS index a3a4b6b9..5f758370 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,3 +1,4 @@ +Aaron Lee Adam Gandelman Andy Smith Anthony Young diff --git a/stack.sh b/stack.sh index eead8a19..10da914e 100755 --- a/stack.sh +++ b/stack.sh @@ -179,7 +179,7 @@ M_PORT=${M_PORT:-9898} # Default Melange Host M_HOST=${M_HOST:-localhost} # Melange MAC Address Range -M_MAC_RANGE=${M_MAC_RANGE:-404040/24} +M_MAC_RANGE=${M_MAC_RANGE:-FE-EE-DD-00-00-00/24} # Specify which services to launch. These generally correspond to screen tabs ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit} From f056b7d2d098361908df490e6683d26065a7cd3a Mon Sep 17 00:00:00 2001 From: termie Date: Thu, 8 Mar 2012 14:18:22 -0800 Subject: [PATCH 407/967] update policy driver Change-Id: I2450806d27455ae8304446f5b6054e14a66658b1 --- files/keystone.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/keystone.conf b/files/keystone.conf index 64f0a66f..5e5bfeb6 100644 --- a/files/keystone.conf +++ b/files/keystone.conf @@ -35,7 +35,7 @@ template_file = %KEYSTONE_DIR%/etc/default_catalog.templates driver = keystone.token.backends.kvs.Token [policy] -driver = keystone.policy.backends.simple.SimpleMatch +driver = keystone.policy.backends.rules.Policy [ec2] driver = keystone.contrib.ec2.backends.sql.Ec2 From 3b719e50fb8c3bf770018178e522580293a25cd4 Mon Sep 17 00:00:00 2001 From: Hengqing Hu Date: Fri, 9 Mar 2012 16:03:00 +0800 Subject: [PATCH 408/967] Remove trailing whitespaces in regular file Change-Id: I5faf840dd6649afcb53e91f1d033447f9729cee1 --- AUTHORS | 1 + tools/build_uec.sh | 2 +- .../jenkins_home/jobs/diablo-xs_ha/config.xml | 2 +- tools/rfc.sh | 2 +- tools/warm_apts_and_pips_for_uec.sh | 2 +- tools/xen/build_domU.sh | 2 +- tools/xen/prepare_dom0.sh | 2 +- tools/xen/scripts/install-os-vpx.sh | 4 ++-- tools/xen/scripts/mkxva | 12 ++++++------ 9 files changed, 15 insertions(+), 14 deletions(-) diff --git a/AUTHORS b/AUTHORS index a3a4b6b9..d1a9fcdb 100644 --- a/AUTHORS +++ b/AUTHORS @@ -9,6 +9,7 @@ Devin Carlen Eddie Hebert Eoghan Glynn Gabriel Hurley +Hengqing Hu Jake Dahn James E. Blair Jason Cannavale diff --git a/tools/build_uec.sh b/tools/build_uec.sh index ed5a0171..35a4d6db 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -147,7 +147,7 @@ cat > $LIBVIRT_XML < - + diff --git a/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml index d0fa6af3..49a57f04 100644 --- a/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml +++ b/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml @@ -78,7 +78,7 @@ set -o xtrace . localrc # Unlike kvm, ssh to the xen host to run tests, in case the test instance is launch with a host only network -ssh root@$XEN_IP "cd devstack && . localrc && cd tools/jenkins && ./run_test.sh $EXECUTOR_NUMBER $ADAPTER '$RC'" +ssh root@$XEN_IP "cd devstack && . localrc && cd tools/jenkins && ./run_test.sh $EXECUTOR_NUMBER $ADAPTER '$RC'" diff --git a/tools/rfc.sh b/tools/rfc.sh index 0bc15319..d4dc5974 100755 --- a/tools/rfc.sh +++ b/tools/rfc.sh @@ -1,5 +1,5 @@ #!/bin/sh -e -# Copyright (c) 2010-2011 Gluster, Inc. +# Copyright (c) 2010-2011 Gluster, Inc. # This initial version of this file was taken from the source tree # of GlusterFS. 
It was not directly attributed, but is assumed to be # Copyright (c) 2010-2011 Gluster, Inc and release GPLv3 diff --git a/tools/warm_apts_and_pips_for_uec.sh b/tools/warm_apts_and_pips_for_uec.sh index ec7e916c..23a28dec 100755 --- a/tools/warm_apts_and_pips_for_uec.sh +++ b/tools/warm_apts_and_pips_for_uec.sh @@ -30,7 +30,7 @@ fi if [ ! -d files/apts ]; then echo "Please run this script from devstack/tools/" exit 1 -fi +fi # Mount the image STAGING_DIR=/tmp/`echo $1 | sed "s/\//_/g"`.stage diff --git a/tools/xen/build_domU.sh b/tools/xen/build_domU.sh index a0fd316a..f1c5f076 100755 --- a/tools/xen/build_domU.sh +++ b/tools/xen/build_domU.sh @@ -144,7 +144,7 @@ if [ ! -d $TOP_DIR/nova ]; then env GIT_SSL_NO_VERIFY=true git clone $NOVA_REPO cd $TOP_DIR/nova git checkout $NOVA_BRANCH -fi +fi # Install plugins cp -pr $TOP_DIR/nova/plugins/xenserver/xenapi/etc/xapi.d /etc/ diff --git a/tools/xen/prepare_dom0.sh b/tools/xen/prepare_dom0.sh index d28a07f1..55fd6813 100755 --- a/tools/xen/prepare_dom0.sh +++ b/tools/xen/prepare_dom0.sh @@ -21,7 +21,7 @@ if [ -e /usr/bin/vim ]; then ln -s /usr/bin/vim /bin/vi fi -# Install git +# Install git if ! which git; then DEST=/tmp/ GITDIR=$DEST/git-1.7.7 diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index f0dc3c2d..d45c3702 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -88,7 +88,7 @@ EOF get_params() { - while getopts "hicwbf:d:v:m:p:k:r:l:" OPTION; + while getopts "hicwbf:d:v:m:p:k:r:l:" OPTION; do case $OPTION in h) usage @@ -246,7 +246,7 @@ create_management_vif() # This installs the interface for public traffic, only if a bridge is specified -# The interface is not configured at this stage, but it will be, once the admin +# The interface is not configured at this stage, but it will be, once the admin # tasks are complete for the services of this VPX create_public_vif() { diff --git a/tools/xen/scripts/mkxva b/tools/xen/scripts/mkxva index dcdee61a..a316da2d 100755 --- a/tools/xen/scripts/mkxva +++ b/tools/xen/scripts/mkxva @@ -177,7 +177,7 @@ EOF /sbin/mkfs.ext3 -I 128 -m0 -F "$partition" /sbin/e2label "$partition" vpxroot make_fs_inner "$staging" "$partition" "" - + # Now run grub on the image we've created CLEAN_MOUNTPOINT=$(mktemp -d "$TMPDIR/mkfs-XXXXXX") @@ -203,7 +203,7 @@ EOF $SUDO umount "$CLEAN_MOUNTPOINT" CLEAN_MOUNTPOINT= - + # Grub expects a disk with name /dev/xxxx with a first partition # named /dev/xxxx1, so we give it what it wants using symlinks # Note: /dev is linked to the real /dev of the build machine, so @@ -214,14 +214,14 @@ EOF rm -f "$disk_part1_name" ln -s "$CLEAN_LOSETUP" "$disk_name" ln -s "$partition" "$disk_part1_name" - + # Feed commands into the grub shell to setup the disk grub --no-curses --device-map=/dev/null </dev/null @@ -359,7 +359,7 @@ done # cleanup -if [ -z "${DO_NOT_CLEANUP:-}" ] ; then +if [ -z "${DO_NOT_CLEANUP:-}" ] ; then rm -rf "$XVA_TARBALL_STAGING" rm -f "$FS_TMPFILE" fi From 07c3557383a02b630c7dee720495b65f41410bac Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 5 Mar 2012 07:15:30 -0600 Subject: [PATCH 409/967] Add hacking guideline After all, it _was_ docday when this was proposed! This is by no means complete but some of this has come up a lot recently. 
Change-Id: I72300506e1c74077d3f9e6bbabea3b2a25a8e829 --- HACKING.rst | 153 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 153 insertions(+) create mode 100644 HACKING.rst diff --git a/HACKING.rst b/HACKING.rst new file mode 100644 index 00000000..d91d4969 --- /dev/null +++ b/HACKING.rst @@ -0,0 +1,153 @@ +Contributing to DevStack +======================== + + +General +------- + +DevStack is written in POSIX shell script. This choice was made because +it best illustrates the configuration steps that this implementation takes +on setting up and interacting with OpenStack components. DevStack specifies +BASH and is compatible with Bash 3. + +DevStack's official repository is located on GitHub at +https://github.com/openstack-dev/devstack.git. Besides the master branch that +tracks the OpenStack trunk branches a separate branch is maintained for all +OpenStack releases starting with Diablo (stable/diablo). + +The primary script in DevStack is ``stack.sh``, which performs the bulk of the +work for DevStack's use cases. There is a subscript ``functions`` that contains +generally useful shell functions and is used by a number of the scripts in +DevStack. + +A number of additional scripts can be found in the ``tools`` directory that may +be useful in setting up special-case uses of DevStack. These include: bare metal +deployment, ramdisk deployment and Jenkins integration. + + +Scripts +------- + +DevStack scripts should generally begin by calling ``env(1)`` in the shebang line:: + + #!/usr/bin/env bash + +Sometimes the script needs to know the location of the DevStack install directory. +``TOP_DIR`` should always point there, even if the script itself is located in +a subdirectory:: + + # Keep track of the current devstack directory. + TOP_DIR=$(cd $(dirname "$0") && pwd) + +Many scripts will utilize shared functions from the ``functions`` file. There are +also rc files (``stackrc`` and ``openrc``) that are often included to set the primary +configuration of the user environment:: + + # Use openrc + stackrc + localrc for settings + pushd $(cd $(dirname "$0")/.. && pwd) >/dev/null + + # Import common functions + source ./functions + + # Import configuration + source ./openrc + popd >/dev/null + +``stack.sh`` is a rather large monolithic script that flows through from beginning +to end. There is a proposal to segment it to put the OpenStack projects +into their own sub-scripts to better document the projects as a unit rather than +have it scattered throughout ``stack.sh``. Someday. + + +Documentation +------------- + +The official DevStack repo on GitHub does not include a gh-pages branch that +GitHub uses to create static web sites. That branch is maintained in the +`CloudBuilders DevStack repo`__ mirror that supports the +http://devstack.org site. This is the primary DevStack +documentation along with the DevStack scripts themselves. + +__ repo_ +.. _repo: https://github.com/cloudbuilders/devstack + +All of the scripts are processed with shocco_ to render them with the comments +as text describing the script below. For this reason we tend to be a little +verbose in the comments _ABOVE_ the code they pertain to. Shocco also supports +Markdown formatting in the comments; use it sparingly. Specifically, ``stack.sh`` +uses Markdown headers to divide the script into logical sections. + +.. 
_shocco: http://rtomayko.github.com/shocco/ + + +Exercises +--------- + +The scripts in the exercises directory are meant to 1) perform basic operational +checks on certain aspects of OpenStack; and b) document the use of the +OpenStack command-line clients. + +In addition to the guidelines above, exercise scripts MUST follow the structure +outlined here. ``swift.sh`` is perhaps the clearest example of these guidelines. +These scripts are executed serially by ``exercise.sh`` in testing situations. + +* Begin and end with a banner that stands out in a sea of script logs to aid + in debugging failures, particularly in automated testing situations. If the + end banner is not displayed, the script ended prematurely and can be assumed + to have failed. + + :: + + echo "**************************************************" + echo "Begin DevStack Exercise: $0" + echo "**************************************************" + ... + set +o xtrace + echo "**************************************************" + echo "End DevStack Exercise: $0" + echo "**************************************************" + +* The scripts will generally have the shell ``xtrace`` attribute set to display + the actual commands being executed, and the ``errexit`` attribute set to exit + the script on non-zero exit codes:: + + # This script exits on an error so that errors don't compound and you see + # only the first error that occured. + set -o errexit + + # Print the commands being run so that we can see the command that triggers + # an error. It is also useful for following allowing as the install occurs. + set -o xtrace + +* There are a couple of helper functions in the common ``functions`` sub-script + that will check for non-zero exit codes and unset environment variables and + print a message and exit the script. These should be called after most client + commands that are not otherwise checked to short-circuit long timeouts + (instance boot failure, for example):: + + swift post $CONTAINER + die_if_error "Failure creating container $CONTAINER" + + FLOATING_IP=`euca-allocate-address | cut -f2` + die_if_not_set FLOATING_IP "Failure allocating floating IP" + +* The exercise scripts should only use the various OpenStack client binaries to + interact with OpenStack. This specifically excludes any ``*-manage`` tools + as those assume direct access to configuration and databases, as well as direct + database access from the exercise itself. + +* If specific configuration needs to be present for the exercise to complete, + it should be staged in ``stack.sh``, or called from ``stack.sh`` (see + ``files/keystone_data.sh`` for an example of this). + +* The ``OS_*`` environment variables should be the only ones used for all + authentication to OpenStack clients as documented in the CLIAuth_ wiki page. + +.. _CLIAuth: http://wiki.openstack.org/CLIAuth + +* The exercise MUST clean up after itself if successful. If it is not successful, + it is assumed that state will be left behind; this allows a chance for developers + to look around and attempt to debug the problem. The exercise SHOULD clean up + or graciously handle possible artifacts left over from previous runs if executed + again. It is acceptable to require a reboot or even a re-install of DevStack + to restore a clean test environment. 
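The exercise guidelines above boil down to a small common skeleton. A minimal, hypothetical exercise that follows them would look roughly like the sketch below; the container name is a placeholder and the swift commands are simply patterned on the swift exercise earlier in this series:

    #!/usr/bin/env bash

    echo "**************************************************"
    echo "Begin DevStack Exercise: $0"
    echo "**************************************************"

    # Exit on the first error and echo commands as they run
    set -o errexit
    set -o xtrace

    # Use openrc + stackrc + localrc for settings and the shared helpers
    pushd $(cd $(dirname "$0")/.. && pwd) >/dev/null
    source ./functions
    source ./openrc
    popd >/dev/null

    # Placeholder container name for this sketch
    CONTAINER=ex-skeleton

    # Only OpenStack client binaries, with short-circuit checks after each call
    swift post $CONTAINER
    die_if_error "Failure creating container $CONTAINER"

    swift list $CONTAINER
    die_if_error "Failure listing contents of container $CONTAINER"

    # Clean up on success so repeated runs start from a clean slate
    swift delete $CONTAINER
    die_if_error "Failure deleting container $CONTAINER"

    set +o xtrace
    echo "**************************************************"
    echo "End DevStack Exercise: $0"
    echo "**************************************************"

Authentication in such a script relies solely on the OS_* variables set up by sourcing openrc, per the CLIAuth guideline above.
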
From 389f4efb1ed9c2261caffc3a64006fe6171aae39 Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Thu, 8 Mar 2012 19:37:03 -0800 Subject: [PATCH 410/967] support glance only mode by if...then checking for is_service_enabled nova. removed redundant is_service_enabled checks for screen_it processes. fixes bug 885767 Change-Id: I4441e8d0e9d7b96770923721fa97dbd10c4379a8 --- AUTHORS | 1 + stack.sh | 19 ++++++------------- 2 files changed, 7 insertions(+), 13 deletions(-) diff --git a/AUTHORS b/AUTHORS index a3a4b6b9..126f0f09 100644 --- a/AUTHORS +++ b/AUTHORS @@ -16,6 +16,7 @@ Jay Pipes Jesse Andrews Johannes Erdfelt Justin Shepherd +Ken Pepple Kiall Mac Innes Russell Bryant Scott Moser diff --git a/stack.sh b/stack.sh index eead8a19..5999f637 100755 --- a/stack.sh +++ b/stack.sh @@ -1519,7 +1519,7 @@ fi # If we're using Quantum (i.e. q-svc is enabled), network creation has to # happen after we've started the Quantum service. -if is_service_enabled mysql; then +if is_service_enabled mysql && is_service_enabled nova; then # create a small network $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE @@ -1540,24 +1540,17 @@ fi # ``libvirtd`` to our user in this script, when nova-compute is run it is # within the context of our original shell (so our groups won't be updated). # Use 'sg' to execute nova-compute as a member of the libvirtd group. +# We don't check for is_service_enable as screen_it does it for us screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_DIR/bin/nova-compute" screen_it n-crt "cd $NOVA_DIR && $NOVA_DIR/bin/nova-cert" screen_it n-obj "cd $NOVA_DIR && $NOVA_DIR/bin/nova-objectstore" screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume" screen_it n-net "cd $NOVA_DIR && $NOVA_DIR/bin/nova-network" screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler" -if is_service_enabled n-novnc; then - screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF --web ." -fi -if is_service_enabled n-xvnc; then - screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF" -fi -if is_service_enabled n-cauth; then - screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" -fi -if is_service_enabled horizon; then - screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log" -fi +screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF --web ." +screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF" +screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" +screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log" # Install Images # ============== From fff6fec46d2659c39e05984a682ec8986715fbc9 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Fri, 9 Mar 2012 15:37:56 +0000 Subject: [PATCH 411/967] Make sure Logdir is created. - FIxes bug 950902. 
Change-Id: I1e387b4c97551744673281fdeae61d87c1fc39ab --- stack.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/stack.sh b/stack.sh index eead8a19..1562886c 100755 --- a/stack.sh +++ b/stack.sh @@ -472,6 +472,7 @@ if [[ -n "$LOGFILE" ]]; then LOGDAYS=${LOGDAYS:-7} LOGDIR=$(dirname "$LOGFILE") LOGNAME=$(basename "$LOGFILE") + mkdir -p $LOGDIR find $LOGDIR -maxdepth 1 -name $LOGNAME.\* -mtime +$LOGDAYS -exec rm {} \; TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"} From e2d139b8c86dcb1e7c2be3ee00c686ca47199849 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 9 Mar 2012 15:47:28 -0800 Subject: [PATCH 412/967] Remove calls to service-create in devstack for now. * The recommended backend for essex is the template backend. * Fixes bug 950213 Change-Id: I2f8bc9f74cee2d783cbadcc1d4d4e674ddb905b9 --- files/keystone_data.sh | 28 +--------------------------- 1 file changed, 1 insertion(+), 27 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 958d2af4..c9aa7d74 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -73,14 +73,7 @@ keystone user-role-add --user $DEMO_USER --role $MEMBER_ROLE --tenant_id $DEMO_T keystone user-role-add --user $DEMO_USER --role $MEMBER_ROLE --tenant_id $INVIS_TENANT -# Services -keystone service-create --name=keystone \ - --type=identity \ - --description="Keystone Identity Service" - -keystone service-create --name=nova \ - --type=compute \ - --description="Nova Compute Service" +# Configure service users/roles NOVA_USER=$(get_id keystone user-create --name=nova \ --pass="$SERVICE_PASSWORD" \ --tenant_id $SERVICE_TENANT \ @@ -89,13 +82,6 @@ keystone user-role-add --tenant_id $SERVICE_TENANT \ --user $NOVA_USER \ --role $ADMIN_ROLE -keystone service-create --name=ec2 \ - --type=ec2 \ - --description="EC2 Compatibility Layer" - -keystone service-create --name=glance \ - --type=image \ - --description="Glance Image Service" GLANCE_USER=$(get_id keystone user-create --name=glance \ --pass="$SERVICE_PASSWORD" \ --tenant_id $SERVICE_TENANT \ @@ -104,16 +90,7 @@ keystone user-role-add --tenant_id $SERVICE_TENANT \ --user $GLANCE_USER \ --role $ADMIN_ROLE -if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then - keystone service-create --name="nova-volume" \ - --type=volume \ - --description="Nova Volume Service" -fi - if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then - keystone service-create --name=swift \ - --type="object-store" \ - --description="Swift Service" SWIFT_USER=$(get_id keystone user-create --name=swift \ --pass="$SERVICE_PASSWORD" \ --tenant_id $SERVICE_TENANT \ @@ -124,9 +101,6 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then fi if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then - keystone service-create --name=quantum \ - --type=network \ - --description="Quantum Service" QUANTUM_USER=$(get_id keystone user-create --name=quantum \ --pass="$SERVICE_PASSWORD" \ --tenant_id $SERVICE_TENANT \ From d966ed23e27bd3af3eebae48b8bc20747b1c0c96 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Mon, 5 Mar 2012 12:42:48 +0000 Subject: [PATCH 413/967] Implement logging of screen windows. - Fixes bug 945231. 
Change-Id: Ie26555d3e99905ba053510f6b08822b1043aa974 --- stack.sh | 33 ++++++++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 8fdb6796..9fd95c12 100755 --- a/stack.sh +++ b/stack.sh @@ -465,18 +465,22 @@ APACHE_GROUP=${APACHE_GROUP:-$APACHE_USER} # Set LOGFILE to turn on logging # We append '.xxxxxxxx' to the given name to maintain history # where xxxxxxxx is a representation of the date the file was created +if [[ -n "$LOGFILE" || -n "$SCREEN_LOGDIR" ]]; then + LOGDAYS=${LOGDAYS:-7} + TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"} + CURRENT_LOG_TIME=$(date "+$TIMESTAMP_FORMAT") +fi + if [[ -n "$LOGFILE" ]]; then # First clean up old log files. Use the user-specified LOGFILE # as the template to search for, appending '.*' to match the date # we added on earlier runs. - LOGDAYS=${LOGDAYS:-7} LOGDIR=$(dirname "$LOGFILE") LOGNAME=$(basename "$LOGFILE") mkdir -p $LOGDIR find $LOGDIR -maxdepth 1 -name $LOGNAME.\* -mtime +$LOGDAYS -exec rm {} \; - TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"} - LOGFILE=$LOGFILE.$(date "+$TIMESTAMP_FORMAT") + LOGFILE=$LOGFILE.${CURRENT_LOG_TIME} # Redirect stdout/stderr to tee to write the log file exec 1> >( tee "${LOGFILE}" ) 2>&1 echo "stack.sh log $LOGFILE" @@ -484,6 +488,23 @@ if [[ -n "$LOGFILE" ]]; then ln -sf $LOGFILE $LOGDIR/$LOGNAME fi +# Set up logging of screen windows +# Set SCREEN_LOGDIR to turn on logging of screen windows to the +# directory specified in SCREEN_LOGDIR, we will log to the the file +# screen-$SERVICE_NAME-$TIMESTAMP.log in that dir and have a link +# screen-$SERVICE_NAME.log to the latest log file. +# Logs are kept for as long specified in LOGDAYS. +if [[ -n "$SCREEN_LOGDIR" ]]; then + + # We make sure the directory is created. + if [[ -d "$SCREEN_LOGDIR" ]]; then + # We cleanup the old logs + find $SCREEN_LOGDIR -maxdepth 1 -name screen-\*.log -mtime +$LOGDAYS -exec rm {} \; + else + mkdir -p $SCREEN_LOGDIR + fi +fi + # So that errors don't compound we exit on any errors so you see only the # first error that occurred. 
trap failed ERR @@ -1357,6 +1378,12 @@ function screen_it { # creating a new window in screen and then sends characters, so if # bash isn't running by the time we send the command, nothing happens sleep 1.5 + + if [[ -n ${SCREEN_LOGDIR} ]]; then + screen -S stack -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log + screen -S stack -p $1 -X log on + ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log + fi screen -S stack -p $1 -X stuff "$2$NL" fi } From 0bd2410d469f11934b5965d83b57d56418e66b48 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 8 Mar 2012 00:33:54 -0600 Subject: [PATCH 414/967] Move all EC2 cred creation to eucarc * Remove credential creation from files/keystone_data.sh * Remove EC2 cert setup from openrc * Remove sourcing of ec2rc from stackrc * Collect the above in eucarc * Allow rc files to be sourced from other directories; based on Chmouel's 4881 proposal but is simpler and doesn't actually change the directory * Create S3 endpoint * Get EC2 and S3 endpoints from Keystone service catalog * Add EC2 credential checks to exercises/client-env.sh * exercises/bundle.sh and exercises/euca.sh use eucarc Updates: * remove readlink -f to stay bash 3 compatible * use service catalog * create S3 endpoint Fixes bug 949528 Change-Id: I58caea8cecbbd10661779bc2d150d241f4a5822e --- README.md | 5 +++++ eucarc | 40 +++++++++++++++++++++++++++++++++ exercises/bundle.sh | 22 +++++++++--------- exercises/client-env.sh | 30 ++++++++++++++++++++----- exercises/euca.sh | 12 +++++----- files/default_catalog.templates | 6 +++++ files/keystone_data.sh | 21 +---------------- openrc | 29 +++++------------------- stackrc | 12 +++++----- 9 files changed, 103 insertions(+), 74 deletions(-) create mode 100644 eucarc diff --git a/README.md b/README.md index 34eb45f9..e311bb80 100644 --- a/README.md +++ b/README.md @@ -41,6 +41,11 @@ We also provide an environment file that you can use to interact with your cloud . openrc # list instances nova list + +If the EC2 API is your cup-o-tea, you can create credentials and use euca2ools: + + # source eucarc to generate EC2 credentials and set up the environment + . 
eucarc # list instances using ec2 api euca-describe-instances diff --git a/eucarc b/eucarc new file mode 100644 index 00000000..2b0f7dd1 --- /dev/null +++ b/eucarc @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +# +# source eucarc [username] [tenantname] +# +# Create EC2 credentials for the current user as defined by OS_TENANT_NAME:OS_USERNAME +# Optionally set the tenant/username via openrc + +if [[ -n "$1" ]]; then + USERNAME=$1 +fi +if [[ -n "$2" ]]; then + TENANT=$2 +fi + +# Find the other rc files +RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) + +# Get user configuration +source $RC_DIR/openrc + +# Set the ec2 url so euca2ools works +export EC2_URL=$(keystone catalog --service ec2 | awk '/ publicURL / { print $4 }') + +# Create EC2 credentials for the current user +CREDS=$(keystone ec2-credentials-create) +export EC2_ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') +export EC2_SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') + +# Euca2ools Certificate stuff for uploading bundles +# See exercises/bundle.sh to see how to get certs using nova cli +NOVA_KEY_DIR=${NOVA_KEY_DIR:-$RC_DIR} +export S3_URL=$(keystone catalog --service s3 | awk '/ publicURL / { print $4 }') +export EC2_USER_ID=42 # nova does not use user id, but bundling requires it +export EC2_PRIVATE_KEY=${NOVA_KEY_DIR}/pk.pem +export EC2_CERT=${NOVA_KEY_DIR}/cert.pem +export NOVA_CERT=${NOVA_KEY_DIR}/cacert.pem +export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set +alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user ${EC2_USER_ID} --ec2cert ${NOVA_CERT}" +alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}" + diff --git a/exercises/bundle.sh b/exercises/bundle.sh index e1c949cf..47bacac3 100755 --- a/exercises/bundle.sh +++ b/exercises/bundle.sh @@ -18,24 +18,24 @@ set -o xtrace # Settings # ======== -# Use openrc + stackrc + localrc for settings -pushd $(cd $(dirname "$0")/.. && pwd) >/dev/null +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions -source ./functions +source $TOP_DIR/functions -# Import configuration -source ./openrc +# Import EC2 configuration +source $TOP_DIR/eucarc # Remove old certificates -rm -f cacert.pem -rm -f cert.pem -rm -f pk.pem +rm -f $TOP_DIR/cacert.pem +rm -f $TOP_DIR/cert.pem +rm -f $TOP_DIR/pk.pem # Get Certificates -nova x509-get-root-cert -nova x509-create-cert -popd >/dev/null +nova x509-get-root-cert $TOP_DIR/cacert.pem +nova x509-create-cert $TOP_DIR/pk.pem $TOP_DIR/cert.pem # Max time to wait for image to be registered REGISTER_TIMEOUT=${REGISTER_TIMEOUT:-15} diff --git a/exercises/client-env.sh b/exercises/client-env.sh index 28c4d95e..d4ba702e 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -12,15 +12,15 @@ VERIFY=${1:-""} # Settings # ======== -# Use openrc + stackrc + localrc for settings -pushd $(cd $(dirname "$0")/.. 
&& pwd) >/dev/null +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions -source ./functions +source $TOP_DIR/functions # Import configuration -source ./openrc -popd >/dev/null +source $TOP_DIR/openrc # Unset all of the known NOVA_ vars unset NOVA_API_KEY @@ -53,7 +53,7 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then STATUS_KEYSTONE="Skipped" else echo -e "\nTest Keystone" - if keystone service-list; then + if keystone catalog --service identity; then STATUS_KEYSTONE="Succeeded" else STATUS_KEYSTONE="Failed" @@ -68,7 +68,9 @@ fi if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then if [[ "$SKIP_EXERCISES" =~ "n-api" ]] ; then STATUS_NOVA="Skipped" + STATUS_EC2="Skipped" else + # Test OSAPI echo -e "\nTest Nova" if nova flavor-list; then STATUS_NOVA="Succeeded" @@ -76,6 +78,21 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then STATUS_NOVA="Failed" RETURN=1 fi + + # Test EC2 API + echo -e "\nTest EC2" + # Get EC2 creds + source $TOP_DIR/eucarc + + if euca-describe-images; then + STATUS_EC2="Succeeded" + else + STATUS_EC2="Failed" + RETURN=1 + fi + + # Clean up side effects + unset NOVA_VERSION fi fi @@ -125,6 +142,7 @@ function report() { echo -e "\n" report "Keystone" $STATUS_KEYSTONE report "Nova" $STATUS_NOVA +report "EC2" $STATUS_EC2 report "Glance" $STATUS_GLANCE report "Swift" $STATUS_SWIFT diff --git a/exercises/euca.sh b/exercises/euca.sh index b766bab8..2be2f626 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -18,15 +18,15 @@ set -o xtrace # Settings # ======== -# Use openrc + stackrc + localrc for settings -pushd $(cd $(dirname "$0")/.. && pwd) >/dev/null +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions -source ./functions +source $TOP_DIR/functions -# Import configuration -source ./openrc -popd >/dev/null +# Import EC2 configuration +source $TOP_DIR/eucarc # Max time to wait while vm goes from build to active state ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} diff --git a/files/default_catalog.templates b/files/default_catalog.templates index b9b18441..0dfd4fcd 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -24,6 +24,12 @@ catalog.RegionOne.ec2.internalURL = http://%SERVICE_HOST%:8773/services/Cloud catalog.RegionOne.ec2.name = 'EC2 Service' +catalog.RegionOne.s3.publicURL = http://%SERVICE_HOST%:3333 +catalog.RegionOne.s3.adminURL = http://%SERVICE_HOST%:3333 +catalog.RegionOne.s3.internalURL = http://%SERVICE_HOST%:3333 +catalog.RegionOne.s3.name = 'S3 Service' + + catalog.RegionOne.image.publicURL = http://%SERVICE_HOST%:9292/v1 catalog.RegionOne.image.adminURL = http://%SERVICE_HOST%:9292/v1 catalog.RegionOne.image.internalURL = http://%SERVICE_HOST%:9292/v1 diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 958d2af4..8cc472fd 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -2,9 +2,6 @@ # # Initial data for Keystone using python-keystoneclient # -# A set of EC2-compatible credentials is created for both admin and demo -# users and placed in $DEVSTACK_DIR/ec2rc. 
-# # Tenant User Roles # ------------------------------------------------------- # admin admin admin @@ -48,6 +45,7 @@ DEMO_USER=$(get_id keystone user-create --name=demo \ --pass="$ADMIN_PASSWORD" \ --email=demo@example.com) + # Roles ADMIN_ROLE=$(get_id keystone role-create --name=admin) KEYSTONEADMIN_ROLE=$(get_id keystone role-create --name=KeystoneAdmin) @@ -135,20 +133,3 @@ if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then --user $QUANTUM_USER \ --role $ADMIN_ROLE fi - -# create ec2 creds and parse the secret and access key returned -RESULT=$(keystone ec2-credentials-create --tenant_id=$ADMIN_TENANT --user=$ADMIN_USER) -ADMIN_ACCESS=$(echo "$RESULT" | awk '/ access / { print $4 }') -ADMIN_SECRET=$(echo "$RESULT" | awk '/ secret / { print $4 }') - -RESULT=$(keystone ec2-credentials-create --tenant_id=$DEMO_TENANT --user=$DEMO_USER) -DEMO_ACCESS=$(echo "$RESULT" | awk '/ access / { print $4 }') -DEMO_SECRET=$(echo "$RESULT" | awk '/ secret / { print $4 }') - -# write the secret and access to ec2rc -cat > $DEVSTACK_DIR/ec2rc </dev/null) || - NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' "${BASH_SOURCE:-${0}}") -NOVA_KEY_DIR=${NOVARC%/*} -export S3_URL=http://$SERVICE_HOST:3333 -export EC2_USER_ID=42 # nova does not use user id, but bundling requires it -export EC2_PRIVATE_KEY=${NOVA_KEY_DIR}/pk.pem -export EC2_CERT=${NOVA_KEY_DIR}/cert.pem -export NOVA_CERT=${NOVA_KEY_DIR}/cacert.pem -export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set -alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}" -alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}" - # set log level to DEBUG (helps debug issues) +# export KEYSTONECLIENT_DEBUG=1 # export NOVACLIENT_DEBUG=1 # Max time till the vm is bootable diff --git a/stackrc b/stackrc index a20426b3..8df3b83a 100644 --- a/stackrc +++ b/stackrc @@ -1,3 +1,6 @@ +# Find the other rc files +RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) + # compute service NOVA_REPO=https://github.com/openstack/nova.git NOVA_BRANCH=master @@ -76,12 +79,7 @@ case "$LIBVIRT_TYPE" in IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz";; esac -# use stored ec2 env variables -if [ -f ./ec2rc ]; then - source ./ec2rc -fi - # allow local overrides of env variables -if [ -f ./localrc ]; then - source ./localrc +if [ -f $RC_DIR/localrc ]; then + source $RC_DIR/localrc fi From 8da5656ffd8b28d88536efbe6d452ab86c3b3e74 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Fri, 9 Mar 2012 14:21:40 +0000 Subject: [PATCH 415/967] Move ENABLED_SERVICES to stackrc. allow us to do things like that : ENABLED_SERVICES="$ENABLED_SERVICES,swift" in localrc instead of having to copy the full config from stack.sh. Fixes bug 951598. Change-Id: I17e168473540760bcfa40a752ff2c266bd9b7044 --- stack.sh | 6 ++---- stackrc | 8 ++++++++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index 991ea001..2c535b9f 100755 --- a/stack.sh +++ b/stack.sh @@ -83,7 +83,8 @@ fi # If ``localrc`` exists, then ``stackrc`` will load those settings. This is # useful for changing a branch or repository to test other versions. 
Also you # can store your other settings like **MYSQL_PASSWORD** or **ADMIN_PASSWORD** instead -# of letting devstack generate random ones for you. +# of letting devstack generate random ones for you. You can customize +# which services to install as well in your localrc. source ./stackrc # Destination path for installation ``DEST`` @@ -181,9 +182,6 @@ M_HOST=${M_HOST:-localhost} # Melange MAC Address Range M_MAC_RANGE=${M_MAC_RANGE:-FE-EE-DD-00-00-00/24} -# Specify which services to launch. These generally correspond to screen tabs -ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit} # Name of the lvm volume group to use/create for iscsi volumes VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes} VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} diff --git a/stackrc b/stackrc index 8df3b83a..d0fa1c25 100644 --- a/stackrc +++ b/stackrc @@ -1,6 +1,14 @@ # Find the other rc files RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) +# Specify which services to launch. These generally correspond to +# screen tabs. If you would like to add other services that are not enabled +# by default, you can append them to the ENABLED_SERVICES variable in +# your localrc. For example, to add Swift alongside the other services, just +# put this in your localrc: +# ENABLED_SERVICES="$ENABLED_SERVICES,swift" +ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit + # compute service NOVA_REPO=https://github.com/openstack/nova.git NOVA_BRANCH=master From 1c77d700f975b085af6e05718ed7c8cabfd0ccb6 Mon Sep 17 00:00:00 2001 From: Tomoe Sugihara Date: Mon, 12 Mar 2012 21:49:54 +0900 Subject: [PATCH 416/967] Create floating ip pool when quantum is enabled QuantumManager did not previously support floating IPs, but now it should.
Change-Id: Ic150a4a4bd44e05a0a094ee80578c908162fb2c0 --- stack.sh | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/stack.sh b/stack.sh index 2c535b9f..aa957a1b 100755 --- a/stack.sh +++ b/stack.sh @@ -1559,15 +1559,11 @@ if is_service_enabled mysql && is_service_enabled nova; then # create a small network $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE - if is_service_enabled q-svc; then - echo "Not creating floating IPs (not supported by QuantumManager)" - else - # create some floating ips - $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE + # create some floating ips + $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE - # create a second pool - $NOVA_DIR/bin/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL - fi + # create a second pool + $NOVA_DIR/bin/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL fi From 51fb454f71a9814486b5f3ad14bfad4d9c950596 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 9 Mar 2012 22:21:59 -0600 Subject: [PATCH 417/967] Create exerciserc to configure exercises * Move timeouts from openrc to (new) exerciserc * Update all exercise scripts * Update HACKING.rst Fixes bug 951315 Change-Id: Icc4ff03a7dcf0cc711e204046176fb5186990c17 --- HACKING.rst | 15 ++++++++++----- exerciserc | 22 ++++++++++++++++++++++ exercises/bundle.sh | 3 +++ exercises/client-env.sh | 3 +++ exercises/euca.sh | 10 ++-------- exercises/floating_ips.sh | 20 +++++++------------- exercises/swift.sh | 13 ++++++++----- exercises/volumes.sh | 20 +++++++------------- openrc | 15 --------------- 9 files changed, 62 insertions(+), 59 deletions(-) create mode 100644 exerciserc diff --git a/HACKING.rst b/HACKING.rst index d91d4969..a105a66e 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -43,15 +43,14 @@ Many scripts will utilize shared functions from the ``functions`` file. There a also rc files (``stackrc`` and ``openrc``) that are often included to set the primary configuration of the user environment:: - # Use openrc + stackrc + localrc for settings - pushd $(cd $(dirname "$0")/.. && pwd) >/dev/null + # Keep track of the current devstack directory. + TOP_DIR=$(cd $(dirname "$0") && pwd) # Import common functions - source ./functions + source $TOP_DIR/functions # Import configuration - source ./openrc - popd >/dev/null + source $TOP_DIR/openrc ``stack.sh`` is a rather large monolithic script that flows through from beginning to end. There is a proposal to segment it to put the OpenStack projects @@ -119,6 +118,12 @@ These scripts are executed serially by ``exercise.sh`` in testing situations. # an error. It is also useful for following allowing as the install occurs. set -o xtrace +* Settings and configuration are stored in ``exerciserc``, which must be + sourced after ``openrc`` or ``stackrc``:: + + # Import exercise configuration + source $TOP_DIR/exerciserc + * There are a couple of helper functions in the common ``functions`` sub-script that will check for non-zero exit codes and unset environment variables and print a message and exit the script. These should be called after most client diff --git a/exerciserc b/exerciserc new file mode 100644 index 00000000..b41714da --- /dev/null +++ b/exerciserc @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# +# source exerciserc +# +# Configure the DevStack exercise scripts +# For best results, source this _after_ stackrc/localrc as it will set +# values only if they are not already set. 
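+# The ${VAR:-default} form below only applies the default when the variable +# is unset or empty, so values already set (in localrc, for example) win.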
+ +# Max time to wait while vm goes from build to active state +export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} + +# Max time to wait for proper IP association and dis-association. +export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15} + +# Max time till the vm is bootable +export BOOT_TIMEOUT=${BOOT_TIMEOUT:-30} + +# Max time from run instance command until it is running +export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))} + +# Max time to wait for a vm to terminate +export TERMINATE_TIMEOUT=${TERMINATE_TIMEOUT:-30} diff --git a/exercises/bundle.sh b/exercises/bundle.sh index 47bacac3..0f128af1 100755 --- a/exercises/bundle.sh +++ b/exercises/bundle.sh @@ -28,6 +28,9 @@ source $TOP_DIR/functions # Import EC2 configuration source $TOP_DIR/eucarc +# Import exercise configuration +source $TOP_DIR/exerciserc + # Remove old certificates rm -f $TOP_DIR/cacert.pem rm -f $TOP_DIR/cert.pem diff --git a/exercises/client-env.sh b/exercises/client-env.sh index d4ba702e..0f172750 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -22,6 +22,9 @@ source $TOP_DIR/functions # Import configuration source $TOP_DIR/openrc +# Import exercise configuration +source $TOP_DIR/exerciserc + # Unset all of the known NOVA_ vars unset NOVA_API_KEY unset NOVA_ENDPOINT_NAME diff --git a/exercises/euca.sh b/exercises/euca.sh index 2be2f626..703c7aac 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -28,14 +28,8 @@ source $TOP_DIR/functions # Import EC2 configuration source $TOP_DIR/eucarc -# Max time to wait while vm goes from build to active state -ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} - -# Max time till the vm is bootable -BOOT_TIMEOUT=${BOOT_TIMEOUT:-30} - -# Max time to wait for proper association and dis-association. -ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15} +# Import exercise configuration +source $TOP_DIR/exerciserc # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index a47f1ffc..f2b9d036 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -23,24 +23,18 @@ set -o xtrace # Settings # ======== -# Use openrc + stackrc + localrc for settings -pushd $(cd $(dirname "$0")/.. && pwd) >/dev/null +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions -source ./functions +source $TOP_DIR/functions # Import configuration -source ./openrc -popd >/dev/null +source $TOP_DIR/openrc -# Max time to wait while vm goes from build to active state -ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} - -# Max time till the vm is bootable -BOOT_TIMEOUT=${BOOT_TIMEOUT:-30} - -# Max time to wait for proper association and dis-association. -ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15} +# Import exercise configuration +source $TOP_DIR/exerciserc # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/exercises/swift.sh b/exercises/swift.sh index 76096379..b70b85f2 100755 --- a/exercises/swift.sh +++ b/exercises/swift.sh @@ -18,15 +18,18 @@ set -o xtrace # Settings # ======== -# Use openrc + stackrc + localrc for settings -pushd $(cd $(dirname "$0")/.. 
&& pwd) >/dev/null +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions -source ./functions +source $TOP_DIR/functions # Import configuration -source ./openrc -popd >/dev/null +source $TOP_DIR/openrc + +# Import exercise configuration +source $TOP_DIR/exerciserc # Container name CONTAINER=ex-swift diff --git a/exercises/volumes.sh b/exercises/volumes.sh index a812401a..77c3498c 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -18,24 +18,18 @@ set -o xtrace # Settings # ======== -# Use openrc + stackrc + localrc for settings -pushd $(cd $(dirname "$0")/.. && pwd) >/dev/null +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions -source ./functions +source $TOP_DIR/functions # Import configuration -source ./openrc -popd >/dev/null +source $TOP_DIR/openrc -# Max time to wait while vm goes from build to active state -ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} - -# Max time till the vm is bootable -BOOT_TIMEOUT=${BOOT_TIMEOUT:-30} - -# Max time to wait for proper association and dis-association. -ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15} +# Import exercise configuration +source $TOP_DIR/exerciserc # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/openrc b/openrc index 188ffc56..7aefb0ff 100644 --- a/openrc +++ b/openrc @@ -65,18 +65,3 @@ export COMPUTE_API_VERSION=${COMPUTE_API_VERSION:-$NOVA_VERSION} # set log level to DEBUG (helps debug issues) # export KEYSTONECLIENT_DEBUG=1 # export NOVACLIENT_DEBUG=1 - -# Max time till the vm is bootable -export BOOT_TIMEOUT=${BOOT_TIMEOUT:-30} - -# Max time to wait while vm goes from build to active state -export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} - -# Max time from run instance command until it is running -export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))} - -# Max time to wait for proper IP association and dis-association. -export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15} - -# Max time to wait for a vm to terminate -export TERMINATE_TIMEOUT=${TERMINATE_TIMEOUT:-30} From 06a09d0c33f0626379cad377b4ffa20fcda54858 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 12 Mar 2012 14:43:26 -0500 Subject: [PATCH 418/967] Fix double-quoted service names The Keystone service template parser doesn't do any quote interpolation, it just splits on ' = ' and passes the two parts on. So we just remove the quotes for now. 
Fixes bug 943523 Change-Id: Ia2a10ec18db1a82f23f36200b0cdef84b4f78155 --- files/default_catalog.templates | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/files/default_catalog.templates b/files/default_catalog.templates index 0dfd4fcd..7a98c94c 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -3,34 +3,34 @@ catalog.RegionOne.identity.publicURL = http://%SERVICE_HOST%:$(public_port)s/v2.0 catalog.RegionOne.identity.adminURL = http://%SERVICE_HOST%:$(admin_port)s/v2.0 catalog.RegionOne.identity.internalURL = http://%SERVICE_HOST%:$(public_port)s/v2.0 -catalog.RegionOne.identity.name = 'Identity Service' +catalog.RegionOne.identity.name = Identity Service catalog.RegionOne.compute.publicURL = http://%SERVICE_HOST%:8774/v2/$(tenant_id)s catalog.RegionOne.compute.adminURL = http://%SERVICE_HOST%:8774/v2/$(tenant_id)s catalog.RegionOne.compute.internalURL = http://%SERVICE_HOST%:8774/v2/$(tenant_id)s -catalog.RegionOne.compute.name = 'Compute Service' +catalog.RegionOne.compute.name = Compute Service catalog.RegionOne.volume.publicURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s catalog.RegionOne.volume.adminURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s catalog.RegionOne.volume.internalURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s -catalog.RegionOne.volume.name = 'Volume Service' +catalog.RegionOne.volume.name = Volume Service catalog.RegionOne.ec2.publicURL = http://%SERVICE_HOST%:8773/services/Cloud catalog.RegionOne.ec2.adminURL = http://%SERVICE_HOST%:8773/services/Admin catalog.RegionOne.ec2.internalURL = http://%SERVICE_HOST%:8773/services/Cloud -catalog.RegionOne.ec2.name = 'EC2 Service' +catalog.RegionOne.ec2.name = EC2 Service catalog.RegionOne.s3.publicURL = http://%SERVICE_HOST%:3333 catalog.RegionOne.s3.adminURL = http://%SERVICE_HOST%:3333 catalog.RegionOne.s3.internalURL = http://%SERVICE_HOST%:3333 -catalog.RegionOne.s3.name = 'S3 Service' +catalog.RegionOne.s3.name = S3 Service catalog.RegionOne.image.publicURL = http://%SERVICE_HOST%:9292/v1 catalog.RegionOne.image.adminURL = http://%SERVICE_HOST%:9292/v1 catalog.RegionOne.image.internalURL = http://%SERVICE_HOST%:9292/v1 -catalog.RegionOne.image.name = 'Image Service' +catalog.RegionOne.image.name = Image Service From 770cec73146596c686405492e523fbe8861f36f7 Mon Sep 17 00:00:00 2001 From: jakedahn Date: Mon, 12 Mar 2012 14:07:51 -0700 Subject: [PATCH 419/967] Making users creatable by default in horizon Change-Id: I021bd17564f1a20591d3aac8a254f4976e14e681 --- files/horizon_settings.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/files/horizon_settings.py b/files/horizon_settings.py index 2d1d1f86..1a6c17af 100644 --- a/files/horizon_settings.py +++ b/files/horizon_settings.py @@ -40,6 +40,12 @@ 'user_home': 'openstack_dashboard.views.user_home', } +# TODO(tres): Remove these once Keystone has an API to identify auth backend. +OPENSTACK_KEYSTONE_BACKEND = { + 'name': 'native', + 'can_edit_user': True +} + OPENSTACK_HOST = "127.0.0.1" OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST # FIXME: this is only needed until keystone fixes its GET /tenants call From df0972c1ee4d8dbb5b7a053198d8772a39fbdf86 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 7 Mar 2012 17:31:03 -0600 Subject: [PATCH 420/967] Spring cleaning in docs This is nearly all about spiffing up stack.sh to improve comment content and readability. 
Shocco has been fixed so the comments and code line up properly again in http://devstack.org/stack.sh.html so the comments are being cleaned up and updated. Change-Id: I2add0351106fb832fbf6e236cbd90630df97dec3 --- stack.sh | 154 ++++++++++++++++++++++++++++++------------------------- 1 file changed, 85 insertions(+), 69 deletions(-) diff --git a/stack.sh b/stack.sh index 4c8dcf45..83adc8dd 100755 --- a/stack.sh +++ b/stack.sh @@ -1,8 +1,9 @@ #!/usr/bin/env bash -# **stack.sh** is an opinionated openstack developer installation. +# **stack.sh** is an opinionated OpenStack developer installation. -# This script installs and configures *nova*, *glance*, *horizon* and *keystone* +# This script installs and configures various combinations of *Glance*, +# *Horizon*, *Keystone*, *Melange*, *Nova*, *Quantum* and *Swift* # This script allows you to specify configuration options of what git # repositories to use, enabled services, network configuration and various @@ -17,6 +18,7 @@ # Learn more and get the most recent version at http://devstack.org + # Sanity Check # ============ @@ -49,19 +51,18 @@ if [ ! -d $FILES ]; then fi - # Settings # ======== -# This script is customizable through setting environment variables. If you -# want to override a setting you can either:: +# ``stack.sh`` is customizable through setting environment variables. If you +# want to override a setting you can set and export it:: # # export MYSQL_PASSWORD=anothersecret # ./stack.sh # # You can also pass options on a single line ``MYSQL_PASSWORD=simple ./stack.sh`` # -# Additionally, you can put any local variables into a ``localrc`` file, like:: +# Additionally, you can put any local variables into a ``localrc`` file:: # # MYSQL_PASSWORD=anothersecret # MYSQL_USER=hellaroot @@ -69,22 +70,17 @@ fi # We try to have sensible defaults, so you should be able to run ``./stack.sh`` # in most cases. # +# DevStack distributes ``stackrc`` which contains locations for the OpenStack +# repositories and branches to configure. ``stackrc`` sources ``localrc`` to +# allow you to override those settings and not have your changes overwritten +# when updating DevStack. + # We support HTTP and HTTPS proxy servers via the usual environment variables -# http_proxy and https_proxy. They can be set in localrc if necessary or +# **http_proxy** and **https_proxy**. They can be set in ``localrc`` if necessary or # on the command line:: # # http_proxy=http://proxy.example.com:3128/ ./stack.sh -# -# We source our settings from ``stackrc``. This file is distributed with devstack -# and contains locations for what repositories to use. If you want to use other -# repositories and branches, you can add your own settings with another file called -# ``localrc`` -# -# If ``localrc`` exists, then ``stackrc`` will load those settings. This is -# useful for changing a branch or repository to test other versions. Also you -# can store your other settings like **MYSQL_PASSWORD** or **ADMIN_PASSWORD** instead -# of letting devstack generate random ones for you. You can customize -# which services to install as well in your localrc. + source ./stackrc # Destination path for installation ``DEST`` @@ -100,7 +96,7 @@ fi # OpenStack is designed to be run as a regular user (Horizon will fail to run # as root, since apache refused to startup serve content from root user). 
If -# stack.sh is run as root, it automatically creates a stack user with +# ``stack.sh`` is run as **root**, it automatically creates a **stack** user with # sudo privileges and runs as that user. if [[ $EUID -eq 0 ]]; then @@ -162,9 +158,9 @@ else sudo rm -f /etc/sudoers.d/stack_sh_nova fi -# Set True to configure stack.sh to run cleanly without Internet access. -# stack.sh must have been previously run with Internet access to install -# prerequisites and initialize $DEST. +# Set True to configure ``stack.sh`` to run cleanly without Internet access. +# ``stack.sh`` must have been previously run with Internet access to install +# prerequisites and initialize ``$DEST``. OFFLINE=`trueorfalse False $OFFLINE` # Set the destination directories for openstack projects @@ -200,8 +196,8 @@ VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes} VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-} -# Nova hypervisor configuration. We default to libvirt whth **kvm** but will -# drop back to **qemu** if we are unable to load the kvm module. Stack.sh can +# Nova hypervisor configuration. We default to libvirt with **kvm** but will +# drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can # also install an **LXC** based system. VIRT_DRIVER=${VIRT_DRIVER:-libvirt} LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} @@ -224,7 +220,7 @@ fi # Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints. SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} -# Configure services to syslog instead of writing to individual log files +# Configure services to use syslog instead of writing to individual log files SYSLOG=`trueorfalse False $SYSLOG` SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP} SYSLOG_PORT=${SYSLOG_PORT:-516} @@ -273,17 +269,18 @@ function read_password { set -o xtrace } -# This function will check if the service(s) specified in argument is -# enabled by the user in ENABLED_SERVICES. +# is_service_enabled() checks if the service(s) specified as arguments are +# enabled by the user in **ENABLED_SERVICES**. # -# If there is multiple services specified as argument it will act as a +# If there are multiple services specified as arguments the test performs a # boolean OR or if any of the services specified on the command line # return true. # -# There is a special cases for some 'catch-all' services : -# nova would catch if any service enabled start by n- -# glance would catch if any service enabled start by g- -# quantum would catch if any service enabled start by q- +# There is a special cases for some 'catch-all' services:: +# **nova** returns true if any service enabled start with **n-** +# **glance** returns true if any service enabled start with **g-** +# **quantum** returns true if any service enabled start with **q-** + function is_service_enabled() { services=$@ for service in ${services}; do @@ -295,11 +292,12 @@ function is_service_enabled() { return 1 } + # Nova Network Configuration # -------------------------- -# FIXME: more documentation about why these are important flags. Also -# we should make sure we use the same variable names as the flag names. +# FIXME: more documentation about why these are important options. Also +# we should make sure we use the same variable names as the option names. 
if [ "$VIRT_DRIVER" = 'xenserver' ]; then PUBLIC_INTERFACE_DEFAULT=eth3 @@ -327,7 +325,7 @@ VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT} TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29} -# Multi-host is a mode where each compute node runs its own network node. This +# **MULTI_HOST** is a mode where each compute node runs its own network node. This # allows network operations and routing for a VM to occur on the server that is # running the VM - removing a SPOF and bandwidth bottleneck. MULTI_HOST=${MULTI_HOST:-False} @@ -380,12 +378,12 @@ FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT} # By default this script will install and configure MySQL. If you want to # use an existing server, you can pass in the user/password/host parameters. # You will need to send the same ``MYSQL_PASSWORD`` to every host if you are doing -# a multi-node devstack installation. +# a multi-node DevStack installation. MYSQL_HOST=${MYSQL_HOST:-localhost} MYSQL_USER=${MYSQL_USER:-root} read_password MYSQL_PASSWORD "ENTER A PASSWORD TO USE FOR MYSQL." -# don't specify /db in this string, so we can use it for multiple services +# NOTE: Don't specify /db in this string so we can use it for multiple services BASE_SQL_CONN=${BASE_SQL_CONN:-mysql://$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST} # Rabbit connection info @@ -395,6 +393,7 @@ read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT." # Glance connection info. Note the port must be specified. GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292} + # SWIFT # ----- # TODO: implement glance support @@ -438,6 +437,7 @@ if is_service_enabled swift; then read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH." fi + # Keystone # -------- @@ -461,6 +461,7 @@ KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST} KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-http} + # Horizon # ------- @@ -469,6 +470,7 @@ KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-http} APACHE_USER=${APACHE_USER:-$USER} APACHE_GROUP=${APACHE_GROUP:-$APACHE_USER} + # Log files # --------- @@ -536,18 +538,21 @@ if [ ! -w $DEST ]; then sudo chown `whoami` $DEST fi + # Install Packages # ================ # # Openstack uses a fair number of other projects. -# - We are going to install packages only for the services needed. -# - We are parsing the packages files and detecting metadatas. -# - If there is a NOPRIME as comment mean we are not doing the install -# just yet. -# - If we have the meta-keyword dist:DISTRO or -# dist:DISTRO1,DISTRO2 it will be installed only for those -# distros (case insensitive). +# get_packages() collects a list of package names of any type from the +# prerequisite files in ``files/{apts|pips}``. The list is intended +# to be passed to a package installer such as apt or pip. +# +# Only packages required for the services in ENABLED_SERVICES will be +# included. Two bits of metadata are recognized in the prerequisite files: +# - ``# NOPRIME`` defers installation to be performed later in stack.sh +# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection +# of the package to the distros listed. The distro names are case insensitive. 
function get_packages() { local package_dir=$1 local file_to_parse @@ -654,10 +659,10 @@ if is_service_enabled melange; then git_clone $MELANGECLIENT_REPO $MELANGECLIENT_DIR $MELANGECLIENT_BRANCH fi + # Initialization # ============== - # setup our checkouts so they are installed into python path # allowing ``import nova`` or ``import glance.client`` cd $KEYSTONECLIENT_DIR; sudo python setup.py develop @@ -688,8 +693,9 @@ if is_service_enabled melange; then cd $MELANGECLIENT_DIR; sudo python setup.py develop fi + # Syslog -# --------- +# ------ if [[ $SYSLOG != "False" ]]; then apt_get install -y rsyslog-relp @@ -710,8 +716,9 @@ EOF sudo /usr/sbin/service rsyslog restart fi + # Rabbit -# --------- +# ------ if is_service_enabled rabbit; then # Install and start rabbitmq-server @@ -724,8 +731,9 @@ if is_service_enabled rabbit; then sudo rabbitmqctl change_password guest $RABBIT_PASSWORD fi + # Mysql -# --------- +# ----- if is_service_enabled mysql; then @@ -762,7 +770,7 @@ fi # Horizon -# --------- +# ------- # Setup the django horizon application to serve via apache/wsgi @@ -862,6 +870,7 @@ if is_service_enabled g-reg; then fi fi + # Nova # ---- @@ -873,17 +882,22 @@ fi sudo chown `whoami` $NOVA_CONF_DIR if is_service_enabled n-api; then - # We are going to use a sample http middleware configuration based on the - # one from the keystone project to launch nova. This paste config adds - # the configuration required for nova to validate keystone tokens. + # Use the sample http middleware configuration supplied in the + # Nova sources. This paste config adds the configuration required + # for Nova to validate Keystone tokens. + + # Allow rate limiting to be turned off for testing, like for Tempest + # NOTE: Set OSAPI_RATE_LIMIT=" " to turn OFF rate limiting + OSAPI_RATE_LIMIT=${OSAPI_RATE_LIMIT:-"ratelimit"} - # Remove legacy paste config + # Remove legacy paste config if present rm -f $NOVA_DIR/bin/nova-api-paste.ini - # First we add a some extra data to the default paste config from nova + # Get the sample configuration file in place cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR - # Then we add our own service token to the configuration + # Rewrite the authtoken configration for our Keystone service. + # This is a bit defensive to allow the sample file some varaince. 
sed -e " /^admin_token/i admin_tenant_name = $SERVICE_TENANT_NAME /admin_tenant_name/s/^.*$/admin_tenant_name = $SERVICE_TENANT_NAME/; @@ -893,14 +907,12 @@ if is_service_enabled n-api; then s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g; " -i $NOVA_CONF_DIR/api-paste.ini - # Finally, we change the pipelines in nova to use keystone + # Finally, change the Nova pipelines to use Keystone function replace_pipeline() { sed "/\[pipeline:$1\]/,/\[/s/^pipeline = .*/pipeline = $2/" -i $NOVA_CONF_DIR/api-paste.ini } replace_pipeline "ec2cloud" "ec2faultwrap logrequest totoken authtoken keystonecontext cloudrequest authorizer validator ec2executor" replace_pipeline "ec2admin" "ec2faultwrap logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor" - # allow people to turn off rate limiting for testing, like when using tempest, by setting OSAPI_RATE_LIMIT=" " - OSAPI_RATE_LIMIT=${OSAPI_RATE_LIMIT:-"ratelimit"} replace_pipeline "openstack_compute_api_v2" "faultwrap authtoken keystonecontext $OSAPI_RATE_LIMIT osapi_compute_app_v2" replace_pipeline "openstack_volume_api_v1" "faultwrap authtoken keystonecontext $OSAPI_RATE_LIMIT osapi_volume_app_v1" fi @@ -1178,6 +1190,7 @@ if is_service_enabled swift; then unset s swift_hash swift_auth_server fi + # Volume Service # -------------- @@ -1327,6 +1340,7 @@ for I in "${EXTRA_OPTS[@]}"; do add_nova_opt ${I//-} done + # XenServer # --------- @@ -1347,6 +1361,7 @@ else add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER" fi + # Nova Database # ~~~~~~~~~~~~~ @@ -1496,7 +1511,6 @@ if is_service_enabled key; then bash $FILES/keystone_data.sh fi - # launch the nova-api and wait for it to answer before continuing if is_service_enabled n-api; then screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api" @@ -1580,7 +1594,6 @@ if is_service_enabled mysql && is_service_enabled nova; then $NOVA_DIR/bin/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL fi - # Launching nova-compute should be as simple as running ``nova-compute`` but # have to do a little more than that in our script. Since we add the group # ``libvirtd`` to our user in this script, when nova-compute is run it is @@ -1598,6 +1611,7 @@ screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CON screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log" + # Install Images # ============== @@ -1606,7 +1620,7 @@ screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log" # The default image is a small ***TTY*** testing image, which lets you login # the username/password of root/password. # -# TTY also uses cloud-init, supporting login via keypair and sending scripts as +# TTY also uses ``cloud-init``, supporting login via keypair and sending scripts as # userdata. See https://help.ubuntu.com/community/CloudInit for more on cloud-init # # Override ``IMAGE_URLS`` with a comma-separated list of uec images. @@ -1691,11 +1705,13 @@ if is_service_enabled g-reg; then done fi + # Fin # === set +o xtrace + # Using the cloud # =============== @@ -1703,24 +1719,24 @@ echo "" echo "" echo "" -# If you installed the horizon on this server, then you should be able +# If you installed Horizon on this server you should be able # to access the site using your browser. 
if is_service_enabled horizon; then - echo "horizon is now available at http://$SERVICE_HOST/" + echo "Horizon is now available at http://$SERVICE_HOST/" fi -# If keystone is present, you can point nova cli to this server +# If Keystone is present you can point ``nova`` cli to this server if is_service_enabled key; then - echo "keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/" - echo "examples on using novaclient command line is in exercise.sh" - echo "the default users are: admin and demo" - echo "the password: $ADMIN_PASSWORD" + echo "Keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/" + echo "Examples on using novaclient command line is in exercise.sh" + echo "The default users are: admin and demo" + echo "The password: $ADMIN_PASSWORD" fi # Echo HOST_IP - useful for build_uec.sh, which uses dhcp to give the instance an address echo "This is your host ip: $HOST_IP" -# Warn that EXTRA_FLAGS needs to be converted to EXTRA_OPTS +# Warn that ``EXTRA_FLAGS`` needs to be converted to ``EXTRA_OPTS`` if [[ -n "$EXTRA_FLAGS" ]]; then echo "WARNING: EXTRA_FLAGS is defined and may need to be converted to EXTRA_OPTS" fi From ae604985e78ae5e9763a348d132cb8e120b85c95 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 12 Mar 2012 17:12:00 -0700 Subject: [PATCH 421/967] Remove references to deprecated ec2 authorizor. * remove replace_pipeline * removes references to sysadmin and netadmin roles * removes references to admin ec2 api * fixes bug 953744 * related fix in nova: https://review.openstack.org/#change,5024 Change-Id: I8792dae0157b8596a5f946670742411781fe47c0 --- files/keystone_data.sh | 10 +++++----- stack.sh | 9 --------- 2 files changed, 5 insertions(+), 14 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index e97ad73d..319bae34 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -10,7 +10,7 @@ # service quantum admin # if enabled # service swift admin # if enabled # demo admin admin -# demo demo Member,sysadmin,netadmin +# demo demo Member,anotherrole # invisible_to_admin demo Member # # Variables set before calling this script: @@ -50,15 +50,15 @@ DEMO_USER=$(get_id keystone user-create --name=demo \ ADMIN_ROLE=$(get_id keystone role-create --name=admin) KEYSTONEADMIN_ROLE=$(get_id keystone role-create --name=KeystoneAdmin) KEYSTONESERVICE_ROLE=$(get_id keystone role-create --name=KeystoneServiceAdmin) -SYSADMIN_ROLE=$(get_id keystone role-create --name=sysadmin) -NETADMIN_ROLE=$(get_id keystone role-create --name=netadmin) +# ANOTHER_ROLE demonstrates that an arbitrary role may be created and used +# TODO(sleepsonthefloor): show how this can be used for rbac in the future! 
+ANOTHER_ROLE=$(get_id keystone role-create --name=anotherrole) # Add Roles to Users in Tenants keystone user-role-add --user $ADMIN_USER --role $ADMIN_ROLE --tenant_id $ADMIN_TENANT keystone user-role-add --user $ADMIN_USER --role $ADMIN_ROLE --tenant_id $DEMO_TENANT -keystone user-role-add --user $DEMO_USER --role $SYSADMIN_ROLE --tenant_id $DEMO_TENANT -keystone user-role-add --user $DEMO_USER --role $NETADMIN_ROLE --tenant_id $DEMO_TENANT +keystone user-role-add --user $DEMO_USER --role $ANOTHER_ROLE --tenant_id $DEMO_TENANT # TODO(termie): these two might be dubious keystone user-role-add --user $ADMIN_USER --role $KEYSTONEADMIN_ROLE --tenant_id $ADMIN_TENANT diff --git a/stack.sh b/stack.sh index 83adc8dd..f6b5e9eb 100755 --- a/stack.sh +++ b/stack.sh @@ -906,15 +906,6 @@ if is_service_enabled n-api; then s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g; s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g; " -i $NOVA_CONF_DIR/api-paste.ini - - # Finally, change the Nova pipelines to use Keystone - function replace_pipeline() { - sed "/\[pipeline:$1\]/,/\[/s/^pipeline = .*/pipeline = $2/" -i $NOVA_CONF_DIR/api-paste.ini - } - replace_pipeline "ec2cloud" "ec2faultwrap logrequest totoken authtoken keystonecontext cloudrequest authorizer validator ec2executor" - replace_pipeline "ec2admin" "ec2faultwrap logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor" - replace_pipeline "openstack_compute_api_v2" "faultwrap authtoken keystonecontext $OSAPI_RATE_LIMIT osapi_compute_app_v2" - replace_pipeline "openstack_volume_api_v1" "faultwrap authtoken keystonecontext $OSAPI_RATE_LIMIT osapi_volume_app_v1" fi # Helper to clean iptables rules From 4d88347f59afbb99a3674bd93cf520c8ac305eea Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 13 Mar 2012 23:56:49 -0500 Subject: [PATCH 422/967] Add exercises/client-args.sh to test the pending cli changes for Keystone, Nova, Glance and Swift. 
Change-Id: I05aeda9be61e9c556d23ebc33076477c71708460 --- exercises/client-args.sh | 142 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 142 insertions(+) create mode 100755 exercises/client-args.sh diff --git a/exercises/client-args.sh b/exercises/client-args.sh new file mode 100755 index 00000000..7cb7c456 --- /dev/null +++ b/exercises/client-args.sh @@ -0,0 +1,142 @@ +#!/usr/bin/env bash + +# Test OpenStack client authentication aguemnts handling + +echo "**************************************************" +echo "Begin DevStack Exercise: $0" +echo "**************************************************" + +# Settings +# ======== + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc + +# Import exercise configuration +source $TOP_DIR/exerciserc + +# Unset all of the known NOVA_ vars +unset NOVA_API_KEY +unset NOVA_ENDPOINT_NAME +unset NOVA_PASSWORD +unset NOVA_PROJECT_ID +unset NOVA_REGION_NAME +unset NOVA_URL +unset NOVA_USERNAME +unset NOVA_VERSION + +# Save the known variables for later +export x_TENANT_NAME=$OS_TENANT_NAME +export x_USERNAME=$OS_USERNAME +export x_PASSWORD=$OS_PASSWORD +export x_AUTH_URL=$OS_AUTH_URL + +#Unset the usual variables to force argument processing +unset OS_TENANT_NAME +unset OS_USERNAME +unset OS_PASSWORD +unset OS_AUTH_URL + +# Common authentication args +TENANT_ARG="--os_tenant_name=$x_TENANT_NAME" +ARGS="--os_username=$x_USERNAME --os_password=$x_PASSWORD --os_auth_url=$x_AUTH_URL" + +# Set global return +RETURN=0 + +# Keystone client +# --------------- +if [[ "$ENABLED_SERVICES" =~ "key" ]]; then + if [[ "$SKIP_EXERCISES" =~ "key" ]] ; then + STATUS_KEYSTONE="Skipped" + else + echo -e "\nTest Keystone" + if keystone $TENANT_ARG $ARGS catalog --service identity; then + STATUS_KEYSTONE="Succeeded" + else + STATUS_KEYSTONE="Failed" + RETURN=1 + fi + fi +fi + +# Nova client +# ----------- + +if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then + if [[ "$SKIP_EXERCISES" =~ "n-api" ]] ; then + STATUS_NOVA="Skipped" + STATUS_EC2="Skipped" + else + # Test OSAPI + echo -e "\nTest Nova" + if nova $TENANT_ARG $ARGS flavor-list; then + STATUS_NOVA="Succeeded" + else + STATUS_NOVA="Failed" + RETURN=1 + fi + fi +fi + +# Glance client +# ------------- + +if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then + if [[ "$SKIP_EXERCISES" =~ "g-api" ]] ; then + STATUS_GLANCE="Skipped" + else + echo -e "\nTest Glance" + if glance $TENANT_ARG $ARGS index; then + STATUS_GLANCE="Succeeded" + else + STATUS_GLANCE="Failed" + RETURN=1 + fi + fi +fi + +# Swift client +# ------------ + +if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then + if [[ "$SKIP_EXERCISES" =~ "swift" ]] ; then + STATUS_SWIFT="Skipped" + else + echo -e "\nTest Swift" + if swift $ARGS stat; then + STATUS_SWIFT="Succeeded" + else + STATUS_SWIFT="Failed" + RETURN=1 + fi + fi +fi + +# Results +# ------- + +function report() { + if [[ -n "$2" ]]; then + echo "$1: $2" + fi +} + +echo -e "\n" +report "Keystone" $STATUS_KEYSTONE +report "Nova" $STATUS_NOVA +report "Glance" $STATUS_GLANCE +report "Swift" $STATUS_SWIFT + +echo "**************************************************" +echo "End DevStack Exercise: $0" +echo "**************************************************" + +exit $RETURN From 440be4b6a85f6e25de834d4f5b2736fd82785b17 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 10 Feb 2012 21:42:39 -0800 Subject: [PATCH 423/967] Add exercise that boots an 
instance from a volume. * Launches a cirros builder instance * Mounts a disk to the builder instance * Creates a bootable volume from builder * Launches the volume-backed instance * Update glance syntax * Don't require instance-to-web communication (that only works with fully configured floating ips) * Add footer/header Change-Id: Ia6dcf399ee49154aaf4e597b060164c2f41cf3d2 --- exercises/boot_from_volume.sh | 253 ++++++++++++++++++++++++++++++++++ 1 file changed, 253 insertions(+) create mode 100755 exercises/boot_from_volume.sh diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh new file mode 100755 index 00000000..8d39703a --- /dev/null +++ b/exercises/boot_from_volume.sh @@ -0,0 +1,253 @@ +#!/usr/bin/env bash + +# **boot_from_volume.sh** + +# This script demonstrates how to boot from a volume. It does the following: +# * Create a 'builder' instance +# * Attach a volume to the instance +# * Format and install an os onto the volume +# * Detach volume from builder, and then boot volume-backed instance + +echo "**************************************************" +echo "Begin DevStack Exercise: $0" +echo "**************************************************" + +# This script exits on an error so that errors don't compound and you see +# only the first error that occured. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. +set -o xtrace + + +# Settings +# ======== + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc + +# Import exercise configuration +source $TOP_DIR/exerciserc + +# Boot this image, use first AMI image if unset +DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} + +# Instance type +DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} + +# Default floating IP pool name +DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova} + +# Grab the id of the image to launch +IMAGE=`glance -f index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` + +die_if_not_set IMAGE "Failure getting image" + +# Instance and volume names +INSTANCE_NAME=${INSTANCE_NAME:-test_instance} +VOL_INSTANCE_NAME=${VOL_INSTANCE_NAME:-test_vol_instance} +VOL_NAME=${VOL_NAME:-test_volume} + +# Clean-up from previous runs +nova delete $VOL_INSTANCE_NAME || true +nova delete $INSTANCE_NAME || true + +# Wait till server is gone +if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $INSTANCE_NAME; do sleep 1; done"; then + echo "server didn't terminate!" 
+ exit 1 +fi + +# Configure Security Groups +SECGROUP=${SECGROUP:-test_secgroup} +nova secgroup-delete $SECGROUP || true +nova secgroup-create $SECGROUP "$SECGROUP description" +nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 +nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 + +# Determinine instance type +INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | cut -d"|" -f2` +if [[ -z "$INSTANCE_TYPE" ]]; then + # grab the first flavor in the list to launch if default doesn't exist + INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2` +fi + +# Setup Keypair +KEY_NAME=test_key +KEY_FILE=key.pem +nova keypair-delete $KEY_NAME || true +nova keypair-add $KEY_NAME > $KEY_FILE +chmod 600 $KEY_FILE + +# Boot our instance +VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP --key_name $KEY_NAME $INSTANCE_NAME | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` + +# check that the status is active within ACTIVE_TIMEOUT seconds +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then + echo "server didn't become active!" + exit 1 +fi + +# Delete the old volume +nova volume-delete $VOL_NAME || true + +# Free every floating ips - setting FREE_ALL_FLOATING_IPS=True in localrc will make life easier for testers +if [ "$FREE_ALL_FLOATING_IPS" = "True" ]; then + nova floating-ip-list | grep nova | cut -d "|" -f2 | tr -d " " | xargs -n1 nova floating-ip-delete || true +fi + +# Allocate floating ip +FLOATING_IP=`nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | cut -d '|' -f2 | tr -d ' '` + +# Make sure the ip gets allocated +if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then + echo "Floating IP not allocated" + exit 1 +fi + +# Add floating ip to our server +nova add-floating-ip $VM_UUID $FLOATING_IP + +# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds +if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then + echo "Couldn't ping server with floating ip" + exit 1 +fi + +# Create our volume +nova volume-create --display_name=$VOL_NAME 1 + +# Wait for volume to activate +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then + echo "Volume $VOL_NAME not created" + exit 1 +fi + +# FIXME (anthony) - python-novaclient should accept a volume_name for the attachment param? +DEVICE=/dev/vdb +VOLUME_ID=`nova volume-list | grep $VOL_NAME | cut -d '|' -f 2 | tr -d ' '` +nova volume-attach $INSTANCE_NAME $VOLUME_ID $DEVICE + +# Wait till volume is attached +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then + echo "Volume $VOL_NAME not created" + exit 1 +fi + +# The following script builds our bootable volume. +# To do this, ssh to the builder instance, mount volume, and build a volume-backed image. +STAGING_DIR=/tmp/stage +CIRROS_DIR=/tmp/cirros +ssh -o StrictHostKeyChecking=no -i $KEY_FILE cirros@$FLOATING_IP << EOF +set -o errexit +set -o xtrace +sudo mkdir -p $STAGING_DIR +sudo mkfs.ext3 -b 1024 $DEVICE 1048576 +sudo mount $DEVICE $STAGING_DIR +# The following lines create a writable empty file so that we can scp +# the actual file +sudo touch $STAGING_DIR/cirros-0.3.0-x86_64-rootfs.img.gz +sudo chown cirros $STAGING_DIR/cirros-0.3.0-x86_64-rootfs.img.gz +EOF + +# Download cirros +if [ ! 
-e cirros-0.3.0-x86_64-rootfs.img.gz ]; then + wget http://images.ansolabs.com/cirros-0.3.0-x86_64-rootfs.img.gz +fi + +# Copy cirros onto the volume +scp -o StrictHostKeyChecking=no -i $KEY_FILE cirros-0.3.0-x86_64-rootfs.img.gz cirros@$FLOATING_IP:$STAGING_DIR + +# Unpack cirros into volume +ssh -o StrictHostKeyChecking=no -i $KEY_FILE cirros@$FLOATING_IP << EOF +set -o errexit +set -o xtrace +cd $STAGING_DIR +sudo mkdir -p $CIRROS_DIR +sudo gunzip cirros-0.3.0-x86_64-rootfs.img.gz +sudo mount cirros-0.3.0-x86_64-rootfs.img $CIRROS_DIR + +# Copy cirros into our volume +sudo cp -pr $CIRROS_DIR/* $STAGING_DIR/ + +cd +sync +sudo umount $CIRROS_DIR +# The following typically fails. Don't know why. +sudo umount $STAGING_DIR || true +EOF + +# Detach the volume from the builder instance +nova volume-detach $INSTANCE_NAME $VOLUME_ID + +# Boot instance from volume! This is done with the --block_device_mapping param. +# The format of mapping is: +# =::: +# Leaving the middle two fields blank appears to do-the-right-thing +VOL_VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block_device_mapping vda=$VOLUME_ID:::0 --security_groups=$SECGROUP --key_name $KEY_NAME $VOL_INSTANCE_NAME | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` + +# Check that the status is active within ACTIVE_TIMEOUT seconds +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VOL_VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then + echo "server didn't become active!" + exit 1 +fi + +# Add floating ip to our server +nova remove-floating-ip $VM_UUID $FLOATING_IP + +# Gratuitous sleep, probably hiding a race condition :/ +sleep 1 + +# Add floating ip to our server +nova add-floating-ip $VOL_VM_UUID $FLOATING_IP + +# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds +if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then + echo "Couldn't ping volume-backed server with floating ip" + exit 1 +fi + +# Make sure our volume-backed instance launched +ssh -o StrictHostKeyChecking=no -i $KEY_FILE cirros@$FLOATING_IP << EOF +echo "success!" +EOF + +# Delete volume backed instance +nova delete $VOL_INSTANCE_NAME + +# Wait till our volume is no longer in-use +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then + echo "Volume $VOL_NAME not created" + exit 1 +fi + +# Delete the volume +nova volume-delete $VOL_NAME + +# Delete instance +nova delete $INSTANCE_NAME + +# Wait for termination +if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $INSTANCE_NAME; do sleep 1; done"; then + echo "server didn't terminate!" 
+ exit 1 +fi + +# De-allocate the floating ip +nova floating-ip-delete $FLOATING_IP + +# Delete secgroup +nova secgroup-delete $SECGROUP + +set +o xtrace +echo "**************************************************" +echo "End DevStack Exercise: $0" +echo "**************************************************" From 1df0789c0a0ee478b946ffec553683da9bca8678 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 15 Mar 2012 01:08:13 -0700 Subject: [PATCH 424/967] Use sudo+virsh to clean instances frm previous run * fixes bug 955782 Change-Id: I8868132d5e18908d309da025760582a778ab4e72 --- stack.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index f6b5e9eb..55c8a457 100755 --- a/stack.sh +++ b/stack.sh @@ -990,10 +990,10 @@ if is_service_enabled n-cpu; then clean_iptables # Destroy old instances - instances=`virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"` + instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"` if [ ! "$instances" = "" ]; then - echo $instances | xargs -n1 virsh destroy || true - echo $instances | xargs -n1 virsh undefine || true + echo $instances | xargs -n1 sudo virsh destroy || true + echo $instances | xargs -n1 sudo virsh undefine || true fi # Logout and delete iscsi sessions From 7dadd35c087df2c7f98e0c7e5458cb237cd3644d Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 14 Mar 2012 14:13:03 +0000 Subject: [PATCH 425/967] Ubuntu precise support. - Fixes bug 954249. - Install python-argparse only on oneiric (tks: dtroyer for suggestions). Change-Id: I5ba8424a9cd172ac5246106163d21ca6a67ef6ef --- files/apts/glance | 2 +- stack.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/files/apts/glance b/files/apts/glance index 71230c49..17c84adb 100644 --- a/files/apts/glance +++ b/files/apts/glance @@ -1,7 +1,7 @@ python-eventlet python-routes python-greenlet -python-argparse +python-argparse # dist:oneiric python-sqlalchemy python-wsgiref python-pastedeploy diff --git a/stack.sh b/stack.sh index f6b5e9eb..803f7b50 100755 --- a/stack.sh +++ b/stack.sh @@ -26,7 +26,7 @@ # installation with ``FORCE=yes ./stack`` DISTRO=$(lsb_release -c -s) -if [[ ! ${DISTRO} =~ (oneiric) ]]; then +if [[ ! ${DISTRO} =~ (oneiric|precise) ]]; then echo "WARNING: this script has only been tested on oneiric" if [[ "$FORCE" != "yes" ]]; then echo "If you wish to run this script anyway run with FORCE=yes" From c9e0188e929a44e8a2ce957885ed755f2096058e Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 15 Mar 2012 17:21:24 +0000 Subject: [PATCH 426/967] Only do a nova-manage db_sync when we have nova. - Fixes bug 956226. Change-Id: I823e8f43425c2d629219bb426fb6aa5d1ab9ff34 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 5425df94..054b15e6 100755 --- a/stack.sh +++ b/stack.sh @@ -1359,7 +1359,7 @@ fi # All nova components talk to a central database. We will need to do this step # only once for an entire cluster. -if is_service_enabled mysql; then +if is_service_enabled mysql && is_service_enabled nova; then # (re)create nova database mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS nova;' mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE nova;' From 504f871cdb1ca4f630847f38a007698747d9a019 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 15 Mar 2012 20:43:26 +0000 Subject: [PATCH 427/967] Update Swift doc. 
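The README update below documents enabling Swift through localrc. A minimal localrc along those lines might look like the following; the SWIFT_HASH value is purely illustrative, it only has to be a random string that never changes for the cluster, and SWIFT_REPLICAS=1 keeps disk I/O down for quick API testing on a small VM:

    ENABLED_SERVICES="$ENABLED_SERVICES,swift"
    SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5
    SWIFT_REPLICAS=1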
Update README.md with the latest changes.

Change-Id: I63e8094cce31e0490b72a131f8a3bcf928c5881a
---
 README.md | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 90becf83..3e57fc35 100644
--- a/README.md
+++ b/README.md
@@ -59,10 +59,18 @@ You can override environment variables used in stack.sh by creating file name 'l
 
 # Swift
 
-Swift is not installed by default, you need to add the **swift** keyword in the ENABLED_SERVICES variable to get it installed.
+Swift is not installed by default; you can enable it easily by adding this to your localrc:
 
-If you have keystone enabled, Swift will authenticate against it, make sure to use the keystone URL to auth against.
+    ENABLED_SERVICES="$ENABLED_SERVICES,swift"
 
-At this time Swift is not started in a screen session but as daemon you need to use the **swift-init** CLI to manage the swift daemons.
+If you want a minimal swift install with only swift and keystone, put this in your localrc instead:
+
+    ENABLED_SERVICES="key,mysql,swift"
+
+If you use swift with keystone, Swift will authenticate against it; make sure to use the keystone URL to auth against.
+
+Swift will act as an S3 endpoint for keystone, effectively replacing **nova-objectstore**.
+
+Only the swift proxy server is launched in the screen session; all other services are started in the background and managed by the **swift-init** tool.
 
 By default Swift will configure 3 replicas (and one spare) which could be IO intensive on a small vm, if you only want to do some quick testing of the API you can choose to only have one replica by customizing the variable SWIFT_REPLICAS in your localrc.

From 77b0e1d8ff9617dc71cf92a7a9d7fb850e2e5998 Mon Sep 17 00:00:00 2001
From: Chmouel Boudjnah
Date: Wed, 29 Feb 2012 16:55:43 +0000
Subject: [PATCH 428/967] Converts bundle exercise to use swift/s3

- Fix keystone s3token configuration (in admin api not public api).
- Set s3 service in keystone to swift if installed.
- Fixes a bug in bundle.sh
- Adds config options for nova to use swift as s3 store

Change-Id: Ic2fca5aba06a25c0b3a74f1e97d062390a8e2ab1
---
 exercises/bundle.sh | 2 +-
 files/default_catalog.templates | 6 +++---
 files/keystone.conf | 4 ++--
 files/keystone_data.sh | 15 ++++++++++++---
 stack.sh | 28 +++++++++++++++++++++++-----
 5 files changed, 41 insertions(+), 14 deletions(-)

diff --git a/exercises/bundle.sh b/exercises/bundle.sh
index 0f128af1..a165d55b 100755
--- a/exercises/bundle.sh
+++ b/exercises/bundle.sh
@@ -57,7 +57,7 @@ AMI=`euca-register $BUCKET/$IMAGE.manifest.xml | cut -f2`
 die_if_not_set AMI "Failure registering $BUCKET/$IMAGE"
 
 # Wait for the image to become available
-if ! timeout $REGISTER_TIMEOUT sh -c "while euca-describe-images | grep '$AMI' | grep 'available'; do sleep 1; done"; then
+if !
timeout $REGISTER_TIMEOUT sh -c "while euca-describe-images | grep $AMI | grep -q available; do sleep 1; done"; then echo "Image $AMI not available within $REGISTER_TIMEOUT seconds" exit 1 fi diff --git a/files/default_catalog.templates b/files/default_catalog.templates index 7a98c94c..31618abb 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -24,9 +24,9 @@ catalog.RegionOne.ec2.internalURL = http://%SERVICE_HOST%:8773/services/Cloud catalog.RegionOne.ec2.name = EC2 Service -catalog.RegionOne.s3.publicURL = http://%SERVICE_HOST%:3333 -catalog.RegionOne.s3.adminURL = http://%SERVICE_HOST%:3333 -catalog.RegionOne.s3.internalURL = http://%SERVICE_HOST%:3333 +catalog.RegionOne.s3.publicURL = http://%SERVICE_HOST%:%S3_SERVICE_PORT% +catalog.RegionOne.s3.adminURL = http://%SERVICE_HOST%:%S3_SERVICE_PORT% +catalog.RegionOne.s3.internalURL = http://%SERVICE_HOST%:%S3_SERVICE_PORT% catalog.RegionOne.s3.name = S3 Service diff --git a/files/keystone.conf b/files/keystone.conf index 5e5bfeb6..1a924edd 100644 --- a/files/keystone.conf +++ b/files/keystone.conf @@ -71,10 +71,10 @@ paste.app_factory = keystone.service:public_app_factory paste.app_factory = keystone.service:admin_app_factory [pipeline:public_api] -pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension s3_extension public_service +pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension public_service [pipeline:admin_api] -pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension crud_extension admin_service +pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension s3_extension crud_extension admin_service [app:public_version_service] paste.app_factory = keystone.service:public_version_app_factory diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 319bae34..a49eb426 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -3,14 +3,14 @@ # Initial data for Keystone using python-keystoneclient # # Tenant User Roles -# ------------------------------------------------------- +# ------------------------------------------------------------------ # admin admin admin # service glance admin -# service nova admin +# service nova admin, [ResellerAdmin (swift only)] # service quantum admin # if enabled # service swift admin # if enabled # demo admin admin -# demo demo Member,anotherrole +# demo demo Member, anotherrole # invisible_to_admin demo Member # # Variables set before calling this script: @@ -96,6 +96,15 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then keystone user-role-add --tenant_id $SERVICE_TENANT \ --user $SWIFT_USER \ --role $ADMIN_ROLE + # Nova needs ResellerAdmin role to download images when accessing + # swift through the s3 api. The admin role in swift allows a user + # to act as an admin for their tenant, but ResellerAdmin is needed + # for a user to act as any tenant. The name of this role is also + # configurable in swift-proxy.conf + RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin) + keystone user-role-add --tenant_id $SERVICE_TENANT \ + --user $NOVA_USER \ + --role $RESELLER_ROLE fi if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then diff --git a/stack.sh b/stack.sh index 5425df94..886e8392 100755 --- a/stack.sh +++ b/stack.sh @@ -430,13 +430,18 @@ SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} # only some quick testing. SWIFT_REPLICAS=${SWIFT_REPLICAS:-3} -# We only ask for Swift Hash if we have enabled swift service. 
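# The S3 port defaulting just below relies on bash default expansion:
# ${VAR:-default} substitutes the default only when VAR is unset or empty,
# so whichever assignment runs first wins and a value already set in localrc
# is never clobbered. A standalone illustration (PORT is only an example
# variable, not part of stack.sh):
#   PORT=${PORT:-8080}   # PORT becomes 8080 only if it was not already set
#   PORT=${PORT:-3333}   # no-op here, PORT is already 8080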
if is_service_enabled swift; then + # If we are using swift, we can default the s3 port to swift instead + # of nova-objectstore + S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080} + # We only ask for Swift Hash if we have enabled swift service. # SWIFT_HASH is a random unique string for a swift cluster that # can never change. read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH." fi +# Set default port for nova-objectstore +S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333} # Keystone # -------- @@ -1017,6 +1022,9 @@ fi # Storage Service if is_service_enabled swift; then + # Install memcached for swift. + apt_get install memcached + # We first do a bit of setup by creating the directories and # changing the permissions so we can run it as our user. @@ -1176,7 +1184,7 @@ if is_service_enabled swift; then # TODO: Bring some services in foreground. # Launch all services. - swift-init all start + swift-init all restart unset s swift_hash swift_auth_server fi @@ -1243,9 +1251,8 @@ add_nova_opt "root_helper=sudo /usr/local/bin/nova-rootwrap" add_nova_opt "compute_scheduler_driver=$SCHEDULER" add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF_DIR/$NOVA_CONF" add_nova_opt "fixed_range=$FIXED_RANGE" -if is_service_enabled n-obj; then - add_nova_opt "s3_host=$SERVICE_HOST" -fi +add_nova_opt "s3_host=$SERVICE_HOST" +add_nova_opt "s3_port=$S3_SERVICE_PORT" if is_service_enabled quantum; then add_nova_opt "network_manager=nova.network.quantum.manager.QuantumManager" add_nova_opt "quantum_connection_host=$Q_HOST" @@ -1471,6 +1478,7 @@ if is_service_enabled key; then sudo sed -e "s,%SERVICE_HOST%,$SERVICE_HOST,g" -i $KEYSTONE_CATALOG + sudo sed -e "s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g" -i $KEYSTONE_CATALOG if [ "$SYSLOG" != "False" ]; then cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_DIR/etc/logging.conf @@ -1500,6 +1508,16 @@ if is_service_enabled key; then SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \ bash $FILES/keystone_data.sh + + # create an access key and secret key for nova ec2 register image + if is_service_enabled swift && is_service_enabled nova; then + CREDS=$(keystone --os_auth_url=$SERVICE_ENDPOINT --os_username=nova --os_password=$SERVICE_PASSWORD --os_tenant_name=$SERVICE_TENANT_NAME ec2-credentials-create) + ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') + SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') + add_nova_opt "s3_access_key=$ACCESS_KEY" + add_nova_opt "s3_secret_key=$SECRET_KEY" + add_nova_opt "s3_affix_tenant=True" + fi fi # launch the nova-api and wait for it to answer before continuing From 185c66e44f786393f40724c7d2f74c5ac27f7034 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 15 Mar 2012 17:17:39 +0000 Subject: [PATCH 429/967] Run swift-proxy from screen. - Fixes bug 956172 Change-Id: I9a83d6afc04596bf84e3cf27a3f3f6f7b7d05180 --- stack.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 218e83f5..27173a14 100755 --- a/stack.sh +++ b/stack.sh @@ -1182,9 +1182,10 @@ if is_service_enabled swift; then # We then can start rsync. sudo /etc/init.d/rsync restart || : - # TODO: Bring some services in foreground. - # Launch all services. 
+ # With swift-init we are first spawning all the swift services but kill the + # proxy service so we can run it in foreground in screen. swift-init all restart + swift-init proxy stop unset s swift_hash swift_auth_server fi @@ -1619,6 +1620,7 @@ screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_ screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF" screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log" +screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_LOCATION}/proxy-server.conf -v" # Install Images From 7ee6a9724db57eb5395d661a525bacba061085bc Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Fri, 16 Mar 2012 15:42:21 +0000 Subject: [PATCH 430/967] Add python-mysqldb to keystone apt package list. - Fixes bug 957065. Change-Id: If811a725c2348cebfb326e08ef69909905e46420 --- files/apts/keystone | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apts/keystone b/files/apts/keystone index 94479c92..ce536bfc 100644 --- a/files/apts/keystone +++ b/files/apts/keystone @@ -7,6 +7,7 @@ python-paste sqlite3 python-pysqlite2 python-sqlalchemy +python-mysqldb python-webob python-greenlet python-routes From b1f6c1835b52f4045e73eda34d3a98d89518f388 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 16 Mar 2012 09:23:23 -0500 Subject: [PATCH 431/967] Fix swift restart error 'swift-init all {restart|stop}' exits with '1' if there are any problems sutting down any swift server, including if they were not running to begin with. An attempt to address this has already been ignored by the Swift team, and it would not have completely eliminated the problem, so we'll just be defensive here. Change-Id: Ib8e30221e1c8873d0e849add89dc87d5ccda9d82 --- stack.sh | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 27173a14..68b430e8 100755 --- a/stack.sh +++ b/stack.sh @@ -1182,10 +1182,12 @@ if is_service_enabled swift; then # We then can start rsync. sudo /etc/init.d/rsync restart || : - # With swift-init we are first spawning all the swift services but kill the + # First spawn all the swift services then kill the # proxy service so we can run it in foreground in screen. - swift-init all restart - swift-init proxy stop + # ``swift-init ... {stop|restart}`` exits with '1' if no servers are running, + # ignore it just in case + swift-init all restart || true + swift-init proxy stop || true unset s swift_hash swift_auth_server fi From e347b990ceb091fb3b4e8d1924ee3f6bddaa7cba Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Fri, 16 Mar 2012 17:38:49 +0000 Subject: [PATCH 432/967] Don't enabled nova-objectstore if swift is enabled - Fixes bug 957178. 
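The services above are launched through the screen_it helper, whose definition is not part of this excerpt. As a rough sketch of what such a helper does (an approximation under that assumption, not the actual stack.sh definition), it creates a window in the shared screen session and types the command into it, skipping services that are not enabled:

    function screen_it {
        # simplified approximation for illustration only
        if is_service_enabled $1; then
            NL=`echo -ne '\015'`
            screen -S stack -X screen -t $1
            screen -S stack -p $1 -X stuff "$2$NL"
        fi
    }

That is why the swift proxy can simply be handed to screen_it in the change above while the remaining swift daemons stay under swift-init control.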
Change-Id: Ieb2840344bf8c0d9a1da50925f5ca0649d9dad21 --- stack.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 68b430e8..111f8ef8 100755 --- a/stack.sh +++ b/stack.sh @@ -1614,7 +1614,6 @@ fi # We don't check for is_service_enable as screen_it does it for us screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_DIR/bin/nova-compute" screen_it n-crt "cd $NOVA_DIR && $NOVA_DIR/bin/nova-cert" -screen_it n-obj "cd $NOVA_DIR && $NOVA_DIR/bin/nova-objectstore" screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume" screen_it n-net "cd $NOVA_DIR && $NOVA_DIR/bin/nova-network" screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler" @@ -1624,6 +1623,10 @@ screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log" screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_LOCATION}/proxy-server.conf -v" +# Starting the nova-objectstore only if swift service is not enabled. +# Swift will act as s3 objectstore. +is_service_enabled swift || \ + screen_it n-obj "cd $NOVA_DIR && $NOVA_DIR/bin/nova-objectstore" # Install Images # ============== From b3e2f3399c62cb4e4515de587db91999ecbacb3c Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 16 Mar 2012 17:01:49 -0700 Subject: [PATCH 433/967] Some tweaks for xen + devstack. * Import functions for git_clone (allows RECLONE for xen plugins) * Fix a potential xvas path issue * Tweaks to docs Change-Id: I34f5c57a53884dfe944f3b0eb8896c57e348e389 --- tools/xen/build_domU.sh | 16 +++++----------- tools/xen/xenrc | 2 +- 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/tools/xen/build_domU.sh b/tools/xen/build_domU.sh index 04627965..5fa7aa85 100755 --- a/tools/xen/build_domU.sh +++ b/tools/xen/build_domU.sh @@ -10,7 +10,10 @@ fi # This directory TOP_DIR=$(cd $(dirname "$0") && pwd) -# Source params - override xenrc params in your localrc to suite your taste +# Source lower level functions +. $TOP_DIR/../../functions + +# Source params - override xenrc params in your localrc to suit your taste source xenrc # Echo commands @@ -134,17 +137,8 @@ echo 1 > /proc/sys/net/ipv4/ip_forward SR_UUID=`xe sr-list --minimal name-label="Local storage"` xe sr-param-set uuid=$SR_UUID other-config:i18n-key=local-storage -# Clean nova if desired -if [ "$CLEAN" = "1" ]; then - rm -rf $TOP_DIR/nova -fi - # Checkout nova -if [ ! -d $TOP_DIR/nova ]; then - env GIT_SSL_NO_VERIFY=true git clone $NOVA_REPO - cd $TOP_DIR/nova - git checkout $NOVA_BRANCH -fi +git_clone $NOVA_REPO $TOP_DIR/nova $NOVA_BRANCH # Install plugins cp -pr $TOP_DIR/nova/plugins/xenserver/xenapi/etc/xapi.d /etc/ diff --git a/tools/xen/xenrc b/tools/xen/xenrc index 73f9c025..58fda31a 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -36,7 +36,7 @@ MGT_VLAN=${MGT_VLAN:-101} MGT_DEV=${MGT_DEV:-eth0} # XVA Directory -XVA_DIR=${XVA_DIR:-xvas} +XVA_DIR=${XVA_DIR:-`pwd`/xvas} # Path to xva file XVA=${XVA:-$XVA_DIR/$GUEST_NAME.xva } From 84a03e0d8338fb566680517fb138382fc089ba43 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 15 Mar 2012 11:27:13 -0700 Subject: [PATCH 434/967] Remove default quantum-keystoneclient dep. 
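The conditions being adjusted below rely on is_service_enabled, which is defined elsewhere (presumably in the functions file) and not shown in this excerpt. As implied by its callers here, it succeeds when any of its arguments appears in the comma-separated ENABLED_SERVICES list; a simplified approximation, not the actual definition:

    # succeed if any named service appears in $ENABLED_SERVICES
    function is_service_enabled() {
        local service
        for service in "$@"; do
            [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
        done
        return 1
    }

That any-match, multi-argument behaviour is why narrowing "is_service_enabled q-svc horizon" to "is_service_enabled quantum" stops the quantum client from being pulled in for horizon-only installs.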
* Don't automatically install q-cli with horizon * Always install q-cli if ENABLED_SERVICES includes 'quantum' * Completely remove quantum config from horizon (there is no quantum+horizon in essex) Change-Id: I0897437326abd757b6c792fd6ec946fa6e7981c9 --- stack.sh | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/stack.sh b/stack.sh index 111f8ef8..ff093163 100755 --- a/stack.sh +++ b/stack.sh @@ -651,7 +651,7 @@ if is_service_enabled q-svc; then # quantum git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH fi -if is_service_enabled q-svc horizon; then +if is_service_enabled quantum; then git_clone $QUANTUM_CLIENT_REPO $QUANTUM_CLIENT_DIR $QUANTUM_CLIENT_BRANCH fi @@ -688,7 +688,7 @@ fi if is_service_enabled q-svc; then cd $QUANTUM_DIR; sudo python setup.py develop fi -if is_service_enabled q-svc horizon; then +if is_service_enabled quantum; then cd $QUANTUM_CLIENT_DIR; sudo python setup.py develop fi if is_service_enabled m-svc; then @@ -784,9 +784,6 @@ if is_service_enabled horizon; then # Install apache2, which is NOPRIME'd apt_get install apache2 libapache2-mod-wsgi - # Link to quantum client directory. - rm -fr ${HORIZON_DIR}/openstack_dashboard/quantum - ln -s ${QUANTUM_CLIENT_DIR}/quantum ${HORIZON_DIR}/openstack_dashboard/quantum # Remove stale session database. rm -f $HORIZON_DIR/openstack_dashboard/local/dashboard_openstack.sqlite3 @@ -795,11 +792,6 @@ if is_service_enabled horizon; then local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py cp $FILES/horizon_settings.py $local_settings - # Enable quantum in dashboard, if requested - if is_service_enabled quantum; then - sudo sed -e "s,QUANTUM_ENABLED = False,QUANTUM_ENABLED = True,g" -i $local_settings - fi - # Initialize the horizon database (it stores sessions and notices shown to # users). The user system is external (keystone). cd $HORIZON_DIR From 09407d90a841c05d83f82b588af0769161326a4d Mon Sep 17 00:00:00 2001 From: Gabriel Hurley Date: Sun, 18 Mar 2012 16:26:22 -0700 Subject: [PATCH 435/967] Removes extraneous quotes from service names. Fixes bug 958894. 
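The quoting fix below matters because Keystone's templated catalog backend appears to take everything after the equals sign as the literal value (an inference from the fix itself, not something shown in this excerpt), so quotation marks become part of the service name instead of delimiting it:

    catalog.RegionOne.network.name = 'Quantum Service'
    catalog.RegionOne.network.name = Quantum Service

With the first form the registered name would include the quote characters; the second registers the intended "Quantum Service".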
Change-Id: Id517519b027c70eff22e04b79597f6d47fcc5eed --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index ff093163..6c12c96f 100755 --- a/stack.sh +++ b/stack.sh @@ -1460,7 +1460,7 @@ if is_service_enabled key; then echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG - echo "catalog.RegionOne.object_store.name = 'Swift Service'" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG fi # Add quantum endpoints to service catalog if quantum is enabled @@ -1468,7 +1468,7 @@ if is_service_enabled key; then echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG - echo "catalog.RegionOne.network.name = 'Quantum Service'" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG fi sudo sed -e "s,%SERVICE_HOST%,$SERVICE_HOST,g" -i $KEYSTONE_CATALOG From 27e326995aaddf9e34cdf54a56f0ed02fc04bfcc Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 16 Mar 2012 16:16:56 -0500 Subject: [PATCH 436/967] Simplify die_if_error * Replace die_if_error() with the simpler die() * Attempt to clean up unnecessary trace output * Formatting cleanups on all exercise scripts Change-Id: I72a542b3a59ee9bf12bee6bcc605edd7579205e0 --- exercise.sh | 17 +++++++++---- exercises/boot_from_volume.sh | 44 +++++++++++++++++++++------------- exercises/bundle.sh | 24 +++++++++---------- exercises/client-args.sh | 10 ++++---- exercises/client-env.sh | 10 ++++---- exercises/euca.sh | 45 ++++++++++++++++++++--------------- exercises/floating_ips.sh | 36 ++++++++++++---------------- exercises/swift.sh | 27 ++++++++++----------- exercises/volumes.sh | 26 ++++++++++---------- functions | 37 +++++++++++++++++----------- stack.sh | 3 +-- tests/functions.sh | 23 ++++-------------- 12 files changed, 154 insertions(+), 148 deletions(-) diff --git a/exercise.sh b/exercise.sh index dd45c5ac..2072b23b 100755 --- a/exercise.sh +++ b/exercise.sh @@ -1,6 +1,13 @@ #!/usr/bin/env bash -source ./stackrc +# **exercise.sh** + +# Keep track of the current devstack directory. +TOP_DIR=$(cd $(dirname "$0") && pwd) + +# Load local configuration +source $TOP_DIR/stackrc + # Run everything in the exercises/ directory that isn't explicitly disabled # comma separated list of script basenames to skip @@ -21,9 +28,9 @@ for script in $basenames; do if [[ "$SKIP_EXERCISES" =~ $script ]] ; then skips="$skips $script" else - echo ========================= + echo "=====================================================================" echo Running $script - echo ========================= + echo "=====================================================================" $EXERCISE_DIR/$script.sh if [[ $? 
-ne 0 ]] ; then failures="$failures $script" @@ -34,8 +41,7 @@ for script in $basenames; do done # output status of exercise run -echo ========================= -echo ========================= +echo "=====================================================================" for script in $skips; do echo SKIP $script done @@ -45,6 +51,7 @@ done for script in $failures; do echo FAILED $script done +echo "=====================================================================" if [ -n "$failures" ] ; then exit 1 diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 8d39703a..c707b470 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -8,9 +8,9 @@ # * Format and install an os onto the volume # * Detach volume from builder, and then boot volume-backed instance -echo "**************************************************" +echo "*********************************************************************" echo "Begin DevStack Exercise: $0" -echo "**************************************************" +echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see # only the first error that occured. @@ -46,9 +46,12 @@ DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} # Default floating IP pool name DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova} + +# Launching servers +# ================= + # Grab the id of the image to launch IMAGE=`glance -f index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` - die_if_not_set IMAGE "Failure getting image" # Instance and volume names @@ -88,7 +91,8 @@ nova keypair-add $KEY_NAME > $KEY_FILE chmod 600 $KEY_FILE # Boot our instance -VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP --key_name $KEY_NAME $INSTANCE_NAME | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` +VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP --key_name $KEY_NAME $INSTANCE_NAME | grep ' id ' | get_field 2` +die_if_not_set VM_UUID "Failure launching $INSTANCE_NAME" # check that the status is active within ACTIVE_TIMEOUT seconds if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then @@ -105,7 +109,7 @@ if [ "$FREE_ALL_FLOATING_IPS" = "True" ]; then fi # Allocate floating ip -FLOATING_IP=`nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | cut -d '|' -f2 | tr -d ' '` +FLOATING_IP=`nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1` # Make sure the ip gets allocated if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then @@ -133,7 +137,7 @@ fi # FIXME (anthony) - python-novaclient should accept a volume_name for the attachment param? 
DEVICE=/dev/vdb -VOLUME_ID=`nova volume-list | grep $VOL_NAME | cut -d '|' -f 2 | tr -d ' '` +VOLUME_ID=`nova volume-list | grep $VOL_NAME | get_field 1` nova volume-attach $INSTANCE_NAME $VOLUME_ID $DEVICE # Wait till volume is attached @@ -192,7 +196,8 @@ nova volume-detach $INSTANCE_NAME $VOLUME_ID # The format of mapping is: # =::: # Leaving the middle two fields blank appears to do-the-right-thing -VOL_VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block_device_mapping vda=$VOLUME_ID:::0 --security_groups=$SECGROUP --key_name $KEY_NAME $VOL_INSTANCE_NAME | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` +VOL_VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block_device_mapping vda=$VOLUME_ID:::0 --security_groups=$SECGROUP --key_name $KEY_NAME $VOL_INSTANCE_NAME | grep ' id ' | get_field 2` +die_if_not_set VOL_VM_UUID "Failure launching $VOL_INSTANCE_NAME" # Check that the status is active within ACTIVE_TIMEOUT seconds if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VOL_VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then @@ -201,7 +206,7 @@ if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VOL_VM_UUID | grep status fi # Add floating ip to our server -nova remove-floating-ip $VM_UUID $FLOATING_IP +nova remove-floating-ip $VM_UUID $FLOATING_IP # Gratuitous sleep, probably hiding a race condition :/ sleep 1 @@ -221,7 +226,8 @@ echo "success!" EOF # Delete volume backed instance -nova delete $VOL_INSTANCE_NAME +nova delete $VOL_INSTANCE_NAME || \ + die "Failure deleting instance volume $VOL_INSTANCE_NAME" # Wait till our volume is no longer in-use if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then @@ -230,10 +236,12 @@ if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | fi # Delete the volume -nova volume-delete $VOL_NAME +nova volume-delete $VOL_NAME || \ + die "Failure deleting volume $VOLUME_NAME" # Delete instance -nova delete $INSTANCE_NAME +nova delete $INSTANCE_NAME || \ + die "Failure deleting instance $INSTANCE_NAME" # Wait for termination if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $INSTANCE_NAME; do sleep 1; done"; then @@ -242,12 +250,14 @@ if ! 
timeout $ACTIVE_TIMEOUT sh -c "while nova show $INSTANCE_NAME; do sleep 1; fi # De-allocate the floating ip -nova floating-ip-delete $FLOATING_IP +nova floating-ip-delete $FLOATING_IP || \ + die "Failure deleting floating IP $FLOATING_IP" -# Delete secgroup -nova secgroup-delete $SECGROUP +# Delete a secgroup +nova secgroup-delete $SECGROUP || \ + die "Failure deleting security group $SECGROUP" set +o xtrace -echo "**************************************************" -echo "End DevStack Exercise: $0" -echo "**************************************************" +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" diff --git a/exercises/bundle.sh b/exercises/bundle.sh index a165d55b..c607c94b 100755 --- a/exercises/bundle.sh +++ b/exercises/bundle.sh @@ -1,11 +1,13 @@ #!/usr/bin/env bash +# **bundle.sh** + # we will use the ``euca2ools`` cli tool that wraps the python boto -# library to test ec2 compatibility +# library to test ec2 bundle upload compatibility -echo "**************************************************" +echo "*********************************************************************" echo "Begin DevStack Exercise: $0" -echo "**************************************************" +echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see # only the first error that occured. @@ -46,12 +48,9 @@ REGISTER_TIMEOUT=${REGISTER_TIMEOUT:-15} BUCKET=testbucket IMAGE=bundle.img truncate -s 5M /tmp/$IMAGE -euca-bundle-image -i /tmp/$IMAGE -die_if_error "Failure bundling image $IMAGE" - +euca-bundle-image -i /tmp/$IMAGE || die "Failure bundling image $IMAGE" -euca-upload-bundle -b $BUCKET -m /tmp/$IMAGE.manifest.xml -die_if_error "Failure uploading bundle $IMAGE to $BUCKET" +euca-upload-bundle -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die "Failure uploading bundle $IMAGE to $BUCKET" AMI=`euca-register $BUCKET/$IMAGE.manifest.xml | cut -f2` die_if_not_set AMI "Failure registering $BUCKET/$IMAGE" @@ -63,10 +62,9 @@ if ! 
timeout $REGISTER_TIMEOUT sh -c "while euca-describe-images | grep $AMI | g fi # Clean up -euca-deregister $AMI -die_if_error "Failure deregistering $AMI" +euca-deregister $AMI || die "Failure deregistering $AMI" set +o xtrace -echo "**************************************************" -echo "End DevStack Exercise: $0" -echo "**************************************************" +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" diff --git a/exercises/client-args.sh b/exercises/client-args.sh index 7cb7c456..66fddcf1 100755 --- a/exercises/client-args.sh +++ b/exercises/client-args.sh @@ -2,9 +2,9 @@ # Test OpenStack client authentication aguemnts handling -echo "**************************************************" +echo "*********************************************************************" echo "Begin DevStack Exercise: $0" -echo "**************************************************" +echo "*********************************************************************" # Settings # ======== @@ -135,8 +135,8 @@ report "Nova" $STATUS_NOVA report "Glance" $STATUS_GLANCE report "Swift" $STATUS_SWIFT -echo "**************************************************" -echo "End DevStack Exercise: $0" -echo "**************************************************" +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" exit $RETURN diff --git a/exercises/client-env.sh b/exercises/client-env.sh index 0f172750..af2c4c24 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -2,9 +2,9 @@ # Test OpenStack client enviroment variable handling -echo "**************************************************" +echo "*********************************************************************" echo "Begin DevStack Exercise: $0" -echo "**************************************************" +echo "*********************************************************************" # Verify client workage VERIFY=${1:-""} @@ -149,8 +149,8 @@ report "EC2" $STATUS_EC2 report "Glance" $STATUS_GLANCE report "Swift" $STATUS_SWIFT -echo "**************************************************" -echo "End DevStack Exercise: $0" -echo "**************************************************" +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" exit $RETURN diff --git a/exercises/euca.sh b/exercises/euca.sh index 703c7aac..76e5202a 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -1,11 +1,13 @@ #!/usr/bin/env bash +# **euca.sh** + # we will use the ``euca2ools`` cli tool that wraps the python boto # library to test ec2 compatibility -echo "**************************************************" +echo "*********************************************************************" echo "Begin DevStack Exercise: $0" -echo "**************************************************" +echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see # only the first error that occured. @@ -15,6 +17,7 @@ set -o errexit # an error. It is also useful for following allowing as the install occurs. 
set -o xtrace + # Settings # ======== @@ -34,6 +37,10 @@ source $TOP_DIR/exerciserc # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} + +# Launching a server +# ================== + # Find a machine image to boot IMAGE=`euca-describe-images | grep machine | cut -f2 | head -n1` @@ -64,12 +71,12 @@ FLOATING_IP=`euca-allocate-address | cut -f2` die_if_not_set FLOATING_IP "Failure allocating floating IP" # Associate floating address -euca-associate-address -i $INSTANCE $FLOATING_IP -die_if_error "Failure associating address $FLOATING_IP to $INSTANCE" +euca-associate-address -i $INSTANCE $FLOATING_IP || \ + die "Failure associating address $FLOATING_IP to $INSTANCE" # Authorize pinging -euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP -die_if_error "Failure authorizing rule in $SECGROUP" +euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \ + die "Failure authorizing rule in $SECGROUP" # Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then @@ -78,12 +85,12 @@ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sle fi # Revoke pinging -euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP -die_if_error "Failure revoking rule in $SECGROUP" +euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \ + die "Failure revoking rule in $SECGROUP" # Release floating address -euca-disassociate-address $FLOATING_IP -die_if_error "Failure disassociating address $FLOATING_IP" +euca-disassociate-address $FLOATING_IP || \ + die "Failure disassociating address $FLOATING_IP" # Wait just a tick for everything above to complete so release doesn't fail if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep $INSTANCE | grep -q $FLOATING_IP; do sleep 1; done"; then @@ -92,8 +99,8 @@ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep $INS fi # Release floating address -euca-release-address $FLOATING_IP -die_if_error "Failure releasing address $FLOATING_IP" +euca-release-address $FLOATING_IP || \ + die "Failure releasing address $FLOATING_IP" # Wait just a tick for everything above to complete so terminate doesn't fail if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep -q $FLOATING_IP; do sleep 1; done"; then @@ -102,8 +109,8 @@ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep -q $ fi # Terminate instance -euca-terminate-instances $INSTANCE -die_if_error "Failure terminating instance $INSTANCE" +euca-terminate-instances $INSTANCE || \ + die "Failure terminating instance $INSTANCE" # Assure it has terminated within a reasonable time if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then @@ -112,10 +119,10 @@ if ! 
timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | fi # Delete group -euca-delete-group $SECGROUP -die_if_error "Failure deleting security group $SECGROUP" +euca-delete-group $SECGROUP || \ + die "Failure deleting security group $SECGROUP" set +o xtrace -echo "**************************************************" -echo "End DevStack Exercise: $0" -echo "**************************************************" +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index f2b9d036..9974b4b9 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -1,15 +1,13 @@ #!/usr/bin/env bash -# **exercise.sh** - using the cloud can be fun +# **floating_ips.sh** - using the cloud can be fun # we will use the ``nova`` cli tool provided by the ``python-novaclient`` -# package -# +# package to work out the instance connectivity - -echo "**************************************************" +echo "*********************************************************************" echo "Begin DevStack Exercise: $0" -echo "**************************************************" +echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see # only the first error that occured. @@ -51,6 +49,7 @@ DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova} # Additional floating IP pool and range TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} + # Launching a server # ================== @@ -162,8 +161,8 @@ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $ fi # add floating ip to our server -nova add-floating-ip $VM_UUID $FLOATING_IP -die_if_error "Failure adding floating IP $FLOATING_IP to $NAME" +nova add-floating-ip $VM_UUID $FLOATING_IP || \ + die "Failure adding floating IP $FLOATING_IP to $NAME" # test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then @@ -182,8 +181,7 @@ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TES fi # dis-allow icmp traffic (ping) -nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 -die_if_error "Failure deleting security group rule from $SECGROUP" +nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || die "Failure deleting security group rule from $SECGROUP" # FIXME (anthony): make xs support security groups if [ "$VIRT_DRIVER" != "xenserver" ]; then @@ -196,16 +194,13 @@ if [ "$VIRT_DRIVER" != "xenserver" ]; then fi # de-allocate the floating ip -nova floating-ip-delete $FLOATING_IP -die_if_error "Failure deleting floating IP $FLOATING_IP" +nova floating-ip-delete $FLOATING_IP || die "Failure deleting floating IP $FLOATING_IP" # Delete second floating IP -nova floating-ip-delete $TEST_FLOATING_IP -die_if_error "Failure deleting floating IP $TEST_FLOATING_IP" +nova floating-ip-delete $TEST_FLOATING_IP || die "Failure deleting floating IP $TEST_FLOATING_IP" # shutdown the server -nova delete $VM_UUID -die_if_error "Failure deleting instance $NAME" +nova delete $VM_UUID || die "Failure deleting instance $NAME" # make sure the VM shuts down within a reasonable time if ! 
timeout $TERMINATE_TIMEOUT sh -c "while nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then @@ -214,10 +209,9 @@ if ! timeout $TERMINATE_TIMEOUT sh -c "while nova show $VM_UUID | grep status | fi # Delete a secgroup -nova secgroup-delete $SECGROUP -die_if_error "Failure deleting security group $SECGROUP" +nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" set +o xtrace -echo "**************************************************" -echo "End DevStack Exercise: $0" -echo "**************************************************" +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" diff --git a/exercises/swift.sh b/exercises/swift.sh index b70b85f2..d8b41a33 100755 --- a/exercises/swift.sh +++ b/exercises/swift.sh @@ -1,10 +1,12 @@ #!/usr/bin/env bash +# **swift.sh** + # Test swift via the command line tools that ship with it. -echo "**************************************************" +echo "*********************************************************************" echo "Begin DevStack Exercise: $0" -echo "**************************************************" +echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see # only the first error that occured. @@ -39,27 +41,22 @@ CONTAINER=ex-swift # ============= # Check if we have to swift via keystone -swift stat -die_if_error "Failure geting status" +swift stat || die "Failure geting status" # We start by creating a test container -swift post $CONTAINER -die_if_error "Failure creating container $CONTAINER" +swift post $CONTAINER || die "Failure creating container $CONTAINER" # add some files into it. -swift upload $CONTAINER /etc/issue -die_if_error "Failure uploading file to container $CONTAINER" +swift upload $CONTAINER /etc/issue || die "Failure uploading file to container $CONTAINER" # list them -swift list $CONTAINER -die_if_error "Failure listing contents of container $CONTAINER" +swift list $CONTAINER || die "Failure listing contents of container $CONTAINER" # And we may want to delete them now that we have tested that # everything works. -swift delete $CONTAINER -die_if_error "Failure deleting container $CONTAINER" +swift delete $CONTAINER || die "Failure deleting container $CONTAINER" set +o xtrace -echo "**************************************************" -echo "End DevStack Exercise: $0" -echo "**************************************************" +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 77c3498c..1abbecc0 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -1,10 +1,12 @@ #!/usr/bin/env bash +# **volumes.sh** + # Test nova volumes with the nova command from python-novaclient -echo "**************************************************" +echo "*********************************************************************" echo "Begin DevStack Exercise: $0" -echo "**************************************************" +echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see # only the first error that occured. 
@@ -37,6 +39,7 @@ DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} # Boot this image, use first AMi image if unset DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} + # Launching a server # ================== @@ -136,8 +139,8 @@ die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME" # Attach to server DEVICE=/dev/vdb -nova volume-attach $VM_UUID $VOL_ID $DEVICE -die_if_error "Failure attaching volume $VOL_NAME to $NAME" +nova volume-attach $VM_UUID $VOL_ID $DEVICE || \ + die "Failure attaching volume $VOL_NAME to $NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then echo "Volume $VOL_NAME not attached to $NAME" exit 1 @@ -151,26 +154,23 @@ if [[ "$VOL_ATTACH" != $VM_UUID ]]; then fi # Detach volume -nova volume-detach $VM_UUID $VOL_ID -die_if_error "Failure detaching volume $VOL_NAME from $NAME" +nova volume-detach $VM_UUID $VOL_ID || die "Failure detaching volume $VOL_NAME from $NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then echo "Volume $VOL_NAME not detached from $NAME" exit 1 fi # Delete volume -nova volume-delete $VOL_ID -die_if_error "Failure deleting volume $VOL_NAME" +nova volume-delete $VOL_ID || die "Failure deleting volume $VOL_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME; do sleep 1; done"; then echo "Volume $VOL_NAME not deleted" exit 1 fi # shutdown the server -nova delete $NAME -die_if_error "Failure deleting instance $NAME" +nova delete $NAME || die "Failure deleting instance $NAME" set +o xtrace -echo "**************************************************" -echo "End DevStack Exercise: $0" -echo "**************************************************" +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" diff --git a/functions b/functions index c4d56a23..7fd37c00 100644 --- a/functions +++ b/functions @@ -1,5 +1,9 @@ # functions - Common functions used by DevStack components +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + # apt-get wrapper to set arguments correctly # apt_get package [package ...] @@ -22,15 +26,13 @@ function cp_it { } -# Checks the exit code of the last command and prints "message" -# if it is non-zero and exits -# die_if_error "message" -function die_if_error() { +# Prints "message" and exits +# die "message" +function die() { local exitcode=$? - if [ $exitcode != 0 ]; then - echo $@ - exit $exitcode - fi + set +o xtrace + echo $@ + exit $exitcode } @@ -39,12 +41,16 @@ function die_if_error() { # NOTE: env-var is the variable name without a '$' # die_if_not_set env-var "message" function die_if_not_set() { - local exitcode=$? - local evar=$1; shift - if ! is_set $evar || [ $exitcode != 0 ]; then - echo $@ - exit 99 - fi + ( + local exitcode=$? + set +o xtrace + local evar=$1; shift + if ! 
is_set $evar || [ $exitcode != 0 ]; then + set +o xtrace + echo $@ + exit -1 + fi + ) } @@ -143,3 +149,6 @@ function trueorfalse() { [[ "1 yes true True TRUE" =~ "$testval" ]] && { echo "True"; return; } echo "$default" } + +# Restore xtrace +$XTRACE \ No newline at end of file diff --git a/stack.sh b/stack.sh index 6c12c96f..3beb8b7c 100755 --- a/stack.sh +++ b/stack.sh @@ -133,8 +133,7 @@ if [[ $EUID -eq 0 ]]; then exit 1 else # We're not root, make sure sudo is available - dpkg -l sudo - die_if_error "Sudo is required. Re-run stack.sh as root ONE TIME ONLY to set up sudo." + dpkg -l sudo || die "Sudo is required. Re-run stack.sh as root ONE TIME ONLY to set up sudo." # UEC images /etc/sudoers does not have a '#includedir'. add one. sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || diff --git a/tests/functions.sh b/tests/functions.sh index 0fd76cca..69e8c0ab 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -11,43 +11,28 @@ source $TOP/functions source $TOP/openrc -echo "Testing die_if_error()" - -bash -c "source $TOP/functions; true; die_if_error 'not OK'" -if [[ $? != 0 ]]; then - echo "die_if_error [true] Failed" -fi - -bash -c "source $TOP/functions; false; die_if_error 'OK'" -if [[ $? = 0 ]]; then - echo "die_if_error [false] Failed" -else - echo 'OK' -fi - - echo "Testing die_if_not_set()" -bash -c "source $TOP/functions; X=`echo Y && true`; die_if_not_set X 'not OK'" +bash -cx "source $TOP/functions; X=`echo Y && true`; die_if_not_set X 'not OK'" if [[ $? != 0 ]]; then echo "die_if_not_set [X='Y' true] Failed" else echo 'OK' fi -bash -c "source $TOP/functions; X=`true`; die_if_not_set X 'OK'" +bash -cx "source $TOP/functions; X=`true`; die_if_not_set X 'OK'" if [[ $? = 0 ]]; then echo "die_if_not_set [X='' true] Failed" fi -bash -c "source $TOP/functions; X=`echo Y && false`; die_if_not_set X 'not OK'" +bash -cx "source $TOP/functions; X=`echo Y && false`; die_if_not_set X 'not OK'" if [[ $? != 0 ]]; then echo "die_if_not_set [X='Y' false] Failed" else echo 'OK' fi -bash -c "source $TOP/functions; X=`false`; die_if_not_set X 'OK'" +bash -cx "source $TOP/functions; X=`false`; die_if_not_set X 'OK'" if [[ $? 
= 0 ]]; then echo "die_if_not_set [X='' false] Failed" fi From 6aef757432595ec4aa318c20246bf1d6aaf681db Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Mon, 19 Mar 2012 07:54:16 -0700 Subject: [PATCH 437/967] Update glance pipelines to use context middleware * Glance no longer depends on keystone.middleware.glance_auth_token Change-Id: Ie634a007f710792eda810e479fae463c158ebc5f --- files/glance-api-paste.ini | 8 +------- files/glance-registry-paste.ini | 9 +-------- 2 files changed, 2 insertions(+), 15 deletions(-) diff --git a/files/glance-api-paste.ini b/files/glance-api-paste.ini index 583b70a8..ed39fccf 100644 --- a/files/glance-api-paste.ini +++ b/files/glance-api-paste.ini @@ -1,7 +1,7 @@ [pipeline:glance-api] #pipeline = versionnegotiation context apiv1app # NOTE: use the following pipeline for keystone -pipeline = versionnegotiation authtoken auth-context apiv1app +pipeline = versionnegotiation authtoken context apiv1app # To enable Image Cache Management API replace pipeline with below: # pipeline = versionnegotiation context imagecache apiv1app @@ -38,12 +38,6 @@ auth_host = %KEYSTONE_AUTH_HOST% auth_port = %KEYSTONE_AUTH_PORT% auth_protocol = %KEYSTONE_AUTH_PROTOCOL% auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/ -# FIXME(dtroyer): remove admin_token after auth_token is updated -admin_token = %SERVICE_TOKEN% admin_tenant_name = %SERVICE_TENANT_NAME% admin_user = %SERVICE_USERNAME% admin_password = %SERVICE_PASSWORD% - -[filter:auth-context] -paste.filter_factory = glance.common.wsgi:filter_factory -glance.filter_factory = keystone.middleware.glance_auth_token:KeystoneContextMiddleware diff --git a/files/glance-registry-paste.ini b/files/glance-registry-paste.ini index fe460d9e..987a8a49 100644 --- a/files/glance-registry-paste.ini +++ b/files/glance-registry-paste.ini @@ -1,7 +1,7 @@ [pipeline:glance-registry] #pipeline = context registryapp # NOTE: use the following pipeline for keystone -pipeline = authtoken auth-context context registryapp +pipeline = authtoken context registryapp [app:registryapp] paste.app_factory = glance.common.wsgi:app_factory @@ -22,13 +22,6 @@ auth_host = %KEYSTONE_AUTH_HOST% auth_port = %KEYSTONE_AUTH_PORT% auth_protocol = %KEYSTONE_AUTH_PROTOCOL% auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/ -# FIXME(dtroyer): remove admin_token after auth_token is updated -admin_token = %SERVICE_TOKEN% admin_tenant_name = %SERVICE_TENANT_NAME% admin_user = %SERVICE_USERNAME% admin_password = %SERVICE_PASSWORD% - -[filter:auth-context] -context_class = glance.registry.context.RequestContext -paste.filter_factory = glance.common.wsgi:filter_factory -glance.filter_factory = keystone.middleware.glance_auth_token:KeystoneContextMiddleware From ecdd8fc6ba872f7615bf78a1df71e3dc7d8e1d23 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Mon, 19 Mar 2012 13:50:45 +0000 Subject: [PATCH 438/967] Remove service_ and admin_token. - Remove cache = swift.cache as well from swift config as not needed as well. 
Change-Id: I39f0b7ed48e6ee8926cdc011a46fdd2d01880a89 --- files/glance-api-paste.ini | 4 ---- files/glance-registry-paste.ini | 4 ---- files/swift/proxy-server.conf | 14 +++++--------- stack.sh | 3 ++- 4 files changed, 7 insertions(+), 18 deletions(-) diff --git a/files/glance-api-paste.ini b/files/glance-api-paste.ini index ed39fccf..5cfd22f0 100644 --- a/files/glance-api-paste.ini +++ b/files/glance-api-paste.ini @@ -30,10 +30,6 @@ glance.filter_factory = glance.common.context:ContextMiddleware [filter:authtoken] paste.filter_factory = keystone.middleware.auth_token:filter_factory -# FIXME(dtroyer): remove these service_* entries after auth_token is updated -service_host = %KEYSTONE_SERVICE_HOST% -service_port = %KEYSTONE_SERVICE_PORT% -service_protocol = %KEYSTONE_SERVICE_PROTOCOL% auth_host = %KEYSTONE_AUTH_HOST% auth_port = %KEYSTONE_AUTH_PORT% auth_protocol = %KEYSTONE_AUTH_PROTOCOL% diff --git a/files/glance-registry-paste.ini b/files/glance-registry-paste.ini index 987a8a49..b792aa8e 100644 --- a/files/glance-registry-paste.ini +++ b/files/glance-registry-paste.ini @@ -14,10 +14,6 @@ glance.filter_factory = glance.common.context:ContextMiddleware [filter:authtoken] paste.filter_factory = keystone.middleware.auth_token:filter_factory -# FIXME(dtroyer): remove these service_* entries after auth_token is updated -service_host = %KEYSTONE_SERVICE_HOST% -service_port = %KEYSTONE_SERVICE_PORT% -service_protocol = %KEYSTONE_SERVICE_PROTOCOL% auth_host = %KEYSTONE_AUTH_HOST% auth_port = %KEYSTONE_AUTH_PORT% auth_protocol = %KEYSTONE_AUTH_PROTOCOL% diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf index e80c1d5e..1627af06 100644 --- a/files/swift/proxy-server.conf +++ b/files/swift/proxy-server.conf @@ -19,6 +19,8 @@ account_autocreate = true paste.filter_factory = keystone.middleware.swift_auth:filter_factory operator_roles = Member,admin +# NOTE(chmou): s3token middleware is not updated yet to use only +# username and password. [filter:s3token] paste.filter_factory = keystone.middleware.s3_token:filter_factory service_port = %KEYSTONE_SERVICE_PORT% @@ -29,21 +31,15 @@ auth_protocol = %KEYSTONE_AUTH_PROTOCOL% auth_token = %SERVICE_TOKEN% admin_token = %SERVICE_TOKEN% -[filter:tokenauth] +[filter:authtoken] paste.filter_factory = keystone.middleware.auth_token:filter_factory -# FIXME(dtroyer): remove these service_* entries after auth_token is updated -service_port = %KEYSTONE_SERVICE_PORT% -service_host = %KEYSTONE_SERVICE_HOST% -auth_port = %KEYSTONE_AUTH_PORT% auth_host = %KEYSTONE_AUTH_HOST% +auth_port = %KEYSTONE_AUTH_PORT% auth_protocol = %KEYSTONE_AUTH_PROTOCOL% -auth_token = %SERVICE_TOKEN% -# FIXME(dtroyer): remove admin_token after auth_token is updated -admin_token = %SERVICE_TOKEN% +auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/ admin_tenant_name = %SERVICE_TENANT_NAME% admin_user = %SERVICE_USERNAME% admin_password = %SERVICE_PASSWORD% -cache = swift.cache [filter:swift3] use = egg:swift#swift3 diff --git a/stack.sh b/stack.sh index 6c12c96f..452eb2fd 100755 --- a/stack.sh +++ b/stack.sh @@ -1079,7 +1079,7 @@ if is_service_enabled swift; then # which has some default username and password if you have # configured keystone it will checkout the directory. 
if is_service_enabled key; then - swift_auth_server="s3token tokenauth keystone" + swift_auth_server="s3token authtoken keystone" else swift_auth_server=tempauth fi @@ -1092,6 +1092,7 @@ if is_service_enabled swift; then s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g; s,%SERVICE_USERNAME%,swift,g; s,%SERVICE_PASSWORD%,$SERVICE_PASSWORD,g; + s,%KEYSTONE_SERVICE_PROTOCOL%,$KEYSTONE_SERVICE_PROTOCOL,g; s,%SERVICE_TOKEN%,${SERVICE_TOKEN},g; s,%KEYSTONE_SERVICE_PORT%,${KEYSTONE_SERVICE_PORT},g; s,%KEYSTONE_SERVICE_HOST%,${KEYSTONE_SERVICE_HOST},g; From 0ddcae6baa01468eb8d23c1e418bc81a3ef307e0 Mon Sep 17 00:00:00 2001 From: Yong Sheng Gong Date: Tue, 20 Mar 2012 21:17:39 +0800 Subject: [PATCH 439/967] move glance conf dir to /etc/glance bug 959735 Change-Id: I0593790fda6c2f3c9af7a8c930234d21e4acf643 --- AUTHORS | 1 + stack.sh | 17 +++++++++++------ 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/AUTHORS b/AUTHORS index 073315fc..86456159 100644 --- a/AUTHORS +++ b/AUTHORS @@ -26,4 +26,5 @@ Todd Willey Tres Henry Vishvananda Ishaya Yun Mao +Yong Sheng Gong Zhongyue Luo diff --git a/stack.sh b/stack.sh index 9f88a0bd..205567dc 100755 --- a/stack.sh +++ b/stack.sh @@ -814,6 +814,11 @@ fi # ------ if is_service_enabled g-reg; then + GLANCE_CONF_DIR=/etc/glance + if [[ ! -d $GLANCE_CONF_DIR ]]; then + sudo mkdir -p $GLANCE_CONF_DIR + fi + sudo chown `whoami` $GLANCE_CONF_DIR GLANCE_IMAGE_DIR=$DEST/glance/images # Delete existing images rm -rf $GLANCE_IMAGE_DIR @@ -845,22 +850,22 @@ if is_service_enabled g-reg; then } # Copy over our glance configurations and update them - GLANCE_REGISTRY_CONF=$GLANCE_DIR/etc/glance-registry.conf + GLANCE_REGISTRY_CONF=$GLANCE_CONF_DIR/glance-registry.conf cp $FILES/glance-registry.conf $GLANCE_REGISTRY_CONF glance_config $GLANCE_REGISTRY_CONF if [[ -e $FILES/glance-registry-paste.ini ]]; then - GLANCE_REGISTRY_PASTE_INI=$GLANCE_DIR/etc/glance-registry-paste.ini + GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini cp $FILES/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI glance_config $GLANCE_REGISTRY_PASTE_INI fi - GLANCE_API_CONF=$GLANCE_DIR/etc/glance-api.conf + GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf cp $FILES/glance-api.conf $GLANCE_API_CONF glance_config $GLANCE_API_CONF if [[ -e $FILES/glance-api-paste.ini ]]; then - GLANCE_API_PASTE_INI=$GLANCE_DIR/etc/glance-api-paste.ini + GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini cp $FILES/glance-api-paste.ini $GLANCE_API_PASTE_INI glance_config $GLANCE_API_PASTE_INI fi @@ -1426,12 +1431,12 @@ screen -r stack -X hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< # launch the glance registry service if is_service_enabled g-reg; then - screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=etc/glance-registry.conf" + screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" fi # launch the glance api and wait for it to answer before continuing if is_service_enabled g-api; then - screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=etc/glance-api.conf" + screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
http_proxy= wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then echo "g-api did not start" From ed111950f86300774ec963d644fc29ee5934052c Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 20 Mar 2012 15:08:15 -0700 Subject: [PATCH 440/967] Allow nova rate limiting to be disabled * fixes bug 959518 Change-Id: Ifc469a2e1cd90737420c17af513d9c39fb57ffb5 --- stack.sh | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 9f88a0bd..67c8162b 100755 --- a/stack.sh +++ b/stack.sh @@ -883,8 +883,8 @@ if is_service_enabled n-api; then # for Nova to validate Keystone tokens. # Allow rate limiting to be turned off for testing, like for Tempest - # NOTE: Set OSAPI_RATE_LIMIT=" " to turn OFF rate limiting - OSAPI_RATE_LIMIT=${OSAPI_RATE_LIMIT:-"ratelimit"} + # NOTE: Set API_RATE_LIMIT="False" to turn OFF rate limiting + API_RATE_LIMIT=${API_RATE_LIMIT:-"True"} # Remove legacy paste config if present rm -f $NOVA_DIR/bin/nova-api-paste.ini @@ -1320,10 +1320,14 @@ fi if [ "$SYSLOG" != "False" ]; then add_nova_opt "use_syslog=True" fi +if [ "$API_RATE_LIMIT" != "True" ]; then + add_nova_opt "api_rate_limit=False" +fi + # Provide some transition from EXTRA_FLAGS to EXTRA_OPTS if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; then - EXTRA_OPTS=$EXTRA_FLAGS + EXTRA_OPTS=$EXTRA_FLAGS fi # You can define extra nova conf flags by defining the array EXTRA_OPTS, From e9819d59502beb2470788a70467a23ee1dbbb8bb Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 21 Mar 2012 11:25:06 -0500 Subject: [PATCH 441/967] README updates: * Remove out of date branch example * Make formatting of variables and filenames consistient Change-Id: I24983b1bcf1531307ccb0af66f5ba0aeb39f4ae5 --- README.md | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index 3e57fc35..5c328937 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ -Devstack is a set of scripts and utilities to quickly deploy an OpenStack cloud. +DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud. # Goals -* To quickly build dev OpenStack environments in a clean oneiric environment +* To quickly build dev OpenStack environments in a clean Oneiric or Precise environment * To describe working configurations of OpenStack (which code branches work together? what do config files look like for those branches?) * To make it easier for developers to dive into OpenStack so that they can productively contribute without having to understand every part of the system at once * To make it easy to prototype cross-project features @@ -10,23 +10,23 @@ Devstack is a set of scripts and utilities to quickly deploy an OpenStack cloud. Read more at http://devstack.org (built from the gh-pages branch) -IMPORTANT: Be sure to carefully read stack.sh and any other scripts you execute before you run them, as they install software and may alter your networking configuration. We strongly recommend that you run stack.sh in a clean and disposable vm when you are first getting started. +IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you execute before you run them, as they install software and may alter your networking configuration. We strongly recommend that you run `stack.sh` in a clean and disposable vm when you are first getting started. 
# Devstack on Xenserver -If you would like to use Xenserver as the hypervisor, please refer to the instructions in ./tools/xen/README.md. +If you would like to use Xenserver as the hypervisor, please refer to the instructions in `./tools/xen/README.md`. # Versions -The devstack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[release]. For example, you can do the following to create a diablo OpenStack cloud: +The devstack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[release] in the DevStack repo. For example, you can do the following to create a diablo OpenStack cloud: git checkout stable/diablo ./stack.sh -Milestone builds are also available in this manner: +You can also pick specific OpenStack project releases by setting the appropriate `*_BRANCH` variables in `localrc` (look in `stackrc` for the default set). Usually just before a release there will be milestone-proposed branches that need to be tested:: - git checkout essex-3 - ./stack.sh + GLANCE_REPO=https://github.com/openstack/glance.git + GLANCE_BRANCH=milestone-proposed # Start A Dev Cloud @@ -55,22 +55,22 @@ If the EC2 API is your cup-o-tea, you can create credentials and use euca2ools: # Customizing -You can override environment variables used in stack.sh by creating file name 'localrc'. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host. +You can override environment variables used in `stack.sh` by creating file name `localrc`. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host. # Swift -Swift is not installed by default, you can enable easily by adding this to your localrc: +Swift is not installed by default, you can enable easily by adding this to your `localrc`: - ENABLED_SERVICE="$ENABLED_SERVICES,swift" + ENABLED_SERVICE="$ENABLED_SERVICES,swift" -If you want a minimal swift install with only swift and keystone you can have this instead in your localrc: +If you want a minimal Swift install with only Swift and Keystone you can have this instead in your `localrc`: - ENABLED_SERVICES="key,mysql,swift" + ENABLED_SERVICES="key,mysql,swift" -If you use swift with keystone, Swift will authenticate against it. You will need to make sure to use the keystone URL to auth against. +If you use Swift with Keystone, Swift will authenticate against it. You will need to make sure to use the Keystone URL to auth against. -Swift will be acting as a S3 endpoint for keystone so effectively replacing the **nova-objectore**. +Swift will be acting as a S3 endpoint for Keystone so effectively replacing the `nova-objectstore`. -Only swift proxy server is launched in the screen session all other services are started in background and managed by **swift-init* tool. +Only Swift proxy server is launched in the screen session all other services are started in background and managed by `swift-init` tool. -By default Swift will configure 3 replicas (and one spare) which could be IO intensive on a small vm, if you only want to do some quick testing of the API you can choose to only have one replica by customizing the variable SWIFT_REPLICAS in your localrc. 
+By default Swift will configure 3 replicas (and one spare) which could be IO intensive on a small vm, if you only want to do some quick testing of the API you can choose to only have one replica by customizing the variable `SWIFT_REPLICAS` in your `localrc`. From 6325907aa5cd4558936233a2427fd18025384376 Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Wed, 21 Mar 2012 14:42:30 -0700 Subject: [PATCH 442/967] Fix typo in prepare_dom0.sh Change-Id: I9ad4ff3f9490822428e12eadd454fe2c51affa71 --- tools/xen/prepare_dom0.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/prepare_dom0.sh b/tools/xen/prepare_dom0.sh index 49988bd6..71e9d6d3 100755 --- a/tools/xen/prepare_dom0.sh +++ b/tools/xen/prepare_dom0.sh @@ -35,7 +35,7 @@ if ! which git; then fi # Clone devstack -DEVSTACK=${DEVSTACKROOT:/root/devstack} +DEVSTACK=${DEVSTACKROOT:-"/root/devstack"} if [ ! -d $DEVSTACK ]; then git clone git://github.com/openstack-dev/devstack.git $DEVSTACK fi From 3c4c2d8418b4f2dc8121e432ebb52cf93710face Mon Sep 17 00:00:00 2001 From: Yong Sheng Gong Date: Fri, 23 Mar 2012 19:17:15 +0800 Subject: [PATCH 443/967] Move Quantum config files to /etc/quantum. We will have ovs_quantum_plugin.ini, plugins.ini and quantum.conf under /etc/quantum. Bug #959736 Change-Id: I04ae74e81aaffe346ab030ac98ea75a526c207a1 --- stack.sh | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/stack.sh b/stack.sh index fa597696..dfc1a0eb 100755 --- a/stack.sh +++ b/stack.sh @@ -1536,6 +1536,11 @@ fi # Quantum service if is_service_enabled q-svc; then + QUANTUM_CONF_DIR=/etc/quantum + if [[ ! -d $QUANTUM_CONF_DIR ]]; then + sudo mkdir -p $QUANTUM_CONF_DIR + fi + sudo chown `whoami` $QUANTUM_CONF_DIR if [[ "$Q_PLUGIN" = "openvswitch" ]]; then # Install deps # FIXME add to files/apts/quantum, but don't install if not needed! @@ -1548,11 +1553,13 @@ if is_service_enabled q-svc; then echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." 
exit 1 fi - QUANTUM_PLUGIN_INI_FILE=$QUANTUM_DIR/etc/plugins.ini + QUANTUM_PLUGIN_INI_FILE=$QUANTUM_CONF_DIR/plugins.ini + sudo cp $QUANTUM_DIR/etc/plugins.ini $QUANTUM_PLUGIN_INI_FILE # Make sure we're using the openvswitch plugin - sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE + sudo sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE fi - screen_it q-svc "cd $QUANTUM_DIR && PYTHONPATH=.:$QUANTUM_CLIENT_DIR:$PYTHONPATH python $QUANTUM_DIR/bin/quantum-server $QUANTUM_DIR/etc/quantum.conf" + sudo cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF_DIR/quantum.conf + screen_it q-svc "cd $QUANTUM_DIR && PYTHONPATH=.:$QUANTUM_CLIENT_DIR:$PYTHONPATH python $QUANTUM_DIR/bin/quantum-server $QUANTUM_CONF_DIR/quantum.conf" fi # Quantum agent (for compute nodes) @@ -1565,8 +1572,9 @@ if is_service_enabled q-agt; then sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int # Start up the quantum <-> openvswitch agent - QUANTUM_OVS_CONFIG_FILE=$QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini - sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/ovs_quantum/g" $QUANTUM_OVS_CONFIG_FILE + QUANTUM_OVS_CONFIG_FILE=$QUANTUM_CONF_DIR/ovs_quantum_plugin.ini + sudo cp $QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini $QUANTUM_OVS_CONFIG_FILE + sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/ovs_quantum/g" $QUANTUM_OVS_CONFIG_FILE screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_OVS_CONFIG_FILE -v" fi From 5440ac0e09acfbb45bc663af3d08fcd75f7595d2 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 23 Mar 2012 11:32:29 -0700 Subject: [PATCH 444/967] Install quantumclient from git before quantum. There is an issue with installing git dependency_links via setuptools develop command. We're working on it generally, but for now this will make quantum happier with devstack. 
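The net effect on ordering in stack.sh (condensed from the hunks below, without the is_service_enabled guards) is simply to clone and develop the client before the server, presumably so the quantum checkout already finds quantumclient installed instead of resolving it through dependency_links:

    # quantumclient first ...
    git_clone $QUANTUM_CLIENT_REPO $QUANTUM_CLIENT_DIR $QUANTUM_CLIENT_BRANCH
    cd $QUANTUM_CLIENT_DIR; sudo python setup.py develop
    # ... then quantum itself
    git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH
    cd $QUANTUM_DIR; sudo python setup.py develop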
Change-Id: I8df88f4bca9f2b43b1d56fc7e77e26793b4c4b9f --- stack.sh | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/stack.sh b/stack.sh index fa597696..5f7b9b83 100755 --- a/stack.sh +++ b/stack.sh @@ -646,14 +646,13 @@ if is_service_enabled horizon; then # django powered web control panel for openstack git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG fi +if is_service_enabled quantum; then + git_clone $QUANTUM_CLIENT_REPO $QUANTUM_CLIENT_DIR $QUANTUM_CLIENT_BRANCH +fi if is_service_enabled q-svc; then # quantum git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH fi -if is_service_enabled quantum; then - git_clone $QUANTUM_CLIENT_REPO $QUANTUM_CLIENT_DIR $QUANTUM_CLIENT_BRANCH -fi - if is_service_enabled m-svc; then # melange git_clone $MELANGE_REPO $MELANGE_DIR $MELANGE_BRANCH @@ -684,12 +683,12 @@ cd $NOVA_DIR; sudo python setup.py develop if is_service_enabled horizon; then cd $HORIZON_DIR; sudo python setup.py develop fi -if is_service_enabled q-svc; then - cd $QUANTUM_DIR; sudo python setup.py develop -fi if is_service_enabled quantum; then cd $QUANTUM_CLIENT_DIR; sudo python setup.py develop fi +if is_service_enabled q-svc; then + cd $QUANTUM_DIR; sudo python setup.py develop +fi if is_service_enabled m-svc; then cd $MELANGE_DIR; sudo python setup.py develop fi From 408b009ccda94a95d3b3999f6db2bd62e92cdfb9 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 15 Mar 2012 23:21:55 +0000 Subject: [PATCH 445/967] Allow skipping exercises. - Catch a special exit signal 55 to notify that we want to skip an excercise. - Move is_enabled_service to functions. - Fix bug 928390. Change-Id: Iebf7a6f30a0f305a2a70173fb6b988bc07e34292 --- HACKING.rst | 4 ++++ exercise.sh | 5 ++++- exercises/swift.sh | 3 +++ functions | 24 +++++++++++++++++++++++- stack.sh | 24 ------------------------ 5 files changed, 34 insertions(+), 26 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index a105a66e..7262cff6 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -136,6 +136,10 @@ These scripts are executed serially by ``exercise.sh`` in testing situations. FLOATING_IP=`euca-allocate-address | cut -f2` die_if_not_set FLOATING_IP "Failure allocating floating IP" +* If you want an exercise to be skipped when for example a service wasn't + enabled for the exercise to be run, you can exit your exercise with the + special exitcode 55 and it will be detected as skipped. + * The exercise scripts should only use the various OpenStack client binaries to interact with OpenStack. This specifically excludes any ``*-manage`` tools as those assume direct access to configuration and databases, as well as direct diff --git a/exercise.sh b/exercise.sh index 2072b23b..15f264f4 100755 --- a/exercise.sh +++ b/exercise.sh @@ -32,7 +32,10 @@ for script in $basenames; do echo Running $script echo "=====================================================================" $EXERCISE_DIR/$script.sh - if [[ $? -ne 0 ]] ; then + exitcode=$? + if [[ $exitcode == 55 ]]; then + skips="$skips $script" + elif [[ $exitcode -ne 0 ]] ; then failures="$failures $script" else passes="$passes $script" diff --git a/exercises/swift.sh b/exercises/swift.sh index d8b41a33..732445d3 100755 --- a/exercises/swift.sh +++ b/exercises/swift.sh @@ -36,6 +36,9 @@ source $TOP_DIR/exerciserc # Container name CONTAINER=ex-swift +# If swift is not enabled we exit with exitcode 55 which mean +# exercise is skipped. 
+is_service_enabled swift || exit 55 # Testing Swift # ============= diff --git a/functions b/functions index 7fd37c00..75c20d75 100644 --- a/functions +++ b/functions @@ -115,6 +115,28 @@ function git_clone { } +# is_service_enabled() checks if the service(s) specified as arguments are +# enabled by the user in **ENABLED_SERVICES**. +# +# If there are multiple services specified as arguments the test performs a +# boolean OR or if any of the services specified on the command line +# return true. +# +# There is a special cases for some 'catch-all' services:: +# **nova** returns true if any service enabled start with **n-** +# **glance** returns true if any service enabled start with **g-** +# **quantum** returns true if any service enabled start with **q-** +function is_service_enabled() { + services=$@ + for service in ${services}; do + [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 + [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0 + [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0 + [[ ${service} == "quantum" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 + done + return 1 +} + # Test if the named environment variable is set and not zero length # is_set env-var @@ -151,4 +173,4 @@ function trueorfalse() { } # Restore xtrace -$XTRACE \ No newline at end of file +$XTRACE diff --git a/stack.sh b/stack.sh index d77b1d5f..3a7fc5da 100755 --- a/stack.sh +++ b/stack.sh @@ -268,30 +268,6 @@ function read_password { set -o xtrace } -# is_service_enabled() checks if the service(s) specified as arguments are -# enabled by the user in **ENABLED_SERVICES**. -# -# If there are multiple services specified as arguments the test performs a -# boolean OR or if any of the services specified on the command line -# return true. -# -# There is a special cases for some 'catch-all' services:: -# **nova** returns true if any service enabled start with **n-** -# **glance** returns true if any service enabled start with **g-** -# **quantum** returns true if any service enabled start with **q-** - -function is_service_enabled() { - services=$@ - for service in ${services}; do - [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 - [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0 - [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0 - [[ ${service} == "quantum" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 - done - return 1 -} - - # Nova Network Configuration # -------------------------- From 08e07fb4c817796db06bf8b90982c3b7cc5c41f2 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Wed, 28 Mar 2012 15:23:58 -0400 Subject: [PATCH 446/967] Don't use $USERNAME in openrc. Fix bug 967429. Don't use $USERNAME as a variable in openrc. It's commonly set by default and can result in $OS_USERNAME getting set to something else unexpectedly, resulting in an environment that doesn't work. 
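A quick illustration of the failure mode (hypothetical value shown; not part of the patch): many login environments already export USERNAME, so the old fallback silently picked up the wrong identity:

    # Typical environment before sourcing openrc:
    #   USERNAME=jenkins                         # set by the login session
    # Old openrc:
    #   export OS_USERNAME=${USERNAME:-demo}     # becomes "jenkins", not "demo"
    # New openrc:
    #   export OS_USERNAME=${OS_USERNAME:-demo}  # stays "demo" unless set explicitly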
Change-Id: I6083a871209d30c81ca6876b1ef6c154aef7f598 --- openrc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/openrc b/openrc index 7aefb0ff..be7850b5 100644 --- a/openrc +++ b/openrc @@ -3,18 +3,18 @@ # source openrc [username] [tenantname] # # Configure a set of credentials for $TENANT/$USERNAME: -# Set TENANT to override the default tenant 'demo' -# Set USERNAME to override the default user name 'demo' +# Set OS_TENANT_NAME to override the default tenant 'demo' +# Set OS_USERNAME to override the default user name 'demo' # Set ADMIN_PASSWORD to set the password for 'admin' and 'demo' # NOTE: support for the old NOVA_* novaclient environment variables has # been removed. if [[ -n "$1" ]]; then - USERNAME=$1 + OS_USERNAME=$1 fi if [[ -n "$2" ]]; then - TENANT=$2 + OS_TENANT_NAME=$2 fi # Find the other rc files @@ -27,11 +27,11 @@ source $RC_DIR/stackrc # term **tenant** as the entity that owns resources. In some places references # still exist to the original Nova term **project** for this use. Also, # **tenant_name** is prefered to **tenant_id**. -export OS_TENANT_NAME=${TENANT:-demo} +export OS_TENANT_NAME=${OS_TENANT_NAME:-demo} # In addition to the owning entity (tenant), nova stores the entity performing # the action as the **user**. -export OS_USERNAME=${USERNAME:-demo} +export OS_USERNAME=${OS_USERNAME:-demo} # With Keystone you pass the keystone password instead of an api key. # Recent versions of novaclient use OS_PASSWORD instead of NOVA_API_KEYs From f5633ddb7d543397fb881a4d3cbf9207abd6b1de Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 28 Mar 2012 11:21:40 -0500 Subject: [PATCH 447/967] Add local.sh support and samples of local.sh and locarc Run $TOP_DIR/local.sh at the end of stack.sh if it exists and is executable. This allows the user to automatically perform local actions on every re-stack, such as creating custom flavors or specific tenants/users. Like localrc, this file is not distributed with DevStack so user modifications will be undisturbed. Add local.sh to .gitignore Examples of local.sh and localrc are in the samples/ directory. Change-Id: I0be6b4d80ce084981cac8a3a8f1dc9bc8c3bbd4e --- .gitignore | 1 + samples/local.sh | 59 +++++++++++++++++++++++++++++++++++++ samples/localrc | 77 ++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 10 +++++++ 4 files changed, 147 insertions(+) create mode 100755 samples/local.sh create mode 100644 samples/localrc diff --git a/.gitignore b/.gitignore index e4820903..c8d25605 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,4 @@ proto *.log src localrc +local.sh diff --git a/samples/local.sh b/samples/local.sh new file mode 100755 index 00000000..83637f98 --- /dev/null +++ b/samples/local.sh @@ -0,0 +1,59 @@ +#!/usr/bin/env bash + +# Sample ``local.sh`` for user-configurable tasks to run automatically +# at the sucessful conclusion of ``stack.sh``. + +# NOTE: Copy this file to the root ``devstack`` directory for it to +# work properly. + +# This is a collection of some of the things we have found to be useful to run +# after stack.sh to tweak the OpenStack configuration that DevStack produces. +# These should be considered as samples and are unsupported DevStack code. 
+ +# Keep track of the devstack directory +TOP_DIR=$(cd $(dirname "$0") && pwd) + +# Use openrc + stackrc + localrc for settings +source $TOP_DIR/stackrc + +# Destination path for installation ``DEST`` +DEST=${DEST:-/opt/stack} + + +# Import ssh keys +# --------------- + +# Import keys from the current user into the default OpenStack user (usually +# ``demo``) + +# Get OpenStack auth +source $TOP_DIR/openrc + +# Add first keypair found in localhost:$HOME/.ssh +for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do + if [[ -f $i ]]; then + nova keypair-add --pub_key=$i `hostname` + break + fi +done + + +# Create A Flavor +# --------------- + +# Get OpenStack admin auth +source $TOP_DIR/openrc admin admin + +# Name of new flavor +# set in ``localrc`` with ``DEFAULT_INSTANCE_TYPE=m1.micro`` +MI_NAME=m1.micro + +# Create micro flavor if not present +if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then + nova flavor-create $MI_NAME 6 128 0 1 +fi +# Other Uses +# ---------- + +# Add tcp/22 to default security group + diff --git a/samples/localrc b/samples/localrc new file mode 100644 index 00000000..4fb093dc --- /dev/null +++ b/samples/localrc @@ -0,0 +1,77 @@ +# Sample ``localrc`` for user-configurable variables in ``stack.sh`` + +# NOTE: Copy this file to the root ``devstack`` directory for it to work properly. + +# ``localrc`` is a user-maintained setings file that is sourced at the end of +# ``stackrc``. This gives it the ability to override any variables set in ``stackrc``. +# Also, most of the settings in ``stack.sh`` are written to only be set if no +# value has already been set; this lets ``localrc`` effectively override the +# default values. + +# This is a collection of some of the settings we have found to be useful +# in our DevStack development environments. Additional settings are described +# in http://devstack.org/localrc.html +# These should be considered as samples and are unsupported DevStack code. + + +# Minimal Contents +# ---------------- + +# While ``stack.sh`` is happy to run without ``localrc``, devlife is better when +# there are a few minimal variables set: + +# If the ``*_PASSWORD`` variables are not set here you will be prompted to enter +# values for them by ``stack.sh``. +ADMIN_PASSWORD=nomoresecrete +MYSQL_PASSWORD=stackdb +RABBIT_PASSWORD=stackqueue +SERVICE_PASSWORD=$ADMIN_PASSWORD + +# HOST_IP should be set manually for best results. It is auto-detected during the +# first run of ``stack.sh`` but often is indeterminate on later runs due to the IP +# being moved from an Ethernet interface to a bridge on the host. Setting it here +# also makes it available for ``openrc`` to include when setting ``OS_AUTH_URL``. +# ``HOST_IP`` is not set by default. +HOST_IP=w.x.y.z + + +# Set DevStack Install Directory +# ------------------------------ + +# The DevStack install directory is set by the ``DEST`` variable. By setting it +# early in ``localrc`` you can reference it in later variables. The default value +# is ``/opt/stack``. It can be useful to set it even though it is not changed from +# the default value. 
+DEST=/opt/stack + + +# Using milestone-proposed branches +# --------------------------------- + +# Uncomment these to grab the milestone-proposed branches from the repos: +#GLANCE_BRANCH=milestone-proposed +#HORIZON_BRANCH=milestone-proposed +#KEYSTONE_BRANCH=milestone-proposed +#KEYSTONECLIENT_BRANCH=milestone-proposed +#NOVA_BRANCH=milestone-proposed +#NOVACLIENT_BRANCH=milestone-proposed +#SWIFT_BRANCH=milestone-proposed + + +# Swift +# ----- + +# Swift is now used as the back-end for the S3-like object store. If Nova's +# objectstore (``n-obj`` in ``ENABLED_SERVICES``) is enabled, it will NOT +# run if Swift is enabled. Setting the hash value is required and you will +# be prompted for it if Swift is enabled so just set it to something already: +SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5 + +# For development purposes the default of 3 replicas is usually not required. +# Set this to 1 to save some resources: +SWIFT_REPLICAS=1 + +# The data for Swift is stored in the source tree by default (``$DEST/swift/data``) +# and can be moved by setting ``SWIFT_DATA_DIR``. The directory will be created +# if it does not exist. +SWIFT_DATA_DIR=$DEST/data diff --git a/stack.sh b/stack.sh index 3a7fc5da..fd2daaf8 100755 --- a/stack.sh +++ b/stack.sh @@ -1706,6 +1706,16 @@ if is_service_enabled g-reg; then fi +# Run local script +# ================ + +# Run ``local.sh`` if it exists to perform user-managed tasks +if [[ -x $TOP_DIR/local.sh ]]; then + echo "Running user script $TOP_DIR/local.sh" + $TOP_DIR/local.sh +fi + + # Fin # === From f4d2395e76a9b64176e301fbfd8c7a08c96f1c3a Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 28 Mar 2012 11:19:24 -0500 Subject: [PATCH 448/967] Add unstack.sh unstack.sh is a simple clean-up script to kill known running OpenStack processes: * quit devstack's screen session * stop swift daemons * stop apache * remove volumes Change-Id: I41b33817c4436e644c336e4e0673144ac0844c26 --- unstack.sh | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100755 unstack.sh diff --git a/unstack.sh b/unstack.sh new file mode 100755 index 00000000..cfe2de6d --- /dev/null +++ b/unstack.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash +# +# Stops that which is started by ``stack.sh`` (mostly) +# mysql and rabbit are left running as OpenStack code refreshes +# do not require them to be restarted. +# +# Stop all processes by setting UNSTACK_ALL or specifying ``--all`` +# on the command line + +# Keep track of the current devstack directory. +TOP_DIR=$(cd $(dirname "$0") && pwd) + +# Import common functions +source $TOP_DIR/functions + +# Load local configuration +source $TOP_DIR/stackrc + +# Determine what system we are running on. 
This provides ``os_VENDOR``, +# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` +GetOSVersion + +if [[ "$1" == "--all" ]]; then + UNSTACK_ALL=${UNSTACK_ALL:-1} +fi + +# Shut down devstack's screen to get the bulk of OpenStack services in one shot +SESSION=$(screen -ls | grep "[0-9].stack" | awk '{ print $1 }') +if [[ -n "$SESSION" ]]; then + screen -X -S $SESSION quit +fi + +# Swift runs daemons +if is_service_enabled swift; then + swift-init all stop +fi + +# Apache has the WSGI processes +if is_service_enabled horizon; then + stop_service apache2 +fi + +# Get the iSCSI volumes +if is_service_enabled n-vol; then + TARGETS=$(sudo tgtadm --op show --mode target) + if [[ -n "$TARGETS" ]]; then + # FIXME(dtroyer): this could very well require more here to + # clean up left-over volumes + echo "iSCSI target cleanup needed:" + echo "$TARGETS" + fi + sudo stop tgt +fi + +if [[ -n "$UNSTACK_ALL" ]]; then + # Stop MySQL server + if is_service_enabled mysql; then + stop_service mysql + fi +fi From fd1c87e83035438d58692bef63186598373a06af Mon Sep 17 00:00:00 2001 From: John Garbutt Date: Fri, 24 Feb 2012 14:52:54 +0000 Subject: [PATCH 449/967] blueprint host-aggregates add some inital tests for the host-aggregates blueprint Change-Id: I0c07f2e7fd123bbda6d26f4ff64bea3949e57157 --- exercises/aggregates.sh | 141 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 141 insertions(+) create mode 100755 exercises/aggregates.sh diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh new file mode 100755 index 00000000..38fac120 --- /dev/null +++ b/exercises/aggregates.sh @@ -0,0 +1,141 @@ +#!/usr/bin/env bash + +# **aggregates.sh** + +# This script demonstrates how to use host aggregates: +# * Create an Aggregate +# * Updating Aggregate details +# * Testing Aggregate metadata +# * Testing Aggregate delete +# * TODO(johngar) - test adding a host (idealy with two hosts) + +echo "**************************************************" +echo "Begin DevStack Exercise: $0" +echo "**************************************************" + +# This script exits on an error so that errors don't compound and you see +# only the first error that occured. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. 
+set -o xtrace + + +# Settings +# ======== + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc + +# Import exercise configuration +source $TOP_DIR/exerciserc + +# run test as the admin user +_OLD_USERNAME=$OS_USERNAME +OS_USERNAME=admin + + +# Create an aggregate +# =================== + +AGGREGATE_NAME=test_aggregate_$RANDOM +AGGREGATE_A_ZONE=nova + +exit_if_aggregate_present() { + aggregate_name=$1 + + if [ `nova aggregate-list | grep -c " $aggregate_name "` == 0 ]; then + echo "SUCCESS $aggregate_name not present" + else + echo "ERROR found aggregate: $aggregate_name" + exit -1 + fi +} + +exit_if_aggregate_present $AGGREGATE_NAME + +AGGREGATE_ID=`nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1` + +# check aggregate created +nova aggregate-list | grep -q " $AGGREGATE_NAME " || die "Aggregate $AGGREGATE_NAME not created" + + +# Ensure creating a duplicate fails +# ================================= + +if nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE; then + echo "ERROR could create duplicate aggregate" + exit -1 +fi + + +# Test aggregate-update (and aggregate-details) +# ============================================= +AGGREGATE_NEW_NAME=test_aggregate_$RANDOM + +nova aggregate-update $AGGREGATE_ID $AGGREGATE_NEW_NAME +nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NEW_NAME +nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE + +nova aggregate-update $AGGREGATE_ID $AGGREGATE_NAME $AGGREGATE_A_ZONE +nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NAME +nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE + + +# Test aggregate-set-metadata +# =========================== +META_DATA_1_KEY=asdf +META_DATA_2_KEY=foo +META_DATA_3_KEY=bar + +#ensure no metadata is set +nova aggregate-details $AGGREGATE_ID | grep {} + +nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_1_KEY}=123 +nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY +nova aggregate-details $AGGREGATE_ID | grep 123 + +nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_2_KEY}=456 +nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY +nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY + +nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_2_KEY ${META_DATA_3_KEY}=789 +nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY +nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY + +nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die "ERROR metadata was not cleared" + +nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY +nova aggregate-details $AGGREGATE_ID | grep {} + + +# Test aggregate-add/remove-host +# ============================== +if [ "$VIRT_DRIVER" == "xenserver" ]; then + echo "TODO(johngarbutt) add tests for add/remove host from aggregate" +fi + + +# Test aggregate-delete +# ===================== +nova aggregate-delete $AGGREGATE_ID +exit_if_aggregate_present $AGGREGATE_NAME + + +# Test complete +# ============= +OS_USERNAME=$_OLD_USERNAME +echo "AGGREGATE TEST PASSED" + +set +o xtrace +echo "**************************************************" +echo "End DevStack Exercise: $0" +echo "**************************************************" From 20121bd1dca3e02272c861cb2469277241007cb8 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Wed, 28 Mar 2012 16:22:52 -0400 Subject: [PATCH 
450/967] Add support for qcow2 images in $IMAGE_URLS. This patch adds support for loading a qcow2 image and using the 'bare' container format for all single file images. I tested this successfully by setting: IMAGE_URLS="http://berrange.fedorapeople.org/images/2012-02-29/f16-x86_64-openstack-sda.qcow2" Change-Id: Ia55ffd4957866a3d7b9fd7ba4c62e38663b35080 --- stack.sh | 38 ++++++++++++++++++++++++++------------ 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/stack.sh b/stack.sh index 3a7fc5da..27667445 100755 --- a/stack.sh +++ b/stack.sh @@ -1681,27 +1681,41 @@ if is_service_enabled g-reg; then *.img) IMAGE="$FILES/$IMAGE_FNAME"; IMAGE_NAME=$(basename "$IMAGE" ".img") + DISK_FORMAT=raw + CONTAINER_FORMAT=bare ;; *.img.gz) IMAGE="$FILES/${IMAGE_FNAME}" IMAGE_NAME=$(basename "$IMAGE" ".img.gz") + DISK_FORMAT=raw + CONTAINER_FORMAT=bare + ;; + *.qcow2) + IMAGE="$FILES/${IMAGE_FNAME}" + IMAGE_NAME=$(basename "$IMAGE" ".qcow2") + DISK_FORMAT=qcow2 + CONTAINER_FORMAT=bare ;; *) echo "Do not know what to do with $IMAGE_FNAME"; false;; esac - # Use glance client to add the kernel the root filesystem. - # We parse the results of the first upload to get the glance ID of the - # kernel for use when uploading the root filesystem. - KERNEL_ID=""; RAMDISK_ID=""; - if [ -n "$KERNEL" ]; then - RVAL=`glance add --silent-upload -A $TOKEN name="$IMAGE_NAME-kernel" is_public=true container_format=aki disk_format=aki < "$KERNEL"` - KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` - fi - if [ -n "$RAMDISK" ]; then - RVAL=`glance add --silent-upload -A $TOKEN name="$IMAGE_NAME-ramdisk" is_public=true container_format=ari disk_format=ari < "$RAMDISK"` - RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` + if [ "$CONTAINER_FORMAT" = "bare" ]; then + glance add --silent-upload -A $TOKEN name="$IMAGE_NAME" is_public=true container_format=$CONTAINER_FORMAT disk_format=$DISK_FORMAT < <(zcat --force "${IMAGE}") + else + # Use glance client to add the kernel the root filesystem. + # We parse the results of the first upload to get the glance ID of the + # kernel for use when uploading the root filesystem. + KERNEL_ID=""; RAMDISK_ID=""; + if [ -n "$KERNEL" ]; then + RVAL=`glance add --silent-upload -A $TOKEN name="$IMAGE_NAME-kernel" is_public=true container_format=aki disk_format=aki < "$KERNEL"` + KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` + fi + if [ -n "$RAMDISK" ]; then + RVAL=`glance add --silent-upload -A $TOKEN name="$IMAGE_NAME-ramdisk" is_public=true container_format=ari disk_format=ari < "$RAMDISK"` + RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` + fi + glance add -A $TOKEN name="${IMAGE_NAME%.img}" is_public=true container_format=ami disk_format=ami ${KERNEL_ID:+kernel_id=$KERNEL_ID} ${RAMDISK_ID:+ramdisk_id=$RAMDISK_ID} < <(zcat --force "${IMAGE}") fi - glance add -A $TOKEN name="${IMAGE_NAME%.img}" is_public=true container_format=ami disk_format=ami ${KERNEL_ID:+kernel_id=$KERNEL_ID} ${RAMDISK_ID:+ramdisk_id=$RAMDISK_ID} < <(zcat --force "${IMAGE}") done fi From 314da5a432d4cac2c63304fe084971db7e8066b4 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 28 Mar 2012 19:15:48 -0500 Subject: [PATCH 451/967] Handle additional directory structures in image upload There appear to be multiple forms if AMI archive directory structures in common use. 
Add the one used by the tty-linux so we can get rid of the special case for it to support UPLOAD_LEGACY_TTY (new): image-name/aki-tty/image image-name/ami-tty/image image-name/ari-tty/image and (existing): image-name/*.img image-name/*.initrd image-name/*.vmlinuz Change-Id: Ia7d88d53760f571c6a488c3139049502b484d33e --- stack.sh | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/stack.sh b/stack.sh index 27667445..a7ed1b9d 100755 --- a/stack.sh +++ b/stack.sh @@ -1637,17 +1637,8 @@ if is_service_enabled g-reg; then TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$ADMIN_USER\", \"password\": \"$ADMIN_PASSWORD\"}, \"tenantName\": \"$ADMIN_TENANT\"}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"` # Option to upload legacy ami-tty, which works with xenserver - if [ $UPLOAD_LEGACY_TTY ]; then - if [ ! -f $FILES/tty.tgz ]; then - wget -c http://images.ansolabs.com/tty.tgz -O $FILES/tty.tgz - fi - - tar -zxf $FILES/tty.tgz -C $FILES/images - RVAL=`glance add --silent-upload -A $TOKEN name="tty-kernel" is_public=true container_format=aki disk_format=aki < $FILES/images/aki-tty/image` - KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` - RVAL=`glance add --silent-upload -A $TOKEN name="tty-ramdisk" is_public=true container_format=ari disk_format=ari < $FILES/images/ari-tty/image` - RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` - glance add -A $TOKEN name="tty" is_public=true container_format=ami disk_format=ami kernel_id=$KERNEL_ID ramdisk_id=$RAMDISK_ID < $FILES/images/ami-tty/image + if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then + IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}http://images.ansolabs.com/tty.tgz" fi for image_url in ${IMAGE_URLS//,/ }; do @@ -1669,14 +1660,15 @@ if is_service_enabled g-reg; then rm -Rf "$xdir"; mkdir "$xdir" tar -zxf $FILES/$IMAGE_FNAME -C "$xdir" - KERNEL=$(for f in "$xdir/"*-vmlinuz*; do + KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do [ -f "$f" ] && echo "$f" && break; done; true) - RAMDISK=$(for f in "$xdir/"*-initrd*; do + RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do [ -f "$f" ] && echo "$f" && break; done; true) - IMAGE=$(for f in "$xdir/"*.img; do + IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do [ -f "$f" ] && echo "$f" && break; done; true) - [ -n "$IMAGE_NAME" ] - IMAGE_NAME=$(basename "$IMAGE" ".img") + if [[ -z "$IMAGE_NAME" ]]; then + IMAGE_NAME=$(basename "$IMAGE" ".img") + fi ;; *.img) IMAGE="$FILES/$IMAGE_FNAME"; From b315ddf82616d6f6157f13ab19961fa0338af7d3 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 29 Mar 2012 16:19:42 -0500 Subject: [PATCH 452/967] Fix oversight in container format handling https://review.openstack.org/5934 changed the container and disk format handling, but I forgot to reset the variables at the top of the loop (DOH!) 
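A condensed sketch of the bug being fixed (illustrative, reusing the variable names from the image-upload loop): without the reset, formats left over from one iteration leak into the next, e.g. a *.tar.gz image processed after a *.qcow2 image would inherit qcow2/bare and take the wrong upload path:

    for image_url in ${IMAGE_URLS//,/ }; do
        DISK_FORMAT=""
        CONTAINER_FORMAT=""
        # ... per-image case statement: the *.tar.gz branch never sets these,
        # so without the reset it would keep the previous image's values ...
    done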
Change-Id: I2782c06e7b4da1b69ecb72f847a5593bda522b7d --- stack.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stack.sh b/stack.sh index 27667445..1a9b0c65 100755 --- a/stack.sh +++ b/stack.sh @@ -1659,6 +1659,8 @@ if is_service_enabled g-reg; then KERNEL="" RAMDISK="" + DISK_FORMAT="" + CONTAINER_FORMAT="" case "$IMAGE_FNAME" in *.tar.gz|*.tgz) # Extract ami and aki files From 71f23ebeb7c45fb25b41d891e0fb42797c276f1e Mon Sep 17 00:00:00 2001 From: Gabriel Hurley Date: Wed, 15 Feb 2012 17:39:05 -0800 Subject: [PATCH 453/967] Converts all tables and connections to UTF8. This is https://review.openstack.org/4221 resurrected as the original was auto-abandoned. Nova database is handled in db migrations; this adds the nova.conf change noted by Naveed Massjouni in the original proposal. Connections to quantum and melange DB changed to utf8 References bug 933208. Change-Id: Ifc4e5cd71cafb719da62214aafc394563ed1aa29 --- stack.sh | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/stack.sh b/stack.sh index 430710cb..797b902b 100755 --- a/stack.sh +++ b/stack.sh @@ -803,7 +803,7 @@ if is_service_enabled g-reg; then # (re)create glance database mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS glance;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE glance;' + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE glance CHARACTER SET utf8;' function glance_config { sudo sed -e " @@ -814,7 +814,7 @@ if is_service_enabled g-reg; then s,%KEYSTONE_SERVICE_HOST%,$KEYSTONE_SERVICE_HOST,g; s,%KEYSTONE_SERVICE_PORT%,$KEYSTONE_SERVICE_PORT,g; s,%KEYSTONE_SERVICE_PROTOCOL%,$KEYSTONE_SERVICE_PROTOCOL,g; - s,%SQL_CONN%,$BASE_SQL_CONN/glance,g; + s,%SQL_CONN%,$BASE_SQL_CONN/glance?charset=utf8,g; s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g; s,%SERVICE_USERNAME%,glance,g; s,%SERVICE_PASSWORD%,$SERVICE_PASSWORD,g; @@ -1262,7 +1262,7 @@ add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE" if [ -n "$FLAT_INTERFACE" ]; then add_nova_opt "flat_interface=$FLAT_INTERFACE" fi -add_nova_opt "sql_connection=$BASE_SQL_CONN/nova" +add_nova_opt "sql_connection=$BASE_SQL_CONN/nova?charset=utf8" add_nova_opt "libvirt_type=$LIBVIRT_TYPE" add_nova_opt "instance_name_template=${INSTANCE_NAME_PREFIX}%08x" # All nova-compute workers need to know the vnc configuration options @@ -1426,12 +1426,12 @@ fi if is_service_enabled key; then # (re)create keystone database mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone;' + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone CHARACTER SET utf8;' # Configure keystone.conf KEYSTONE_CONF=$KEYSTONE_DIR/etc/keystone.conf cp $FILES/keystone.conf $KEYSTONE_CONF - sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/keystone,g" -i $KEYSTONE_CONF + sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/keystone?charset=utf8,g" -i $KEYSTONE_CONF sudo sed -e "s,%DEST%,$DEST,g" -i $KEYSTONE_CONF sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $KEYSTONE_CONF sudo sed -e "s,%KEYSTONE_DIR%,$KEYSTONE_DIR,g" -i $KEYSTONE_CONF @@ -1523,7 +1523,7 @@ if is_service_enabled q-svc; then # Create database for the plugin/agent if is_service_enabled mysql; then mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS ovs_quantum;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum;' + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum 
CHARACTER SET utf8;' else echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." exit 1 @@ -1549,7 +1549,7 @@ if is_service_enabled q-agt; then # Start up the quantum <-> openvswitch agent QUANTUM_OVS_CONFIG_FILE=$QUANTUM_CONF_DIR/ovs_quantum_plugin.ini sudo cp $QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini $QUANTUM_OVS_CONFIG_FILE - sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/ovs_quantum/g" $QUANTUM_OVS_CONFIG_FILE + sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/ovs_quantum?charset=utf8/g" $QUANTUM_OVS_CONFIG_FILE screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_OVS_CONFIG_FILE -v" fi @@ -1559,14 +1559,14 @@ fi if is_service_enabled m-svc; then if is_service_enabled mysql; then mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS melange;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE melange;' + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE melange CHARACTER SET utf8;' else echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." exit 1 fi MELANGE_CONFIG_FILE=$MELANGE_DIR/etc/melange/melange.conf cp $MELANGE_CONFIG_FILE.sample $MELANGE_CONFIG_FILE - sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/melange/g" $MELANGE_CONFIG_FILE + sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/melange?charset=utf8/g" $MELANGE_CONFIG_FILE cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-manage --config-file=$MELANGE_CONFIG_FILE db_sync screen_it m-svc "cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-server --config-file=$MELANGE_CONFIG_FILE" echo "Waiting for melange to start..." From 60df29a234a03bb6b94c030c1661dbc8d1421156 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 28 Mar 2012 09:40:17 -0700 Subject: [PATCH 454/967] Fix quantum deps * Compile linux headers * Install quantum before libvirt/n-cpu since openvswitch complains if bridges are present during installation * Fixes bug 968424 Rebased Change-Id: Iec7c029f264998ad9e23901bdf2129a404d057cd --- stack.sh | 233 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 119 insertions(+), 114 deletions(-) diff --git a/stack.sh b/stack.sh index 797b902b..1fb9564a 100755 --- a/stack.sh +++ b/stack.sh @@ -747,6 +747,51 @@ EOF sudo service mysql restart fi +# Our screenrc file builder +function screen_rc { + SCREENRC=$TOP_DIR/stack-screenrc + if [[ ! -e $SCREENRC ]]; then + # Name the screen session + echo "sessionname stack" > $SCREENRC + # Set a reasonable statusbar + echo 'hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H"' >> $SCREENRC + echo "screen -t stack bash" >> $SCREENRC + fi + # If this service doesn't already exist in the screenrc file + if ! 
grep $1 $SCREENRC 2>&1 > /dev/null; then + NL=`echo -ne '\015'` + echo "screen -t $1 bash" >> $SCREENRC + echo "stuff \"$2$NL\"" >> $SCREENRC + fi +} + +# Our screen helper to launch a service in a hidden named screen +function screen_it { + NL=`echo -ne '\015'` + if is_service_enabled $1; then + # Append the service to the screen rc file + screen_rc "$1" "$2" + + screen -S stack -X screen -t $1 + # sleep to allow bash to be ready to be send the command - we are + # creating a new window in screen and then sends characters, so if + # bash isn't running by the time we send the command, nothing happens + sleep 1.5 + + if [[ -n ${SCREEN_LOGDIR} ]]; then + screen -S stack -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log + screen -S stack -p $1 -X log on + ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log + fi + screen -S stack -p $1 -X stuff "$2$NL" + fi +} + +# create a new named screen to run processes in +screen -d -m -S stack -t stack -s /bin/bash +sleep 1 +# set a reasonable statusbar +screen -r stack -X hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H" # Horizon # ------- @@ -846,6 +891,80 @@ if is_service_enabled g-reg; then fi fi +# Quantum +# ------- + +# Quantum service +if is_service_enabled q-svc; then + QUANTUM_CONF_DIR=/etc/quantum + if [[ ! -d $QUANTUM_CONF_DIR ]]; then + sudo mkdir -p $QUANTUM_CONF_DIR + fi + sudo chown `whoami` $QUANTUM_CONF_DIR + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + # Install deps + # FIXME add to files/apts/quantum, but don't install if not needed! + kernel_version=`cat /proc/version | cut -d " " -f3` + apt_get install linux-headers-$kernel_version + apt_get install openvswitch-switch openvswitch-datapath-dkms + # Create database for the plugin/agent + if is_service_enabled mysql; then + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS ovs_quantum;' + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum CHARACTER SET utf8;' + else + echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." 
+ exit 1 + fi + QUANTUM_PLUGIN_INI_FILE=$QUANTUM_CONF_DIR/plugins.ini + sudo cp $QUANTUM_DIR/etc/plugins.ini $QUANTUM_PLUGIN_INI_FILE + # Make sure we're using the openvswitch plugin + sudo sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE + fi + sudo cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF_DIR/quantum.conf + screen_it q-svc "cd $QUANTUM_DIR && PYTHONPATH=.:$QUANTUM_CLIENT_DIR:$PYTHONPATH python $QUANTUM_DIR/bin/quantum-server $QUANTUM_CONF_DIR/quantum.conf" +fi + +# Quantum agent (for compute nodes) +if is_service_enabled q-agt; then + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + # Set up integration bridge + OVS_BRIDGE=${OVS_BRIDGE:-br-int} + sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE + sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE + sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int + + # Start up the quantum <-> openvswitch agent + QUANTUM_OVS_CONFIG_FILE=$QUANTUM_CONF_DIR/ovs_quantum_plugin.ini + sudo cp $QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini $QUANTUM_OVS_CONFIG_FILE + sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/ovs_quantum?charset=utf8/g" $QUANTUM_OVS_CONFIG_FILE + screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_OVS_CONFIG_FILE -v" + fi + +fi + +# Melange service +if is_service_enabled m-svc; then + if is_service_enabled mysql; then + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS melange;' + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE melange CHARACTER SET utf8;' + else + echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." + exit 1 + fi + MELANGE_CONFIG_FILE=$MELANGE_DIR/etc/melange/melange.conf + cp $MELANGE_CONFIG_FILE.sample $MELANGE_CONFIG_FILE + sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/melange?charset=utf8/g" $MELANGE_CONFIG_FILE + cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-manage --config-file=$MELANGE_CONFIG_FILE db_sync + screen_it m-svc "cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-server --config-file=$MELANGE_CONFIG_FILE" + echo "Waiting for melange to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9898; do sleep 1; done"; then + echo "melange-server did not start" + exit 1 + fi + melange mac_address_range create cidr=$M_MAC_RANGE +fi + + # Nova # ---- @@ -1362,52 +1481,6 @@ fi # so send the start command by forcing text into the window. # Only run the services specified in ``ENABLED_SERVICES`` -# Our screenrc file builder -function screen_rc { - SCREENRC=$TOP_DIR/stack-screenrc - if [[ ! -e $SCREENRC ]]; then - # Name the screen session - echo "sessionname stack" > $SCREENRC - # Set a reasonable statusbar - echo 'hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H"' >> $SCREENRC - echo "screen -t stack bash" >> $SCREENRC - fi - # If this service doesn't already exist in the screenrc file - if ! 
grep $1 $SCREENRC 2>&1 > /dev/null; then - NL=`echo -ne '\015'` - echo "screen -t $1 bash" >> $SCREENRC - echo "stuff \"$2$NL\"" >> $SCREENRC - fi -} - -# Our screen helper to launch a service in a hidden named screen -function screen_it { - NL=`echo -ne '\015'` - if is_service_enabled $1; then - # Append the service to the screen rc file - screen_rc "$1" "$2" - - screen -S stack -X screen -t $1 - # sleep to allow bash to be ready to be send the command - we are - # creating a new window in screen and then sends characters, so if - # bash isn't running by the time we send the command, nothing happens - sleep 1.5 - - if [[ -n ${SCREEN_LOGDIR} ]]; then - screen -S stack -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log - screen -S stack -p $1 -X log on - ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log - fi - screen -S stack -p $1 -X stuff "$2$NL" - fi -} - -# create a new named screen to run processes in -screen -d -m -S stack -t stack -s /bin/bash -sleep 1 -# set a reasonable statusbar -screen -r stack -X hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H" - # launch the glance registry service if is_service_enabled g-reg; then screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" @@ -1509,74 +1582,6 @@ if is_service_enabled n-api; then fi fi -# Quantum service -if is_service_enabled q-svc; then - QUANTUM_CONF_DIR=/etc/quantum - if [[ ! -d $QUANTUM_CONF_DIR ]]; then - sudo mkdir -p $QUANTUM_CONF_DIR - fi - sudo chown `whoami` $QUANTUM_CONF_DIR - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - # Install deps - # FIXME add to files/apts/quantum, but don't install if not needed! - apt_get install openvswitch-switch openvswitch-datapath-dkms - # Create database for the plugin/agent - if is_service_enabled mysql; then - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS ovs_quantum;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum CHARACTER SET utf8;' - else - echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." 
- exit 1 - fi - QUANTUM_PLUGIN_INI_FILE=$QUANTUM_CONF_DIR/plugins.ini - sudo cp $QUANTUM_DIR/etc/plugins.ini $QUANTUM_PLUGIN_INI_FILE - # Make sure we're using the openvswitch plugin - sudo sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE - fi - sudo cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF_DIR/quantum.conf - screen_it q-svc "cd $QUANTUM_DIR && PYTHONPATH=.:$QUANTUM_CLIENT_DIR:$PYTHONPATH python $QUANTUM_DIR/bin/quantum-server $QUANTUM_CONF_DIR/quantum.conf" -fi - -# Quantum agent (for compute nodes) -if is_service_enabled q-agt; then - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - # Set up integration bridge - OVS_BRIDGE=${OVS_BRIDGE:-br-int} - sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE - sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE - sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int - - # Start up the quantum <-> openvswitch agent - QUANTUM_OVS_CONFIG_FILE=$QUANTUM_CONF_DIR/ovs_quantum_plugin.ini - sudo cp $QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini $QUANTUM_OVS_CONFIG_FILE - sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/ovs_quantum?charset=utf8/g" $QUANTUM_OVS_CONFIG_FILE - screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_OVS_CONFIG_FILE -v" - fi - -fi - -# Melange service -if is_service_enabled m-svc; then - if is_service_enabled mysql; then - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS melange;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE melange CHARACTER SET utf8;' - else - echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." - exit 1 - fi - MELANGE_CONFIG_FILE=$MELANGE_DIR/etc/melange/melange.conf - cp $MELANGE_CONFIG_FILE.sample $MELANGE_CONFIG_FILE - sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/melange?charset=utf8/g" $MELANGE_CONFIG_FILE - cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-manage --config-file=$MELANGE_CONFIG_FILE db_sync - screen_it m-svc "cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-server --config-file=$MELANGE_CONFIG_FILE" - echo "Waiting for melange to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9898; do sleep 1; done"; then - echo "melange-server did not start" - exit 1 - fi - melange mac_address_range create cidr=$M_MAC_RANGE -fi - # If we're using Quantum (i.e. q-svc is enabled), network creation has to # happen after we've started the Quantum service. 
if is_service_enabled mysql && is_service_enabled nova; then From 13dc5ccd13a636dcde03324ef7586728caa59db2 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 27 Mar 2012 14:50:45 -0500 Subject: [PATCH 455/967] A) Add/move functions to 'functions' file Add ini*() and tests Add GetOSVersion() Add install_package(), yum_install() Add *_service() Rebased Change-Id: I570dba5ed4d2b988cdd1771cf6bed0aaf8e0fe27 --- functions | 163 ++++++++++++++++++++++++++++++++++++++++++++- stack.sh | 32 ++++----- tests/functions.sh | 86 ++++++++++++++++++++++++ 3 files changed, 263 insertions(+), 18 deletions(-) diff --git a/functions b/functions index 75c20d75..ecfda057 100644 --- a/functions +++ b/functions @@ -1,4 +1,7 @@ # functions - Common functions used by DevStack components +# +# ENABLED_SERVICES is used by is_service_enabled() + # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -6,7 +9,7 @@ set +o xtrace # apt-get wrapper to set arguments correctly -# apt_get package [package ...] +# apt_get operation package [package ...] function apt_get() { [[ "$OFFLINE" = "True" || -z "$@" ]] && return local sudo="sudo" @@ -70,6 +73,71 @@ function get_field() { } +# Determine OS Vendor, Release and Update +# Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora +# Returns results in global variables: +# os_VENDOR - vendor name +# os_RELEASE - release +# os_UPDATE - update +# os_PACKAGE - package type +# os_CODENAME - vendor's codename for release +# GetOSVersion +GetOSVersion() { + # Figure out which vendor we are + if [[ -n "`which sw_vers 2>/dev/null`" ]]; then + # OS/X + os_VENDOR=`sw_vers -productName` + os_RELEASE=`sw_vers -productVersion` + os_UPDATE=${os_RELEASE##*.} + os_RELEASE=${os_RELEASE%.*} + os_PACKAGE="" + if [[ "$os_RELEASE" =~ "10.7" ]]; then + os_CODENAME="lion" + elif [[ "$os_RELEASE" =~ "10.6" ]]; then + os_CODENAME="snow leopard" + elif [[ "$os_RELEASE" =~ "10.5" ]]; then + os_CODENAME="leopard" + elif [[ "$os_RELEASE" =~ "10.4" ]]; then + os_CODENAME="tiger" + elif [[ "$os_RELEASE" =~ "10.3" ]]; then + os_CODENAME="panther" + else + os_CODENAME="" + fi + elif [[ -x $(which lsb_release 2>/dev/null) ]]; then + os_VENDOR=$(lsb_release -i -s) + os_RELEASE=$(lsb_release -r -s) + os_UPDATE="" + if [[ "Debian,Ubuntu" =~ $os_VENDOR ]]; then + os_PACKAGE="deb" + else + os_PACKAGE="rpm" + fi + os_CODENAME=$(lsb_release -c -s) + elif [[ -r /etc/redhat-release ]]; then + # Red Hat Enterprise Linux Server release 5.5 (Tikanga) + # CentOS release 5.5 (Final) + # CentOS Linux release 6.0 (Final) + # Fedora release 16 (Verne) + os_CODENAME="" + for r in "Red Hat" CentOS Fedora; do + os_VENDOR=$r + if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then + ver=`sed -e 's/^.* \(.*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release` + os_CODENAME=${ver#*|} + os_RELEASE=${ver%|*} + os_UPDATE=${os_RELEASE##*.} + os_RELEASE=${os_RELEASE%.*} + break + fi + os_VENDOR="" + done + os_PACKAGE="rpm" + fi + export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME +} + + # git clone only if directory doesn't exist already. Since ``DEST`` might not # be owned by the installation user, we create the directory and change the # ownership to the proper user. 
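A minimal usage sketch of the distro-detection helper added above (not part of the patch; it assumes the DevStack 'functions' file is sourced from the current directory and uses only the os_* variables that GetOSVersion() exports):

#!/usr/bin/env bash
# Usage sketch only -- not part of the patch.  GetOSVersion() exports
# os_VENDOR, os_RELEASE, os_UPDATE, os_PACKAGE and os_CODENAME; callers
# branch on os_PACKAGE rather than the vendor name, as install_package()
# does elsewhere in this patch.
source ./functions

GetOSVersion
echo "Detected $os_VENDOR $os_RELEASE ($os_CODENAME); package format: $os_PACKAGE"

if [[ "$os_PACKAGE" = "deb" ]]; then
    echo "packages would be installed with: apt_get install ..."
else
    echo "packages would be installed with: yum_install ..."
fi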
@@ -115,6 +183,42 @@ function git_clone { } +# Comment an option in an INI file +# optset config-file section option +function inicomment() { + local file=$1 + local section=$2 + local option=$3 + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" $file +} + + +# Get an option from an INI file +# optget config-file section option +function iniget() { + local file=$1 + local section=$2 + local option=$3 + local line + line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" $file) + echo ${line#*=} +} + + +# Set an option in an INI file +# This is NOT a complete option setter, it assumes that the section and +# option already exist in the INI file. If the section does not exist, +# nothing happens. +# optset config-file section option value +function iniset() { + local file=$1 + local section=$2 + local option=$3 + local value=$4 + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" $file +} + + # is_service_enabled() checks if the service(s) specified as arguments are # enabled by the user in **ENABLED_SERVICES**. # @@ -138,6 +242,20 @@ function is_service_enabled() { } +# Distro-agnostic package installer +# install_package package [package ...] +function install_package() { + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + if [[ "$os_PACKAGE" = "deb" ]]; then + apt_get install "$@" + else + yum_install "$@" + fi +} + + # Test if the named environment variable is set and not zero length # is_set env-var function is_set() { @@ -153,10 +271,39 @@ function is_set() { # pip_install package [package ...] function pip_install { [[ "$OFFLINE" = "True" || -z "$@" ]] && return + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + if [[ "$os_PACKAGE" = "deb" ]]; then + CMD_PIP=/usr/bin/pip + else + CMD_PIP=/usr/bin/pip-python + fi sudo PIP_DOWNLOAD_CACHE=/var/cache/pip \ HTTP_PROXY=$http_proxy \ HTTPS_PROXY=$https_proxy \ - pip install --use-mirrors $@ + $CMD_PIP install --use-mirrors $@ +} + + +# Service wrapper to restart services +# restart_service service-name +function restart_service() { + sudo /usr/sbin/service $1 restart +} + + +# Service wrapper to start services +# start_service service-name +function start_service() { + sudo /usr/sbin/service $1 start +} + + +# Service wrapper to stop services +# stop_service service-name +function stop_service() { + sudo /usr/sbin/service $1 stop } @@ -172,5 +319,17 @@ function trueorfalse() { echo "$default" } + +# yum wrapper to set arguments correctly +# yum_install package [package ...] +function yum_install() { + [[ "$OFFLINE" = "True" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ + yum install -y "$@" +} + + # Restore xtrace $XTRACE diff --git a/stack.sh b/stack.sh index 1fb9564a..60e9c727 100755 --- a/stack.sh +++ b/stack.sh @@ -107,7 +107,7 @@ if [[ $EUID -eq 0 ]]; then # since this script runs as a normal user, we need to give that user # ability to run sudo - dpkg -l sudo || apt_get update && apt_get install sudo + dpkg -l sudo || apt_get update && install_package sudo if ! 
getent passwd stack >/dev/null; then echo "Creating a user called stack" @@ -268,6 +268,7 @@ function read_password { set -o xtrace } + # Nova Network Configuration # -------------------------- @@ -590,7 +591,7 @@ function get_packages() { # install apt requirements apt_get update -apt_get install $(get_packages $FILES/apts) +install_package $(get_packages $FILES/apts) # install python requirements pip_install $(get_packages $FILES/pips | sort -u) @@ -677,7 +678,7 @@ fi # ------ if [[ $SYSLOG != "False" ]]; then - apt_get install -y rsyslog-relp + install_package rsyslog-relp if [[ "$SYSLOG_HOST" = "$HOST_IP" ]]; then # Configure the master host to receive cat </tmp/90-stack-m.conf @@ -692,7 +693,7 @@ EOF EOF sudo mv /tmp/90-stack-s.conf /etc/rsyslog.d fi - sudo /usr/sbin/service rsyslog restart + restart_service rsyslog fi @@ -703,7 +704,7 @@ if is_service_enabled rabbit; then # Install and start rabbitmq-server # the temp file is necessary due to LP: #878600 tfile=$(mktemp) - apt_get install rabbitmq-server > "$tfile" 2>&1 + install_package rabbitmq-server > "$tfile" 2>&1 cat "$tfile" rm -f "$tfile" # change the rabbit password since the default is "guest" @@ -738,13 +739,13 @@ EOF fi # Install and start mysql-server - apt_get install mysql-server + install_package mysql-server # Update the DB to give user ‘$MYSQL_USER’@’%’ full control of the all databases: sudo mysql -uroot -p$MYSQL_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$MYSQL_USER'@'%' identified by '$MYSQL_PASSWORD';" # Edit /etc/mysql/my.cnf to change ‘bind-address’ from localhost (127.0.0.1) to any (0.0.0.0) and restart the mysql service: sudo sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf - sudo service mysql restart + restart_service mysql fi # Our screenrc file builder @@ -801,7 +802,7 @@ screen -r stack -X hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< if is_service_enabled horizon; then # Install apache2, which is NOPRIME'd - apt_get install apache2 libapache2-mod-wsgi + install_package apache2 libapache2-mod-wsgi # Remove stale session database. @@ -826,7 +827,7 @@ if is_service_enabled horizon; then s,%GROUP%,$APACHE_GROUP,g; s,%HORIZON_DIR%,$HORIZON_DIR,g; " -i /etc/apache2/sites-enabled/000-default - sudo service apache2 restart + restart_service apache2 fi @@ -905,8 +906,7 @@ if is_service_enabled q-svc; then # Install deps # FIXME add to files/apts/quantum, but don't install if not needed! kernel_version=`cat /proc/version | cut -d " " -f3` - apt_get install linux-headers-$kernel_version - apt_get install openvswitch-switch openvswitch-datapath-dkms + install_package openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version # Create database for the plugin/agent if is_service_enabled mysql; then mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS ovs_quantum;' @@ -1019,7 +1019,7 @@ if is_service_enabled n-cpu; then # Virtualization Configuration # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - apt_get install libvirt-bin + install_package libvirt-bin # Force IP forwarding on, just on case sudo sysctl -w net.ipv4.ip_forward=1 @@ -1043,7 +1043,7 @@ if is_service_enabled n-cpu; then # to simulate multiple systems. 
if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then if [[ "$DISTRO" > natty ]]; then - apt_get install cgroup-lite + install_package cgroup-lite else cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0" sudo mkdir -p /cgroup @@ -1062,7 +1062,7 @@ if is_service_enabled n-cpu; then # libvirt detects various settings on startup, as we potentially changed # the system configuration (modules, filesystems), we need to restart # libvirt to detect those changes. - sudo /etc/init.d/libvirt-bin restart + restart_service libvirt-bin # Instance Storage @@ -1113,7 +1113,7 @@ fi # Storage Service if is_service_enabled swift; then # Install memcached for swift. - apt_get install memcached + install_package memcached # We first do a bit of setup by creating the directories and # changing the permissions so we can run it as our user. @@ -1297,7 +1297,7 @@ if is_service_enabled n-vol; then # By default, the backing file is 2G in size, and is stored in /opt/stack. # install the package - apt_get install tgt + install_package tgt if ! sudo vgs $VOLUME_GROUP; then VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file} diff --git a/tests/functions.sh b/tests/functions.sh index 69e8c0ab..931cde81 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -37,3 +37,89 @@ if [[ $? = 0 ]]; then echo "die_if_not_set [X='' false] Failed" fi + +echo "Testing INI functions" + +cat >test.ini < Date: Tue, 3 Apr 2012 11:54:21 -0700 Subject: [PATCH 456/967] bug 965199: quantum devstack broken by previous commit that moved config files Change-Id: Id323e53d206304f4628e5710bb60252c48e4b615 --- stack.sh | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/stack.sh b/stack.sh index ae4ee87d..8b96956d 100755 --- a/stack.sh +++ b/stack.sh @@ -916,12 +916,17 @@ if is_service_enabled q-svc; then exit 1 fi QUANTUM_PLUGIN_INI_FILE=$QUANTUM_CONF_DIR/plugins.ini - sudo cp $QUANTUM_DIR/etc/plugins.ini $QUANTUM_PLUGIN_INI_FILE + # must remove this file from existing location, otherwise Quantum will prefer it + if [[ -e $QUANTUM_DIR/etc/plugins.ini ]]; then + sudo mv $QUANTUM_DIR/etc/plugins.ini $QUANTUM_PLUGIN_INI_FILE + fi # Make sure we're using the openvswitch plugin sudo sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE fi - sudo cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF_DIR/quantum.conf - screen_it q-svc "cd $QUANTUM_DIR && PYTHONPATH=.:$QUANTUM_CLIENT_DIR:$PYTHONPATH python $QUANTUM_DIR/bin/quantum-server $QUANTUM_CONF_DIR/quantum.conf" + if [[ -e $QUANTUM_DIR/etc/quantum.conf ]]; then + sudo mv $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF_DIR/quantum.conf + fi + screen_it q-svc "cd $QUANTUM_DIR && PYTHONPATH=.:$QUANTUM_CLIENT_DIR:$PYTHONPATH python $QUANTUM_DIR/bin/quantum-server $QUANTUM_CONF_DIR/quantum.conf" fi # Quantum agent (for compute nodes) @@ -933,11 +938,15 @@ if is_service_enabled q-agt; then sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int - # Start up the quantum <-> openvswitch agent - QUANTUM_OVS_CONFIG_FILE=$QUANTUM_CONF_DIR/ovs_quantum_plugin.ini - sudo cp $QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini $QUANTUM_OVS_CONFIG_FILE - sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/ovs_quantum?charset=utf8/g" $QUANTUM_OVS_CONFIG_FILE - screen_it q-agt "sleep 4; sudo python 
$QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_OVS_CONFIG_FILE -v" + # Start up the quantum <-> openvswitch agent + QUANTUM_OVS_CONF_DIR=$QUANTUM_CONF_DIR/plugins/openvswitch + mkdir -p $QUANTUM_OVS_CONF_DIR + QUANTUM_OVS_CONFIG_FILE=$QUANTUM_OVS_CONF_DIR/ovs_quantum_plugin.ini + if [[ -e $QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini ]]; then + sudo mv $QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini $QUANTUM_OVS_CONFIG_FILE + fi + sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/ovs_quantum?charset=utf8/g" $QUANTUM_OVS_CONFIG_FILE + screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_OVS_CONFIG_FILE -v" fi fi From dbdf6be8790305bbf1bb4e97369e097a4aa314cb Mon Sep 17 00:00:00 2001 From: Ben Andrews Date: Wed, 4 Apr 2012 14:43:32 -0400 Subject: [PATCH 457/967] added sec_groups exercise Change-Id: Ib969efab4ef4c408fa59a44eff25d2c4ac56d024 --- exercises/sec_groups.sh | 74 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100755 exercises/sec_groups.sh diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh new file mode 100755 index 00000000..49cb58fd --- /dev/null +++ b/exercises/sec_groups.sh @@ -0,0 +1,74 @@ +#!/usr/bin/env bash + +# **sec_groups.sh** + +# Test security groups via the command line tools that ship with it. + +echo "*********************************************************************" +echo "Begin DevStack Exercise: $0" +echo "*********************************************************************" + +# This script exits on an error so that errors don't compound and you see +# only the first error that occured. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. 
+set -o xtrace + + +# Settings +# ======== + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc + +# Import exercise configuration +source $TOP_DIR/exerciserc + + +# Testing Security Groups +# ============= + +# List security groups +nova secgroup-list + +# Create random name for new sec group and create secgroup of said name +SEC_GROUP_NAME="sec-group-$(openssl rand -hex 4)" +nova secgroup-create $SEC_GROUP_NAME 'a test security group' + +# Add some rules to the secgroup +RULES_TO_ADD=( 22 3389 5900 ) + +for RULE in "${RULES_TO_ADD[@]}"; do + nova secgroup-add-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/00 +done + +# Check to make sure rules were added +SEC_GROUP_RULES=( $(nova secgroup-list-rules $SEC_GROUP_NAME | grep -v \- | grep -v 'Source Group' | cut -d '|' -f3 | tr -d ' ') ) +for i in "${RULES_TO_ADD[@]}"; do + skip= + for j in "${SEC_GROUP_RULES[@]}"; do + [[ $i == $j ]] && { skip=1; break; } + done + [[ -n $skip ]] || exit 1 +done + +# Delete rules and secgroup +for RULE in "${RULES_TO_ADD[@]}"; do + nova secgroup-delete-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/00 +done +nova secgroup-delete $SEC_GROUP_NAME + + +set +o xtrace +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" From d01325f3e45d1a9a107db6bc9045146436a53d42 Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Wed, 4 Apr 2012 16:21:33 -0400 Subject: [PATCH 458/967] Get tempest configuration file up2date with trunk The old configure_tempest script wasn't writing an appropriate Tempest config file. This should get things updated to the point where Tempest should at least run properly without erroring out on configuration issues like we've been seeing... Change-Id: Ice7aca10e74c0a365e1638f7b3e423aa768d3074 --- tools/configure_tempest.sh | 75 ++++++++++++++++++++++++++++++++------ 1 file changed, 64 insertions(+), 11 deletions(-) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 9b25b7e8..01849ad3 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -125,22 +125,75 @@ if [[ -n "$IMAGE_NAME" ]]; then IMAGE_UUID=$(echo $IMAGE_UUID) fi -# Create tempest.conf from tempest.conf.sample +# Create tempest.conf from tempest.conf.tpl if [[ ! -r $TEMPEST_CONF ]]; then - cp $TEMPEST_CONF.sample $TEMPEST_CONF + cp $TEMPEST_CONF.tpl $TEMPEST_CONF fi +IDENTITY_USE_SSL=${IDENTITY_USE_SSL:-False} +IDENTITY_PORT=${IDENTITY_PORT:-5000} +IDENTITY_API_VERSION={$IDENTITY_API_VERSION:-v2.0} # Note: need v for now... +# TODO(jaypipes): This is dumb and needs to be removed +# from the Tempest configuration file entirely... +IDENTITY_PATH=${IDENTITY_PATH:-tokens} +IDENTITY_STRATEGY=${IDENTITY_STRATEGY:-keystone} + +# We use regular, non-admin users in Tempest for the USERNAME +# substitutions and use ADMIN_USERNAME et al for the admin stuff. +# OS_USERNAME et all should be defined in openrc. +OS_USERNAME=${OS_USERNAME:-demo} +OS_TENANT_NAME=${OS_TENANT_NAME:-demo} +OS_PASSWORD=${OS_PASSWORD:-secrete} + +# TODO(jaypipes): Support multiple regular user accounts instead +# of using the same regular user account for the alternate user... 
+ALT_USERNAME=$OS_USERNAME +ALT_PASSWORD=$OS_PASSWORD +ALT_TENANT_NAME=$OS_TENANT_NAME + +# TODO(jaypipes): Support multiple images instead of plopping +# the IMAGE_UUID into both the image_ref and image_ref_alt slots +IMAGE_UUID_ALT=$IMAGE_UUID + +# TODO(jaypipes): Support configurable flavor refs here... +FLAVOR_REF=1 +FLAVOR_REF_ALT=2 + +ADMIN_USERNAME={$ADMIN_USERNAME:-admin} +ADMIN_PASSWORD={$ADMIN_PASSWORD:-secrete} +ADMIN_TENANT_NAME={$ADMIN_TENANT:-admin} + +# Do any of the following need to be configurable? +COMPUTE_CATALOG_TYPE=compute +COMPUTE_CREATE_IMAGE_ENABLED=True +COMPUTE_RESIZE_AVAILABLE=True +COMPUTE_LOG_LEVEL=ERROR + sed -e " - /^api_key=/s|=.*\$|=$ADMIN_PASSWORD|; - /^auth_url=/s|=.*\$|=${OS_AUTH_URL%/}/|; - /^host=/s|=.*\$|=$HOST_IP|; - /^image_ref=/s|=.*\$|=$IMAGE_UUID|; - /^password=/s|=.*\$|=$ADMIN_PASSWORD|; - /^tenant=/s|=.*\$|=$TENANT|; - /^tenant_name=/s|=.*\$|=$TENANT|; - /^user=/s|=.*\$|=$USERNAME|; - /^username=/s|=.*\$|=$USERNAME|; + s,%IDENTITY_USE_SSL%,$IDENTITY_USE_SSL,g; + s,%IDENTITY_HOST%,$HOST_IP,g; + s,%IDENTITY_PORT%,$IDENTITY_PORT,g; + s,%IDENTITY_API_VERSION%,$IDENTITY_API_VERSION,g; + s,%IDENTITY_PATH%,$IDENTITY_PATH,g; + s,%IDENTITY_STRATEGY%,$IDENTITY_STRATEGY,g; + s,%USERNAME%,$OS_USERNAME,g; + s,%PASSWORD%,$OS_PASSWORD,g; + s,%TENANT_NAME%,$OS_TENANT_NAME,g; + s,%ALT_USERNAME%,$ALT_USERNAME,g; + s,%ALT_PASSWORD%,$ALT_PASSWORD,g; + s,%ALT_TENANT_NAME%,$ALT_TENANT_NAME,g; + s,%COMPUTE_CATALOG_TYPE%,$COMPUTE_CATALOG_TYPE,g; + s,%COMPUTE_CREATE_IMAGE_ENABLED%,$COMPUTE_CREATE_IMAGE_ENABLED,g; + s,%COMPUTE_RESIZE_AVAILABLE%,$COMPUTE_RESIZE_AVAILABLE,g; + s,%COMPUTE_LOG_LEVEL%,$COMPUTE_LOG_LEVEL,g; + s,%IMAGE_ID%,$IMAGE_UUID,g; + s,%IMAGE_ID_ALT%,$IMAGE_UUID_ALT,g; + s,%FLAVOR_REF%,$FLAVOR_REF,g; + s,%FLAVOR_REF_ALT%,$FLAVOR_REF_ALT,g; + s,%ADMIN_USERNAME%,$ADMIN_USERNAME,g; + s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g; + s,%ADMIN_TENANT_NAME%,$ADMIN_TENANT_NAME,g; " -i $TEMPEST_CONF # Create config.ini From d252e06e3b2b2de23f8fd14ae1853255e57c4aff Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 4 Apr 2012 13:46:27 -0700 Subject: [PATCH 459/967] Install dnsmasq-utils on precise as well Change-Id: I1a730d9ffdf934cfcec466d15453dd908b1a58de --- files/apts/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/apts/nova b/files/apts/nova index f2059ba7..66640c50 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -1,5 +1,5 @@ dnsmasq-base -dnsmasq-utils # for dhcp_release only available in dist:oneiric +dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise kpartx parted arping # only available in dist:natty From 1e51c11582fc2da61595ae94b60ae0b5d3e7956b Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 16 Mar 2012 10:42:00 -0500 Subject: [PATCH 460/967] C) Move swift defaults for config & data * change config vars _LOCATION to _DIR to be consistient with other vars * let config files live in /etc/swift like they want to * move swift backing files to $DEST/data/swift Change-Id: Idbd507736b46c5c2330e0255cc87f40068d3faf6 --- files/swift/account-server.conf | 2 +- files/swift/container-server.conf | 2 +- files/swift/object-server.conf | 2 +- files/swift/proxy-server.conf | 2 +- files/swift/rsyncd.conf | 24 +++--- stack.sh | 121 ++++++++++++++++-------------- 6 files changed, 82 insertions(+), 71 deletions(-) diff --git a/files/swift/account-server.conf b/files/swift/account-server.conf index db0f097f..763c306f 100644 --- a/files/swift/account-server.conf +++ b/files/swift/account-server.conf @@ -4,7 +4,7 
@@ mount_check = false bind_port = %BIND_PORT% user = %USER% log_facility = LOG_LOCAL%LOG_FACILITY% -swift_dir = %SWIFT_CONFIG_LOCATION% +swift_dir = %SWIFT_CONFIG_DIR% [pipeline:main] pipeline = account-server diff --git a/files/swift/container-server.conf b/files/swift/container-server.conf index bdc3e3a0..106dcab6 100644 --- a/files/swift/container-server.conf +++ b/files/swift/container-server.conf @@ -4,7 +4,7 @@ mount_check = false bind_port = %BIND_PORT% user = %USER% log_facility = LOG_LOCAL%LOG_FACILITY% -swift_dir = %SWIFT_CONFIG_LOCATION% +swift_dir = %SWIFT_CONFIG_DIR% [pipeline:main] pipeline = container-server diff --git a/files/swift/object-server.conf b/files/swift/object-server.conf index 2f888a27..7eea67d5 100644 --- a/files/swift/object-server.conf +++ b/files/swift/object-server.conf @@ -4,7 +4,7 @@ mount_check = false bind_port = %BIND_PORT% user = %USER% log_facility = LOG_LOCAL%LOG_FACILITY% -swift_dir = %SWIFT_CONFIG_LOCATION% +swift_dir = %SWIFT_CONFIG_DIR% [pipeline:main] pipeline = object-server diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf index 1627af06..ce5473b2 100644 --- a/files/swift/proxy-server.conf +++ b/files/swift/proxy-server.conf @@ -1,7 +1,7 @@ [DEFAULT] bind_port = 8080 user = %USER% -swift_dir = %SWIFT_CONFIG_LOCATION% +swift_dir = %SWIFT_CONFIG_DIR% workers = 1 log_name = swift log_facility = LOG_LOCAL1 diff --git a/files/swift/rsyncd.conf b/files/swift/rsyncd.conf index 66215c7f..4e0dcbf9 100644 --- a/files/swift/rsyncd.conf +++ b/files/swift/rsyncd.conf @@ -6,74 +6,74 @@ address = 127.0.0.1 [account6012] max connections = 25 -path = %SWIFT_DATA_LOCATION%/1/node/ +path = %SWIFT_DATA_DIR%/1/node/ read only = false lock file = /var/lock/account6012.lock [account6022] max connections = 25 -path = %SWIFT_DATA_LOCATION%/2/node/ +path = %SWIFT_DATA_DIR%/2/node/ read only = false lock file = /var/lock/account6022.lock [account6032] max connections = 25 -path = %SWIFT_DATA_LOCATION%/3/node/ +path = %SWIFT_DATA_DIR%/3/node/ read only = false lock file = /var/lock/account6032.lock [account6042] max connections = 25 -path = %SWIFT_DATA_LOCATION%/4/node/ +path = %SWIFT_DATA_DIR%/4/node/ read only = false lock file = /var/lock/account6042.lock [container6011] max connections = 25 -path = %SWIFT_DATA_LOCATION%/1/node/ +path = %SWIFT_DATA_DIR%/1/node/ read only = false lock file = /var/lock/container6011.lock [container6021] max connections = 25 -path = %SWIFT_DATA_LOCATION%/2/node/ +path = %SWIFT_DATA_DIR%/2/node/ read only = false lock file = /var/lock/container6021.lock [container6031] max connections = 25 -path = %SWIFT_DATA_LOCATION%/3/node/ +path = %SWIFT_DATA_DIR%/3/node/ read only = false lock file = /var/lock/container6031.lock [container6041] max connections = 25 -path = %SWIFT_DATA_LOCATION%/4/node/ +path = %SWIFT_DATA_DIR%/4/node/ read only = false lock file = /var/lock/container6041.lock [object6010] max connections = 25 -path = %SWIFT_DATA_LOCATION%/1/node/ +path = %SWIFT_DATA_DIR%/1/node/ read only = false lock file = /var/lock/object6010.lock [object6020] max connections = 25 -path = %SWIFT_DATA_LOCATION%/2/node/ +path = %SWIFT_DATA_DIR%/2/node/ read only = false lock file = /var/lock/object6020.lock [object6030] max connections = 25 -path = %SWIFT_DATA_LOCATION%/3/node/ +path = %SWIFT_DATA_DIR%/3/node/ read only = false lock file = /var/lock/object6030.lock [object6040] max connections = 25 -path = %SWIFT_DATA_LOCATION%/4/node/ +path = %SWIFT_DATA_DIR%/4/node/ read only = false lock file = /var/lock/object6040.lock 
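The stack.sh hunks that follow perform the actual %VAR% substitution for these templates. A dry-run sketch of that rendering step, writing to stdout so the substituted values can be inspected before anything lands in /etc/rsyncd.conf (the FILES and SWIFT_DATA_DIR defaults here are assumptions, not taken from the patch):

#!/usr/bin/env bash
# Dry-run sketch only -- not part of the patch.  Renders the rsyncd.conf
# template to stdout instead of piping it through sudo tee, so the
# %GROUP%/%USER%/%SWIFT_DATA_DIR% replacements can be checked first.
FILES=${FILES:-./files}
SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-/opt/stack/data/swift}
USER_GROUP=$(id -g)

sed -e "
    s/%GROUP%/${USER_GROUP}/;
    s/%USER%/$USER/;
    s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,;
" $FILES/swift/rsyncd.conf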
diff --git a/stack.sh b/stack.sh index 444a7381..28ae1227 100755 --- a/stack.sh +++ b/stack.sh @@ -376,13 +376,13 @@ GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292} # TODO: add logging to different location. # By default the location of swift drives and objects is located inside -# the swift source directory. SWIFT_DATA_LOCATION variable allow you to redefine +# the swift source directory. SWIFT_DATA_DIR variable allow you to redefine # this. -SWIFT_DATA_LOCATION=${SWIFT_DATA_LOCATION:-${SWIFT_DIR}/data} +SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DEST}/data/swift} # We are going to have the configuration files inside the source -# directory, change SWIFT_CONFIG_LOCATION if you want to adjust that. -SWIFT_CONFIG_LOCATION=${SWIFT_CONFIG_LOCATION:-${SWIFT_DIR}/config} +# directory, change SWIFT_CONFIG_DIR if you want to adjust that. +SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift} # devstack will create a loop-back disk formatted as XFS to store the # swift data. By default the disk size is 1 gigabyte. The variable @@ -1128,39 +1128,39 @@ if is_service_enabled swift; then # changing the permissions so we can run it as our user. USER_GROUP=$(id -g) - sudo mkdir -p ${SWIFT_DATA_LOCATION}/drives - sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_LOCATION} + sudo mkdir -p ${SWIFT_DATA_DIR}/drives + sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR} # We then create a loopback disk and format it to XFS. # TODO: Reset disks on new pass. - if [[ ! -e ${SWIFT_DATA_LOCATION}/drives/images/swift.img ]]; then - mkdir -p ${SWIFT_DATA_LOCATION}/drives/images - sudo touch ${SWIFT_DATA_LOCATION}/drives/images/swift.img - sudo chown $USER: ${SWIFT_DATA_LOCATION}/drives/images/swift.img + if [[ ! -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then + mkdir -p ${SWIFT_DATA_DIR}/drives/images + sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img + sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img - dd if=/dev/zero of=${SWIFT_DATA_LOCATION}/drives/images/swift.img \ + dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \ bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} - mkfs.xfs -f -i size=1024 ${SWIFT_DATA_LOCATION}/drives/images/swift.img + mkfs.xfs -f -i size=1024 ${SWIFT_DATA_DIR}/drives/images/swift.img fi # After the drive being created we mount the disk with a few mount # options to make it most efficient as possible for swift. - mkdir -p ${SWIFT_DATA_LOCATION}/drives/sdb1 - if ! egrep -q ${SWIFT_DATA_LOCATION}/drives/sdb1 /proc/mounts; then + mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1 + if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ - ${SWIFT_DATA_LOCATION}/drives/images/swift.img ${SWIFT_DATA_LOCATION}/drives/sdb1 + ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1 fi # We then create link to that mounted location so swift would know # where to go. 
for x in $(seq ${SWIFT_REPLICAS}); do - sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done + sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$x ${SWIFT_DATA_DIR}/$x; done # We now have to emulate a few different servers into one we # create all the directories needed for swift for x in $(seq ${SWIFT_REPLICAS}); do - drive=${SWIFT_DATA_LOCATION}/drives/sdb1/${x} - node=${SWIFT_DATA_LOCATION}/${x}/node + drive=${SWIFT_DATA_DIR}/drives/sdb1/${x} + node=${SWIFT_DATA_DIR}/${x}/node node_device=${node}/sdb1 [[ -d $node ]] && continue [[ -d $drive ]] && continue @@ -1169,17 +1169,23 @@ if is_service_enabled swift; then sudo chown -R $USER: ${node} done - sudo mkdir -p ${SWIFT_CONFIG_LOCATION}/{object,container,account}-server /var/run/swift - sudo chown -R $USER: ${SWIFT_CONFIG_LOCATION} /var/run/swift + sudo mkdir -p ${SWIFT_CONFIG_DIR}/{object,container,account}-server /var/run/swift + sudo chown -R $USER: ${SWIFT_CONFIG_DIR} /var/run/swift - # swift-init has a bug using /etc/swift until bug #885595 is fixed - # we have to create a link - sudo ln -sf ${SWIFT_CONFIG_LOCATION} /etc/swift + if [[ "$SWIFT_CONFIG_DIR" != "/etc/swift" ]]; then + # Some swift tools are hard-coded to use /etc/swift and are apparenty not going to be fixed. + # Create a symlink if the config dir is moved + sudo ln -sf ${SWIFT_CONFIG_DIR} /etc/swift + fi - # Swift use rsync to syncronize between all the different - # partitions (which make more sense when you have a multi-node - # setup) we configure it with our version of rsync. - sed -e "s/%GROUP%/${USER_GROUP}/;s/%USER%/$USER/;s,%SWIFT_DATA_LOCATION%,$SWIFT_DATA_LOCATION," $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf + # Swift use rsync to syncronize between all the different + # partitions (which make more sense when you have a multi-node + # setup) we configure it with our version of rsync. + sed -e " + s/%GROUP%/${USER_GROUP}/; + s/%USER%/$USER/; + s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,; + " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync # By default Swift will be installed with the tempauth middleware @@ -1194,7 +1200,7 @@ if is_service_enabled swift; then # We do the install of the proxy-server and swift configuration # replacing a few directives to match our configuration. sed -e " - s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},g; + s,%SWIFT_CONFIG_DIR%,${SWIFT_CONFIG_DIR},g; s,%USER%,$USER,g; s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g; s,%SERVICE_USERNAME%,swift,g; @@ -1209,35 +1215,40 @@ if is_service_enabled swift; then s,%KEYSTONE_AUTH_PROTOCOL%,${KEYSTONE_AUTH_PROTOCOL},g; s/%AUTH_SERVER%/${swift_auth_server}/g; " $FILES/swift/proxy-server.conf | \ - sudo tee ${SWIFT_CONFIG_LOCATION}/proxy-server.conf - - sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > ${SWIFT_CONFIG_LOCATION}/swift.conf - - # We need to generate a object/account/proxy configuration - # emulating 4 nodes on different ports we have a little function - # that help us doing that. 
- function generate_swift_configuration() { - local server_type=$1 - local bind_port=$2 - local log_facility=$3 - local node_number - - for node_number in $(seq ${SWIFT_REPLICAS}); do - node_path=${SWIFT_DATA_LOCATION}/${node_number} - sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \ - $FILES/swift/${server_type}-server.conf > ${SWIFT_CONFIG_LOCATION}/${server_type}-server/${node_number}.conf - bind_port=$(( ${bind_port} + 10 )) - log_facility=$(( ${log_facility} + 1 )) - done - } - generate_swift_configuration object 6010 2 - generate_swift_configuration container 6011 2 - generate_swift_configuration account 6012 2 + sudo tee ${SWIFT_CONFIG_DIR}/proxy-server.conf + + sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > ${SWIFT_CONFIG_DIR}/swift.conf + + # We need to generate a object/account/proxy configuration + # emulating 4 nodes on different ports we have a little function + # that help us doing that. + function generate_swift_configuration() { + local server_type=$1 + local bind_port=$2 + local log_facility=$3 + local node_number + + for node_number in $(seq ${SWIFT_REPLICAS}); do + node_path=${SWIFT_DATA_DIR}/${node_number} + sed -e " + s,%SWIFT_CONFIG_DIR%,${SWIFT_CONFIG_DIR},; + s,%USER%,$USER,; + s,%NODE_PATH%,${node_path},; + s,%BIND_PORT%,${bind_port},; + s,%LOG_FACILITY%,${log_facility}, + " $FILES/swift/${server_type}-server.conf > ${SWIFT_CONFIG_DIR}/${server_type}-server/${node_number}.conf + bind_port=$(( ${bind_port} + 10 )) + log_facility=$(( ${log_facility} + 1 )) + done + } + generate_swift_configuration object 6010 2 + generate_swift_configuration container 6011 2 + generate_swift_configuration account 6012 2 # We have some specific configuration for swift for rsyslog. See # the file /etc/rsyslog.d/10-swift.conf for more info. - swift_log_dir=${SWIFT_DATA_LOCATION}/logs + swift_log_dir=${SWIFT_DATA_DIR}/logs rm -rf ${swift_log_dir} mkdir -p ${swift_log_dir}/hourly sudo chown -R syslog:adm ${swift_log_dir} @@ -1247,7 +1258,7 @@ if is_service_enabled swift; then # This is where we create three different rings for swift with # different object servers binding on different ports. - pushd ${SWIFT_CONFIG_LOCATION} >/dev/null && { + pushd ${SWIFT_CONFIG_DIR} >/dev/null && { rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz @@ -1619,7 +1630,7 @@ screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_ screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF" screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log" -screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_LOCATION}/proxy-server.conf -v" +screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v" # Starting the nova-objectstore only if swift service is not enabled. # Swift will act as s3 objectstore. From 0a7a41eb1ebbbed7814e47690db8248576e4ed47 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Wed, 4 Apr 2012 17:47:56 -0500 Subject: [PATCH 461/967] Make the screen hardstatus line configurable. The default hardstatus line now includes the system load along with the hostname. Minor color changes. 
Change-Id: I70ebeef0981c741dd647c0e98df3f4b7e09de9cd --- AUTHORS | 1 + stack.sh | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/AUTHORS b/AUTHORS index 86456159..820a6773 100644 --- a/AUTHORS +++ b/AUTHORS @@ -17,6 +17,7 @@ Jason Cannavale Jay Pipes Jesse Andrews Johannes Erdfelt +Josh Kearney Justin Shepherd Ken Pepple Kiall Mac Innes diff --git a/stack.sh b/stack.sh index 444a7381..c82c2969 100755 --- a/stack.sh +++ b/stack.sh @@ -748,6 +748,10 @@ EOF restart_service mysql fi +if [ -z "$SCREEN_HARDSTATUS" ]; then + SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})' +fi + # Our screenrc file builder function screen_rc { SCREENRC=$TOP_DIR/stack-screenrc @@ -755,7 +759,7 @@ function screen_rc { # Name the screen session echo "sessionname stack" > $SCREENRC # Set a reasonable statusbar - echo 'hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H"' >> $SCREENRC + echo 'hardstatus alwayslastline "$SCREEN_HARDSTATUS"' >> $SCREENRC echo "screen -t stack bash" >> $SCREENRC fi # If this service doesn't already exist in the screenrc file @@ -792,7 +796,7 @@ function screen_it { screen -d -m -S stack -t stack -s /bin/bash sleep 1 # set a reasonable statusbar -screen -r stack -X hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H" +screen -r stack -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" # Horizon # ------- From c6c1d439240c0dcb5cd6d77ae608c407cdcd5e50 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 27 Mar 2012 20:59:22 -0500 Subject: [PATCH 462/967] D) Reorganize top of stack.sh Pull stack.sh sanity check reorg from multi-distro branch. This performs OS detection checks earlier and moves the Sanity Check section later so we have more information available. Change-Id: I5b9e64c4dc024a9ad90bd4f7e5ed86d601c0f610 --- stack.sh | 91 +++++++++++++++++++++++++++++++++----------------------- 1 file changed, 53 insertions(+), 38 deletions(-) diff --git a/stack.sh b/stack.sh index 444a7381..4fee1332 100755 --- a/stack.sh +++ b/stack.sh @@ -1,9 +1,8 @@ #!/usr/bin/env bash -# **stack.sh** is an opinionated OpenStack developer installation. - -# This script installs and configures various combinations of *Glance*, -# *Horizon*, *Keystone*, *Melange*, *Nova*, *Quantum* and *Swift* +# ``stack.sh`` is an opinionated OpenStack developer installation. It +# installs and configures various combinations of **Glance**, **Horizon**, +# **Keystone**, **Melange**, **Nova**, **Quantum** and **Swift** # This script allows you to specify configuration options of what git # repositories to use, enabled services, network configuration and various @@ -12,42 +11,30 @@ # developer install. # To keep this script simple we assume you are running on an **Ubuntu 11.10 -# Oneiric** machine. It should work in a VM or physical server. Additionally -# we put the list of *apt* and *pip* dependencies and other configuration files -# in this repo. So start by grabbing this script and the dependencies. +# Oneiric** or **Ubuntu 12.04 Precise** machine. It should work in a VM or +# physical server. Additionally we put the list of ``apt`` and ``pip`` +# dependencies and other configuration files in this repo. So start by +# grabbing this script and the dependencies. 
# Learn more and get the most recent version at http://devstack.org - -# Sanity Check -# ============ - -# Warn users who aren't on oneiric, but allow them to override check and attempt -# installation with ``FORCE=yes ./stack`` -DISTRO=$(lsb_release -c -s) - -if [[ ! ${DISTRO} =~ (oneiric|precise) ]]; then - echo "WARNING: this script has only been tested on oneiric" - if [[ "$FORCE" != "yes" ]]; then - echo "If you wish to run this script anyway run with FORCE=yes" - exit 1 - fi -fi - -# Keep track of the current devstack directory. +# Keep track of the devstack directory TOP_DIR=$(cd $(dirname "$0") && pwd) # Import common functions -. $TOP_DIR/functions +source $TOP_DIR/functions -# stack.sh keeps the list of **apt** and **pip** dependencies in external -# files, along with config templates and other useful files. You can find these -# in the ``files`` directory (next to this script). We will reference this -# directory using the ``FILES`` variable in this script. -FILES=$TOP_DIR/files -if [ ! -d $FILES ]; then - echo "ERROR: missing devstack/files - did you grab more than just stack.sh?" - exit 1 +# Determine what system we are running on. This provides ``os_VENDOR``, +# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` +GetOSVersion + +# Translate the OS version values into common nomenclature +if [[ "$os_VENDOR" =~ (Ubuntu) ]]; then + # 'Everyone' refers to Ubuntu releases by the code name adjective + DISTRO=$os_CODENAME +else + # Catch-all for now is Vendor + Release + Update + DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE" fi @@ -72,21 +59,49 @@ fi # # DevStack distributes ``stackrc`` which contains locations for the OpenStack # repositories and branches to configure. ``stackrc`` sources ``localrc`` to -# allow you to override those settings and not have your changes overwritten +# allow you to safely override those settings without being overwritten # when updating DevStack. -# We support HTTP and HTTPS proxy servers via the usual environment variables -# **http_proxy** and **https_proxy**. They can be set in ``localrc`` if necessary or -# on the command line:: +# HTTP and HTTPS proxy servers are supported via the usual environment variables +# ``http_proxy`` and ``https_proxy``. They can be set in ``localrc`` if necessary +# or on the command line:: # # http_proxy=http://proxy.example.com:3128/ ./stack.sh +if [[ ! -r $TOP_DIR/stackrc ]]; then + echo "ERROR: missing $TOP_DIR/stackrc - did you grab more than just stack.sh?" + exit 1 +fi source ./stackrc # Destination path for installation ``DEST`` DEST=${DEST:-/opt/stack} -# Check to see if we are already running a stack.sh + +# Sanity Check +# ============ + +# Warn users who aren't on an explicitly supported distro, but allow them to +# override check and attempt installation with ``FORCE=yes ./stack`` +if [[ ! ${DISTRO} =~ (oneiric|precise) ]]; then + echo "WARNING: this script has only been tested on oneiric and precise" + if [[ "$FORCE" != "yes" ]]; then + echo "If you wish to run this script anyway run with FORCE=yes" + exit 1 + fi +fi + +# stack.sh keeps the list of ``apt`` and ``pip`` dependencies in external +# files, along with config templates and other useful files. You can find these +# in the ``files`` directory (next to this script). We will reference this +# directory using the ``FILES`` variable in this script. +FILES=$TOP_DIR/files +if [ ! 
-d $FILES ]; then + echo "ERROR: missing devstack/files - did you grab more than just stack.sh?" + exit 1 +fi + +# Check to see if we are already running DevStack if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].stack"; then echo "You are already running a stack.sh session." echo "To rejoin this session type 'screen -x stack'." From 076e86aefee03358aad4ca1b53ed5b931924334c Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 5 Apr 2012 16:25:03 -0500 Subject: [PATCH 463/967] Properly generate the hardstatus for stack-screenrc. Fixes bug 974641. Change-Id: I56879083c6e9d6711afacb786f5e238ff843f55b --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index c82c2969..5401db2e 100755 --- a/stack.sh +++ b/stack.sh @@ -759,7 +759,7 @@ function screen_rc { # Name the screen session echo "sessionname stack" > $SCREENRC # Set a reasonable statusbar - echo 'hardstatus alwayslastline "$SCREEN_HARDSTATUS"' >> $SCREENRC + echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC echo "screen -t stack bash" >> $SCREENRC fi # If this service doesn't already exist in the screenrc file From 09e636e435ed15302e3960affef0a450fa7accf6 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 19 Mar 2012 16:31:12 -0500 Subject: [PATCH 464/967] B) Use keystone config files from source; move to /etc/keystone * Put all config files in /etc/keystone * keystone.conf rewritten * logging.conf.sample rewritten to logging.conf * default_catalog.templates copied from devstack/files * iniset() now properly adds options that do not previously exist Fixed to re-configure the catalog templated backend; sql is the default in trunk now but DevStack needs a bit more work before it can use it. Change-Id: Ic7060ef897e47495cd08ca3786e49fdebadf6723 --- files/keystone.conf | 99 --------------------------------------------- functions | 23 +++++++---- stack.sh | 78 ++++++++++++++++++++++------------- tests/functions.sh | 22 +++++++++- 4 files changed, 87 insertions(+), 135 deletions(-) delete mode 100644 files/keystone.conf diff --git a/files/keystone.conf b/files/keystone.conf deleted file mode 100644 index 1a924edd..00000000 --- a/files/keystone.conf +++ /dev/null @@ -1,99 +0,0 @@ -[DEFAULT] -bind_host = 0.0.0.0 -public_port = 5000 -admin_port = 35357 -admin_token = %SERVICE_TOKEN% -compute_port = 3000 -verbose = True -debug = True -# commented out so devstack logs to stdout -# log_file = %DEST%/keystone/keystone.log - -# ================= Syslog Options ============================ -# Send logs to syslog (/dev/log) instead of to file specified -# by `log-file` -use_syslog = False - -# Facility to use. If unset defaults to LOG_USER. 
-# syslog_log_facility = LOG_LOCAL0 - -[sql] -connection = %SQL_CONN% -idle_timeout = 30 -min_pool_size = 5 -max_pool_size = 10 -pool_timeout = 200 - -[identity] -driver = keystone.identity.backends.sql.Identity - -[catalog] -driver = keystone.catalog.backends.templated.TemplatedCatalog -template_file = %KEYSTONE_DIR%/etc/default_catalog.templates - -[token] -driver = keystone.token.backends.kvs.Token - -[policy] -driver = keystone.policy.backends.rules.Policy - -[ec2] -driver = keystone.contrib.ec2.backends.sql.Ec2 - -[filter:debug] -paste.filter_factory = keystone.common.wsgi:Debug.factory - -[filter:token_auth] -paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory - -[filter:admin_token_auth] -paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory - -[filter:xml_body] -paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory - -[filter:json_body] -paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory - -[filter:crud_extension] -paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory - -[filter:ec2_extension] -paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory - -[filter:s3_extension] -paste.filter_factory = keystone.contrib.s3:S3Extension.factory - -[app:public_service] -paste.app_factory = keystone.service:public_app_factory - -[app:admin_service] -paste.app_factory = keystone.service:admin_app_factory - -[pipeline:public_api] -pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension public_service - -[pipeline:admin_api] -pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension s3_extension crud_extension admin_service - -[app:public_version_service] -paste.app_factory = keystone.service:public_version_app_factory - -[app:admin_version_service] -paste.app_factory = keystone.service:admin_version_app_factory - -[pipeline:public_version_api] -pipeline = xml_body public_version_service - -[pipeline:admin_version_api] -pipeline = xml_body admin_version_service - -[composite:main] -use = egg:Paste#urlmap -/v2.0 = public_api -/ = public_version_api - -[composite:admin] -use = egg:Paste#urlmap -/v2.0 = admin_api -/ = admin_version_api diff --git a/functions b/functions index ecfda057..5114de10 100644 --- a/functions +++ b/functions @@ -184,7 +184,7 @@ function git_clone { # Comment an option in an INI file -# optset config-file section option +# iniset config-file section option function inicomment() { local file=$1 local section=$2 @@ -194,7 +194,7 @@ function inicomment() { # Get an option from an INI file -# optget config-file section option +# iniget config-file section option function iniget() { local file=$1 local section=$2 @@ -206,16 +206,25 @@ function iniget() { # Set an option in an INI file -# This is NOT a complete option setter, it assumes that the section and -# option already exist in the INI file. If the section does not exist, -# nothing happens. -# optset config-file section option value +# iniset config-file section option value function iniset() { local file=$1 local section=$2 local option=$3 local value=$4 - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" $file + if ! 
grep -q "^\[$section\]" $file; then + # Add section at the end + echo -e "\n[$section]" >>$file + fi + if [[ -z "$(iniget $file $section $option)" ]]; then + # Add it + sed -i -e "/^\[$section\]/ a\\ +$option = $value +" $file + else + # Replace it + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" $file + fi } diff --git a/stack.sh b/stack.sh index c82c2969..de4e926b 100755 --- a/stack.sh +++ b/stack.sh @@ -1514,16 +1514,42 @@ if is_service_enabled key; then mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;' mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone CHARACTER SET utf8;' - # Configure keystone.conf - KEYSTONE_CONF=$KEYSTONE_DIR/etc/keystone.conf - cp $FILES/keystone.conf $KEYSTONE_CONF - sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/keystone?charset=utf8,g" -i $KEYSTONE_CONF - sudo sed -e "s,%DEST%,$DEST,g" -i $KEYSTONE_CONF - sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $KEYSTONE_CONF - sudo sed -e "s,%KEYSTONE_DIR%,$KEYSTONE_DIR,g" -i $KEYSTONE_CONF + KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} + KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf + KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates - KEYSTONE_CATALOG=$KEYSTONE_DIR/etc/default_catalog.templates - cp $FILES/default_catalog.templates $KEYSTONE_CATALOG + if [[ ! -d $KEYSTONE_CONF_DIR ]]; then + sudo mkdir -p $KEYSTONE_CONF_DIR + sudo chown `whoami` $KEYSTONE_CONF_DIR + fi + + if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then + # FIXME(dtroyer): etc/keystone.conf causes trouble if the config files + # are located anywhere else (say, /etc/keystone). + # LP 966670 fixes this in keystone, we fix it + # here until the bug fix is committed. + if [[ -r $KEYSTONE_DIR/etc/keystone.conf ]]; then + # Get the sample config file out of the way + mv $KEYSTONE_DIR/etc/keystone.conf $KEYSTONE_DIR/etc/keystone.conf.sample + fi + cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF + cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR + fi + cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG + + # Rewrite stock keystone.conf: + iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN" + iniset $KEYSTONE_CONF sql connection "$BASE_SQL_CONN/keystone?charset=utf8" + iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG" + iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2" + # Configure keystone.conf to use templates + iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.TemplatedCatalog" + iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG" + sed -e " + /^pipeline.*ec2_extension crud_/s|ec2_extension crud_extension|ec2_extension s3_extension crud_extension|; + " -i $KEYSTONE_CONF + # Append the S3 bits + iniset $KEYSTONE_CONF filter:s3_extension paste.filter_factory "keystone.contrib.s3:S3Extension.factory" # Add swift endpoints to service catalog if swift is enabled if is_service_enabled swift; then @@ -1541,34 +1567,32 @@ if is_service_enabled key; then echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG fi - sudo sed -e "s,%SERVICE_HOST%,$SERVICE_HOST,g" -i $KEYSTONE_CATALOG - - sudo sed -e "s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g" -i $KEYSTONE_CATALOG + sudo sed -e " + s,%SERVICE_HOST%,$SERVICE_HOST,g; + s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g; + " -i $KEYSTONE_CATALOG + # Set up logging + LOGGING_ROOT="devel" if [ "$SYSLOG" != "False" ]; then - cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_DIR/etc/logging.conf - sed -i -e 
'/^handlers=devel$/s/=devel/=production/' \ - $KEYSTONE_DIR/etc/logging.conf - sed -i -e "/^log_file/s/log_file/\#log_file/" \ - $KEYSTONE_DIR/etc/keystone.conf - KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_DIR/etc/logging.conf" + LOGGING_ROOT="$LOGGING_ROOT,production" fi -fi + KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_CONF_DIR/logging.conf" + cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_CONF_DIR/logging.conf + iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG" + iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production" -# launch the keystone and wait for it to answer before continuing -if is_service_enabled key; then + # initialize keystone database + $KEYSTONE_DIR/bin/keystone-manage db_sync + + # launch keystone and wait for it to answer before continuing screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while http_proxy= wget -O- $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ 2>&1 | grep -q 'refused'; do sleep 1; done"; then echo "keystone did not start" exit 1 fi - # initialize keystone with default users/endpoints - pushd $KEYSTONE_DIR - $KEYSTONE_DIR/bin/keystone-manage db_sync - popd - # keystone_data.sh creates services, admin and demo users, and roles. SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \ diff --git a/tests/functions.sh b/tests/functions.sh index 931cde81..e7fbe0c5 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -98,7 +98,7 @@ fi VAL=$(iniget test.ini zzz handlers) if [[ -z "$VAL" ]]; then - echo "OK" + echo "OK: zzz not present" else echo "iniget failed: $VAL" fi @@ -106,12 +106,30 @@ fi iniset test.ini zzz handlers "999" VAL=$(iniget test.ini zzz handlers) +if [[ -n "$VAL" ]]; then + echo "OK: zzz not present" +else + echo "iniget failed: $VAL" +fi + + +# Test option not exist + +VAL=$(iniget test.ini aaa debug) if [[ -z "$VAL" ]]; then - echo "OK" + echo "OK aaa.debug not present" else echo "iniget failed: $VAL" fi +iniset test.ini aaa debug "999" + +VAL=$(iniget test.ini aaa debug) +if [[ -n "$VAL" ]]; then + echo "OK aaa.debug present" +else + echo "iniget failed: $VAL" +fi # Test comments From 9bab2597ca880e8aa3cc420021fcb656f778aa74 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 6 Apr 2012 12:10:05 -0500 Subject: [PATCH 465/967] Use glance config files from source tree This rewrites the config files for Glance API and Registry (-paste.ini and .conf) rather than use templates from devstack/files. 
Fixes bug 950289 Change-Id: I90151e36bb5f778499481e9abe0d8637d41f19aa --- files/glance-api-paste.ini | 39 --------- files/glance-api.conf | 139 -------------------------------- files/glance-registry-paste.ini | 23 ------ files/glance-registry.conf | 44 ---------- stack.sh | 67 ++++++++------- 5 files changed, 32 insertions(+), 280 deletions(-) delete mode 100644 files/glance-api-paste.ini delete mode 100644 files/glance-api.conf delete mode 100644 files/glance-registry-paste.ini delete mode 100644 files/glance-registry.conf diff --git a/files/glance-api-paste.ini b/files/glance-api-paste.ini deleted file mode 100644 index 5cfd22f0..00000000 --- a/files/glance-api-paste.ini +++ /dev/null @@ -1,39 +0,0 @@ -[pipeline:glance-api] -#pipeline = versionnegotiation context apiv1app -# NOTE: use the following pipeline for keystone -pipeline = versionnegotiation authtoken context apiv1app - -# To enable Image Cache Management API replace pipeline with below: -# pipeline = versionnegotiation context imagecache apiv1app -# NOTE: use the following pipeline for keystone auth (with caching) -# pipeline = versionnegotiation authtoken auth-context imagecache apiv1app - -[app:apiv1app] -paste.app_factory = glance.common.wsgi:app_factory -glance.app_factory = glance.api.v1.router:API - -[filter:versionnegotiation] -paste.filter_factory = glance.common.wsgi:filter_factory -glance.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter - -[filter:cache] -paste.filter_factory = glance.common.wsgi:filter_factory -glance.filter_factory = glance.api.middleware.cache:CacheFilter - -[filter:cachemanage] -paste.filter_factory = glance.common.wsgi:filter_factory -glance.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter - -[filter:context] -paste.filter_factory = glance.common.wsgi:filter_factory -glance.filter_factory = glance.common.context:ContextMiddleware - -[filter:authtoken] -paste.filter_factory = keystone.middleware.auth_token:filter_factory -auth_host = %KEYSTONE_AUTH_HOST% -auth_port = %KEYSTONE_AUTH_PORT% -auth_protocol = %KEYSTONE_AUTH_PROTOCOL% -auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/ -admin_tenant_name = %SERVICE_TENANT_NAME% -admin_user = %SERVICE_USERNAME% -admin_password = %SERVICE_PASSWORD% diff --git a/files/glance-api.conf b/files/glance-api.conf deleted file mode 100644 index b4ba098a..00000000 --- a/files/glance-api.conf +++ /dev/null @@ -1,139 +0,0 @@ -[DEFAULT] -# Show more verbose log output (sets INFO log level output) -verbose = True - -# Show debugging output in logs (sets DEBUG log level output) -debug = True - -# Which backend store should Glance use by default is not specified -# in a request to add a new image to Glance? Default: 'file' -# Available choices are 'file', 'swift', and 's3' -default_store = file - -# Address to bind the API server -bind_host = 0.0.0.0 - -# Port the bind the API server to -bind_port = 9292 - -# Address to find the registry server -registry_host = 0.0.0.0 - -# Port the registry server is listening on -registry_port = 9191 - -# Log to this file. Make sure you do not set the same log -# file for both the API and registry servers! -#log_file = %DEST%/glance/api.log - -# Send logs to syslog (/dev/log) instead of to file specified by `log_file` -use_syslog = %SYSLOG% - -# ============ Notification System Options ===================== - -# Notifications can be sent when images are create, updated or deleted. 
-# There are three methods of sending notifications, logging (via the -# log_file directive), rabbit (via a rabbitmq queue) or noop (no -# notifications sent, the default) -notifier_strategy = noop - -# Configuration options if sending notifications via rabbitmq (these are -# the defaults) -rabbit_host = localhost -rabbit_port = 5672 -rabbit_use_ssl = false -rabbit_userid = guest -rabbit_password = guest -rabbit_virtual_host = / -rabbit_notification_topic = glance_notifications - -# ============ Filesystem Store Options ======================== - -# Directory that the Filesystem backend store -# writes image data to -filesystem_store_datadir = %DEST%/glance/images/ - -# ============ Swift Store Options ============================= - -# Address where the Swift authentication service lives -swift_store_auth_address = 127.0.0.1:8080/v1.0/ - -# User to authenticate against the Swift authentication service -swift_store_user = jdoe - -# Auth key for the user authenticating against the -# Swift authentication service -swift_store_key = a86850deb2742ec3cb41518e26aa2d89 - -# Container within the account that the account should use -# for storing images in Swift -swift_store_container = glance - -# Do we create the container if it does not exist? -swift_store_create_container_on_put = False - -# What size, in MB, should Glance start chunking image files -# and do a large object manifest in Swift? By default, this is -# the maximum object size in Swift, which is 5GB -swift_store_large_object_size = 5120 - -# When doing a large object manifest, what size, in MB, should -# Glance write chunks to Swift? This amount of data is written -# to a temporary disk buffer during the process of chunking -# the image file, and the default is 200MB -swift_store_large_object_chunk_size = 200 - -# Whether to use ServiceNET to communicate with the Swift storage servers. -# (If you aren't RACKSPACE, leave this False!) -# -# To use ServiceNET for authentication, prefix hostname of -# `swift_store_auth_address` with 'snet-'. -# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/ -swift_enable_snet = False - -# ============ S3 Store Options ============================= - -# Address where the S3 authentication service lives -s3_store_host = 127.0.0.1:8080/v1.0/ - -# User to authenticate against the S3 authentication service -s3_store_access_key = <20-char AWS access key> - -# Auth key for the user authenticating against the -# S3 authentication service -s3_store_secret_key = <40-char AWS secret key> - -# Container within the account that the account should use -# for storing images in S3. Note that S3 has a flat namespace, -# so you need a unique bucket name for your glance images. An -# easy way to do this is append your AWS access key to "glance". -# S3 buckets in AWS *must* be lowercased, so remember to lowercase -# your AWS access key if you use it in your bucket name below! -s3_store_bucket = glance - -# Do we create the bucket if it does not exist? 
-s3_store_create_bucket_on_put = False - -# ============ Image Cache Options ======================== - -image_cache_enabled = False - -# Directory that the Image Cache writes data to -# Make sure this is also set in glance-pruner.conf -image_cache_datadir = /var/lib/glance/image-cache/ - -# Number of seconds after which we should consider an incomplete image to be -# stalled and eligible for reaping -image_cache_stall_timeout = 86400 - -# ============ Delayed Delete Options ============================= - -# Turn on/off delayed delete -delayed_delete = False - -# Delayed delete time in seconds -scrub_time = 43200 - -# Directory that the scrubber will use to remind itself of what to delete -# Make sure this is also set in glance-scrubber.conf -scrubber_datadir = /var/lib/glance/scrubber diff --git a/files/glance-registry-paste.ini b/files/glance-registry-paste.ini deleted file mode 100644 index b792aa8e..00000000 --- a/files/glance-registry-paste.ini +++ /dev/null @@ -1,23 +0,0 @@ -[pipeline:glance-registry] -#pipeline = context registryapp -# NOTE: use the following pipeline for keystone -pipeline = authtoken context registryapp - -[app:registryapp] -paste.app_factory = glance.common.wsgi:app_factory -glance.app_factory = glance.registry.api.v1:API - -[filter:context] -context_class = glance.registry.context.RequestContext -paste.filter_factory = glance.common.wsgi:filter_factory -glance.filter_factory = glance.common.context:ContextMiddleware - -[filter:authtoken] -paste.filter_factory = keystone.middleware.auth_token:filter_factory -auth_host = %KEYSTONE_AUTH_HOST% -auth_port = %KEYSTONE_AUTH_PORT% -auth_protocol = %KEYSTONE_AUTH_PROTOCOL% -auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/ -admin_tenant_name = %SERVICE_TENANT_NAME% -admin_user = %SERVICE_USERNAME% -admin_password = %SERVICE_PASSWORD% diff --git a/files/glance-registry.conf b/files/glance-registry.conf deleted file mode 100644 index 2c327457..00000000 --- a/files/glance-registry.conf +++ /dev/null @@ -1,44 +0,0 @@ -[DEFAULT] -# Show more verbose log output (sets INFO log level output) -verbose = True - -# Show debugging output in logs (sets DEBUG log level output) -debug = True - -# Address to bind the registry server -bind_host = 0.0.0.0 - -# Port the bind the registry server to -bind_port = 9191 - -# Log to this file. Make sure you do not set the same log -# file for both the API and registry servers! -#log_file = %DEST%/glance/registry.log - -# Where to store images -filesystem_store_datadir = %DEST%/glance/images - -# Send logs to syslog (/dev/log) instead of to file specified by `log_file` -use_syslog = %SYSLOG% - -# SQLAlchemy connection string for the reference implementation -# registry server. Any valid SQLAlchemy connection string is fine. -# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine -sql_connection = %SQL_CONN% - -# Period in seconds after which SQLAlchemy should reestablish its connection -# to the database. -# -# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop -# idle connections. This can result in 'MySQL Gone Away' exceptions. If you -# notice this, you can lower this value to ensure that SQLAlchemy reconnects -# before MySQL can drop the connection. -sql_idle_timeout = 3600 - -# Limit the api to return `param_limit_max` items in a call to a container. If -# a larger `limit` query param is provided, it will be reduced to this value. 
-api_limit_max = 1000 - -# If a `limit` query param is not provided in an api request, it will -# default to `limit_param_default` -limit_param_default = 25 diff --git a/stack.sh b/stack.sh index fd1d97f0..179b6538 100755 --- a/stack.sh +++ b/stack.sh @@ -855,45 +855,42 @@ if is_service_enabled g-reg; then mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS glance;' mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE glance CHARACTER SET utf8;' - function glance_config { - sudo sed -e " - s,%KEYSTONE_API_PORT%,$KEYSTONE_API_PORT,g; - s,%KEYSTONE_AUTH_HOST%,$KEYSTONE_AUTH_HOST,g; - s,%KEYSTONE_AUTH_PORT%,$KEYSTONE_AUTH_PORT,g; - s,%KEYSTONE_AUTH_PROTOCOL%,$KEYSTONE_AUTH_PROTOCOL,g; - s,%KEYSTONE_SERVICE_HOST%,$KEYSTONE_SERVICE_HOST,g; - s,%KEYSTONE_SERVICE_PORT%,$KEYSTONE_SERVICE_PORT,g; - s,%KEYSTONE_SERVICE_PROTOCOL%,$KEYSTONE_SERVICE_PROTOCOL,g; - s,%SQL_CONN%,$BASE_SQL_CONN/glance?charset=utf8,g; - s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g; - s,%SERVICE_USERNAME%,glance,g; - s,%SERVICE_PASSWORD%,$SERVICE_PASSWORD,g; - s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g; - s,%DEST%,$DEST,g; - s,%SYSLOG%,$SYSLOG,g; - " -i $1 - } - # Copy over our glance configurations and update them GLANCE_REGISTRY_CONF=$GLANCE_CONF_DIR/glance-registry.conf - cp $FILES/glance-registry.conf $GLANCE_REGISTRY_CONF - glance_config $GLANCE_REGISTRY_CONF - - if [[ -e $FILES/glance-registry-paste.ini ]]; then - GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini - cp $FILES/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI - glance_config $GLANCE_REGISTRY_PASTE_INI - fi + cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF + iniset $GLANCE_REGISTRY_CONF DEFAULT debug True + inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file + iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $BASE_SQL_CONN/glance?charset=utf8 + iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG + iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone + + GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini + cp $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI + iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken admin_user glance + iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf - cp $FILES/glance-api.conf $GLANCE_API_CONF - glance_config $GLANCE_API_CONF - - if [[ -e $FILES/glance-api-paste.ini ]]; then - GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini - cp $FILES/glance-api-paste.ini $GLANCE_API_PASTE_INI - glance_config $GLANCE_API_PASTE_INI - fi + cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF + iniset $GLANCE_API_CONF DEFAULT debug True + inicomment $GLANCE_API_CONF DEFAULT log_file + iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG + iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/ + iniset $GLANCE_API_CONF paste_deploy flavor keystone + + GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini + cp $GLANCE_DIR/etc/glance-api-paste.ini 
$GLANCE_API_PASTE_INI + iniset $GLANCE_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $GLANCE_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $GLANCE_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $GLANCE_API_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + iniset $GLANCE_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $GLANCE_API_PASTE_INI filter:authtoken admin_user glance + iniset $GLANCE_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD fi # Quantum From 07e27e0b2399f4c1dedc59bf9b02a2077dcc6fbb Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Mon, 9 Apr 2012 15:41:09 -0500 Subject: [PATCH 466/967] Remove temporary fix for keystone.conf file. Fixed in https://review.openstack.org/#change,6387. Change-Id: I7fd73fa2658fcb8ba3a6db4d19ebde25e67cfbda --- stack.sh | 8 -------- 1 file changed, 8 deletions(-) diff --git a/stack.sh b/stack.sh index 8a936082..86e8203c 100755 --- a/stack.sh +++ b/stack.sh @@ -1547,14 +1547,6 @@ if is_service_enabled key; then fi if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then - # FIXME(dtroyer): etc/keystone.conf causes trouble if the config files - # are located anywhere else (say, /etc/keystone). - # LP 966670 fixes this in keystone, we fix it - # here until the bug fix is committed. - if [[ -r $KEYSTONE_DIR/etc/keystone.conf ]]; then - # Get the sample config file out of the way - mv $KEYSTONE_DIR/etc/keystone.conf $KEYSTONE_DIR/etc/keystone.conf.sample - fi cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR fi From 4c889349b7c81e4c814e4da2b4a1158a47243735 Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Thu, 8 Mar 2012 13:15:03 -0800 Subject: [PATCH 467/967] Script for Ubuntu 11.10 on Xenserver This script creates a template for an Ubuntu 11.10 VM on Xenserver. This is not officially supported by Citrix, but is useful, as most of the Openstack development and instructions are based on it. Change-Id: I8457f88ebe1065429d4c03de11d7ab0ef22f357a --- tools/xen/README.md | 6 ++++ tools/xen/scripts/xenoneirictemplate.sh | 41 +++++++++++++++++++++++++ 2 files changed, 47 insertions(+) create mode 100755 tools/xen/scripts/xenoneirictemplate.sh diff --git a/tools/xen/README.md b/tools/xen/README.md index d487a996..020ec99d 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -100,3 +100,9 @@ Step 6: Do cloudy stuff! * Play with horizon * Play with the CLI * Log bugs to devstack and core projects, and submit fixes! + +Ubuntu 11.10 VM on Xenserver +---------------------------- +Run ./scripts/xenoneirictemplate.sh on your Xenserver host. This creates a +template to be able to install a Ubuntu Oneiric (11.10) virtual machine. +Once the template is created, follow the wizard to complete the network install. 
diff --git a/tools/xen/scripts/xenoneirictemplate.sh b/tools/xen/scripts/xenoneirictemplate.sh new file mode 100755 index 00000000..baf4866a --- /dev/null +++ b/tools/xen/scripts/xenoneirictemplate.sh @@ -0,0 +1,41 @@ +#!/bin/bash +## makeubuntu.sh, this creates Ubuntu server 11.10 32 and 64 bit templates +## on Xenserver 6.0.2 Net install only +## Original Author: David Markey +## Author: Renuka Apte +## This is not an officially supported guest OS on XenServer 6.02 + +LENNY=$(xe template-list name-label=Debian\ Lenny\ 5.0\ \(32-bit\) --minimal) + +if [[ -z $LENNY ]] ; then + echo "Cant find lenny 32bit template, is this on 6.0.2?" + exit 1 +fi + +distro="Ubuntu 11.10" +arches=("32-bit" "64-bit") + + +for arch in ${arches[@]} ; do + echo "Attempting $distro ($arch)" + if [[ -n $(xe template-list name-label="$distro ($arch)" params=uuid --minimal) ]] ; then + echo "$distro ($arch)" already exists, Skipping + else + + NEWUUID=$(xe vm-clone uuid=$LENNY new-name-label="$distro ($arch)") + xe template-param-set uuid=$NEWUUID other-config:install-methods=http,ftp \ + other-config:install-repository=http://archive.ubuntu.net/ubuntu \ + PV-args="-- quiet console=hvc0 partman/default_filesystem=ext3 locale=en_US console-setup/ask_detect=false keyboard-configuration/layoutcode=us netcfg/choose_interface=eth3 netcfg/get_hostname=unassigned-hostname netcfg/get_domain=unassigned-domain auto url=http://images.ansolabs.com/devstackubuntupreseed.cfg" \ + other-config:debian-release=oneiric \ + other-config:default_template=true + + if [[ "$arch" == "32-bit" ]] ; then + xe template-param-set uuid=$NEWUUID other-config:install-arch="i386" + else + xe template-param-set uuid=$NEWUUID other-config:install-arch="amd64" + fi + echo "Success" + fi +done + +echo "Done" From 5218d451dc6d21c32e5bf5ab81c53bdd1db50234 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Sat, 4 Feb 2012 02:13:23 -0600 Subject: [PATCH 468/967] E) Add support for Fedora 16 Highlights: * Add files/rpms/* * get_packages() only loads deps for services that are enabled 11Apr2012 * change mod_wsgi cwd to $HORIZON_DIR * fix horizon apache log file name and location Note: This superceeds https://review.openstack.org/4364 Change-Id: I95486584561e4418907a6a4feb0ffbe4f4ea1843 --- README.md | 2 +- ...fault.template => apache-horizon.template} | 9 +- files/pips/horizon | 2 + files/rpms/general | 14 ++ files/rpms/glance | 8 + files/rpms/horizon | 25 ++ files/rpms/keystone | 11 + files/rpms/n-api | 1 + files/rpms/n-cpu | 3 + files/rpms/n-novnc | 1 + files/rpms/n-vol | 2 + files/rpms/nova | 39 ++++ files/rpms/swift | 18 ++ functions | 27 ++- stack.sh | 213 +++++++++++++----- unstack.sh | 2 +- 16 files changed, 313 insertions(+), 64 deletions(-) rename files/{000-default.template => apache-horizon.template} (77%) create mode 100644 files/rpms/general create mode 100644 files/rpms/glance create mode 100644 files/rpms/horizon create mode 100644 files/rpms/keystone create mode 100644 files/rpms/n-api create mode 100644 files/rpms/n-cpu create mode 100644 files/rpms/n-novnc create mode 100644 files/rpms/n-vol create mode 100644 files/rpms/nova create mode 100644 files/rpms/swift diff --git a/README.md b/README.md index 5c328937..cfcfe7c3 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud. 
# Goals -* To quickly build dev OpenStack environments in a clean Oneiric or Precise environment +* To quickly build dev OpenStack environments in a clean Ubuntu or Fedora environment * To describe working configurations of OpenStack (which code branches work together? what do config files look like for those branches?) * To make it easier for developers to dive into OpenStack so that they can productively contribute without having to understand every part of the system at once * To make it easy to prototype cross-project features diff --git a/files/000-default.template b/files/apache-horizon.template similarity index 77% rename from files/000-default.template rename to files/apache-horizon.template index f499ea07..e54f16ce 100644 --- a/files/000-default.template +++ b/files/apache-horizon.template @@ -1,13 +1,13 @@ WSGIScriptAlias / %HORIZON_DIR%/openstack_dashboard/wsgi/django.wsgi - WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 + WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 home=%HORIZON_DIR% + SetEnv APACHE_RUN_USER %USER% SetEnv APACHE_RUN_GROUP %GROUP% WSGIProcessGroup horizon DocumentRoot %HORIZON_DIR%/.blackhole/ Alias /media %HORIZON_DIR%/openstack_dashboard/static - Alias /vpn /opt/stack/vpn Options FollowSymLinks @@ -21,8 +21,9 @@ allow from all - ErrorLog /var/log/apache2/error.log + ErrorLog /var/log/%APACHE_NAME%/horizon_error.log LogLevel warn - CustomLog /var/log/apache2/access.log combined + CustomLog /var/log/%APACHE_NAME%/horizon_access.log combined +WSGISocketPrefix /var/run/%APACHE_NAME% diff --git a/files/pips/horizon b/files/pips/horizon index f15602e9..c2475426 100644 --- a/files/pips/horizon +++ b/files/pips/horizon @@ -1,3 +1,5 @@ +django-mailer # dist:f16 +django-nose # dist:f16 django-nose-selenium pycrypto==2.3 python-cloudfiles diff --git a/files/rpms/general b/files/rpms/general new file mode 100644 index 00000000..af199d54 --- /dev/null +++ b/files/rpms/general @@ -0,0 +1,14 @@ +curl +euca2ools # only for testing client +git-core +openssh-server +psmisc +pylint +python-pep8 +python-pip +python-unittest2 +python-virtualenv +screen +tcpdump +unzip +wget diff --git a/files/rpms/glance b/files/rpms/glance new file mode 100644 index 00000000..141fe972 --- /dev/null +++ b/files/rpms/glance @@ -0,0 +1,8 @@ +python-argparse +python-eventlet +python-greenlet +python-paste-deploy +python-routes +python-sqlalchemy +python-wsgiref +pyxattr diff --git a/files/rpms/horizon b/files/rpms/horizon new file mode 100644 index 00000000..3c5fbc17 --- /dev/null +++ b/files/rpms/horizon @@ -0,0 +1,25 @@ +Django +django-registration +gcc +httpd # NOPRIME +mod_wsgi # NOPRIME +pylint +python-anyjson +python-boto +python-coverage +python-dateutil +python-eventlet +python-greenlet +python-httplib2 +python-kombu +python-migrate +python-mox +python-nose +python-paste +python-paste-deploy +python-pep8 +python-routes +python-sphinx +python-sqlalchemy +python-webob +pyxattr diff --git a/files/rpms/keystone b/files/rpms/keystone new file mode 100644 index 00000000..59868c7f --- /dev/null +++ b/files/rpms/keystone @@ -0,0 +1,11 @@ +python-greenlet +python-lxml +python-paste +python-paste-deploy +python-paste-script +python-routes +python-setuptools +python-sqlalchemy +python-sqlite2 +python-webob +sqlite diff --git a/files/rpms/n-api b/files/rpms/n-api new file mode 100644 index 00000000..0f08daac --- /dev/null +++ b/files/rpms/n-api @@ -0,0 +1 @@ +python-dateutil diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu new file mode 100644 
index 00000000..1996a986 --- /dev/null +++ b/files/rpms/n-cpu @@ -0,0 +1,3 @@ +# Stuff for diablo volumes +iscsi-initiator-utils +lvm2 diff --git a/files/rpms/n-novnc b/files/rpms/n-novnc new file mode 100644 index 00000000..24ce15ab --- /dev/null +++ b/files/rpms/n-novnc @@ -0,0 +1 @@ +numpy diff --git a/files/rpms/n-vol b/files/rpms/n-vol new file mode 100644 index 00000000..df861aad --- /dev/null +++ b/files/rpms/n-vol @@ -0,0 +1,2 @@ +lvm2 +scsi-target-utils diff --git a/files/rpms/nova b/files/rpms/nova new file mode 100644 index 00000000..1b1d47f7 --- /dev/null +++ b/files/rpms/nova @@ -0,0 +1,39 @@ +MySQL-python +curl +dnsmasq-utils # for dhcp_release +ebtables +gawk +iptables +iputils +kpartx +kvm +libvirt-bin # NOPRIME +libvirt-python +libxml2-python +m2crypto +mysql-server # NOPRIME +parted +python-boto +python-carrot +python-cheetah +python-eventlet +python-feedparser +python-gflags +python-greenlet +python-iso8601 +python-kombu +python-lockfile +python-migrate +python-mox +python-netaddr +python-paramiko +python-paste +python-paste-deploy +python-routes +python-sqlalchemy +python-suds +python-tempita +rabbitmq-server # NOPRIME +sqlite +sudo +vconfig diff --git a/files/rpms/swift b/files/rpms/swift new file mode 100644 index 00000000..c9d49e92 --- /dev/null +++ b/files/rpms/swift @@ -0,0 +1,18 @@ +curl +gcc +memcached # NOPRIME +python-configobj +python-coverage +python-devel +python-eventlet +python-greenlet +python-netifaces +python-nose +python-paste-deploy +python-setuptools +python-simplejson +python-webob +pyxattr +sqlite +xfsprogs +xinetd diff --git a/functions b/functions index 5114de10..7072fdd8 100644 --- a/functions +++ b/functions @@ -298,21 +298,42 @@ function pip_install { # Service wrapper to restart services # restart_service service-name function restart_service() { - sudo /usr/sbin/service $1 restart + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + if [[ "$os_PACKAGE" = "deb" ]]; then + sudo /usr/sbin/service $1 restart + else + sudo /sbin/service $1 restart + fi } # Service wrapper to start services # start_service service-name function start_service() { - sudo /usr/sbin/service $1 start + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + if [[ "$os_PACKAGE" = "deb" ]]; then + sudo /usr/sbin/service $1 start + else + sudo /sbin/service $1 start + fi } # Service wrapper to stop services # stop_service service-name function stop_service() { - sudo /usr/sbin/service $1 stop + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + if [[ "$os_PACKAGE" = "deb" ]]; then + sudo /usr/sbin/service $1 stop + else + sudo /sbin/service $1 stop + fi } diff --git a/stack.sh b/stack.sh index 86e8203c..debbc4ea 100755 --- a/stack.sh +++ b/stack.sh @@ -32,6 +32,9 @@ GetOSVersion if [[ "$os_VENDOR" =~ (Ubuntu) ]]; then # 'Everyone' refers to Ubuntu releases by the code name adjective DISTRO=$os_CODENAME +elif [[ "$os_VENDOR" =~ (Fedora) ]]; then + # For Fedora, just use 'f' and the release + DISTRO="f$os_RELEASE" else # Catch-all for now is Vendor + Release + Update DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE" @@ -72,7 +75,7 @@ if [[ ! -r $TOP_DIR/stackrc ]]; then echo "ERROR: missing $TOP_DIR/stackrc - did you grab more than just stack.sh?" exit 1 fi -source ./stackrc +source $TOP_DIR/stackrc # Destination path for installation ``DEST`` DEST=${DEST:-/opt/stack} @@ -83,14 +86,21 @@ DEST=${DEST:-/opt/stack} # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! 
${DISTRO} =~ (oneiric|precise) ]]; then - echo "WARNING: this script has only been tested on oneiric and precise" +if [[ ! ${DISTRO} =~ (oneiric|precise|f16) ]]; then + echo "WARNING: this script has been tested on oneiric, precise and f16" if [[ "$FORCE" != "yes" ]]; then echo "If you wish to run this script anyway run with FORCE=yes" exit 1 fi fi +# Set the paths of certain binaries +if [[ "$os_PACKAGE" = "deb" ]]; then + NOVA_ROOTWRAP=/usr/local/bin/nova-rootwrap +else + NOVA_ROOTWRAP=/usr/bin/nova-rootwrap +fi + # stack.sh keeps the list of ``apt`` and ``pip`` dependencies in external # files, along with config templates and other useful files. You can find these # in the ``files`` directory (next to this script). We will reference this @@ -122,11 +132,16 @@ if [[ $EUID -eq 0 ]]; then # since this script runs as a normal user, we need to give that user # ability to run sudo - dpkg -l sudo || apt_get update && install_package sudo - + if [[ "$os_PACKAGE" = "deb" ]]; then + dpkg -l sudo || apt_get update && install_package sudo + STACK_GROUP=sudo + else + rpm -qa | grep sudo || install_package sudo + STACK_GROUP=wheel + fi if ! getent passwd stack >/dev/null; then echo "Creating a user called stack" - useradd -U -G sudo -s /bin/bash -d $DEST -m stack + useradd -U -G $STACK_GROUP -s /bin/bash -d $DEST -m stack fi echo "Giving stack user passwordless sudo priviledges" @@ -148,7 +163,12 @@ if [[ $EUID -eq 0 ]]; then exit 1 else # We're not root, make sure sudo is available - dpkg -l sudo || die "Sudo is required. Re-run stack.sh as root ONE TIME ONLY to set up sudo." + if [[ "$os_PACKAGE" = "deb" ]]; then + CHECK_SUDO_CMD="dpkg -l sudo" + else + CHECK_SUDO_CMD="rpm -q sudo" + fi + $CHECK_SUDO_CMD || die "Sudo is required. Re-run stack.sh as root ONE TIME ONLY to set up sudo." # UEC images /etc/sudoers does not have a '#includedir'. add one. sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || @@ -163,7 +183,7 @@ else # Set up the rootwrap sudoers TEMPFILE=`mktemp` - echo "$USER ALL=(root) NOPASSWD: /usr/local/bin/nova-rootwrap" >$TEMPFILE + echo "$USER ALL=(root) NOPASSWD: $NOVA_ROOTWRAP" >$TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap @@ -549,6 +569,8 @@ fi # - ``# NOPRIME`` defers installation to be performed later in stack.sh # - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection # of the package to the distros listed. The distro names are case insensitive. 
+# +# get_packages dir function get_packages() { local package_dir=$1 local file_to_parse @@ -558,7 +580,8 @@ function get_packages() { echo "No package directory supplied" return 1 fi - for service in general ${ENABLED_SERVICES//,/ }; do # Allow individual services to specify dependencies + for service in general ${ENABLED_SERVICES//,/ }; do + # Allow individual services to specify dependencies if [[ -e ${package_dir}/${service} ]]; then file_to_parse="${file_to_parse} $service" fi @@ -604,9 +627,13 @@ function get_packages() { done } -# install apt requirements -apt_get update -install_package $(get_packages $FILES/apts) +# install package requirements +if [[ "$os_PACKAGE" = "deb" ]]; then + apt_get update + install_package $(get_packages $FILES/apts) +else + install_package $(get_packages $FILES/rpms) +fi # install python requirements pip_install $(get_packages $FILES/pips | sort -u) @@ -722,6 +749,10 @@ if is_service_enabled rabbit; then install_package rabbitmq-server > "$tfile" 2>&1 cat "$tfile" rm -f "$tfile" + if [[ "$os_PACKAGE" = "rpm" ]]; then + # RPM doesn't start the service + restart_service rabbitmq-server + fi # change the rabbit password since the default is "guest" sudo rabbitmqctl change_password guest $RABBIT_PASSWORD fi @@ -732,13 +763,15 @@ fi if is_service_enabled mysql; then - # Seed configuration with mysql password so that apt-get install doesn't - # prompt us for a password upon install. - cat </etc/$APACHE_NAME/$APACHE_CONF" + restart_service $APACHE_NAME fi @@ -921,8 +983,13 @@ if is_service_enabled q-svc; then if [[ "$Q_PLUGIN" = "openvswitch" ]]; then # Install deps # FIXME add to files/apts/quantum, but don't install if not needed! - kernel_version=`cat /proc/version | cut -d " " -f3` - install_package openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version + if [[ "$os_PACKAGE" = "deb" ]]; then + kernel_version=`cat /proc/version | cut -d " " -f3` + install_package openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version + else + ### FIXME(dtroyer): Find RPMs for OpenVSwitch + echo "OpenVSwitch packages need to be located" + fi # Create database for the plugin/agent if is_service_enabled mysql; then mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS ovs_quantum;' @@ -1044,7 +1111,12 @@ if is_service_enabled n-cpu; then # Virtualization Configuration # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - install_package libvirt-bin + if [[ "$os_PACKAGE" = "deb" ]]; then + LIBVIRT_PKG_NAME=libvirt-bin + else + LIBVIRT_PKG_NAME=libvirt + fi + install_package $LIBVIRT_PKG_NAME # Force IP forwarding on, just on case sudo sysctl -w net.ipv4.ip_forward=1 @@ -1067,27 +1139,50 @@ if is_service_enabled n-cpu; then # splitting a system into many smaller parts. LXC uses cgroups and chroot # to simulate multiple systems. if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then - if [[ "$DISTRO" > natty ]]; then - install_package cgroup-lite - else - cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0" - sudo mkdir -p /cgroup - if ! grep -q cgroup /etc/fstab; then - echo "$cgline" | sudo tee -a /etc/fstab - fi - if ! mount -n | grep -q cgroup; then - sudo mount /cgroup + if [[ "$os_PACKAGE" = "deb" ]]; then + if [[ "$DISTRO" > natty ]]; then + install_package cgroup-lite + else + cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0" + sudo mkdir -p /cgroup + if ! grep -q cgroup /etc/fstab; then + echo "$cgline" | sudo tee -a /etc/fstab + fi + if ! 
mount -n | grep -q cgroup; then + sudo mount /cgroup + fi fi + else + ### FIXME(dtroyer): figure this out + echo "RPM-based cgroup not implemented yet" + yum_install libcgroup-tools fi fi + if [[ "$os_PACKAGE" = "deb" ]]; then + LIBVIRT_DAEMON=libvirt-bin + else + # http://wiki.libvirt.org/page/SSHPolicyKitSetup + if ! grep ^libvirtd: /etc/group >/dev/null; then + sudo groupadd libvirtd + fi + sudo bash -c 'cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla +[libvirt Management Access] +Identity=unix-group:libvirtd +Action=org.libvirt.unix.manage +ResultAny=yes +ResultInactive=yes +ResultActive=yes +EOF' + LIBVIRT_DAEMON=libvirtd + fi # The user that nova runs as needs to be member of libvirtd group otherwise # nova-compute will be unable to use libvirt. sudo usermod -a -G libvirtd `whoami` # libvirt detects various settings on startup, as we potentially changed # the system configuration (modules, filesystems), we need to restart # libvirt to detect those changes. - restart_service libvirt-bin + restart_service $LIBVIRT_DAEMON # Instance Storage @@ -1202,7 +1297,11 @@ if is_service_enabled swift; then s/%USER%/$USER/; s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,; " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf - sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync + if [[ "$os_PACKAGE" = "deb" ]]; then + sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync + else + sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync + fi # By default Swift will be installed with the tempauth middleware # which has some default username and password if you have @@ -1267,10 +1366,10 @@ if is_service_enabled swift; then swift_log_dir=${SWIFT_DATA_DIR}/logs rm -rf ${swift_log_dir} mkdir -p ${swift_log_dir}/hourly - sudo chown -R syslog:adm ${swift_log_dir} + sudo chown -R $USER:adm ${swift_log_dir} sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ tee /etc/rsyslog.d/10-swift.conf - sudo restart rsyslog + restart_service rsyslog # This is where we create three different rings for swift with # different object servers binding on different ports. @@ -1304,10 +1403,12 @@ if is_service_enabled swift; then } && popd >/dev/null - sudo chmod +x /usr/local/bin/swift-* - # We then can start rsync. - sudo /etc/init.d/rsync restart || : + if [[ "$os_PACKAGE" = "deb" ]]; then + sudo /etc/init.d/rsync restart || : + else + sudo systemctl start xinetd.service + fi # First spawn all the swift services then kill the # proxy service so we can run it in foreground in screen. @@ -1332,9 +1433,6 @@ if is_service_enabled n-vol; then # # By default, the backing file is 2G in size, and is stored in /opt/stack. - # install the package - install_package tgt - if ! 
sudo vgs $VOLUME_GROUP; then VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file} VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M} @@ -1357,10 +1455,15 @@ if is_service_enabled n-vol; then done fi - # tgt in oneiric doesn't restart properly if tgtd isn't running - # do it in two steps - sudo stop tgt || true - sudo start tgt + if [[ "$os_PACKAGE" = "deb" ]]; then + # tgt in oneiric doesn't restart properly if tgtd isn't running + # do it in two steps + sudo stop tgt || true + sudo start tgt + else + # bypass redirection to systemctl during restart + sudo /sbin/service --skip-redirect tgtd restart + fi fi NOVA_CONF=nova.conf @@ -1377,7 +1480,7 @@ add_nova_opt "[DEFAULT]" add_nova_opt "verbose=True" add_nova_opt "auth_strategy=keystone" add_nova_opt "allow_resize_to_same_host=True" -add_nova_opt "root_helper=sudo /usr/local/bin/nova-rootwrap" +add_nova_opt "root_helper=sudo $NOVA_ROOTWRAP" add_nova_opt "compute_scheduler_driver=$SCHEDULER" add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF_DIR/$NOVA_CONF" add_nova_opt "fixed_range=$FIXED_RANGE" @@ -1661,7 +1764,7 @@ screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler" screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF --web ." screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF" screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" -screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log" +screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log" screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v" # Starting the nova-objectstore only if swift service is not enabled. diff --git a/unstack.sh b/unstack.sh index cfe2de6d..7de0d749 100755 --- a/unstack.sh +++ b/unstack.sh @@ -49,7 +49,7 @@ if is_service_enabled n-vol; then echo "iSCSI target cleanup needed:" echo "$TARGETS" fi - sudo stop tgt + stop_service tgt fi if [[ -n "$UNSTACK_ALL" ]]; then From 7030ec58c4a58aeb2a7c0002952eacc41d7acd20 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 12 Apr 2012 11:19:42 -0500 Subject: [PATCH 469/967] Add gcc to glance prereqs Change-Id: I60584e5f3c99d15cd0f3a2222a448f7e01295779 --- files/apts/glance | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apts/glance b/files/apts/glance index 17c84adb..42d9fb82 100644 --- a/files/apts/glance +++ b/files/apts/glance @@ -1,3 +1,4 @@ +gcc python-eventlet python-routes python-greenlet From 9ac2aa4618154384af716f16d41014bd0c60e9f8 Mon Sep 17 00:00:00 2001 From: Gabriel Hurley Date: Thu, 12 Apr 2012 11:35:07 -0700 Subject: [PATCH 470/967] Bumping Horizon Django dependency to v1.4 for Folsom. Horizon is moving to Django 1.4 as the minimum version, however 1.4 is not the packaged version in the apt repositories. Thereby the dependency needs to be moved back to being installed by pip. Change-Id: I3c3116c1d02bdfaccab521175816502fd8ea054a --- files/apts/horizon | 1 - files/pips/horizon | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/files/apts/horizon b/files/apts/horizon index b00d8c04..d93c34b2 100644 --- a/files/apts/horizon +++ b/files/apts/horizon @@ -18,7 +18,6 @@ python-mox python-kombu python-coverage python-cherrypy3 # why? 
-python-django python-django-mailer python-django-nose python-django-registration diff --git a/files/pips/horizon b/files/pips/horizon index c2475426..309a5fee 100644 --- a/files/pips/horizon +++ b/files/pips/horizon @@ -1,3 +1,4 @@ +django>=1.4 django-mailer # dist:f16 django-nose # dist:f16 django-nose-selenium From 55e82033c928b68b940f7af74ca8e7df493fe352 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 12 Apr 2012 21:15:35 +0200 Subject: [PATCH 471/967] Store glance images in Swift if installed. - Fixes bug 968950. Change-Id: I62587965cd01e7d7bb1ffb6081c57099a76fc87e --- stack.sh | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/stack.sh b/stack.sh index debbc4ea..1d25e4ea 100755 --- a/stack.sh +++ b/stack.sh @@ -959,6 +959,15 @@ if is_service_enabled g-reg; then iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/ iniset $GLANCE_API_CONF paste_deploy flavor keystone + # Store the images in swift if enabled. + if is_service_enabled swift; then + iniset $GLANCE_API_CONF DEFAULT default_store swift + iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ + iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance + iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD + iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True + fi + GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini cp $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI iniset $GLANCE_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST From 4f27a72029722af0133cb554a5192e0bbc16d165 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Fri, 13 Apr 2012 12:33:49 +0100 Subject: [PATCH 472/967] bug 980725: multi_host is always set to True with virtdriver=xenserver set MULTI_HOST according to the value as defined in localrc or other rc files. Change-Id: Ifc5d0fff06bffd39ab4ff1c1a6fa5d4ab10b9e27 --- tools/xen/build_xva.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index c235485e..1b41963a 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -177,7 +177,7 @@ cat <$STAGING_DIR/opt/stack/run.sh #!/bin/bash cd /opt/stack/devstack killall screen -UPLOAD_LEGACY_TTY=yes HOST_IP=$PUB_IP VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=1 HOST_IP_IFACE=$HOST_IP_IFACE $STACKSH_PARAMS ./stack.sh +UPLOAD_LEGACY_TTY=yes HOST_IP=$PUB_IP VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=$MULTI_HOST HOST_IP_IFACE=$HOST_IP_IFACE $STACKSH_PARAMS ./stack.sh EOF chmod 755 $STAGING_DIR/opt/stack/run.sh From d4c420dcfe5b6b799594e721471cb62d53628fe3 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 13 Apr 2012 09:13:36 -0700 Subject: [PATCH 473/967] Remove code that allows use of swift + glance. * Removing since it only works with essex, not trunk * Reverts https://review.openstack.org/#/c/5988/ Change-Id: Iedcfa536c25d9ee3cc36714aa9028672b2e59a66 --- stack.sh | 9 --------- 1 file changed, 9 deletions(-) diff --git a/stack.sh b/stack.sh index 1d25e4ea..debbc4ea 100755 --- a/stack.sh +++ b/stack.sh @@ -959,15 +959,6 @@ if is_service_enabled g-reg; then iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/ iniset $GLANCE_API_CONF paste_deploy flavor keystone - # Store the images in swift if enabled. 
- if is_service_enabled swift; then - iniset $GLANCE_API_CONF DEFAULT default_store swift - iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ - iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance - iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD - iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True - fi - GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini cp $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI iniset $GLANCE_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST From ba429edfe149307cdc1e3041710e433df28a1de8 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 13 Apr 2012 11:35:07 -0500 Subject: [PATCH 474/967] Another prereq for n-api on a compute node * work around n-api on a compute node pulling in glance without running through the glance prereqs Change-Id: I400c555dd5c04d52bf8aa415747769374a145390 --- files/apts/n-api | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apts/n-api b/files/apts/n-api index 0f08daac..ad943ffd 100644 --- a/files/apts/n-api +++ b/files/apts/n-api @@ -1 +1,2 @@ +gcc # temporary because this pulls in glance to get the client without running the glance prereqs python-dateutil From 8ad0351148a850f4f34fb19797a1e0538cc8ac76 Mon Sep 17 00:00:00 2001 From: debo Date: Tue, 28 Feb 2012 17:47:26 -0800 Subject: [PATCH 475/967] Updated CI test script Debo~ Dutta@Cisco, Dave Lapsley@Nicira * original at https://review.openstack.org/#change,3682 * Allow this exercise to be skipped if quantum is not enabled Change-Id: I8463f654fb85394d78dd01c93c7f7b2706511030 --- exercises/quantum.sh | 393 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 393 insertions(+) create mode 100755 exercises/quantum.sh diff --git a/exercises/quantum.sh b/exercises/quantum.sh new file mode 100755 index 00000000..943a07d7 --- /dev/null +++ b/exercises/quantum.sh @@ -0,0 +1,393 @@ +#!/usr/bin/env bash +# + +# **quantum.sh** + +# We will use this test to perform integration testing of nova and +# other components with Quantum. + +echo "*********************************************************************" +echo "Begin DevStack Exercise: $0" +echo "*********************************************************************" + +# This script exits on an error so that errors don't compound and you see +# only the first error that occured. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. +set -o xtrace + +#------------------------------------------------------------------------------ +# Quantum config check +#------------------------------------------------------------------------------ +# Warn if quantum is not enabled +if [[ ! 
"$ENABLED_SERVICES" =~ "q-svc" ]]; then + echo "WARNING: Running quantum test without enabling quantum" +fi + +#------------------------------------------------------------------------------ +# Environment +#------------------------------------------------------------------------------ + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc + +# Import exercise configuration +source $TOP_DIR/exerciserc + +# If quantum is not enabled we exit with exitcode 55 which mean +# exercise is skipped. +is_service_enabled quantum || exit 55 + +#------------------------------------------------------------------------------ +# Various default parameters. +#------------------------------------------------------------------------------ + +# Max time to wait while vm goes from build to active state +ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} + +# Max time till the vm is bootable +BOOT_TIMEOUT=${BOOT_TIMEOUT:-60} + +# Max time to wait for proper association and dis-association. +ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15} + +# Max time to wait before delete VMs and delete Networks +VM_NET_DELETE_TIMEOUT=${VM_NET_TIMEOUT:-10} + +# Instance type to create +DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} + +# Boot this image, use first AMi image if unset +DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} + +# OVS Hosts +OVS_HOSTS=${DEFAULT_OVS_HOSTS:-"localhost"} + +#------------------------------------------------------------------------------ +# Nova settings. +#------------------------------------------------------------------------------ +NOVA_MANAGE=/opt/stack/nova/bin/nova-manage +NOVA=/usr/local/bin/nova +NOVA_CONF=/etc/nova/nova.conf + +#------------------------------------------------------------------------------ +# Mysql settings. +#------------------------------------------------------------------------------ +MYSQL="/usr/bin/mysql --skip-column-name --host=$MYSQL_HOST" + +#------------------------------------------------------------------------------ +# Keystone settings. +#------------------------------------------------------------------------------ +KEYSTONE="keystone" + +#------------------------------------------------------------------------------ +# Get a token for clients that don't support service catalog +#------------------------------------------------------------------------------ + +# manually create a token by querying keystone (sending JSON data). Keystone +# returns a token and catalog of endpoints. We use python to parse the token +# and save it. + +TOKEN=`keystone token-get | grep ' id ' | awk '{print $4}'` + +#------------------------------------------------------------------------------ +# Various functions. 
+#------------------------------------------------------------------------------ +function get_image_id { + local IMAGE_ID=`glance -f -A $TOKEN index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` + echo "$IMAGE_ID" +} + +function get_tenant_id { + local TENANT_NAME=$1 + local TENANT_ID=`keystone tenant-list | grep $TENANT_NAME | awk '{print $2}'` + echo "$TENANT_ID" +} + +function get_user_id { + local USER_NAME=$1 + local USER_ID=`keystone user-list | grep $USER_NAME | awk '{print $2}'` + echo "$USER_ID" +} + +function get_role_id { + local ROLE_NAME=$1 + local ROLE_ID=`keystone role-list | grep $ROLE_NAME | awk '{print $2}'` + echo "$ROLE_ID" +} + +# TODO: (Debo) Change Quantum client CLI and then remove the MYSQL stuff. +function get_network_id { + local NETWORK_NAME=$1 + local QUERY="select uuid from networks where label='$NETWORK_NAME'" + local NETWORK_ID=`echo $QUERY | $MYSQL -u root -p$MYSQL_PASSWORD nova` + echo "$NETWORK_ID" +} + +function get_flavor_id { + local INSTANCE_TYPE=$1 + local FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'` + echo "$FLAVOR_ID" +} + +function add_tenant { + local TENANT=$1 + local USER=$3 + local PASSWORD=$2 + + $KEYSTONE tenant-create --name=$TENANT + $KEYSTONE user-create --name=$USER --pass=${PASSWORD} + + local USER_ID=$(get_user_id $USER) + local TENANT_ID=$(get_tenant_id $TENANT) + + $KEYSTONE user-role-add --user $USER_ID --role $(get_role_id Member) --tenant_id $TENANT_ID + $KEYSTONE user-role-add --user $USER_ID --role $(get_role_id admin) --tenant_id $TENANT_ID + $KEYSTONE user-role-add --user $USER_ID --role $(get_role_id anotherrole) --tenant_id $TENANT_ID + #$KEYSTONE user-role-add --user $USER_ID --role $(get_role_id sysadmin) --tenant_id $TENANT_ID + #$KEYSTONE user-role-add --user $USER_ID --role $(get_role_id netadmin) --tenant_id $TENANT_ID +} + +function remove_tenant { + local TENANT=$1 + local TENANT_ID=$(get_tenant_id $TENANT) + + $KEYSTONE tenant-delete $TENANT_ID +} + +function remove_user { + local USER=$1 + local USER_ID=$(get_user_id $USER) + + $KEYSTONE user-delete $USER_ID +} + + +#------------------------------------------------------------------------------ +# "Create" functions +#------------------------------------------------------------------------------ + +function create_tenants { + add_tenant demo1 nova demo1 + add_tenant demo2 nova demo2 +} + +function delete_tenants_and_users { + remove_tenant demo1 + remove_tenant demo2 + remove_user demo1 + remove_user demo2 +} + +function create_networks { + $NOVA_MANAGE --flagfile=$NOVA_CONF network create \ + --label=public-net1 \ + --fixed_range_v4=11.0.0.0/24 + + $NOVA_MANAGE --flagfile=$NOVA_CONF network create \ + --label=demo1-net1 \ + --fixed_range_v4=12.0.0.0/24 \ + --project_id=$(get_tenant_id demo1) \ + --priority=1 + + $NOVA_MANAGE --flagfile=$NOVA_CONF network create \ + --label=demo2-net1 \ + --fixed_range_v4=13.0.0.0/24 \ + --project_id=$(get_tenant_id demo2) \ + --priority=1 +} + +function create_vms { + PUBLIC_NET1_ID=$(get_network_id public-net1) + DEMO1_NET1_ID=$(get_network_id demo1-net1) + DEMO2_NET1_ID=$(get_network_id demo2-net1) + + export OS_TENANT_NAME=demo1 + export OS_USERNAME=demo1 + export OS_PASSWORD=nova + VM_UUID1=`$NOVA boot --flavor $(get_flavor_id m1.tiny) \ + --image $(get_image_id) \ + --nic net-id=$PUBLIC_NET1_ID \ + --nic net-id=$DEMO1_NET1_ID \ + demo1-server1 | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` + die_if_not_set VM_UUID1 "Failure launching demo1-server1" + + export OS_TENANT_NAME=demo2 + export 
OS_USERNAME=demo2
+    export OS_PASSWORD=nova
+    VM_UUID2=`$NOVA boot --flavor $(get_flavor_id m1.tiny) \
+        --image $(get_image_id) \
+        --nic net-id=$PUBLIC_NET1_ID \
+        --nic net-id=$DEMO2_NET1_ID \
+        demo2-server1 | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
+    die_if_not_set VM_UUID2 "Failure launching demo2-server1"
+
+    VM_UUID3=`$NOVA boot --flavor $(get_flavor_id m1.tiny) \
+        --image $(get_image_id) \
+        --nic net-id=$PUBLIC_NET1_ID \
+        --nic net-id=$DEMO2_NET1_ID \
+        demo2-server2 | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
+    die_if_not_set VM_UUID3 "Failure launching demo2-server2"
+
+}
+
+function ping_vms {
+
+    echo "Sleeping a bit to let the VMs come up"
+    sleep $ACTIVE_TIMEOUT
+
+    export OS_TENANT_NAME=demo1
+    export OS_USERNAME=demo1
+    export OS_PASSWORD=nova
+    # get the IP of the servers
+    PUBLIC_IP1=`nova show $VM_UUID1 | grep public-net1 | awk '{print $5}'`
+    export OS_TENANT_NAME=demo2
+    export OS_USERNAME=demo2
+    export OS_PASSWORD=nova
+    PUBLIC_IP2=`nova show $VM_UUID2 | grep public-net1 | awk '{print $5}'`
+
+    MULTI_HOST=${MULTI_HOST:-0}
+    if [ "$MULTI_HOST" = "0" ]; then
+        # Sometimes the first ping fails (the VM's network isn't up yet), so
+        # keep pinging for up to BOOT_TIMEOUT seconds with a timeout of a
+        # second for each individual ping.
+        if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $PUBLIC_IP1; do sleep 1; done"; then
+            echo "Couldn't ping server"
+            exit 1
+        fi
+        if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $PUBLIC_IP2; do sleep 1; done"; then
+            echo "Couldn't ping server"
+            exit 1
+        fi
+    else
+        # On a multi-host system, without vm net access, do a sleep to wait for the boot
+        sleep $BOOT_TIMEOUT
+    fi
+}
+
+function shutdown_vms {
+    export OS_TENANT_NAME=demo1
+    export OS_USERNAME=demo1
+    export OS_PASSWORD=nova
+    nova delete $VM_UUID1
+
+    export OS_TENANT_NAME=demo2
+    export OS_USERNAME=demo2
+    export OS_PASSWORD=nova
+    nova delete $VM_UUID2
+    nova delete $VM_UUID3
+
+}
+
+function delete_networks {
+    PUBLIC_NET1_ID=$(get_network_id public-net1)
+    DEMO1_NET1_ID=$(get_network_id demo1-net1)
+    DEMO2_NET1_ID=$(get_network_id demo2-net1)
+    $NOVA_MANAGE --flagfile=$NOVA_CONF network delete --uuid=$PUBLIC_NET1_ID
+    $NOVA_MANAGE --flagfile=$NOVA_CONF network delete --uuid=$DEMO1_NET1_ID
+    $NOVA_MANAGE --flagfile=$NOVA_CONF network delete --uuid=$DEMO2_NET1_ID
+}
+
+function all {
+    create_tenants
+    create_networks
+    create_vms
+    ping_vms
+    shutdown_vms
+    delete_networks
+    delete_tenants_and_users
+}
+
+#------------------------------------------------------------------------------
+# Test functions.
+#------------------------------------------------------------------------------
+function test_functions {
+    IMAGE=$(get_image_id)
+    echo $IMAGE
+
+    TENANT_ID=$(get_tenant_id demo)
+    echo $TENANT_ID
+
+    FLAVOR_ID=$(get_flavor_id m1.tiny)
+    echo $FLAVOR_ID
+
+    NETWORK_ID=$(get_network_id private)
+    echo $NETWORK_ID
+}
+
+#------------------------------------------------------------------------------
+# Usage and main.
+#------------------------------------------------------------------------------
+usage() {
+    echo "$0: [-h]"
+    echo "  -h, --help       Display help message"
+    echo "  -n, --net        Create networks"
+    echo "  -v, --vm         Create vms"
+    echo "  -t, --tenant     Create tenants"
+    echo "  -p, --ping       Ping vms"
+    echo "  -T, --test       Test functions"
+    echo "  -a, --all        Run all of the above"
+}
+
+main() {
+    if [ $# -eq 0 ] ; then
+        usage
+        exit
+    fi
+
+    echo Description
+    echo
+    echo Copyright 2012, Cisco Systems
+    echo Copyright 2012, Nicira Networks, Inc.
+ echo + echo Please direct any questions to dedutta@cisco.com, dlapsley@nicira.com + echo + + while [ "$1" != "" ]; do + case $1 in + -h | --help ) usage + exit + ;; + -n | --net ) create_networks + exit + ;; + -v | --vm ) create_vms + exit + ;; + -t | --tenant ) create_tenants + exit + ;; + -p | --ping ) ping_vms + exit + ;; + -T | --test ) test_functions + exit + ;; + -a | --all ) all + exit + ;; + * ) usage + exit 1 + esac + shift + done +} + + +#------------------------------------------------------------------------------- +# Kick off script. +#------------------------------------------------------------------------------- +echo $* +main -a + +set +o xtrace +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" From 588f4069e56d8f59e178cd68c9cd88b412b13f04 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 2 Apr 2012 16:50:49 -0500 Subject: [PATCH 476/967] Change MySQL engine default to InnoDB Fixes bug 971881 13Apr2012 - rebased to add Fedora support Change-Id: Ib93187b4727157cc8dc63cd4599970535c85adce --- stack.sh | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index debbc4ea..7ec4ee45 100755 --- a/stack.sh +++ b/stack.sh @@ -797,15 +797,28 @@ EOF # Update the DB to give user ‘$MYSQL_USER’@’%’ full control of the all databases: sudo mysql -uroot -p$MYSQL_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$MYSQL_USER'@'%' identified by '$MYSQL_PASSWORD';" - # Edit /etc/mysql/my.cnf to change ‘bind-address’ from localhost (127.0.0.1) to any (0.0.0.0) and restart the mysql service: + # Update ``my.cnf`` for some local needs and restart the mysql service if [[ "$os_PACKAGE" = "deb" ]]; then - MY_CNF=/etc/mysql/my.cnf + MY_CONF=/etc/mysql/my.cnf MYSQL=mysql else - MY_CNF=/etc/my.cnf + MY_CONF=/etc/my.cnf MYSQL=mysqld fi - sudo sed -i 's/127.0.0.1/0.0.0.0/g' $MY_CNF + + # Change ‘bind-address’ from localhost (127.0.0.1) to any (0.0.0.0) + sudo sed -i '/^bind-address/s/127.0.0.1/0.0.0.0/g' $MY_CONF + + # Set default db type to InnoDB + if grep -q "default-storage-engine" $MY_CONF; then + # Change it + sudo bash -c "source $TOP_DIR/functions; iniset $MY_CONF mysqld default-storage-engine InnoDB" + else + # Add it + sudo sed -i -e "/^\[mysqld\]/ a \ +default-storage-engine = InnoDB" $MY_CONF + fi + restart_service $MYSQL fi From 58d34ea9c168992659238eee726c14005d0c23cf Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Thu, 5 Apr 2012 17:19:02 -0400 Subject: [PATCH 477/967] Make required changes to tempest.conf build * Echo out the tempest.conf for easier debugging * Make sure IDENTITY_HOST defaults to 127.0.0.1 * Source and ensure openrc is available * Ensure BUILD_TIMEOUT and BUILD_INTERVAL are set appropriately * Set COMPUTE_RESIZE_AVAILABLE to False because QEMU does not support Change-Id: If39c3cc011c1a2207fbcb2922094bd9ff2973746 --- tools/configure_tempest.sh | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 01849ad3..6ba301f7 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -37,13 +37,18 @@ TOP_DIR=$(cd $TOOLS_DIR/..; pwd) # Abort if localrc is not set if [ ! -e $TOP_DIR/localrc ]; then - echo "You must have a localrc with ALL necessary passwords and configuration defined before proceeding." - echo "See stack.sh for required passwords." 
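Once the MySQL change in the patch above has updated my.cnf and restart_service has run, the new default can be confirmed from the server itself rather than by re-reading the config file. This is a hedged illustration, not part of either patch; it reuses the same root credentials and host that stack.sh already passes to mysql:

    # Should list InnoDB for the (default_)storage_engine variables after the restart
    mysql -uroot -p$MYSQL_PASSWORD -h127.0.0.1 -e "SHOW VARIABLES LIKE '%storage_engine%';"
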
+ echo "You must have a localrc with necessary basic configuration defined before proceeding." exit 1 fi -# Source params -source ./stackrc +# Abort if openrc is not set +if [ ! -e $TOP_DIR/openrc ]; then + echo "You must have an openrc with ALL necessary passwords and credentials defined before proceeding." + exit 1 +fi + +# Source params. openrc sources stackrc which sources localrc +source $TOP_DIR/openrc # Set defaults not configured by stackrc TENANT=${TENANT:-admin} @@ -132,8 +137,8 @@ if [[ ! -r $TEMPEST_CONF ]]; then fi IDENTITY_USE_SSL=${IDENTITY_USE_SSL:-False} -IDENTITY_PORT=${IDENTITY_PORT:-5000} -IDENTITY_API_VERSION={$IDENTITY_API_VERSION:-v2.0} # Note: need v for now... +TEMPEST_IDENTITY_HOST=${IDENTITY_HOST:-127.0.0.1} +TEMPEST_IDENTITY_API_VERSION="v2.0" # Note: need v for now... # TODO(jaypipes): This is dumb and needs to be removed # from the Tempest configuration file entirely... IDENTITY_PATH=${IDENTITY_PATH:-tokens} @@ -167,14 +172,16 @@ ADMIN_TENANT_NAME={$ADMIN_TENANT:-admin} # Do any of the following need to be configurable? COMPUTE_CATALOG_TYPE=compute COMPUTE_CREATE_IMAGE_ENABLED=True -COMPUTE_RESIZE_AVAILABLE=True +COMPUTE_RESIZE_AVAILABLE=False # not supported with QEMU... COMPUTE_LOG_LEVEL=ERROR +BUILD_INTERVAL=10 +BUILD_TIMEOUT=600 sed -e " s,%IDENTITY_USE_SSL%,$IDENTITY_USE_SSL,g; - s,%IDENTITY_HOST%,$HOST_IP,g; + s,%IDENTITY_HOST%,$TEMPEST_IDENTITY_HOST,g; s,%IDENTITY_PORT%,$IDENTITY_PORT,g; - s,%IDENTITY_API_VERSION%,$IDENTITY_API_VERSION,g; + s,%IDENTITY_API_VERSION%,$TEMPEST_IDENTITY_API_VERSION,g; s,%IDENTITY_PATH%,$IDENTITY_PATH,g; s,%IDENTITY_STRATEGY%,$IDENTITY_STRATEGY,g; s,%USERNAME%,$OS_USERNAME,g; @@ -187,6 +194,8 @@ sed -e " s,%COMPUTE_CREATE_IMAGE_ENABLED%,$COMPUTE_CREATE_IMAGE_ENABLED,g; s,%COMPUTE_RESIZE_AVAILABLE%,$COMPUTE_RESIZE_AVAILABLE,g; s,%COMPUTE_LOG_LEVEL%,$COMPUTE_LOG_LEVEL,g; + s,%BUILD_INTERVAL%,$BUILD_INTERVAL,g; + s,%BUILD_TIMEOUT%,$BUILD_TIMEOUT,g; s,%IMAGE_ID%,$IMAGE_UUID,g; s,%IMAGE_ID_ALT%,$IMAGE_UUID_ALT,g; s,%FLAVOR_REF%,$FLAVOR_REF,g; @@ -196,6 +205,10 @@ sed -e " s,%ADMIN_TENANT_NAME%,$ADMIN_TENANT_NAME,g; " -i $TEMPEST_CONF +echo "Created tempest configuration file:" +cat $TEMPEST_CONF +echo "\n\n" + # Create config.ini CONFIG_INI_TMP=$(mktemp $CONFIG_INI.XXXXXX) From 836955f825234aa9a1e82795b58818438e228d8d Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Mon, 2 Apr 2012 15:22:55 -0700 Subject: [PATCH 478/967] XenServer: change install script for new install process Change-Id: Iee64e19fb21c1c633b5a2df7c4c154702f2a4526 --- tools/xen/scripts/install-os-vpx.sh | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index d45c3702..14240a75 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -38,7 +38,7 @@ usage() cat << EOF Usage: $0 [-f FILE_PATH] [-d DISK_SIZE] [-v BRIDGE_NAME] [-m BRIDGE_NAME] [-p BRIDGE_NAME] - [-k PARAMS] [-r RAM] [-i|-c] [-w] [-b] [-l NAME_LABEL] + [-k PARAMS] [-r RAM] [-i|-c] [-w] [-b] [-l NAME_LABEL] [-t TEMPLATE_NW_INSTALL] Installs XenServer OpenStack VPX. @@ -61,6 +61,7 @@ cat << EOF -r MiB Specifies RAM used by the VPX, in MiB. By default it will take the value from the XVA. -l name Specifies the name label for the VM. 
+ -t template Network install an openstack domU from this template EXAMPLES: @@ -88,7 +89,7 @@ EOF get_params() { - while getopts "hicwbf:d:v:m:p:k:r:l:" OPTION; + while getopts "hicwbf:d:v:m:p:k:r:l:t:" OPTION; do case $OPTION in h) usage @@ -127,9 +128,12 @@ get_params() v) BRIDGE_V=$OPTARG ;; - l) + l) NAME_LABEL=$OPTARG ;; + t) + TEMPLATE_NAME=$OPTARG + ;; ?) usage exit @@ -422,6 +426,16 @@ then destroy_vifs "$vm_uuid" set_all "$vm_uuid" +elif [ "$TEMPLATE_NAME" ] +then + echo $TEMPLATE_NAME + vm_uuid=$(xe_min vm-install template="$TEMPLATE_NAME" new-name-label="DevstackOSDomu") + destroy_vifs "$vm_uuid" + set_auto_start "$vm_uuid" + create_gi_vif "$vm_uuid" + create_vm_vif "$vm_uuid" + create_management_vif "$vm_uuid" + create_public_vif "$vm_uuid" else if [ ! -f "$VPX_FILE" ] then From 0af143b34ea32c88f22bb762a0bac6615b501a59 Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Mon, 2 Apr 2012 15:46:53 -0700 Subject: [PATCH 479/967] XenServer: new build and install scripts Change-Id: Ia13a9c8073e59edf98415ba5b9f3a9cbd1453d32 --- stack.sh | 2 +- tools/xen/build_xva.sh | 106 +++++----------- .../xen/{build_domU.sh => install_os_domU.sh} | 116 +++++++++++------- tools/xen/prepare_guest.sh | 8 -- tools/xen/scripts/install-os-vpx.sh | 16 ++- tools/xen/xenrc | 10 +- 6 files changed, 116 insertions(+), 142 deletions(-) rename tools/xen/{build_domU.sh => install_os_domU.sh} (69%) mode change 100644 => 100755 tools/xen/prepare_guest.sh diff --git a/stack.sh b/stack.sh index debbc4ea..8fa3902b 100755 --- a/stack.sh +++ b/stack.sh @@ -313,7 +313,7 @@ function read_password { if [ "$VIRT_DRIVER" = 'xenserver' ]; then PUBLIC_INTERFACE_DEFAULT=eth3 # allow build_domU.sh to specify the flat network bridge via kernel args - FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[^.]*' /proc/cmdline | cut -d= -f 2) + FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u) GUEST_INTERFACE_DEFAULT=eth1 else PUBLIC_INTERFACE_DEFAULT=br100 diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index c235485e..2f28b5fe 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -1,5 +1,27 @@ #!/bin/bash +set -e + +declare -a on_exit_hooks + +on_exit() +{ + for i in $(seq $((${#on_exit_hooks[*]} - 1)) -1 0) + do + eval "${on_exit_hooks[$i]}" + done +} + +add_on_exit() +{ + local n=${#on_exit_hooks[*]} + on_exit_hooks[$n]="$*" + if [[ $n -eq 0 ]] + then + trap on_exit EXIT + fi +} + # Abort if localrc is not set if [ ! -e ../../localrc ]; then echo "You must have a localrc with ALL necessary passwords defined before proceeding." @@ -16,27 +38,11 @@ source xenrc # Echo commands set -o xtrace -# Directory where we stage the build -STAGING_DIR=$TOP_DIR/stage - -# Option to clean out old stuff -CLEAN=${CLEAN:-0} -if [ "$CLEAN" = "1" ]; then - rm -rf $STAGING_DIR -fi - -# Download our base image. This image is made using prepare_guest.sh -BASE_IMAGE_URL=${BASE_IMAGE_URL:-http://images.ansolabs.com/xen/stage.tgz} -if [ ! -e $STAGING_DIR ]; then - if [ ! -e /tmp/stage.tgz ]; then - wget $BASE_IMAGE_URL -O /tmp/stage.tgz - fi - tar xfz /tmp/stage.tgz - cd $TOP_DIR -fi +GUEST_NAME="$1" -# Free up precious disk space -rm -f /tmp/stage.tgz +# Directory where we stage the build +STAGING_DIR=$($TOP_DIR/scripts/manage-vdi open $GUEST_NAME 0 1 | grep -o "/tmp/tmp.[[:alnum:]]*") +add_on_exit "$TOP_DIR/scripts/manage-vdi close $GUEST_NAME 0 1" # Make sure we have a stage if [ ! 
-d $STAGING_DIR/etc ]; then @@ -55,63 +61,26 @@ SCRIPT_DIR=$TOP_DIR/scripts UBUNTU_VERSION=`cat $STAGING_DIR/etc/lsb-release | grep "DISTRIB_CODENAME=" | sed "s/DISTRIB_CODENAME=//"` KERNEL_VERSION=`ls $STAGING_DIR/boot/vmlinuz* | head -1 | sed "s/.*vmlinuz-//"` -# Directory for xvas -XVA_DIR=$TOP_DIR/xvas - -# Create xva dir -mkdir -p $XVA_DIR - -# Path to xva -XVA=$XVA_DIR/$GUEST_NAME.xva - -# Setup fake grub -rm -rf $STAGING_DIR/boot/grub/ -mkdir -p $STAGING_DIR/boot/grub/ -cp $TEMPLATES_DIR/menu.lst.in $STAGING_DIR/boot/grub/menu.lst -sed -e "s,@KERNEL_VERSION@,$KERNEL_VERSION,g" -i $STAGING_DIR/boot/grub/menu.lst - -# Setup fstab, tty, and other system stuff -cp $FILES_DIR/fstab $STAGING_DIR/etc/fstab -cp $FILES_DIR/hvc0.conf $STAGING_DIR/etc/init/ - -# Put the VPX into UTC. -rm -f $STAGING_DIR/etc/localtime - # Configure dns (use same dns as dom0) cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf # Copy over devstack rm -f /tmp/devstack.tar -tar --exclude='stage' --exclude='xen/xvas' --exclude='xen/nova' -cvf /tmp/devstack.tar $TOP_DIR/../../../devstack -cd $STAGING_DIR/opt/stack/ -tar xf /tmp/devstack.tar +cd $TOP_DIR/../../ +tar --exclude='stage' --exclude='xen/xvas' --exclude='xen/nova' -cvf /tmp/devstack.tar . +mkdir -p $STAGING_DIR/opt/stack/devstack +tar xf /tmp/devstack.tar -C $STAGING_DIR/opt/stack/devstack cd $TOP_DIR -# Configure OVA -VDI_SIZE=$(($VDI_MB*1024*1024)) -PRODUCT_BRAND=${PRODUCT_BRAND:-openstack} -PRODUCT_VERSION=${PRODUCT_VERSION:-001} -BUILD_NUMBER=${BUILD_NUMBER:-001} -LABEL="$PRODUCT_BRAND $PRODUCT_VERSION-$BUILD_NUMBER" -OVA=$STAGING_DIR/tmp/ova.xml -cp $TEMPLATES_DIR/ova.xml.in $OVA -sed -e "s,@VDI_SIZE@,$VDI_SIZE,g" -i $OVA -sed -e "s,@PRODUCT_BRAND@,$PRODUCT_BRAND,g" -i $OVA -sed -e "s,@PRODUCT_VERSION@,$PRODUCT_VERSION,g" -i $OVA -sed -e "s,@BUILD_NUMBER@,$BUILD_NUMBER,g" -i $OVA - # Run devstack on launch cat <$STAGING_DIR/etc/rc.local # network restart required for getting the right gateway /etc/init.d/networking restart GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ DO_TGZ=0 bash /opt/stack/devstack/tools/xen/prepare_guest.sh > /opt/stack/prepare_guest.log 2>&1 -su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" stack +su -c "/opt/stack/run.sh > /opt/stack/run.sh.log 2>&1" stack exit 0 EOF -# Clean old xva. In the future may not do this every time. -rm -f $XVA - # Configure the hostname echo $GUEST_NAME > $STAGING_DIR/etc/hostname @@ -151,10 +120,6 @@ else sed -e "s,@ETH3_NETMASK@,$PUB_NETMASK,g" -i $INTERFACES fi -if [ -h $STAGING_DIR/sbin/dhclient3 ]; then - rm -f $STAGING_DIR/sbin/dhclient3 -fi - # Gracefully cp only if source file/dir exists function cp_it { if [ -e $1 ] || [ -d $1 ]; then @@ -181,11 +146,4 @@ UPLOAD_LEGACY_TTY=yes HOST_IP=$PUB_IP VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST EOF chmod 755 $STAGING_DIR/opt/stack/run.sh -# Create xva -if [ ! -e $XVA ]; then - rm -rf /tmp/mkxva* - UID=0 $SCRIPT_DIR/mkxva -o $XVA -t xva -x $OVA $STAGING_DIR $VDI_MB /tmp/ -fi - -echo "Built $(basename $XVA). If your dom0 is on a different machine, copy this to [devstackdir]/tools/xen/$(basename $XVA)" -echo "Also copy your localrc to [devstackdir]" +echo "Done" diff --git a/tools/xen/build_domU.sh b/tools/xen/install_os_domU.sh similarity index 69% rename from tools/xen/build_domU.sh rename to tools/xen/install_os_domU.sh index 5fa7aa85..31bcc40c 100755 --- a/tools/xen/build_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -1,5 +1,8 @@ #!/bin/bash +# Exit on errors +set -o errexit + # Abort if localrc is not set if [ ! 
-e ../../localrc ]; then echo "You must have a localrc with ALL necessary passwords defined before proceeding." @@ -19,25 +22,29 @@ source xenrc # Echo commands set -o xtrace -# Check for xva file -if [ ! -e $XVA ]; then - echo "Missing xva file. Please run build_xva.sh (ideally on a non dom0 host since the build can require lots of space)." - echo "Place the resulting xva file in $XVA" - exit 1 -fi +xe_min() +{ + local cmd="$1" + shift + xe "$cmd" --minimal "$@" +} -# Make sure we have git -if ! which git; then - GITDIR=/tmp/git-1.7.7 - cd /tmp - rm -rf $GITDIR* - wget http://git-core.googlecode.com/files/git-1.7.7.tar.gz - tar xfv git-1.7.7.tar.gz - cd $GITDIR - ./configure --with-curl --with-expat - make install - cd $TOP_DIR +cd $TOP_DIR +if [ -f ./master ] +then + rm -rf ./master + rm -rf ./nova fi +wget https://github.com/openstack/nova/zipball/master --no-check-certificate +unzip -o master -d ./nova +cp -pr ./nova/*/plugins/xenserver/xenapi/etc/xapi.d /etc/ +chmod a+x /etc/xapi.d/plugins/* + +mkdir -p /boot/guest + +GUEST_NAME=${GUEST_NAME:-"DevStackOSDomU"} +SNAME="ubuntusnapshot" +TNAME="ubuntuready" # Helper to create networks # Uses echo trickery to return network uuid @@ -48,23 +55,23 @@ function create_network() { netname=$4 if [ -z $br ] then - pif=$(xe pif-list --minimal device=$dev VLAN=$vlan) + pif=$(xe_min pif-list device=$dev VLAN=$vlan) if [ -z $pif ] then net=$(xe network-create name-label=$netname) else - net=$(xe network-list --minimal PIF-uuids=$pif) + net=$(xe_min network-list PIF-uuids=$pif) fi echo $net return 0 fi - if [ ! $(xe network-list --minimal params=bridge | grep -w --only-matching $br) ] + if [ ! $(xe_min network-list params=bridge | grep -w --only-matching $br) ] then echo "Specified bridge $br does not exist" echo "If you wish to use defaults, please keep the bridge name empty" exit 1 else - net=$(xe network-list --minimal bridge=$br) + net=$(xe_min network-list bridge=$br) echo $net fi } @@ -95,13 +102,13 @@ function create_vlan() { then return fi - if [ -z $(xe vlan-list --minimal tag=$vlan) ] + if [ -z $(xe_min vlan-list tag=$vlan) ] then - pif=$(xe pif-list --minimal network-uuid=$net) + pif=$(xe_min pif-list network-uuid=$net) # We created a brand new network this time if [ -z $pif ] then - pif=$(xe pif-list --minimal device=$dev VLAN=-1) + pif=$(xe_min pif-list device=$dev VLAN=-1) xe vlan-create pif-uuid=$pif vlan=$vlan network-uuid=$net else echo "VLAN does not exist but PIF attached to this network" @@ -133,24 +140,11 @@ fi # Enable ip forwarding at runtime as well echo 1 > /proc/sys/net/ipv4/ip_forward -# Set local storage il8n -SR_UUID=`xe sr-list --minimal name-label="Local storage"` -xe sr-param-set uuid=$SR_UUID other-config:i18n-key=local-storage - -# Checkout nova -git_clone $NOVA_REPO $TOP_DIR/nova $NOVA_BRANCH - -# Install plugins -cp -pr $TOP_DIR/nova/plugins/xenserver/xenapi/etc/xapi.d /etc/ -chmod a+x /etc/xapi.d/plugins/* -yum --enablerepo=base install -y parted -mkdir -p /boot/guest - # Shutdown previous runs DO_SHUTDOWN=${DO_SHUTDOWN:-1} if [ "$DO_SHUTDOWN" = "1" ]; then # Shutdown all domU's that created previously - xe vm-list --minimal name-label="$LABEL" | xargs ./scripts/uninstall-os-vpx.sh + xe_min vm-list name-label="$GUEST_NAME" | xargs ./scripts/uninstall-os-vpx.sh # Destroy any instances that were launched for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do @@ -168,18 +162,54 @@ fi # Start guest if [ -z 
$VM_BR ]; then - VM_BR=$(xe network-list --minimal uuid=$VM_NET params=bridge) + VM_BR=$(xe_min network-list uuid=$VM_NET params=bridge) fi if [ -z $MGT_BR ]; then - MGT_BR=$(xe network-list --minimal uuid=$MGT_NET params=bridge) + MGT_BR=$(xe_min network-list uuid=$MGT_NET params=bridge) fi if [ -z $PUB_BR ]; then - PUB_BR=$(xe network-list --minimal uuid=$PUB_NET params=bridge) + PUB_BR=$(xe_min network-list uuid=$PUB_NET params=bridge) fi -$TOP_DIR/scripts/install-os-vpx.sh -f $XVA -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -w -k "flat_network_bridge=${VM_BR}" + +templateuuid=$(xe template-list name-label="$TNAME") +if [ -n "$templateuuid" ] +then + vm_uuid=$(xe vm-install template="$TNAME" new-name-label="$GUEST_NAME") +else + template=$(xe_min template-list name-label="Ubuntu 11.10 (64-bit)") + if [ -z "$template" ] + then + $TOP_DIR/scripts/xenoneirictemplate.sh + fi + $TOP_DIR/scripts/install-os-vpx.sh -t "Ubuntu 11.10 (64-bit)" -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -r $OSDOMU_MEM_MB -k "flat_network_bridge=${VM_BR}" + + # Wait for install to finish + while true + do + state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted) + if [ -n "$state" ] + then + break + else + echo "Waiting for "$GUEST_NAME" to finish installation..." + sleep 30 + fi + done + + vm_uuid=$(xe_min vm-list name-label="$GUEST_NAME") + xe vm-param-set actions-after-reboot=Restart uuid="$vm_uuid" + + # Make template from VM + snuuid=$(xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME") + template_uuid=$(xe snapshot-clone uuid=$snuuid new-name-label="$TNAME") +fi + +$TOP_DIR/build_xva.sh "$GUEST_NAME" + +xe vm-start vm="$GUEST_NAME" if [ $PUB_IP == "dhcp" ]; then - PUB_IP=$(xe vm-list --minimal name-label=$GUEST_NAME params=networks | sed -ne 's,^.*3/ip: \([0-9.]*\).*$,\1,p') + PUB_IP=$(xe_min vm-list name-label=$GUEST_NAME params=networks | sed -ne 's,^.*3/ip: \([0-9.]*\).*$,\1,p') fi # If we have copied our ssh credentials, use ssh to monitor while the installation runs diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh old mode 100644 new mode 100755 index 77ce54a8..5d39ac65 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -6,13 +6,6 @@ set -x GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} STAGING_DIR=${STAGING_DIR:-stage} DO_TGZ=${DO_TGZ:-1} -KERNEL_VERSION=3.0.0-12-virtual - -# Debootstrap base system -if [ ! -d $STAGING_DIR ]; then - apt-get install debootstrap - debootstrap --arch amd64 oneiric $STAGING_DIR http://us.archive.ubuntu.com/ubuntu/ -fi # Sources.list cat <$STAGING_DIR/etc/apt/sources.list @@ -28,7 +21,6 @@ EOF # Install basics chroot $STAGING_DIR apt-get update -chroot $STAGING_DIR apt-get install -y linux-image-$KERNEL_VERSION chroot $STAGING_DIR apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool chroot $STAGING_DIR apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo chroot $STAGING_DIR pip install xenapi diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index 14240a75..fe5e8107 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -332,17 +332,11 @@ set_kernel_params() { local v="$1" local args=$KERNEL_PARAMS - local cmdline=$(cat /proc/cmdline) - for word in $cmdline - do - if echo "$word" | grep -q "geppetto" - then - args="$word $args" - fi - done if [ "$args" != "" ] then echo "Passing Geppetto args to VPX: $args." 
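The install path above polls xe in an unbounded while loop until the new domU reports power-state=halted. A minimal sketch of the same wait with an upper bound, so a stalled network install fails instead of hanging forever (the helper name and timeout value are illustrative assumptions, not part of the script):

    # Illustrative helper: wait up to $1 seconds for VM $2 to reach power-state=halted
    wait_for_halted() {
        local timeout=$1
        local name=$2
        while [ $timeout -gt 0 ]; do
            if [ -n "$(xe vm-list --minimal name-label="$name" power-state=halted)" ]; then
                return 0
            fi
            echo "Waiting for $name to finish installation..."
            sleep 30
            timeout=$((timeout - 30))
        done
        return 1
    }

Called as wait_for_halted 3600 "$GUEST_NAME" || exit 1, it behaves like the loop above but gives up after an hour.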
+ pvargs=$(xe vm-param-get param-name=PV-args uuid="$v") + args="$pvargs $args" xe vm-param-set PV-args="$args" uuid="$v" fi } @@ -429,13 +423,17 @@ then elif [ "$TEMPLATE_NAME" ] then echo $TEMPLATE_NAME - vm_uuid=$(xe_min vm-install template="$TEMPLATE_NAME" new-name-label="DevstackOSDomu") + vm_uuid=$(xe_min vm-install template="$TEMPLATE_NAME" new-name-label="$NAME_LABEL") destroy_vifs "$vm_uuid" set_auto_start "$vm_uuid" create_gi_vif "$vm_uuid" create_vm_vif "$vm_uuid" create_management_vif "$vm_uuid" create_public_vif "$vm_uuid" + set_kernel_params "$vm_uuid" + xe vm-param-set other-config:os-vpx=true uuid="$vm_uuid" + xe vm-param-set actions-after-reboot=Destroy uuid="$vm_uuid" + set_memory "$vm_uuid" else if [ ! -f "$VPX_FILE" ] then diff --git a/tools/xen/xenrc b/tools/xen/xenrc index 58fda31a..f434b111 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -1,10 +1,10 @@ #!/bin/bash # Name of this guest -GUEST_NAME=${GUEST_NAME:-ALLINONE} +GUEST_NAME=${GUEST_NAME:-DevStackOSDomU} # Size of image -VDI_MB=${VDI_MB:-2500} +VDI_MB=${VDI_MB:-5000} # VM Password GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} @@ -35,11 +35,7 @@ MGT_BR=${MGT_BR:-""} MGT_VLAN=${MGT_VLAN:-101} MGT_DEV=${MGT_DEV:-eth0} -# XVA Directory -XVA_DIR=${XVA_DIR:-`pwd`/xvas} - -# Path to xva file -XVA=${XVA:-$XVA_DIR/$GUEST_NAME.xva } +OSDOMU_MEM_MB=1024 # Source params cd ../.. && source ./stackrc && cd $TOP_DIR From c0ae3164308011dd10f31a4b9b44a6f96fd923f5 Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Thu, 5 Apr 2012 17:18:27 -0700 Subject: [PATCH 480/967] XenServer new install: modify README Modify the README to specify the new steps to install Openstack on XenServer. Specifically, the new install will use a network-installed Ubuntu VM on the XenServer host and install the OpenStack services on it. This eliminates the need for the dev machine (which was required with XenServer 6.0 and above). Change-Id: I5f86aa7929754e04ec4a959053c3fb871e3cda76 --- tools/xen/README.md | 68 +++++++++++++-------------------------------- 1 file changed, 19 insertions(+), 49 deletions(-) diff --git a/tools/xen/README.md b/tools/xen/README.md index 020ec99d..b77bbb51 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -1,7 +1,7 @@ Getting Started With XenServer 5.6 and Devstack =============================================== The purpose of the code in this directory it to help developers bootstrap -a XenServer 5.6 + Openstack development environment. This file gives +a XenServer 5.6 (or greater) + Openstack development environment. This file gives some pointers on how to get started. Xenserver is a Type 1 hypervisor, so it needs to be installed on bare metal. @@ -25,36 +25,25 @@ getting started (I use settings like this with a lappy + cheap wifi router): * XenServer Gateway: 192.168.1.1 * XenServer DNS: 192.168.1.1 -Note: ------- -It is advisable (and necessary if you are using Xenserver 6.0, due to space -limitations), to create the above mentioned OS domU, on a separate dev machine. -To do this, you will need to run Steps 2 on the dev machine (if required) as -well as the Xenserver host. Steps 3 and 4 should be run on the dev machine. -This process requires you to be root on the dev machine. - -Step 2: Prepare DOM0 -------------------- -At this point, your host is missing some critical software that you will -need to run devstack (like git). 
Do this to install required software: - - wget --no-check-certificate https://raw.github.com/openstack-dev/devstack/master/tools/xen/prepare_dom0.sh - chmod 755 prepare_dom0.sh - ./prepare_dom0.sh +Step 2: Download devstack +-------------------------- +On your XenServer host, run the following commands as root: -This step will also clone devstack in $DEVSTACKSRCROOT/devstack. -$DEVSTACKSRCROOT=/root by default. +wget --no-check-certificate https://github.com/openstack-dev/devstack/zipball/master +unzip -o master -d ./devstack +cd devstack/*/ -Step 3: Configure your localrc ------------------------------ +Step 3: Configure your localrc inside the devstack directory +------------------------------------------------------------ Devstack uses a localrc for user-specific configuration. Note that the XENAPI_PASSWORD must be your dom0 root password. Of course, use real passwords if this machine is exposed. - cat > $DEVSTACKSRCROOT/devstack/localrc < ./localrc < Date: Mon, 2 Apr 2012 15:45:27 -0700 Subject: [PATCH 481/967] XenServer: Add script to mount OS domU in dom0 Change-Id: I1ad3d63c55b95f2588007c5e88704022f54e1c06 --- tools/xen/scripts/manage-vdi | 52 ++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100755 tools/xen/scripts/manage-vdi diff --git a/tools/xen/scripts/manage-vdi b/tools/xen/scripts/manage-vdi new file mode 100755 index 00000000..a0a27e8a --- /dev/null +++ b/tools/xen/scripts/manage-vdi @@ -0,0 +1,52 @@ +#!/bin/bash + +set -eux + +action="$1" +vm="$2" +device="${3-0}" +part="${4-}" + +xe_min() +{ + local cmd="$1" + shift + xe "$cmd" --minimal "$@" +} + +vm_uuid=$(xe_min vm-list name-label="$vm") +vdi_uuid=$(xe_min vbd-list params=vdi-uuid vm-uuid="$vm_uuid" \ + userdevice="$device") + +dom0_uuid=$(xe_min vm-list is-control-domain=true) + +open_vdi() +{ + vbd_uuid=$(xe vbd-create vm-uuid="$dom0_uuid" vdi-uuid="$vdi_uuid" \ + device=autodetect) + mp=$(mktemp -d) + xe vbd-plug uuid="$vbd_uuid" + + udevsettle + dev=$(xe_min vbd-list params=device uuid="$vbd_uuid") + mount "/dev/$dev$part" "$mp" + echo "Your vdi is mounted at $mp" +} + +close_vdi() +{ + vbd_uuid=$(xe_min vbd-list vm-uuid="$dom0_uuid" vdi-uuid="$vdi_uuid") + dev=$(xe_min vbd-list params=device uuid="$vbd_uuid") + umount "/dev/$dev$part" + + xe vbd-unplug uuid=$vbd_uuid + xe vbd-destroy uuid=$vbd_uuid +} + +if [ "$action" == "open" ] +then + open_vdi +elif [ "$action" == "close" ] +then + close_vdi +fi From 360e29bc0dc857e82445696a5802db24d9c97f77 Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Mon, 9 Apr 2012 16:24:53 -0700 Subject: [PATCH 482/967] XenServer new install: Keep preseed file in dom0 Use dom0 to dish out the preseed.cfg file instead of getting it from anso Change-Id: I220948ba9a2cd3006b2c050d976dfcb49b8d956e --- tools/xen/devstackubuntupreseed.cfg | 470 ++++++++++++++++++++++++ tools/xen/install_os_domU.sh | 3 +- tools/xen/scripts/xenoneirictemplate.sh | 3 +- 3 files changed, 474 insertions(+), 2 deletions(-) create mode 100644 tools/xen/devstackubuntupreseed.cfg diff --git a/tools/xen/devstackubuntupreseed.cfg b/tools/xen/devstackubuntupreseed.cfg new file mode 100644 index 00000000..d8caaeed --- /dev/null +++ b/tools/xen/devstackubuntupreseed.cfg @@ -0,0 +1,470 @@ +### Contents of the preconfiguration file (for squeeze) +### Localization +# Preseeding only locale sets language, country and locale. 
+d-i debian-installer/locale string en_US + +# The values can also be preseeded individually for greater flexibility. +#d-i debian-installer/language string en +#d-i debian-installer/country string NL +#d-i debian-installer/locale string en_GB.UTF-8 +# Optionally specify additional locales to be generated. +#d-i localechooser/supported-locales en_US.UTF-8, nl_NL.UTF-8 + +# Keyboard selection. +# Disable automatic (interactive) keymap detection. +d-i console-setup/ask_detect boolean false +#d-i keyboard-configuration/modelcode string pc105 +d-i keyboard-configuration/layoutcode string us +# To select a variant of the selected layout (if you leave this out, the +# basic form of the layout will be used): +#d-i keyboard-configuration/variantcode string dvorak + +### Network configuration +# Disable network configuration entirely. This is useful for cdrom +# installations on non-networked devices where the network questions, +# warning and long timeouts are a nuisance. +#d-i netcfg/enable boolean false + +# netcfg will choose an interface that has link if possible. This makes it +# skip displaying a list if there is more than one interface. +d-i netcfg/choose_interface select auto + +# To pick a particular interface instead: +#d-i netcfg/choose_interface select eth1 + +# If you have a slow dhcp server and the installer times out waiting for +# it, this might be useful. +#d-i netcfg/dhcp_timeout string 60 + +# If you prefer to configure the network manually, uncomment this line and +# the static network configuration below. +#d-i netcfg/disable_autoconfig boolean true + +# If you want the preconfiguration file to work on systems both with and +# without a dhcp server, uncomment these lines and the static network +# configuration below. +#d-i netcfg/dhcp_failed note +#d-i netcfg/dhcp_options select Configure network manually + +# Static network configuration. +#d-i netcfg/get_nameservers string 192.168.1.1 +#d-i netcfg/get_ipaddress string 192.168.1.42 +#d-i netcfg/get_netmask string 255.255.255.0 +#d-i netcfg/get_gateway string 192.168.1.1 +#d-i netcfg/confirm_static boolean true + +# Any hostname and domain names assigned from dhcp take precedence over +# values set here. However, setting the values still prevents the questions +# from being shown, even if values come from dhcp. +d-i netcfg/get_hostname string stack +d-i netcfg/get_domain string stackpass + +# Disable that annoying WEP key dialog. +d-i netcfg/wireless_wep string +# The wacky dhcp hostname that some ISPs use as a password of sorts. +#d-i netcfg/dhcp_hostname string radish + +# If non-free firmware is needed for the network or other hardware, you can +# configure the installer to always try to load it, without prompting. Or +# change to false to disable asking. +#d-i hw-detect/load_firmware boolean true + +### Network console +# Use the following settings if you wish to make use of the network-console +# component for remote installation over SSH. This only makes sense if you +# intend to perform the remainder of the installation manually. +#d-i anna/choose_modules string network-console +#d-i network-console/password password r00tme +#d-i network-console/password-again password r00tme + +### Mirror settings +# If you select ftp, the mirror/country string does not need to be set. 
+#d-i mirror/protocol string ftp +d-i mirror/country string manual +d-i mirror/http/hostname string archive.ubuntu.com +d-i mirror/http/directory string /ubuntu +d-i mirror/http/proxy string + +# Alternatively: by default, the installer uses CC.archive.ubuntu.com where +# CC is the ISO-3166-2 code for the selected country. You can preseed this +# so that it does so without asking. +#d-i mirror/http/mirror select CC.archive.ubuntu.com + +# Suite to install. +#d-i mirror/suite string squeeze +# Suite to use for loading installer components (optional). +#d-i mirror/udeb/suite string squeeze +# Components to use for loading installer components (optional). +#d-i mirror/udeb/components multiselect main, restricted + +### Clock and time zone setup +# Controls whether or not the hardware clock is set to UTC. +d-i clock-setup/utc boolean true + +# You may set this to any valid setting for $TZ; see the contents of +# /usr/share/zoneinfo/ for valid values. +d-i time/zone string US/Pacific + +# Controls whether to use NTP to set the clock during the install +d-i clock-setup/ntp boolean true +# NTP server to use. The default is almost always fine here. +d-i clock-setup/ntp-server string 0.us.pool.ntp.org + +### Partitioning +## Partitioning example +# If the system has free space you can choose to only partition that space. +# This is only honoured if partman-auto/method (below) is not set. +# Alternatives: custom, some_device, some_device_crypto, some_device_lvm. +#d-i partman-auto/init_automatically_partition select biggest_free + +# Alternatively, you may specify a disk to partition. If the system has only +# one disk the installer will default to using that, but otherwise the device +# name must be given in traditional, non-devfs format (so e.g. /dev/hda or +# /dev/sda, and not e.g. /dev/discs/disc0/disc). +# For example, to use the first SCSI/SATA hard disk: +#d-i partman-auto/disk string /dev/sda +# In addition, you'll need to specify the method to use. +# The presently available methods are: +# - regular: use the usual partition types for your architecture +# - lvm: use LVM to partition the disk +# - crypto: use LVM within an encrypted partition +d-i partman-auto/method string regular + +# If one of the disks that are going to be automatically partitioned +# contains an old LVM configuration, the user will normally receive a +# warning. This can be preseeded away... +d-i partman-lvm/device_remove_lvm boolean true +# The same applies to pre-existing software RAID array: +d-i partman-md/device_remove_md boolean true +# And the same goes for the confirmation to write the lvm partitions. +d-i partman-lvm/confirm boolean true + +# For LVM partitioning, you can select how much of the volume group to use +# for logical volumes. +#d-i partman-auto-lvm/guided_size string max +#d-i partman-auto-lvm/guided_size string 10GB +#d-i partman-auto-lvm/guided_size string 50% + +# You can choose one of the three predefined partitioning recipes: +# - atomic: all files in one partition +# - home: separate /home partition +# - multi: separate /home, /usr, /var, and /tmp partitions +d-i partman-auto/choose_recipe select atomic + +# Or provide a recipe of your own... +# If you have a way to get a recipe file into the d-i environment, you can +# just point at it. +#d-i partman-auto/expert_recipe_file string /hd-media/recipe + +# If not, you can put an entire recipe into the preconfiguration file in one +# (logical) line. 
This example creates a small /boot partition, suitable +# swap, and uses the rest of the space for the root partition: +#d-i partman-auto/expert_recipe string \ +# boot-root :: \ +# 40 50 100 ext3 \ +# $primary{ } $bootable{ } \ +# method{ format } format{ } \ +# use_filesystem{ } filesystem{ ext3 } \ +# mountpoint{ /boot } \ +# . \ +# 500 10000 1000000000 ext3 \ +# method{ format } format{ } \ +# use_filesystem{ } filesystem{ ext3 } \ +# mountpoint{ / } \ +# . \ +# 64 512 300% linux-swap \ +# method{ swap } format{ } \ +# . + +# If you just want to change the default filesystem from ext3 to something +# else, you can do that without providing a full recipe. +d-i partman/default_filesystem string ext3 + +# The full recipe format is documented in the file partman-auto-recipe.txt +# included in the 'debian-installer' package or available from D-I source +# repository. This also documents how to specify settings such as file +# system labels, volume group names and which physical devices to include +# in a volume group. + +# This makes partman automatically partition without confirmation, provided +# that you told it what to do using one of the methods above. +d-i partman-partitioning/confirm_write_new_label boolean true +d-i partman/choose_partition select finish +d-i partman/confirm boolean true +d-i partman/confirm_nooverwrite boolean true + +## Partitioning using RAID +# The method should be set to "raid". +#d-i partman-auto/method string raid +# Specify the disks to be partitioned. They will all get the same layout, +# so this will only work if the disks are the same size. +#d-i partman-auto/disk string /dev/sda /dev/sdb + +# Next you need to specify the physical partitions that will be used. +#d-i partman-auto/expert_recipe string \ +# multiraid :: \ +# 1000 5000 4000 raid \ +# $primary{ } method{ raid } \ +# . \ +# 64 512 300% raid \ +# method{ raid } \ +# . \ +# 500 10000 1000000000 raid \ +# method{ raid } \ +# . + +# Last you need to specify how the previously defined partitions will be +# used in the RAID setup. Remember to use the correct partition numbers +# for logical partitions. RAID levels 0, 1, 5, 6 and 10 are supported; +# devices are separated using "#". +# Parameters are: +# \ +# + +#d-i partman-auto-raid/recipe string \ +# 1 2 0 ext3 / \ +# /dev/sda1#/dev/sdb1 \ +# . \ +# 1 2 0 swap - \ +# /dev/sda5#/dev/sdb5 \ +# . \ +# 0 2 0 ext3 /home \ +# /dev/sda6#/dev/sdb6 \ +# . + +# For additional information see the file partman-auto-raid-recipe.txt +# included in the 'debian-installer' package or available from D-I source +# repository. + +# This makes partman automatically partition without confirmation. +d-i partman-md/confirm boolean true +d-i partman-partitioning/confirm_write_new_label boolean true +d-i partman/choose_partition select finish +d-i partman/confirm boolean true +d-i partman/confirm_nooverwrite boolean true + +## Controlling how partitions are mounted +# The default is to mount by UUID, but you can also choose "traditional" to +# use traditional device names, or "label" to try filesystem labels before +# falling back to UUIDs. +#d-i partman/mount_style select uuid + +### Base system installation +# Configure APT to not install recommended packages by default. Use of this +# option can result in an incomplete system and should only be used by very +# experienced users. +#d-i base-installer/install-recommends boolean false + +# The kernel image (meta) package to be installed; "none" can be used if no +# kernel is to be installed. 
+#d-i base-installer/kernel/image string linux-generic + +### Account setup +# Skip creation of a root account (normal user account will be able to +# use sudo). The default is false; preseed this to true if you want to set +# a root password. +d-i passwd/root-login boolean true +# Alternatively, to skip creation of a normal user account. +d-i passwd/make-user boolean false + +# Root password, either in clear text +d-i passwd/root-password password stackpass +d-i passwd/root-password-again password stackpass +# or encrypted using an MD5 hash. +#d-i passwd/root-password-crypted password [MD5 hash] + +# To create a normal user account. +#d-i passwd/user-fullname string Ubuntu User +#d-i passwd/username string ubuntu +# Normal user's password, either in clear text +#d-i passwd/user-password password insecure +#d-i passwd/user-password-again password insecure +# or encrypted using an MD5 hash. +#d-i passwd/user-password-crypted password [MD5 hash] +# Create the first user with the specified UID instead of the default. +#d-i passwd/user-uid string 1010 +# The installer will warn about weak passwords. If you are sure you know +# what you're doing and want to override it, uncomment this. +d-i user-setup/allow-password-weak boolean true + +# The user account will be added to some standard initial groups. To +# override that, use this. +#d-i passwd/user-default-groups string audio cdrom video + +# Set to true if you want to encrypt the first user's home directory. +d-i user-setup/encrypt-home boolean false + +### Apt setup +# You can choose to install restricted and universe software, or to install +# software from the backports repository. +#d-i apt-setup/restricted boolean true +#d-i apt-setup/universe boolean true +#d-i apt-setup/backports boolean true +# Uncomment this if you don't want to use a network mirror. +#d-i apt-setup/use_mirror boolean false +# Select which update services to use; define the mirrors to be used. +# Values shown below are the normal defaults. +#d-i apt-setup/services-select multiselect security +#d-i apt-setup/security_host string security.ubuntu.com +#d-i apt-setup/security_path string /ubuntu + +# Additional repositories, local[0-9] available +#d-i apt-setup/local0/repository string \ +# http://local.server/ubuntu squeeze main +#d-i apt-setup/local0/comment string local server +# Enable deb-src lines +#d-i apt-setup/local0/source boolean true +# URL to the public key of the local repository; you must provide a key or +# apt will complain about the unauthenticated repository and so the +# sources.list line will be left commented out +#d-i apt-setup/local0/key string http://local.server/key + +# By default the installer requires that repositories be authenticated +# using a known gpg key. This setting can be used to disable that +# authentication. Warning: Insecure, not recommended. +#d-i debian-installer/allow_unauthenticated boolean true + +### Package selection +#tasksel tasksel/first multiselect ubuntu-desktop +#tasksel tasksel/first multiselect lamp-server, print-server +#tasksel tasksel/first multiselect kubuntu-desktop +tasksel tasksel/first multiselect openssh-server + +# Individual additional packages to install +#d-i pkgsel/include string openssh-server build-essential +# Whether to upgrade packages after debootstrap. 
+# Allowed values: none, safe-upgrade, full-upgrade +#d-i pkgsel/upgrade select none + +# Language pack selection +#d-i pkgsel/language-packs multiselect de, en, zh + +# Policy for applying updates. May be "none" (no automatic updates), +# "unattended-upgrades" (install security updates automatically), or +# "landscape" (manage system with Landscape). +d-i pkgsel/update-policy select unattended-upgrades + +# Some versions of the installer can report back on what software you have +# installed, and what software you use. The default is not to report back, +# but sending reports helps the project determine what software is most +# popular and include it on CDs. +#popularity-contest popularity-contest/participate boolean false + +# By default, the system's locate database will be updated after the +# installer has finished installing most packages. This may take a while, so +# if you don't want it, you can set this to "false" to turn it off. +d-i pkgsel/updatedb boolean false + +### Boot loader installation +# Grub is the default boot loader (for x86). If you want lilo installed +# instead, uncomment this: +#d-i grub-installer/skip boolean true +# To also skip installing lilo, and install no bootloader, uncomment this +# too: +#d-i lilo-installer/skip boolean true + +# With a few exceptions for unusual partitioning setups, GRUB 2 is now the +# default. If you need GRUB Legacy for some particular reason, then +# uncomment this: +#d-i grub-installer/grub2_instead_of_grub_legacy boolean false + +# This is fairly safe to set, it makes grub install automatically to the MBR +# if no other operating system is detected on the machine. +d-i grub-installer/only_debian boolean true + +# This one makes grub-installer install to the MBR if it also finds some other +# OS, which is less safe as it might not be able to boot that other OS. +d-i grub-installer/with_other_os boolean true + +# Alternatively, if you want to install to a location other than the mbr, +# uncomment and edit these lines: +#d-i grub-installer/only_debian boolean false +#d-i grub-installer/with_other_os boolean false +#d-i grub-installer/bootdev string (hd0,0) +# To install grub to multiple disks: +#d-i grub-installer/bootdev string (hd0,0) (hd1,0) (hd2,0) + +# Optional password for grub, either in clear text +#d-i grub-installer/password password r00tme +#d-i grub-installer/password-again password r00tme +# or encrypted using an MD5 hash, see grub-md5-crypt(8). +#d-i grub-installer/password-crypted password [MD5 hash] + +# Use the following option to add additional boot parameters for the +# installed system (if supported by the bootloader installer). +# Note: options passed to the installer will be added automatically. +#d-i debian-installer/add-kernel-opts string nousb + +### Finishing up the installation +# During installations from serial console, the regular virtual consoles +# (VT1-VT6) are normally disabled in /etc/inittab. Uncomment the next +# line to prevent this. +d-i finish-install/keep-consoles boolean true + +# Avoid that last message about the install being complete. +d-i finish-install/reboot_in_progress note + +# This will prevent the installer from ejecting the CD during the reboot, +# which is useful in some situations. +#d-i cdrom-detect/eject boolean false + +# This is how to make the installer shutdown when finished, but not +# reboot into the installed system. +#d-i debian-installer/exit/halt boolean true +# This will power off the machine instead of just halting it. 
+#d-i debian-installer/exit/poweroff boolean true + +### X configuration +# X can detect the right driver for some cards, but if you're preseeding, +# you override whatever it chooses. Still, vesa will work most places. +#xserver-xorg xserver-xorg/config/device/driver select vesa + +# A caveat with mouse autodetection is that if it fails, X will retry it +# over and over. So if it's preseeded to be done, there is a possibility of +# an infinite loop if the mouse is not autodetected. +#xserver-xorg xserver-xorg/autodetect_mouse boolean true + +# Monitor autodetection is recommended. +xserver-xorg xserver-xorg/autodetect_monitor boolean true +# Uncomment if you have an LCD display. +#xserver-xorg xserver-xorg/config/monitor/lcd boolean true +# X has three configuration paths for the monitor. Here's how to preseed +# the "medium" path, which is always available. The "simple" path may not +# be available, and the "advanced" path asks too many questions. +xserver-xorg xserver-xorg/config/monitor/selection-method \ + select medium +xserver-xorg xserver-xorg/config/monitor/mode-list \ + select 1024x768 @ 60 Hz + +### Preseeding other packages +# Depending on what software you choose to install, or if things go wrong +# during the installation process, it's possible that other questions may +# be asked. You can preseed those too, of course. To get a list of every +# possible question that could be asked during an install, do an +# installation, and then run these commands: +# debconf-get-selections --installer > file +# debconf-get-selections >> file + + +#### Advanced options +### Running custom commands during the installation +# d-i preseeding is inherently not secure. Nothing in the installer checks +# for attempts at buffer overflows or other exploits of the values of a +# preconfiguration file like this one. Only use preconfiguration files from +# trusted locations! To drive that home, and because it's generally useful, +# here's a way to run any shell command you'd like inside the installer, +# automatically. + +# This first command is run as early as possible, just after +# preseeding is read. +#d-i preseed/early_command string anna-install some-udeb +# This command is run immediately before the partitioner starts. It may be +# useful to apply dynamic partitioner preseeding that depends on the state +# of the disks (which may not be visible when preseed/early_command runs). +#d-i partman/early_command \ +# string debconf-set partman-auto/disk "$(list-devices disk | head -n1)" +# This command is run just before the install finishes, but when there is +# still a usable /target directory. You can chroot to /target and use it +# directly, or use the apt-install and in-target commands to easily install +# packages and run commands in the target system. 
+#d-i preseed/late_command string apt-install zsh; in-target chsh -s /bin/zsh diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 31bcc40c..54c93c71 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -179,7 +179,8 @@ else template=$(xe_min template-list name-label="Ubuntu 11.10 (64-bit)") if [ -z "$template" ] then - $TOP_DIR/scripts/xenoneirictemplate.sh + cp $TOP_DIR/devstackubuntupreseed.cfg /opt/xensource/www/ + $TOP_DIR/scripts/xenoneirictemplate.sh "${HOST_IP}/devstackubuntupreseed.cfg" fi $TOP_DIR/scripts/install-os-vpx.sh -t "Ubuntu 11.10 (64-bit)" -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -r $OSDOMU_MEM_MB -k "flat_network_bridge=${VM_BR}" diff --git a/tools/xen/scripts/xenoneirictemplate.sh b/tools/xen/scripts/xenoneirictemplate.sh index baf4866a..003acd57 100755 --- a/tools/xen/scripts/xenoneirictemplate.sh +++ b/tools/xen/scripts/xenoneirictemplate.sh @@ -15,6 +15,7 @@ fi distro="Ubuntu 11.10" arches=("32-bit" "64-bit") +preseedurl=${1:-"http://images.ansolabs.com/devstackubuntupreseed.cfg"} for arch in ${arches[@]} ; do echo "Attempting $distro ($arch)" @@ -25,7 +26,7 @@ for arch in ${arches[@]} ; do NEWUUID=$(xe vm-clone uuid=$LENNY new-name-label="$distro ($arch)") xe template-param-set uuid=$NEWUUID other-config:install-methods=http,ftp \ other-config:install-repository=http://archive.ubuntu.net/ubuntu \ - PV-args="-- quiet console=hvc0 partman/default_filesystem=ext3 locale=en_US console-setup/ask_detect=false keyboard-configuration/layoutcode=us netcfg/choose_interface=eth3 netcfg/get_hostname=unassigned-hostname netcfg/get_domain=unassigned-domain auto url=http://images.ansolabs.com/devstackubuntupreseed.cfg" \ + PV-args="-- quiet console=hvc0 partman/default_filesystem=ext3 locale=en_US console-setup/ask_detect=false keyboard-configuration/layoutcode=us netcfg/choose_interface=eth3 netcfg/get_hostname=os netcfg/get_domain=os auto url=${preseedurl}" \ other-config:debian-release=oneiric \ other-config:default_template=true From 668d9cb9fd6d476c4bdeb08ff3830073aa9223d9 Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Wed, 11 Apr 2012 11:42:54 -0700 Subject: [PATCH 483/967] XenServer: Allow static network config for install Change-Id: Ief24e21fcd8d4cd61296e4b81051d3332314d45c --- tools/xen/README.md | 5 +++++ tools/xen/scripts/xenoneirictemplate.sh | 17 +++++++++++++++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/tools/xen/README.md b/tools/xen/README.md index b77bbb51..d102b01d 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -59,6 +59,11 @@ Of course, use real passwords if this machine is exposed. 
ACTIVE_TIMEOUT=45 # Interface on which you would like to access services HOST_IP_IFACE=ethX + # First time Ubuntu network install params + NETINSTALLIP="dhcp" + NAMESERVERS="" + NETMASK="" + GATEWAY="" EOF Step 4: Run ./install_os_domU.sh from the tools/xen directory diff --git a/tools/xen/scripts/xenoneirictemplate.sh b/tools/xen/scripts/xenoneirictemplate.sh index 003acd57..1d9ec5e2 100755 --- a/tools/xen/scripts/xenoneirictemplate.sh +++ b/tools/xen/scripts/xenoneirictemplate.sh @@ -5,6 +5,9 @@ ## Author: Renuka Apte ## This is not an officially supported guest OS on XenServer 6.02 +BASE_DIR=$(cd $(dirname "$0") && pwd) +source $BASE_DIR/../../../localrc + LENNY=$(xe template-list name-label=Debian\ Lenny\ 5.0\ \(32-bit\) --minimal) if [[ -z $LENNY ]] ; then @@ -22,11 +25,21 @@ for arch in ${arches[@]} ; do if [[ -n $(xe template-list name-label="$distro ($arch)" params=uuid --minimal) ]] ; then echo "$distro ($arch)" already exists, Skipping else - + if [ -z $NETINSTALLIP ] + then + echo "NETINSTALLIP not set in localrc" + exit 1 + fi + pvargs="-- quiet console=hvc0 partman/default_filesystem=ext3 locale=en_US console-setup/ask_detect=false keyboard-configuration/layoutcode=us netcfg/choose_interface=eth3 netcfg/get_hostname=os netcfg/get_domain=os auto url=${preseedurl}" + if [ "$NETINSTALLIP" != "dhcp" ] + then + netcfgargs="netcfg/disable_autoconfig=true netcfg/get_nameservers=${NAMESERVERS} netcfg/get_ipaddress=${NETINSTALLIP} netcfg/get_netmask=${NETMASK} netcfg/get_gateway=${GATEWAY} netcfg/confirm_static=true" + pvargs="${pvargs} ${netcfgargs}" + fi NEWUUID=$(xe vm-clone uuid=$LENNY new-name-label="$distro ($arch)") xe template-param-set uuid=$NEWUUID other-config:install-methods=http,ftp \ other-config:install-repository=http://archive.ubuntu.net/ubuntu \ - PV-args="-- quiet console=hvc0 partman/default_filesystem=ext3 locale=en_US console-setup/ask_detect=false keyboard-configuration/layoutcode=us netcfg/choose_interface=eth3 netcfg/get_hostname=os netcfg/get_domain=os auto url=${preseedurl}" \ + PV-args="$pvargs" \ other-config:debian-release=oneiric \ other-config:default_template=true From e8309627edca10cb60ad3b284176469926c8c8d5 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 18 Apr 2012 01:45:16 -0500 Subject: [PATCH 484/967] Add Apache 2 LICENSE file Fixes bug 979172 Change-Id: I4ad8e78adf116a1e7120222456f6131542b11759 --- LICENSE | 176 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 176 insertions(+) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..68c771a0 --- /dev/null +++ b/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + From 2be6155c6e4ad7bc0df61c47a1cfbb093a48b43d Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 19 Apr 2012 11:16:47 -0500 Subject: [PATCH 485/967] Add python dev dependency pysendfile 2.0 needs this to build Change-Id: I40a2c08bfc6d533bdcb052d0c36353e95e059e03 --- files/apts/glance | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apts/glance b/files/apts/glance index 42d9fb82..9612267c 100644 --- a/files/apts/glance +++ b/files/apts/glance @@ -1,4 +1,5 @@ gcc +python-dev python-eventlet python-routes python-greenlet From 7a5f7f2d9111727eab19c884550f9ee90d84fa6b Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Fri, 20 Apr 2012 22:58:00 +0100 Subject: [PATCH 486/967] bug 986401: xe vm-shutdown expects running instances in os_install_domU.sh do not fail if the instance is already halted. 
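Appending "|| true" makes the teardown tolerant: when a guest was never paused or is already halted, xe returns non-zero, and the guard keeps that from being treated as a fatal error, so the rest of the cleanup loop still runs. A minimal sketch of the resulting idiom, mirroring the loop patched below:

    # Tear down leftover nova instances; ignore errors from guests that
    # are not paused or are already halted so one failing xe call does
    # not abort the whole cleanup.
    for uuid in $(xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"); do
        xe vm-unpause uuid=$uuid || true    # ignore error if the VM is not paused
        xe vm-shutdown uuid=$uuid || true   # ignore error if the VM is already halted
        xe vm-destroy uuid=$uuid
    done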
Change-Id: Id080535c1eb008c9fc7335c9004318bbfb41e1f7 --- tools/xen/install_os_domU.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 54c93c71..3c25d4a5 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -150,7 +150,7 @@ if [ "$DO_SHUTDOWN" = "1" ]; then for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do echo "Shutting down nova instance $uuid" xe vm-unpause uuid=$uuid || true - xe vm-shutdown uuid=$uuid + xe vm-shutdown uuid=$uuid || true xe vm-destroy uuid=$uuid done From 678a188e2d6ddd9230a80a63901202d573281d31 Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Mon, 23 Apr 2012 10:56:15 -0400 Subject: [PATCH 487/967] Tempest - Remove Kong support and fix images * Removes (non-working) Kong config.ini support * Replaces copy/paste code from stack.sh that was not properly grabbing image UUIDs with a call to glance index * Grabs any non-kernel non-ramdisk images and properly populates the IMAGE_UUID_ALT variable if more than 1 image is available Change-Id: Ieaf892b8b3fb4ef4fe2e6168f7a53bbe42dd684c --- tools/configure_tempest.sh | 224 ++++++++----------------------------- 1 file changed, 48 insertions(+), 176 deletions(-) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 6ba301f7..0eacc4a7 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -2,10 +2,22 @@ # # configure_tempest.sh - Build a tempest configuration file from devstack +echo "**************************************************" +echo "Configuring Tempest" +echo "**************************************************" + +# This script exits on an error so that errors don't compound and you see +# only the first error that occured. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. +set -o xtrace + function usage { echo "$0 - Build tempest.conf" echo "" - echo "Usage: $0 [configdir]" + echo "Usage: $0" exit 1 } @@ -13,21 +25,6 @@ if [ "$1" = "-h" ]; then usage fi -# Clean up any resources that may be in use -cleanup() { - set +o errexit - - # Mop up temporary files - if [ -n "$CONFIG_INI_TMP" -a -e "$CONFIG_INI_TMP" ]; then - rm -f $CONFIG_INI_TMP - fi - - # Kill ourselves to signal any calling process - trap 2; kill -2 $$ -} - -trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT - # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=$(cd $TOOLS_DIR/..; pwd) @@ -47,39 +44,16 @@ if [ ! -e $TOP_DIR/openrc ]; then exit 1 fi -# Source params. openrc sources stackrc which sources localrc +# Source params source $TOP_DIR/openrc -# Set defaults not configured by stackrc -TENANT=${TENANT:-admin} -USERNAME=${USERNAME:-admin} -IDENTITY_HOST=${IDENTITY_HOST:-$HOST_IP} -IDENTITY_PORT=${IDENTITY_PORT:-5000} -IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0} - # Where Openstack code lives DEST=${DEST:-/opt/stack} TEMPEST_DIR=$DEST/tempest - -CONFIG_DIR=${1:-$TEMPEST_DIR/etc} -CONFIG_INI=$CONFIG_DIR/config.ini +CONFIG_DIR=$TEMPEST_DIR/etc TEMPEST_CONF=$CONFIG_DIR/tempest.conf -if [ ! 
-f $DEST/.ramdisk ]; then - # Process network configuration vars - GUEST_NETWORK=${GUEST_NETWORK:-1} - GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes} - - GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50} - GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24} - GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0} - GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.$GUEST_NETWORK.1} - GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $GUEST_NETWORK`"} - GUEST_RAM=${GUEST_RAM:-1524288} - GUEST_CORES=${GUEST_CORES:-1} -fi - # Use the GUEST_IP unless an explicit IP is set by ``HOST_IP`` HOST_IP=${HOST_IP:-$GUEST_IP} # Use the first IP if HOST_IP still is not set @@ -87,58 +61,42 @@ if [ ! -n "$HOST_IP" ]; then HOST_IP=`LC_ALL=C /sbin/ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` fi -RABBIT_HOST=${RABBIT_HOST:-localhost} - -# Glance connection info. Note the port must be specified. -GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$HOST_IP:9292} -set `echo $GLANCE_HOSTPORT | tr ':' ' '` -GLANCE_HOST=$1 -GLANCE_PORT=$2 - -# Set up downloaded images -# Defaults to use first image - -IMAGE_DIR="" -IMAGE_NAME="" -for imagedir in $TOP_DIR/files/images/*; do - KERNEL="" - RAMDISK="" - IMAGE="" - IMAGE_RAMDISK="" - KERNEL=$(for f in "$imagedir/"*-vmlinuz*; do - [ -f "$f" ] && echo "$f" && break; done; true) - [ -n "$KERNEL" ] && ln -sf $KERNEL $imagedir/kernel - RAMDISK=$(for f in "$imagedir/"*-initrd*; do - [ -f "$f" ] && echo "$f" && break; done; true) - [ -n "$RAMDISK" ] && ln -sf $RAMDISK $imagedir/ramdisk && \ - IMAGE_RAMDISK="ari_location = $imagedir/ramdisk" - IMAGE=$(for f in "$imagedir/"*.img; do - [ -f "$f" ] && echo "$f" && break; done; true) - if [ -n "$IMAGE" ]; then - ln -sf $IMAGE $imagedir/disk - # Save the first image directory that contains a disk image link - if [ -z "$IMAGE_DIR" ]; then - IMAGE_DIR=$imagedir - IMAGE_NAME=$(basename ${IMAGE%.img}) - fi - fi +# Glance should already contain images to be used in tempest +# testing. Here we simply look for images stored in Glance +# and set the appropriate variables for use in the tempest config +# We ignore ramdisk and kernel images and set the IMAGE_UUID to +# the first image returned and set IMAGE_UUID_ALT to the second, +# if there is more than one returned... +IMAGE_LINES=`glance index` +IFS="$(echo -e "\n\r")" +IMAGES="" +for line in $IMAGE_LINES; do + IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|--\)" | grep -v "\(aki\|ari\)" | cut -d' ' -f1`" done -if [[ -n "$IMAGE_NAME" ]]; then - # Get the image UUID - IMAGE_UUID=$(nova image-list | grep " $IMAGE_NAME " | cut -d'|' -f2) - # Strip spaces off - IMAGE_UUID=$(echo $IMAGE_UUID) +# Create array of image UUIDs... +IFS=" " +IMAGES=($IMAGES) +NUM_IMAGES=${#IMAGES[*]} +echo "Found $NUM_IMAGES images" +if [[ $NUM_IMAGES -eq 0 ]]; then + echo "Found no valid images to use!" + exit 1 +fi +IMAGE_UUID=${IMAGES[0]} +IMAGE_UUID_ALT=$IMAGE_UUID +if [[ $NUM_IMAGES -gt 1 ]]; then + IMAGE_UUID_ALT=${IMAGES[1]} fi # Create tempest.conf from tempest.conf.tpl - if [[ ! -r $TEMPEST_CONF ]]; then cp $TEMPEST_CONF.tpl $TEMPEST_CONF fi IDENTITY_USE_SSL=${IDENTITY_USE_SSL:-False} -TEMPEST_IDENTITY_HOST=${IDENTITY_HOST:-127.0.0.1} -TEMPEST_IDENTITY_API_VERSION="v2.0" # Note: need v for now... +IDENTITY_HOST=${IDENTITY_HOST:-127.0.0.1} +IDENTITY_PORT=${IDENTITY_PORT:-5000} +IDENTITY_API_VERSION="v2.0" # Note: need v for now... # TODO(jaypipes): This is dumb and needs to be removed # from the Tempest configuration file entirely... 
IDENTITY_PATH=${IDENTITY_PATH:-tokens} @@ -157,10 +115,6 @@ ALT_USERNAME=$OS_USERNAME ALT_PASSWORD=$OS_PASSWORD ALT_TENANT_NAME=$OS_TENANT_NAME -# TODO(jaypipes): Support multiple images instead of plopping -# the IMAGE_UUID into both the image_ref and image_ref_alt slots -IMAGE_UUID_ALT=$IMAGE_UUID - # TODO(jaypipes): Support configurable flavor refs here... FLAVOR_REF=1 FLAVOR_REF_ALT=2 @@ -179,9 +133,9 @@ BUILD_TIMEOUT=600 sed -e " s,%IDENTITY_USE_SSL%,$IDENTITY_USE_SSL,g; - s,%IDENTITY_HOST%,$TEMPEST_IDENTITY_HOST,g; + s,%IDENTITY_HOST%,$IDENTITY_HOST,g; s,%IDENTITY_PORT%,$IDENTITY_PORT,g; - s,%IDENTITY_API_VERSION%,$TEMPEST_IDENTITY_API_VERSION,g; + s,%IDENTITY_API_VERSION%,$IDENTITY_API_VERSION,g; s,%IDENTITY_PATH%,$IDENTITY_PATH,g; s,%IDENTITY_STRATEGY%,$IDENTITY_STRATEGY,g; s,%USERNAME%,$OS_USERNAME,g; @@ -207,90 +161,8 @@ sed -e " echo "Created tempest configuration file:" cat $TEMPEST_CONF -echo "\n\n" - -# Create config.ini - -CONFIG_INI_TMP=$(mktemp $CONFIG_INI.XXXXXX) -if [ "$UPLOAD_LEGACY_TTY" ]; then - cat >$CONFIG_INI_TMP <$CONFIG_INI_TMP <>$CONFIG_INI_TMP < Date: Tue, 24 Apr 2012 16:05:08 -0400 Subject: [PATCH 488/967] Fixes typo on ADMIN_XXX variables Change-Id: Idae7b0db7d2cbc28873d613dd31cf2fff20b7855 --- tools/configure_tempest.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 0eacc4a7..52e501ef 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -119,9 +119,9 @@ ALT_TENANT_NAME=$OS_TENANT_NAME FLAVOR_REF=1 FLAVOR_REF_ALT=2 -ADMIN_USERNAME={$ADMIN_USERNAME:-admin} -ADMIN_PASSWORD={$ADMIN_PASSWORD:-secrete} -ADMIN_TENANT_NAME={$ADMIN_TENANT:-admin} +ADMIN_USERNAME=${ADMIN_USERNAME:-admin} +ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete} +ADMIN_TENANT_NAME=${ADMIN_TENANT:-admin} # Do any of the following need to be configurable? COMPUTE_CATALOG_TYPE=compute From 4fcab6d0cf381a147e40121e0d0bc940d6882476 Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Tue, 24 Apr 2012 14:08:49 -0700 Subject: [PATCH 489/967] Add nosexunit as a pip requires. Install the nose xunit plugin so that tempest can produce nice reports in Jenkins. 
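The report Jenkins consumes is JUnit-style XML. For illustration only -- these are the flags of nose's built-in xunit plugin, not options specific to the nosexunit package being added here -- a job could generate such a report from a tempest checkout roughly like this, assuming the tests live in the tempest package:

    # Run the tempest suite and write a JUnit-style XML report that a
    # Jenkins job can publish; the paths and test target are examples.
    cd /opt/stack/tempest
    nosetests --with-xunit --xunit-file=nosetests.xml tempest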
Change-Id: Ia8c4c9db8b0733d09a0bef59a08f438c6a92f1ca --- files/pips/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/files/pips/tempest b/files/pips/tempest index df7f4230..6eeb5b9c 100644 --- a/files/pips/tempest +++ b/files/pips/tempest @@ -1 +1,2 @@ pika +nosexunit # For use by jenkins in producing reports From 45495258d64589795b16f2b1927f5ca3490f9e16 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 13 Apr 2012 13:16:38 -0500 Subject: [PATCH 490/967] Add glance client * python-glanceclient overrides the old client shipped with glance in /usr/local/bin * start adding exercises Change-Id: I460ed5749bca69425f23d328c0537c2ef91f84a4 --- exercises/boot_from_volume.sh | 2 +- exercises/client-args.sh | 4 +++- exercises/client-env.sh | 2 +- exercises/floating_ips.sh | 4 ++-- exercises/volumes.sh | 4 ++-- files/default_catalog.templates | 6 +++--- stack.sh | 15 +++++++++------ stackrc | 8 ++++---- 8 files changed, 25 insertions(+), 20 deletions(-) diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index c707b470..6a0937ab 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -51,7 +51,7 @@ DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova} # ================= # Grab the id of the image to launch -IMAGE=`glance -f index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` +IMAGE=`glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1` die_if_not_set IMAGE "Failure getting image" # Instance and volume names diff --git a/exercises/client-args.sh b/exercises/client-args.sh index 66fddcf1..1d7d5b6c 100755 --- a/exercises/client-args.sh +++ b/exercises/client-args.sh @@ -46,7 +46,9 @@ unset OS_AUTH_URL # Common authentication args TENANT_ARG="--os_tenant_name=$x_TENANT_NAME" +TENANT_ARG_DASH="--os-tenant-name=$x_TENANT_NAME" ARGS="--os_username=$x_USERNAME --os_password=$x_PASSWORD --os_auth_url=$x_AUTH_URL" +ARGS_DASH="--os-username=$x_USERNAME --os-password=$x_PASSWORD --os-auth-url=$x_AUTH_URL" # Set global return RETURN=0 @@ -94,7 +96,7 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then STATUS_GLANCE="Skipped" else echo -e "\nTest Glance" - if glance $TENANT_ARG $ARGS index; then + if glance $TENANT_ARG_DASH $ARGS_DASH image-list; then STATUS_GLANCE="Succeeded" else STATUS_GLANCE="Failed" diff --git a/exercises/client-env.sh b/exercises/client-env.sh index af2c4c24..10871a6a 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -107,7 +107,7 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then STATUS_GLANCE="Skipped" else echo -e "\nTest Glance" - if glance index; then + if glance image-list; then STATUS_GLANCE="Succeeded" else STATUS_GLANCE="Failed" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 9974b4b9..dd40aa0b 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -63,10 +63,10 @@ nova list nova image-list # But we recommend using glance directly -glance -f index +glance image-list # Grab the id of the image to launch -IMAGE=`glance -f index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` +IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) # Security Groups # --------------- diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 1abbecc0..b62427fc 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -53,10 +53,10 @@ nova list nova image-list # But we recommend using glance directly -glance -f index +glance image-list # Grab the id of the image to launch -IMAGE=`glance -f index | egrep $DEFAULT_IMAGE_NAME | head -1 
| cut -d" " -f1` +IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) # determinine instance type # ------------------------- diff --git a/files/default_catalog.templates b/files/default_catalog.templates index 31618abb..66052b6a 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -30,7 +30,7 @@ catalog.RegionOne.s3.internalURL = http://%SERVICE_HOST%:%S3_SERVICE_PORT% catalog.RegionOne.s3.name = S3 Service -catalog.RegionOne.image.publicURL = http://%SERVICE_HOST%:9292/v1 -catalog.RegionOne.image.adminURL = http://%SERVICE_HOST%:9292/v1 -catalog.RegionOne.image.internalURL = http://%SERVICE_HOST%:9292/v1 +catalog.RegionOne.image.publicURL = http://%SERVICE_HOST%:9292 +catalog.RegionOne.image.adminURL = http://%SERVICE_HOST%:9292 +catalog.RegionOne.image.internalURL = http://%SERVICE_HOST%:9292 catalog.RegionOne.image.name = Image Service diff --git a/stack.sh b/stack.sh index 8fa3902b..1b69475b 100755 --- a/stack.sh +++ b/stack.sh @@ -201,6 +201,7 @@ OFFLINE=`trueorfalse False $OFFLINE` NOVA_DIR=$DEST/nova HORIZON_DIR=$DEST/horizon GLANCE_DIR=$DEST/glance +GLANCECLIENT_DIR=$DEST/python-glanceclient KEYSTONE_DIR=$DEST/keystone NOVACLIENT_DIR=$DEST/python-novaclient KEYSTONECLIENT_DIR=$DEST/python-keystoneclient @@ -643,6 +644,7 @@ git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH # python client library to nova that horizon (and others) use git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH +git_clone $GLANCECLIENT_REPO $GLANCECLIENT_DIR $GLANCECLIENT_BRANCH # glance, swift middleware and nova api needs keystone middleware if is_service_enabled key g-api n-api swift; then @@ -715,6 +717,9 @@ if is_service_enabled melange; then cd $MELANGECLIENT_DIR; sudo python setup.py develop fi +# Do this _after_ glance is installed to override the old binary +cd $GLANCECLIENT_DIR; sudo python setup.py develop + # Syslog # ------ @@ -1854,21 +1859,19 @@ if is_service_enabled g-reg; then esac if [ "$CONTAINER_FORMAT" = "bare" ]; then - glance add --silent-upload -A $TOKEN name="$IMAGE_NAME" is_public=true container_format=$CONTAINER_FORMAT disk_format=$DISK_FORMAT < <(zcat --force "${IMAGE}") + glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}") else # Use glance client to add the kernel the root filesystem. # We parse the results of the first upload to get the glance ID of the # kernel for use when uploading the root filesystem. 
KERNEL_ID=""; RAMDISK_ID=""; if [ -n "$KERNEL" ]; then - RVAL=`glance add --silent-upload -A $TOKEN name="$IMAGE_NAME-kernel" is_public=true container_format=aki disk_format=aki < "$KERNEL"` - KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` + KERNEL_ID=$(glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --public --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2) fi if [ -n "$RAMDISK" ]; then - RVAL=`glance add --silent-upload -A $TOKEN name="$IMAGE_NAME-ramdisk" is_public=true container_format=ari disk_format=ari < "$RAMDISK"` - RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` + RAMDISK_ID=$(glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --public --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) fi - glance add -A $TOKEN name="${IMAGE_NAME%.img}" is_public=true container_format=ami disk_format=ami ${KERNEL_ID:+kernel_id=$KERNEL_ID} ${RAMDISK_ID:+ramdisk_id=$RAMDISK_ID} < <(zcat --force "${IMAGE}") + glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --public --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" fi done fi diff --git a/stackrc b/stackrc index d0fa1c25..3ad46ccc 100644 --- a/stackrc +++ b/stackrc @@ -17,14 +17,14 @@ NOVA_BRANCH=master SWIFT_REPO=https://github.com/openstack/swift.git SWIFT_BRANCH=master -# swift and keystone integration -SWIFT_KEYSTONE_REPO=https://github.com/cloudbuilders/swift-keystone2.git -SWIFT_KEYSTONE_BRANCH=master - # image catalog service GLANCE_REPO=https://github.com/openstack/glance.git GLANCE_BRANCH=master +# python glance client library +GLANCECLIENT_REPO=https://github.com/openstack/python-glanceclient +GLANCECLIENT_BRANCH=master + # unified auth system (manages accounts/tokens) KEYSTONE_REPO=https://github.com/openstack/keystone.git KEYSTONE_BRANCH=master From a1c87388a45ede40cb150a137560d0aeb50bee0c Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Fri, 27 Apr 2012 17:46:58 -0400 Subject: [PATCH 491/967] Fixes parsing of glance(client) image-list. There are still failures when tempest runs because of recent changes with glanceclient, but at least this patch gets the tools/configure_tempest working again. Change-Id: I73a5042dac2c930998663c478fb2ccd907c3ef87 --- tools/configure_tempest.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 0eacc4a7..dd433136 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -67,11 +67,11 @@ fi # We ignore ramdisk and kernel images and set the IMAGE_UUID to # the first image returned and set IMAGE_UUID_ALT to the second, # if there is more than one returned... -IMAGE_LINES=`glance index` +IMAGE_LINES=`glance image-list` IFS="$(echo -e "\n\r")" IMAGES="" for line in $IMAGE_LINES; do - IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|--\)" | grep -v "\(aki\|ari\)" | cut -d' ' -f1`" + IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | cut -d' ' -f2`" done # Create array of image UUIDs... 
IFS=" " From 90e7eeb24a95b28033dad045e41b6e4645160d36 Mon Sep 17 00:00:00 2001 From: Mark McLoughlin Date: Mon, 30 Apr 2012 20:06:04 +0100 Subject: [PATCH 492/967] Remove duplicate setting of keystone's template_file option It seems pretty clear that this is a duplicate line Change-Id: I3390df30cb34ced9fe74bdea387dbc67f5ee8f10 --- stack.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/stack.sh b/stack.sh index 1b69475b..c43c9e2c 100755 --- a/stack.sh +++ b/stack.sh @@ -1663,7 +1663,6 @@ if is_service_enabled key; then # Rewrite stock keystone.conf: iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN" iniset $KEYSTONE_CONF sql connection "$BASE_SQL_CONN/keystone?charset=utf8" - iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG" iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2" # Configure keystone.conf to use templates iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.TemplatedCatalog" From c0e1ef520cead9038b345f28652801f75857c8a2 Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Mon, 30 Apr 2012 15:56:13 -0400 Subject: [PATCH 493/967] Add host/port/api_version to tempest.conf * Something exposed by LP #992096 was that the image tests were broken because the URL returned from the service catalog was no longer including a version identifier. The fix in Tempest was to pass the configure_via_auth=False parameter to the glance.client.Client constructor. However, in order for this to work, the host/port in the [image] section of the Tempest configuration file needs to be set Change-Id: I9f661a02270a1ad52c10f2233baf899e5f706c82 --- tools/configure_tempest.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index dd433136..99c50128 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -131,6 +131,11 @@ COMPUTE_LOG_LEVEL=ERROR BUILD_INTERVAL=10 BUILD_TIMEOUT=600 +# Image test configuration options... +IMAGE_HOST=${IMAGE_HOST:-127.0.0.1} +IMAGE_PORT=${IMAGE_PORT:-9292} +IMAGE_API_VERSION="1" + sed -e " s,%IDENTITY_USE_SSL%,$IDENTITY_USE_SSL,g; s,%IDENTITY_HOST%,$IDENTITY_HOST,g; @@ -154,6 +159,9 @@ sed -e " s,%IMAGE_ID_ALT%,$IMAGE_UUID_ALT,g; s,%FLAVOR_REF%,$FLAVOR_REF,g; s,%FLAVOR_REF_ALT%,$FLAVOR_REF_ALT,g; + s,%IMAGE_HOST%,$IMAGE_HOST,g; + s,%IMAGE_PORT%,$IMAGE_PORT,g; + s,%IMAGE_API_VERSION%,$IMAGE_API_VERSION,g; s,%ADMIN_USERNAME%,$ADMIN_USERNAME,g; s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g; s,%ADMIN_TENANT_NAME%,$ADMIN_TENANT_NAME,g; From 255db3da19c11ce3b6d7dbbb871a57d7df215aae Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 27 Apr 2012 15:29:14 -0500 Subject: [PATCH 494/967] Update quantum.sh for glance client change Change-Id: Ib67301b26e1c3e1b68669eed6cd89e40687b14e9 --- exercises/quantum.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/quantum.sh b/exercises/quantum.sh index 943a07d7..a6df7656 100755 --- a/exercises/quantum.sh +++ b/exercises/quantum.sh @@ -103,7 +103,7 @@ TOKEN=`keystone token-get | grep ' id ' | awk '{print $4}'` # Various functions. 
#------------------------------------------------------------------------------ function get_image_id { - local IMAGE_ID=`glance -f -A $TOKEN index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1` + local IMAGE_ID=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) echo "$IMAGE_ID" } From 2fb5bce5b0e6037ff8dbe5b7b0d53288715e803a Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 30 Apr 2012 12:12:50 -0400 Subject: [PATCH 495/967] libxml2-dev package is needed for lxml.etree added fedora package as well Change-Id: I7bf34ad1842a6320536ff9ce468c5cb9f0ef3cd5 --- files/apts/glance | 1 + files/rpms/glance | 1 + 2 files changed, 2 insertions(+) diff --git a/files/apts/glance b/files/apts/glance index 9612267c..a05e9f2e 100644 --- a/files/apts/glance +++ b/files/apts/glance @@ -1,4 +1,5 @@ gcc +libxml2-dev python-dev python-eventlet python-routes diff --git a/files/rpms/glance b/files/rpms/glance index 141fe972..e38f2392 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -1,3 +1,4 @@ +libxml2-devel python-argparse python-eventlet python-greenlet From 77a4e3a0f0378517307e07beb73ca266791d0c4c Mon Sep 17 00:00:00 2001 From: Andrew Bogott Date: Tue, 1 May 2012 00:07:29 -0500 Subject: [PATCH 496/967] Invite python-openstackclient to the party. (It doesn't do much, yet.) Change-Id: If625a15d2d979b91b1d5d764b24c63acaf154657 --- stack.sh | 3 +++ stackrc | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/stack.sh b/stack.sh index 71c5ffc4..52859f5f 100755 --- a/stack.sh +++ b/stack.sh @@ -205,6 +205,7 @@ GLANCECLIENT_DIR=$DEST/python-glanceclient KEYSTONE_DIR=$DEST/keystone NOVACLIENT_DIR=$DEST/python-novaclient KEYSTONECLIENT_DIR=$DEST/python-keystoneclient +OPENSTACKCLIENT_DIR=$DEST/python-openstackclient NOVNC_DIR=$DEST/noVNC SWIFT_DIR=$DEST/swift QUANTUM_DIR=$DEST/quantum @@ -644,6 +645,7 @@ git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH # python client library to nova that horizon (and others) use git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH +git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH git_clone $GLANCECLIENT_REPO $GLANCECLIENT_DIR $GLANCECLIENT_BRANCH # glance, swift middleware and nova api needs keystone middleware @@ -691,6 +693,7 @@ fi # allowing ``import nova`` or ``import glance.client`` cd $KEYSTONECLIENT_DIR; sudo python setup.py develop cd $NOVACLIENT_DIR; sudo python setup.py develop +cd $OPENSTACKCLIENT_DIR; sudo python setup.py develop if is_service_enabled key g-api n-api swift; then cd $KEYSTONE_DIR; sudo python setup.py develop fi diff --git a/stackrc b/stackrc index 3ad46ccc..092ba9ee 100644 --- a/stackrc +++ b/stackrc @@ -41,6 +41,10 @@ HORIZON_BRANCH=master NOVACLIENT_REPO=https://github.com/openstack/python-novaclient.git NOVACLIENT_BRANCH=master +# Shared openstack python client library +OPENSTACKCLIENT_REPO=https://github.com/openstack/python-openstackclient.git +OPENSTACKCLIENT_BRANCH=master + # python keystone client library to nova that horizon uses KEYSTONECLIENT_REPO=https://github.com/openstack/python-keystoneclient KEYSTONECLIENT_BRANCH=master From f106240ca5c95c8614e9391e094b786a3523743f Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 1 May 2012 16:43:15 -0400 Subject: [PATCH 497/967] n-api also requires glance for now make sure to install it's dependencies Change-Id: 
I05b34709aa94048a2f4f80e8d46b2f61c95ed81a --- stack.sh | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 71c5ffc4..3dc50262 100755 --- a/stack.sh +++ b/stack.sh @@ -586,7 +586,16 @@ function get_packages() { if [[ -e ${package_dir}/${service} ]]; then file_to_parse="${file_to_parse} $service" fi - if [[ $service == n-* ]]; then + # NOTE(sdague) n-api needs glance for now because that's where + # glance client is + if [[ $service == n-api ]]; then + if [[ ! $file_to_parse =~ nova ]]; then + file_to_parse="${file_to_parse} nova" + fi + if [[ ! $file_to_parse =~ glance ]]; then + file_to_parse="${file_to_parse} glance" + fi + elif [[ $service == n-* ]]; then if [[ ! $file_to_parse =~ nova ]]; then file_to_parse="${file_to_parse} nova" fi From 0ac5a8ae69878a6d85319afcdcd1d0729ae6cfde Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 2 May 2012 00:27:27 +0000 Subject: [PATCH 498/967] change volume name template to prepare for 6511 Change-Id: I1162c169eeffbd9adf9cfb9f473761d6f8b2d120 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 71c5ffc4..a7891afe 100755 --- a/stack.sh +++ b/stack.sh @@ -1526,7 +1526,7 @@ else fi if is_service_enabled n-vol; then add_nova_opt "volume_group=$VOLUME_GROUP" - add_nova_opt "volume_name_template=${VOLUME_NAME_PREFIX}%08x" + add_nova_opt "volume_name_template=${VOLUME_NAME_PREFIX}%s" # oneiric no longer supports ietadm add_nova_opt "iscsi_helper=tgtadm" fi From 7d13f309bcd1f799e7fd90780614243342cfc6f7 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 19 Apr 2012 22:26:16 +0100 Subject: [PATCH 499/967] bug 985905: multi_host flag cunfusing settings switch to using trueorfalse for parsing MULTI_HOST values. Change-Id: I289563e0cd05cda014198cf21b3c88897aba5d4f --- exercises/floating_ips.sh | 4 ++-- exercises/quantum.sh | 4 ++-- exercises/volumes.sh | 4 ++-- stack.sh | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index dd40aa0b..82f29eb4 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -121,8 +121,8 @@ IP=`nova show $VM_UUID | grep "private network" | get_field 2` die_if_not_set IP "Failure retrieving IP address" # for single node deployments, we can ping private ips -MULTI_HOST=${MULTI_HOST:-0} -if [ "$MULTI_HOST" = "0" ]; then +MULTI_HOST=`trueorfalse False $MULTI_HOST` +if [ "$MULTI_HOST" = "False" ]; then # sometimes the first ping fails (10 seconds isn't enough time for the VM's # network to respond?), so let's ping for a default of 15 seconds with a # timeout of a second for each ping. diff --git a/exercises/quantum.sh b/exercises/quantum.sh index 943a07d7..81781121 100755 --- a/exercises/quantum.sh +++ b/exercises/quantum.sh @@ -255,8 +255,8 @@ function ping_vms { export OS_PASSWORD=nova PUBLIC_IP2=`nova show $VM_UUID2 | grep public-net1 | awk '{print $5}'` - MULTI_HOST=${MULTI_HOST:-0} - if [ "$MULTI_HOST" = "0" ]; then + MULTI_HOST=`trueorfalse False $MULTI_HOST` + if [ "$MULTI_HOST" = "False" ]; then # sometimes the first ping fails (10 seconds isn't enough time for the VM's # network to respond?), so let's ping for a default of 15 seconds with a # timeout of a second for each ping. 
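Normalizing MULTI_HOST through trueorfalse means the comparison below always tests against the literal string "False", so localrc settings such as 0, no, or False (and their truthy counterparts) behave consistently. A sketch of how such a helper can be written -- DevStack defines its own version elsewhere in the tree, which may differ in detail:

    # Normalize a boolean-ish value to "True"/"False", keeping the
    # caller-supplied default when the value is empty or unrecognized.
    function trueorfalse {
        local default=$1
        local testval=$2
        [[ -z "$testval" ]] && { echo "$default"; return; }
        [[ "0 no false False FALSE" =~ "$testval" ]] && { echo "False"; return; }
        [[ "1 yes true True TRUE" =~ "$testval" ]] && { echo "True"; return; }
        echo "$default"
    }

    MULTI_HOST=$(trueorfalse False $MULTI_HOST)
    if [ "$MULTI_HOST" = "False" ]; then
        echo "single-host mode: private IPs are reachable from this node"
    fi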
diff --git a/exercises/volumes.sh b/exercises/volumes.sh index b62427fc..6749558a 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -97,8 +97,8 @@ IP=`nova show $VM_UUID | grep "private network" | get_field 2` die_if_not_set IP "Failure retrieving IP address" # for single node deployments, we can ping private ips -MULTI_HOST=${MULTI_HOST:-0} -if [ "$MULTI_HOST" = "0" ]; then +MULTI_HOST=`trueorfalse False $MULTI_HOST` +if [ "$MULTI_HOST" = "False" ]; then # sometimes the first ping fails (10 seconds isn't enough time for the VM's # network to respond?), so let's ping for a default of 15 seconds with a # timeout of a second for each ping. diff --git a/stack.sh b/stack.sh index 71c5ffc4..f657fe3d 100755 --- a/stack.sh +++ b/stack.sh @@ -340,7 +340,7 @@ TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29} # **MULTI_HOST** is a mode where each compute node runs its own network node. This # allows network operations and routing for a VM to occur on the server that is # running the VM - removing a SPOF and bandwidth bottleneck. -MULTI_HOST=${MULTI_HOST:-False} +MULTI_HOST=`trueorfalse False $MULTI_HOST` # If you are using FlatDHCP on multiple hosts, set the ``FLAT_INTERFACE`` # variable but make sure that the interface doesn't already have an From 4dc53aa7983a18d9fe2ff6267cbc97ccf6049444 Mon Sep 17 00:00:00 2001 From: Shweta P Date: Wed, 4 Apr 2012 16:17:40 -0400 Subject: [PATCH 500/967] Adds LinuxBridge plugin setup support. Change-Id: I4c3250ac9bd3f3eb466a211bcaddaf1855d6ef8d --- stack.sh | 63 ++++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 54 insertions(+), 9 deletions(-) diff --git a/stack.sh b/stack.sh index 71c5ffc4..526e2349 100755 --- a/stack.sh +++ b/stack.sh @@ -990,14 +990,36 @@ fi # Quantum # ------- - -# Quantum service -if is_service_enabled q-svc; then +if is_service_enabled quantum; then + # Put config files in /etc/quantum for everyone to find QUANTUM_CONF_DIR=/etc/quantum if [[ ! -d $QUANTUM_CONF_DIR ]]; then sudo mkdir -p $QUANTUM_CONF_DIR fi sudo chown `whoami` $QUANTUM_CONF_DIR + + # Set default values when using Linux Bridge plugin + if [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + # set the config file + QUANTUM_LB_CONF_DIR=$QUANTUM_CONF_DIR/plugins/linuxbridge + mkdir -p $QUANTUM_LB_CONF_DIR + QUANTUM_LB_CONFIG_FILE=$QUANTUM_LB_CONF_DIR/linuxbridge_conf.ini + # must remove this file from existing location, otherwise Quantum will prefer it + if [[ -e $QUANTUM_DIR/etc/quantum/plugins/linuxbridge/linuxbridge_conf.ini ]]; then + sudo mv $QUANTUM_DIR/etc/quantum/plugins/linuxbridge/linuxbridge_conf.ini $QUANTUM_LB_CONFIG_FILE + fi + #set the default network interface + QUANTUM_LB_PRIVATE_INTERFACE=${QUANTUM_LB_PRIVATE_INTERFACE:-$GUEST_INTERFACE_DEFAULT} + fi +fi +# Quantum service +if is_service_enabled q-svc; then + QUANTUM_PLUGIN_INI_FILE=$QUANTUM_CONF_DIR/plugins.ini + # must remove this file from existing location, otherwise Quantum will prefer it + if [[ -e $QUANTUM_DIR/etc/plugins.ini ]]; then + sudo mv $QUANTUM_DIR/etc/plugins.ini $QUANTUM_PLUGIN_INI_FILE + fi + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then # Install deps # FIXME add to files/apts/quantum, but don't install if not needed! @@ -1016,13 +1038,27 @@ if is_service_enabled q-svc; then echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." 
exit 1 fi - QUANTUM_PLUGIN_INI_FILE=$QUANTUM_CONF_DIR/plugins.ini - # must remove this file from existing location, otherwise Quantum will prefer it - if [[ -e $QUANTUM_DIR/etc/plugins.ini ]]; then - sudo mv $QUANTUM_DIR/etc/plugins.ini $QUANTUM_PLUGIN_INI_FILE - fi # Make sure we're using the openvswitch plugin sudo sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + # Install deps + # FIXME add to files/apts/quantum, but don't install if not needed! + install_package python-configobj + # Create database for the plugin/agent + if is_service_enabled mysql; then + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS quantum_linux_bridge;' + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS quantum_linux_bridge;' + else + echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." + exit 1 + fi + # Make sure we're using the linuxbridge plugin and set the mysql hostname, username and password in the config file + sudo sed -i -e "s/^provider =.*$/provider = quantum.plugins.linuxbridge.LinuxBridgePlugin.LinuxBridgePlugin/g" $QUANTUM_PLUGIN_INI_FILE + sudo sed -i -e "s/^connection = sqlite$/#connection = sqlite/g" $QUANTUM_LB_CONFIG_FILE + sudo sed -i -e "s/^#connection = mysql$/connection = mysql/g" $QUANTUM_LB_CONFIG_FILE + sudo sed -i -e "s/^user = .*$/user = $MYSQL_USER/g" $QUANTUM_LB_CONFIG_FILE + sudo sed -i -e "s/^pass = .*$/pass = $MYSQL_PASSWORD/g" $QUANTUM_LB_CONFIG_FILE + sudo sed -i -e "s/^host = .*$/host = $MYSQL_HOST/g" $QUANTUM_LB_CONFIG_FILE fi if [[ -e $QUANTUM_DIR/etc/quantum.conf ]]; then sudo mv $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF_DIR/quantum.conf @@ -1048,8 +1084,12 @@ if is_service_enabled q-agt; then fi sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/ovs_quantum?charset=utf8/g" $QUANTUM_OVS_CONFIG_FILE screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_OVS_CONFIG_FILE -v" + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + # Start up the quantum <-> linuxbridge agent + install_package bridge-utils + sudo sed -i -e "s/^physical_interface = .*$/physical_interface = $QUANTUM_LB_PRIVATE_INTERFACE/g" $QUANTUM_LB_CONFIG_FILE + screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/linuxbridge/agent/linuxbridge_quantum_agent.py $QUANTUM_LB_CONFIG_FILE -v" fi - fi # Melange service @@ -1520,6 +1560,11 @@ if is_service_enabled quantum; then add_nova_opt "libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver" add_nova_opt "linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver" add_nova_opt "quantum_use_dhcp=True" + elif is_service_enabled q-svc && [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + add_nova_opt "libvirt_vif_type=ethernet" + add_nova_opt "libvirt_vif_driver=nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver" + add_nova_opt "linuxnet_interface_driver=nova.network.linux_net.QuantumLinuxBridgeInterfaceDriver" + add_nova_opt "quantum_use_dhcp=True" fi else add_nova_opt "network_manager=nova.network.manager.$NET_MAN" From 520a9ca5565d145d4e970595419cb00b19735030 Mon Sep 17 00:00:00 2001 From: Hua ZHANG Date: Thu, 3 May 2012 18:17:18 +0800 Subject: [PATCH 501/967] Allow wget to handle Non-English output The stack.sh has a loop to wait for keystone to start. 
But the output of wget tool has been globalized which means it won't return the English word 'refused' on Non-English environment even the keystone is not up. So the script will assume the keystone has been started and continue. The command of keystone tenant-create always failed immediately after skipping this loop since it require keystone to be started to authenticate. That's why you can see authentication error in the log because the tenant information was not correctly set up. Fix bug 978739 Change-Id: Ia4ebe6ad0f9b5a886de48a4bea7c2aebff420dad --- AUTHORS | 1 + stack.sh | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/AUTHORS b/AUTHORS index 820a6773..bca25b43 100644 --- a/AUTHORS +++ b/AUTHORS @@ -11,6 +11,7 @@ Eddie Hebert Eoghan Glynn Gabriel Hurley Hengqing Hu +Hua ZHANG Jake Dahn James E. Blair Jason Cannavale diff --git a/stack.sh b/stack.sh index a7891afe..e66f71d1 100755 --- a/stack.sh +++ b/stack.sh @@ -1723,7 +1723,7 @@ if is_service_enabled key; then # launch keystone and wait for it to answer before continuing screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while http_proxy= wget -O- $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ 2>&1 | grep -q 'refused'; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -O- $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ 2>&1 | grep -q '200 OK'; do sleep 1; done"; then echo "keystone did not start" exit 1 fi From 0f5da0016881578b3b822f4e8500095fefa08b83 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 3 May 2012 11:52:55 -0400 Subject: [PATCH 502/967] Regenerate tempest.conf every time Select for active images If we don't do this, repeat devstack installs won't be able to use this script. Change-Id: I95746ffebfa7163c80161d26de1e575c0fc5d39c --- tools/configure_tempest.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index bed3d670..a84c42b5 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -67,11 +67,12 @@ fi # We ignore ramdisk and kernel images and set the IMAGE_UUID to # the first image returned and set IMAGE_UUID_ALT to the second, # if there is more than one returned... +# ... Also ensure we only take active images, so we don't get snapshots in process IMAGE_LINES=`glance image-list` IFS="$(echo -e "\n\r")" IMAGES="" for line in $IMAGE_LINES; do - IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | cut -d' ' -f2`" + IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | cut -d' ' -f2`" done # Create array of image UUIDs... IFS=" " @@ -89,9 +90,8 @@ if [[ $NUM_IMAGES -gt 1 ]]; then fi # Create tempest.conf from tempest.conf.tpl -if [[ ! 
-r $TEMPEST_CONF ]]; then - cp $TEMPEST_CONF.tpl $TEMPEST_CONF -fi +# copy every time, because the image UUIDS are going to change +cp $TEMPEST_CONF.tpl $TEMPEST_CONF IDENTITY_USE_SSL=${IDENTITY_USE_SSL:-False} IDENTITY_HOST=${IDENTITY_HOST:-127.0.0.1} From 220d93848f0254a72d15d7a16f288b85634ab180 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Fri, 4 May 2012 10:19:44 -0700 Subject: [PATCH 503/967] Update to point noVNC to kanaka/master * nova-novncproxy was accepted into kanaka/master * Fixes bug 994653 Change-Id: I1cb40440e6cfbad9ed28203c45deea9fb9305f91 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 092ba9ee..41a399a2 100644 --- a/stackrc +++ b/stackrc @@ -30,7 +30,7 @@ KEYSTONE_REPO=https://github.com/openstack/keystone.git KEYSTONE_BRANCH=master # a websockets/html5 or flash powered VNC console for vm instances -NOVNC_REPO=https://github.com/cloudbuilders/noVNC.git +NOVNC_REPO=https://github.com/kanaka/noVNC.git NOVNC_BRANCH=master # django powered web control panel for openstack From e7114ca22f39e98d7a0dc3125f0a05dc8fc7ff97 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Fri, 4 May 2012 13:42:35 -0700 Subject: [PATCH 504/967] Copy Glance's policy.json to /etc/glance Change-Id: Ib83c9874b7d31e66206cf8907a12c65d9794a1a2 --- stack.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stack.sh b/stack.sh index 8a443d1f..48659e98 100755 --- a/stack.sh +++ b/stack.sh @@ -998,6 +998,9 @@ if is_service_enabled g-reg; then iniset $GLANCE_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $GLANCE_API_PASTE_INI filter:authtoken admin_user glance iniset $GLANCE_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD + + GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json + cp $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON fi # Quantum From c39e681ecb9196fd2968a8ad68c96cd453c22f04 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 7 May 2012 13:16:23 -0500 Subject: [PATCH 505/967] Force prettytable==0.5 prettytable 0.6 was released recently and removed the printt() method. All non-pinned prettytable projects (all of them) are now broken. This forces a pre-loaded prettytable 0.5 package in order to allow the real fix (removing printt() calls) to proceed. Change-Id: Iacf24b8474f12b28090329496720767281181d7e --- files/pips/general | 1 + 1 file changed, 1 insertion(+) create mode 100644 files/pips/general diff --git a/files/pips/general b/files/pips/general new file mode 100644 index 00000000..f7403063 --- /dev/null +++ b/files/pips/general @@ -0,0 +1 @@ +prettytable==0.5 From a143e73cafb0d92f31907f6817a7007aad1e6503 Mon Sep 17 00:00:00 2001 From: Everett Toews Date: Tue, 8 May 2012 22:13:08 +0000 Subject: [PATCH 506/967] Changed --user to --user_id and --role to --role_id in the keystone client for consistency. Need to update keystone calls here. This change should be applied after [bug/994744 b7fe11c] in python-keystoneclient. Fixes bug 994744. 
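Only the option names change; the arguments and their ordering are untouched. For example, a role grant that previously read

    keystone user-role-add --user $DEMO_USER --role $MEMBER_ROLE --tenant_id $DEMO_TENANT

now becomes

    keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $DEMO_TENANT

matching the renamed options in python-keystoneclient.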
Change-Id: I13e643f8552d86ed0bf92799271899f777bde9b2 --- files/keystone_data.sh | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index a49eb426..b19ba473 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -56,19 +56,19 @@ ANOTHER_ROLE=$(get_id keystone role-create --name=anotherrole) # Add Roles to Users in Tenants -keystone user-role-add --user $ADMIN_USER --role $ADMIN_ROLE --tenant_id $ADMIN_TENANT -keystone user-role-add --user $ADMIN_USER --role $ADMIN_ROLE --tenant_id $DEMO_TENANT -keystone user-role-add --user $DEMO_USER --role $ANOTHER_ROLE --tenant_id $DEMO_TENANT +keystone user-role-add --user_id $ADMIN_USER --role_id $ADMIN_ROLE --tenant_id $ADMIN_TENANT +keystone user-role-add --user_id $ADMIN_USER --role_id $ADMIN_ROLE --tenant_id $DEMO_TENANT +keystone user-role-add --user_id $DEMO_USER --role_id $ANOTHER_ROLE --tenant_id $DEMO_TENANT # TODO(termie): these two might be dubious -keystone user-role-add --user $ADMIN_USER --role $KEYSTONEADMIN_ROLE --tenant_id $ADMIN_TENANT -keystone user-role-add --user $ADMIN_USER --role $KEYSTONESERVICE_ROLE --tenant_id $ADMIN_TENANT +keystone user-role-add --user_id $ADMIN_USER --role_id $KEYSTONEADMIN_ROLE --tenant_id $ADMIN_TENANT +keystone user-role-add --user_id $ADMIN_USER --role_id $KEYSTONESERVICE_ROLE --tenant_id $ADMIN_TENANT # The Member role is used by Horizon and Swift so we need to keep it: MEMBER_ROLE=$(get_id keystone role-create --name=Member) -keystone user-role-add --user $DEMO_USER --role $MEMBER_ROLE --tenant_id $DEMO_TENANT -keystone user-role-add --user $DEMO_USER --role $MEMBER_ROLE --tenant_id $INVIS_TENANT +keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $DEMO_TENANT +keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $INVIS_TENANT # Configure service users/roles @@ -77,16 +77,16 @@ NOVA_USER=$(get_id keystone user-create --name=nova \ --tenant_id $SERVICE_TENANT \ --email=nova@example.com) keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user $NOVA_USER \ - --role $ADMIN_ROLE + --user_id $NOVA_USER \ + --role_id $ADMIN_ROLE GLANCE_USER=$(get_id keystone user-create --name=glance \ --pass="$SERVICE_PASSWORD" \ --tenant_id $SERVICE_TENANT \ --email=glance@example.com) keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user $GLANCE_USER \ - --role $ADMIN_ROLE + --user_id $GLANCE_USER \ + --role_id $ADMIN_ROLE if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then SWIFT_USER=$(get_id keystone user-create --name=swift \ @@ -94,8 +94,8 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then --tenant_id $SERVICE_TENANT \ --email=swift@example.com) keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user $SWIFT_USER \ - --role $ADMIN_ROLE + --user_id $SWIFT_USER \ + --role_id $ADMIN_ROLE # Nova needs ResellerAdmin role to download images when accessing # swift through the s3 api. 
The admin role in swift allows a user # to act as an admin for their tenant, but ResellerAdmin is needed @@ -103,8 +103,8 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then # configurable in swift-proxy.conf RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin) keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user $NOVA_USER \ - --role $RESELLER_ROLE + --user_id $NOVA_USER \ + --role_id $RESELLER_ROLE fi if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then @@ -113,6 +113,6 @@ if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then --tenant_id $SERVICE_TENANT \ --email=quantum@example.com) keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user $QUANTUM_USER \ - --role $ADMIN_ROLE + --user_id $QUANTUM_USER \ + --role_id $ADMIN_ROLE fi From ee76d26f232beced4e12ff636fe3bdcce8b91c55 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 9 May 2012 17:19:09 +0100 Subject: [PATCH 507/967] Store glance images in Swift if enabled. - Fixes bug 968950. - This should be applied after review 5856 for glance. Change-Id: I779fd75ce7394bdfc2ffc1354b4ed35a6109321f --- stack.sh | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/stack.sh b/stack.sh index 66c8e932..fa5b7eca 100755 --- a/stack.sh +++ b/stack.sh @@ -989,6 +989,15 @@ if is_service_enabled g-reg; then iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/ iniset $GLANCE_API_CONF paste_deploy flavor keystone + # Store the images in swift if enabled. + if is_service_enabled swift; then + iniset $GLANCE_API_CONF DEFAULT default_store swift + iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ + iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance + iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD + iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True + fi + GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini cp $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI iniset $GLANCE_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST From 73812ae7c39c3ea32770d9c0c22ea24de65e5491 Mon Sep 17 00:00:00 2001 From: Chuck Short Date: Thu, 3 May 2012 13:28:21 -0400 Subject: [PATCH 508/967] Add quantal Allow devstack to run on quantal. Change-Id: Iec79aa3669dc1dcc695470e6c15957622268476f Signed-off-by: Chuck Short --- files/apts/nova | 2 +- stack.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/files/apts/nova b/files/apts/nova index 66640c50..8a3ca4cf 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -1,5 +1,5 @@ dnsmasq-base -dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise +dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise,quantal kpartx parted arping # only available in dist:natty diff --git a/stack.sh b/stack.sh index 66c8e932..41bce47e 100755 --- a/stack.sh +++ b/stack.sh @@ -86,7 +86,7 @@ DEST=${DEST:-/opt/stack} # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (oneiric|precise|f16) ]]; then +if [[ ! 
${DISTRO} =~ (oneiric|precise|quantal|f16) ]]; then echo "WARNING: this script has been tested on oneiric, precise and f16" if [[ "$FORCE" != "yes" ]]; then echo "If you wish to run this script anyway run with FORCE=yes" From 1040a65fb90fcf52e11416807f27f7db8a5d5606 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Wed, 2 May 2012 01:41:18 +0100 Subject: [PATCH 509/967] bug 988550: devstack installer for xenserver to support user-defined ubuntu mirror add support for local mirrors by: - making sure the preseed file points to a local mirror. - the apt sources mounted in the DevStack VM will point automatically to the same mirrror Change-Id: I33052ac5c10387db7206ce2210ee2cbe9096df47 --- tools/xen/install_os_domU.sh | 5 ++++- tools/xen/prepare_dom0.sh | 41 ------------------------------------ tools/xen/prepare_guest.sh | 12 ----------- 3 files changed, 4 insertions(+), 54 deletions(-) delete mode 100755 tools/xen/prepare_dom0.sh diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 3c25d4a5..088748f5 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -174,13 +174,16 @@ fi templateuuid=$(xe template-list name-label="$TNAME") if [ -n "$templateuuid" ] then - vm_uuid=$(xe vm-install template="$TNAME" new-name-label="$GUEST_NAME") + vm_uuid=$(xe vm-install template="$TNAME" new-name-label="$GUEST_NAME") else template=$(xe_min template-list name-label="Ubuntu 11.10 (64-bit)") if [ -z "$template" ] then cp $TOP_DIR/devstackubuntupreseed.cfg /opt/xensource/www/ $TOP_DIR/scripts/xenoneirictemplate.sh "${HOST_IP}/devstackubuntupreseed.cfg" + MIRROR=${MIRROR:-archive.ubuntu.com} + sed -e "s,d-i mirror/http/hostname string .*,d-i mirror/http/hostname string $MIRROR," \ + -i /opt/xensource/www/devstackubuntupreseed.cfg fi $TOP_DIR/scripts/install-os-vpx.sh -t "Ubuntu 11.10 (64-bit)" -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -r $OSDOMU_MEM_MB -k "flat_network_bridge=${VM_BR}" diff --git a/tools/xen/prepare_dom0.sh b/tools/xen/prepare_dom0.sh deleted file mode 100755 index 71e9d6d3..00000000 --- a/tools/xen/prepare_dom0.sh +++ /dev/null @@ -1,41 +0,0 @@ -#i!/bin/sh -set -o xtrace -set -o errexit - -# Install basics for vi and git -yum -y --enablerepo=base install gcc make vim-enhanced zlib-devel openssl-devel curl-devel.i386 - -# Simple but usable vimrc -if [ ! 
-e /root/.vimrc ]; then - cat > /root/.vimrc <$STAGING_DIR/etc/apt/sources.list -deb http://us.archive.ubuntu.com/ubuntu/ oneiric main restricted -deb-src http://us.archive.ubuntu.com/ubuntu/ oneiric main restricted -deb http://us.archive.ubuntu.com/ubuntu/ oneiric-updates main restricted -deb-src http://us.archive.ubuntu.com/ubuntu/ oneiric-updates main restricted -deb http://us.archive.ubuntu.com/ubuntu/ oneiric universe -deb http://us.archive.ubuntu.com/ubuntu/ oneiric-updates universe -deb http://us.archive.ubuntu.com/ubuntu/ oneiric multiverse -deb http://us.archive.ubuntu.com/ubuntu/ oneiric-updates multiverse -EOF - # Install basics chroot $STAGING_DIR apt-get update chroot $STAGING_DIR apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool From 962c25eb7421094c6fc1ef7edb56fe1da69a972b Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Thu, 10 May 2012 07:24:01 -0400 Subject: [PATCH 510/967] stack.sh: updates for linuxbridge support 1. make use of uniform configuration files 2. enables agent and plugin to run on different hosts Change-Id: I04bab95956e3733c48dcecb6a94042e9666778d3 --- stack.sh | 33 +++++++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/stack.sh b/stack.sh index 66c8e932..59653d4d 100755 --- a/stack.sh +++ b/stack.sh @@ -1045,10 +1045,15 @@ if is_service_enabled q-svc; then ### FIXME(dtroyer): Find RPMs for OpenVSwitch echo "OpenVSwitch packages need to be located" fi + + QUANTUM_OVS_CONF_DIR=$QUANTUM_CONF_DIR/plugins/openvswitch + QUANTUM_OVS_CONFIG_FILE=$QUANTUM_OVS_CONF_DIR/ovs_quantum_plugin.ini + # Create database for the plugin/agent if is_service_enabled mysql; then mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS ovs_quantum;' mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum CHARACTER SET utf8;' + sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/ovs_quantum?charset=utf8/g" $QUANTUM_OVS_CONFIG_FILE else echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." exit 1 @@ -1063,17 +1068,22 @@ if is_service_enabled q-svc; then if is_service_enabled mysql; then mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS quantum_linux_bridge;' mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS quantum_linux_bridge;' + if grep -Fxq "user = " $QUANTUM_LB_CONFIG_FILE + then + sudo sed -i -e "s/^connection = sqlite$/#connection = sqlite/g" $QUANTUM_LB_CONFIG_FILE + sudo sed -i -e "s/^#connection = mysql$/connection = mysql/g" $QUANTUM_LB_CONFIG_FILE + sudo sed -i -e "s/^user = .*$/user = $MYSQL_USER/g" $QUANTUM_LB_CONFIG_FILE + sudo sed -i -e "s/^pass = .*$/pass = $MYSQL_PASSWORD/g" $QUANTUM_LB_CONFIG_FILE + sudo sed -i -e "s/^host = .*$/host = $MYSQL_HOST/g" $QUANTUM_LB_CONFIG_FILE + else + sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/quantum_linux_bridge?charset=utf8/g" $QUANTUM_LB_CONFIG_FILE + fi else echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." 
exit 1 fi - # Make sure we're using the linuxbridge plugin and set the mysql hostname, username and password in the config file + # Make sure we're using the linuxbridge plugin sudo sed -i -e "s/^provider =.*$/provider = quantum.plugins.linuxbridge.LinuxBridgePlugin.LinuxBridgePlugin/g" $QUANTUM_PLUGIN_INI_FILE - sudo sed -i -e "s/^connection = sqlite$/#connection = sqlite/g" $QUANTUM_LB_CONFIG_FILE - sudo sed -i -e "s/^#connection = mysql$/connection = mysql/g" $QUANTUM_LB_CONFIG_FILE - sudo sed -i -e "s/^user = .*$/user = $MYSQL_USER/g" $QUANTUM_LB_CONFIG_FILE - sudo sed -i -e "s/^pass = .*$/pass = $MYSQL_PASSWORD/g" $QUANTUM_LB_CONFIG_FILE - sudo sed -i -e "s/^host = .*$/host = $MYSQL_HOST/g" $QUANTUM_LB_CONFIG_FILE fi if [[ -e $QUANTUM_DIR/etc/quantum.conf ]]; then sudo mv $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF_DIR/quantum.conf @@ -1103,6 +1113,17 @@ if is_service_enabled q-agt; then # Start up the quantum <-> linuxbridge agent install_package bridge-utils sudo sed -i -e "s/^physical_interface = .*$/physical_interface = $QUANTUM_LB_PRIVATE_INTERFACE/g" $QUANTUM_LB_CONFIG_FILE + if grep -Fxq "user = " $QUANTUM_LB_CONFIG_FILE + then + sudo sed -i -e "s/^connection = sqlite$/#connection = sqlite/g" $QUANTUM_LB_CONFIG_FILE + sudo sed -i -e "s/^#connection = mysql$/connection = mysql/g" $QUANTUM_LB_CONFIG_FILE + sudo sed -i -e "s/^user = .*$/user = $MYSQL_USER/g" $QUANTUM_LB_CONFIG_FILE + sudo sed -i -e "s/^pass = .*$/pass = $MYSQL_PASSWORD/g" $QUANTUM_LB_CONFIG_FILE + sudo sed -i -e "s/^host = .*$/host = $MYSQL_HOST/g" $QUANTUM_LB_CONFIG_FILE + else + sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/quantum_linux_bridge?charset=utf8/g" $QUANTUM_LB_CONFIG_FILE + fi + screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/linuxbridge/agent/linuxbridge_quantum_agent.py $QUANTUM_LB_CONFIG_FILE -v" fi fi From b297d2d0a8e1b38ffaebc4590bced3361df6fe89 Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Thu, 10 May 2012 11:21:22 -0400 Subject: [PATCH 511/967] Fixes LP #996571 - Alternate Tempest user Adds an alternate user to Keystone for Tempest Tempest has a number of tests that are skipped if the compute.alt_username is the same as compute.username or None. Here, we modify files/keystone_data.sh to add an additional regular user called alt_demo if Tempest is enabled in stackrc. 
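As a minimal illustration (the service list is abridged and only the trailing
"tempest" entry matters for this new block), a localrc that makes
files/keystone_data.sh create the alt_demo tenant and user could look like:

ENABLED_SERVICES=g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,horizon,mysql,rabbit,tempest
ADMIN_PASSWORD=secrete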
We also make corresponding changes to the tools/configure_tempest.sh script to make use of this alternate user credential Change-Id: I551f3b378f843c62fffcf6effa916056708d54d3 --- files/keystone_data.sh | 12 ++++++++++++ tools/configure_tempest.sh | 18 +++++++++--------- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index a49eb426..9e994fd1 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -12,6 +12,8 @@ # demo admin admin # demo demo Member, anotherrole # invisible_to_admin demo Member +# Tempest Only: +# alt_demo alt_demo Member # # Variables set before calling this script: # SERVICE_TOKEN - aka admin_token in keystone.conf @@ -116,3 +118,13 @@ if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then --user $QUANTUM_USER \ --role $ADMIN_ROLE fi + +if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then + # Tempest has some tests that validate various authorization checks + # between two regular users in separate tenants + ALT_DEMO_TENANT=$(get_id keystone tenant-create --name=alt_demo) + ALT_DEMO_USER=$(get_id keystone user-create --name=alt_demo \ + --pass="$ADMIN_PASSWORD" \ + --email=alt_demo@example.com) + keystone user-role-add --user $ALT_DEMO_USER --role $MEMBER_ROLE --tenant_id $ALT_DEMO_TENANT +fi diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index a84c42b5..2c069343 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -93,6 +93,10 @@ fi # copy every time, because the image UUIDS are going to change cp $TEMPEST_CONF.tpl $TEMPEST_CONF +ADMIN_USERNAME=${ADMIN_USERNAME:-admin} +ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete} +ADMIN_TENANT_NAME=${ADMIN_TENANT:-admin} + IDENTITY_USE_SSL=${IDENTITY_USE_SSL:-False} IDENTITY_HOST=${IDENTITY_HOST:-127.0.0.1} IDENTITY_PORT=${IDENTITY_PORT:-5000} @@ -107,22 +111,18 @@ IDENTITY_STRATEGY=${IDENTITY_STRATEGY:-keystone} # OS_USERNAME et all should be defined in openrc. OS_USERNAME=${OS_USERNAME:-demo} OS_TENANT_NAME=${OS_TENANT_NAME:-demo} -OS_PASSWORD=${OS_PASSWORD:-secrete} +OS_PASSWORD=${OS_PASSWORD:$ADMIN_PASSWORD} -# TODO(jaypipes): Support multiple regular user accounts instead -# of using the same regular user account for the alternate user... -ALT_USERNAME=$OS_USERNAME +# See files/keystone_data.sh where alt_demo user +# and tenant are set up... +ALT_USERNAME=${ALT_USERNAME:-alt_demo} +ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo} ALT_PASSWORD=$OS_PASSWORD -ALT_TENANT_NAME=$OS_TENANT_NAME # TODO(jaypipes): Support configurable flavor refs here... FLAVOR_REF=1 FLAVOR_REF_ALT=2 -ADMIN_USERNAME=${ADMIN_USERNAME:-admin} -ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete} -ADMIN_TENANT_NAME=${ADMIN_TENANT:-admin} - # Do any of the following need to be configurable? COMPUTE_CATALOG_TYPE=compute COMPUTE_CREATE_IMAGE_ENABLED=True From 27e39fded215058b4a7424da6884721b07e06a88 Mon Sep 17 00:00:00 2001 From: Mark McLoughlin Date: Thu, 10 May 2012 07:12:36 +0100 Subject: [PATCH 512/967] Copy Nova's policy.json to /etc/nova Change-Id: I170e536331e617ea8984182d95616928f02a34a6 --- stack.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stack.sh b/stack.sh index 66c8e932..d8720d92 100755 --- a/stack.sh +++ b/stack.sh @@ -1141,6 +1141,8 @@ if [[ ! -d $NOVA_CONF_DIR ]]; then fi sudo chown `whoami` $NOVA_CONF_DIR +cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR + if is_service_enabled n-api; then # Use the sample http middleware configuration supplied in the # Nova sources. 
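# Illustrative aside (assumes the conventional /etc/nova value of NOVA_CONF_DIR
# and the stock /opt/stack checkout; adjust if overridden): after the
# policy.json commit above, the file sits next to nova.conf and can be
# sanity-checked with:
NOVA_CONF_DIR=${NOVA_CONF_DIR:-/etc/nova}
cp -p /opt/stack/nova/etc/nova/policy.json $NOVA_CONF_DIR/
ls -l $NOVA_CONF_DIR/policy.json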
This paste config adds the configuration required From d7ed417fecf3c510165e9fee514868c34a4c5fb7 Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Thu, 10 May 2012 15:36:03 -0700 Subject: [PATCH 513/967] Add script to delete templates This is a common operation on XenServer, which seems to be a three step process. Change-Id: I125b9c11ace0b3454b7c974f9209aca01cdd7ed2 --- tools/xen/scripts/templatedelete.sh | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100755 tools/xen/scripts/templatedelete.sh diff --git a/tools/xen/scripts/templatedelete.sh b/tools/xen/scripts/templatedelete.sh new file mode 100755 index 00000000..66765b24 --- /dev/null +++ b/tools/xen/scripts/templatedelete.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +#Usage: ./templatedelete.sh + +templateuuid="$1" + +xe template-param-set other-config:default_template=false uuid="$templateuuid" +xe template-param-set is-a-template=false uuid="$templateuuid" +xe vm-destroy uuid="$templateuuid" From f49670691bc4b517427ad81cda09c7f01eb95449 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Sat, 12 May 2012 18:40:13 -0700 Subject: [PATCH 514/967] Remove reference to QUANTUM_OVS_CONF_FILE before it is created. This was introduced by commit 962c25eb7421094c6fc1ef7edb56fe1da69a972b, but it completely breaks a clean install of devstack with OVS. Note: the author was probably attempting to get devstack working in a multi-node environment. There is another branch under submission for that already. Change-Id: Ie2cbcbd58e0d0babf9b0af0db9aa879703a2aeb9 --- stack.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/stack.sh b/stack.sh index c6f5d1e3..a379796d 100755 --- a/stack.sh +++ b/stack.sh @@ -1053,7 +1053,6 @@ if is_service_enabled q-svc; then if is_service_enabled mysql; then mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS ovs_quantum;' mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum CHARACTER SET utf8;' - sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/ovs_quantum?charset=utf8/g" $QUANTUM_OVS_CONFIG_FILE else echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." exit 1 From 9a4478b8b20bbb4008c8a62d14574377fb2909fd Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 16 May 2012 11:07:52 -0500 Subject: [PATCH 515/967] Remove prettytable pin to 0.5 * The clients have all been fixed to work with prettytable 0.6 Change-Id: I968d73862e6d45d6974c43ea47ba84e8210ed787 --- files/pips/general | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/pips/general b/files/pips/general index f7403063..deb2d14c 100644 --- a/files/pips/general +++ b/files/pips/general @@ -1 +1 @@ -prettytable==0.5 +prettytable From 00edc95c6bd80927c727742ac044082f64b26bec Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 17 May 2012 11:10:36 -0500 Subject: [PATCH 516/967] Create new swift data store filesystem every time The Swift data store was not cleaned out between stack.sh runs; although the contents were not visible they were still taking up space. Create a new XFS filesystem on every stack.sh run. Fixes bug 1000827 Change-Id: Ieab6b5f65b7964906f244975cbcdf2cf50344ca5 --- stack.sh | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 0ee2ab87..13cf62a8 100755 --- a/stack.sh +++ b/stack.sh @@ -1347,16 +1347,20 @@ if is_service_enabled swift; then sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR} # We then create a loopback disk and format it to XFS. 
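# Illustrative sketch of the loopback pattern described above (the directory
# and size come from SWIFT_DATA_DIR / SWIFT_LOOPBACK_DISK_SIZE; the values
# shown here are placeholders, not necessarily the script's defaults):
SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-/opt/stack/data/swift}
SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000}
mkdir -p ${SWIFT_DATA_DIR}/drives/images
# seek= produces a sparse file of the requested size without writing data
dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \
    bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
# format it as XFS with 1024-byte inodes, as in the patch
mkfs.xfs -f -i size=1024 ${SWIFT_DATA_DIR}/drives/images/swift.img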
- # TODO: Reset disks on new pass. - if [[ ! -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then + if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then + if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then + sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 + fi + else mkdir -p ${SWIFT_DATA_DIR}/drives/images sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \ bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} - mkfs.xfs -f -i size=1024 ${SWIFT_DATA_DIR}/drives/images/swift.img fi + # Make a fresh XFS filesystem + mkfs.xfs -f -i size=1024 ${SWIFT_DATA_DIR}/drives/images/swift.img # After the drive being created we mount the disk with a few mount # options to make it most efficient as possible for swift. From 4a221459b72856a0593c6182334cbbcc8def5f6c Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Tue, 13 Mar 2012 13:44:12 -0400 Subject: [PATCH 517/967] Add support for using Qpid instead of RabbitMQ. To use Qpid instead of RabbitMQ, you set 'qpid' instead of 'rabbit' in ENABLED_SERVICES in your localrc file. Otherwise, RabbitMQ is still used by default. (dtroyer) fixed problem with service test that failed to configure rabbitmq if it was still selected. Change-Id: I8c62b588a461a068463821b2c079ffa4bfa1f804 --- files/apts/nova | 2 ++ files/rpms/nova | 2 ++ stack.sh | 35 +++++++++++++++++++++++++++-------- 3 files changed, 31 insertions(+), 8 deletions(-) diff --git a/files/apts/nova b/files/apts/nova index 8a3ca4cf..3ebf57c6 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -19,6 +19,7 @@ libvirt-bin # NOPRIME vlan curl rabbitmq-server # NOPRIME +qpidd # NOPRIME socat # used by ajaxterm python-mox python-paste @@ -42,3 +43,4 @@ python-boto python-kombu python-feedparser python-iso8601 +python-qpid # dist:precise diff --git a/files/rpms/nova b/files/rpms/nova index 1b1d47f7..bb920267 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -29,11 +29,13 @@ python-netaddr python-paramiko python-paste python-paste-deploy +python-qpid python-routes python-sqlalchemy python-suds python-tempita rabbitmq-server # NOPRIME +qpid-cpp-server # NOPRIME sqlite sudo vconfig diff --git a/stack.sh b/stack.sh index 0ee2ab87..df260279 100755 --- a/stack.sh +++ b/stack.sh @@ -94,6 +94,12 @@ if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|f16) ]]; then fi fi +if [ "${DISTRO}" = "oneiric" ] && is_service_enabled qpid ; then + # Qpid was introduced in precise + echo "You must use Ubuntu Precise or newer for Qpid support." + exit 1 +fi + # Set the paths of certain binaries if [[ "$os_PACKAGE" = "deb" ]]; then NOVA_ROOTWRAP=/usr/local/bin/nova-rootwrap @@ -381,8 +387,8 @@ FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT} # host. -# MySQL & RabbitMQ -# ---------------- +# MySQL & (RabbitMQ or Qpid) +# -------------------------- # We configure Nova, Horizon, Glance and Keystone to use MySQL as their # database server. While they share a single server, each has their own @@ -400,8 +406,10 @@ read_password MYSQL_PASSWORD "ENTER A PASSWORD TO USE FOR MYSQL." BASE_SQL_CONN=${BASE_SQL_CONN:-mysql://$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST} # Rabbit connection info -RABBIT_HOST=${RABBIT_HOST:-localhost} -read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT." +if is_service_enabled rabbit; then + RABBIT_HOST=${RABBIT_HOST:-localhost} + read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT." +fi # Glance connection info. Note the port must be specified. 
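# Illustrative sketch (not from the patch itself): opting into Qpid is done
# purely through ENABLED_SERVICES in localrc -- list "qpid" in place of
# "rabbit". The service list below is an example; only the rabbit->qpid swap
# matters.
ENABLED_SERVICES=g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,horizon,mysql,qpid
# On oneiric the new guard above aborts, since the qpidd package first appears
# in precise.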
GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292} @@ -756,8 +764,8 @@ EOF fi -# Rabbit -# ------ +# Rabbit or Qpid +# -------------- if is_service_enabled rabbit; then # Install and start rabbitmq-server @@ -772,6 +780,13 @@ if is_service_enabled rabbit; then fi # change the rabbit password since the default is "guest" sudo rabbitmqctl change_password guest $RABBIT_PASSWORD +elif is_service_enabled qpid; then + if [[ "$os_PACKAGE" = "rpm" ]]; then + install_package qpid-cpp-server + restart_service qpidd + else + install_package qpidd + fi fi @@ -1653,8 +1668,12 @@ add_nova_opt "vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS" add_nova_opt "api_paste_config=$NOVA_CONF_DIR/api-paste.ini" add_nova_opt "image_service=nova.image.glance.GlanceImageService" add_nova_opt "ec2_dmz_host=$EC2_DMZ_HOST" -add_nova_opt "rabbit_host=$RABBIT_HOST" -add_nova_opt "rabbit_password=$RABBIT_PASSWORD" +if is_service_enabled rabbit ; then + add_nova_opt "rabbit_host=$RABBIT_HOST" + add_nova_opt "rabbit_password=$RABBIT_PASSWORD" +elif is_service_enabled qpid ; then + add_nova_opt "rpc_backend=nova.rpc.impl_qpid" +fi add_nova_opt "glance_api_servers=$GLANCE_HOSTPORT" add_nova_opt "force_dhcp_release=True" if [ -n "$INSTANCES_PATH" ]; then From 1fad810cef247356a362448ec01a808b9b13a107 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Mon, 7 May 2012 15:18:27 -0700 Subject: [PATCH 518/967] Remove unused service_* from authtoken middleware service_port and service_host are not used by the current keystone authtoken Change-Id: Iff5d2dccdc69b0a05443500d45b144acfbbe3c79 --- AUTHORS | 1 + files/swift/proxy-server.conf | 2 -- stack.sh | 2 -- 3 files changed, 1 insertion(+), 4 deletions(-) diff --git a/AUTHORS b/AUTHORS index bca25b43..9d838e87 100644 --- a/AUTHORS +++ b/AUTHORS @@ -17,6 +17,7 @@ James E. Blair Jason Cannavale Jay Pipes Jesse Andrews +Joe Gordon Johannes Erdfelt Josh Kearney Justin Shepherd diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf index ce5473b2..2fe20f57 100644 --- a/files/swift/proxy-server.conf +++ b/files/swift/proxy-server.conf @@ -23,8 +23,6 @@ operator_roles = Member,admin # username and password. [filter:s3token] paste.filter_factory = keystone.middleware.s3_token:filter_factory -service_port = %KEYSTONE_SERVICE_PORT% -service_host = %KEYSTONE_SERVICE_HOST% auth_port = %KEYSTONE_AUTH_PORT% auth_host = %KEYSTONE_AUTH_HOST% auth_protocol = %KEYSTONE_AUTH_PROTOCOL% diff --git a/stack.sh b/stack.sh index df260279..98ed59c0 100755 --- a/stack.sh +++ b/stack.sh @@ -1441,8 +1441,6 @@ if is_service_enabled swift; then s,%SERVICE_PASSWORD%,$SERVICE_PASSWORD,g; s,%KEYSTONE_SERVICE_PROTOCOL%,$KEYSTONE_SERVICE_PROTOCOL,g; s,%SERVICE_TOKEN%,${SERVICE_TOKEN},g; - s,%KEYSTONE_SERVICE_PORT%,${KEYSTONE_SERVICE_PORT},g; - s,%KEYSTONE_SERVICE_HOST%,${KEYSTONE_SERVICE_HOST},g; s,%KEYSTONE_API_PORT%,${KEYSTONE_API_PORT},g; s,%KEYSTONE_AUTH_HOST%,${KEYSTONE_AUTH_HOST},g; s,%KEYSTONE_AUTH_PORT%,${KEYSTONE_AUTH_PORT},g; From 0007f3a6151e2cfbe52760c689fde0fb3429ddce Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Fri, 18 May 2012 13:37:47 -0700 Subject: [PATCH 519/967] enable quantum multi-node scenarios. Let's you run with a centralized quantum service node running the plugin and quantum agents running on one or more hosts. 
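For reference, a condensed sketch of the RPC wiring that the Qpid patch above
adds to stack.sh (is_service_enabled and add_nova_opt are the script's own
helpers; nothing here goes beyond what the patch itself does):

if is_service_enabled rabbit; then
    add_nova_opt "rabbit_host=$RABBIT_HOST"
    add_nova_opt "rabbit_password=$RABBIT_PASSWORD"
elif is_service_enabled qpid; then
    # no broker credentials are written; nova is simply pointed at the qpid
    # RPC driver shipped with nova in this era
    add_nova_opt "rpc_backend=nova.rpc.impl_qpid"
fi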
Also: - make OVS plugin work on precise by configuring cgroup_device_acl - consolidate ovs + linux-bridge config to avoid duplicate code - support configuring tunnel-mode for OVS plugin - add additional build packages for OVS apt-get - remove backward compat support for old DB config for linux-bridge plugin Change-Id: Ifab268f739b004db13024633e8abeb17691b9e46 --- stack.sh | 205 +++++++++++++++++++++++++++---------------------------- 1 file changed, 102 insertions(+), 103 deletions(-) diff --git a/stack.sh b/stack.sh index df260279..fea92552 100755 --- a/stack.sh +++ b/stack.sh @@ -689,7 +689,7 @@ fi if is_service_enabled quantum; then git_clone $QUANTUM_CLIENT_REPO $QUANTUM_CLIENT_DIR $QUANTUM_CLIENT_BRANCH fi -if is_service_enabled q-svc; then +if is_service_enabled quantum; then # quantum git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH fi @@ -727,7 +727,7 @@ fi if is_service_enabled quantum; then cd $QUANTUM_CLIENT_DIR; sudo python setup.py develop fi -if is_service_enabled q-svc; then +if is_service_enabled quantum; then cd $QUANTUM_DIR; sudo python setup.py develop fi if is_service_enabled m-svc; then @@ -1027,128 +1027,111 @@ if is_service_enabled g-reg; then cp $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON fi -# Quantum +# Quantum (for controller or agent nodes) # ------- if is_service_enabled quantum; then # Put config files in /etc/quantum for everyone to find - QUANTUM_CONF_DIR=/etc/quantum - if [[ ! -d $QUANTUM_CONF_DIR ]]; then - sudo mkdir -p $QUANTUM_CONF_DIR + if [[ ! -d /etc/quantum ]]; then + sudo mkdir -p /etc/quantum + fi + sudo chown `whoami` /etc/quantum + + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch + Q_PLUGIN_CONF_FILENAME=ovs_quantum_plugin.ini + Q_DB_NAME="ovs_quantum" + Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin" + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + # Install deps + # FIXME add to files/apts/quantum, but don't install if not needed! + install_package python-configobj + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge + Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini + Q_DB_NAME="quantum_linux_bridge" + Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.LinuxBridgePlugin.LinuxBridgePlugin" + else + echo "Unknown Quantum plugin '$Q_PLUGIN'.. 
exiting" + exit 1 + fi + + # if needed, move config file from $QUANTUM_DIR/etc/quantum to /etc/quantum + mkdir -p /$Q_PLUGIN_CONF_PATH + Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME + if [[ -e $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE ]]; then + sudo mv $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE fi - sudo chown `whoami` $QUANTUM_CONF_DIR - - # Set default values when using Linux Bridge plugin - if [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - # set the config file - QUANTUM_LB_CONF_DIR=$QUANTUM_CONF_DIR/plugins/linuxbridge - mkdir -p $QUANTUM_LB_CONF_DIR - QUANTUM_LB_CONFIG_FILE=$QUANTUM_LB_CONF_DIR/linuxbridge_conf.ini - # must remove this file from existing location, otherwise Quantum will prefer it - if [[ -e $QUANTUM_DIR/etc/quantum/plugins/linuxbridge/linuxbridge_conf.ini ]]; then - sudo mv $QUANTUM_DIR/etc/quantum/plugins/linuxbridge/linuxbridge_conf.ini $QUANTUM_LB_CONFIG_FILE + sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/$Q_DB_NAME?charset=utf8/g" /$Q_PLUGIN_CONF_FILE + + OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-True} + if [[ "$Q_PLUGIN" = "openvswitch" && $OVS_ENABLE_TUNNELING = "True" ]]; then + OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'` + if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then + echo "You are running OVS version $OVS_VERSION." + echo "OVS 1.4+ is required for tunneling between multiple hosts." + exit 1 fi - #set the default network interface - QUANTUM_LB_PRIVATE_INTERFACE=${QUANTUM_LB_PRIVATE_INTERFACE:-$GUEST_INTERFACE_DEFAULT} + sudo sed -i -e "s/.*enable-tunneling = .*$/enable-tunneling = $OVS_ENABLE_TUNNELING/g" /$Q_PLUGIN_CONF_FILE fi fi -# Quantum service + +# Quantum service (for controller node) if is_service_enabled q-svc; then - QUANTUM_PLUGIN_INI_FILE=$QUANTUM_CONF_DIR/plugins.ini + Q_PLUGIN_INI_FILE=/etc/quantum/plugins.ini + Q_CONF_FILE=/etc/quantum/quantum.conf # must remove this file from existing location, otherwise Quantum will prefer it if [[ -e $QUANTUM_DIR/etc/plugins.ini ]]; then - sudo mv $QUANTUM_DIR/etc/plugins.ini $QUANTUM_PLUGIN_INI_FILE + sudo mv $QUANTUM_DIR/etc/plugins.ini $Q_PLUGIN_INI_FILE fi - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - # Install deps - # FIXME add to files/apts/quantum, but don't install if not needed! - if [[ "$os_PACKAGE" = "deb" ]]; then - kernel_version=`cat /proc/version | cut -d " " -f3` - install_package openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version - else - ### FIXME(dtroyer): Find RPMs for OpenVSwitch - echo "OpenVSwitch packages need to be located" - fi - - QUANTUM_OVS_CONF_DIR=$QUANTUM_CONF_DIR/plugins/openvswitch - QUANTUM_OVS_CONFIG_FILE=$QUANTUM_OVS_CONF_DIR/ovs_quantum_plugin.ini + if [[ -e $QUANTUM_DIR/etc/quantum.conf ]]; then + sudo mv $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE + fi - # Create database for the plugin/agent - if is_service_enabled mysql; then - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS ovs_quantum;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum CHARACTER SET utf8;' - else - echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." 
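# Illustrative recap (not part of the patch): the per-plugin values the
# refactor above selects, keyed off Q_PLUGIN from localrc. The values are
# copied from the patch; only the case form is editorial.
Q_PLUGIN=${Q_PLUGIN:-openvswitch}
case "$Q_PLUGIN" in
    openvswitch)
        Q_PLUGIN_CONF_FILE=etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini
        Q_DB_NAME="ovs_quantum"
        Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin"
        ;;
    linuxbridge)
        Q_PLUGIN_CONF_FILE=etc/quantum/plugins/linuxbridge/linuxbridge_conf.ini
        Q_DB_NAME="quantum_linux_bridge"
        Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.LinuxBridgePlugin.LinuxBridgePlugin"
        ;;
    *)
        echo "Unknown Quantum plugin '$Q_PLUGIN'.. exiting"
        exit 1
        ;;
esac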
- exit 1 - fi - # Make sure we're using the openvswitch plugin - sudo sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - # Install deps - # FIXME add to files/apts/quantum, but don't install if not needed! - install_package python-configobj - # Create database for the plugin/agent - if is_service_enabled mysql; then - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS quantum_linux_bridge;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS quantum_linux_bridge;' - if grep -Fxq "user = " $QUANTUM_LB_CONFIG_FILE - then - sudo sed -i -e "s/^connection = sqlite$/#connection = sqlite/g" $QUANTUM_LB_CONFIG_FILE - sudo sed -i -e "s/^#connection = mysql$/connection = mysql/g" $QUANTUM_LB_CONFIG_FILE - sudo sed -i -e "s/^user = .*$/user = $MYSQL_USER/g" $QUANTUM_LB_CONFIG_FILE - sudo sed -i -e "s/^pass = .*$/pass = $MYSQL_PASSWORD/g" $QUANTUM_LB_CONFIG_FILE - sudo sed -i -e "s/^host = .*$/host = $MYSQL_HOST/g" $QUANTUM_LB_CONFIG_FILE - else - sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/quantum_linux_bridge?charset=utf8/g" $QUANTUM_LB_CONFIG_FILE - fi + if is_service_enabled mysql; then + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e "DROP DATABASE IF EXISTS $Q_DB_NAME;" + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e "CREATE DATABASE IF NOT EXISTS $Q_DB_NAME CHARACTER SET utf8;" else echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." exit 1 - fi - # Make sure we're using the linuxbridge plugin - sudo sed -i -e "s/^provider =.*$/provider = quantum.plugins.linuxbridge.LinuxBridgePlugin.LinuxBridgePlugin/g" $QUANTUM_PLUGIN_INI_FILE - fi - if [[ -e $QUANTUM_DIR/etc/quantum.conf ]]; then - sudo mv $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF_DIR/quantum.conf fi - screen_it q-svc "cd $QUANTUM_DIR && PYTHONPATH=.:$QUANTUM_CLIENT_DIR:$PYTHONPATH python $QUANTUM_DIR/bin/quantum-server $QUANTUM_CONF_DIR/quantum.conf" + sudo sed -i -e "s/^provider =.*$/provider = $Q_PLUGIN_CLASS/g" $Q_PLUGIN_INI_FILE + + screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server $Q_CONF_FILE" fi # Quantum agent (for compute nodes) if is_service_enabled q-agt; then if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + # Install deps + # FIXME add to files/apts/quantum, but don't install if not needed! 
+ if [[ "$os_PACKAGE" = "deb" ]]; then + kernel_version=`cat /proc/version | cut -d " " -f3` + install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version + else + ### FIXME(dtroyer): Find RPMs for OpenVSwitch + echo "OpenVSwitch packages need to be located" + fi # Set up integration bridge OVS_BRIDGE=${OVS_BRIDGE:-br-int} + for PORT in `sudo ovs-vsctl --no-wait list-ports $OVS_BRIDGE`; do + if [[ "$PORT" =~ tap* ]]; then echo `sudo ip link delete $PORT` > /dev/null; fi + sudo ovs-vsctl --no-wait del-port $OVS_BRIDGE $PORT + done sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int - - # Start up the quantum <-> openvswitch agent - QUANTUM_OVS_CONF_DIR=$QUANTUM_CONF_DIR/plugins/openvswitch - mkdir -p $QUANTUM_OVS_CONF_DIR - QUANTUM_OVS_CONFIG_FILE=$QUANTUM_OVS_CONF_DIR/ovs_quantum_plugin.ini - if [[ -e $QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini ]]; then - sudo mv $QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini $QUANTUM_OVS_CONFIG_FILE - fi - sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/ovs_quantum?charset=utf8/g" $QUANTUM_OVS_CONFIG_FILE - screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_OVS_CONFIG_FILE -v" + sudo sed -i -e "s/.*local-ip = .*/local-ip = $HOST_IP/g" /$Q_PLUGIN_CONF_FILE + AGENT_BINARY=$QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then # Start up the quantum <-> linuxbridge agent install_package bridge-utils - sudo sed -i -e "s/^physical_interface = .*$/physical_interface = $QUANTUM_LB_PRIVATE_INTERFACE/g" $QUANTUM_LB_CONFIG_FILE - if grep -Fxq "user = " $QUANTUM_LB_CONFIG_FILE - then - sudo sed -i -e "s/^connection = sqlite$/#connection = sqlite/g" $QUANTUM_LB_CONFIG_FILE - sudo sed -i -e "s/^#connection = mysql$/connection = mysql/g" $QUANTUM_LB_CONFIG_FILE - sudo sed -i -e "s/^user = .*$/user = $MYSQL_USER/g" $QUANTUM_LB_CONFIG_FILE - sudo sed -i -e "s/^pass = .*$/pass = $MYSQL_PASSWORD/g" $QUANTUM_LB_CONFIG_FILE - sudo sed -i -e "s/^host = .*$/host = $MYSQL_HOST/g" $QUANTUM_LB_CONFIG_FILE - else - sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/quantum_linux_bridge?charset=utf8/g" $QUANTUM_LB_CONFIG_FILE - fi - - screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/linuxbridge/agent/linuxbridge_quantum_agent.py $QUANTUM_LB_CONFIG_FILE -v" + #set the default network interface + QUANTUM_LB_PRIVATE_INTERFACE=${QUANTUM_LB_PRIVATE_INTERFACE:-$GUEST_INTERFACE_DEFAULT} + sudo sed -i -e "s/^physical_interface = .*$/physical_interface = $QUANTUM_LB_PRIVATE_INTERFACE/g" /$Q_PLUGIN_CONF_FILE + AGENT_BINARY=$QUANTUM_DIR/quantum/plugins/linuxbridge/agent/linuxbridge_quantum_agent.py fi + # Start up the quantum agent + screen_it q-agt "sudo python $AGENT_BINARY /$Q_PLUGIN_CONF_FILE -v" fi # Melange service @@ -1278,6 +1261,21 @@ if is_service_enabled n-cpu; then fi fi + QEMU_CONF=/etc/libvirt/qemu.conf + if is_service_enabled quantum && [[ $Q_PLUGIN = "openvswitch" ]] && ! 
sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then + # add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces + sudo chmod 666 $QEMU_CONF + sudo cat <> /etc/libvirt/qemu.conf +cgroup_device_acl = [ + "/dev/null", "/dev/full", "/dev/zero", + "/dev/random", "/dev/urandom", + "/dev/ptmx", "/dev/kvm", "/dev/kqemu", + "/dev/rtc", "/dev/hpet","/dev/net/tun", +] +EOF + sudo chmod 644 $QEMU_CONF + fi + if [[ "$os_PACKAGE" = "deb" ]]; then LIBVIRT_DAEMON=libvirt-bin else @@ -1616,17 +1614,18 @@ if is_service_enabled quantum; then add_nova_opt "melange_host=$M_HOST" add_nova_opt "melange_port=$M_PORT" fi - if is_service_enabled q-svc && [[ "$Q_PLUGIN" = "openvswitch" ]]; then - add_nova_opt "libvirt_vif_type=ethernet" - add_nova_opt "libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver" - add_nova_opt "linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver" - add_nova_opt "quantum_use_dhcp=True" - elif is_service_enabled q-svc && [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - add_nova_opt "libvirt_vif_type=ethernet" - add_nova_opt "libvirt_vif_driver=nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver" - add_nova_opt "linuxnet_interface_driver=nova.network.linux_net.QuantumLinuxBridgeInterfaceDriver" - add_nova_opt "quantum_use_dhcp=True" + + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + NOVA_VIF_DRIVER="nova.virt.libvirt.vif.LibvirtOpenVswitchDriver" + LINUXNET_VIF_DRIVER="nova.network.linux_net.LinuxOVSInterfaceDriver" + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + NOVA_VIF_DRIVER="nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver" + LINUXNET_VIF_DRIVER="nova.network.linux_net.QuantumLinuxBridgeInterfaceDriver" fi + add_nova_opt "libvirt_vif_type=ethernet" + add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER" + add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER" + add_nova_opt "quantum_use_dhcp=True" else add_nova_opt "network_manager=nova.network.manager.$NET_MAN" fi From d0e55c859ab2482ea1d85f1c234238a153fe334e Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Mon, 21 May 2012 09:04:27 -0700 Subject: [PATCH 520/967] Add "dist:precise" to qpidd. It is also marked NOPRIME, but some setups (devstack-gate) would like to cache, but not install, all .debs to avoid false negative tests due to network problems. This way it can be cached only on appropriate platforms. 
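As a standalone sketch of the libvirt tweak made in the multi-node Quantum
patch above (the ACL entries are copied from that patch; tee -a is an editorial
stand-in for its chmod/cat sequence):

QEMU_CONF=/etc/libvirt/qemu.conf
if ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then
    # /dev/net/tun must be listed so qemu can attach the type=ethernet VIFs
    # used with the openvswitch plugin
    sudo tee -a $QEMU_CONF <<'EOF'
cgroup_device_acl = [
    "/dev/null", "/dev/full", "/dev/zero",
    "/dev/random", "/dev/urandom",
    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
    "/dev/rtc", "/dev/hpet","/dev/net/tun",
]
EOF
fi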
Change-Id: Ia4216e9e8e6e7f55bf5fe9bc683fee97976a9b04 --- files/apts/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/apts/nova b/files/apts/nova index 3ebf57c6..c16a7087 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -19,7 +19,7 @@ libvirt-bin # NOPRIME vlan curl rabbitmq-server # NOPRIME -qpidd # NOPRIME +qpidd # dist:precise NOPRIME socat # used by ajaxterm python-mox python-paste From daadf744edfae895da1d254f9d25c9dbe2ae6db0 Mon Sep 17 00:00:00 2001 From: John Garbutt Date: Fri, 27 Apr 2012 18:28:28 +0100 Subject: [PATCH 521/967] Improvements to DevStack's XenServer scripts I have ensured: - template gets re-used on second run - template includes XenServer tools, and custom user accounts - take snapshot before first boot, for easy re-run - make host_ip_iface work with either eth2 or eth3 - make ssh into domU checks looser - above is all ground work for improved jenkins tests - added some more comments to make it scripts clearer Change-Id: I5c45370bf8a1393d669480e196b13f592d29154f --- tools/xen/README.md | 6 + tools/xen/build_xva.sh | 70 +++--- tools/xen/install_domU_multi.sh | 40 ---- tools/xen/install_os_domU.sh | 306 ++++++++++++++++++------ tools/xen/prepare_guest.sh | 23 +- tools/xen/prepare_guest_template.sh | 57 +++++ tools/xen/scripts/install-os-vpx.sh | 7 +- tools/xen/scripts/manage-vdi | 42 +++- tools/xen/scripts/on_exit.sh | 24 ++ tools/xen/scripts/uninstall-os-vpx.sh | 63 ++--- tools/xen/scripts/xenoneirictemplate.sh | 14 +- tools/xen/xenrc | 9 +- 12 files changed, 453 insertions(+), 208 deletions(-) delete mode 100755 tools/xen/install_domU_multi.sh create mode 100755 tools/xen/prepare_guest_template.sh create mode 100755 tools/xen/scripts/on_exit.sh diff --git a/tools/xen/README.md b/tools/xen/README.md index d102b01d..f20ad04b 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -81,3 +81,9 @@ Step 5: Do cloudy stuff! * Play with horizon * Play with the CLI * Log bugs to devstack and core projects, and submit fixes! + +Step 6: Run from snapshot +------------------------- +If you want to quicky re-run devstack from a clean state, +using the same settings you used in your previous run, +you can revert the DomU to the snapshot called "before_first_boot" diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index edc0db3b..18035048 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -1,46 +1,40 @@ #!/bin/bash -set -e - -declare -a on_exit_hooks - -on_exit() -{ - for i in $(seq $((${#on_exit_hooks[*]} - 1)) -1 0) - do - eval "${on_exit_hooks[$i]}" - done -} - -add_on_exit() -{ - local n=${#on_exit_hooks[*]} - on_exit_hooks[$n]="$*" - if [[ $n -eq 0 ]] - then - trap on_exit EXIT - fi -} - -# Abort if localrc is not set -if [ ! -e ../../localrc ]; then - echo "You must have a localrc with ALL necessary passwords defined before proceeding." - echo "See the xen README for required passwords." - exit 1 -fi +# This script is run by install_os_domU.sh +# +# It modifies the ubuntu image created by install_os_domU.sh +# and previously moodified by prepare_guest_template.sh +# +# This script is responsible for: +# - pushing in the DevStack code +# - creating run.sh, to run the code on boot +# It does this by mounting the disk image of the VM. +# +# The resultant image is then templated and started +# by install_os_domU.sh + +# Exit on errors +set -o errexit +# Echo commands +set -o xtrace # This directory TOP_DIR=$(cd $(dirname "$0") && pwd) +# Include onexit commands +. 
$TOP_DIR/scripts/on_exit.sh + # Source params - override xenrc params in your localrc to suite your taste source xenrc -# Echo commands -set -o xtrace - +# +# Parameters +# GUEST_NAME="$1" -# Directory where we stage the build +# +# Mount the VDI +# STAGING_DIR=$($TOP_DIR/scripts/manage-vdi open $GUEST_NAME 0 1 | grep -o "/tmp/tmp.[[:alnum:]]*") add_on_exit "$TOP_DIR/scripts/manage-vdi close $GUEST_NAME 0 1" @@ -76,7 +70,7 @@ cd $TOP_DIR cat <$STAGING_DIR/etc/rc.local # network restart required for getting the right gateway /etc/init.d/networking restart -GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ DO_TGZ=0 bash /opt/stack/devstack/tools/xen/prepare_guest.sh > /opt/stack/prepare_guest.log 2>&1 +chown -R stack /opt/stack su -c "/opt/stack/run.sh > /opt/stack/run.sh.log 2>&1" stack exit 0 EOF @@ -85,8 +79,12 @@ EOF echo $GUEST_NAME > $STAGING_DIR/etc/hostname # Hostname must resolve for rabbit +HOSTS_FILE_IP=$PUB_IP +if [ $MGT_IP != "dhcp" ]; then + HOSTS_FILE_IP=$MGT_IP +fi cat <$STAGING_DIR/etc/hosts -$MGT_IP $GUEST_NAME +$HOSTS_FILE_IP $GUEST_NAME 127.0.0.1 localhost localhost.localdomain EOF @@ -142,8 +140,6 @@ cat <$STAGING_DIR/opt/stack/run.sh #!/bin/bash cd /opt/stack/devstack killall screen -UPLOAD_LEGACY_TTY=yes HOST_IP=$PUB_IP VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=$MULTI_HOST HOST_IP_IFACE=$HOST_IP_IFACE $STACKSH_PARAMS ./stack.sh +VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=$MULTI_HOST HOST_IP_IFACE=$HOST_IP_IFACE $STACKSH_PARAMS ./stack.sh EOF chmod 755 $STAGING_DIR/opt/stack/run.sh - -echo "Done" diff --git a/tools/xen/install_domU_multi.sh b/tools/xen/install_domU_multi.sh deleted file mode 100755 index 91129c57..00000000 --- a/tools/xen/install_domU_multi.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env bash - -# Echo commands -set -o xtrace - -# Head node host, which runs glance, api, keystone -HEAD_PUB_IP=${HEAD_PUB_IP:-192.168.1.57} -HEAD_MGT_IP=${HEAD_MGT_IP:-172.16.100.57} - -COMPUTE_PUB_IP=${COMPUTE_PUB_IP:-192.168.1.58} -COMPUTE_MGT_IP=${COMPUTE_MGT_IP:-172.16.100.58} - -# Networking params -FLOATING_RANGE=${FLOATING_RANGE:-192.168.1.196/30} - -# Variables common amongst all hosts in the cluster -COMMON_VARS="$STACKSH_PARAMS MYSQL_HOST=$HEAD_MGT_IP RABBIT_HOST=$HEAD_MGT_IP GLANCE_HOSTPORT=$HEAD_MGT_IP:9292 FLOATING_RANGE=$FLOATING_RANGE" - -# Helper to launch containers -function install_domU { - GUEST_NAME=$1 PUB_IP=$2 MGT_IP=$3 DO_SHUTDOWN=$4 TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $5" ./build_domU.sh -} - -# Launch the head node - headnode uses a non-ip domain name, -# because rabbit won't launch with an ip addr hostname :( -install_domU HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit" - -if [ $HEAD_PUB_IP == "dhcp" ] -then - guestnet=$(xe vm-list --minimal name-label=HEADNODE params=networks) - HEAD_PUB_IP=$(echo $guestnet | grep -w -o --only-matching "3/ip: [0-9,.]*;" | cut -d ':' -f2 | cut -d ';' -f 1) -fi -# Wait till the head node is up -while ! curl -L http://$HEAD_PUB_IP | grep -q username; do - echo "Waiting for head node ($HEAD_PUB_IP) to start..." 
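# Illustrative aside (not from the patch): the on_exit.sh helpers sourced above
# register cleanup commands that run from a single EXIT trap, most recently
# added first. A minimal use looks like:
. $TOP_DIR/scripts/on_exit.sh
TMP_MOUNT=$(mktemp -d)
add_on_exit "rmdir $TMP_MOUNT"          # runs second (hooks run in reverse order)
add_on_exit "echo 'cleaning up...'"     # runs first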
- sleep 5 -done - -# Build the HA compute host -install_domU COMPUTENODE $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api" diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 088748f5..352f63ac 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -1,7 +1,16 @@ #!/bin/bash +# This script is a level script +# It must be run on a XenServer or XCP machine +# +# It creates a DomU VM that runs OpenStack services +# +# For more details see: README.md + # Exit on errors set -o errexit +# Echo commands +set -o xtrace # Abort if localrc is not set if [ ! -e ../../localrc ]; then @@ -16,12 +25,17 @@ TOP_DIR=$(cd $(dirname "$0") && pwd) # Source lower level functions . $TOP_DIR/../../functions +# Include onexit commands +. $TOP_DIR/scripts/on_exit.sh + + +# +# Get Settings +# + # Source params - override xenrc params in your localrc to suit your taste source xenrc -# Echo commands -set -o xtrace - xe_min() { local cmd="$1" @@ -29,22 +43,38 @@ xe_min() xe "$cmd" --minimal "$@" } + +# +# Prepare Dom0 +# including installing XenAPI plugins +# + cd $TOP_DIR if [ -f ./master ] then rm -rf ./master rm -rf ./nova fi + +# get nova wget https://github.com/openstack/nova/zipball/master --no-check-certificate unzip -o master -d ./nova -cp -pr ./nova/*/plugins/xenserver/xenapi/etc/xapi.d /etc/ -chmod a+x /etc/xapi.d/plugins/* + +# install xapi plugins +XAPI_PLUGIN_DIR=/etc/xapi.d/plugins/ +if [ ! -d $XAPI_PLUGIN_DIR ]; then + # the following is needed when using xcp-xapi + XAPI_PLUGIN_DIR=/usr/lib/xcp/plugins/ +fi +cp -pr ./nova/*/plugins/xenserver/xenapi/etc/xapi.d/plugins/* $XAPI_PLUGIN_DIR +chmod a+x ${XAPI_PLUGIN_DIR}* mkdir -p /boot/guest -GUEST_NAME=${GUEST_NAME:-"DevStackOSDomU"} -SNAME="ubuntusnapshot" -TNAME="ubuntuready" + +# +# Configure Networking +# # Helper to create networks # Uses echo trickery to return network uuid @@ -84,7 +114,7 @@ function errorcheck() { fi } -# Create host, vm, mgmt, pub networks +# Create host, vm, mgmt, pub networks on XenServer VM_NET=$(create_network "$VM_BR" "$VM_DEV" "$VM_VLAN" "vmbr") errorcheck MGT_NET=$(create_network "$MGT_BR" "$MGT_DEV" "$MGT_VLAN" "mgtbr") @@ -123,28 +153,48 @@ create_vlan $PUB_DEV $PUB_VLAN $PUB_NET create_vlan $VM_DEV $VM_VLAN $VM_NET create_vlan $MGT_DEV $MGT_VLAN $MGT_NET -# dom0 ip +# Get final bridge names +if [ -z $VM_BR ]; then + VM_BR=$(xe_min network-list uuid=$VM_NET params=bridge) +fi +if [ -z $MGT_BR ]; then + MGT_BR=$(xe_min network-list uuid=$MGT_NET params=bridge) +fi +if [ -z $PUB_BR ]; then + PUB_BR=$(xe_min network-list uuid=$PUB_NET params=bridge) +fi + +# dom0 ip, XenAPI is assumed to be listening HOST_IP=${HOST_IP:-`ifconfig xenbr0 | grep "inet addr" | cut -d ":" -f2 | sed "s/ .*//"`} -# Set up ip forwarding -if ! grep -q "FORWARD_IPV4=YES" /etc/sysconfig/network; then - # FIXME: This doesn't work on reboot! - echo "FORWARD_IPV4=YES" >> /etc/sysconfig/network +# Set up ip forwarding, but skip on xcp-xapi +if [ -a /etc/sysconfig/network]; then + if ! grep -q "FORWARD_IPV4=YES" /etc/sysconfig/network; then + # FIXME: This doesn't work on reboot! + echo "FORWARD_IPV4=YES" >> /etc/sysconfig/network + fi fi - # Also, enable ip forwarding in rc.local, since the above trick isn't working if ! 
grep -q "echo 1 >/proc/sys/net/ipv4/ip_forward" /etc/rc.local; then echo "echo 1 >/proc/sys/net/ipv4/ip_forward" >> /etc/rc.local fi - # Enable ip forwarding at runtime as well echo 1 > /proc/sys/net/ipv4/ip_forward + +# # Shutdown previous runs +# + DO_SHUTDOWN=${DO_SHUTDOWN:-1} +CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false} if [ "$DO_SHUTDOWN" = "1" ]; then # Shutdown all domU's that created previously - xe_min vm-list name-label="$GUEST_NAME" | xargs ./scripts/uninstall-os-vpx.sh + clean_templates_arg="" + if $CLEAN_TEMPLATES; then + clean_templates_arg="--remove-templates" + fi + ./scripts/uninstall-os-vpx.sh $clean_templates_arg # Destroy any instances that were launched for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do @@ -160,34 +210,18 @@ if [ "$DO_SHUTDOWN" = "1" ]; then done fi -# Start guest -if [ -z $VM_BR ]; then - VM_BR=$(xe_min network-list uuid=$VM_NET params=bridge) -fi -if [ -z $MGT_BR ]; then - MGT_BR=$(xe_min network-list uuid=$MGT_NET params=bridge) -fi -if [ -z $PUB_BR ]; then - PUB_BR=$(xe_min network-list uuid=$PUB_NET params=bridge) -fi -templateuuid=$(xe template-list name-label="$TNAME") -if [ -n "$templateuuid" ] -then - vm_uuid=$(xe vm-install template="$TNAME" new-name-label="$GUEST_NAME") -else - template=$(xe_min template-list name-label="Ubuntu 11.10 (64-bit)") - if [ -z "$template" ] - then - cp $TOP_DIR/devstackubuntupreseed.cfg /opt/xensource/www/ - $TOP_DIR/scripts/xenoneirictemplate.sh "${HOST_IP}/devstackubuntupreseed.cfg" - MIRROR=${MIRROR:-archive.ubuntu.com} - sed -e "s,d-i mirror/http/hostname string .*,d-i mirror/http/hostname string $MIRROR," \ - -i /opt/xensource/www/devstackubuntupreseed.cfg - fi - $TOP_DIR/scripts/install-os-vpx.sh -t "Ubuntu 11.10 (64-bit)" -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -r $OSDOMU_MEM_MB -k "flat_network_bridge=${VM_BR}" +# +# Create Ubuntu VM template +# and/or create VM from template +# - # Wait for install to finish +GUEST_NAME=${GUEST_NAME:-"DevStackOSDomU"} +TNAME="devstack_template_folsom_11.10" +SNAME_PREPARED="template_prepared" +SNAME_FIRST_BOOT="before_first_boot" + +function wait_for_VM_to_halt() { while true do state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted) @@ -196,72 +230,199 @@ else break else echo "Waiting for "$GUEST_NAME" to finish installation..." - sleep 30 + sleep 20 fi done +} + +templateuuid=$(xe template-list name-label="$TNAME") +if [ -z "$templateuuid" ]; then + # + # Install Ubuntu over network + # + + # try to find ubuntu template + ubuntu_template_name="Ubuntu 11.10 for DevStack (64-bit)" + ubuntu_template=$(xe_min template-list name-label="$ubuntu_template_name") + + # remove template, if we are in CLEAN_TEMPLATE mode + if [ -n "$ubuntu_template" ]; then + if $CLEAN_TEMPLATES; then + xe template-param-clear param-name=other-config uuid=$ubuntu_template + xe template-uninstall template-uuid=$ubuntu_template force=true + ubuntu_template="" + fi + fi + + # always update the preseed file, incase we have a newer one + PRESEED_URL=${PRESEED_URL:-""} + if [ -z "$PRESEED_URL" ]; then + PRESEED_URL="${HOST_IP}/devstackubuntupreseed.cfg" + HTTP_SERVER_LOCATION="/opt/xensource/www" + if [ ! 
-e $HTTP_SERVER_LOCATION ]; then + HTTP_SERVER_LOCATION="/var/www/html" + mkdir -p $HTTP_SERVER_LOCATION + fi + cp -f $TOP_DIR/devstackubuntupreseed.cfg $HTTP_SERVER_LOCATION + MIRROR=${MIRROR:-""} + if [ -n "$MIRROR" ]; then + sed -e "s,d-i mirror/http/hostname string .*,d-i mirror/http/hostname string $MIRROR," \ + -i "${HTTP_SERVER_LOCATION}/devstackubuntupreseed.cfg" + fi + fi + + if [ -z "$ubuntu_template" ]; then + $TOP_DIR/scripts/xenoneirictemplate.sh $PRESEED_URL + fi + # create a new VM with the given template + # creating the correct VIFs and metadata + $TOP_DIR/scripts/install-os-vpx.sh -t "$ubuntu_template_name" -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -r $OSDOMU_MEM_MB -k "flat_network_bridge=${VM_BR}" + + # wait for install to finish + wait_for_VM_to_halt + + # set VM to restart after a reboot vm_uuid=$(xe_min vm-list name-label="$GUEST_NAME") xe vm-param-set actions-after-reboot=Restart uuid="$vm_uuid" + # + # Prepare VM for DevStack + # + + # Install XenServer tools, and other such things + $TOP_DIR/prepare_guest_template.sh "$GUEST_NAME" + + # start the VM to run the prepare steps + xe vm-start vm="$GUEST_NAME" + + # Wait for prep script to finish and shutdown system + wait_for_VM_to_halt + # Make template from VM - snuuid=$(xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME") - template_uuid=$(xe snapshot-clone uuid=$snuuid new-name-label="$TNAME") + snuuid=$(xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_PREPARED") + xe snapshot-clone uuid=$snuuid new-name-label="$TNAME" +else + # + # Template already installed, create VM from template + # + vm_uuid=$(xe vm-install template="$TNAME" new-name-label="$GUEST_NAME") fi + +# +# Inject DevStack inside VM disk +# $TOP_DIR/build_xva.sh "$GUEST_NAME" +# create a snapshot before the first boot +# to allow a quick re-run with the same settings +xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_FIRST_BOOT" + + +# +# Run DevStack VM +# xe vm-start vm="$GUEST_NAME" -if [ $PUB_IP == "dhcp" ]; then - PUB_IP=$(xe_min vm-list name-label=$GUEST_NAME params=networks | sed -ne 's,^.*3/ip: \([0-9.]*\).*$,\1,p') + +# +# Find IP and optionally wait for stack.sh to complete +# + +function find_ip_by_name() { + local guest_name="$1" + local interface="$2" + local period=10 + max_tries=10 + i=0 + while true + do + if [ $i -ge $max_tries ]; then + echo "Timed out waiting for devstack ip address" + exit 11 + fi + + devstackip=$(xe vm-list --minimal \ + name-label=$guest_name \ + params=networks | sed -ne "s,^.*${interface}/ip: \([0-9.]*\).*\$,\1,p") + if [ -z "$devstackip" ] + then + sleep $period + ((i++)) + else + echo $devstackip + break + fi + done +} + +function ssh_no_check() { + ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$@" +} + +# Note the XenServer needs to be on the chosen +# network, so XenServer can access Glance API +if [ $HOST_IP_IFACE == "eth2" ]; then + DOMU_IP=$MGT_IP + if [ $MGT_IP == "dhcp" ]; then + DOMU_IP=$(find_ip_by_name $GUEST_NAME 2) + fi +else + DOMU_IP=$PUB_IP + if [ $PUB_IP == "dhcp" ]; then + DOMU_IP=$(find_ip_by_name $GUEST_NAME 3) + fi fi # If we have copied our ssh credentials, use ssh to monitor while the installation runs WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1} +COPYENV=${COPYENV:-1} if [ "$WAIT_TILL_LAUNCH" = "1" ] && [ -e ~/.ssh/id_rsa.pub ] && [ "$COPYENV" = "1" ]; then - # Done creating the container, let's tail the log - echo - echo "=============================================================" - echo " -- YAY! 
--" - echo "=============================================================" - echo echo "We're done launching the vm, about to start tailing the" echo "stack.sh log. It will take a second or two to start." echo echo "Just CTRL-C at any time to stop tailing." - set +o xtrace - - while ! ssh -q stack@$PUB_IP "[ -e run.sh.log ]"; do - sleep 1 + # wait for log to appear + while ! ssh_no_check -q stack@$DOMU_IP "[ -e run.sh.log ]"; do + sleep 10 done - ssh stack@$PUB_IP 'tail -f run.sh.log' & - + # output the run.sh.log + ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no stack@$DOMU_IP 'tail -f run.sh.log' & TAIL_PID=$! function kill_tail() { - kill $TAIL_PID + kill -9 $TAIL_PID exit 1 } - # Let Ctrl-c kill tail and exit trap kill_tail SIGINT - echo "Waiting stack.sh to finish..." - while ! ssh -q stack@$PUB_IP "grep -q 'stack.sh completed in' run.sh.log"; do - sleep 1 + # ensure we kill off the tail if we exit the script early + # for other reasons + add_on_exit "kill -9 $TAIL_PID || true" + + # wait silently until stack.sh has finished + set +o xtrace + while ! ssh_no_check -q stack@$DOMU_IP "tail run.sh.log | grep -q 'stack.sh completed in'"; do + sleep 10 done + set -o xtrace - kill $TAIL_PID + # kill the tail process now stack.sh has finished + kill -9 $TAIL_PID - if ssh -q stack@$PUB_IP "grep -q 'stack.sh failed' run.sh.log"; then + # check for a failure + if ssh_no_check -q stack@$DOMU_IP "grep -q 'stack.sh failed' run.sh.log"; then exit 1 fi + echo "################################################################################" echo "" - echo "Finished - Zip-a-dee Doo-dah!" - echo "You can then visit the OpenStack Dashboard" - echo "at http://$PUB_IP, and contact other services at the usual ports." + echo "All Finished!" + echo "You can visit the OpenStack Dashboard" + echo "at http://$DOMU_IP, and contact other services at the usual ports." else echo "################################################################################" echo "" @@ -269,10 +430,9 @@ else echo "Now, you can monitor the progress of the stack.sh installation by " echo "tailing /opt/stack/run.sh.log from within your domU." echo "" - echo "ssh into your domU now: 'ssh stack@$PUB_IP' using your password" + echo "ssh into your domU now: 'ssh stack@$DOMU_IP' using your password" echo "and then do: 'tail -f /opt/stack/run.sh.log'" echo "" echo "When the script completes, you can then visit the OpenStack Dashboard" - echo "at http://$PUB_IP, and contact other services at the usual ports." - + echo "at http://$DOMU_IP, and contact other services at the usual ports." fi diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh index 74efaff9..89a01694 100755 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -1,6 +1,18 @@ #!/bin/bash +# This script is run on an Ubuntu VM. +# This script is inserted into the VM by prepare_guest_template.sh +# and is run when that VM boots. 
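# Illustrative aside: the ssh_no_check wrapper introduced above simply disables
# host-key checking so the monitoring loop stays non-interactive; any command
# can be run against the DomU the same way, e.g.:
ssh_no_check() {
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$@"
}
ssh_no_check -q stack@$DOMU_IP "tail -n 20 /opt/stack/run.sh.log"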
+# It customizes a fresh Ubuntu install, so it is ready +# to run stack.sh +# +# This includes installing the XenServer tools, +# creating the user called "stack", +# and shuts down the VM to signal the script has completed + set -x +# Echo commands +set -o xtrace # Configurable nuggets GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} @@ -13,7 +25,7 @@ chroot $STAGING_DIR apt-get install -y cracklib-runtime curl wget ssh openssh-se chroot $STAGING_DIR apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo chroot $STAGING_DIR pip install xenapi -# Install guest utilities +# Install XenServer guest utilities XEGUEST=xe-guest-utilities_5.6.100-651_amd64.deb wget http://images.ansolabs.com/xen/$XEGUEST -O $XEGUEST cp $XEGUEST $STAGING_DIR/root @@ -68,3 +80,12 @@ if [ "$DO_TGZ" = "1" ]; then rm -f stage.tgz tar cfz stage.tgz stage fi + +# remove self from local.rc +# so this script is not run again +rm -rf /etc/rc.local +mv /etc/rc.local.preparebackup /etc/rc.local +cp $STAGING_DIR/etc/rc.local $STAGING_DIR/etc/rc.local.backup + +# shutdown to notify we are done +shutdown -h now diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh new file mode 100755 index 00000000..7c6dec4f --- /dev/null +++ b/tools/xen/prepare_guest_template.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +# This script is run by install_os_domU.sh +# +# Parameters: +# - $GUEST_NAME - hostname for the DomU VM +# +# It modifies the ubuntu image created by install_os_domU.sh +# +# This script is responsible for cusomtizing the fresh ubuntu +# image so on boot it runs the prepare_guest.sh script +# that modifies the VM so it is ready to run stack.sh. +# It does this by mounting the disk image of the VM. +# +# The resultant image is started by install_os_domU.sh, +# and once the VM has shutdown, build_xva.sh is run + +# Exit on errors +set -o errexit +# Echo commands +set -o xtrace + +# This directory +TOP_DIR=$(cd $(dirname "$0") && pwd) + +# Include onexit commands +. $TOP_DIR/scripts/on_exit.sh + +# Source params - override xenrc params in your localrc to suite your taste +source xenrc + +# +# Parameters +# +GUEST_NAME="$1" + +# Mount the VDI +STAGING_DIR=$($TOP_DIR/scripts/manage-vdi open $GUEST_NAME 0 1 | grep -o "/tmp/tmp.[[:alnum:]]*") +add_on_exit "$TOP_DIR/scripts/manage-vdi close $GUEST_NAME 0 1" + +# Make sure we have a stage +if [ ! -d $STAGING_DIR/etc ]; then + echo "Stage is not properly set up!" + exit 1 +fi + +# Copy prepare_guest.sh to VM +mkdir -p $STAGING_DIR/opt/stack/ +cp $TOP_DIR/prepare_guest.sh $STAGING_DIR/opt/stack/prepare_guest.sh + +# backup rc.local +cp $STAGING_DIR/etc/rc.local $STAGING_DIR/etc/rc.local.preparebackup + +# run prepare_guest.sh on boot +cat <$STAGING_DIR/etc/rc.local +GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ DO_TGZ=0 bash /opt/stack/prepare_guest.sh > /opt/stack/prepare_guest.log 2>&1 +EOF diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index fe5e8107..7f2f3e62 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -19,7 +19,12 @@ set -eux -. /etc/xensource-inventory +if [ -a /etc/xensource-inventory] +then + . /etc/xensource-inventory +else + . 
/etc/xcp/inventory +fi NAME="XenServer OpenStack VPX" DATA_VDI_SIZE="500MiB" diff --git a/tools/xen/scripts/manage-vdi b/tools/xen/scripts/manage-vdi index a0a27e8a..7f12ebc1 100755 --- a/tools/xen/scripts/manage-vdi +++ b/tools/xen/scripts/manage-vdi @@ -20,6 +20,26 @@ vdi_uuid=$(xe_min vbd-list params=vdi-uuid vm-uuid="$vm_uuid" \ dom0_uuid=$(xe_min vm-list is-control-domain=true) +get_mount_device() +{ + vbd_uuid=$1 + + dev=$(xe_min vbd-list params=device uuid="$vbd_uuid") + if [[ "$dev" =~ "sm/" ]]; then + DEBIAN_FRONTEND=noninteractive \ + apt-get --option "Dpkg::Options::=--force-confold" --assume-yes \ + install kpartx || true &> /dev/null + mapping=$(kpartx -av "/dev/$dev" | sed -ne 's,^add map \([a-f0-9\-]*\).*$,\1,p' | sed -ne "s,^\(.*${part}\)\$,\1,p") + if [ -z "$mapping" ]; then + echo "Failed to find mapping" + exit -1 + fi + echo "mapper/${mapping}" + else + echo "/dev/$dev$part" + fi +} + open_vdi() { vbd_uuid=$(xe vbd-create vm-uuid="$dom0_uuid" vdi-uuid="$vdi_uuid" \ @@ -27,26 +47,30 @@ open_vdi() mp=$(mktemp -d) xe vbd-plug uuid="$vbd_uuid" - udevsettle - dev=$(xe_min vbd-list params=device uuid="$vbd_uuid") - mount "/dev/$dev$part" "$mp" + which_udev=$(which udevsettle) || true + if [ -n "$which_udev" ]; then + udevsettle + else + udevadm settle + fi + + mount_device=$(get_mount_device "$vbd_uuid") + mount "$mount_device" "$mp" echo "Your vdi is mounted at $mp" } close_vdi() { vbd_uuid=$(xe_min vbd-list vm-uuid="$dom0_uuid" vdi-uuid="$vdi_uuid") - dev=$(xe_min vbd-list params=device uuid="$vbd_uuid") - umount "/dev/$dev$part" + mount_device=$(get_mount_device "$vbd_uuid") + umount "$mount_device" xe vbd-unplug uuid=$vbd_uuid xe vbd-destroy uuid=$vbd_uuid } -if [ "$action" == "open" ] -then +if [ "$action" == "open" ]; then open_vdi -elif [ "$action" == "close" ] -then +elif [ "$action" == "close" ]; then close_vdi fi diff --git a/tools/xen/scripts/on_exit.sh b/tools/xen/scripts/on_exit.sh new file mode 100755 index 00000000..a4db39c2 --- /dev/null +++ b/tools/xen/scripts/on_exit.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +set -e +set -o xtrace + +declare -a on_exit_hooks + +on_exit() +{ + for i in $(seq $((${#on_exit_hooks[*]} - 1)) -1 0) + do + eval "${on_exit_hooks[$i]}" + done +} + +add_on_exit() +{ + local n=${#on_exit_hooks[*]} + on_exit_hooks[$n]="$*" + if [[ $n -eq 0 ]] + then + trap on_exit EXIT + fi +} diff --git a/tools/xen/scripts/uninstall-os-vpx.sh b/tools/xen/scripts/uninstall-os-vpx.sh index a82f3a05..0feaec79 100755 --- a/tools/xen/scripts/uninstall-os-vpx.sh +++ b/tools/xen/scripts/uninstall-os-vpx.sh @@ -17,19 +17,19 @@ # under the License. # -remove_data= -if [ "$1" = "--remove-data" ] -then - remove_data=1 -fi +set -ex -set -eu +# By default, don't remove the templates +REMOVE_TEMPLATES=${REMOVE_TEMPLATES:-"false"} +if [ "$1" = "--remove-templates" ]; then + REMOVE_TEMPLATES=true +fi xe_min() { local cmd="$1" shift - /opt/xensource/bin/xe "$cmd" --minimal "$@" + xe "$cmd" --minimal "$@" } destroy_vdi() @@ -39,11 +39,8 @@ destroy_vdi() local dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice) local vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid) - if [ "$type" = 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ] - then - echo -n "Destroying data disk... " + if [ "$type" == 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ]; then xe vdi-destroy uuid=$vdi_uuid - echo "done." 
fi } @@ -52,50 +49,36 @@ uninstall() local vm_uuid="$1" local power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state) - if [ "$power_state" != "halted" ] - then - echo -n "Shutting down VM... " + if [ "$power_state" != "halted" ]; then xe vm-shutdown vm=$vm_uuid force=true - echo "done." fi - if [ "$remove_data" = "1" ] - then - for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g') - do - destroy_vdi "$v" - done - fi + for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do + destroy_vdi "$v" + done - echo -n "Deleting VM... " xe vm-uninstall vm=$vm_uuid force=true >/dev/null - echo "done." } uninstall_template() { local vm_uuid="$1" - if [ "$remove_data" = "1" ] - then - for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g') - do - destroy_vdi "$v" - done - fi + for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do + destroy_vdi "$v" + done - echo -n "Deleting template... " xe template-uninstall template-uuid=$vm_uuid force=true >/dev/null - echo "done." } - -for u in $(xe_min vm-list other-config:os-vpx=true | sed -e 's/,/ /g') -do +# remove the VMs and their disks +for u in $(xe_min vm-list other-config:os-vpx=true | sed -e 's/,/ /g'); do uninstall "$u" done -for u in $(xe_min template-list other-config:os-vpx=true | sed -e 's/,/ /g') -do - uninstall_template "$u" -done +# remove the templates +if [ "$REMOVE_TEMPLATES" == "true" ]; then + for u in $(xe_min template-list other-config:os-vpx=true | sed -e 's/,/ /g'); do + uninstall_template "$u" + done +fi diff --git a/tools/xen/scripts/xenoneirictemplate.sh b/tools/xen/scripts/xenoneirictemplate.sh index 1d9ec5e2..c5ae990b 100755 --- a/tools/xen/scripts/xenoneirictemplate.sh +++ b/tools/xen/scripts/xenoneirictemplate.sh @@ -3,7 +3,7 @@ ## on Xenserver 6.0.2 Net install only ## Original Author: David Markey ## Author: Renuka Apte -## This is not an officially supported guest OS on XenServer 6.02 +## This is not an officially supported guest OS on XenServer 6.0.2 BASE_DIR=$(cd $(dirname "$0") && pwd) source $BASE_DIR/../../../localrc @@ -15,11 +15,15 @@ if [[ -z $LENNY ]] ; then exit 1 fi -distro="Ubuntu 11.10" +distro="Ubuntu 11.10 for DevStack" arches=("32-bit" "64-bit") preseedurl=${1:-"http://images.ansolabs.com/devstackubuntupreseed.cfg"} +NETINSTALL_LOCALE=${NETINSTALL_LOCALE:-en_US} +NETINSTALL_KEYBOARD=${NETINSTALL_KEYBOARD:-us} +NETINSTALL_IFACE=${NETINSTALL_IFACE:-eth3} + for arch in ${arches[@]} ; do echo "Attempting $distro ($arch)" if [[ -n $(xe template-list name-label="$distro ($arch)" params=uuid --minimal) ]] ; then @@ -30,7 +34,11 @@ for arch in ${arches[@]} ; do echo "NETINSTALLIP not set in localrc" exit 1 fi - pvargs="-- quiet console=hvc0 partman/default_filesystem=ext3 locale=en_US console-setup/ask_detect=false keyboard-configuration/layoutcode=us netcfg/choose_interface=eth3 netcfg/get_hostname=os netcfg/get_domain=os auto url=${preseedurl}" + # Some of these settings can be found in example preseed files + # however these need to be answered before the netinstall + # is ready to fetch the preseed file, and as such must be here + # to get a fully automated install + pvargs="-- quiet console=hvc0 partman/default_filesystem=ext3 locale=${NETINSTALL_LOCALE} console-setup/ask_detect=false keyboard-configuration/layoutcode=${NETINSTALL_KEYBOARD} netcfg/choose_interface=${NETINSTALL_IFACE} netcfg/get_hostname=os netcfg/get_domain=os auto url=${preseedurl}" if [ "$NETINSTALLIP" != "dhcp" ] then 
netcfgargs="netcfg/disable_autoconfig=true netcfg/get_nameservers=${NAMESERVERS} netcfg/get_ipaddress=${NETINSTALLIP} netcfg/get_netmask=${NETMASK} netcfg/get_gateway=${GATEWAY} netcfg/confirm_static=true" diff --git a/tools/xen/xenrc b/tools/xen/xenrc index f434b111..bf3d16af 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -5,12 +5,15 @@ GUEST_NAME=${GUEST_NAME:-DevStackOSDomU} # Size of image VDI_MB=${VDI_MB:-5000} +OSDOMU_MEM_MB=1024 # VM Password GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} -# Host Interface, i.e. the public facing interface on the nova vm -HOST_IP_IFACE=${HOST_IP_IFACE:-eth0} +# Host Interface, i.e. the interface on the nova vm you want to expose the services on +# Usually either eth2 (management network) or eth3 (public network) +# not eth0 (private network with XenServer host) or eth1 (VM traffic network) +HOST_IP_IFACE=${HOST_IP_IFACE:-eth3} # Our nova host's network info VM_IP=${VM_IP:-10.255.255.255} # A host-only ip that let's the interface come up, otherwise unused @@ -35,7 +38,5 @@ MGT_BR=${MGT_BR:-""} MGT_VLAN=${MGT_VLAN:-101} MGT_DEV=${MGT_DEV:-eth0} -OSDOMU_MEM_MB=1024 - # Source params cd ../.. && source ./stackrc && cd $TOP_DIR From 030fb2362f6ca33a2bf19d1083c9556433f2b983 Mon Sep 17 00:00:00 2001 From: John Garbutt Date: Fri, 27 Apr 2012 18:28:28 +0100 Subject: [PATCH 522/967] Make devstack work with xcp-xapi package on Ubuntu 12.04 - allow you to configure the xenapi_user (often other than root) - allow you to disable the guest installer network - install the plugins in the xcp-xapi location - use alternate webserver location when adding the preseed file - skip the centos specific ip forwarding configuration - make use xcp inventory, if no xensource-inventory is found - correctly deal with kpartx to mount the VM VDI in manage_vdi Change-Id: I8d51725fc97f0bcaa27a46f7a7ced13c369c809e --- stack.sh | 5 ++-- tools/xen/build_xva.sh | 7 +++++ tools/xen/scripts/manage-vdi | 44 +++++++++++++++++++------------ tools/xen/templates/interfaces.in | 3 --- tools/xen/xenrc | 3 +++ 5 files changed, 40 insertions(+), 22 deletions(-) diff --git a/stack.sh b/stack.sh index df260279..032c0b93 100755 --- a/stack.sh +++ b/stack.sh @@ -1700,7 +1700,7 @@ fi # For Example: EXTRA_OPTS=(foo=true bar=2) for I in "${EXTRA_OPTS[@]}"; do # Attempt to convert flags to options - add_nova_opt ${I//-} + add_nova_opt ${I//--} done @@ -1711,8 +1711,9 @@ if [ "$VIRT_DRIVER" = 'xenserver' ]; then read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." 
add_nova_opt "connection_type=xenapi" XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"} + XENAPI_USER=${XENAPI_USER:-"root"} add_nova_opt "xenapi_connection_url=$XENAPI_CONNECTION_URL" - add_nova_opt "xenapi_connection_username=root" + add_nova_opt "xenapi_connection_username=$XENAPI_USER" add_nova_opt "xenapi_connection_password=$XENAPI_PASSWORD" add_nova_opt "flat_injected=False" # Need to avoid crash due to new firewall support diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index 18035048..fdc6a606 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -118,6 +118,13 @@ else sed -e "s,@ETH3_NETMASK@,$PUB_NETMASK,g" -i $INTERFACES fi +if [ "$ENABLE_GI" == "true" ]; then + cat <>$INTERFACES +auto eth0 +iface eth0 inet dhcp +EOF +fi + # Gracefully cp only if source file/dir exists function cp_it { if [ -e $1 ] || [ -d $1 ]; then diff --git a/tools/xen/scripts/manage-vdi b/tools/xen/scripts/manage-vdi index 7f12ebc1..05c4b074 100755 --- a/tools/xen/scripts/manage-vdi +++ b/tools/xen/scripts/manage-vdi @@ -7,64 +7,74 @@ vm="$2" device="${3-0}" part="${4-}" -xe_min() -{ +function xe_min() { local cmd="$1" shift xe "$cmd" --minimal "$@" } +function run_udev_settle() { + which_udev=$(which udevsettle) || true + if [ -n "$which_udev" ]; then + udevsettle + else + udevadm settle + fi +} + vm_uuid=$(xe_min vm-list name-label="$vm") vdi_uuid=$(xe_min vbd-list params=vdi-uuid vm-uuid="$vm_uuid" \ userdevice="$device") dom0_uuid=$(xe_min vm-list is-control-domain=true) -get_mount_device() -{ +function get_mount_device() { vbd_uuid=$1 dev=$(xe_min vbd-list params=device uuid="$vbd_uuid") if [[ "$dev" =~ "sm/" ]]; then DEBIAN_FRONTEND=noninteractive \ apt-get --option "Dpkg::Options::=--force-confold" --assume-yes \ - install kpartx || true &> /dev/null - mapping=$(kpartx -av "/dev/$dev" | sed -ne 's,^add map \([a-f0-9\-]*\).*$,\1,p' | sed -ne "s,^\(.*${part}\)\$,\1,p") + install kpartx &> /dev/null || true + mapping=$(kpartx -av "/dev/$dev" | sed -ne 's,^add map \([a-z0-9\-]*\).*$,\1,p' | sed -ne "s,^\(.*${part}\)\$,\1,p") if [ -z "$mapping" ]; then echo "Failed to find mapping" exit -1 fi - echo "mapper/${mapping}" + echo "/dev/mapper/${mapping}" else echo "/dev/$dev$part" fi } -open_vdi() -{ +function clean_dev_mappings() { + dev=$(xe_min vbd-list params=device uuid="$vbd_uuid") + if [[ "$dev" =~ "sm/" ]]; then + kpartx -dv "/dev/$dev" + fi +} + +function open_vdi() { vbd_uuid=$(xe vbd-create vm-uuid="$dom0_uuid" vdi-uuid="$vdi_uuid" \ device=autodetect) mp=$(mktemp -d) xe vbd-plug uuid="$vbd_uuid" - which_udev=$(which udevsettle) || true - if [ -n "$which_udev" ]; then - udevsettle - else - udevadm settle - fi + run_udev_settle mount_device=$(get_mount_device "$vbd_uuid") mount "$mount_device" "$mp" echo "Your vdi is mounted at $mp" } -close_vdi() -{ +function close_vdi() { vbd_uuid=$(xe_min vbd-list vm-uuid="$dom0_uuid" vdi-uuid="$vdi_uuid") mount_device=$(get_mount_device "$vbd_uuid") + run_udev_settle umount "$mount_device" + clean_dev_mappings + xe vbd-unplug uuid=$vbd_uuid xe vbd-destroy uuid=$vbd_uuid } diff --git a/tools/xen/templates/interfaces.in b/tools/xen/templates/interfaces.in index e315a8c3..74b41ccf 100644 --- a/tools/xen/templates/interfaces.in +++ b/tools/xen/templates/interfaces.in @@ -21,6 +21,3 @@ auto eth2 iface eth2 inet static address @ETH2_IP@ netmask @ETH2_NETMASK@ - -auto eth0 -iface eth0 inet dhcp diff --git a/tools/xen/xenrc b/tools/xen/xenrc index bf3d16af..102a492e 100644 --- 
a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -38,5 +38,8 @@ MGT_BR=${MGT_BR:-""} MGT_VLAN=${MGT_VLAN:-101} MGT_DEV=${MGT_DEV:-eth0} +# Guest installer network +ENABLE_GI=true + # Source params cd ../.. && source ./stackrc && cd $TOP_DIR From 0c2891558122aa9d030811109536caf5c81cfb75 Mon Sep 17 00:00:00 2001 From: John Postlethwait Date: Tue, 22 May 2012 13:40:39 -0700 Subject: [PATCH 523/967] Node JS is a Horizon requirement now. Horizon is going to start using LessCSS, so Node JS needs to be installed now with Horizon now. Change-Id: I416824dcab169f9be9afd3c3aa46ff911223947d --- AUTHORS | 1 + files/apts/horizon | 1 + 2 files changed, 2 insertions(+) diff --git a/AUTHORS b/AUTHORS index bca25b43..c85f0bd6 100644 --- a/AUTHORS +++ b/AUTHORS @@ -18,6 +18,7 @@ Jason Cannavale Jay Pipes Jesse Andrews Johannes Erdfelt +John Postlethwait Josh Kearney Justin Shepherd Ken Pepple diff --git a/files/apts/horizon b/files/apts/horizon index d93c34b2..9b1c9ee8 100644 --- a/files/apts/horizon +++ b/files/apts/horizon @@ -22,3 +22,4 @@ python-django-mailer python-django-nose python-django-registration python-migrate +nodejs From 819d2bed1d0cccf8f5afc31975c9a674a6938096 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 23 May 2012 19:06:34 -0700 Subject: [PATCH 524/967] Remove unused tools/rfc.sh OpenStack now uses git-review from pip Change-Id: If9f59738b807f36168076dc5b1c6bfb8cc780536 --- AUTHORS | 1 + tools/rfc.sh | 145 --------------------------------------------------- 2 files changed, 1 insertion(+), 145 deletions(-) delete mode 100755 tools/rfc.sh diff --git a/AUTHORS b/AUTHORS index c85f0bd6..f9aa9eab 100644 --- a/AUTHORS +++ b/AUTHORS @@ -17,6 +17,7 @@ James E. Blair Jason Cannavale Jay Pipes Jesse Andrews +Joe Gordon Johannes Erdfelt John Postlethwait Josh Kearney diff --git a/tools/rfc.sh b/tools/rfc.sh deleted file mode 100755 index d4dc5974..00000000 --- a/tools/rfc.sh +++ /dev/null @@ -1,145 +0,0 @@ -#!/bin/sh -e -# Copyright (c) 2010-2011 Gluster, Inc. -# This initial version of this file was taken from the source tree -# of GlusterFS. It was not directly attributed, but is assumed to be -# Copyright (c) 2010-2011 Gluster, Inc and release GPLv3 -# Subsequent modifications are Copyright (c) 2011 OpenStack, LLC. -# -# GlusterFS is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published -# by the Free Software Foundation; either version 3 of the License, -# or (at your option) any later version. -# -# GlusterFS is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see -# . - - -branch="master"; - -set_hooks_commit_msg() -{ - top_dir=`git rev-parse --show-toplevel` - f="${top_dir}/.git/hooks/commit-msg"; - u="https://review.openstack.org/tools/hooks/commit-msg"; - - if [ -x "$f" ]; then - return; - fi - - curl -o $f $u || wget -O $f $u; - - chmod +x $f; - - GIT_EDITOR=true git commit --amend -} - -add_remote() -{ - username=$1 - project=$2 - - echo "No remote set, testing ssh://$username@review.openstack.org:29418" - if project_list=`ssh -p29418 -o StrictHostKeyChecking=no $username@review.openstack.org gerrit ls-projects 2>/dev/null` - then - echo "$username@review.openstack.org:29418 worked." 
- if echo $project_list | grep $project >/dev/null - then - echo "Creating a git remote called gerrit that maps to:" - echo " ssh://$username@review.openstack.org:29418/$project" - git remote add gerrit ssh://$username@review.openstack.org:29418/$project - else - echo "The current project name, $project, is not a known project." - echo "Please either reclone from github/gerrit or create a" - echo "remote named gerrit that points to the intended project." - return 1 - fi - - return 0 - fi - return 1 -} - -check_remote() -{ - if ! git remote | grep gerrit >/dev/null 2>&1 - then - origin_project=`git remote show origin | grep 'Fetch URL' | perl -nle '@fields = split(m|[:/]|); $len = $#fields; print $fields[$len-1], "/", $fields[$len];'` - if add_remote $USERNAME $origin_project - then - return 0 - else - echo "Your local name doesn't work on Gerrit." - echo -n "Enter Gerrit username (same as launchpad): " - read gerrit_user - if add_remote $gerrit_user $origin_project - then - return 0 - else - echo "Can't infer where gerrit is - please set a remote named" - echo "gerrit manually and then try again." - echo - echo "For more information, please see:" - echo "\thttp://wiki.openstack.org/GerritWorkflow" - exit 1 - fi - fi - fi -} - -rebase_changes() -{ - git fetch; - - GIT_EDITOR=true git rebase -i origin/$branch || exit $?; -} - - -assert_diverge() -{ - if ! git diff origin/$branch..HEAD | grep -q . - then - echo "No changes between the current branch and origin/$branch." - exit 1 - fi -} - - -main() -{ - set_hooks_commit_msg; - - check_remote; - - rebase_changes; - - assert_diverge; - - bug=$(git show --format='%s %b' | perl -nle 'if (/\b([Bb]ug|[Ll][Pp])\s*[#:]?\s*(\d+)/) {print "$2"; exit}') - - bp=$(git show --format='%s %b' | perl -nle 'if (/\b([Bb]lue[Pp]rint|[Bb][Pp])\s*[#:]?\s*([0-9a-zA-Z-_]+)/) {print "$2"; exit}') - - if [ "$DRY_RUN" = 1 ]; then - drier='echo -e Please use the following command to send your commits to review:\n\n' - else - drier= - fi - - local_branch=`git branch | grep -Ei "\* (.*)" | cut -f2 -d' '` - if [ -z "$bug" ]; then - if [ -z "$bp" ]; then - $drier git push gerrit HEAD:refs/for/$branch/$local_branch; - else - $drier git push gerrit HEAD:refs/for/$branch/bp/$bp; - fi - else - $drier git push gerrit HEAD:refs/for/$branch/bug/$bug; - fi -} - -main "$@" From 22ddb27eaef2ee55e5c78c8b7bbaa88e7fcdd1fb Mon Sep 17 00:00:00 2001 From: John Garbutt Date: Thu, 24 May 2012 15:56:06 +0100 Subject: [PATCH 525/967] Fix multi-host deployments using RabbitMQ Currently the rabbit password and rabbit_host are no longer written into nova.conf. 
This is due to this change: https://review.openstack.org/#/c/6501/ My solution is to write the values if: - they are set - and you are not using the alternative queue system Change-Id: I8de7b57125d1fdf50044fc2b3ae4683ac15d5a61 --- stack.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 45e285c9..c86017ed 100755 --- a/stack.sh +++ b/stack.sh @@ -1672,11 +1672,11 @@ add_nova_opt "vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS" add_nova_opt "api_paste_config=$NOVA_CONF_DIR/api-paste.ini" add_nova_opt "image_service=nova.image.glance.GlanceImageService" add_nova_opt "ec2_dmz_host=$EC2_DMZ_HOST" -if is_service_enabled rabbit ; then +if is_service_enabled qpid ; then + add_nova_opt "rpc_backend=nova.rpc.impl_qpid" +elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then add_nova_opt "rabbit_host=$RABBIT_HOST" add_nova_opt "rabbit_password=$RABBIT_PASSWORD" -elif is_service_enabled qpid ; then - add_nova_opt "rpc_backend=nova.rpc.impl_qpid" fi add_nova_opt "glance_api_servers=$GLANCE_HOSTPORT" add_nova_opt "force_dhcp_release=True" From 37826bc8c878aefc23d945aff3c63e8b00397b5c Mon Sep 17 00:00:00 2001 From: John Garbutt Date: Fri, 25 May 2012 12:55:29 +0100 Subject: [PATCH 526/967] Move the ubuntu template generator to use Squeeze. Newer versions of XenServer don't have lenny templates, so moving to use Squeeze. Change-Id: I95473d4cdc6caad86bc53799fe2ecbedd9223026 --- tools/xen/scripts/xenoneirictemplate.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/xen/scripts/xenoneirictemplate.sh b/tools/xen/scripts/xenoneirictemplate.sh index 1d9ec5e2..9e1e9dea 100755 --- a/tools/xen/scripts/xenoneirictemplate.sh +++ b/tools/xen/scripts/xenoneirictemplate.sh @@ -8,10 +8,10 @@ BASE_DIR=$(cd $(dirname "$0") && pwd) source $BASE_DIR/../../../localrc -LENNY=$(xe template-list name-label=Debian\ Lenny\ 5.0\ \(32-bit\) --minimal) +LENNY=$(xe template-list name-label=Debian\ Squeeze\ 6.0\ \(32-bit\) --minimal) if [[ -z $LENNY ]] ; then - echo "Cant find lenny 32bit template, is this on 6.0.2?" + echo "Cant find Squeeze 32bit template." 
exit 1 fi From 42b1aa9c136bd738c32075b2098cebdfa3a886f8 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 24 May 2012 14:11:01 -0700 Subject: [PATCH 527/967] Fix devstack to support the new pulled out swift3 Change-Id: Ia4f5092e666b581aab4af4851c2848e84817c07a --- files/swift/proxy-server.conf | 2 +- stack.sh | 3 +++ stackrc | 3 +++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf index 2fe20f57..84bf9cd5 100644 --- a/files/swift/proxy-server.conf +++ b/files/swift/proxy-server.conf @@ -40,7 +40,7 @@ admin_user = %SERVICE_USERNAME% admin_password = %SERVICE_PASSWORD% [filter:swift3] -use = egg:swift#swift3 +use = egg:swift3#middleware [filter:tempauth] use = egg:swift#tempauth diff --git a/stack.sh b/stack.sh index 340d4a8b..eb734c3e 100755 --- a/stack.sh +++ b/stack.sh @@ -214,6 +214,7 @@ KEYSTONECLIENT_DIR=$DEST/python-keystoneclient OPENSTACKCLIENT_DIR=$DEST/python-openstackclient NOVNC_DIR=$DEST/noVNC SWIFT_DIR=$DEST/swift +SWIFT3_DIR=$DEST/swift3 QUANTUM_DIR=$DEST/quantum QUANTUM_CLIENT_DIR=$DEST/python-quantumclient MELANGE_DIR=$DEST/melange @@ -673,6 +674,7 @@ fi if is_service_enabled swift; then # storage service git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH + git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH fi if is_service_enabled g-api n-api; then # image catalog service @@ -716,6 +718,7 @@ if is_service_enabled key g-api n-api swift; then fi if is_service_enabled swift; then cd $SWIFT_DIR; sudo python setup.py develop + cd $SWIFT3_DIR; sudo python setup.py develop fi if is_service_enabled g-api n-api; then cd $GLANCE_DIR; sudo python setup.py develop diff --git a/stackrc b/stackrc index 41a399a2..98e6bd48 100644 --- a/stackrc +++ b/stackrc @@ -16,6 +16,9 @@ NOVA_BRANCH=master # storage service SWIFT_REPO=https://github.com/openstack/swift.git SWIFT_BRANCH=master +SWIFT3_REPO=https://github.com/fujita/swift3.git +SWIFT3_BRANCH=master + # image catalog service GLANCE_REPO=https://github.com/openstack/glance.git From 7f9833fb9db48875cffa7d4e81af5bffe20e9b52 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Fri, 25 May 2012 08:31:59 -0700 Subject: [PATCH 528/967] Set sql_connection in glance-api.conf The v2 API uses the database directly from the glance-api server. We need to provide the same connection credentials as we set for the glance-registry server. 
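As an illustration of the end state described above (the file paths below are the usual devstack defaults, assumed here rather than taken from the patch), both Glance config files should now report the same value:

    # quick check that glance-api and glance-registry share one database connection
    grep '^sql_connection' /etc/glance/glance-api.conf /etc/glance/glance-registry.conf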
Change-Id: I5093a9f7978f627e1dee4047b1f45383f9e4e7da --- stack.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/stack.sh b/stack.sh index 340d4a8b..78874d5e 100755 --- a/stack.sh +++ b/stack.sh @@ -1000,6 +1000,7 @@ if is_service_enabled g-reg; then cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF iniset $GLANCE_API_CONF DEFAULT debug True inicomment $GLANCE_API_CONF DEFAULT log_file + iniset $GLANCE_API_CONF DEFAULT sql_connection $BASE_SQL_CONN/glance?charset=utf8 iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/ iniset $GLANCE_API_CONF paste_deploy flavor keystone From 3ea11129012b9ffdf5f38724e11fa9efda9362f5 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 31 May 2012 16:39:36 -0500 Subject: [PATCH 529/967] Fix multiple distro dependency bug get_packages() icorrectly handled multiple distros listed in a dependency file, such as: xyz # dist:fred,barney,wilma Change-Id: Ib1178b2aaaddafe581902b32776180bb0b41f1ae --- stack.sh | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/stack.sh b/stack.sh index 92a95674..23f84180 100755 --- a/stack.sh +++ b/stack.sh @@ -632,13 +632,13 @@ function get_packages() { continue fi - if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then # We are using BASH regexp matching feature. - package=${BASH_REMATCH[1]} - distros=${BASH_REMATCH[2]} - for distro in ${distros//,/ }; do #In bash ${VAR,,} will lowecase VAR - [[ ${distro,,} == ${DISTRO,,} ]] && echo $package - done - continue + if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then + # We are using BASH regexp matching feature. + package=${BASH_REMATCH[1]} + distros=${BASH_REMATCH[2]} + # In bash ${VAR,,} will lowecase VAR + [[ ${distros,,} =~ ${DISTRO,,} ]] && echo $package + continue fi echo ${line%#*} From 83d475eebea28d3ff8e8a0f54b90db465c2419fb Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 30 May 2012 10:58:18 -0500 Subject: [PATCH 530/967] Set default image for exercises The exercises search for an 'ami' image to run; when multiple images are loaded this fails. Rather than try to guess or just use the first one we set the default image name to the cirros image that we load by default. Change-Id: Iae91a011ca9c42a7715747a68e0deba0dba20835 --- stackrc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackrc b/stackrc index 98e6bd48..62f6b875 100644 --- a/stackrc +++ b/stackrc @@ -89,8 +89,10 @@ MELANGECLIENT_BRANCH=master #IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img" # cirros full disk image case "$LIBVIRT_TYPE" in lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc + DEFAULT_IMAGE_NAME=cirros-0.3.0-x86_64-rootfs IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz";; *) # otherwise, use the uec style image (with kernel, ramdisk, disk) + DEFAULT_IMAGE_NAME=cirros-0.3.0-x86_64-uec IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz";; esac From 1f8efd93d9e28cee2caf72267335126954bbb45e Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Sat, 2 Jun 2012 01:40:00 +0100 Subject: [PATCH 531/967] fix syntax error in the if test switch to using -f rather than -a. This has been overlooked in a review recently merged. Without this fix, devstack on XenServer is broken. 
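For illustration only, an equivalent spelled-out form of the corrected test (the actual fix in the diff below is a one-line [[ -f ... ]] && ... || ... chain):

    # -f tests for an existing regular file; note the space before the closing bracket,
    # which the broken version also dropped
    if [ -f /etc/xensource-inventory ]; then
        . /etc/xensource-inventory
    else
        . /etc/xcp/inventory
    fi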
Change-Id: I1882cc70528772287241848adea3a520738d4144 --- tools/xen/scripts/install-os-vpx.sh | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index 7f2f3e62..241296bd 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -19,12 +19,7 @@ set -eux -if [ -a /etc/xensource-inventory] -then - . /etc/xensource-inventory -else - . /etc/xcp/inventory -fi +[[ -f "/etc/xensource-inventory" ]] && source "/etc/xensource-inventory" || source "/etc/xcp/inventory" NAME="XenServer OpenStack VPX" DATA_VDI_SIZE="500MiB" From c7214e838e65b51b81b84e2a3e2ce3c34490fd46 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 6 Jun 2012 13:56:39 +0200 Subject: [PATCH 532/967] Use swift config files shipped with swift. - Fixes bug 1004548. - Add function iniuncomment. - Trying to match the indent as the other stack.sh file. Change-Id: I70339f7b4c0dd3ef8a018daecb2e1b3fb72c741b --- files/swift/account-server.conf | 20 --- files/swift/container-server.conf | 22 ---- files/swift/object-server.conf | 22 ---- files/swift/proxy-server.conf | 57 --------- files/swift/swift.conf | 3 - functions | 11 +- stack.sh | 205 +++++++++++++++++++----------- 7 files changed, 139 insertions(+), 201 deletions(-) delete mode 100644 files/swift/account-server.conf delete mode 100644 files/swift/container-server.conf delete mode 100644 files/swift/object-server.conf delete mode 100644 files/swift/proxy-server.conf delete mode 100644 files/swift/swift.conf diff --git a/files/swift/account-server.conf b/files/swift/account-server.conf deleted file mode 100644 index 763c306f..00000000 --- a/files/swift/account-server.conf +++ /dev/null @@ -1,20 +0,0 @@ -[DEFAULT] -devices = %NODE_PATH%/node -mount_check = false -bind_port = %BIND_PORT% -user = %USER% -log_facility = LOG_LOCAL%LOG_FACILITY% -swift_dir = %SWIFT_CONFIG_DIR% - -[pipeline:main] -pipeline = account-server - -[app:account-server] -use = egg:swift#account - -[account-replicator] -vm_test_mode = yes - -[account-auditor] - -[account-reaper] diff --git a/files/swift/container-server.conf b/files/swift/container-server.conf deleted file mode 100644 index 106dcab6..00000000 --- a/files/swift/container-server.conf +++ /dev/null @@ -1,22 +0,0 @@ -[DEFAULT] -devices = %NODE_PATH%/node -mount_check = false -bind_port = %BIND_PORT% -user = %USER% -log_facility = LOG_LOCAL%LOG_FACILITY% -swift_dir = %SWIFT_CONFIG_DIR% - -[pipeline:main] -pipeline = container-server - -[app:container-server] -use = egg:swift#container - -[container-replicator] -vm_test_mode = yes - -[container-updater] - -[container-auditor] - -[container-sync] diff --git a/files/swift/object-server.conf b/files/swift/object-server.conf deleted file mode 100644 index 7eea67d5..00000000 --- a/files/swift/object-server.conf +++ /dev/null @@ -1,22 +0,0 @@ -[DEFAULT] -devices = %NODE_PATH%/node -mount_check = false -bind_port = %BIND_PORT% -user = %USER% -log_facility = LOG_LOCAL%LOG_FACILITY% -swift_dir = %SWIFT_CONFIG_DIR% - -[pipeline:main] -pipeline = object-server - -[app:object-server] -use = egg:swift#object - -[object-replicator] -vm_test_mode = yes - -[object-updater] - -[object-auditor] - -[object-expirer] diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf deleted file mode 100644 index 84bf9cd5..00000000 --- a/files/swift/proxy-server.conf +++ /dev/null @@ -1,57 +0,0 @@ -[DEFAULT] -bind_port = 8080 -user = %USER% -swift_dir = %SWIFT_CONFIG_DIR% -workers = 1 
-log_name = swift -log_facility = LOG_LOCAL1 -log_level = DEBUG - -[pipeline:main] -pipeline = healthcheck cache swift3 %AUTH_SERVER% proxy-server - -[app:proxy-server] -use = egg:swift#proxy -allow_account_management = true -account_autocreate = true - -[filter:keystone] -paste.filter_factory = keystone.middleware.swift_auth:filter_factory -operator_roles = Member,admin - -# NOTE(chmou): s3token middleware is not updated yet to use only -# username and password. -[filter:s3token] -paste.filter_factory = keystone.middleware.s3_token:filter_factory -auth_port = %KEYSTONE_AUTH_PORT% -auth_host = %KEYSTONE_AUTH_HOST% -auth_protocol = %KEYSTONE_AUTH_PROTOCOL% -auth_token = %SERVICE_TOKEN% -admin_token = %SERVICE_TOKEN% - -[filter:authtoken] -paste.filter_factory = keystone.middleware.auth_token:filter_factory -auth_host = %KEYSTONE_AUTH_HOST% -auth_port = %KEYSTONE_AUTH_PORT% -auth_protocol = %KEYSTONE_AUTH_PROTOCOL% -auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/ -admin_tenant_name = %SERVICE_TENANT_NAME% -admin_user = %SERVICE_USERNAME% -admin_password = %SERVICE_PASSWORD% - -[filter:swift3] -use = egg:swift3#middleware - -[filter:tempauth] -use = egg:swift#tempauth -user_admin_admin = admin .admin .reseller_admin -user_test_tester = testing .admin -user_test2_tester2 = testing2 .admin -user_test_tester3 = testing3 -bind_ip = 0.0.0.0 - -[filter:healthcheck] -use = egg:swift#healthcheck - -[filter:cache] -use = egg:swift#memcache diff --git a/files/swift/swift.conf b/files/swift/swift.conf deleted file mode 100644 index 98df4663..00000000 --- a/files/swift/swift.conf +++ /dev/null @@ -1,3 +0,0 @@ -[swift-hash] -# random unique string that can never change (DO NOT LOSE) -swift_hash_path_suffix = %SWIFT_HASH% diff --git a/functions b/functions index 7072fdd8..915b829e 100644 --- a/functions +++ b/functions @@ -184,7 +184,7 @@ function git_clone { # Comment an option in an INI file -# iniset config-file section option +# inicomment config-file section option function inicomment() { local file=$1 local section=$2 @@ -192,6 +192,15 @@ function inicomment() { sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" $file } +# Uncomment an option in an INI file +# iniuncomment config-file section option +function iniuncomment() { + local file=$1 + local section=$2 + local option=$3 + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" $file +} + # Get an option from an INI file # iniget config-file section option diff --git a/stack.sh b/stack.sh index 776ff866..10954b74 100755 --- a/stack.sh +++ b/stack.sh @@ -1428,34 +1428,69 @@ if is_service_enabled swift; then sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync fi - # By default Swift will be installed with the tempauth middleware - # which has some default username and password if you have - # configured keystone it will checkout the directory. - if is_service_enabled key; then - swift_auth_server="s3token authtoken keystone" - else - swift_auth_server=tempauth - fi - - # We do the install of the proxy-server and swift configuration - # replacing a few directives to match our configuration. 
- sed -e " - s,%SWIFT_CONFIG_DIR%,${SWIFT_CONFIG_DIR},g; - s,%USER%,$USER,g; - s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g; - s,%SERVICE_USERNAME%,swift,g; - s,%SERVICE_PASSWORD%,$SERVICE_PASSWORD,g; - s,%KEYSTONE_SERVICE_PROTOCOL%,$KEYSTONE_SERVICE_PROTOCOL,g; - s,%SERVICE_TOKEN%,${SERVICE_TOKEN},g; - s,%KEYSTONE_API_PORT%,${KEYSTONE_API_PORT},g; - s,%KEYSTONE_AUTH_HOST%,${KEYSTONE_AUTH_HOST},g; - s,%KEYSTONE_AUTH_PORT%,${KEYSTONE_AUTH_PORT},g; - s,%KEYSTONE_AUTH_PROTOCOL%,${KEYSTONE_AUTH_PROTOCOL},g; - s/%AUTH_SERVER%/${swift_auth_server}/g; - " $FILES/swift/proxy-server.conf | \ - sudo tee ${SWIFT_CONFIG_DIR}/proxy-server.conf - - sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > ${SWIFT_CONFIG_DIR}/swift.conf + + SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONFIG_DIR}/proxy-server.conf + cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER} + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${USER} + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONFIG_DIR} + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1 + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level DEBUG + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080} + + iniset ${SWIFT_CONFIG_PROXY_SERVER} pipeline:main pipeline "catch_errors healthcheck cache ratelimit swift3 ${swift_auth_server} proxy-logging proxy-server" + + iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true + + cat <<EOF >>${SWIFT_CONFIG_PROXY_SERVER} + +[filter:keystone] +paste.filter_factory = keystone.middleware.swift_auth:filter_factory +operator_roles = Member,admin + +# NOTE(chmou): s3token middleware is not updated yet to use only +# username and password.
+[filter:s3token] +paste.filter_factory = keystone.middleware.s3_token:filter_factory +auth_port = ${KEYSTONE_AUTH_PORT} +auth_host = ${KEYSTONE_AUTH_HOST} +auth_protocol = ${KEYSTONE_AUTH_PROTOCOL} +auth_token = ${SERVICE_TOKEN} +admin_token = ${SERVICE_TOKEN} + +[filter:authtoken] +paste.filter_factory = keystone.middleware.auth_token:filter_factory +auth_host = ${KEYSTONE_AUTH_HOST} +auth_port = ${KEYSTONE_AUTH_PORT} +auth_protocol = ${KEYSTONE_AUTH_PROTOCOL} +auth_uri = ${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/ +admin_tenant_name = ${SERVICE_TENANT_NAME} +admin_user = swift +admin_password = ${SERVICE_PASSWORD} + +[filter:swift3] +use = egg:swift3#middleware +EOF + + cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONFIG_DIR}/swift.conf + iniset ${SWIFT_CONFIG_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH} # We need to generate a object/account/proxy configuration # emulating 4 nodes on different ports we have a little function @@ -1465,16 +1500,35 @@ if is_service_enabled swift; then local bind_port=$2 local log_facility=$3 local node_number + local swift_node_config for node_number in $(seq ${SWIFT_REPLICAS}); do node_path=${SWIFT_DATA_DIR}/${node_number} - sed -e " - s,%SWIFT_CONFIG_DIR%,${SWIFT_CONFIG_DIR},; - s,%USER%,$USER,; - s,%NODE_PATH%,${node_path},; - s,%BIND_PORT%,${bind_port},; - s,%LOG_FACILITY%,${log_facility}, - " $FILES/swift/${server_type}-server.conf > ${SWIFT_CONFIG_DIR}/${server_type}-server/${node_number}.conf + swift_node_config=${SWIFT_CONFIG_DIR}/${server_type}-server/${node_number}.conf + + cp ${SWIFT_DIR}/etc/${server_type}-server.conf-sample ${swift_node_config} + + iniuncomment ${swift_node_config} DEFAULT user + iniset ${swift_node_config} DEFAULT user ${USER} + + iniuncomment ${swift_node_config} DEFAULT bind_port + iniset ${swift_node_config} DEFAULT bind_port ${bind_port} + + iniuncomment ${swift_node_config} DEFAULT swift_dir + iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONFIG_DIR} + + iniuncomment ${swift_node_config} DEFAULT devices + iniset ${swift_node_config} DEFAULT devices ${node_path} + + iniuncomment ${swift_node_config} DEFAULT log_facility + iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility} + + iniuncomment ${swift_node_config} DEFAULT mount_check + iniset ${swift_node_config} DEFAULT mount_check false + + iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode + iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes + bind_port=$(( ${bind_port} + 10 )) log_facility=$(( ${log_facility} + 1 )) done @@ -1483,48 +1537,47 @@ if is_service_enabled swift; then generate_swift_configuration container 6011 2 generate_swift_configuration account 6012 2 + # We have some specific configuration for swift for rsyslog. See + # the file /etc/rsyslog.d/10-swift.conf for more info. + swift_log_dir=${SWIFT_DATA_DIR}/logs + rm -rf ${swift_log_dir} + mkdir -p ${swift_log_dir}/hourly + sudo chown -R $USER:adm ${swift_log_dir} + sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ + tee /etc/rsyslog.d/10-swift.conf + restart_service rsyslog + + # This is where we create three different rings for swift with + # different object servers binding on different ports. 
+ pushd ${SWIFT_CONFIG_DIR} >/dev/null && { + + rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz + + port_number=6010 + swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + for x in $(seq ${SWIFT_REPLICAS}); do + swift-ring-builder object.builder add z${x}-127.0.0.1:${port_number}/sdb1 1 + port_number=$[port_number + 10] + done + swift-ring-builder object.builder rebalance + + port_number=6011 + swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + for x in $(seq ${SWIFT_REPLICAS}); do + swift-ring-builder container.builder add z${x}-127.0.0.1:${port_number}/sdb1 1 + port_number=$[port_number + 10] + done + swift-ring-builder container.builder rebalance + + port_number=6012 + swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + for x in $(seq ${SWIFT_REPLICAS}); do + swift-ring-builder account.builder add z${x}-127.0.0.1:${port_number}/sdb1 1 + port_number=$[port_number + 10] + done + swift-ring-builder account.builder rebalance - # We have some specific configuration for swift for rsyslog. See - # the file /etc/rsyslog.d/10-swift.conf for more info. - swift_log_dir=${SWIFT_DATA_DIR}/logs - rm -rf ${swift_log_dir} - mkdir -p ${swift_log_dir}/hourly - sudo chown -R $USER:adm ${swift_log_dir} - sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ - tee /etc/rsyslog.d/10-swift.conf - restart_service rsyslog - - # This is where we create three different rings for swift with - # different object servers binding on different ports. - pushd ${SWIFT_CONFIG_DIR} >/dev/null && { - - rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz - - port_number=6010 - swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 - for x in $(seq ${SWIFT_REPLICAS}); do - swift-ring-builder object.builder add z${x}-127.0.0.1:${port_number}/sdb1 1 - port_number=$[port_number + 10] - done - swift-ring-builder object.builder rebalance - - port_number=6011 - swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 - for x in $(seq ${SWIFT_REPLICAS}); do - swift-ring-builder container.builder add z${x}-127.0.0.1:${port_number}/sdb1 1 - port_number=$[port_number + 10] - done - swift-ring-builder container.builder rebalance - - port_number=6012 - swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 - for x in $(seq ${SWIFT_REPLICAS}); do - swift-ring-builder account.builder add z${x}-127.0.0.1:${port_number}/sdb1 1 - port_number=$[port_number + 10] - done - swift-ring-builder account.builder rebalance - - } && popd >/dev/null + } && popd >/dev/null # We then can start rsync. if [[ "$os_PACKAGE" = "deb" ]]; then From fa3fb4ab9078b4917f00eb187eafc3590c0d01c9 Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Wed, 6 Jun 2012 17:30:49 -0700 Subject: [PATCH 533/967] Remove pips. All of the requirements should now be specified in the pip-requires of individual projects, which will be installed via setup.py install. Therefore, remove the few remaining pip dependencies from devstack. Distutils has a problem installing django 1.4 when django 1.3 is already installed via debian packages. Therefore, remove the dependency on the django debian package (which installs 1.3 and is obsolete anyway). 
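For illustration only -- with the pips files gone, a project's requirements can still be installed by hand from its own list; the project and path below are examples (most projects of this era keep the list in tools/pip-requires), not something this patch dictates:

    # install one project's declared requirements directly with pip
    cd /opt/stack/horizon
    pip install -r tools/pip-requires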
Change-Id: Ia4dc1f4c94c7f5c0811bafcf41dadc5d629a516d --- files/apts/horizon | 3 --- files/pips/general | 1 - files/pips/horizon | 6 ------ files/pips/keystone | 1 - files/pips/tempest | 2 -- 5 files changed, 13 deletions(-) delete mode 100644 files/pips/general delete mode 100644 files/pips/horizon delete mode 100644 files/pips/keystone delete mode 100644 files/pips/tempest diff --git a/files/apts/horizon b/files/apts/horizon index 9b1c9ee8..6348f562 100644 --- a/files/apts/horizon +++ b/files/apts/horizon @@ -18,8 +18,5 @@ python-mox python-kombu python-coverage python-cherrypy3 # why? -python-django-mailer -python-django-nose -python-django-registration python-migrate nodejs diff --git a/files/pips/general b/files/pips/general deleted file mode 100644 index deb2d14c..00000000 --- a/files/pips/general +++ /dev/null @@ -1 +0,0 @@ -prettytable diff --git a/files/pips/horizon b/files/pips/horizon deleted file mode 100644 index 309a5fee..00000000 --- a/files/pips/horizon +++ /dev/null @@ -1,6 +0,0 @@ -django>=1.4 -django-mailer # dist:f16 -django-nose # dist:f16 -django-nose-selenium -pycrypto==2.3 -python-cloudfiles diff --git a/files/pips/keystone b/files/pips/keystone deleted file mode 100644 index 09636e49..00000000 --- a/files/pips/keystone +++ /dev/null @@ -1 +0,0 @@ -PassLib diff --git a/files/pips/tempest b/files/pips/tempest deleted file mode 100644 index 6eeb5b9c..00000000 --- a/files/pips/tempest +++ /dev/null @@ -1,2 +0,0 @@ -pika -nosexunit # For use by jenkins in producing reports From 66c70c72d49c42059ee649725ee038bed3b252dc Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 7 Jun 2012 14:21:59 -0400 Subject: [PATCH 534/967] Use pip to install python depends. When we use python setup.py develop, if there is a packages not installed already, it means distutils/easy_install installs it. Unfortunately, those are both way more stupid than pip. Instead, get pip to install missing depends, then run the setup.py develop so that distutils doesn't need to install things. 
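A sketch of the mechanism described above, run by hand against a single project (the directory and egg name are examples, not taken from the patch):

    cd /opt/stack/nova
    python setup.py egg_info                   # writes nova.egg-info/requires.txt and dependency_links.txt
    pip install -r nova.egg-info/requires.txt  # pip, not easy_install, resolves the dependency chain
    sudo python setup.py develop               # distutils has nothing left to fetch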
Change-Id: Ifad3bbc8e9eac0b14dc5bb40175cf2bd45b64b00 --- stack.sh | 38 ++++++++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/stack.sh b/stack.sh index 776ff866..3d93de47 100755 --- a/stack.sh +++ b/stack.sh @@ -647,6 +647,16 @@ function get_packages() { done } +# pip install the dependencies of the package before we do the setup.py +# develop, so that pip and not distutils process the dependency chain +function setup_develop() { + python setup.py egg_info + raw_links=`cat *.egg-info/dependency_links.txt | awk '{print "-f " $1}'` + depend_links=`echo $raw_links | xargs` + sudo pip install -r *-info/requires.txt $depend_links + sudo python setup.py develop +} + # install package requirements if [[ "$os_PACKAGE" = "deb" ]]; then apt_get update @@ -710,38 +720,38 @@ fi # setup our checkouts so they are installed into python path # allowing ``import nova`` or ``import glance.client`` -cd $KEYSTONECLIENT_DIR; sudo python setup.py develop -cd $NOVACLIENT_DIR; sudo python setup.py develop -cd $OPENSTACKCLIENT_DIR; sudo python setup.py develop +cd $KEYSTONECLIENT_DIR; setup_develop +cd $NOVACLIENT_DIR; setup_develop +cd $OPENSTACKCLIENT_DIR; setup_develop if is_service_enabled key g-api n-api swift; then - cd $KEYSTONE_DIR; sudo python setup.py develop + cd $KEYSTONE_DIR; setup_develop fi if is_service_enabled swift; then - cd $SWIFT_DIR; sudo python setup.py develop - cd $SWIFT3_DIR; sudo python setup.py develop + cd $SWIFT_DIR; setup_develop + cd $SWIFT3_DIR; setup_develop fi if is_service_enabled g-api n-api; then - cd $GLANCE_DIR; sudo python setup.py develop + cd $GLANCE_DIR; setup_develop fi -cd $NOVA_DIR; sudo python setup.py develop +cd $NOVA_DIR; setup_develop if is_service_enabled horizon; then - cd $HORIZON_DIR; sudo python setup.py develop + cd $HORIZON_DIR; setup_develop fi if is_service_enabled quantum; then - cd $QUANTUM_CLIENT_DIR; sudo python setup.py develop + cd $QUANTUM_CLIENT_DIR; setup_develop fi if is_service_enabled quantum; then - cd $QUANTUM_DIR; sudo python setup.py develop + cd $QUANTUM_DIR; setup_develop fi if is_service_enabled m-svc; then - cd $MELANGE_DIR; sudo python setup.py develop + cd $MELANGE_DIR; setup_develop fi if is_service_enabled melange; then - cd $MELANGECLIENT_DIR; sudo python setup.py develop + cd $MELANGECLIENT_DIR; setup_develop fi # Do this _after_ glance is installed to override the old binary -cd $GLANCECLIENT_DIR; sudo python setup.py develop +cd $GLANCECLIENT_DIR; setup_develop # Syslog From d2f8fa3b444d944a7fad29cabb4216e745c66772 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Mon, 11 Jun 2012 16:45:29 +0200 Subject: [PATCH 535/967] Always setup rootwrap sudoers entry Setup /etc/sudoers.d/nova-rootwrap in all cases, and not just when devstack is not run as root. Fixes bug 1011652. 
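For illustration only, one way to sanity-check the same kind of sudoers drop-in before moving it into place (the user name and rootwrap path are assumptions, not values from the patch):

    TEMPFILE=$(mktemp)
    echo "stack ALL=(root) NOPASSWD: /usr/local/bin/nova-rootwrap" >$TEMPFILE
    chmod 0440 $TEMPFILE
    visudo -c -f $TEMPFILE                     # parse check; exits non-zero on a syntax error
    sudo chown root:root $TEMPFILE
    sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap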
Change-Id: Ib4cdeaa282f01cf2ce98119618f232c91b6e8db4 --- AUTHORS | 1 + stack.sh | 14 +++++++------- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/AUTHORS b/AUTHORS index f9aa9eab..ff05f821 100644 --- a/AUTHORS +++ b/AUTHORS @@ -26,6 +26,7 @@ Ken Pepple Kiall Mac Innes Russell Bryant Scott Moser +Thierry Carrez Todd Willey Tres Henry Vishvananda Ishaya diff --git a/stack.sh b/stack.sh index 3d93de47..a675905c 100755 --- a/stack.sh +++ b/stack.sh @@ -187,13 +187,6 @@ else sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh - # Set up the rootwrap sudoers - TEMPFILE=`mktemp` - echo "$USER ALL=(root) NOPASSWD: $NOVA_ROOTWRAP" >$TEMPFILE - chmod 0440 $TEMPFILE - sudo chown root:root $TEMPFILE - sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap - # Remove old file sudo rm -f /etc/sudoers.d/stack_sh_nova fi @@ -1184,6 +1177,13 @@ sudo chown `whoami` $NOVA_CONF_DIR cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR +# Set up the rootwrap sudoers +TEMPFILE=`mktemp` +echo "$USER ALL=(root) NOPASSWD: $NOVA_ROOTWRAP" >$TEMPFILE +chmod 0440 $TEMPFILE +sudo chown root:root $TEMPFILE +sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap + if is_service_enabled n-api; then # Use the sample http middleware configuration supplied in the # Nova sources. This paste config adds the configuration required From f07d9b1956c35367ef3481c5103b1df8cbcb8cab Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 11 Jun 2012 14:39:16 -0500 Subject: [PATCH 536/967] Add BeautifulSoup dependency for Horizon This is a workaround for an undocumented dependency on the Python BeautifulSoup package. It fixes both Ubuntu and Fedora builds. Fixes bug 1010968 Change-Id: If7f5d55c38d3b9ab677378c57839bc8f03898e2f --- files/apts/horizon | 1 + files/rpms/horizon | 1 + 2 files changed, 2 insertions(+) diff --git a/files/apts/horizon b/files/apts/horizon index 6348f562..53bddf09 100644 --- a/files/apts/horizon +++ b/files/apts/horizon @@ -1,5 +1,6 @@ apache2 # NOPRIME libapache2-mod-wsgi # NOPRIME +python-beautifulsoup python-dateutil python-paste python-pastedeploy diff --git a/files/rpms/horizon b/files/rpms/horizon index 3c5fbc17..5e368208 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -5,6 +5,7 @@ httpd # NOPRIME mod_wsgi # NOPRIME pylint python-anyjson +python-BeautifulSoup python-boto python-coverage python-dateutil From fda9df8795d422679387b2ea8b20556fe4116645 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 22 May 2012 10:27:08 +0000 Subject: [PATCH 537/967] Install python-swiftclient when installing swift. - Fixes bug 1002789. 
Change-Id: I8ee54652c6a38c7c226c820366897e53807f664d --- stack.sh | 5 +++++ stackrc | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/stack.sh b/stack.sh index 3d93de47..0d90baea 100755 --- a/stack.sh +++ b/stack.sh @@ -215,6 +215,7 @@ OPENSTACKCLIENT_DIR=$DEST/python-openstackclient NOVNC_DIR=$DEST/noVNC SWIFT_DIR=$DEST/swift SWIFT3_DIR=$DEST/swift3 +SWIFTCLIENT_DIR=$DEST/python-swiftclient QUANTUM_DIR=$DEST/quantum QUANTUM_CLIENT_DIR=$DEST/python-quantumclient MELANGE_DIR=$DEST/melange @@ -684,6 +685,9 @@ fi if is_service_enabled swift; then # storage service git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH + # storage service client and and Library + git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH + # swift3 middleware to provide S3 emulation to Swift git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH fi if is_service_enabled g-api n-api; then @@ -728,6 +732,7 @@ if is_service_enabled key g-api n-api swift; then fi if is_service_enabled swift; then cd $SWIFT_DIR; setup_develop + cd $SWIFTCLIENT_DIR; setup_develop cd $SWIFT3_DIR; setup_develop fi if is_service_enabled g-api n-api; then diff --git a/stackrc b/stackrc index 98e6bd48..3c20c874 100644 --- a/stackrc +++ b/stackrc @@ -20,6 +20,10 @@ SWIFT3_REPO=https://github.com/fujita/swift3.git SWIFT3_BRANCH=master +# python swift client library +SWIFTCLIENT_REPO=https://github.com/openstack/python-swiftclient +SWIFTCLIENT_BRANCH=master + # image catalog service GLANCE_REPO=https://github.com/openstack/glance.git GLANCE_BRANCH=master From bbafb1b5b24377f9d471fc73de3ef07f97deed96 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 11 Jun 2012 16:51:39 -0500 Subject: [PATCH 538/967] Fix issues when working through an outbound proxy * reqork setup-develop() to handle proxy operations and empty dependency lists * use keystone to get admin token (eliminate a direct curl invocation) * check for cached image files of 0 bytes and re-download if needed Change-Id: Ief356667ed3ef8d05c6604f12513ae81f68cec04 --- functions | 17 ++++++++++++++++ stack.sh | 61 ++++++++++++++++++++++++------------------------------- 2 files changed, 44 insertions(+), 34 deletions(-) diff --git a/functions b/functions index 7072fdd8..7fd726ef 100644 --- a/functions +++ b/functions @@ -309,6 +309,23 @@ function restart_service() { } +# pip install the dependencies of the package before we do the setup.py +# develop, so that pip and not distutils process the dependency chain +# setup_develop directory +function setup_develop() { + (cd $1; \ + python setup.py egg_info; \ + raw_links=$(awk '/^.+/ {print "-f " $1}' *.egg-info/dependency_links.txt); \ + depend_links=$(echo $raw_links | xargs); \ + pip_install -r *-info/requires.txt $depend_links; \ + sudo \ + HTTP_PROXY=$http_proxy \ + HTTPS_PROXY=$https_proxy \ + python setup.py develop \ + ) +} + + # Service wrapper to start services # start_service service-name function start_service() { diff --git a/stack.sh b/stack.sh index 0d90baea..4f26e1e3 100755 --- a/stack.sh +++ b/stack.sh @@ -64,18 +64,23 @@ fi # repositories and branches to configure. ``stackrc`` sources ``localrc`` to # allow you to safely override those settings without being overwritten # when updating DevStack. +if [[ ! -r $TOP_DIR/stackrc ]]; then + echo "ERROR: missing $TOP_DIR/stackrc - did you grab more than just stack.sh?" 
+ exit 1 +fi +source $TOP_DIR/stackrc # HTTP and HTTPS proxy servers are supported via the usual environment variables # ``http_proxy`` and ``https_proxy``. They can be set in ``localrc`` if necessary # or on the command line:: # # http_proxy=http://proxy.example.com:3128/ ./stack.sh - -if [[ ! -r $TOP_DIR/stackrc ]]; then - echo "ERROR: missing $TOP_DIR/stackrc - did you grab more than just stack.sh?" - exit 1 +if [[ -n "$http_proxy" ]]; then + export http_proxy=$http_proxy +fi +if [[ -n "$https_proxy" ]]; then + export https_proxy=$https_proxy fi -source $TOP_DIR/stackrc # Destination path for installation ``DEST`` DEST=${DEST:-/opt/stack} @@ -648,16 +653,6 @@ function get_packages() { done } -# pip install the dependencies of the package before we do the setup.py -# develop, so that pip and not distutils process the dependency chain -function setup_develop() { - python setup.py egg_info - raw_links=`cat *.egg-info/dependency_links.txt | awk '{print "-f " $1}'` - depend_links=`echo $raw_links | xargs` - sudo pip install -r *-info/requires.txt $depend_links - sudo python setup.py develop -} - # install package requirements if [[ "$os_PACKAGE" = "deb" ]]; then apt_get update @@ -724,39 +719,37 @@ fi # setup our checkouts so they are installed into python path # allowing ``import nova`` or ``import glance.client`` -cd $KEYSTONECLIENT_DIR; setup_develop -cd $NOVACLIENT_DIR; setup_develop -cd $OPENSTACKCLIENT_DIR; setup_develop +setup_develop $KEYSTONECLIENT_DIR +setup_develop $NOVACLIENT_DIR +setup_develop $OPENSTACKCLIENT_DIR if is_service_enabled key g-api n-api swift; then - cd $KEYSTONE_DIR; setup_develop + setup_develop $KEYSTONE_DIR fi if is_service_enabled swift; then - cd $SWIFT_DIR; setup_develop - cd $SWIFTCLIENT_DIR; setup_develop - cd $SWIFT3_DIR; setup_develop + setup_develop $SWIFT_DIR + setup_develop $SWIFTCLIENT_DIR + setup_develop $SWIFT3_DIR fi if is_service_enabled g-api n-api; then - cd $GLANCE_DIR; setup_develop + setup_develop $GLANCE_DIR fi -cd $NOVA_DIR; setup_develop +setup_develop $NOVA_DIR if is_service_enabled horizon; then - cd $HORIZON_DIR; setup_develop -fi -if is_service_enabled quantum; then - cd $QUANTUM_CLIENT_DIR; setup_develop + setup_develop $HORIZON_DIR fi if is_service_enabled quantum; then - cd $QUANTUM_DIR; setup_develop + setup_develop $QUANTUM_CLIENT_DIR + setup_develop $QUANTUM_DIR fi if is_service_enabled m-svc; then - cd $MELANGE_DIR; setup_develop + setup_develop $MELANGE_DIR fi if is_service_enabled melange; then - cd $MELANGECLIENT_DIR; setup_develop + setup_develop $MELANGECLIENT_DIR fi # Do this _after_ glance is installed to override the old binary -cd $GLANCECLIENT_DIR; setup_develop +setup_develop $GLANCECLIENT_DIR # Syslog @@ -1942,7 +1935,7 @@ if is_service_enabled g-reg; then ADMIN_USER=admin ADMIN_TENANT=admin - TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$ADMIN_USER\", \"password\": \"$ADMIN_PASSWORD\"}, \"tenantName\": \"$ADMIN_TENANT\"}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"` + TOKEN=$(keystone --os_tenant_name $ADMIN_TENANT --os_username $ADMIN_USER --os_password $ADMIN_PASSWORD --os_auth_url http://$HOST_IP:5000/v2.0 token-get | grep ' id ' | get_field 2) # Option to upload legacy ami-tty, which works with xenserver if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then @@ -1952,7 +1945,7 @@ if is_service_enabled g-reg; then for 
image_url in ${IMAGE_URLS//,/ }; do # Downloads the image (uec ami+aki style), then extracts it. IMAGE_FNAME=`basename "$image_url"` - if [ ! -f $FILES/$IMAGE_FNAME ]; then + if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then wget -c $image_url -O $FILES/$IMAGE_FNAME fi From 0a4c34529b1dbdda627417eeead13eca43f283d8 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Wed, 13 Jun 2012 16:49:06 +0200 Subject: [PATCH 539/967] Add python-devel to rpms list for glance It's needed to build the pysendfile module with pip. This was fixed for Debian in 2be6155c6e4ad7bc0df61c47a1cfbb093a48b43d Change-Id: I4ee2b12e2fd3fd9ea44420fdb44cc8ec339ff4fa --- files/rpms/glance | 1 + 1 file changed, 1 insertion(+) diff --git a/files/rpms/glance b/files/rpms/glance index e38f2392..eff6c2c0 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -1,5 +1,6 @@ libxml2-devel python-argparse +python-devel python-eventlet python-greenlet python-paste-deploy From 0f39c5df1442a9d72c55408f76ad0480ab7939a4 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 13 Jun 2012 15:17:19 -0400 Subject: [PATCH 540/967] Run glance-manage db_sync. Updates stack.sh so that it runs glance-manage db_sync. This should pave the way for us to default Glance DB auto-creation to False in Glance. Previously we would rely on glance-registry (and glance-api in Folsom) to auto create the DB for us. I've found this to be a bit racey so explicitly using glance-manage seems the way to go. Change-Id: I61f165db6e0591e819d12aa2e2a4d336ad1172c3 --- AUTHORS | 1 + stack.sh | 3 +++ 2 files changed, 4 insertions(+) diff --git a/AUTHORS b/AUTHORS index ff05f821..b5f972fd 100644 --- a/AUTHORS +++ b/AUTHORS @@ -5,6 +5,7 @@ Anthony Young Armando Migliaccio Brad Hall Chmouel Boudjnah +Dan Prince Dean Troyer Devin Carlen Eddie Hebert diff --git a/stack.sh b/stack.sh index 7faa8f65..e73d3e41 100755 --- a/stack.sh +++ b/stack.sh @@ -1037,6 +1037,9 @@ if is_service_enabled g-reg; then GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json cp $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON + + $GLANCE_DIR/bin/glance-manage db_sync + fi # Quantum (for controller or agent nodes) From 6f13ba33d84b95808fc2a7672f332c1f0494e741 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 1 Jun 2012 23:17:38 +0000 Subject: [PATCH 541/967] Make the log output pretty and more useful * requires https://review.openstack.org/#/c/8067/ Change-Id: Ib26c1bd7e9ef933a7dbe2ee0c476e8d439e17574 --- stack.sh | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/stack.sh b/stack.sh index 87d38d2c..9f3def5c 100755 --- a/stack.sh +++ b/stack.sh @@ -262,6 +262,9 @@ SYSLOG=`trueorfalse False $SYSLOG` SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP} SYSLOG_PORT=${SYSLOG_PORT:-516} +# Use color for logging output +LOG_COLOR=`trueorfalse True $LOG_COLOR` + # Service startup timeout SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} @@ -1709,6 +1712,16 @@ fi if [ "$API_RATE_LIMIT" != "True" ]; then add_nova_opt "api_rate_limit=False" fi +if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + # Add color to logging output + add_nova_opt "logging_context_format_string=%(asctime)s %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" + add_nova_opt "logging_default_format_string=%(asctime)s %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + add_nova_opt "logging_debug_format_suffix=from (pid=%(process)d) %(funcName)s 
%(pathname)s:%(lineno)d" + add_nova_opt "logging_exception_prefix=%(color)s%(asctime)s TRACE %(name)s %(instance)s" +else + # Show user_name and project_name instead of user_id and project_id + add_nova_opt "logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" +fi # Provide some transition from EXTRA_FLAGS to EXTRA_OPTS From f87fd042e730a42ddbd4615cd0e44c2b3d99a94a Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 14 Jun 2012 13:08:24 +0000 Subject: [PATCH 542/967] Kill all swift processes before trying to install. - Before trying to install swift we ensure there is no swift processes running. Change-Id: Ibeb511a67f1ccc4914d9cdf2874324159e3928cc --- stack.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stack.sh b/stack.sh index 87d38d2c..0955b1d3 100755 --- a/stack.sh +++ b/stack.sh @@ -1371,6 +1371,9 @@ if is_service_enabled swift; then # Install memcached for swift. install_package memcached + # We make sure to kill all swift processes first + pkill -f -9 swift- + # We first do a bit of setup by creating the directories and # changing the permissions so we can run it as our user. From ef352b51c5b8840c70c396196d3a9c155a40fd02 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Wed, 13 Jun 2012 11:24:48 +0200 Subject: [PATCH 543/967] Ignore vim swap files Makes the life of vim users simpler. Change-Id: I62da9821be8b619fe3fc41d4eeb6286d3ec88e1c --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index c8d25605..83c54197 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ proto *~ +.*.sw[nop] *.log src localrc From bf3923174e40b7931963bcba455d837e0ec864e3 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Wed, 13 Jun 2012 11:26:31 +0200 Subject: [PATCH 544/967] Remove test.ini when done with it during tests The file is created in the tests, and so should also be removed. Change-Id: I8c087cc5cc71ac80b7c4974e2a69a747a929bedf --- tests/functions.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/functions.sh b/tests/functions.sh index e7fbe0c5..e436ed97 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -141,3 +141,5 @@ if [[ -z "$VAL" ]]; then else echo "inicomment failed: $VAL" fi + +rm test.ini From 4556b5d2b041b279f17501d1c8037ee5c5e35931 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 14 Jun 2012 09:20:55 +0200 Subject: [PATCH 545/967] Fix grep on MySQL configuration file The file is not readable by non-root users (at least on openSUSE), so we need to use sudo. 
Change-Id: I42fff066a60318a954110736d5352387888931e8 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 87d38d2c..41e07107 100755 --- a/stack.sh +++ b/stack.sh @@ -853,7 +853,7 @@ EOF sudo sed -i '/^bind-address/s/127.0.0.1/0.0.0.0/g' $MY_CONF # Set default db type to InnoDB - if grep -q "default-storage-engine" $MY_CONF; then + if sudo grep -q "default-storage-engine" $MY_CONF; then # Change it sudo bash -c "source $TOP_DIR/functions; iniset $MY_CONF mysqld default-storage-engine InnoDB" else From eeaf266a7962fd1ee751288bae38a0710b3a6771 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 14 Jun 2012 09:11:38 -0500 Subject: [PATCH 546/967] Skip screen if not present Change-Id: I018249c415fa91ca6461a8f9d236767aa57a8fbd --- unstack.sh | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/unstack.sh b/unstack.sh index 7de0d749..341270d6 100755 --- a/unstack.sh +++ b/unstack.sh @@ -25,9 +25,12 @@ if [[ "$1" == "--all" ]]; then fi # Shut down devstack's screen to get the bulk of OpenStack services in one shot -SESSION=$(screen -ls | grep "[0-9].stack" | awk '{ print $1 }') -if [[ -n "$SESSION" ]]; then - screen -X -S $SESSION quit +SCREEN=$(which screen) +if [[ -n "$SCREEN" ]]; then + SESSION=$(screen -ls | awk '/[0-9].stack/ { print $1 }') + if [[ -n "$SESSION" ]]; then + screen -X -S $SESSION quit + fi fi # Swift runs daemons From 3378b3a69ef911d2bdd2548891a0c29f66e9b4cc Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Tue, 12 Jun 2012 14:18:57 +0200 Subject: [PATCH 547/967] Stop adding the stack user to the sudo or wheel group This is not needed since we explicitly add a sudoers rule for the stack user. Change-Id: I4c63ab25811d55b7eee2677c954133dc3e7ae397 --- stack.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 71c2f9ea..a1f89c1e 100755 --- a/stack.sh +++ b/stack.sh @@ -145,14 +145,12 @@ if [[ $EUID -eq 0 ]]; then # ability to run sudo if [[ "$os_PACKAGE" = "deb" ]]; then dpkg -l sudo || apt_get update && install_package sudo - STACK_GROUP=sudo else rpm -qa | grep sudo || install_package sudo - STACK_GROUP=wheel fi if ! getent passwd stack >/dev/null; then echo "Creating a user called stack" - useradd -U -G $STACK_GROUP -s /bin/bash -d $DEST -m stack + useradd -U -s /bin/bash -d $DEST -m stack fi echo "Giving stack user passwordless sudo priviledges" From ce5b8ed38b32f13a00411dfc980bf02e89932d7b Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Thu, 14 Jun 2012 12:27:58 +0200 Subject: [PATCH 548/967] Support upcoming rootwrap.d config files Add support in devstack for upcoming /etc/nova/rootwrap.d configuration files. Note that we don't change anything if Nova doesn't ship them, so devstack supports both cases. This is the first step for blueprint folsom-nova-rootwrap. It needs to go in first so that tests pass when rootwrap.d changes will be proposed in Nova. 
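As an editorial illustration (not part of the patch): assuming the defaults used elsewhere in stack.sh, namely NOVA_ROOTWRAP=/usr/bin/nova-rootwrap, NOVA_CONF_DIR=/etc/nova and a stack user, the generated /etc/sudoers.d/nova-rootwrap entry would render roughly as shown below; later patches in this series revise the exact argument to point at a rootwrap.conf file instead.

    # Nova without rootwrap.d shipped (behaviour unchanged):
    stack ALL=(root) NOPASSWD: /usr/bin/nova-rootwrap
    # Nova shipping etc/nova/rootwrap.d filters; the trailing * is a sudoers
    # wildcard, so the config-directory argument and anything after it are allowed:
    stack ALL=(root) NOPASSWD: /usr/bin/nova-rootwrap /etc/nova/rootwrap.d *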
Change-Id: I0189575ed9adb1be61c8563ce8f3199c52fc08ff --- stack.sh | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 71c2f9ea..cc5f5941 100755 --- a/stack.sh +++ b/stack.sh @@ -1178,9 +1178,25 @@ sudo chown `whoami` $NOVA_CONF_DIR cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR +# If Nova ships the new rootwrap.d config files, deploy them +# (owned by root) and add a parameter to $NOVA_ROOTWRAP +ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP" +if [[ -d $NOVA_DIR/etc/nova/rootwrap.d ]]; then + # Wipe any existing rootwrap.d files first + if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then + sudo rm -rf $NOVA_CONF_DIR/rootwrap.d + fi + sudo mkdir -m 755 $NOVA_CONF_DIR/rootwrap.d + sudo cp $NOVA_DIR/etc/nova/rootwrap.d/* $NOVA_CONF_DIR/rootwrap.d + sudo chown -R root:root $NOVA_CONF_DIR/rootwrap.d + sudo chmod 644 $NOVA_CONF_DIR/rootwrap.d/* + NOVA_ROOTWRAP="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.d" + ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP *" +fi + # Set up the rootwrap sudoers TEMPFILE=`mktemp` -echo "$USER ALL=(root) NOPASSWD: $NOVA_ROOTWRAP" >$TEMPFILE +echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap From 5f4b6de23a30999c172e47c11e78739e5bdcfbbf Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Tue, 5 Jun 2012 07:05:35 -0400 Subject: [PATCH 549/967] Quantum common configuration support. This ensures that devstack will work with the Quantum versions and ini files prior to, and after, the above mentioned changes. Change-Id: I18da8febf808c4752330ad1699a079c1d0a544fa --- stack.sh | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index 71c2f9ea..86b9e0ee 100755 --- a/stack.sh +++ b/stack.sh @@ -1084,17 +1084,23 @@ fi # Quantum service (for controller node) if is_service_enabled q-svc; then - Q_PLUGIN_INI_FILE=/etc/quantum/plugins.ini - Q_CONF_FILE=/etc/quantum/quantum.conf # must remove this file from existing location, otherwise Quantum will prefer it if [[ -e $QUANTUM_DIR/etc/plugins.ini ]]; then + # Support prior to common config + Q_PLUGIN_INI_FILE=/etc/quantum/plugins.ini sudo mv $QUANTUM_DIR/etc/plugins.ini $Q_PLUGIN_INI_FILE fi + Q_CONF_FILE=/etc/quantum/quantum.conf + Q_API_PASTE_FILE=/etc/quantum/api-paste.ini if [[ -e $QUANTUM_DIR/etc/quantum.conf ]]; then sudo mv $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE fi + if [[ -e $QUANTUM_DIR/etc/api-paste.ini ]]; then + sudo mv $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE + fi + if is_service_enabled mysql; then mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e "DROP DATABASE IF EXISTS $Q_DB_NAME;" mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e "CREATE DATABASE IF NOT EXISTS $Q_DB_NAME CHARACTER SET utf8;" @@ -1102,9 +1108,16 @@ if is_service_enabled q-svc; then echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." 
exit 1 fi - sudo sed -i -e "s/^provider =.*$/provider = $Q_PLUGIN_CLASS/g" $Q_PLUGIN_INI_FILE - screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server $Q_CONF_FILE" + # Update either configuration file with plugin or old plugin file + # file checked below exists only in common config version + if [[ -e $QUANTUM_DIR/quantum/tests/etc/quantum.conf.test ]]; then + sudo sed -i -e "s/^core_plugin =.*$/core_plugin = $Q_PLUGIN_CLASS/g" $Q_CONF_FILE + else + sudo sed -i -e "s/^provider =.*$/provider = $Q_PLUGIN_CLASS/g" $Q_PLUGIN_INI_FILE + fi + + screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE" fi # Quantum agent (for compute nodes) From f8be4288836c1dac97871a733401a81f8be93ea5 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 19 Jun 2012 11:01:04 +0000 Subject: [PATCH 550/967] Fix swift3 middleware entry point. - Fixes bug 1015071. Change-Id: I969d8826b2a2633488d6eac321e1f52bd47bf7f8 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 71c2f9ea..4d922a5c 100755 --- a/stack.sh +++ b/stack.sh @@ -1497,7 +1497,7 @@ admin_user = swift admin_password = ${SERVICE_PASSWORD} [filter:swift3] -use = egg:swift3#middleware +use = egg:swift3#swift3 EOF cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONFIG_DIR}/swift.conf From 3f7c06f5aaff5d3e2ec28931e0fe4ab8376208e6 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 3 Apr 2012 17:19:36 -0500 Subject: [PATCH 551/967] Support sql service catalog backend * Add KEYSTONE_CATALOG_BACKEND to select 'sql' or 'template' 'template' is the default * Add service creation to keystone_data.sh Rebased and re-submitted Fixes bug 966457 Change-Id: Id24fbdeba3de11537559e24b72571ec92ab44750 --- files/keystone_data.sh | 225 +++++++++++++++++++++++++++++++++-------- stack.sh | 68 ++++++++----- 2 files changed, 226 insertions(+), 67 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 2cdc2fa9..1f05f109 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -19,8 +19,13 @@ # SERVICE_TOKEN - aka admin_token in keystone.conf # SERVICE_ENDPOINT - local Keystone admin endpoint # SERVICE_TENANT_NAME - name of tenant containing service accounts +# SERVICE_HOST - host used for endpoint creation # ENABLED_SERVICES - stack.sh's list of services to start # DEVSTACK_DIR - Top-level DevStack directory +# KEYSTONE_CATALOG_BACKEND - used to determine service catalog creation + +# Defaults +# -------- ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete} SERVICE_PASSWORD=${SERVICE_PASSWORD:-$ADMIN_PASSWORD} @@ -29,10 +34,13 @@ export SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} function get_id () { - echo `$@ | awk '/ id / { print $4 }'` + echo `"$@" | awk '/ id / { print $4 }'` } + # Tenants +# ------- + ADMIN_TENANT=$(get_id keystone tenant-create --name=admin) SERVICE_TENANT=$(get_id keystone tenant-create --name=$SERVICE_TENANT_NAME) DEMO_TENANT=$(get_id keystone tenant-create --name=demo) @@ -40,6 +48,8 @@ INVIS_TENANT=$(get_id keystone tenant-create --name=invisible_to_admin) # Users +# ----- + ADMIN_USER=$(get_id keystone user-create --name=admin \ --pass="$ADMIN_PASSWORD" \ --email=admin@example.com) @@ -49,6 +59,8 @@ DEMO_USER=$(get_id keystone user-create --name=demo \ # Roles +# ----- + ADMIN_ROLE=$(get_id keystone role-create --name=admin) KEYSTONEADMIN_ROLE=$(get_id keystone role-create --name=KeystoneAdmin) KEYSTONESERVICE_ROLE=$(get_id keystone role-create 
--name=KeystoneServiceAdmin) @@ -73,58 +85,191 @@ keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $ keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $INVIS_TENANT -# Configure service users/roles -NOVA_USER=$(get_id keystone user-create --name=nova \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=nova@example.com) -keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user_id $NOVA_USER \ - --role_id $ADMIN_ROLE +# Services +# -------- -GLANCE_USER=$(get_id keystone user-create --name=glance \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=glance@example.com) -keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user_id $GLANCE_USER \ - --role_id $ADMIN_ROLE +# Keystone +if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + KEYSTONE_SERVICE=$(get_id keystone service-create \ + --name=keystone \ + --type=identity \ + --description="Keystone Identity Service") + keystone endpoint-create \ + --region RegionOne \ + --service_id $KEYSTONE_SERVICE \ + --publicurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" \ + --adminurl "http://$SERVICE_HOST:\$(admin_port)s/v2.0" \ + --internalurl "http://$SERVICE_HOST:\$(admin_port)s/v2.0" +fi -if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then - SWIFT_USER=$(get_id keystone user-create --name=swift \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=swift@example.com) - keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user_id $SWIFT_USER \ - --role_id $ADMIN_ROLE +# Nova +if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then + NOVA_USER=$(get_id keystone user-create \ + --name=nova \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=nova@example.com) + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $NOVA_USER \ + --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + NOVA_SERVICE=$(get_id keystone service-create \ + --name=nova \ + --type=compute \ + --description="Nova Compute Service") + keystone endpoint-create \ + --region RegionOne \ + --service_id $NOVA_SERVICE \ + --publicurl "http://$SERVICE_HOST:\$(compute_port)s/v1.1/\$(tenant_id)s" \ + --adminurl "http://$SERVICE_HOST:\$(compute_port)s/v1.1/\$(tenant_id)s" \ + --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v1.1/\$(tenant_id)s" + fi # Nova needs ResellerAdmin role to download images when accessing # swift through the s3 api. The admin role in swift allows a user # to act as an admin for their tenant, but ResellerAdmin is needed # for a user to act as any tenant. 
The name of this role is also # configurable in swift-proxy.conf RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin) - keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user_id $NOVA_USER \ - --role_id $RESELLER_ROLE + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $NOVA_USER \ + --role_id $RESELLER_ROLE fi -if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then - QUANTUM_USER=$(get_id keystone user-create --name=quantum \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=quantum@example.com) - keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user_id $QUANTUM_USER \ - --role_id $ADMIN_ROLE +# Volume +if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + VOLUME_SERVICE=$(get_id keystone service-create \ + --name=volume \ + --type=volume \ + --description="Volume Service") + keystone endpoint-create \ + --region RegionOne \ + --service_id $VOLUME_SERVICE \ + --publicurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ + --adminurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ + --internalurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" + fi +fi + +# Glance +if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then + GLANCE_USER=$(get_id keystone user-create \ + --name=glance \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=glance@example.com) + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $GLANCE_USER \ + --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + GLANCE_SERVICE=$(get_id keystone service-create \ + --name=glance \ + --type=image \ + --description="Glance Image Service") + keystone endpoint-create \ + --region RegionOne \ + --service_id $GLANCE_SERVICE \ + --publicurl "http://$SERVICE_HOST:9292/v1" \ + --adminurl "http://$SERVICE_HOST:9292/v1" \ + --internalurl "http://$SERVICE_HOST:9292/v1" + fi +fi + +# Swift +if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then + SWIFT_USER=$(get_id keystone user-create \ + --name=swift \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=swift@example.com) + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $SWIFT_USER \ + --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + SWIFT_SERVICE=$(get_id keystone service-create \ + --name=swift \ + --type="object-store" \ + --description="Swift Service") + keystone endpoint-create \ + --region RegionOne \ + --service_id $SWIFT_SERVICE \ + --publicurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \ + --adminurl "http://$SERVICE_HOST:8080/v1" \ + --internalurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" + fi +fi + +if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then + QUANTUM_USER=$(get_id keystone user-create \ + --name=quantum \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=quantum@example.com) + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $QUANTUM_USER \ + --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + QUANTUM_SERVICE=$(get_id keystone service-create \ + --name=quantum \ + --type=network \ + --description="Quantum Service") + keystone endpoint-create \ + --region RegionOne \ + --service_id $QUANTUM_SERVICE \ + --publicurl "http://$SERVICE_HOST:9696/" \ + --adminurl "http://$SERVICE_HOST:9696/" \ + --internalurl "http://$SERVICE_HOST:9696/" + fi +fi + +# EC2 +if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + EC2_SERVICE=$(get_id 
keystone service-create \ + --name=ec2 \ + --type=ec2 \ + --description="EC2 Compatibility Layer") + keystone endpoint-create \ + --region RegionOne \ + --service_id $EC2_SERVICE \ + --publicurl "http://$SERVICE_HOST:8773/services/Cloud" \ + --adminurl "http://$SERVICE_HOST:8773/services/Admin" \ + --internalurl "http://$SERVICE_HOST:8773/services/Cloud" + fi +fi + +# S3 +if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift" ]]; then + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + S3_SERVICE=$(get_id keystone service-create \ + --name=s3 \ + --type=s3 \ + --description="S3") + keystone endpoint-create \ + --region RegionOne \ + --service_id $S3_SERVICE \ + --publicurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ + --adminurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ + --internalurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" + fi fi if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then # Tempest has some tests that validate various authorization checks # between two regular users in separate tenants - ALT_DEMO_TENANT=$(get_id keystone tenant-create --name=alt_demo) - ALT_DEMO_USER=$(get_id keystone user-create --name=alt_demo \ - --pass="$ADMIN_PASSWORD" \ - --email=alt_demo@example.com) - keystone user-role-add --user $ALT_DEMO_USER --role $MEMBER_ROLE --tenant_id $ALT_DEMO_TENANT + ALT_DEMO_TENANT=$(get_id keystone tenant-create \ + --name=alt_demo) + ALT_DEMO_USER=$(get_id keystone user-create \ + --name=alt_demo \ + --pass="$ADMIN_PASSWORD" \ + --email=alt_demo@example.com) + keystone user-role-add \ + --tenant_id $ALT_DEMO_TENANT \ + --user_id $ALT_DEMO_USER \ + --role_id $MEMBER_ROLE fi diff --git a/stack.sh b/stack.sh index cc5f5941..f3c05377 100755 --- a/stack.sh +++ b/stack.sh @@ -1866,7 +1866,7 @@ if is_service_enabled key; then KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf - KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates + KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-template} if [[ ! 
-d $KEYSTONE_CONF_DIR ]]; then sudo mkdir -p $KEYSTONE_CONF_DIR @@ -1877,41 +1877,49 @@ if is_service_enabled key; then cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR fi - cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG # Rewrite stock keystone.conf: iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN" iniset $KEYSTONE_CONF sql connection "$BASE_SQL_CONN/keystone?charset=utf8" iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2" - # Configure keystone.conf to use templates - iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.TemplatedCatalog" - iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG" sed -e " /^pipeline.*ec2_extension crud_/s|ec2_extension crud_extension|ec2_extension s3_extension crud_extension|; " -i $KEYSTONE_CONF # Append the S3 bits iniset $KEYSTONE_CONF filter:s3_extension paste.filter_factory "keystone.contrib.s3:S3Extension.factory" - # Add swift endpoints to service catalog if swift is enabled - if is_service_enabled swift; then - echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG - echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG - echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG - echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG - fi + if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then + # Configure keystone.conf to use sql + iniset $KEYSTONE_CONF catalog driver keystone.catalog.backends.sql.Catalog + inicomment $KEYSTONE_CONF catalog template_file + else + KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates + cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG + # Add swift endpoints to service catalog if swift is enabled + if is_service_enabled swift; then + echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG + fi - # Add quantum endpoints to service catalog if quantum is enabled - if is_service_enabled quantum; then - echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG - echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG - echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG - echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG - fi + # Add quantum endpoints to service catalog if quantum is enabled + if is_service_enabled quantum; then + echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG + fi + + sudo sed -e " + s,%SERVICE_HOST%,$SERVICE_HOST,g; + s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g; + " -i $KEYSTONE_CATALOG - sudo sed -e " - s,%SERVICE_HOST%,$SERVICE_HOST,g; - 
s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g; - " -i $KEYSTONE_CATALOG + # Configure keystone.conf to use templates + iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.TemplatedCatalog" + iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG" + fi # Set up logging LOGGING_ROOT="devel" @@ -1923,25 +1931,31 @@ if is_service_enabled key; then iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG" iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production" - # initialize keystone database + # Set up the keystone database $KEYSTONE_DIR/bin/keystone-manage db_sync # launch keystone and wait for it to answer before continuing screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -O- $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ 2>&1 | grep -q '200 OK'; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while http_proxy= wget -O- $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ 2>&1 | grep -q 'refused'; do sleep 1; done"; then echo "keystone did not start" exit 1 fi # keystone_data.sh creates services, admin and demo users, and roles. SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 - ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \ + + ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ + SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ + S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \ + DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \ bash $FILES/keystone_data.sh # create an access key and secret key for nova ec2 register image if is_service_enabled swift && is_service_enabled nova; then - CREDS=$(keystone --os_auth_url=$SERVICE_ENDPOINT --os_username=nova --os_password=$SERVICE_PASSWORD --os_tenant_name=$SERVICE_TENANT_NAME ec2-credentials-create) + NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1) + NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1) + CREDS=$(keystone ec2-credentials-create --user $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID) ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') add_nova_opt "s3_access_key=$ACCESS_KEY" From 10db44dea85202465e50abc88403cb8af9bc248d Mon Sep 17 00:00:00 2001 From: Adam Young Date: Thu, 31 May 2012 20:18:53 -0400 Subject: [PATCH 552/967] Use ip instead of ifconfig. Should work on F16, F17 and Debian based systesm. 
Change-Id: I22dd10097baebf6e7e396fcbf5e68a3dae63539f --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index fcefbde4..f21c92f7 100755 --- a/stack.sh +++ b/stack.sh @@ -251,7 +251,7 @@ SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler} HOST_IP_IFACE=${HOST_IP_IFACE:-eth0} # Use the eth0 IP unless an explicit is set by ``HOST_IP`` environment variable if [ -z "$HOST_IP" -o "$HOST_IP" == "dhcp" ]; then - HOST_IP=`LC_ALL=C /sbin/ifconfig ${HOST_IP_IFACE} | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` + HOST_IP=`LC_ALL=C ip -f inet addr show ${HOST_IP_IFACE} | awk '/inet/ {split($2,parts,"/"); print parts[1]}' | head -n1` if [ "$HOST_IP" = "" ]; then echo "Could not determine host ip address." echo "Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted to eth0" From be5d3f23beb24836074b3de450b46e70cd50ad79 Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Wed, 20 Jun 2012 18:17:03 +0200 Subject: [PATCH 553/967] Adjust support for upcoming rootwrap changes Adjust the recently-added support for upcoming nova-rootwrap changes to match the latest proposed implementation. We now have a rootwrap.conf configuration files that must point to the directory where the filters are actually defined. See https://review.openstack.org/#/c/8747/1 for the Nova change that justifies this. Change-Id: I20f2bff0f9e87cb11d58f083a326656c4a124bf1 --- stack.sh | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index 126ff375..38f6735c 100755 --- a/stack.sh +++ b/stack.sh @@ -1194,19 +1194,26 @@ sudo chown `whoami` $NOVA_CONF_DIR cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR -# If Nova ships the new rootwrap.d config files, deploy them +# If Nova ships the new rootwrap filters files, deploy them # (owned by root) and add a parameter to $NOVA_ROOTWRAP ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP" -if [[ -d $NOVA_DIR/etc/nova/rootwrap.d ]]; then +if [[ -d $NOVA_DIR/etc/nova/rootwrap ]]; then # Wipe any existing rootwrap.d files first if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then sudo rm -rf $NOVA_CONF_DIR/rootwrap.d fi + # Deploy filters to /etc/nova/rootwrap.d sudo mkdir -m 755 $NOVA_CONF_DIR/rootwrap.d - sudo cp $NOVA_DIR/etc/nova/rootwrap.d/* $NOVA_CONF_DIR/rootwrap.d + sudo cp $NOVA_DIR/etc/nova/rootwrap/*.filters $NOVA_CONF_DIR/rootwrap.d sudo chown -R root:root $NOVA_CONF_DIR/rootwrap.d sudo chmod 644 $NOVA_CONF_DIR/rootwrap.d/* - NOVA_ROOTWRAP="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.d" + # Set up rootwrap.conf, pointing to /etc/nova/rootwrap.d + sudo cp $NOVA_DIR/etc/nova/rootwrap.conf $NOVA_CONF_DIR/ + sudo sed -e "s:^path=.*$:path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf + sudo chown root:root $NOVA_CONF_DIR/rootwrap.conf + sudo chmod 0644 $NOVA_CONF_DIR/rootwrap.conf + # Specify rootwrap.conf as first parameter to nova-rootwrap + NOVA_ROOTWRAP="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf" ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP *" fi From 67787e6b4c6f31388cbee6d83b67371b31c443d4 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 2 May 2012 11:48:15 -0500 Subject: [PATCH 554/967] Add cinder support * using the new functional pattern: cinder_XXX functions are in lib/cinder * enable with 'c-api,c-sch,c-vol' in ENABLED_SERVICES, n-vol is still the default * exercises/volumes.sh runs for cinder and n-vol * move config to /etc/cinder * change volume_group to stack-volumes; this also renames the backing file to 
/opt/stack/data/stack-volumes-backing-file. * removes osapi_volume from nova.conf enabled_apis * integrates cinder + keystone * launches c-sch * tweaks for multi node * move enabled_apis substitution to init_cinder 18Jun2010 * restored & rebased * update setup.py to use setup_develop() in lib/cinder Change-Id: I1e1aa4387031c56e4fa239eb73bea2af8cef0e38 --- exerciserc | 6 ++ exercises/euca.sh | 3 + exercises/volumes.sh | 4 ++ files/apts/cinder | 2 + files/keystone_data.sh | 11 +++ files/rpms/cinder | 2 + functions | 1 + lib/cinder | 154 +++++++++++++++++++++++++++++++++++++++++ stack.sh | 62 ++++++++++++++--- stackrc | 11 +++ unstack.sh | 2 +- 11 files changed, 248 insertions(+), 10 deletions(-) create mode 100644 files/apts/cinder create mode 100644 files/rpms/cinder create mode 100644 lib/cinder diff --git a/exerciserc b/exerciserc index b41714da..82c74b7f 100644 --- a/exerciserc +++ b/exerciserc @@ -20,3 +20,9 @@ export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))} # Max time to wait for a vm to terminate export TERMINATE_TIMEOUT=${TERMINATE_TIMEOUT:-30} + +# Max time to wait for a euca-volume command to propogate +export VOLUME_TIMEOUT=${VOLUME_TIMEOUT:-30} + +# Max time to wait for a euca-delete command to propogate +export VOLUME_DELETE_TIMEOUT=${SNAPSHOT_DELETE_TIMEOUT:-60} diff --git a/exercises/euca.sh b/exercises/euca.sh index 76e5202a..4a538c63 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -24,6 +24,9 @@ set -o xtrace # Keep track of the current directory EXERCISE_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) +VOLUME_ZONE=cinder +VOLUME_SIZE=1 +ATTACH_DEVICE=/dev/vdc # Import common functions source $TOP_DIR/functions diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 6749558a..0f25355f 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -33,6 +33,10 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc +# If cinder or n-vol are not enabled we exit with exitcode 55 which mean +# exercise is skipped. 
+is_service_enabled cinder n-vol || exit 55 + # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/files/apts/cinder b/files/apts/cinder new file mode 100644 index 00000000..5db06eac --- /dev/null +++ b/files/apts/cinder @@ -0,0 +1,2 @@ +tgt +lvm2 diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 2cdc2fa9..5aea82ea 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -9,6 +9,7 @@ # service nova admin, [ResellerAdmin (swift only)] # service quantum admin # if enabled # service swift admin # if enabled +# service cinder admin # if enabled # demo admin admin # demo demo Member, anotherrole # invisible_to_admin demo Member @@ -128,3 +129,13 @@ if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then --email=alt_demo@example.com) keystone user-role-add --user $ALT_DEMO_USER --role $MEMBER_ROLE --tenant_id $ALT_DEMO_TENANT fi + +if [[ "$ENABLED_SERVICES" =~ "cinder" ]]; then + CINDER_USER=$(get_id keystone user-create --name=cinder \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=cinder@example.com) + keystone user-role-add --tenant_id $SERVICE_TENANT \ + --user_id $CINDER_USER \ + --role_id $ADMIN_ROLE +fi diff --git a/files/rpms/cinder b/files/rpms/cinder new file mode 100644 index 00000000..df861aad --- /dev/null +++ b/files/rpms/cinder @@ -0,0 +1,2 @@ +lvm2 +scsi-target-utils diff --git a/functions b/functions index 32427a4d..a80d06d4 100644 --- a/functions +++ b/functions @@ -253,6 +253,7 @@ function is_service_enabled() { for service in ${services}; do [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0 + [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0 [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0 [[ ${service} == "quantum" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 done diff --git a/lib/cinder b/lib/cinder new file mode 100644 index 00000000..f0715a4d --- /dev/null +++ b/lib/cinder @@ -0,0 +1,154 @@ +# lib/cinder +# Install and start Cinder volume service + +# Dependencies: +# - functions +# - KEYSTONE_AUTH_* must be defined +# SERVICE_{TENANT_NAME|PASSWORD} must be defined + +# stack.sh +# --------- +# install_XXX +# configure_XXX +# init_XXX +# start_XXX +# stop_XXX +# cleanup_XXX + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following along as the install occurs. +set -o xtrace + + +# Defaults +# -------- + +# set up default directories +CINDER_DIR=$DEST/cinder +CINDERCLIENT_DIR=$DEST/python-cinderclient +CINDER_CONF_DIR=/etc/cinder +CINDER_CONF=$CINDER_CONF_DIR/cinder.conf + +# Name of the lvm volume group to use/create for iscsi volumes +VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} +VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} + +# cleanup_cinder() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_cinder() { + # This function intentionally left blank + : +} + +# configure_cinder() - Set config files, create data dirs, etc +function configure_cinder() { + setup_develop $CINDER_DIR + setup_develop $CINDERCLIENT_DIR + + if [[ ! 
-d $CINDER_CONF_DIR ]]; then + sudo mkdir -p $CINDER_CONF_DIR + fi + sudo chown `whoami` $CINDER_CONF_DIR + + cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR + + CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini + cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI + iniset $CINDER_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $CINDER_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $CINDER_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $CINDER_API_PASTE_INI filter:authtoken admin_user cinder + iniset $CINDER_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD + + cp $CINDER_DIR/etc/cinder/cinder.conf.sample $CINDER_CONF + iniset $CINDER_CONF DEFAULT auth_strategy keystone + iniset $CINDER_CONF DEFAULT verbose True + iniset $CINDER_CONF DEFAULT volume_group $VOLUME_GROUP + iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s + iniset $CINDER_CONF DEFAULT iscsi_helper tgtadm + iniset $CINDER_CONF DEFAULT sql_connection $BASE_SQL_CONN/cinder?charset=utf8 + iniset $CINDER_CONF DEFAULT rabbit_host $RABBIT_HOST + iniset $CINDER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD + iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI +} + +# init_cinder() - Initialize database and volume group +function init_cinder() { + # Force nova volumes off + NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/osapi_volume,//") + + if is_service_enabled mysql; then + # (re)create cinder database + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS cinder;' + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE cinder;' + + # (re)create cinder database + $CINDER_DIR/bin/cinder-manage db sync + fi + + if is_service_enabled c-vol; then + # Configure a default volume group called '`stack-volumes`' for the volume + # service if it does not yet exist. If you don't wish to use a file backed + # volume group, create your own volume group called ``stack-volumes`` before + # invoking ``stack.sh``. + # + # By default, the backing file is 2G in size, and is stored in ``/opt/stack/data``. + + if ! sudo vgs $VOLUME_GROUP; then + VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file} + VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M} + # Only create if the file doesn't already exists + [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE + DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` + # Only create if the loopback device doesn't contain $VOLUME_GROUP + if ! 
sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi + fi + + if sudo vgs $VOLUME_GROUP; then + # Remove iscsi targets + sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true + # Clean out existing volumes + for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do + # VOLUME_NAME_PREFIX prefixes the LVs we want + if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then + sudo lvremove -f $VOLUME_GROUP/$lv + fi + done + fi + fi +} + +# install_cinder() - Collect source and prepare +function install_cinder() { + git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH + git_clone $CINDERCLIENT_REPO $CINDERCLIENT_DIR $CINDERCLIENT_BRANCH +} + +# start_cinder() - Start running processes, including screen +function start_cinder() { + if is_service_enabled c-vol; then + if [[ "$os_PACKAGE" = "deb" ]]; then + # tgt in oneiric doesn't restart properly if tgtd isn't running + # do it in two steps + sudo stop tgt || true + sudo start tgt + else + # bypass redirection to systemctl during restart + sudo /sbin/service --skip-redirect tgtd restart + fi + fi + + screen_it c-api "cd $CINDER_DIR && $CINDER_DIR/bin/cinder-api --config-file $CINDER_CONF" + screen_it c-vol "cd $CINDER_DIR && $CINDER_DIR/bin/cinder-volume --config-file $CINDER_CONF" + screen_it c-sch "cd $CINDER_DIR && $CINDER_DIR/bin/cinder-scheduler --config-file $CINDER_CONF" +} + +# stop_cinder() - Stop running processes (non-screen) +function stop_cinder() { + # FIXME(dtroyer): stop only the cinder screen window? + + if is_service_enabled c-vol; then + stop_service tgt + fi +} diff --git a/stack.sh b/stack.sh index 126ff375..793ded99 100755 --- a/stack.sh +++ b/stack.sh @@ -112,6 +112,13 @@ else NOVA_ROOTWRAP=/usr/bin/nova-rootwrap fi +# ``stack.sh`` keeps function libraries here +# Make sure ``$TOP_DIR/lib`` directory is present +if [ ! -d $TOP_DIR/lib ]; then + echo "ERROR: missing devstack/lib - did you grab more than just stack.sh?" + exit 1 +fi + # stack.sh keeps the list of ``apt`` and ``pip`` dependencies in external # files, along with config templates and other useful files. You can find these # in the ``files`` directory (next to this script). We will reference this @@ -130,6 +137,12 @@ if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].stack"; then exit 1 fi +# Make sure we only have one volume service enabled. +if is_service_enabled cinder && is_service_enabled n-vol; then + echo "ERROR: n-vol and cinder must not be enabled at the same time" + exit 1 +fi + # OpenStack is designed to be run as a regular user (Horizon will fail to run # as root, since apache refused to startup serve content from root user). If # ``stack.sh`` is run as **root**, it automatically creates a **stack** user with @@ -201,6 +214,19 @@ fi # prerequisites and initialize ``$DEST``. 
OFFLINE=`trueorfalse False $OFFLINE` +# Destination path for service data +DATA_DIR=${DATA_DIR:-${DEST}/data} +sudo mkdir -p $DATA_DIR +sudo chown `whoami` $DATA_DIR + + +# Projects +# -------- + +# Get project function libraries +source $TOP_DIR/lib/cinder + + # Set the destination directories for openstack projects NOVA_DIR=$DEST/nova HORIZON_DIR=$DEST/horizon @@ -234,7 +260,7 @@ M_HOST=${M_HOST:-localhost} M_MAC_RANGE=${M_MAC_RANGE:-FE-EE-DD-00-00-00/24} # Name of the lvm volume group to use/create for iscsi volumes -VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes} +VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-} @@ -607,6 +633,10 @@ function get_packages() { if [[ ! $file_to_parse =~ glance ]]; then file_to_parse="${file_to_parse} glance" fi + elif [[ $service == c-* ]]; then + if [[ ! $file_to_parse =~ cinder ]]; then + file_to_parse="${file_to_parse} cinder" + fi elif [[ $service == n-* ]]; then if [[ ! $file_to_parse =~ nova ]]; then file_to_parse="${file_to_parse} nova" @@ -704,10 +734,12 @@ if is_service_enabled m-svc; then # melange git_clone $MELANGE_REPO $MELANGE_DIR $MELANGE_BRANCH fi - if is_service_enabled melange; then git_clone $MELANGECLIENT_REPO $MELANGECLIENT_DIR $MELANGECLIENT_BRANCH fi +if is_service_enabled cinder; then + install_cinder +fi # Initialization @@ -743,6 +775,9 @@ fi if is_service_enabled melange; then setup_develop $MELANGECLIENT_DIR fi +if is_service_enabled cinder; then + configure_cinder +fi # Do this _after_ glance is installed to override the old binary setup_develop $GLANCECLIENT_DIR @@ -1643,17 +1678,18 @@ fi # Volume Service # -------------- -if is_service_enabled n-vol; then - # - # Configure a default volume group called 'nova-volumes' for the nova-volume +if is_service_enabled cinder; then + init_cinder +elif is_service_enabled n-vol; then + # Configure a default volume group called '`stack-volumes`' for the volume # service if it does not yet exist. If you don't wish to use a file backed - # volume group, create your own volume group called 'nova-volumes' before - # invoking stack.sh. + # volume group, create your own volume group called ``stack-volumes`` before + # invoking ``stack.sh``. # - # By default, the backing file is 2G in size, and is stored in /opt/stack. + # By default, the backing file is 2G in size, and is stored in ``/opt/stack/data``. if ! sudo vgs $VOLUME_GROUP; then - VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file} + VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file} VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M} # Only create if the file doesn't already exists [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE @@ -1801,6 +1837,10 @@ else add_nova_opt "logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" fi +# If cinder is enabled, use the cinder volume driver +if is_service_enabled cinder; then + add_nova_opt "volume_api_class=nova.volume.cinder.API" +fi # Provide some transition from EXTRA_FLAGS to EXTRA_OPTS if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; then @@ -1968,6 +2008,7 @@ fi # launch the nova-api and wait for it to answer before continuing if is_service_enabled n-api; then + add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS" screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api" echo "Waiting for nova-api to start..." if ! 
timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then @@ -2003,6 +2044,9 @@ screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler" screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF --web ." screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF" screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" +if is_service_enabled cinder; then + start_cinder +fi screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log" screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v" diff --git a/stackrc b/stackrc index 3387d43e..cc07906d 100644 --- a/stackrc +++ b/stackrc @@ -9,6 +9,17 @@ RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) # ENABLED_SERVICES="$ENABLED_SERVICES,swift" ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit +# Set the default Nova APIs to enable +NOVA_ENABLED_APIS=ec2,osapi_compute,osapi_volume,metadata + +# volume service +CINDER_REPO=https://github.com/openstack/cinder +CINDER_BRANCH=master + +# volume client +CINDERCLIENT_REPO=https://github.com/openstack/python-cinderclient +CINDERCLIENT_BRANCH=master + # compute service NOVA_REPO=https://github.com/openstack/nova.git NOVA_BRANCH=master diff --git a/unstack.sh b/unstack.sh index 341270d6..879f8424 100755 --- a/unstack.sh +++ b/unstack.sh @@ -44,7 +44,7 @@ if is_service_enabled horizon; then fi # Get the iSCSI volumes -if is_service_enabled n-vol; then +if is_service_enabled cinder n-vol; then TARGETS=$(sudo tgtadm --op show --mode target) if [[ -n "$TARGETS" ]]; then # FIXME(dtroyer): this could very well require more here to From 8b3f438ab83cad88f745bdf9f4f5dc55bdd4ff57 Mon Sep 17 00:00:00 2001 From: Gabriel Hurley Date: Wed, 20 Jun 2012 13:14:38 -0700 Subject: [PATCH 555/967] Execute Horizon bin script with correct group. Fixes bug 1014735. Change-Id: I21217fd722ca85124818c1347680734ccd152eb8 --- files/apache-horizon.template | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apache-horizon.template b/files/apache-horizon.template index e54f16ce..fb98471b 100644 --- a/files/apache-horizon.template +++ b/files/apache-horizon.template @@ -1,6 +1,7 @@ WSGIScriptAlias / %HORIZON_DIR%/openstack_dashboard/wsgi/django.wsgi WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 home=%HORIZON_DIR% + WSGIApplicationGroup %{GLOBAL} SetEnv APACHE_RUN_USER %USER% SetEnv APACHE_RUN_GROUP %GROUP% From 4a1f5a723db8cc7508e58e9063555700eb29476c Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 21 Jun 2012 09:57:43 -0500 Subject: [PATCH 556/967] Add f17 to tested distro list Change-Id: I2cbd1f90a0986936fa3e6139f789c1274aead627 --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index ec1e75aa..8f54611c 100755 --- a/stack.sh +++ b/stack.sh @@ -91,8 +91,8 @@ DEST=${DEST:-/opt/stack} # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|f16) ]]; then - echo "WARNING: this script has been tested on oneiric, precise and f16" +if [[ ! 
${DISTRO} =~ (oneiric|precise|quantal|f16|f17) ]]; then + echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then echo "If you wish to run this script anyway run with FORCE=yes" exit 1 From 76a9eaf72b42b289205376a04abcda61d45a664c Mon Sep 17 00:00:00 2001 From: Thierry Carrez Date: Fri, 22 Jun 2012 15:30:28 +0200 Subject: [PATCH 557/967] Support last version of upcoming rootwrap changes Hopefully last adjustment needed to support the upcoming rootwrap changes at https://review.openstack.org/#/c/8747/. I think core reviewers there finally agree on where things should live and how they should be named. Change-Id: If8814ca0d147856aeed37676e9c3de4767b561c0 --- stack.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 3bb19bcc..35e7fc16 100755 --- a/stack.sh +++ b/stack.sh @@ -1232,19 +1232,19 @@ cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR # If Nova ships the new rootwrap filters files, deploy them # (owned by root) and add a parameter to $NOVA_ROOTWRAP ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP" -if [[ -d $NOVA_DIR/etc/nova/rootwrap ]]; then +if [[ -d $NOVA_DIR/etc/nova/rootwrap.d ]]; then # Wipe any existing rootwrap.d files first if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then sudo rm -rf $NOVA_CONF_DIR/rootwrap.d fi # Deploy filters to /etc/nova/rootwrap.d sudo mkdir -m 755 $NOVA_CONF_DIR/rootwrap.d - sudo cp $NOVA_DIR/etc/nova/rootwrap/*.filters $NOVA_CONF_DIR/rootwrap.d + sudo cp $NOVA_DIR/etc/nova/rootwrap.d/*.filters $NOVA_CONF_DIR/rootwrap.d sudo chown -R root:root $NOVA_CONF_DIR/rootwrap.d sudo chmod 644 $NOVA_CONF_DIR/rootwrap.d/* # Set up rootwrap.conf, pointing to /etc/nova/rootwrap.d sudo cp $NOVA_DIR/etc/nova/rootwrap.conf $NOVA_CONF_DIR/ - sudo sed -e "s:^path=.*$:path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf + sudo sed -e "s:^filters_path=.*$:filters_path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf sudo chown root:root $NOVA_CONF_DIR/rootwrap.conf sudo chmod 0644 $NOVA_CONF_DIR/rootwrap.conf # Specify rootwrap.conf as first parameter to nova-rootwrap From 4402d6e9d7d1ac5cb29e48e4c3e1341623110fd7 Mon Sep 17 00:00:00 2001 From: Aaron Rosen Date: Tue, 19 Jun 2012 16:09:40 -0700 Subject: [PATCH 558/967] update stack.sh due to quantum changes The following git commit a86c31ba1544528cd8473896076e8b1b4ef14bce changes the following variables in ovs_quantum_plugin.ini from (enable-tunneling, local-ip) to enable_tunneling, local_ip). Therefore the quantum configuration section of stack.sh needs to also update these variable names. Fixes bug 1015333 Removes the logic used for plugins.ini which is no longer needed and adds a mv to copy policy.json Fixes bug 1015418 Change-Id: Ib4f39b3095bab3db116395e750cef695113340e8 --- stack.sh | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/stack.sh b/stack.sh index 3bb19bcc..8c677eed 100755 --- a/stack.sh +++ b/stack.sh @@ -1116,20 +1116,15 @@ if is_service_enabled quantum; then echo "OVS 1.4+ is required for tunneling between multiple hosts." 
exit 1 fi - sudo sed -i -e "s/.*enable-tunneling = .*$/enable-tunneling = $OVS_ENABLE_TUNNELING/g" /$Q_PLUGIN_CONF_FILE + sudo sed -i -e "s/.*enable_tunneling = .*$/enable_tunneling = $OVS_ENABLE_TUNNELING/g" /$Q_PLUGIN_CONF_FILE fi fi # Quantum service (for controller node) if is_service_enabled q-svc; then - # must remove this file from existing location, otherwise Quantum will prefer it - if [[ -e $QUANTUM_DIR/etc/plugins.ini ]]; then - # Support prior to common config - Q_PLUGIN_INI_FILE=/etc/quantum/plugins.ini - sudo mv $QUANTUM_DIR/etc/plugins.ini $Q_PLUGIN_INI_FILE - fi Q_CONF_FILE=/etc/quantum/quantum.conf Q_API_PASTE_FILE=/etc/quantum/api-paste.ini + Q_POLICY_FILE=/etc/quantum/policy.json if [[ -e $QUANTUM_DIR/etc/quantum.conf ]]; then sudo mv $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE @@ -1139,6 +1134,10 @@ if is_service_enabled q-svc; then sudo mv $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE fi + if [[ -e $QUANTUM_DIR/etc/policy.json ]]; then + sudo mv $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE + fi + if is_service_enabled mysql; then mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e "DROP DATABASE IF EXISTS $Q_DB_NAME;" mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e "CREATE DATABASE IF NOT EXISTS $Q_DB_NAME CHARACTER SET utf8;" @@ -1147,14 +1146,8 @@ if is_service_enabled q-svc; then exit 1 fi - # Update either configuration file with plugin or old plugin file - # file checked below exists only in common config version - if [[ -e $QUANTUM_DIR/quantum/tests/etc/quantum.conf.test ]]; then - sudo sed -i -e "s/^core_plugin =.*$/core_plugin = $Q_PLUGIN_CLASS/g" $Q_CONF_FILE - else - sudo sed -i -e "s/^provider =.*$/provider = $Q_PLUGIN_CLASS/g" $Q_PLUGIN_INI_FILE - fi - + # Update either configuration file with plugin + sudo sed -i -e "s/^core_plugin =.*$/core_plugin = $Q_PLUGIN_CLASS/g" $Q_CONF_FILE screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE" fi @@ -1179,7 +1172,7 @@ if is_service_enabled q-agt; then sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int - sudo sed -i -e "s/.*local-ip = .*/local-ip = $HOST_IP/g" /$Q_PLUGIN_CONF_FILE + sudo sed -i -e "s/.*local_ip = .*/local_ip = $HOST_IP/g" /$Q_PLUGIN_CONF_FILE AGENT_BINARY=$QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then # Start up the quantum <-> linuxbridge agent From 94cb960009cabe0afbf35bd843ae7580fbbd56d1 Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Fri, 22 Jun 2012 15:28:29 -0700 Subject: [PATCH 559/967] Add ERROR_ON_CLONE. This lets the user assert that stack.sh should never need to clone any git repositories. If set to True, and devstack does need to clone a git repo, stack.sh will exit with an error. This is useful in testing environments to make sure that the correct code is being tested instead of silently falling back on cloning from the public repos. Change-Id: Ic0312ab4df492c5cf2e04c08aa7669a81736daa6 --- functions | 4 ++++ stack.sh | 5 +++++ 2 files changed, 9 insertions(+) diff --git a/functions b/functions index a80d06d4..a3e95370 100644 --- a/functions +++ b/functions @@ -142,6 +142,8 @@ GetOSVersion() { # be owned by the installation user, we create the directory and change the # ownership to the proper user. 
# Set global RECLONE=yes to simulate a clone when dest-dir exists +# Set global ERROR_ON_CLONE=True to abort execution with an error if the git repo +# does not exist (default is False, meaning the repo will be cloned). # git_clone remote dest-dir branch function git_clone { [[ "$OFFLINE" = "True" ]] && return @@ -153,6 +155,7 @@ function git_clone { if echo $GIT_BRANCH | egrep -q "^refs"; then # If our branch name is a gerrit style refs/changes/... if [[ ! -d $GIT_DEST ]]; then + [[ "$ERROR_ON_CLONE" = "True" ]] && exit 1 git clone $GIT_REMOTE $GIT_DEST fi cd $GIT_DEST @@ -160,6 +163,7 @@ function git_clone { else # do a full clone only if the directory doesn't exist if [[ ! -d $GIT_DEST ]]; then + [[ "$ERROR_ON_CLONE" = "True" ]] && exit 1 git clone $GIT_REMOTE $GIT_DEST cd $GIT_DEST # This checkout syntax works for both branches and tags diff --git a/stack.sh b/stack.sh index 3bb19bcc..f11b5e24 100755 --- a/stack.sh +++ b/stack.sh @@ -214,6 +214,11 @@ fi # prerequisites and initialize ``$DEST``. OFFLINE=`trueorfalse False $OFFLINE` +# Set True to configure ``stack.sh`` to exit with an error code if it is asked +# to clone any git repositories. If devstack is used in a testing environment, +# this may be used to ensure that the correct code is being tested. +ERROR_ON_CLONE=`trueorfalse False $ERROR_ON_CLONE` + # Destination path for service data DATA_DIR=${DATA_DIR:-${DEST}/data} sudo mkdir -p $DATA_DIR From a34961b07446b47c11cfec151d12f17a1e267c4d Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 26 Jun 2012 13:05:33 -0500 Subject: [PATCH 560/967] Install glanceclient before horizon tries to Change-Id: I6e8434ee3cce07b740c9c83380114ad6ed4375b0 --- stack.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 8c3f45b5..5b52b63c 100755 --- a/stack.sh +++ b/stack.sh @@ -766,6 +766,10 @@ fi if is_service_enabled g-api n-api; then setup_develop $GLANCE_DIR fi + +# Do this _after_ glance is installed to override the old binary +setup_develop $GLANCECLIENT_DIR + setup_develop $NOVA_DIR if is_service_enabled horizon; then setup_develop $HORIZON_DIR @@ -784,9 +788,6 @@ if is_service_enabled cinder; then configure_cinder fi -# Do this _after_ glance is installed to override the old binary -setup_develop $GLANCECLIENT_DIR - # Syslog # ------ From c4cd4140d3bb64a02321918df2f0f9258c6c4148 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 27 Jun 2012 11:01:40 +0200 Subject: [PATCH 561/967] Allow removing services explicitly. - When adding a - (hyphen) at the begining of a service in ENABLED_SERVICES the service will be removed explicitly. Change-Id: I69ce082d13b79aa88426e8012a941c4ae99741f6 --- stack.sh | 10 ++++++++++ stackrc | 7 ++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 4523c2c2..ade87105 100755 --- a/stack.sh +++ b/stack.sh @@ -89,6 +89,16 @@ DEST=${DEST:-/opt/stack} # Sanity Check # ============ +# We are looking for services with a - at the beginning to force +# excluding those services. For example if you want to install all the default +# services but not nova-volume (n-vol) you can have this set in your localrc : +# ENABLED_SERVICES+=",-n-vol" +for service in ${ENABLED_SERVICES//,/ }; do + if [[ ${service} == -* ]]; then + ENABLED_SERVICES=$(echo ${ENABLED_SERVICES}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g") + fi +done + # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` if [[ ! 
${DISTRO} =~ (oneiric|precise|quantal|f16) ]]; then diff --git a/stackrc b/stackrc index cc07906d..3a19cdb0 100644 --- a/stackrc +++ b/stackrc @@ -6,7 +6,12 @@ RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) # by default you can append them in your ENABLED_SERVICES variable in # your localrc. For example for swift you can just add this in your # localrc to add it with the other services: -# ENABLED_SERVICES="$ENABLED_SERVICES,swift" +# ENABLED_SERVICES+=,swift +# +# If you like to explicitly remove services you can add a -$service in +# ENABLED_SERVICES, for example in your localrc to install all defaults but not +# nova-volume you would just need to set this : +# ENABLED_SERVICES+=,-n-vol ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit # Set the default Nova APIs to enable From e26232bc9283a6f26a4d37bc0451b2fe06968bad Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 27 Jun 2012 17:55:15 -0500 Subject: [PATCH 562/967] Move DEST ahead of stack account creation Change-Id: I25892e8a9249d3d421062d910d53b8de8134ef80 --- stack.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/stack.sh b/stack.sh index ade87105..f2f12ad7 100755 --- a/stack.sh +++ b/stack.sh @@ -219,6 +219,12 @@ else sudo rm -f /etc/sudoers.d/stack_sh_nova fi +# Create the destination directory and ensure it is writable by the user +sudo mkdir -p $DEST +if [ ! -w $DEST ]; then + sudo chown `whoami` $DEST +fi + # Set True to configure ``stack.sh`` to run cleanly without Internet access. # ``stack.sh`` must have been previously run with Internet access to install # prerequisites and initialize ``$DEST``. @@ -602,12 +608,6 @@ failed() { # an error. It is also useful for following along as the install occurs. set -o xtrace -# create the destination directory and ensure it is writable by the user -sudo mkdir -p $DEST -if [ ! -w $DEST ]; then - sudo chown `whoami` $DEST -fi - # Install Packages # ================ From ad101767b7f611ad5ac9a7972b74d6221b962908 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 27 Jun 2012 22:04:40 -0500 Subject: [PATCH 563/967] Cleanup exercise scripts * whitespace and comment cleanups only Change-Id: Iab9c2d9a25c3473f14190d60f2f2cf5be0ed59dc --- exercises/bundle.sh | 1 + exercises/client-args.sh | 5 ++++- exercises/client-env.sh | 4 ++-- exercises/floating_ips.sh | 3 ++- exercises/swift.sh | 1 + 5 files changed, 10 insertions(+), 4 deletions(-) diff --git a/exercises/bundle.sh b/exercises/bundle.sh index c607c94b..daff5f9c 100755 --- a/exercises/bundle.sh +++ b/exercises/bundle.sh @@ -17,6 +17,7 @@ set -o errexit # an error. It is also useful for following allowing as the install occurs. 
set -o xtrace + # Settings # ======== diff --git a/exercises/client-args.sh b/exercises/client-args.sh index 1d7d5b6c..7229ecfe 100755 --- a/exercises/client-args.sh +++ b/exercises/client-args.sh @@ -1,11 +1,14 @@ #!/usr/bin/env bash +**client-args.sh** + # Test OpenStack client authentication aguemnts handling echo "*********************************************************************" echo "Begin DevStack Exercise: $0" echo "*********************************************************************" + # Settings # ======== @@ -38,7 +41,7 @@ export x_USERNAME=$OS_USERNAME export x_PASSWORD=$OS_PASSWORD export x_AUTH_URL=$OS_AUTH_URL -#Unset the usual variables to force argument processing +# Unset the usual variables to force argument processing unset OS_TENANT_NAME unset OS_USERNAME unset OS_PASSWORD diff --git a/exercises/client-env.sh b/exercises/client-env.sh index 10871a6a..d242ee53 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -1,13 +1,13 @@ #!/usr/bin/env bash +**client-env.sh** + # Test OpenStack client enviroment variable handling echo "*********************************************************************" echo "Begin DevStack Exercise: $0" echo "*********************************************************************" -# Verify client workage -VERIFY=${1:-""} # Settings # ======== diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 82f29eb4..51019a34 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -83,7 +83,7 @@ if ! nova secgroup-list | grep -q $SECGROUP; then fi fi -# determinine instance type +# Determinine instance type # ------------------------- # List of instance types: @@ -100,6 +100,7 @@ NAME="ex-float" VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | get_field 2` die_if_not_set VM_UUID "Failure launching $NAME" + # Testing # ======= diff --git a/exercises/swift.sh b/exercises/swift.sh index 732445d3..4cd487bc 100755 --- a/exercises/swift.sh +++ b/exercises/swift.sh @@ -40,6 +40,7 @@ CONTAINER=ex-swift # exercise is skipped. is_service_enabled swift || exit 55 + # Testing Swift # ============= From e62ba4d312e3bb9ea6d754b6267f57b793ad0f60 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 27 Jun 2012 22:07:34 -0500 Subject: [PATCH 564/967] Cleanup tools scripts * whitespace and comment cleanups only Change-Id: I4e631e9a9b8151758dc8c96b3aef76b963d5ea7e --- tools/build_bm.sh | 3 +++ tools/build_bm_multi.sh | 3 +++ tools/build_pxe_env.sh | 5 ++++- tools/build_ramdisk.sh | 18 +++++++++++------- tools/build_tempest.sh | 5 +++-- tools/build_uec.sh | 2 ++ tools/build_uec_ramdisk.sh | 9 ++++++--- tools/build_usb_boot.sh | 5 ++++- tools/configure_tempest.sh | 4 +++- tools/copy_dev_environment_to_uec.sh | 2 ++ tools/get_uec_image.sh | 9 ++++++--- tools/info.sh | 5 ++++- tools/install_openvpn.sh | 5 ++++- tools/warm_apts_and_pips_for_uec.sh | 2 ++ 14 files changed, 57 insertions(+), 20 deletions(-) diff --git a/tools/build_bm.sh b/tools/build_bm.sh index 44cf3030..b2d4c366 100755 --- a/tools/build_bm.sh +++ b/tools/build_bm.sh @@ -1,4 +1,7 @@ #!/usr/bin/env bash + +# **build_bm.sh** + # Build an OpenStack install on a bare metal machine. set +x diff --git a/tools/build_bm_multi.sh b/tools/build_bm_multi.sh index 133d5372..f1242ee4 100755 --- a/tools/build_bm_multi.sh +++ b/tools/build_bm_multi.sh @@ -1,4 +1,7 @@ #!/usr/bin/env bash + +# **build_bm_multi.sh** + # Build an OpenStack install on several bare metal machines. 
SHELL_AFTER_RUN=no diff --git a/tools/build_pxe_env.sh b/tools/build_pxe_env.sh index d01dad0d..e6f98b4b 100755 --- a/tools/build_pxe_env.sh +++ b/tools/build_pxe_env.sh @@ -1,5 +1,8 @@ #!/bin/bash -e -# build_pxe_env.sh - Create a PXE boot environment + +# **build_pxe_env.sh** + +# Create a PXE boot environment # # build_pxe_env.sh destdir # diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh index 7c1600b1..8e2c0be9 100755 --- a/tools/build_ramdisk.sh +++ b/tools/build_ramdisk.sh @@ -1,7 +1,10 @@ #!/bin/bash -# build_ramdisk.sh - Build RAM disk images -# exit on error to stop unexpected errors +# **build_ramdisk.sh** + +# Build RAM disk images + +# Exit on error to stop unexpected errors set -o errexit if [ ! "$#" -eq "1" ]; then @@ -84,7 +87,7 @@ fi # Finds the next available NBD device # Exits script if error connecting or none free # map_nbd image -# returns full nbd device path +# Returns full nbd device path function map_nbd { for i in `seq 0 15`; do if [ ! -e /sys/block/nbd$i/pid ]; then @@ -105,7 +108,7 @@ function map_nbd { echo $NBD } -# prime image with as many apt/pips as we can +# Prime image with as many apt/pips as we can DEV_FILE=$CACHEDIR/$DIST_NAME-dev.img DEV_FILE_TMP=`mktemp $DEV_FILE.XXXXXX` if [ ! -r $DEV_FILE ]; then @@ -127,11 +130,11 @@ if [ ! -r $DEV_FILE ]; then mkdir -p $MNTDIR/$DEST chroot $MNTDIR chown stack $DEST - # a simple password - pass + # A simple password - pass echo stack:pass | chroot $MNTDIR chpasswd echo root:$ROOT_PASSWORD | chroot $MNTDIR chpasswd - # and has sudo ability (in the future this should be limited to only what + # And has sudo ability (in the future this should be limited to only what # stack requires) echo "stack ALL=(ALL) NOPASSWD: ALL" >> $MNTDIR/etc/sudoers @@ -143,7 +146,8 @@ if [ ! -r $DEV_FILE ]; then fi rm -f $DEV_FILE_TMP -# clone git repositories onto the system + +# Clone git repositories onto the system # ====================================== IMG_FILE_TMP=`mktemp $IMG_FILE.XXXXXX` diff --git a/tools/build_tempest.sh b/tools/build_tempest.sh index 230e8f9b..e72355c9 100755 --- a/tools/build_tempest.sh +++ b/tools/build_tempest.sh @@ -1,7 +1,8 @@ #!/usr/bin/env bash # -# build_tempest.sh - Checkout and prepare a Tempest repo -# (https://github.com/openstack/tempest.git) +# **build_tempest.sh** + +# Checkout and prepare a Tempest repo: https://github.com/openstack/tempest.git function usage { echo "$0 - Check out and prepare a Tempest repo" diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 35a4d6db..48819c95 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# **build_uec.sh** + # Make sure that we have the proper version of ubuntu (only works on oneiric) if ! egrep -q "oneiric" /etc/lsb-release; then echo "This script only works with ubuntu oneiric." diff --git a/tools/build_uec_ramdisk.sh b/tools/build_uec_ramdisk.sh index 32f90c05..150ecabd 100755 --- a/tools/build_uec_ramdisk.sh +++ b/tools/build_uec_ramdisk.sh @@ -1,7 +1,10 @@ #!/usr/bin/env bash -# build_uec_ramdisk.sh - Build RAM disk images based on UEC image -# exit on error to stop unexpected errors +# **build_uec_ramdisk.sh** + +# Build RAM disk images based on UEC image + +# Exit on error to stop unexpected errors set -o errexit if [ ! 
"$#" -eq "1" ]; then @@ -58,7 +61,7 @@ DIST_NAME=${DIST_NAME:-oneiric} # Configure how large the VM should be GUEST_SIZE=${GUEST_SIZE:-2G} -# exit on error to stop unexpected errors +# Exit on error to stop unexpected errors set -o errexit set -o xtrace diff --git a/tools/build_usb_boot.sh b/tools/build_usb_boot.sh index cca2a681..f64b7b68 100755 --- a/tools/build_usb_boot.sh +++ b/tools/build_usb_boot.sh @@ -1,5 +1,8 @@ #!/bin/bash -e -# build_usb_boot.sh - Create a syslinux boot environment + +# **build_usb_boot.sh** + +# Create a syslinux boot environment # # build_usb_boot.sh destdev # diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 2c069343..bfb552dc 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -1,6 +1,8 @@ #!/usr/bin/env bash # -# configure_tempest.sh - Build a tempest configuration file from devstack +# **configure_tempest.sh** + +# Build a tempest configuration file from devstack echo "**************************************************" echo "Configuring Tempest" diff --git a/tools/copy_dev_environment_to_uec.sh b/tools/copy_dev_environment_to_uec.sh index d5687dc1..683a0d6a 100755 --- a/tools/copy_dev_environment_to_uec.sh +++ b/tools/copy_dev_environment_to_uec.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# **copy_dev_environment_to_uec.sh** + # Echo commands set -o xtrace diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh index 09630740..ca74a030 100755 --- a/tools/get_uec_image.sh +++ b/tools/get_uec_image.sh @@ -1,5 +1,8 @@ #!/bin/bash -# get_uec_image.sh - Prepare Ubuntu UEC images + +# **get_uec_image.sh** + +# Download and prepare Ubuntu UEC images CACHEDIR=${CACHEDIR:-/opt/stack/cache} ROOTSIZE=${ROOTSIZE:-2000} @@ -11,12 +14,12 @@ TOP_DIR=$(cd $TOOLS_DIR/..; pwd) # Import common functions . $TOP_DIR/functions -# exit on error to stop unexpected errors +# Exit on error to stop unexpected errors set -o errexit set -o xtrace usage() { - echo "Usage: $0 - Fetch and prepare Ubuntu images" + echo "Usage: $0 - Download and prepare Ubuntu UEC images" echo "" echo "$0 [-r rootsize] release imagefile [kernel]" echo "" diff --git a/tools/info.sh b/tools/info.sh index edff617f..bdca06e8 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -1,5 +1,8 @@ #!/usr/bin/env bash -# info.sh - Produce a report on the state of devstack installs + +# **info.sh** + +# Produce a report on the state of devstack installs # # Output fields are separated with '|' chars # Output types are git,localrc,os,pip,pkg: diff --git a/tools/install_openvpn.sh b/tools/install_openvpn.sh index 44eee728..2f52aa14 100755 --- a/tools/install_openvpn.sh +++ b/tools/install_openvpn.sh @@ -1,5 +1,8 @@ #!/bin/bash -# install_openvpn.sh - Install OpenVPN and generate required certificates + +# **install_openvpn.sh** + +# Install OpenVPN and generate required certificates # # install_openvpn.sh --client name # install_openvpn.sh --server [name] diff --git a/tools/warm_apts_and_pips_for_uec.sh b/tools/warm_apts_and_pips_for_uec.sh index 23a28dec..fe389ffe 100755 --- a/tools/warm_apts_and_pips_for_uec.sh +++ b/tools/warm_apts_and_pips_for_uec.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# **warm_apts_and_pips_for_uec.sh** + # Echo commands set -o xtrace From 55576d6f84a9e7ff33803203315fa59130c158fd Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 28 Jun 2012 17:25:54 +0100 Subject: [PATCH 565/967] enable xtrace for keystone_data.sh This is useful for troubleshooting keystone errors. 
Change-Id: I9b50e88e30d71afd17549d42f04f07a95815906e --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index ade87105..00d1d7cd 100755 --- a/stack.sh +++ b/stack.sh @@ -2024,7 +2024,7 @@ if is_service_enabled key; then SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \ DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \ - bash $FILES/keystone_data.sh + bash -x $FILES/keystone_data.sh # create an access key and secret key for nova ec2 register image if is_service_enabled swift && is_service_enabled nova; then From 7e27051ccb125de4e419cc9b6d33528ca66b9774 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 14 Jun 2012 15:23:24 -0500 Subject: [PATCH 566/967] Move get_packages() to functions. This is a prerequisite to fixing the tools/build_uec* scripts to properly install prereq packages. Change-Id: I1c60f7b9a9d07076841d9aff524c0833dc987c66 --- functions | 85 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 80 --------------------------------------------------- 2 files changed, 85 insertions(+), 80 deletions(-) diff --git a/functions b/functions index a3e95370..8cf7c74b 100644 --- a/functions +++ b/functions @@ -73,6 +73,91 @@ function get_field() { } +# get_packages() collects a list of package names of any type from the +# prerequisite files in ``files/{apts|pips}``. The list is intended +# to be passed to a package installer such as apt or pip. +# +# Only packages required for the services in ENABLED_SERVICES will be +# included. Two bits of metadata are recognized in the prerequisite files: +# - ``# NOPRIME`` defers installation to be performed later in stack.sh +# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection +# of the package to the distros listed. The distro names are case insensitive. +# +# get_packages dir +function get_packages() { + local package_dir=$1 + local file_to_parse + local service + + if [[ -z "$package_dir" ]]; then + echo "No package directory supplied" + return 1 + fi + if [[ -z "$DISTRO" ]]; then + echo "No distro set in DISTRO" + return 1 + fi + for service in general ${ENABLED_SERVICES//,/ }; do + # Allow individual services to specify dependencies + if [[ -e ${package_dir}/${service} ]]; then + file_to_parse="${file_to_parse} $service" + fi + # NOTE(sdague) n-api needs glance for now because that's where + # glance client is + if [[ $service == n-api ]]; then + if [[ ! $file_to_parse =~ nova ]]; then + file_to_parse="${file_to_parse} nova" + fi + if [[ ! $file_to_parse =~ glance ]]; then + file_to_parse="${file_to_parse} glance" + fi + elif [[ $service == c-* ]]; then + if [[ ! $file_to_parse =~ cinder ]]; then + file_to_parse="${file_to_parse} cinder" + fi + elif [[ $service == n-* ]]; then + if [[ ! $file_to_parse =~ nova ]]; then + file_to_parse="${file_to_parse} nova" + fi + elif [[ $service == g-* ]]; then + if [[ ! $file_to_parse =~ glance ]]; then + file_to_parse="${file_to_parse} glance" + fi + elif [[ $service == key* ]]; then + if [[ ! 
$file_to_parse =~ keystone ]]; then + file_to_parse="${file_to_parse} keystone" + fi + fi + done + + for file in ${file_to_parse}; do + local fname=${package_dir}/${file} + local OIFS line package distros distro + [[ -e $fname ]] || continue + + OIFS=$IFS + IFS=$'\n' + for line in $(<${fname}); do + if [[ $line =~ "NOPRIME" ]]; then + continue + fi + + if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then + # We are using BASH regexp matching feature. + package=${BASH_REMATCH[1]} + distros=${BASH_REMATCH[2]} + # In bash ${VAR,,} will lowecase VAR + [[ ${distros,,} =~ ${DISTRO,,} ]] && echo $package + continue + fi + + echo ${line%#*} + done + IFS=$OIFS + done +} + + # Determine OS Vendor, Release and Update # Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora # Returns results in global variables: diff --git a/stack.sh b/stack.sh index ade87105..13e74c4f 100755 --- a/stack.sh +++ b/stack.sh @@ -614,86 +614,6 @@ fi # # Openstack uses a fair number of other projects. -# get_packages() collects a list of package names of any type from the -# prerequisite files in ``files/{apts|pips}``. The list is intended -# to be passed to a package installer such as apt or pip. -# -# Only packages required for the services in ENABLED_SERVICES will be -# included. Two bits of metadata are recognized in the prerequisite files: -# - ``# NOPRIME`` defers installation to be performed later in stack.sh -# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection -# of the package to the distros listed. The distro names are case insensitive. -# -# get_packages dir -function get_packages() { - local package_dir=$1 - local file_to_parse - local service - - if [[ -z "$package_dir" ]]; then - echo "No package directory supplied" - return 1 - fi - for service in general ${ENABLED_SERVICES//,/ }; do - # Allow individual services to specify dependencies - if [[ -e ${package_dir}/${service} ]]; then - file_to_parse="${file_to_parse} $service" - fi - # NOTE(sdague) n-api needs glance for now because that's where - # glance client is - if [[ $service == n-api ]]; then - if [[ ! $file_to_parse =~ nova ]]; then - file_to_parse="${file_to_parse} nova" - fi - if [[ ! $file_to_parse =~ glance ]]; then - file_to_parse="${file_to_parse} glance" - fi - elif [[ $service == c-* ]]; then - if [[ ! $file_to_parse =~ cinder ]]; then - file_to_parse="${file_to_parse} cinder" - fi - elif [[ $service == n-* ]]; then - if [[ ! $file_to_parse =~ nova ]]; then - file_to_parse="${file_to_parse} nova" - fi - elif [[ $service == g-* ]]; then - if [[ ! $file_to_parse =~ glance ]]; then - file_to_parse="${file_to_parse} glance" - fi - elif [[ $service == key* ]]; then - if [[ ! $file_to_parse =~ keystone ]]; then - file_to_parse="${file_to_parse} keystone" - fi - fi - done - - for file in ${file_to_parse}; do - local fname=${package_dir}/${file} - local OIFS line package distros distro - [[ -e $fname ]] || continue - - OIFS=$IFS - IFS=$'\n' - for line in $(<${fname}); do - if [[ $line =~ "NOPRIME" ]]; then - continue - fi - - if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then - # We are using BASH regexp matching feature. 
- package=${BASH_REMATCH[1]} - distros=${BASH_REMATCH[2]} - # In bash ${VAR,,} will lowecase VAR - [[ ${distros,,} =~ ${DISTRO,,} ]] && echo $package - continue - fi - - echo ${line%#*} - done - IFS=$OIFS - done -} - # install package requirements if [[ "$os_PACKAGE" = "deb" ]]; then apt_get update From 5cc2129c4b160b3d03c5514abc9cda1ca263071c Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 28 Jun 2012 17:50:28 -0500 Subject: [PATCH 567/967] Fix keystone auth failures Set up environment credentials after keystone is initialized Fixes bug 1019056 Change-Id: Ifd0080f102e08c1f5517a08681ac277709f191c0 --- stack.sh | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index ade87105..db829a40 100755 --- a/stack.sh +++ b/stack.sh @@ -2026,11 +2026,17 @@ if is_service_enabled key; then DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \ bash $FILES/keystone_data.sh + # Set up auth creds now that keystone is bootstrapped + export OS_AUTH_URL=$SERVICE_ENDPOINT + export OS_TENANT_NAME=admin + export OS_USERNAME=admin + export OS_PASSWORD=$ADMIN_PASSWORD + # create an access key and secret key for nova ec2 register image if is_service_enabled swift && is_service_enabled nova; then NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1) NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1) - CREDS=$(keystone ec2-credentials-create --user $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID) + CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID) ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') add_nova_opt "s3_access_key=$ACCESS_KEY" @@ -2108,9 +2114,7 @@ if is_service_enabled g-reg; then # Create a directory for the downloaded image tarballs. mkdir -p $FILES/images - ADMIN_USER=admin - ADMIN_TENANT=admin - TOKEN=$(keystone --os_tenant_name $ADMIN_TENANT --os_username $ADMIN_USER --os_password $ADMIN_PASSWORD --os_auth_url http://$HOST_IP:5000/v2.0 token-get | grep ' id ' | get_field 2) + TOKEN=$(keystone token-get | grep ' id ' | get_field 2) # Option to upload legacy ami-tty, which works with xenserver if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then From 9604b7427199a3dd65f67f7f32b423cdac4479ff Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Fri, 29 Jun 2012 15:04:19 +0200 Subject: [PATCH 568/967] Stop avoiding calling systemctl for tgtd on Fedora The bug that lead to this workaround was fixed and an update is available for Fedora: https://bugzilla.redhat.com/show_bug.cgi?id=797913 Keeping this workaround would be annoying as --skip-redirect doesn't exist on all rpm-based distributions, and we'd have to remember using it everywhere we interact with the tgtd service. 
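For illustration, the distro-agnostic direction this takes is a small wrapper that hides how a given platform restarts services; this is a minimal sketch only, and the real restart_service helper in functions may differ in detail:

    # Restart a system service regardless of packaging flavour
    function restart_service() {
        local name=$1
        if [[ "$os_PACKAGE" = "deb" ]]; then
            sudo /usr/sbin/service $name restart
        else
            sudo /sbin/service $name restart
        fi
    }

    restart_service tgtd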
Change-Id: I22a5d4fe154ea04bd35d89db6d63734b5bd405aa --- stack.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 77ef642f..18878272 100755 --- a/stack.sh +++ b/stack.sh @@ -1735,8 +1735,7 @@ elif is_service_enabled n-vol; then sudo stop tgt || true sudo start tgt else - # bypass redirection to systemctl during restart - sudo /sbin/service --skip-redirect tgtd restart + restart_service tgtd fi fi From 5119f6b8b75307e4f1fa764c0c56d3953a18e2ed Mon Sep 17 00:00:00 2001 From: Adam Young Date: Wed, 27 Jun 2012 21:23:38 -0400 Subject: [PATCH 569/967] Setup PKI for Keystone Required for http://wiki.openstack.org/PKI specifically Delegation and Scaling section when complete, you should see certificates and keys in /etc/keystone/ssl The important files are: /etc/keystone/ssl/private/signing_key.pem /etc/keystone/ssl/certs/ca.pem /etc/keystone/ssl/certs/signing_cert.pem Change-Id: Iece395413af767042235d6e1cc0421879a810044 --- stack.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stack.sh b/stack.sh index eb302cdb..b21989b2 100755 --- a/stack.sh +++ b/stack.sh @@ -1928,6 +1928,8 @@ if is_service_enabled key; then # Set up the keystone database $KEYSTONE_DIR/bin/keystone-manage db_sync + # set up certificates + $KEYSTONE_DIR/bin/keystone-manage pki_setup # launch keystone and wait for it to answer before continuing screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" From 70edafc9d396db3f47de24d4b5224268396360f0 Mon Sep 17 00:00:00 2001 From: John Garbutt Date: Tue, 3 Jul 2012 10:24:08 +0100 Subject: [PATCH 570/967] Fix bug 1020474 by correcting substitutions in configure_tempest.sh This is caused by the following commit in tempest: f38eaace9cb39d98b1203bd4c86b33d20501b7a3 Change-Id: I0cf6b49e81b696411a0c01f66c2b89508dc701b9 --- tools/configure_tempest.sh | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 2c069343..7a720455 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -162,9 +162,13 @@ sed -e " s,%IMAGE_HOST%,$IMAGE_HOST,g; s,%IMAGE_PORT%,$IMAGE_PORT,g; s,%IMAGE_API_VERSION%,$IMAGE_API_VERSION,g; - s,%ADMIN_USERNAME%,$ADMIN_USERNAME,g; - s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g; - s,%ADMIN_TENANT_NAME%,$ADMIN_TENANT_NAME,g; + s,%COMPUTE_ADMIN_USERNAME%,$ADMIN_USERNAME,g; + s,%COMPUTE_ADMIN_PASSWORD%,$ADMIN_PASSWORD,g; + s,%COMPUTE_ADMIN_TENANT_NAME%,$ADMIN_TENANT_NAME,g; + s,%IDENTITY_ADMIN_USERNAME%,$ADMIN_USERNAME,g; + s,%IDENTITY_ADMIN_PASSWORD%,$ADMIN_PASSWORD,g; + s,%IDENTITY_ADMIN_TENANT_NAME%,$ADMIN_TENANT_NAME,g; + s,%COMPUTE_ALLOW_TENANT_ISOLATION%,true,g; " -i $TEMPEST_CONF echo "Created tempest configuration file:" From 1cdf5fa4b5cef19672786ef3073fae8faf4172e4 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 3 Jul 2012 13:57:39 -0700 Subject: [PATCH 571/967] Register cinder when using sql keystone catalog. 
* Fixes bug #1020735 Change-Id: Ibd954cdda3c855800ae1080950c62dd71b094a01 --- files/keystone_data.sh | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 78860725..ba14a47f 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -283,4 +283,16 @@ if [[ "$ENABLED_SERVICES" =~ "cinder" ]]; then keystone user-role-add --tenant_id $SERVICE_TENANT \ --user_id $CINDER_USER \ --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + CINDER_SERVICE=$(get_id keystone service-create \ + --name=cinder \ + --type=volume \ + --description="Cinder Service") + keystone endpoint-create \ + --region RegionOne \ + --service_id $CINDER_SERVICE \ + --publicurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ + --adminurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ + --internalurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" + fi fi From 03086e18b4ea63e7b3a398e7edead425087410f2 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Wed, 4 Jul 2012 12:27:42 +0200 Subject: [PATCH 572/967] Add tar to list of packages to install It turns out a minimal Fedora installation doesn't have tar by default. Since we use tar when uploading the default image to glance, we need to make sure it's installed. And since it might be useful for other very generic tasks DevStack might do, just always install it (by putting it in the general list of packages). Change-Id: I4dae3b43a700d80b8752a5b846a6ce302fadb751 --- files/apts/general | 1 + files/rpms/general | 1 + 2 files changed, 2 insertions(+) diff --git a/files/apts/general b/files/apts/general index 31fa7527..f04f9556 100644 --- a/files/apts/general +++ b/files/apts/general @@ -17,3 +17,4 @@ wget curl tcpdump euca2ools # only for testing client +tar diff --git a/files/rpms/general b/files/rpms/general index af199d54..52184d00 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -9,6 +9,7 @@ python-pip python-unittest2 python-virtualenv screen +tar tcpdump unzip wget From ea6b2d3f3249b766fffb460d28b5feef110858fc Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Wed, 4 Jul 2012 16:24:47 +0100 Subject: [PATCH 573/967] add TENANT_ARG to swift command line. Fix bug 1020985. 
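The change below leans on a common shell idiom: an optional argument kept in a variable that is expanded unquoted, so it contributes nothing to the command line when empty. A hypothetical illustration, with the flag name used only as a placeholder:

    TENANT_ARG=""
    if [[ -n "$OS_TENANT_NAME" ]]; then
        TENANT_ARG="--os-tenant-name $OS_TENANT_NAME"
    fi
    # When TENANT_ARG is empty the command is simply "swift $ARGS stat"
    swift $TENANT_ARG $ARGS stat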
Change-Id: I51733d37a50159a4b4872d61fa768d5bba3d6a03 --- exercises/client-args.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/client-args.sh b/exercises/client-args.sh index 1d7d5b6c..5e8d6f7c 100755 --- a/exercises/client-args.sh +++ b/exercises/client-args.sh @@ -113,7 +113,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then STATUS_SWIFT="Skipped" else echo -e "\nTest Swift" - if swift $ARGS stat; then + if swift $TENANT_ARG $ARGS stat; then STATUS_SWIFT="Succeeded" else STATUS_SWIFT="Failed" From d63044662501e9ef3bb6c4593dff789ef13eec5b Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Thu, 5 Jul 2012 16:58:00 -0400 Subject: [PATCH 574/967] Add Eric Windisch to AUTHORS Change-Id: Iaf0a90a3fff99c012d5093a30098329996ae800a --- AUTHORS | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS b/AUTHORS index b5f972fd..566ded0f 100644 --- a/AUTHORS +++ b/AUTHORS @@ -10,6 +10,7 @@ Dean Troyer Devin Carlen Eddie Hebert Eoghan Glynn +Eric Windisch Gabriel Hurley Hengqing Hu Hua ZHANG From e29b94e26eb3fd98f18cd9b6a8f765bf4e241f54 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Tue, 12 Jun 2012 14:21:18 +0200 Subject: [PATCH 575/967] Do not pass -U to useradd The -U option doesn't have the same meaning on all distributions, unfortunately: in some cases, it means "create a group for the user with the same name", and in others, it means "default umask for the user". Instead, manually create a stack group with groupadd. Change-Id: I32f4c0603785e54a465c2d3b47a1852b8635fde0 --- stack.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index f71283e1..e0134765 100755 --- a/stack.sh +++ b/stack.sh @@ -171,9 +171,13 @@ if [[ $EUID -eq 0 ]]; then else rpm -qa | grep sudo || install_package sudo fi + if ! getent group stack >/dev/null; then + echo "Creating a group called stack" + groupadd stack + fi if ! getent passwd stack >/dev/null; then echo "Creating a user called stack" - useradd -U -s /bin/bash -d $DEST -m stack + useradd -g stack -s /bin/bash -d $DEST -m stack fi echo "Giving stack user passwordless sudo priviledges" From 7d28a0e1fceae826b4c25fbd4236bcb547ade92d Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 27 Jun 2012 17:55:52 -0500 Subject: [PATCH 576/967] Cleanup stack/unstack * whitespace and comment cleanups only Change-Id: I31b3057657bb3bc5c4e085886ef6ac561967706f --- stack.sh | 57 ++++++++++++++++++++++++++++++------------------------ unstack.sh | 4 +++- 2 files changed, 35 insertions(+), 26 deletions(-) diff --git a/stack.sh b/stack.sh index f71283e1..513f8be4 100755 --- a/stack.sh +++ b/stack.sh @@ -245,7 +245,6 @@ sudo chown `whoami` $DATA_DIR # Get project function libraries source $TOP_DIR/lib/cinder - # Set the destination directories for openstack projects NOVA_DIR=$DEST/nova HORIZON_DIR=$DEST/horizon @@ -463,9 +462,9 @@ fi GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292} -# SWIFT +# Swift # ----- -# TODO: implement glance support + # TODO: add logging to different location. # By default the location of swift drives and objects is located inside @@ -477,7 +476,7 @@ SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DEST}/data/swift} # directory, change SWIFT_CONFIG_DIR if you want to adjust that. SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift} -# devstack will create a loop-back disk formatted as XFS to store the +# DevStack will create a loop-back disk formatted as XFS to store the # swift data. By default the disk size is 1 gigabyte. 
The variable # SWIFT_LOOPBACK_DISK_SIZE specified in bytes allow you to change # that. @@ -512,6 +511,7 @@ fi # Set default port for nova-objectstore S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333} + # Keystone # -------- @@ -609,10 +609,10 @@ set -o xtrace # Install Packages # ================ -# + # Openstack uses a fair number of other projects. -# install package requirements +# Install package requirements if [[ "$os_PACKAGE" = "deb" ]]; then apt_get update install_package $(get_packages $FILES/apts) @@ -620,12 +620,13 @@ else install_package $(get_packages $FILES/rpms) fi -# install python requirements +# Install python requirements pip_install $(get_packages $FILES/pips | sort -u) -# compute service +# Check out OpenStack sources git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH -# python client library to nova that horizon (and others) use + +# Check out the client libs that are used most git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH @@ -678,7 +679,7 @@ fi # Initialization # ============== -# setup our checkouts so they are installed into python path +# Set up our checkouts so they are installed into python path # allowing ``import nova`` or ``import glance.client`` setup_develop $KEYSTONECLIENT_DIR setup_develop $NOVACLIENT_DIR @@ -874,16 +875,17 @@ function screen_it { fi } -# create a new named screen to run processes in +# Create a new named screen to run processes in screen -d -m -S stack -t stack -s /bin/bash sleep 1 -# set a reasonable statusbar +# Set a reasonable statusbar screen -r stack -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" + # Horizon # ------- -# Setup the django horizon application to serve via apache/wsgi +# Set up the django horizon application to serve via apache/wsgi if is_service_enabled horizon; then @@ -900,7 +902,7 @@ if is_service_enabled horizon; then python manage.py syncdb cd $TOP_DIR - # create an empty directory that apache uses as docroot + # Create an empty directory that apache uses as docroot sudo mkdir -p $HORIZON_DIR/.blackhole if [[ "$os_PACKAGE" = "deb" ]]; then @@ -1007,8 +1009,10 @@ if is_service_enabled g-reg; then fi -# Quantum (for controller or agent nodes) + +# Quantum # ------- + if is_service_enabled quantum; then # Put config files in /etc/quantum for everyone to find if [[ ! -d /etc/quantum ]]; then @@ -1034,7 +1038,7 @@ if is_service_enabled quantum; then exit 1 fi - # if needed, move config file from $QUANTUM_DIR/etc/quantum to /etc/quantum + # If needed, move config file from $QUANTUM_DIR/etc/quantum to /etc/quantum mkdir -p /$Q_PLUGIN_CONF_PATH Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME if [[ -e $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE ]]; then @@ -1143,7 +1147,6 @@ if is_service_enabled m-svc; then fi - # Nova # ---- @@ -1279,7 +1282,7 @@ if is_service_enabled n-cpu; then QEMU_CONF=/etc/libvirt/qemu.conf if is_service_enabled quantum && [[ $Q_PLUGIN = "openvswitch" ]] && ! 
sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then - # add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces + # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces sudo chmod 666 $QEMU_CONF sudo cat <> /etc/libvirt/qemu.conf cgroup_device_acl = [ @@ -1363,7 +1366,10 @@ if is_service_enabled n-net; then sudo sysctl -w net.ipv4.ip_forward=1 fi + # Storage Service +# --------------- + if is_service_enabled swift; then # Install memcached for swift. install_package memcached @@ -1663,7 +1669,7 @@ function add_nova_opt { echo "$1" >> $NOVA_CONF_DIR/$NOVA_CONF } -# remove legacy nova.conf +# Remove legacy nova.conf rm -f $NOVA_DIR/bin/nova.conf # (re)create nova.conf @@ -1924,7 +1930,7 @@ if is_service_enabled key; then iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG" iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production" - # Set up the keystone database + # Initialize keystone database $KEYSTONE_DIR/bin/keystone-manage db_sync # launch keystone and wait for it to answer before continuing @@ -1950,7 +1956,7 @@ if is_service_enabled key; then export OS_USERNAME=admin export OS_PASSWORD=$ADMIN_PASSWORD - # create an access key and secret key for nova ec2 register image + # Create an access key and secret key for nova ec2 register image if is_service_enabled swift && is_service_enabled nova; then NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1) NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1) @@ -1963,7 +1969,7 @@ if is_service_enabled key; then fi fi -# launch the nova-api and wait for it to answer before continuing +# Launch the nova-api and wait for it to answer before continuing if is_service_enabled n-api; then add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS" screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api" @@ -1977,13 +1983,13 @@ fi # If we're using Quantum (i.e. q-svc is enabled), network creation has to # happen after we've started the Quantum service. if is_service_enabled mysql && is_service_enabled nova; then - # create a small network + # Create a small network $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS - # create some floating ips + # Create some floating ips $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE - # create a second pool + # Create a second pool $NOVA_DIR/bin/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL fi @@ -2012,6 +2018,7 @@ screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF is_service_enabled swift || \ screen_it n-obj "cd $NOVA_DIR && $NOVA_DIR/bin/nova-objectstore" + # Install Images # ============== diff --git a/unstack.sh b/unstack.sh index 879f8424..641d34e4 100755 --- a/unstack.sh +++ b/unstack.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash -# + +# **unstack.sh** + # Stops that which is started by ``stack.sh`` (mostly) # mysql and rabbit are left running as OpenStack code refreshes # do not require them to be restarted. From 1214d9d05a1f61752f8723ceefa8d55e890268dc Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 6 Jul 2012 09:39:07 -0500 Subject: [PATCH 577/967] Define DEST in stackrc Move the initial definition of DEST so it is available in localrc and the tools/*.sh scripts. 
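A related point when moving a definition like this between rc files is the difference between a plain assignment and a default expansion; the latter only fills in a value when the caller has not already set one. A minimal illustration, not taken from this patch:

    DEST=/opt/stack              # unconditional: overrides anything set earlier
    DEST=${DEST:-/opt/stack}     # default only: respects a value set in localrc
    echo "Installing into $DEST"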
Change-Id: Ifc1b3cf3c3cd7b732007e83e74710f14f20f7624 --- stackrc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stackrc b/stackrc index 3a19cdb0..55101de9 100644 --- a/stackrc +++ b/stackrc @@ -1,6 +1,9 @@ # Find the other rc files RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) +# Destination path for installation +DEST=/opt/stack + # Specify which services to launch. These generally correspond to # screen tabs. If you like to add other services that are not enabled # by default you can append them in your ENABLED_SERVICES variable in From a548fc9740a78a997da70e2e76f23e5028ccd32a Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Fri, 6 Jul 2012 10:10:10 -0400 Subject: [PATCH 578/967] update list of files for git to ignore These files are all created in the course of running devstack and openstack but should not be tracked as part of the devstack sources. Change-Id: Ia3939c6e20f8d6fadaa18d2f3619361dc9375696 Signed-off-by: Doug Hellmann --- .gitignore | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitignore b/.gitignore index 83c54197..c5744b3b 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,10 @@ proto *~ .*.sw[nop] *.log +*.log.[1-9] src localrc local.sh +files/*.gz +files/images +stack-screenrc From f04178fd12731a58c899c309ad43f55d339909c5 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Thu, 5 Jul 2012 17:10:03 -0400 Subject: [PATCH 579/967] add functions to manipulate ENABLED_SERVICES Editing ENABLED_SERVICES directly can get tricky when the user wants to disable something. This patch includes two new functions for adding or removing services safely, and a third (for completeness) to clear the settings entirely before adding a minimal set of services. It also moves the logic for dealing with "negated" services into a function so it can be tested and applied by the new functions for manipulating ENABLED_SERVICES. Change-Id: I88f205f3666b86e6f0b6a94e0ec32a26c4bc6873 Signed-off-by: Doug Hellmann --- AUTHORS | 1 + README.md | 5 ++- functions | 71 ++++++++++++++++++++++++++++++++++ openrc | 3 ++ stack.sh | 13 ++----- tests/functions.sh | 96 ++++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 178 insertions(+), 11 deletions(-) diff --git a/AUTHORS b/AUTHORS index b5f972fd..67120f6c 100644 --- a/AUTHORS +++ b/AUTHORS @@ -8,6 +8,7 @@ Chmouel Boudjnah Dan Prince Dean Troyer Devin Carlen +Doug hellmann Eddie Hebert Eoghan Glynn Gabriel Hurley diff --git a/README.md b/README.md index cfcfe7c3..ed9d9d12 100644 --- a/README.md +++ b/README.md @@ -61,11 +61,12 @@ You can override environment variables used in `stack.sh` by creating file name Swift is not installed by default, you can enable easily by adding this to your `localrc`: - ENABLED_SERVICE="$ENABLED_SERVICES,swift" + enable_service swift If you want a minimal Swift install with only Swift and Keystone you can have this instead in your `localrc`: - ENABLED_SERVICES="key,mysql,swift" + disable_all_services + enable_service key mysql swift If you use Swift with Keystone, Swift will authenticate against it. You will need to make sure to use the Keystone URL to auth against. 
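As a concrete localrc fragment matching the text above (service names are the usual DevStack ones):

    # Minimal Swift plus Keystone install
    disable_all_services
    enable_service key mysql swift

    # Or keep the defaults but drop nova-volume
    ENABLED_SERVICES+=",-n-vol"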
diff --git a/functions b/functions index 8cf7c74b..a22d8b76 100644 --- a/functions +++ b/functions @@ -1,3 +1,4 @@ +# -*- mode: Shell-script -*- # functions - Common functions used by DevStack components # # ENABLED_SERVICES is used by is_service_enabled() @@ -349,6 +350,76 @@ function is_service_enabled() { return 1 } +# remove extra commas from the input string (ENABLED_SERVICES) +function _cleanup_service_list () { + echo "$1" | sed -e ' + s/,,/,/g; + s/^,//; + s/,$// + ' +} + +# enable_service() adds the services passed as argument to the +# **ENABLED_SERVICES** list, if they are not already present. +# +# For example: +# +# enable_service n-vol +# +# This function does not know about the special cases +# for nova, glance, and quantum built into is_service_enabled(). +function enable_service() { + local tmpsvcs="${ENABLED_SERVICES}" + for service in $@; do + if ! is_service_enabled $service; then + tmpsvcs+=",$service" + fi + done + ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") + disable_negated_services +} + +# disable_service() removes the services passed as argument to the +# **ENABLED_SERVICES** list, if they are present. +# +# For example: +# +# disable_service n-vol +# +# This function does not know about the special cases +# for nova, glance, and quantum built into is_service_enabled(). +function disable_service() { + local tmpsvcs=",${ENABLED_SERVICES}," + local service + for service in $@; do + if is_service_enabled $service; then + tmpsvcs=${tmpsvcs//,$service,/,} + fi + done + ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") +} + +# disable_all_services() removes all current services +# from **ENABLED_SERVICES** to reset the configuration +# before a minimal installation +function disable_all_services() { + ENABLED_SERVICES="" +} + +# We are looking for services with a - at the beginning to force +# excluding those services. For example if you want to install all the default +# services but not nova-volume (n-vol) you can have this set in your localrc : +# ENABLED_SERVICES+=",-n-vol" +function disable_negated_services() { + local tmpsvcs="${ENABLED_SERVICES}" + local service + for service in ${tmpsvcs//,/ }; do + if [[ ${service} == -* ]]; then + tmpsvcs=$(echo ${tmpsvcs}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g") + fi + done + ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") +} # Distro-agnostic package installer # install_package package [package ...] diff --git a/openrc b/openrc index be7850b5..4430e829 100644 --- a/openrc +++ b/openrc @@ -20,6 +20,9 @@ fi # Find the other rc files RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) +# Import common functions +source $RC_DIR/functions + # Load local configuration source $RC_DIR/stackrc diff --git a/stack.sh b/stack.sh index 513f8be4..1ee70a6d 100755 --- a/stack.sh +++ b/stack.sh @@ -89,15 +89,10 @@ DEST=${DEST:-/opt/stack} # Sanity Check # ============ -# We are looking for services with a - at the beginning to force -# excluding those services. For example if you want to install all the default -# services but not nova-volume (n-vol) you can have this set in your localrc : -# ENABLED_SERVICES+=",-n-vol" -for service in ${ENABLED_SERVICES//,/ }; do - if [[ ${service} == -* ]]; then - ENABLED_SERVICES=$(echo ${ENABLED_SERVICES}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g") - fi -done +# Remove services which were negated in ENABLED_SERVICES +# using the "-" prefix (e.g., "-n-vol") instead of +# calling disable_service(). 
+disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` diff --git a/tests/functions.sh b/tests/functions.sh index e436ed97..f111a48d 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -143,3 +143,99 @@ else fi rm test.ini + +# Enabling/disabling services + +echo "Testing enable_service()" + +function test_enable_service() { + local start="$1" + local add="$2" + local finish="$3" + + ENABLED_SERVICES="$start" + enable_service $add + if [ "$ENABLED_SERVICES" = "$finish" ] + then + echo "OK: $start + $add -> $ENABLED_SERVICES" + else + echo "changing $start to $finish with $add failed: $ENABLED_SERVICES" + fi +} + +test_enable_service '' a 'a' +test_enable_service 'a' b 'a,b' +test_enable_service 'a,b' c 'a,b,c' +test_enable_service 'a,b' c 'a,b,c' +test_enable_service 'a,b,' c 'a,b,c' +test_enable_service 'a,b' c,d 'a,b,c,d' +test_enable_service 'a,b' "c d" 'a,b,c,d' +test_enable_service 'a,b,c' c 'a,b,c' + +test_enable_service 'a,b,-c' c 'a,b' +test_enable_service 'a,b,c' -c 'a,b' + +function test_disable_service() { + local start="$1" + local del="$2" + local finish="$3" + + ENABLED_SERVICES="$start" + disable_service "$del" + if [ "$ENABLED_SERVICES" = "$finish" ] + then + echo "OK: $start - $del -> $ENABLED_SERVICES" + else + echo "changing $start to $finish with $del failed: $ENABLED_SERVICES" + fi +} + +echo "Testing disable_service()" +test_disable_service 'a,b,c' a 'b,c' +test_disable_service 'a,b,c' b 'a,c' +test_disable_service 'a,b,c' c 'a,b' + +test_disable_service 'a,b,c' a 'b,c' +test_disable_service 'b,c' b 'c' +test_disable_service 'c' c '' +test_disable_service '' d '' + +test_disable_service 'a,b,c,' c 'a,b' +test_disable_service 'a,b' c 'a,b' + + +echo "Testing disable_all_services()" +ENABLED_SERVICES=a,b,c +disable_all_services + +if [[ -z "$ENABLED_SERVICES" ]] +then + echo "OK" +else + echo "disabling all services FAILED: $ENABLED_SERVICES" +fi + +echo "Testing disable_negated_services()" + + +function test_disable_negated_services() { + local start="$1" + local finish="$2" + + ENABLED_SERVICES="$start" + disable_negated_services + if [ "$ENABLED_SERVICES" = "$finish" ] + then + echo "OK: $start + $add -> $ENABLED_SERVICES" + else + echo "changing $start to $finish failed: $ENABLED_SERVICES" + fi +} + +test_disable_negated_services '-a' '' +test_disable_negated_services '-a,a' '' +test_disable_negated_services '-a,-a' '' +test_disable_negated_services 'a,-a' '' +test_disable_negated_services 'b,a,-a' 'b' +test_disable_negated_services 'a,b,-a' 'b' +test_disable_negated_services 'a,-a,b' 'b' From 3edd5b41bcb969bc6f8403b03ae41700701fac1c Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Sun, 8 Jul 2012 21:04:12 -0400 Subject: [PATCH 580/967] Adds new parameter interpolations for tempest config Tempest recently added a number of new config variables and this patch merely adds those variables to the template replacement process in tools/configure_tempest.sh This needs to go in before this: https://review.openstack.org/#/c/8738/2 Additionally, this patch sets the build_interval to 3 and the build_timeout to 400. This faster status check interval should allow Tempest to run a bit quicker in the Jenkins gate job. 
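The mechanism behind all of these settings is a single sed pass over a copy of the template, replacing %NAME% placeholders with shell variables. A stripped-down sketch of the same technique, assuming a tempest.conf.tpl template sitting next to the generated file:

    cp tempest.conf.tpl tempest.conf
    BUILD_INTERVAL=3
    BUILD_TIMEOUT=400
    sed -e "
        s,%BUILD_INTERVAL%,$BUILD_INTERVAL,g;
        s,%BUILD_TIMEOUT%,$BUILD_TIMEOUT,g;
    " -i tempest.conf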
Change-Id: I8835ccdee6af58da66c6327ddae8a05b6789d454 --- tools/configure_tempest.sh | 51 ++++++++++++++++++++++++++++---------- 1 file changed, 38 insertions(+), 13 deletions(-) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index bb995f8c..22a8c43b 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -95,9 +95,13 @@ fi # copy every time, because the image UUIDS are going to change cp $TEMPEST_CONF.tpl $TEMPEST_CONF -ADMIN_USERNAME=${ADMIN_USERNAME:-admin} -ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete} -ADMIN_TENANT_NAME=${ADMIN_TENANT:-admin} +COMPUTE_ADMIN_USERNAME=${ADMIN_USERNAME:-admin} +COMPUTE_ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete} +COMPUTE_ADMIN_TENANT_NAME=${ADMIN_TENANT:-admin} + +IDENTITY_ADMIN_USERNAME=${ADMIN_USERNAME:-admin} +IDENTITY_ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete} +IDENTITY_ADMIN_TENANT_NAME=${ADMIN_TENANT:-admin} IDENTITY_USE_SSL=${IDENTITY_USE_SSL:-False} IDENTITY_HOST=${IDENTITY_HOST:-127.0.0.1} @@ -107,6 +111,7 @@ IDENTITY_API_VERSION="v2.0" # Note: need v for now... # from the Tempest configuration file entirely... IDENTITY_PATH=${IDENTITY_PATH:-tokens} IDENTITY_STRATEGY=${IDENTITY_STRATEGY:-keystone} +IDENTITY_CATALOG_TYPE=identity # We use regular, non-admin users in Tempest for the USERNAME # substitutions and use ADMIN_USERNAME et al for the admin stuff. @@ -128,15 +133,26 @@ FLAVOR_REF_ALT=2 # Do any of the following need to be configurable? COMPUTE_CATALOG_TYPE=compute COMPUTE_CREATE_IMAGE_ENABLED=True +COMPUTE_ALLOW_TENANT_ISOLATION=True COMPUTE_RESIZE_AVAILABLE=False # not supported with QEMU... COMPUTE_LOG_LEVEL=ERROR -BUILD_INTERVAL=10 -BUILD_TIMEOUT=600 +BUILD_INTERVAL=3 +BUILD_TIMEOUT=400 +RUN_SSH=True +SSH_USER=$OS_USERNAME +NETWORK_FOR_SSH=private +IP_VERSION_FOR_SSH=4 +SSH_TIMEOUT=4 # Image test configuration options... 
IMAGE_HOST=${IMAGE_HOST:-127.0.0.1} IMAGE_PORT=${IMAGE_PORT:-9292} -IMAGE_API_VERSION="1" +IMAGE_API_VERSION=1 +IMAGE_CATALOG_TYPE=image + +# Network API test configuration +NETWORK_CATALOG_TYPE=network +NETWORK_API_VERSION=2.0 sed -e " s,%IDENTITY_USE_SSL%,$IDENTITY_USE_SSL,g; @@ -145,6 +161,7 @@ sed -e " s,%IDENTITY_API_VERSION%,$IDENTITY_API_VERSION,g; s,%IDENTITY_PATH%,$IDENTITY_PATH,g; s,%IDENTITY_STRATEGY%,$IDENTITY_STRATEGY,g; + s,%IDENTITY_CATALOG_TYPE%,$IDENTITY_CATALOG_TYPE,g; s,%USERNAME%,$OS_USERNAME,g; s,%PASSWORD%,$OS_PASSWORD,g; s,%TENANT_NAME%,$OS_TENANT_NAME,g; @@ -152,11 +169,17 @@ sed -e " s,%ALT_PASSWORD%,$ALT_PASSWORD,g; s,%ALT_TENANT_NAME%,$ALT_TENANT_NAME,g; s,%COMPUTE_CATALOG_TYPE%,$COMPUTE_CATALOG_TYPE,g; + s,%COMPUTE_ALLOW_TENANT_ISOLATION%,$COMPUTE_ALLOW_TENANT_ISOLATION,g; s,%COMPUTE_CREATE_IMAGE_ENABLED%,$COMPUTE_CREATE_IMAGE_ENABLED,g; s,%COMPUTE_RESIZE_AVAILABLE%,$COMPUTE_RESIZE_AVAILABLE,g; s,%COMPUTE_LOG_LEVEL%,$COMPUTE_LOG_LEVEL,g; s,%BUILD_INTERVAL%,$BUILD_INTERVAL,g; s,%BUILD_TIMEOUT%,$BUILD_TIMEOUT,g; + s,%RUN_SSH%,$RUN_SSH,g; + s,%SSH_USER%,$SSH_USER,g; + s,%NETWORK_FOR_SSH%,$NETWORK_FOR_SSH,g; + s,%IP_VERSION_FOR_SSH%,$IP_VERSION_FOR_SSH,g; + s,%SSH_TIMEOUT%,$SSH_TIMEOUT,g; s,%IMAGE_ID%,$IMAGE_UUID,g; s,%IMAGE_ID_ALT%,$IMAGE_UUID_ALT,g; s,%FLAVOR_REF%,$FLAVOR_REF,g; @@ -164,13 +187,15 @@ sed -e " s,%IMAGE_HOST%,$IMAGE_HOST,g; s,%IMAGE_PORT%,$IMAGE_PORT,g; s,%IMAGE_API_VERSION%,$IMAGE_API_VERSION,g; - s,%COMPUTE_ADMIN_USERNAME%,$ADMIN_USERNAME,g; - s,%COMPUTE_ADMIN_PASSWORD%,$ADMIN_PASSWORD,g; - s,%COMPUTE_ADMIN_TENANT_NAME%,$ADMIN_TENANT_NAME,g; - s,%IDENTITY_ADMIN_USERNAME%,$ADMIN_USERNAME,g; - s,%IDENTITY_ADMIN_PASSWORD%,$ADMIN_PASSWORD,g; - s,%IDENTITY_ADMIN_TENANT_NAME%,$ADMIN_TENANT_NAME,g; - s,%COMPUTE_ALLOW_TENANT_ISOLATION%,true,g; + s,%IMAGE_CATALOG_TYPE%,$IMAGE_CATALOG_TYPE,g; + s,%COMPUTE_ADMIN_USERNAME%,$COMPUTE_ADMIN_USERNAME,g; + s,%COMPUTE_ADMIN_PASSWORD%,$COMPUTE_ADMIN_PASSWORD,g; + s,%COMPUTE_ADMIN_TENANT_NAME%,$COMPUTE_ADMIN_TENANT_NAME,g; + s,%IDENTITY_ADMIN_USERNAME%,$IDENTITY_ADMIN_USERNAME,g; + s,%IDENTITY_ADMIN_PASSWORD%,$IDENTITY_ADMIN_PASSWORD,g; + s,%IDENTITY_ADMIN_TENANT_NAME%,$IDENTITY_ADMIN_TENANT_NAME,g; + s,%NETWORK_CATALOG_TYPE%,$NETWORK_CATALOG_TYPE,g; + s,%NETWORK_API_VERSION%,$NETWORK_API_VERSION,g; " -i $TEMPEST_CONF echo "Created tempest configuration file:" From 8156062dc4f0e8338590dcb74cfade5af31daa8a Mon Sep 17 00:00:00 2001 From: Gabriel Hurley Date: Sun, 8 Jul 2012 20:03:55 -0700 Subject: [PATCH 581/967] Run syncdb command without user input. Change-Id: Ic21455e640ece9d77409a2589c531d124f7d2a1b --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 513f8be4..87865dcf 100755 --- a/stack.sh +++ b/stack.sh @@ -899,7 +899,7 @@ if is_service_enabled horizon; then # Initialize the horizon database (it stores sessions and notices shown to # users). The user system is external (keystone). 
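# Illustrative aside on the change just below: Django's syncdb normally stops
# to ask whether a superuser should be created on a fresh database; passing
# --noinput suppresses every interactive prompt so the install can run
# unattended, e.g.
#     python manage.py syncdb --noinput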
cd $HORIZON_DIR - python manage.py syncdb + python manage.py syncdb --noinput cd $TOP_DIR # Create an empty directory that apache uses as docroot From a9e0a488cf72431aabc972f7aac76abb56aaba02 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 9 Jul 2012 14:07:23 -0500 Subject: [PATCH 582/967] Update info.sh * Works properly on Fedora 17 now, possibly other RPM-based distros * Add GetDistro() function taken from logic in stack.sh * Source functions in tools/info.sh * Use GetDistro() and get_packages() in tools/info.sh * Report all installed pips * Don't sort localrc output as order is important Change-Id: I1b3e48e94786378c7313a0a6bea88d5cf9d0f0c0 --- functions | 18 +++++++ stack.sh | 15 +----- tools/info.sh | 142 +++++++++++++++----------------------------------- 3 files changed, 61 insertions(+), 114 deletions(-) diff --git a/functions b/functions index 8cf7c74b..db104633 100644 --- a/functions +++ b/functions @@ -223,6 +223,24 @@ GetOSVersion() { } +# Translate the OS version values into common nomenclature +# Sets ``DISTRO`` from the ``os_*`` values +function GetDistro() { + GetOSVersion + if [[ "$os_VENDOR" =~ (Ubuntu) ]]; then + # 'Everyone' refers to Ubuntu releases by the code name adjective + DISTRO=$os_CODENAME + elif [[ "$os_VENDOR" =~ (Fedora) ]]; then + # For Fedora, just use 'f' and the release + DISTRO="f$os_RELEASE" + else + # Catch-all for now is Vendor + Release + Update + DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE" + fi + export DISTRO +} + + # git clone only if directory doesn't exist already. Since ``DEST`` might not # be owned by the installation user, we create the directory and change the # ownership to the proper user. diff --git a/stack.sh b/stack.sh index 513f8be4..ebeec52f 100755 --- a/stack.sh +++ b/stack.sh @@ -26,19 +26,8 @@ source $TOP_DIR/functions # Determine what system we are running on. This provides ``os_VENDOR``, # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` -GetOSVersion - -# Translate the OS version values into common nomenclature -if [[ "$os_VENDOR" =~ (Ubuntu) ]]; then - # 'Everyone' refers to Ubuntu releases by the code name adjective - DISTRO=$os_CODENAME -elif [[ "$os_VENDOR" =~ (Fedora) ]]; then - # For Fedora, just use 'f' and the release - DISTRO="f$os_RELEASE" -else - # Catch-all for now is Vendor + Release + Update - DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE" -fi +# and ``DISTRO`` +GetDistro # Settings diff --git a/tools/info.sh b/tools/info.sh index bdca06e8..bf40e827 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -28,6 +28,9 @@ TOOLS_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=$(cd $TOOLS_DIR/..; pwd) cd $TOP_DIR +# Import common functions +source $TOP_DIR/functions + # Source params source $TOP_DIR/stackrc @@ -38,6 +41,21 @@ if [[ ! -d $FILES ]]; then exit 1 fi + +# OS +# -- + +# Determine what OS we're using +GetDistro + +echo "os|distro=$DISTRO" +echo "os|vendor=$os_VENDOR" +echo "os|release=$os_RELEASE" +if [ -n "$os_UPDATE" ]; then + echo "os|version=$os_UPDATE" +fi + + # Repos # ----- @@ -62,123 +80,44 @@ for i in $DEST/*; do fi done -# OS -# -- - -GetOSInfo() { - # Figure out which vedor we are - if [ -r /etc/lsb-release ]; then - . /etc/lsb-release - VENDORNAME=$DISTRIB_ID - RELEASE=$DISTRIB_RELEASE - else - for r in RedHat CentOS Fedora; do - VENDORPKG="`echo $r | tr [:upper:] [:lower:]`-release" - VENDORNAME=$r - RELEASE=`rpm -q --queryformat '%{VERSION}' $VENDORPKG` - if [ $? 
= 0 ]; then - break - fi - VENDORNAME="" - done - # Get update level - if [ -n "`grep Update /etc/redhat-release`" ]; then - # Get update - UPDATE=`cat /etc/redhat-release | sed s/.*Update\ // | sed s/\)$//` - else - # Assume update 0 - UPDATE=0 - fi - fi - - echo "os|vendor=$VENDORNAME" - echo "os|release=$RELEASE" - if [ -n "$UPDATE" ]; then - echo "os|version=$UPDATE" - fi -} - -GetOSInfo # Packages # -------- # - We are going to check packages only for the services needed. # - We are parsing the packages files and detecting metadatas. -# - If we have the meta-keyword dist:DISTRO or -# dist:DISTRO1,DISTRO2 it will be installed only for those -# distros (case insensitive). -function get_packages() { - local file_to_parse="general" - local service - - for service in ${ENABLED_SERVICES//,/ }; do - # Allow individual services to specify dependencies - if [[ -e $FILES/apts/${service} ]]; then - file_to_parse="${file_to_parse} $service" - fi - if [[ $service == n-* ]]; then - if [[ ! $file_to_parse =~ nova ]]; then - file_to_parse="${file_to_parse} nova" - fi - elif [[ $service == g-* ]]; then - if [[ ! $file_to_parse =~ glance ]]; then - file_to_parse="${file_to_parse} glance" - fi - elif [[ $service == key* ]]; then - if [[ ! $file_to_parse =~ keystone ]]; then - file_to_parse="${file_to_parse} keystone" - fi - fi - done - - for file in ${file_to_parse}; do - local fname=${FILES}/apts/${file} - local OIFS line package distros distro - [[ -e $fname ]] || { echo "missing: $fname"; exit 1; } - - OIFS=$IFS - IFS=$'\n' - for line in $(<${fname}); do - if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then # We are using BASH regexp matching feature. - package=${BASH_REMATCH[1]} - distros=${BASH_REMATCH[2]} - for distro in ${distros//,/ }; do #In bash ${VAR,,} will lowecase VAR - [[ ${distro,,} == ${DISTRO,,} ]] && echo $package - done - continue - fi - - echo ${line%#*} - done - IFS=$OIFS - done -} -for p in $(get_packages); do - ver=$(dpkg -s $p 2>/dev/null | grep '^Version: ' | cut -d' ' -f2) +if [[ "$os_PACKAGE" = "deb" ]]; then + PKG_DIR=$FILES/apts +else + PKG_DIR=$FILES/rpms +fi + +for p in $(get_packages $PKG_DIR); do + if [[ "$os_PACKAGE" = "deb" ]]; then + ver=$(dpkg -s $p 2>/dev/null | grep '^Version: ' | cut -d' ' -f2) + else + ver=$(rpm -q --queryformat "%{VERSION}-%{RELEASE}\n" $p) + fi echo "pkg|${p}|${ver}" done + # Pips # ---- -function get_pips() { - cat $FILES/pips/* | uniq -} +if [[ "$os_PACKAGE" = "deb" ]]; then + CMD_PIP=/usr/bin/pip +else + CMD_PIP=/usr/bin/pip-python +fi # Pip tells us what is currently installed FREEZE_FILE=$(mktemp --tmpdir freeze.XXXXXX) -pip freeze >$FREEZE_FILE 2>/dev/null +$CMD_PIP freeze >$FREEZE_FILE 2>/dev/null # Loop through our requirements and look for matches -for p in $(get_pips); do - [[ "$p" = "-e" ]] && continue - if [[ "$p" =~ \+?([^#]*)#? 
]]; then - # Get the URL from a remote reference - p=${BASH_REMATCH[1]} - fi - line="`grep -i $p $FREEZE_FILE`" +while read line; do if [[ -n "$line" ]]; then if [[ "$line" =~ \+(.*)@(.*)#egg=(.*) ]]; then # Handle URLs @@ -199,10 +138,11 @@ for p in $(get_pips); do #echo "unknown: $p" continue fi -done +done <$FREEZE_FILE rm $FREEZE_FILE + # localrc # ------- @@ -212,5 +152,5 @@ if [[ -r $TOP_DIR/localrc ]]; then /PASSWORD/d; /^#/d; s/^/localrc\|/; - ' $TOP_DIR/localrc | sort + ' $TOP_DIR/localrc fi From 6a3912de8589d505a019e9630ce930a4cf5f7354 Mon Sep 17 00:00:00 2001 From: Evgeniy Afonichev Date: Tue, 10 Jul 2012 14:02:43 +0300 Subject: [PATCH 583/967] Add git update tag support Change-Id: I5ce1f05186d05b9cf0ccd74708af926ba054d2f0 --- functions | 47 +++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 39 insertions(+), 8 deletions(-) diff --git a/functions b/functions index 8cf7c74b..5d3a481c 100644 --- a/functions +++ b/functions @@ -222,6 +222,30 @@ GetOSVersion() { export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME } +# git update using reference as a branch. +function git_update_branch() { + + GIT_BRANCH=$1 + + git checkout -f origin/$GIT_BRANCH + # a local branch might not exist + git branch -D $GIT_BRANCH || true + git checkout -b $GIT_BRANCH +} + + +# git update using reference as a tag. Be careful editing source at that repo +# as working copy will be in a detached mode +function git_update_tag() { + + GIT_TAG=$1 + + git tag -d $GIT_TAG + # fetching given tag only + git fetch origin tag $GIT_TAG + git checkout -f $GIT_TAG +} + # git clone only if directory doesn't exist already. Since ``DEST`` might not # be owned by the installation user, we create the directory and change the @@ -235,16 +259,16 @@ function git_clone { GIT_REMOTE=$1 GIT_DEST=$2 - GIT_BRANCH=$3 + GIT_REF=$3 - if echo $GIT_BRANCH | egrep -q "^refs"; then + if echo $GIT_REF | egrep -q "^refs"; then # If our branch name is a gerrit style refs/changes/... if [[ ! -d $GIT_DEST ]]; then [[ "$ERROR_ON_CLONE" = "True" ]] && exit 1 git clone $GIT_REMOTE $GIT_DEST fi cd $GIT_DEST - git fetch $GIT_REMOTE $GIT_BRANCH && git checkout FETCH_HEAD + git fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD else # do a full clone only if the directory doesn't exist if [[ ! -d $GIT_DEST ]]; then @@ -252,7 +276,7 @@ function git_clone { git clone $GIT_REMOTE $GIT_DEST cd $GIT_DEST # This checkout syntax works for both branches and tags - git checkout $GIT_BRANCH + git checkout $GIT_REF elif [[ "$RECLONE" == "yes" ]]; then # if it does exist then simulate what clone does if asked to RECLONE cd $GIT_DEST @@ -263,10 +287,17 @@ function git_clone { # (due to the py files having older timestamps than our pyc, so python # thinks the pyc files are correct using them) find $GIT_DEST -name '*.pyc' -delete - git checkout -f origin/$GIT_BRANCH - # a local branch might not exist - git branch -D $GIT_BRANCH || true - git checkout -b $GIT_BRANCH + + # handle GIT_REF accordingly to type (tag, branch) + if [[ -n "`git show-ref refs/tags/$GIT_REF`" ]]; then + git_update_tag $GIT_REF + elif [[ -n "`git show-ref refs/heads/$GIT_REF`" ]]; then + git_update_branch $GIT_REF + else + echo $GIT_REF is neither branch nor tag + exit 1 + fi + fi fi } From e0e91d2c697522083827abc0f20637ab01b28e2d Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 11 Jul 2012 09:35:42 -0400 Subject: [PATCH 584/967] convert from connection_type to compute_driver connection_type is deprecated for Folsom, now using compute_driver to specify virt driver. 
This makes the change so that devstack uses the prefered way. Change-Id: I35c5ce64dc329121a61b888d44c05e3c70c6aecc --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 87865dcf..f12eca94 100755 --- a/stack.sh +++ b/stack.sh @@ -1803,7 +1803,7 @@ done if [ "$VIRT_DRIVER" = 'xenserver' ]; then read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." - add_nova_opt "connection_type=xenapi" + add_nova_opt "compute_driver=xenapi.XenAPIDriver" XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"} XENAPI_USER=${XENAPI_USER:-"root"} add_nova_opt "xenapi_connection_url=$XENAPI_CONNECTION_URL" @@ -1814,7 +1814,7 @@ if [ "$VIRT_DRIVER" = 'xenserver' ]; then XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} add_nova_opt "firewall_driver=$XEN_FIREWALL_DRIVER" else - add_nova_opt "connection_type=libvirt" + add_nova_opt "compute_driver=libvirt.LibvirtDriver" LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER" fi From 6ae9ea595b346a6ce0d37a8104b67405293c8411 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 5 Jul 2012 06:50:51 +0000 Subject: [PATCH 585/967] Disable swift3 by default. - Disable swift3 by default but add a new service `swift3` to enable it. - Fixes bug 1021150. Change-Id: I6b635008659e1a77fdfce364dfceaca593273c54 --- README.md | 2 +- stack.sh | 52 +++++++++++++++++++++++++++++++++------------------- 2 files changed, 34 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index cfcfe7c3..b860afd9 100644 --- a/README.md +++ b/README.md @@ -69,7 +69,7 @@ If you want a minimal Swift install with only Swift and Keystone you can have th If you use Swift with Keystone, Swift will authenticate against it. You will need to make sure to use the Keystone URL to auth against. -Swift will be acting as a S3 endpoint for Keystone so effectively replacing the `nova-objectstore`. +If you are enabling `swift3` in `ENABLED_SERVICES` devstack will install the swift3 middleware emulation. Swift will be configured to act as a S3 endpoint for Keystone so effectively replacing the `nova-objectstore`. Only Swift proxy server is launched in the screen session all other services are started in background and managed by `swift-init` tool. diff --git a/stack.sh b/stack.sh index 87865dcf..1e10b104 100755 --- a/stack.sh +++ b/stack.sh @@ -499,9 +499,11 @@ SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} SWIFT_REPLICAS=${SWIFT_REPLICAS:-3} if is_service_enabled swift; then - # If we are using swift, we can default the s3 port to swift instead + # If we are using swift3, we can default the s3 port to swift instead # of nova-objectstore - S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080} + if is_service_enabled swift3;then + S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080} + fi # We only ask for Swift Hash if we have enabled swift service. # SWIFT_HASH is a random unique string for a swift cluster that # can never change. 
@@ -642,8 +644,10 @@ if is_service_enabled swift; then git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH # storage service client and and Library git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH - # swift3 middleware to provide S3 emulation to Swift - git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH + if is_service_enabled swift3; then + # swift3 middleware to provide S3 emulation to Swift + git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH + fi fi if is_service_enabled g-api n-api; then # image catalog service @@ -1449,11 +1453,15 @@ if is_service_enabled swift; then sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync fi + if is_service_enabled swift3;then + swift_auth_server="s3token " + fi + # By default Swift will be installed with the tempauth middleware # which has some default username and password if you have # configured keystone it will checkout the directory. if is_service_enabled key; then - swift_auth_server="s3token authtoken keystone" + swift_auth_server+="authtoken keystone" else swift_auth_server=tempauth fi @@ -1476,7 +1484,10 @@ if is_service_enabled swift; then iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080} - iniset ${SWIFT_CONFIG_PROXY_SERVER} pipeline:main pipeline "catch_errors healthcheck cache ratelimit swift3 ${swift_auth_server} proxy-logging proxy-server" + # Only enable Swift3 if we have it enabled in ENABLED_SERVICES + is_service_enabled swift3 && swift3=swift3 || swift3="" + + iniset ${SWIFT_CONFIG_PROXY_SERVER} pipeline:main pipeline "catch_errors healthcheck cache ratelimit ${swift3} ${swift_auth_server} proxy-logging proxy-server" iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true @@ -1486,16 +1497,6 @@ if is_service_enabled swift; then paste.filter_factory = keystone.middleware.swift_auth:filter_factory operator_roles = Member,admin -# NOTE(chmou): s3token middleware is not updated yet to use only -# username and password. -[filter:s3token] -paste.filter_factory = keystone.middleware.s3_token:filter_factory -auth_port = ${KEYSTONE_AUTH_PORT} -auth_host = ${KEYSTONE_AUTH_HOST} -auth_protocol = ${KEYSTONE_AUTH_PROTOCOL} -auth_token = ${SERVICE_TOKEN} -admin_token = ${SERVICE_TOKEN} - [filter:authtoken] paste.filter_factory = keystone.middleware.auth_token:filter_factory auth_host = ${KEYSTONE_AUTH_HOST} @@ -1505,10 +1506,23 @@ auth_uri = ${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SE admin_tenant_name = ${SERVICE_TENANT_NAME} admin_user = swift admin_password = ${SERVICE_PASSWORD} +EOF + if is_service_enabled swift3;then + cat <>${SWIFT_CONFIG_PROXY_SERVER} +# NOTE(chmou): s3token middleware is not updated yet to use only +# username and password. 
+[filter:s3token] +paste.filter_factory = keystone.middleware.s3_token:filter_factory +auth_port = ${KEYSTONE_AUTH_PORT} +auth_host = ${KEYSTONE_AUTH_HOST} +auth_protocol = ${KEYSTONE_AUTH_PROTOCOL} +auth_token = ${SERVICE_TOKEN} +admin_token = ${SERVICE_TOKEN} [filter:swift3] use = egg:swift3#swift3 EOF + fi cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONFIG_DIR}/swift.conf iniset ${SWIFT_CONFIG_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH} @@ -1957,7 +1971,7 @@ if is_service_enabled key; then export OS_PASSWORD=$ADMIN_PASSWORD # Create an access key and secret key for nova ec2 register image - if is_service_enabled swift && is_service_enabled nova; then + if is_service_enabled swift3 && is_service_enabled nova; then NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1) NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1) CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID) @@ -2013,9 +2027,9 @@ fi screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log" screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v" -# Starting the nova-objectstore only if swift service is not enabled. +# Starting the nova-objectstore only if swift3 service is not enabled. # Swift will act as s3 objectstore. -is_service_enabled swift || \ +is_service_enabled swift3 || \ screen_it n-obj "cd $NOVA_DIR && $NOVA_DIR/bin/nova-objectstore" From 2a5f681be6f7a9fc1858ab5b9e20a9e20df1c696 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 12 Jul 2012 05:22:44 +0000 Subject: [PATCH 586/967] Use swift-init to kill swift processes. - That pkill wasn't working. Change-Id: Ibcb6fc6d9433dca08e0584ae811a2ca276cc469a --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 87865dcf..a568dfdc 100755 --- a/stack.sh +++ b/stack.sh @@ -1375,7 +1375,7 @@ if is_service_enabled swift; then install_package memcached # We make sure to kill all swift processes first - pkill -f -9 swift- + swift-init all stop || true # We first do a bit of setup by creating the directories and # changing the permissions so we can run it as our user. From e80c0be554a54337b6300c6429d037f61cfc90b0 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Thu, 12 Jul 2012 11:06:50 -0400 Subject: [PATCH 587/967] Set libvirt_cpu_mode=none in nova.conf. This patch sets the libvirt cpu mode to "none" unconditionally. Optimal CPU performance isn't really critical with devstack. Without this option, we are getting some devstack failures on some of the cloud providers used by jenkins. Fixes bug 1023513. 
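For illustration only (a hypothetical excerpt, not part of the patch): since add_nova_opt appends plain key=value lines to the generated nova.conf, a node built with this change would be expected to end up with something like the following, assuming the default LIBVIRT_TYPE of qemu:

    # excerpt of a generated nova.conf (hypothetical)
    libvirt_type=qemu
    libvirt_cpu_mode=none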
Change-Id: I01dc89a3edc6f118e248a965db858b26acbfeafe --- stack.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/stack.sh b/stack.sh index 87865dcf..f24d96d3 100755 --- a/stack.sh +++ b/stack.sh @@ -1726,6 +1726,7 @@ if [ -n "$FLAT_INTERFACE" ]; then fi add_nova_opt "sql_connection=$BASE_SQL_CONN/nova?charset=utf8" add_nova_opt "libvirt_type=$LIBVIRT_TYPE" +add_nova_opt "libvirt_cpu_mode=none" add_nova_opt "instance_name_template=${INSTANCE_NAME_PREFIX}%08x" # All nova-compute workers need to know the vnc configuration options # These settings don't hurt anything if n-xvnc and n-novnc are disabled From 4d5e29d906068a7a2ed8815b27c48c3b3622af76 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Fri, 13 Jul 2012 11:11:50 -0400 Subject: [PATCH 588/967] Allow user to override pip cache location Building a VM containing devstack is faster if you can save the cache files for pip in a location that is not deleted when the VM is rebuilt. This change allows the user to set the PIP_DOWNLOAD_CACHE in their localrc file to point to any directory writable by root, including a directory mounted from the host containing the VM. Change-Id: I27179c1c212140d6734f6725f498be52fd18e9ed Signed-off-by: Doug Hellmann --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 8cf7c74b..c53d3d7e 100644 --- a/functions +++ b/functions @@ -387,7 +387,7 @@ function pip_install { else CMD_PIP=/usr/bin/pip-python fi - sudo PIP_DOWNLOAD_CACHE=/var/cache/pip \ + sudo PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \ HTTP_PROXY=$http_proxy \ HTTPS_PROXY=$https_proxy \ $CMD_PIP install --use-mirrors $@ From 9313dfac0358503b679090aba3884937c7d1d813 Mon Sep 17 00:00:00 2001 From: Aaron Rosen Date: Fri, 6 Jul 2012 16:08:49 -0400 Subject: [PATCH 589/967] devstack support for v2 nova/quantum integration Adds support to use v1 or v2 quantum api via NOVA_USE_QUANTUM_API This fixes bug 1017760 Change-Id: Iaf77b830edc51fd4c820ddff9e1f482bc4c9904d --- AUTHORS | 1 + stack.sh | 93 +++++++++++++++++++++++++++++++++++++++++--------------- 2 files changed, 69 insertions(+), 25 deletions(-) diff --git a/AUTHORS b/AUTHORS index 67120f6c..c6c9b6a3 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,4 +1,5 @@ Aaron Lee +Aaron Rosen Adam Gandelman Andy Smith Anthony Young diff --git a/stack.sh b/stack.sh index 2c30bf84..64e4032d 100755 --- a/stack.sh +++ b/stack.sh @@ -264,6 +264,13 @@ Q_PLUGIN=${Q_PLUGIN:-openvswitch} Q_PORT=${Q_PORT:-9696} # Default Quantum Host Q_HOST=${Q_HOST:-localhost} +# Which Quantum API nova should use +NOVA_USE_QUANTUM_API=${NOVA_USE_QUANTUM_API:-v1} +# Default admin username +Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum} +# Default auth strategy +Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} + # Default Melange Port M_PORT=${M_PORT:-9898} @@ -375,6 +382,7 @@ PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT} PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-br100} FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} +NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28} NET_MAN=${NET_MAN:-FlatDHCPManager} EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST} @@ -1023,7 +1031,11 @@ if is_service_enabled quantum; then Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch Q_PLUGIN_CONF_FILENAME=ovs_quantum_plugin.ini Q_DB_NAME="ovs_quantum" - Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin" + if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then + 
Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin" + elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then + Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2" + fi elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then # Install deps # FIXME add to files/apts/quantum, but don't install if not needed! @@ -1031,7 +1043,11 @@ if is_service_enabled quantum; then Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini Q_DB_NAME="quantum_linux_bridge" - Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.LinuxBridgePlugin.LinuxBridgePlugin" + if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then + Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.LinuxBridgePlugin.LinuxBridgePlugin" + elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then + Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2" + fi else echo "Unknown Quantum plugin '$Q_PLUGIN'.. exiting" exit 1 @@ -1055,6 +1071,12 @@ if is_service_enabled quantum; then fi sudo sed -i -e "s/.*enable_tunneling = .*$/enable_tunneling = $OVS_ENABLE_TUNNELING/g" /$Q_PLUGIN_CONF_FILE fi + + if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then + iniset /$Q_PLUGIN_CONF_FILE AGENT target_v2_api False + elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then + iniset /$Q_PLUGIN_CONF_FILE AGENT target_v2_api True + fi fi # Quantum service (for controller node) @@ -1064,15 +1086,15 @@ if is_service_enabled q-svc; then Q_POLICY_FILE=/etc/quantum/policy.json if [[ -e $QUANTUM_DIR/etc/quantum.conf ]]; then - sudo mv $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE + sudo cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE fi if [[ -e $QUANTUM_DIR/etc/api-paste.ini ]]; then - sudo mv $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE + sudo cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE fi if [[ -e $QUANTUM_DIR/etc/policy.json ]]; then - sudo mv $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE + sudo cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE fi if is_service_enabled mysql; then @@ -1110,14 +1132,14 @@ if is_service_enabled q-agt; then sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int sudo sed -i -e "s/.*local_ip = .*/local_ip = $HOST_IP/g" /$Q_PLUGIN_CONF_FILE - AGENT_BINARY=$QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py + AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py" elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then # Start up the quantum <-> linuxbridge agent install_package bridge-utils #set the default network interface QUANTUM_LB_PRIVATE_INTERFACE=${QUANTUM_LB_PRIVATE_INTERFACE:-$GUEST_INTERFACE_DEFAULT} sudo sed -i -e "s/^physical_interface = .*$/physical_interface = $QUANTUM_LB_PRIVATE_INTERFACE/g" /$Q_PLUGIN_CONF_FILE - AGENT_BINARY=$QUANTUM_DIR/quantum/plugins/linuxbridge/agent/linuxbridge_quantum_agent.py + AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/linuxbridge/agent/linuxbridge_quantum_agent.py" fi # Start up the quantum agent screen_it q-agt "sudo python $AGENT_BINARY /$Q_PLUGIN_CONF_FILE -v" @@ -1694,15 +1716,27 @@ add_nova_opt "fixed_range=$FIXED_RANGE" add_nova_opt "s3_host=$SERVICE_HOST" add_nova_opt "s3_port=$S3_SERVICE_PORT" if is_service_enabled quantum; then - add_nova_opt "network_manager=nova.network.quantum.manager.QuantumManager" - add_nova_opt "quantum_connection_host=$Q_HOST" - add_nova_opt "quantum_connection_port=$Q_PORT" + if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then + add_nova_opt "network_manager=nova.network.quantum.manager.QuantumManager" + 
add_nova_opt "quantum_connection_host=$Q_HOST" + add_nova_opt "quantum_connection_port=$Q_PORT" + add_nova_opt "quantum_use_dhcp=True" + + if is_service_enabled melange; then + add_nova_opt "quantum_ipam_lib=nova.network.quantum.melange_ipam_lib" + add_nova_opt "use_melange_mac_generation=True" + add_nova_opt "melange_host=$M_HOST" + add_nova_opt "melange_port=$M_PORT" + fi - if is_service_enabled melange; then - add_nova_opt "quantum_ipam_lib=nova.network.quantum.melange_ipam_lib" - add_nova_opt "use_melange_mac_generation=True" - add_nova_opt "melange_host=$M_HOST" - add_nova_opt "melange_port=$M_PORT" + elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then + add_nova_opt "network_api_class=nova.network.quantumv2.api.API" + add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME" + add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD" + add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" + add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY" + add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME" + add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT" fi if [[ "$Q_PLUGIN" = "openvswitch" ]]; then @@ -1715,7 +1749,6 @@ if is_service_enabled quantum; then add_nova_opt "libvirt_vif_type=ethernet" add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER" add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER" - add_nova_opt "quantum_use_dhcp=True" else add_nova_opt "network_manager=nova.network.manager.$NET_MAN" fi @@ -1914,9 +1947,9 @@ if is_service_enabled key; then # Add quantum endpoints to service catalog if quantum is enabled if is_service_enabled quantum; then - echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG - echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG - echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG fi @@ -1995,14 +2028,24 @@ fi # If we're using Quantum (i.e. q-svc is enabled), network creation has to # happen after we've started the Quantum service. 
if is_service_enabled mysql && is_service_enabled nova; then - # Create a small network - $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS + if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then + # Create a small network + $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS + + # Create some floating ips + $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE - # Create some floating ips - $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE + # Create a second pool + $NOVA_DIR/bin/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL + elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then + TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1) - # Create a second pool - $NOVA_DIR/bin/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL + # Create a small network + NET_ID=$(quantum net-create --os_token $Q_ADMIN_USERNAME --os_url http://$Q_HOST:$Q_PORT --tenant_id $TENANT_ID net1 | grep ' id ' | get_field 2) + + # Create a subnet + quantum subnet-create --os_token $Q_ADMIN_USERNAME --os_url http://$Q_HOST:$Q_PORT --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE + fi fi # Launching nova-compute should be as simple as running ``nova-compute`` but From 7fc6dcd092d3bd638514403e888b05808a0ad911 Mon Sep 17 00:00:00 2001 From: John Garbutt Date: Tue, 3 Jul 2012 12:25:21 +0100 Subject: [PATCH 590/967] Fix bug 1020513 by downloading xenapi plugins from correct place Change-Id: I2fc9c55b444bac355347f0d53b5971d4b809fbff --- tools/xen/install_os_domU.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 352f63ac..19453c12 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -57,8 +57,9 @@ then fi # get nova -wget https://github.com/openstack/nova/zipball/master --no-check-certificate -unzip -o master -d ./nova +nova_zipball=$(echo $NOVA_REPO | sed "s:\.git$::;s:$:/zipball/$NOVA_BRANCH:g") +wget $nova_zipball -O nova-zipball --no-check-certificate +unzip -o nova-zipball -d ./nova # install xapi plugins XAPI_PLUGIN_DIR=/etc/xapi.d/plugins/ From 3b73df764686b592f84ea6da1d5f0c1ca37f2fed Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Mon, 16 Jul 2012 14:28:24 -0700 Subject: [PATCH 591/967] Don't setup swift3 if it's not being used. "setup_develop" was still being called for swift3, even if it's not in enabled services. This wraps that call in an "is_service_enbled" conditional so that stack.sh doesn't attempt to set it up if it's not enabled. - Fixes bug 1021150. Change-Id: I26ed1a1e73eee24da78580eda3b13e5d3eb65361 --- stack.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stack.sh b/stack.sh index f7663acd..b6d55aab 100755 --- a/stack.sh +++ b/stack.sh @@ -697,6 +697,8 @@ fi if is_service_enabled swift; then setup_develop $SWIFT_DIR setup_develop $SWIFTCLIENT_DIR +fi +if is_service_enabled swift3; then setup_develop $SWIFT3_DIR fi if is_service_enabled g-api n-api; then From bff593d9282742aa7e7f2feeb3f3862978a1ea99 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Mon, 16 Jul 2012 18:02:37 -0400 Subject: [PATCH 592/967] Devstack support for Quantum DHCP Fixes bug 1019462. A new screen q-dhcp is created for the agent output. 
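As a usage sketch (hypothetical, not part of the patch): the new agent is opt-in through ENABLED_SERVICES, so a localrc that wants the q-dhcp screen would enable it alongside the other Quantum services, for example:

    # localrc: enable the Quantum server, plugin agent and the new DHCP agent
    ENABLED_SERVICES+=,quantum,q-svc,q-agt,q-dhcp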
Change-Id: Ie7f21a5f76ea7988ce817568edac7222cf7063d1 --- AUTHORS | 1 + stack.sh | 31 +++++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/AUTHORS b/AUTHORS index 80d8942f..6141d675 100644 --- a/AUTHORS +++ b/AUTHORS @@ -14,6 +14,7 @@ Eddie Hebert Eoghan Glynn Eric Windisch Gabriel Hurley +Gary Kotton Hengqing Hu Hua ZHANG Jake Dahn diff --git a/stack.sh b/stack.sh index f7663acd..d3de7162 100755 --- a/stack.sh +++ b/stack.sh @@ -1145,6 +1145,37 @@ if is_service_enabled q-agt; then screen_it q-agt "sudo python $AGENT_BINARY /$Q_PLUGIN_CONF_FILE -v" fi +# Quantum DHCP +if is_service_enabled q-dhcp; then + AGENT_DHCP_BINARY="$QUANTUM_DIR/bin/quantum-dhcp-agent" + + Q_DHCP_CONF_FILE=/etc/quantum/dhcp_agent.ini + + if [[ -e $QUANTUM_DIR/etc/dhcp_agent.ini ]]; then + sudo cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE + fi + + # Set verbose + iniset $Q_DHCP_CONF_FILE DEFAULT verbose True + # Set debug + iniset $Q_DHCP_CONF_FILE DEFAULT debug True + + # Update database + iniset $Q_DHCP_CONF_FILE DEFAULT db_connection "mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/$Q_DB_NAME?charset=utf8" + iniset $Q_DHCP_CONF_FILE DEFAULT auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0" + iniset $Q_DHCP_CONF_FILE DEFAULT admin_tenant_name $SERVICE_TENANT_NAME + iniset $Q_DHCP_CONF_FILE DEFAULT admin_user $Q_ADMIN_USERNAME + iniset $Q_DHCP_CONF_FILE DEFAULT admin_password $SERVICE_PASSWORD + + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver + fi + # Start up the quantum agent + screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file=$Q_DHCP_CONF_FILE" +fi + # Melange service if is_service_enabled m-svc; then if is_service_enabled mysql; then From 345363023da70404180dc645538547a84fa326e2 Mon Sep 17 00:00:00 2001 From: Sascha Peilicke Date: Thu, 21 Jun 2012 16:09:27 +0200 Subject: [PATCH 593/967] Set a valid / unique SECRET_KEY others than the empty default. Django's default SECRET_KEY is an empty string, which is actually not secure. Use horizon.util.secret key to generate a unique key and store it securely. Change-Id: I7b6deed7db6136ee15ac7ea315019a5b78698f7d --- files/horizon_settings.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/files/horizon_settings.py b/files/horizon_settings.py index 1a6c17af..487c06ea 100644 --- a/files/horizon_settings.py +++ b/files/horizon_settings.py @@ -20,6 +20,10 @@ CACHE_BACKEND = 'dummy://' SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db' +# Set a secure and unique SECRET_KEY (the Django default is '') +from horizon.utils import secret_key +SECRET_KEY = secret_key.generate_or_read_from_file(os.path.join(LOCAL_PATH, '.secret_key_store')) + # Send email to the console by default EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # Or send them to /dev/null From 0230aa899bb6900ca5c343d6360e79e205ac8f2c Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 14 Jun 2012 08:51:01 +0200 Subject: [PATCH 594/967] Start tgtd service after installing the rpm The later call to tgtadm needs the daemon to be running, and the rpm package doesn't start it. Rebased and add a corresponding change for cinder. 
Change-Id: Ia72b935575ed8b7c635e8f8edeea835754b2d49f --- lib/cinder | 5 +++++ stack.sh | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/lib/cinder b/lib/cinder index f0715a4d..5b38719a 100644 --- a/lib/cinder +++ b/lib/cinder @@ -106,6 +106,11 @@ function init_cinder() { fi if sudo vgs $VOLUME_GROUP; then + if [[ "$os_PACKAGE" = "rpm" ]]; then + # RPM doesn't start the service + start_service tgtd + fi + # Remove iscsi targets sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true # Clean out existing volumes diff --git a/stack.sh b/stack.sh index d3de7162..e4b79448 100755 --- a/stack.sh +++ b/stack.sh @@ -1704,6 +1704,11 @@ elif is_service_enabled n-vol; then fi if sudo vgs $VOLUME_GROUP; then + if [[ "$os_PACKAGE" = "rpm" ]]; then + # RPM doesn't start the service + start_service tgtd + fi + # Remove nova iscsi targets sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true # Clean out existing volumes From 3aabdbad1d1b73144ad5aad522706dd473ff5bb4 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 3 Jul 2012 16:20:41 +0000 Subject: [PATCH 595/967] Enable delay_auth_decision for swift/auth_token. - Add delay_auth_decision=1 in swift proxy configuration to allow anonymous access or tempurl. Rebased Change-Id: I6a97d0ff6d24a3ffef56d3d426683e48206eff71 --- stack.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/stack.sh b/stack.sh index 974b2cf8..e19f2749 100755 --- a/stack.sh +++ b/stack.sh @@ -1556,6 +1556,7 @@ auth_uri = ${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SE admin_tenant_name = ${SERVICE_TENANT_NAME} admin_user = swift admin_password = ${SERVICE_PASSWORD} +delay_auth_decision = 1 EOF if is_service_enabled swift3;then cat <>${SWIFT_CONFIG_PROXY_SERVER} From 722fe6732a2165bdf610d6dc10060a3285e534d7 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Wed, 18 Jul 2012 07:43:01 -0400 Subject: [PATCH 596/967] Ensure that dnsmasq is terminated when running unstack.sh If the Quantum dhcp-agent is enabled then we ensure that the dnsmasq process is termated when running unstack.sh Change-Id: I0aee8b806c61dff42400a3b8552b7c748e9c5adf --- unstack.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/unstack.sh b/unstack.sh index 641d34e4..6a55a0a1 100755 --- a/unstack.sh +++ b/unstack.sh @@ -63,3 +63,8 @@ if [[ -n "$UNSTACK_ALL" ]]; then stop_service mysql fi fi + +# Quantum dhcp agent runs dnsmasq +if is_service_enabled q-dhcp; then + sudo kill -9 $(ps aux | awk '/[d]nsmasq.+interface=tap/ { print $2 }') +fi From 91b0acdefff432edf0021b00fe535637ecb8d789 Mon Sep 17 00:00:00 2001 From: Zhang Hua Date: Mon, 2 Jul 2012 13:57:53 +0800 Subject: [PATCH 597/967] Allow wget to handle Non-English output The stack.sh has a loop to wait for keystone to start. But the output of wget tool has been globalized which means it won't return the English word 'refused' on Non-English environment even the keystone is not up. So the script will assume the keystone has been started and continue. The command of keystone tenant-create always failed immediately after skipping this loop since it require keystone to be started to authenticate. That's why you can see authentication error in the log because the tenant information was not correctly set up. 
Fix bug 1019942 Change-Id: Icc2bd9551e235173683e3677880c2641faee62a9 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index a035f550..98af10a1 100755 --- a/stack.sh +++ b/stack.sh @@ -2008,7 +2008,7 @@ if is_service_enabled key; then # launch keystone and wait for it to answer before continuing screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while http_proxy= wget -O- $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ 2>&1 | grep -q 'refused'; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ >/dev/null; do sleep 1; done"; then echo "keystone did not start" exit 1 fi From 9cb1776a9206dcc4279c9d7c3b587d246b54ae1c Mon Sep 17 00:00:00 2001 From: Eoghan Glynn Date: Sun, 15 Jul 2012 10:22:45 +0100 Subject: [PATCH 598/967] 2.5x increase in default volume backing file size The current default volume backing file size of ~2Gb seem unfeasibly small and precludes running exercises that require multiple volumes and/or snapshots to be created. We increase this to ~5Gb. Change-Id: Id6b8d122be919d1b0041288e81b735af21958ff7 --- lib/cinder | 3 +-- stack.sh | 3 +-- stackrc | 3 +++ 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/cinder b/lib/cinder index f0715a4d..cd64aaf8 100644 --- a/lib/cinder +++ b/lib/cinder @@ -93,11 +93,10 @@ function init_cinder() { # volume group, create your own volume group called ``stack-volumes`` before # invoking ``stack.sh``. # - # By default, the backing file is 2G in size, and is stored in ``/opt/stack/data``. + # By default, the backing file is 5G in size, and is stored in ``/opt/stack/data``. if ! sudo vgs $VOLUME_GROUP; then VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file} - VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M} # Only create if the file doesn't already exists [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` diff --git a/stack.sh b/stack.sh index 2c30bf84..dafe6641 100755 --- a/stack.sh +++ b/stack.sh @@ -1638,11 +1638,10 @@ elif is_service_enabled n-vol; then # volume group, create your own volume group called ``stack-volumes`` before # invoking ``stack.sh``. # - # By default, the backing file is 2G in size, and is stored in ``/opt/stack/data``. + # By default, the backing file is 5G in size, and is stored in ``/opt/stack/data``. if ! 
sudo vgs $VOLUME_GROUP; then VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file} - VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M} # Only create if the file doesn't already exists [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` diff --git a/stackrc b/stackrc index 3a19cdb0..badde686 100644 --- a/stackrc +++ b/stackrc @@ -120,3 +120,6 @@ esac if [ -f $RC_DIR/localrc ]; then source $RC_DIR/localrc fi + +# 5Gb default volume backing file size +VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-5130M} From 6a57f2649d6d8b28c8fa1a03c8b5eb8b8b6789aa Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 14 Jun 2012 10:07:42 +0200 Subject: [PATCH 599/967] Use latin1 character set when creating the nova database Nova expects the latin1 character set to be used in its database by default and then alters the database to utf8 when upgrading the database (in 082_essex.py). Nova works this way to work around bug 829209 ("Specified key was too long" errors) that will be hit with the dns_domains table. Change-Id: I81e6ed476e8a310fe01809e9afcc2c8d5360e9df --- stack.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index a035f550..27b0eb53 100755 --- a/stack.sh +++ b/stack.sh @@ -1899,7 +1899,10 @@ fi if is_service_enabled mysql && is_service_enabled nova; then # (re)create nova database mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS nova;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE nova;' + # Explicitly use latin1: to avoid lp#829209, nova expects the database to + # use latin1 by default, and then upgrades the database to utf8 (see the + # 082_essex.py in nova) + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE nova CHARACTER SET latin1;' # (re)create nova database $NOVA_DIR/bin/nova-manage db sync From d55509d46e59b1b734605965fae3bc5d94161ce9 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Fri, 20 Jul 2012 15:00:02 +0000 Subject: [PATCH 600/967] Use c-api for service when creating cinder user. - cinder is not a service but c-api is. 
Change-Id: I580fe91216d3fa066120dc774811bfe08119ca02 --- files/keystone_data.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index ba14a47f..6987797a 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -275,7 +275,7 @@ if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then --role_id $MEMBER_ROLE fi -if [[ "$ENABLED_SERVICES" =~ "cinder" ]]; then +if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then CINDER_USER=$(get_id keystone user-create --name=cinder \ --pass="$SERVICE_PASSWORD" \ --tenant_id $SERVICE_TENANT \ From 4e823ff4ad8fb22602c6d70c02b5910e0ad467ef Mon Sep 17 00:00:00 2001 From: John Griffith Date: Fri, 20 Jul 2012 13:18:17 -0600 Subject: [PATCH 601/967] Add Cinder to ROOTWRAP_SUDOERS * Perform operations in lib/cinder * Use specific cidner-rootwrap file * Add root_helper to cinder.conf Change-Id: Ibcae8bae2d06c9cbb4c41c39791e8dbdc8bbb0ac --- lib/cinder | 38 ++++++++++++++++++++++++++++++++++++++ stack.sh | 4 ++-- 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index 119cc913..0e42600b 100644 --- a/lib/cinder +++ b/lib/cinder @@ -52,6 +52,42 @@ function configure_cinder() { cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR + # Set the paths of certain binaries + if [[ "$os_PACKAGE" = "deb" ]]; then + CINDER_ROOTWRAP=/usr/local/bin/cinder-rootwrap + else + CINDER_ROOTWRAP=/usr/bin/cinder-rootwrap + fi + + # If Cinder ships the new rootwrap filters files, deploy them + # (owned by root) and add a parameter to $CINDER_ROOTWRAP + ROOTWRAP_CINDER_SUDOER_CMD="$CINDER_ROOTWRAP" + if [[ -d $CINDER_DIR/etc/cinder/rootwrap.d ]]; then + # Wipe any existing rootwrap.d files first + if [[ -d $CINDER_CONF_DIR/rootwrap.d ]]; then + sudo rm -rf $CINDER_CONF_DIR/rootwrap.d + fi + # Deploy filters to /etc/cinder/rootwrap.d + sudo mkdir -m 755 $CINDER_CONF_DIR/rootwrap.d + sudo cp $CINDER_DIR/etc/cinder/rootwrap.d/*.filters $CINDER_CONF_DIR/rootwrap.d + sudo chown -R root:root $CINDER_CONF_DIR/rootwrap.d + sudo chmod 644 $CINDER_CONF_DIR/rootwrap.d/* + # Set up rootwrap.conf, pointing to /etc/cinder/rootwrap.d + sudo cp $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR/ + sudo sed -e "s:^filters_path=.*$:filters_path=$CINDER_CONF_DIR/rootwrap.d:" -i $CINDER_CONF_DIR/rootwrap.conf + sudo chown root:root $CINDER_CONF_DIR/rootwrap.conf + sudo chmod 0644 $CINDER_CONF_DIR/rootwrap.conf + # Specify rootwrap.conf as first parameter to cinder-rootwrap + CINDER_ROOTWRAP="$CINDER_ROOTWRAP $CINDER_CONF_DIR/rootwrap.conf" + ROOTWRAP_CINDER_SUDOER_CMD="$CINDER_ROOTWRAP *" + fi + + TEMPFILE=`mktemp` + echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_CINDER_SUDOER_CMD" >$TEMPFILE + chmod 0440 $TEMPFILE + sudo chown root:root $TEMPFILE + sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap + CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI iniset $CINDER_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST @@ -71,6 +107,8 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT rabbit_host $RABBIT_HOST iniset $CINDER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI + iniset $CINDER_CONF DEFAULT root_helper "sudo ${CINDER_ROOTWRAP}" + } # init_cinder() - Initialize database and volume group diff --git a/stack.sh b/stack.sh index 5d4ce9f0..d0e2262f 100755 --- a/stack.sh +++ b/stack.sh @@ -1225,7 +1225,7 @@ if [[ -d $NOVA_DIR/etc/nova/rootwrap.d ]]; then 
ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP *" fi -# Set up the rootwrap sudoers +# Set up the rootwrap sudoers for nova TEMPFILE=`mktemp` echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE chmod 0440 $TEMPFILE @@ -1495,7 +1495,7 @@ if is_service_enabled swift; then if is_service_enabled swift3;then swift_auth_server="s3token " fi - + # By default Swift will be installed with the tempauth middleware # which has some default username and password if you have # configured keystone it will checkout the directory. From 686f98ea10e137669cc09e903944261ce6fbc2c5 Mon Sep 17 00:00:00 2001 From: David Kranz Date: Tue, 24 Jul 2012 09:15:44 -0400 Subject: [PATCH 602/967] Add flag for availablility of change password API. Change-Id: Ic63754050ef4a5838144edd1d24a2e9206cc0fa5 --- tools/configure_tempest.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 22a8c43b..b858d0ed 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -135,6 +135,7 @@ COMPUTE_CATALOG_TYPE=compute COMPUTE_CREATE_IMAGE_ENABLED=True COMPUTE_ALLOW_TENANT_ISOLATION=True COMPUTE_RESIZE_AVAILABLE=False # not supported with QEMU... +COMPUTE_CHANGE_PASSWORD_AVAILABLE=False # not supported with QEMU... COMPUTE_LOG_LEVEL=ERROR BUILD_INTERVAL=3 BUILD_TIMEOUT=400 @@ -172,6 +173,7 @@ sed -e " s,%COMPUTE_ALLOW_TENANT_ISOLATION%,$COMPUTE_ALLOW_TENANT_ISOLATION,g; s,%COMPUTE_CREATE_IMAGE_ENABLED%,$COMPUTE_CREATE_IMAGE_ENABLED,g; s,%COMPUTE_RESIZE_AVAILABLE%,$COMPUTE_RESIZE_AVAILABLE,g; + s,%COMPUTE_CHANGE_PASSWORD_AVAILABLE%,$COMPUTE_CHANGE_PASSWORD_AVAILABLE,g; s,%COMPUTE_LOG_LEVEL%,$COMPUTE_LOG_LEVEL,g; s,%BUILD_INTERVAL%,$BUILD_INTERVAL,g; s,%BUILD_TIMEOUT%,$BUILD_TIMEOUT,g; From d586e1c2b9f33a63237a2ccd5f3440522360aa96 Mon Sep 17 00:00:00 2001 From: John Griffith Date: Wed, 11 Jul 2012 13:21:08 -0600 Subject: [PATCH 603/967] Change default volume service to Cinder * Modifies stackrc to load Cinder instead of n-vol by default * Depends on https://review.openstack.org/#/c/9746/ * Depends on https://review.openstack.org/#/c/9747/ Change-Id: I1fcb4bd274311d048dee54826b6408e5abb493d2 --- stackrc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackrc b/stackrc index badde686..9ac17ac9 100644 --- a/stackrc +++ b/stackrc @@ -10,9 +10,9 @@ RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) # # If you like to explicitly remove services you can add a -$service in # ENABLED_SERVICES, for example in your localrc to install all defaults but not -# nova-volume you would just need to set this : -# ENABLED_SERVICES+=,-n-vol -ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit +# cinder you would just need to set this : +# ENABLED_SERVICES+=,-cinder +ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit # Set the default Nova APIs to enable NOVA_ENABLED_APIS=ec2,osapi_compute,osapi_volume,metadata From d5b18ecb724ba64a57a7bf97f4982b670d639da5 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 26 Jul 2012 09:21:01 -0500 Subject: [PATCH 604/967] Add ability to override base git location. 
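A minimal localrc sketch of the new override (hypothetical usage, not part of the patch), pointing clones at the alternate base URL already mentioned in stackrc instead of GitHub:

    # localrc: fetch every project repo through the review mirror
    GIT_BASE=http://review.openstack.org/p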
Change-Id: I823cf5ac467e903677783aee082785ec85c36186 --- stackrc | 38 +++++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/stackrc b/stackrc index badde686..d2bb1514 100644 --- a/stackrc +++ b/stackrc @@ -17,39 +17,43 @@ ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-vol,n-sch,n-nov # Set the default Nova APIs to enable NOVA_ENABLED_APIS=ec2,osapi_compute,osapi_volume,metadata +# Base GIT Repo URL +# Another option is http://review.openstack.org/p +GIT_BASE=https://github.com + # volume service -CINDER_REPO=https://github.com/openstack/cinder +CINDER_REPO=${GIT_BASE}/openstack/cinder CINDER_BRANCH=master # volume client -CINDERCLIENT_REPO=https://github.com/openstack/python-cinderclient +CINDERCLIENT_REPO=${GIT_BASE}/openstack/python-cinderclient CINDERCLIENT_BRANCH=master # compute service -NOVA_REPO=https://github.com/openstack/nova.git +NOVA_REPO=${GIT_BASE}/openstack/nova.git NOVA_BRANCH=master # storage service -SWIFT_REPO=https://github.com/openstack/swift.git +SWIFT_REPO=${GIT_BASE}/openstack/swift.git SWIFT_BRANCH=master SWIFT3_REPO=https://github.com/fujita/swift3.git SWIFT3_BRANCH=master # python swift client library -SWIFTCLIENT_REPO=https://github.com/openstack/python-swiftclient +SWIFTCLIENT_REPO=${GIT_BASE}/openstack/python-swiftclient SWIFTCLIENT_BRANCH=master # image catalog service -GLANCE_REPO=https://github.com/openstack/glance.git +GLANCE_REPO=${GIT_BASE}/openstack/glance.git GLANCE_BRANCH=master # python glance client library -GLANCECLIENT_REPO=https://github.com/openstack/python-glanceclient +GLANCECLIENT_REPO=${GIT_BASE}/openstack/python-glanceclient GLANCECLIENT_BRANCH=master # unified auth system (manages accounts/tokens) -KEYSTONE_REPO=https://github.com/openstack/keystone.git +KEYSTONE_REPO=${GIT_BASE}/openstack/keystone.git KEYSTONE_BRANCH=master # a websockets/html5 or flash powered VNC console for vm instances @@ -57,39 +61,39 @@ NOVNC_REPO=https://github.com/kanaka/noVNC.git NOVNC_BRANCH=master # django powered web control panel for openstack -HORIZON_REPO=https://github.com/openstack/horizon.git +HORIZON_REPO=${GIT_BASE}/openstack/horizon.git HORIZON_BRANCH=master # python client library to nova that horizon (and others) use -NOVACLIENT_REPO=https://github.com/openstack/python-novaclient.git +NOVACLIENT_REPO=${GIT_BASE}/openstack/python-novaclient.git NOVACLIENT_BRANCH=master # Shared openstack python client library -OPENSTACKCLIENT_REPO=https://github.com/openstack/python-openstackclient.git +OPENSTACKCLIENT_REPO=${GIT_BASE}/openstack/python-openstackclient.git OPENSTACKCLIENT_BRANCH=master # python keystone client library to nova that horizon uses -KEYSTONECLIENT_REPO=https://github.com/openstack/python-keystoneclient +KEYSTONECLIENT_REPO=${GIT_BASE}/openstack/python-keystoneclient KEYSTONECLIENT_BRANCH=master # 
quantum service -QUANTUM_REPO=https://github.com/openstack/quantum +QUANTUM_REPO=${GIT_BASE}/openstack/quantum QUANTUM_BRANCH=master # quantum client -QUANTUM_CLIENT_REPO=https://github.com/openstack/python-quantumclient +QUANTUM_CLIENT_REPO=${GIT_BASE}/openstack/python-quantumclient QUANTUM_CLIENT_BRANCH=master # Tempest test suite -TEMPEST_REPO=https://github.com/openstack/tempest.git +TEMPEST_REPO=${GIT_BASE}/openstack/tempest.git TEMPEST_BRANCH=master # melange service -MELANGE_REPO=https://github.com/openstack/melange.git +MELANGE_REPO=${GIT_BASE}/openstack/melange.git MELANGE_BRANCH=master # python melange client library -MELANGECLIENT_REPO=https://github.com/openstack/python-melangeclient.git +MELANGECLIENT_REPO=${GIT_BASE}/openstack/python-melangeclient.git MELANGECLIENT_BRANCH=master # Specify a comma-separated list of uec images to download and install into glance. From 47f02060ad0500ccb2d4ceeb6015ad7a4c56b4e9 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 26 Jul 2012 11:09:24 -0500 Subject: [PATCH 605/967] Optionally install all pip into a global venv. This is useful for tracking what pip is causing to be installed over and beyond os pacakges. In support of this, move all package installation to before the section where we install via pip. Leave the deferred configuration until later though. Change-Id: I89677fd54635e82b10ab674ddeb9ffb3f1a755f0 --- functions | 22 ++++-- stack.sh | 213 +++++++++++++++++++++++++++++++++++------------------- 2 files changed, 155 insertions(+), 80 deletions(-) diff --git a/functions b/functions index 46a6f8a4..b66dc159 100644 --- a/functions +++ b/functions @@ -471,12 +471,19 @@ function pip_install { if [[ -z "$os_PACKAGE" ]]; then GetOSVersion fi - if [[ "$os_PACKAGE" = "deb" ]]; then - CMD_PIP=/usr/bin/pip + if [[ $TRACK_DEPENDS = True ]] ; then + source $DEST/.venv/bin/activate + CMD_PIP=$DEST/.venv/bin/pip + SUDO_PIP="env" else - CMD_PIP=/usr/bin/pip-python + SUDO_PIP="sudo" + if [[ "$os_PACKAGE" = "deb" ]]; then + CMD_PIP=/usr/bin/pip + else + CMD_PIP=/usr/bin/pip-python + fi fi - sudo PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \ + $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \ HTTP_PROXY=$http_proxy \ HTTPS_PROXY=$https_proxy \ $CMD_PIP install --use-mirrors $@ @@ -501,12 +508,17 @@ function restart_service() { # develop, so that pip and not distutils process the dependency chain # setup_develop directory function setup_develop() { + if [[ $TRACK_DEPENDS = True ]] ; then + SUDO_CMD="env" + else + SUDO_CMD="sudo" + fi (cd $1; \ python setup.py egg_info; \ raw_links=$(awk '/^.+/ {print "-f " $1}' *.egg-info/dependency_links.txt); \ depend_links=$(echo $raw_links | xargs); \ pip_install -r *-info/requires.txt $depend_links; \ - sudo \ + $SUDO_CMD \ HTTP_PROXY=$http_proxy \ HTTPS_PROXY=$https_proxy \ python setup.py develop \ diff --git a/stack.sh b/stack.sh index d0e2262f..058b2502 100755 --- a/stack.sh +++ b/stack.sh @@ -614,6 +614,130 @@ else install_package $(get_packages $FILES/rpms) fi +if [[ $SYSLOG != "False" ]]; then + install_package rsyslog-relp +fi + +if is_service_enabled rabbit; then + # Install rabbitmq-server + # the temp file is necessary due to LP: #878600 + tfile=$(mktemp) + install_package rabbitmq-server > "$tfile" 2>&1 + cat "$tfile" + rm -f "$tfile" +elif 
is_service_enabled qpid; then + if [[ "$os_PACKAGE" = "rpm" ]]; then + install_package qpid-cpp-server + else + install_package qpidd + fi +fi + +if is_service_enabled mysql; then + + if [[ "$os_PACKAGE" = "deb" ]]; then + # Seed configuration with mysql password so that apt-get install doesn't + # prompt us for a password upon install. + cat <$HOME/.my.cnf +[client] +user=$MYSQL_USER +password=$MYSQL_PASSWORD +host=$MYSQL_HOST +EOF + chmod 0600 $HOME/.my.cnf + fi + # Install mysql-server + install_package mysql-server +fi + +if is_service_enabled quantum; then + if [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + # Install deps + # FIXME add to files/apts/quantum, but don't install if not needed! + install_package python-configobj + fi +fi + +if is_service_enabled horizon; then + if [[ "$os_PACKAGE" = "deb" ]]; then + # Install apache2, which is NOPRIME'd + install_package apache2 libapache2-mod-wsgi + else + sudo rm -f /etc/httpd/conf.d/000-* + install_package httpd mod_wsgi + fi +fi + +if is_service_enabled q-agt; then + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + # Install deps + # FIXME add to files/apts/quantum, but don't install if not needed! + if [[ "$os_PACKAGE" = "deb" ]]; then + kernel_version=`cat /proc/version | cut -d " " -f3` + install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version + else + ### FIXME(dtroyer): Find RPMs for OpenVSwitch + echo "OpenVSwitch packages need to be located" + fi + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + install_package bridge-utils + fi +fi + +if is_service_enabled n-cpu; then + + # Virtualization Configuration + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + if [[ "$os_PACKAGE" = "deb" ]]; then + LIBVIRT_PKG_NAME=libvirt-bin + else + LIBVIRT_PKG_NAME=libvirt + fi + install_package $LIBVIRT_PKG_NAME + # Install and configure **LXC** if specified. LXC is another approach to + # splitting a system into many smaller parts. LXC uses cgroups and chroot + # to simulate multiple systems. + if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then + if [[ "$os_PACKAGE" = "deb" ]]; then + if [[ "$DISTRO" > natty ]]; then + install_package cgroup-lite + fi + else + ### FIXME(dtroyer): figure this out + echo "RPM-based cgroup not implemented yet" + yum_install libcgroup-tools + fi + fi +fi + +if is_service_enabled swift; then + # Install memcached for swift. + install_package memcached +fi + +TRACK_DEPENDS=${TRACK_DEPENDS:-False} + +# Install python packages into a virtualenv so that we can track them +if [[ $TRACK_DEPENDS = True ]] ; then + install_package python-virtualenv + + rm -rf $DEST/.venv + virtualenv --system-site-packages $DEST/.venv + source $DEST/.venv/bin/activate + $DEST/.venv/bin/pip freeze > $DEST/requires-pre-pip +fi + # Install python requirements pip_install $(get_packages $FILES/pips | sort -u) @@ -671,7 +795,6 @@ if is_service_enabled cinder; then install_cinder fi - # Initialization # ============== @@ -715,12 +838,19 @@ if is_service_enabled cinder; then configure_cinder fi +if [[ $TRACK_DEPENDS = True ]] ; then + $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip + if ! 
diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff ; then + cat $DEST/requires.diff + fi + echo "Ran stack.sh in depend tracking mode, bailing out now" + exit 0 +fi # Syslog # ------ if [[ $SYSLOG != "False" ]]; then - install_package rsyslog-relp if [[ "$SYSLOG_HOST" = "$HOST_IP" ]]; then # Configure the master host to receive cat </tmp/90-stack-m.conf @@ -743,12 +873,7 @@ fi # -------------- if is_service_enabled rabbit; then - # Install and start rabbitmq-server - # the temp file is necessary due to LP: #878600 - tfile=$(mktemp) - install_package rabbitmq-server > "$tfile" 2>&1 - cat "$tfile" - rm -f "$tfile" + # Start rabbitmq-server if [[ "$os_PACKAGE" = "rpm" ]]; then # RPM doesn't start the service restart_service rabbitmq-server @@ -756,45 +881,17 @@ if is_service_enabled rabbit; then # change the rabbit password since the default is "guest" sudo rabbitmqctl change_password guest $RABBIT_PASSWORD elif is_service_enabled qpid; then - if [[ "$os_PACKAGE" = "rpm" ]]; then - install_package qpid-cpp-server - restart_service qpidd - else - install_package qpidd - fi + restart_service qpidd fi # Mysql # ----- -if is_service_enabled mysql; then - if [[ "$os_PACKAGE" = "deb" ]]; then - # Seed configuration with mysql password so that apt-get install doesn't - # prompt us for a password upon install. - cat <$HOME/.my.cnf -[client] -user=$MYSQL_USER -password=$MYSQL_PASSWORD -host=$MYSQL_HOST -EOF - chmod 0600 $HOME/.my.cnf - fi +if is_service_enabled mysql; then - # Install and start mysql-server - install_package mysql-server + #start mysql-server if [[ "$os_PACKAGE" = "rpm" ]]; then # RPM doesn't start the service start_service mysqld @@ -904,10 +1001,8 @@ if is_service_enabled horizon; then sudo mkdir -p $HORIZON_DIR/.blackhole if [[ "$os_PACKAGE" = "deb" ]]; then - # Install apache2, which is NOPRIME'd APACHE_NAME=apache2 APACHE_CONF=sites-available/horizon - install_package apache2 libapache2-mod-wsgi # Clean up the old config name sudo rm -f /etc/apache2/sites-enabled/000-default # Be a good citizen and use the distro tools here @@ -917,8 +1012,6 @@ if is_service_enabled horizon; then # Install httpd, which is NOPRIME'd APACHE_NAME=httpd APACHE_CONF=conf.d/horizon.conf - sudo rm -f /etc/httpd/conf.d/000-* - install_package httpd mod_wsgi sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf fi ## Configure apache to run horizon @@ -1028,9 +1121,6 @@ if is_service_enabled quantum; then Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2" fi elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - # Install deps - # FIXME add to files/apts/quantum, but don't install if not needed! - install_package python-configobj Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini Q_DB_NAME="quantum_linux_bridge" @@ -1104,15 +1194,6 @@ fi # Quantum agent (for compute nodes) if is_service_enabled q-agt; then if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - # Install deps - # FIXME add to files/apts/quantum, but don't install if not needed! 
- if [[ "$os_PACKAGE" = "deb" ]]; then - kernel_version=`cat /proc/version | cut -d " " -f3` - install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version - else - ### FIXME(dtroyer): Find RPMs for OpenVSwitch - echo "OpenVSwitch packages need to be located" - fi # Set up integration bridge OVS_BRIDGE=${OVS_BRIDGE:-br-int} for PORT in `sudo ovs-vsctl --no-wait list-ports $OVS_BRIDGE`; do @@ -1126,8 +1207,7 @@ if is_service_enabled q-agt; then AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py" elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then # Start up the quantum <-> linuxbridge agent - install_package bridge-utils - #set the default network interface + # set the default network interface QUANTUM_LB_PRIVATE_INTERFACE=${QUANTUM_LB_PRIVATE_INTERFACE:-$GUEST_INTERFACE_DEFAULT} sudo sed -i -e "s/^physical_interface = .*$/physical_interface = $QUANTUM_LB_PRIVATE_INTERFACE/g" /$Q_PLUGIN_CONF_FILE AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/linuxbridge/agent/linuxbridge_quantum_agent.py" @@ -1273,15 +1353,6 @@ function clean_iptables() { if is_service_enabled n-cpu; then - # Virtualization Configuration - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - if [[ "$os_PACKAGE" = "deb" ]]; then - LIBVIRT_PKG_NAME=libvirt-bin - else - LIBVIRT_PKG_NAME=libvirt - fi - install_package $LIBVIRT_PKG_NAME - # Force IP forwarding on, just on case sudo sysctl -w net.ipv4.ip_forward=1 @@ -1304,9 +1375,7 @@ if is_service_enabled n-cpu; then # to simulate multiple systems. if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then if [[ "$os_PACKAGE" = "deb" ]]; then - if [[ "$DISTRO" > natty ]]; then - install_package cgroup-lite - else + if [[ ! "$DISTRO" > natty ]]; then cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0" sudo mkdir -p /cgroup if ! grep -q cgroup /etc/fstab; then @@ -1316,10 +1385,6 @@ if is_service_enabled n-cpu; then sudo mount /cgroup fi fi - else - ### FIXME(dtroyer): figure this out - echo "RPM-based cgroup not implemented yet" - yum_install libcgroup-tools fi fi @@ -1414,8 +1479,6 @@ fi # --------------- if is_service_enabled swift; then - # Install memcached for swift. - install_package memcached # We make sure to kill all swift processes first swift-init all stop || true From e9659e503ebaad23db0ed2a3489ece204b0f1640 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 26 Jul 2012 13:22:42 -0500 Subject: [PATCH 606/967] Add python-cmd2 install for openstackclient. 
Change-Id: I5cf6772c6fa1c813efa4993bd0461c45bbaae4ea --- files/apts/general | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apts/general b/files/apts/general index f04f9556..971a5ff9 100644 --- a/files/apts/general +++ b/files/apts/general @@ -18,3 +18,4 @@ curl tcpdump euca2ools # only for testing client tar +python-cmd2 From 8301f1475b9a6aa1d5c731585d2463763a71cb26 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Wed, 25 Jul 2012 03:26:23 -0400 Subject: [PATCH 607/967] Enable Quantum agents and plugins to use more than one config file Change-Id: I039101471d264c84f6e05cc3f33073932e71f788 --- stack.sh | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/stack.sh b/stack.sh index d0e2262f..0fbb957a 100755 --- a/stack.sh +++ b/stack.sh @@ -1047,9 +1047,8 @@ if is_service_enabled quantum; then # If needed, move config file from $QUANTUM_DIR/etc/quantum to /etc/quantum mkdir -p /$Q_PLUGIN_CONF_PATH Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME - if [[ -e $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE ]]; then - sudo mv $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE - fi + cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE + sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/$Q_DB_NAME?charset=utf8/g" /$Q_PLUGIN_CONF_FILE OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-True} @@ -1098,7 +1097,7 @@ if is_service_enabled q-svc; then # Update either configuration file with plugin sudo sed -i -e "s/^core_plugin =.*$/core_plugin = $Q_PLUGIN_CLASS/g" $Q_CONF_FILE - screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE" + screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" fi # Quantum agent (for compute nodes) @@ -1133,7 +1132,7 @@ if is_service_enabled q-agt; then AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/linuxbridge/agent/linuxbridge_quantum_agent.py" fi # Start up the quantum agent - screen_it q-agt "sudo python $AGENT_BINARY /$Q_PLUGIN_CONF_FILE -v" + screen_it q-agt "sudo python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" fi # Quantum DHCP From 1a4166ca66e71e5ed57ace68f4bfea9af2346ce1 Mon Sep 17 00:00:00 2001 From: Akihiro MOTOKI Date: Wed, 25 Jul 2012 17:53:40 +0900 Subject: [PATCH 608/967] Enables keystone-enabled Quantum in devstack. Fixes bug 1028075. Whether keystone is enabled for Quantum is determined according to Q_AUTH_STRATEGY. * 'keystone' (default): Enables authN with keystone for Quantum * 'noauth': No authN used by Quantum. 
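(Editor's note: an illustrative localrc fragment for the switch described
above -- the variable name comes from this patch, the values are examples only:

    # default: quantum-server validates requests against keystone
    Q_AUTH_STRATEGY=keystone
    # or run quantum with no authentication at all:
    # Q_AUTH_STRATEGY=noauth
)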
Change-Id: Icfc77089e085b43e97601869d9c61c9f4da1164b --- stack.sh | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/stack.sh b/stack.sh index 0fbb957a..8a1c5d54 100755 --- a/stack.sh +++ b/stack.sh @@ -1075,17 +1075,9 @@ if is_service_enabled q-svc; then Q_API_PASTE_FILE=/etc/quantum/api-paste.ini Q_POLICY_FILE=/etc/quantum/policy.json - if [[ -e $QUANTUM_DIR/etc/quantum.conf ]]; then - sudo cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE - fi - - if [[ -e $QUANTUM_DIR/etc/api-paste.ini ]]; then - sudo cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE - fi - - if [[ -e $QUANTUM_DIR/etc/policy.json ]]; then - sudo cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE - fi + cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE + cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE + cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE if is_service_enabled mysql; then mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e "DROP DATABASE IF EXISTS $Q_DB_NAME;" @@ -1096,7 +1088,16 @@ if is_service_enabled q-svc; then fi # Update either configuration file with plugin - sudo sed -i -e "s/^core_plugin =.*$/core_plugin = $Q_PLUGIN_CLASS/g" $Q_CONF_FILE + iniset $Q_CONF_FILE DEFAULT core_plugin $Q_PLUGIN_CLASS + + iniset $Q_CONF_FILE DEFAULT auth_strategy $Q_AUTH_STRATEGY + iniset $Q_API_PASTE_FILE filter:authtoken auth_host $KEYSTONE_SERVICE_HOST + iniset $Q_API_PASTE_FILE filter:authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $Q_API_PASTE_FILE filter:authtoken auth_protocol $KEYSTONE_SERVICE_PROTOCOL + iniset $Q_API_PASTE_FILE filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $Q_API_PASTE_FILE filter:authtoken admin_user $Q_ADMIN_USERNAME + iniset $Q_API_PASTE_FILE filter:authtoken admin_password $SERVICE_PASSWORD + screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" fi @@ -2069,10 +2070,10 @@ if is_service_enabled mysql && is_service_enabled nova; then TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1) # Create a small network - NET_ID=$(quantum net-create --os_token $Q_ADMIN_USERNAME --os_url http://$Q_HOST:$Q_PORT --tenant_id $TENANT_ID net1 | grep ' id ' | get_field 2) - - # Create a subnet - quantum subnet-create --os_token $Q_ADMIN_USERNAME --os_url http://$Q_HOST:$Q_PORT --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE + # Since quantum command is executed in admin context at this point, + # --tenant_id needs to be specified. + NET_ID=$(quantum net-create --tenant_id $TENANT_ID net1 | grep ' id ' | get_field 2) + quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE fi fi From c9ad14bd3879e54c30cc88b186128dca3f6ab21a Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 3 Jul 2012 20:29:01 +0000 Subject: [PATCH 609/967] Use default route to find HOST_IP When running devstack, nova moves the host ip from eth0 onto the bridge. This causes devstack to fail on the second run unless you explicitly set HOST_IP in localrc. This patch searches for an ip on the interface that is used for the default route. This will be eth0 (or en0) in most cases, but it will search br100 instead if nova has moved the ip, since it moves the default route as well. It also will filter out ips from the potential list that are part of the fixed range and floating range if the netaddr library is installed. 
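(A quick illustration of the helper this adds to functions -- exit status 0
means the address falls inside the network; the addresses here are
hypothetical:

    address_in_net 10.0.0.5 10.0.0.0/24 && echo "inside FIXED_RANGE, skip it"
)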
This allows us to find the proper ip even if we have accidentally left a floating ip or fixed ip on the bridge. Change-Id: I13288e53ee2786c5ae0edb3f9ab457be8303f1f6 --- functions | 12 ++++++++++++ stack.sh | 30 ++++++++++++++++++++++-------- 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/functions b/functions index 46a6f8a4..e46a2fc6 100644 --- a/functions +++ b/functions @@ -9,6 +9,18 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace +# Exit 0 if address is in network or 1 if +# address is not in network or netaddr library +# is not installed. +function address_in_net() { + python -c " +import netaddr +import sys +sys.exit(netaddr.IPAddress('$1') not in netaddr.IPNetwork('$2')) +" +} + + # apt-get wrapper to set arguments correctly # apt_get operation package [package ...] function apt_get() { diff --git a/stack.sh b/stack.sh index 0fbb957a..79f085fa 100755 --- a/stack.sh +++ b/stack.sh @@ -283,13 +283,30 @@ LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} # cases. SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler} -HOST_IP_IFACE=${HOST_IP_IFACE:-eth0} -# Use the eth0 IP unless an explicit is set by ``HOST_IP`` environment variable +# Set fixed and floating range here so we can make sure not to use addresses +# from either range when attempting to guess the ip to use for the host +FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} +FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28} + +# Find the interface used for the default route +HOST_IP_IFACE=${HOST_IP_IFACE:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }')} +# Search for an IP unless an explicit is set by ``HOST_IP`` environment variable if [ -z "$HOST_IP" -o "$HOST_IP" == "dhcp" ]; then - HOST_IP=`LC_ALL=C ip -f inet addr show ${HOST_IP_IFACE} | awk '/inet/ {split($2,parts,"/"); print parts[1]}' | head -n1` - if [ "$HOST_IP" = "" ]; then + HOST_IP="" + HOST_IPS=`LC_ALL=C ip -f inet addr show ${HOST_IP_IFACE} | awk '/inet/ {split($2,parts,"/"); print parts[1]}'` + for IP in $HOST_IPS; do + # Attempt to filter out ip addresses that are part of the fixed and + # floating range. Note that this method only works if the 'netaddr' + # python library is installed. If it is not installed, an error + # will be printed and the first ip from the interface will be used. + if ! (address_in_net $IP $FIXED_RANGE || address_in_net $IP $FLOATING_RANGE); then + HOST_IP=$IP + break; + fi + done + if [ "$HOST_IP" == "" ]; then echo "Could not determine host ip address." 
- echo "Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted to eth0" + echo "Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted" exit 1 fi fi @@ -368,11 +385,8 @@ else fi PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT} -PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-br100} -FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} -FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28} NET_MAN=${NET_MAN:-FlatDHCPManager} EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST} FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT} From 7abe4f24a3b77a1507ab0d1c994c3f5052aa0fe9 Mon Sep 17 00:00:00 2001 From: Osamu Habuka Date: Wed, 25 Jul 2012 12:39:32 +0900 Subject: [PATCH 610/967] support no_proxy environment variable Change-Id: I5175f9752abe358cca0d2e3e5cf6d94605df451a --- AUTHORS | 1 + functions | 4 ++++ stack.sh | 9 +++++++-- 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/AUTHORS b/AUTHORS index 6141d675..2bf49419 100644 --- a/AUTHORS +++ b/AUTHORS @@ -29,6 +29,7 @@ Josh Kearney Justin Shepherd Ken Pepple Kiall Mac Innes +Osamu Habuka Russell Bryant Scott Moser Thierry Carrez diff --git a/functions b/functions index 46a6f8a4..2f5cdbc5 100644 --- a/functions +++ b/functions @@ -17,6 +17,7 @@ function apt_get() { [[ "$(id -u)" = "0" ]] && sudo="env" $sudo DEBIAN_FRONTEND=noninteractive \ http_proxy=$http_proxy https_proxy=$https_proxy \ + no_proxy=$no_proxy \ apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@" } @@ -479,6 +480,7 @@ function pip_install { sudo PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \ HTTP_PROXY=$http_proxy \ HTTPS_PROXY=$https_proxy \ + NO_PROXY=$no_proxy \ $CMD_PIP install --use-mirrors $@ } @@ -509,6 +511,7 @@ function setup_develop() { sudo \ HTTP_PROXY=$http_proxy \ HTTPS_PROXY=$https_proxy \ + NO_PROXY=$no_proxy \ python setup.py develop \ ) } @@ -562,6 +565,7 @@ function yum_install() { local sudo="sudo" [[ "$(id -u)" = "0" ]] && sudo="env" $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ + no_proxy=$no_proxy \ yum install -y "$@" } diff --git a/stack.sh b/stack.sh index 0fbb957a..5d2627aa 100755 --- a/stack.sh +++ b/stack.sh @@ -60,16 +60,21 @@ fi source $TOP_DIR/stackrc # HTTP and HTTPS proxy servers are supported via the usual environment variables -# ``http_proxy`` and ``https_proxy``. They can be set in ``localrc`` if necessary +# ``http_proxy`` and ``https_proxy``. Additionally if you would like to access +# to specific server directly and not through the proxy server, you can use +# ``no_proxy`` environment variable. 
They can be set in ``localrc`` if necessary # or on the command line:: # -# http_proxy=http://proxy.example.com:3128/ ./stack.sh +# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh if [[ -n "$http_proxy" ]]; then export http_proxy=$http_proxy fi if [[ -n "$https_proxy" ]]; then export https_proxy=$https_proxy fi +if [[ -n "$no_proxy" ]]; then + export no_proxy=$no_proxy +fi # Destination path for installation ``DEST`` DEST=${DEST:-/opt/stack} From 5ef90475937a0ac6b7b160cd2385c406d93a64c4 Mon Sep 17 00:00:00 2001 From: Rohit Karajgi Date: Tue, 31 Jul 2012 06:20:35 -0700 Subject: [PATCH 611/967] Adds parameter interpolation for Tempest config Adds Tempest variable for Volume catalog Type Change-Id: I4ab968f30146bd9e40533d67dba49114649f029d --- tools/configure_tempest.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index b858d0ed..456b3c0d 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -155,6 +155,9 @@ IMAGE_CATALOG_TYPE=image NETWORK_CATALOG_TYPE=network NETWORK_API_VERSION=2.0 +# Volume API test configuration +VOLUME_CATALOG_TYPE=volume + sed -e " s,%IDENTITY_USE_SSL%,$IDENTITY_USE_SSL,g; s,%IDENTITY_HOST%,$IDENTITY_HOST,g; @@ -198,6 +201,7 @@ sed -e " s,%IDENTITY_ADMIN_TENANT_NAME%,$IDENTITY_ADMIN_TENANT_NAME,g; s,%NETWORK_CATALOG_TYPE%,$NETWORK_CATALOG_TYPE,g; s,%NETWORK_API_VERSION%,$NETWORK_API_VERSION,g; + s,%VOLUME_CATALOG_TYPE%,$VOLUME_CATALOG_TYPE,g; " -i $TEMPEST_CONF echo "Created tempest configuration file:" From 3f603d93c87ad200d61ec3827bb847cccd699929 Mon Sep 17 00:00:00 2001 From: Chuck Short Date: Sat, 28 Jul 2012 13:28:33 -0500 Subject: [PATCH 612/967] Setup tgtd configuration files Setup devstack to use tgtd confiuration files. Change-Id: Icb2a1a0c5ca517604f9a3930e7c89e3be9a36b0c Signed-off-by: Chuck Short --- lib/cinder | 5 +++++ stack.sh | 9 +++++++++ 2 files changed, 14 insertions(+) diff --git a/lib/cinder b/lib/cinder index 0e42600b..49ad4afe 100644 --- a/lib/cinder +++ b/lib/cinder @@ -142,6 +142,8 @@ function init_cinder() { if ! sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi fi + mkdir -p $CINDER_DIR/volumes + if sudo vgs $VOLUME_GROUP; then if [[ "$os_PACKAGE" = "rpm" ]]; then # RPM doesn't start the service @@ -171,6 +173,9 @@ function install_cinder() { function start_cinder() { if is_service_enabled c-vol; then if [[ "$os_PACKAGE" = "deb" ]]; then + if [[ ! -f /etc/tgt/conf.d/cinder.conf ]]; then + echo "include $CINDER_DIR/volumes/*" | sudo tee /etc/tgt/conf.d/cinder.conf + fi # tgt in oneiric doesn't restart properly if tgtd isn't running # do it in two steps sudo stop tgt || true diff --git a/stack.sh b/stack.sh index 0fbb957a..5ca52e4e 100755 --- a/stack.sh +++ b/stack.sh @@ -1699,6 +1699,9 @@ elif is_service_enabled n-vol; then start_service tgtd fi + # Setup tgtd configuration files + mkdir -p $NOVA_DIR/volumes + # Remove nova iscsi targets sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true # Clean out existing volumes @@ -1711,6 +1714,12 @@ elif is_service_enabled n-vol; then fi if [[ "$os_PACKAGE" = "deb" ]]; then + + # Setup the tgt configuration file + if [[ ! 
-f /etc/tgt/conf.d/nova.conf ]]; then + echo "include $NOVA_DIR/volumes/*" | sudo tee /etc/tgt/conf.d/nova.conf + fi + # tgt in oneiric doesn't restart properly if tgtd isn't running # do it in two steps sudo stop tgt || true From 2ee1fd1e82971dc522ae71d2bfc97678e73cf84e Mon Sep 17 00:00:00 2001 From: John Garbutt Date: Tue, 31 Jul 2012 11:59:43 +0100 Subject: [PATCH 613/967] Removing python-cmd2 at it is not available on Ubuntu 11.10 This fixes bug 1030899 Ubuntu 11.10 is used by the XenAPI devstack install because of issues running Ubuntu 12.04 on XenServer 6.0.2 and ea11.10 Change-Id: I35bca1f484e0caea8316e78ca6543d3b7af49088 --- files/apts/general | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/apts/general b/files/apts/general index 971a5ff9..3fa07a75 100644 --- a/files/apts/general +++ b/files/apts/general @@ -18,4 +18,4 @@ curl tcpdump euca2ools # only for testing client tar -python-cmd2 +python-cmd2 # dist:precise From b7ef539b126e845ceeb2a724e8abc0d2e5e36a14 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 1 Aug 2012 16:13:42 -0700 Subject: [PATCH 614/967] Improve exercises/aggregates.sh * Update for blueprint general-host-aggregates * Test for add/remove hosts * Now uses nova host-list Change-Id: Id6fef649c13032cf9148d7152fa2b28654717892 --- exercises/aggregates.sh | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index 38fac120..8a4f9c19 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -7,14 +7,15 @@ # * Updating Aggregate details # * Testing Aggregate metadata # * Testing Aggregate delete -# * TODO(johngar) - test adding a host (idealy with two hosts) +# * Testing General Aggregates (https://blueprints.launchpad.net/nova/+spec/general-host-aggregates) +# * Testing add/remove hosts (with one host) echo "**************************************************" echo "Begin DevStack Exercise: $0" echo "**************************************************" # This script exits on an error so that errors don't compound and you see -# only the first error that occured. +# only the first error that occurred. 
set -o errexit # Print the commands being run so that we can see the command that triggers @@ -47,6 +48,7 @@ OS_USERNAME=admin # =================== AGGREGATE_NAME=test_aggregate_$RANDOM +AGGREGATE2_NAME=test_aggregate_$RANDOM AGGREGATE_A_ZONE=nova exit_if_aggregate_present() { @@ -63,6 +65,7 @@ exit_if_aggregate_present() { exit_if_aggregate_present $AGGREGATE_NAME AGGREGATE_ID=`nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1` +AGGREGATE2_ID=`nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1` # check aggregate created nova aggregate-list | grep -q " $AGGREGATE_NAME " || die "Aggregate $AGGREGATE_NAME not created" @@ -120,13 +123,23 @@ nova aggregate-details $AGGREGATE_ID | grep {} # Test aggregate-add/remove-host # ============================== if [ "$VIRT_DRIVER" == "xenserver" ]; then - echo "TODO(johngarbutt) add tests for add/remove host from aggregate" + echo "TODO(johngarbutt) add tests for add/remove host from pool aggregate" fi - +HOST=`nova host-list | grep compute | get_field 1` +# Make sure can add two aggregates to same host +nova aggregate-add-host $AGGREGATE_ID $HOST +nova aggregate-add-host $AGGREGATE2_ID $HOST +if nova aggregate-add-host $AGGREGATE2_ID $HOST; then + echo "ERROR could add duplicate host to single aggregate" + exit -1 +fi +nova aggregate-remove-host $AGGREGATE2_ID $HOST +nova aggregate-remove-host $AGGREGATE_ID $HOST # Test aggregate-delete # ===================== nova aggregate-delete $AGGREGATE_ID +nova aggregate-delete $AGGREGATE2_ID exit_if_aggregate_present $AGGREGATE_NAME From 32c520f46d25dd95db1fd2283e5621068d9223f3 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 19 Jul 2012 18:35:50 -0500 Subject: [PATCH 615/967] Add bridge-utils to deps for Ubuntu and Fedora. They don't always get installed via dependencies any more. Change-Id: I46f333c79ecccca9e10170d06039611fad5813dc --- files/apts/general | 1 + files/rpms/general | 1 + 2 files changed, 2 insertions(+) diff --git a/files/apts/general b/files/apts/general index 3fa07a75..be7bf98c 100644 --- a/files/apts/general +++ b/files/apts/general @@ -1,3 +1,4 @@ +bridge-utils pep8 pylint python-pip diff --git a/files/rpms/general b/files/rpms/general index 52184d00..6d89d2ef 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -1,3 +1,4 @@ +bridge-utils curl euca2ools # only for testing client git-core From 05530caf2cc12716f6b22c103212ba3ea7fe7910 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 6 Jul 2012 15:09:10 -0500 Subject: [PATCH 616/967] Source functions before stackrc The enable_service() and disable_service() functions in https://review.openstack.org/9407 require the functions file be sourced before stackrc. * exercise.sh * samples/local.sh (this will require manual addition to the user's local.sh if they based it on the sample) * tools/build_bm.sh Change-Id: I1bed687867e870bef5748289d712376435a776af --- exercise.sh | 3 +++ samples/local.sh | 3 +++ tools/build_bm.sh | 7 +++++++ 3 files changed, 13 insertions(+) diff --git a/exercise.sh b/exercise.sh index 15f264f4..a0349ce4 100755 --- a/exercise.sh +++ b/exercise.sh @@ -5,6 +5,9 @@ # Keep track of the current devstack directory. 
TOP_DIR=$(cd $(dirname "$0") && pwd) +# Import common functions +source $TOP_DIR/functions + # Load local configuration source $TOP_DIR/stackrc diff --git a/samples/local.sh b/samples/local.sh index 83637f98..2c54b10f 100755 --- a/samples/local.sh +++ b/samples/local.sh @@ -13,6 +13,9 @@ # Keep track of the devstack directory TOP_DIR=$(cd $(dirname "$0") && pwd) +# Import common functions +source $TOP_DIR/functions + # Use openrc + stackrc + localrc for settings source $TOP_DIR/stackrc diff --git a/tools/build_bm.sh b/tools/build_bm.sh index b2d4c366..ab0ba0ef 100755 --- a/tools/build_bm.sh +++ b/tools/build_bm.sh @@ -5,6 +5,13 @@ # Build an OpenStack install on a bare metal machine. set +x +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + # Source params source ./stackrc From d8f1a87a45080cd2514f620c94c7eec97de74877 Mon Sep 17 00:00:00 2001 From: John Garbutt Date: Tue, 26 Jun 2012 11:16:38 +0100 Subject: [PATCH 617/967] Make it easier to use Precise or Oneric with XenServer DevStack This is a partial fix for bug 1009937 Change-Id: I1fc7059cd812bce1539a5050f60717db4cbd81ef --- tools/xen/build_xva.sh | 20 ++--- tools/xen/install_os_domU.sh | 24 ++---- tools/xen/scripts/install_ubuntu_template.sh | 78 ++++++++++++++++++++ tools/xen/scripts/xenoneirictemplate.sh | 63 ---------------- tools/xen/xenrc | 40 ++++++++-- 5 files changed, 126 insertions(+), 99 deletions(-) create mode 100755 tools/xen/scripts/install_ubuntu_template.sh delete mode 100755 tools/xen/scripts/xenoneirictemplate.sh diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index fdc6a606..9eae1903 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -44,19 +44,14 @@ if [ ! -d $STAGING_DIR/etc ]; then exit 1 fi -# Directory where our conf files are stored -FILES_DIR=$TOP_DIR/files -TEMPLATES_DIR=$TOP_DIR/templates - -# Directory for supporting script files -SCRIPT_DIR=$TOP_DIR/scripts - -# Version of ubuntu with which we are working -UBUNTU_VERSION=`cat $STAGING_DIR/etc/lsb-release | grep "DISTRIB_CODENAME=" | sed "s/DISTRIB_CODENAME=//"` -KERNEL_VERSION=`ls $STAGING_DIR/boot/vmlinuz* | head -1 | sed "s/.*vmlinuz-//"` - # Configure dns (use same dns as dom0) -cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf +# but only when not precise +if [ "$UBUNTU_INST_RELEASE" != "precise" ]; then + cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf +elif [ "$MGT_IP" != "dhcp" ] && [ "$PUB_IP" != "dhcp" ]; then + echo "Configuration without DHCP not supported on Precise" + exit 1 +fi # Copy over devstack rm -f /tmp/devstack.tar @@ -90,6 +85,7 @@ EOF # Configure the network INTERFACES=$STAGING_DIR/etc/network/interfaces +TEMPLATES_DIR=$TOP_DIR/templates cp $TEMPLATES_DIR/interfaces.in $INTERFACES if [ $VM_IP == "dhcp" ]; then echo 'eth1 on dhcp' diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 19453c12..0bb6ac8a 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -169,7 +169,7 @@ fi HOST_IP=${HOST_IP:-`ifconfig xenbr0 | grep "inet addr" | cut -d ":" -f2 | sed "s/ .*//"`} # Set up ip forwarding, but skip on xcp-xapi -if [ -a /etc/sysconfig/network]; then +if [ -a /etc/sysconfig/network ]; then if ! grep -q "FORWARD_IPV4=YES" /etc/sysconfig/network; then # FIXME: This doesn't work on reboot! 
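# (Editor's note, not part of this patch: a reboot-safe sketch on typical Linux
#  hosts would be to persist the sysctl instead, e.g.
#      echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
#  shown only as a hedged illustration of how the FIXME above could be
#  addressed; the exact file is distro-dependent.)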
echo "FORWARD_IPV4=YES" >> /etc/sysconfig/network @@ -218,7 +218,7 @@ fi # GUEST_NAME=${GUEST_NAME:-"DevStackOSDomU"} -TNAME="devstack_template_folsom_11.10" +TNAME="devstack_template" SNAME_PREPARED="template_prepared" SNAME_FIRST_BOOT="before_first_boot" @@ -242,19 +242,6 @@ if [ -z "$templateuuid" ]; then # Install Ubuntu over network # - # try to find ubuntu template - ubuntu_template_name="Ubuntu 11.10 for DevStack (64-bit)" - ubuntu_template=$(xe_min template-list name-label="$ubuntu_template_name") - - # remove template, if we are in CLEAN_TEMPLATE mode - if [ -n "$ubuntu_template" ]; then - if $CLEAN_TEMPLATES; then - xe template-param-clear param-name=other-config uuid=$ubuntu_template - xe template-uninstall template-uuid=$ubuntu_template force=true - ubuntu_template="" - fi - fi - # always update the preseed file, incase we have a newer one PRESEED_URL=${PRESEED_URL:-""} if [ -z "$PRESEED_URL" ]; then @@ -272,13 +259,12 @@ if [ -z "$templateuuid" ]; then fi fi - if [ -z "$ubuntu_template" ]; then - $TOP_DIR/scripts/xenoneirictemplate.sh $PRESEED_URL - fi + # Update the template + $TOP_DIR/scripts/install_ubuntu_template.sh $PRESEED_URL # create a new VM with the given template # creating the correct VIFs and metadata - $TOP_DIR/scripts/install-os-vpx.sh -t "$ubuntu_template_name" -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -r $OSDOMU_MEM_MB -k "flat_network_bridge=${VM_BR}" + $TOP_DIR/scripts/install-os-vpx.sh -t "$UBUNTU_INST_TEMPLATE_NAME" -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -r $OSDOMU_MEM_MB -k "flat_network_bridge=${VM_BR}" # wait for install to finish wait_for_VM_to_halt diff --git a/tools/xen/scripts/install_ubuntu_template.sh b/tools/xen/scripts/install_ubuntu_template.sh new file mode 100755 index 00000000..f67547b0 --- /dev/null +++ b/tools/xen/scripts/install_ubuntu_template.sh @@ -0,0 +1,78 @@ +#!/bin/bash +# +# This creates an Ubuntu Server 32bit or 64bit template +# on Xenserver 5.6.x, 6.0.x and 6.1.x +# The template does a net install only +# +# Based on a script by: David Markey +# + +# Exit on errors +set -o errexit +# Echo commands +set -o xtrace + +# This directory +BASE_DIR=$(cd $(dirname "$0") && pwd) + +# For default setings see xenrc +source $BASE_DIR/../xenrc + +# Get the params +preseed_url=$1 + +# Delete template or skip template creation as required +previous_template=$(xe template-list name-label="$UBUNTU_INST_TEMPLATE_NAME" \ + params=uuid --minimal) +if [ -n "$previous_template" ]; then + if $CLEAN_TEMPLATES; then + xe template-param-clear param-name=other-config uuid=$previous_template + xe template-uninstall template-uuid=$previous_template force=true + else + echo "Template $UBUNTU_INST_TEMPLATE_NAME already present" + exit 0 + fi +fi + +# Get built-in template +builtin_name="Debian Squeeze 6.0 (32-bit)" +builtin_uuid=$(xe template-list name-label="$builtin_name" --minimal) +if [[ -z $builtin_uuid ]]; then + echo "Cant find the Debian Squeeze 32bit template on your XenServer." 
+ exit 1 +fi + +# Clone built-in template to create new template +new_uuid=$(xe vm-clone uuid=$builtin_uuid \ + new-name-label="$UBUNTU_INST_TEMPLATE_NAME") + +# Some of these settings can be found in example preseed files +# however these need to be answered before the netinstall +# is ready to fetch the preseed file, and as such must be here +# to get a fully automated install +pvargs="-- quiet console=hvc0 partman/default_filesystem=ext3 \ +console-setup/ask_detect=false locale=${UBUNTU_INST_LOCALE} \ +keyboard-configuration/layoutcode=${UBUNTU_INST_KEYBOARD} \ +netcfg/choose_interface=${HOST_IP_IFACE} \ +netcfg/get_hostname=os netcfg/get_domain=os auto \ +url=${preseed_url}" + +if [ "$NETINSTALLIP" != "dhcp" ]; then + netcfgargs="netcfg/disable_autoconfig=true \ +netcfg/get_nameservers=${UBUNTU_INST_NAMESERVERS} \ +netcfg/get_ipaddress=${UBUNTU_INST_IP} \ +netcfg/get_netmask=${UBUNTU_INST_NETMASK} \ +netcfg/get_gateway=${UBUNTU_INST_GATEWAY} \ +netcfg/confirm_static=true" + pvargs="${pvargs} ${netcfgargs}" +fi + +xe template-param-set uuid=$new_uuid \ + other-config:install-methods=http \ + other-config:install-repository="$UBUNTU_INST_REPOSITORY" \ + PV-args="$pvargs" \ + other-config:debian-release="$UBUNTU_INST_RELEASE" \ + other-config:default_template=true \ + other-config:install-arch="$UBUNTU_INST_ARCH" + +echo "Ubuntu template installed uuid:$new_uuid" diff --git a/tools/xen/scripts/xenoneirictemplate.sh b/tools/xen/scripts/xenoneirictemplate.sh deleted file mode 100755 index 7f10c336..00000000 --- a/tools/xen/scripts/xenoneirictemplate.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash -## makeubuntu.sh, this creates Ubuntu server 11.10 32 and 64 bit templates -## on Xenserver 6.0.2 Net install only -## Original Author: David Markey -## Author: Renuka Apte -## This is not an officially supported guest OS on XenServer 6.0.2 - -BASE_DIR=$(cd $(dirname "$0") && pwd) -source $BASE_DIR/../../../localrc - -LENNY=$(xe template-list name-label=Debian\ Squeeze\ 6.0\ \(32-bit\) --minimal) - -if [[ -z $LENNY ]] ; then - echo "Cant find Squeeze 32bit template." 
- exit 1 -fi - -distro="Ubuntu 11.10 for DevStack" -arches=("32-bit" "64-bit") - -preseedurl=${1:-"http://images.ansolabs.com/devstackubuntupreseed.cfg"} - -NETINSTALL_LOCALE=${NETINSTALL_LOCALE:-en_US} -NETINSTALL_KEYBOARD=${NETINSTALL_KEYBOARD:-us} -NETINSTALL_IFACE=${NETINSTALL_IFACE:-eth3} - -for arch in ${arches[@]} ; do - echo "Attempting $distro ($arch)" - if [[ -n $(xe template-list name-label="$distro ($arch)" params=uuid --minimal) ]] ; then - echo "$distro ($arch)" already exists, Skipping - else - if [ -z $NETINSTALLIP ] - then - echo "NETINSTALLIP not set in localrc" - exit 1 - fi - # Some of these settings can be found in example preseed files - # however these need to be answered before the netinstall - # is ready to fetch the preseed file, and as such must be here - # to get a fully automated install - pvargs="-- quiet console=hvc0 partman/default_filesystem=ext3 locale=${NETINSTALL_LOCALE} console-setup/ask_detect=false keyboard-configuration/layoutcode=${NETINSTALL_KEYBOARD} netcfg/choose_interface=${NETINSTALL_IFACE} netcfg/get_hostname=os netcfg/get_domain=os auto url=${preseedurl}" - if [ "$NETINSTALLIP" != "dhcp" ] - then - netcfgargs="netcfg/disable_autoconfig=true netcfg/get_nameservers=${NAMESERVERS} netcfg/get_ipaddress=${NETINSTALLIP} netcfg/get_netmask=${NETMASK} netcfg/get_gateway=${GATEWAY} netcfg/confirm_static=true" - pvargs="${pvargs} ${netcfgargs}" - fi - NEWUUID=$(xe vm-clone uuid=$LENNY new-name-label="$distro ($arch)") - xe template-param-set uuid=$NEWUUID other-config:install-methods=http,ftp \ - other-config:install-repository=http://archive.ubuntu.net/ubuntu \ - PV-args="$pvargs" \ - other-config:debian-release=oneiric \ - other-config:default_template=true - - if [[ "$arch" == "32-bit" ]] ; then - xe template-param-set uuid=$NEWUUID other-config:install-arch="i386" - else - xe template-param-set uuid=$NEWUUID other-config:install-arch="amd64" - fi - echo "Success" - fi -done - -echo "Done" diff --git a/tools/xen/xenrc b/tools/xen/xenrc index 102a492e..0365a25e 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -1,5 +1,10 @@ #!/bin/bash +# +# XenServer specific defaults for the /tools/xen/ scripts +# Similar to stackrc, you can override these in your localrc +# + # Name of this guest GUEST_NAME=${GUEST_NAME:-DevStackOSDomU} @@ -10,13 +15,18 @@ OSDOMU_MEM_MB=1024 # VM Password GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} -# Host Interface, i.e. the interface on the nova vm you want to expose the services on -# Usually either eth2 (management network) or eth3 (public network) +# Host Interface, i.e. the interface on the nova vm you want to expose the +# services on. 
Usually eth2 (management network) or eth3 (public network) and # not eth0 (private network with XenServer host) or eth1 (VM traffic network) +# This is also used as the interface for the Ubuntu install HOST_IP_IFACE=${HOST_IP_IFACE:-eth3} +# # Our nova host's network info -VM_IP=${VM_IP:-10.255.255.255} # A host-only ip that let's the interface come up, otherwise unused +# + +# A host-only ip that let's the interface come up, otherwise unused +VM_IP=${VM_IP:-10.255.255.255} MGT_IP=${MGT_IP:-172.16.100.55} PUB_IP=${PUB_IP:-192.168.1.55} @@ -38,8 +48,28 @@ MGT_BR=${MGT_BR:-""} MGT_VLAN=${MGT_VLAN:-101} MGT_DEV=${MGT_DEV:-eth0} -# Guest installer network +# Decide if you should enable eth0, +# the guest installer network +# You need to disable this on xcp-xapi on Ubuntu 12.04 ENABLE_GI=true -# Source params +# Ubuntu install settings +UBUNTU_INST_RELEASE="oneiric" +UBUNTU_INST_TEMPLATE_NAME="Ubuntu 11.10 (64-bit) for DevStack" +# For 12.04 use "precise" and update template name +# However, for 12.04, you should be using +# XenServer 6.1 and later or XCP 1.6 or later +# 11.10 is only really supported with XenServer 6.0.2 and later +UBUNTU_INST_ARCH="amd64" +UBUNTU_INST_REPOSITORY="http://archive.ubuntu.net/ubuntu" +UBUNTU_INST_LOCALE="en_US" +UBUNTU_INST_KEYBOARD="us" +# network configuration for HOST_IP_IFACE during install +UBUNTU_INST_IP="dhcp" +UBUNTU_INST_NAMESERVERS="" +UBUNTU_INST_NETMASK="" +UBUNTU_INST_GATEWAY="" + +# Load stackrc defaults +# then override with settings from localrc cd ../.. && source ./stackrc && cd $TOP_DIR From 4ba6eeb53afe4b06f87f034cfe2d9cd82eb1634f Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Wed, 1 Aug 2012 11:55:12 -0400 Subject: [PATCH 618/967] Adds compute whitebox configuration options Adds the following options to the tempest configuration file: COMPUTE_CONFIG_PATH COMPUTE_SOURCE_DIR COMPUTE_WHITEBOX_ENABLED COMPUTE_PATH_TO_PRIVATE_KEY COMPUTE_DB_URI COMPUTE_BIN_DIR These options are used in an upcoming Tempest patchset Change-Id: I50409d03255b92f154112b57e96ad71f8542ac96 --- tools/configure_tempest.sh | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 456b3c0d..4d029d84 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -52,6 +52,7 @@ source $TOP_DIR/openrc # Where Openstack code lives DEST=${DEST:-/opt/stack} +NOVA_SOURCE_DIR=$DEST/nova TEMPEST_DIR=$DEST/tempest CONFIG_DIR=$TEMPEST_DIR/etc TEMPEST_CONF=$CONFIG_DIR/tempest.conf @@ -134,7 +135,7 @@ FLAVOR_REF_ALT=2 COMPUTE_CATALOG_TYPE=compute COMPUTE_CREATE_IMAGE_ENABLED=True COMPUTE_ALLOW_TENANT_ISOLATION=True -COMPUTE_RESIZE_AVAILABLE=False # not supported with QEMU... +COMPUTE_RESIZE_AVAILABLE=False COMPUTE_CHANGE_PASSWORD_AVAILABLE=False # not supported with QEMU... COMPUTE_LOG_LEVEL=ERROR BUILD_INTERVAL=3 @@ -144,6 +145,15 @@ SSH_USER=$OS_USERNAME NETWORK_FOR_SSH=private IP_VERSION_FOR_SSH=4 SSH_TIMEOUT=4 +# Whitebox testing configuration for Compute... +COMPUTE_WHITEBOX_ENABLED=True +COMPUTE_SOURCE_DIR=$NOVA_SOURCE_DIR +COMPUTE_BIN_DIR=/usr/bin/nova +COMPUTE_CONFIG_PATH=/etc/nova/nova.conf +# TODO(jaypipes): Create the key file here... right now, no whitebox +# tests actually use a key. +COMPUTE_PATH_TO_PRIVATE_KEY=$TEMPEST_DIR/id_rsa +COMPUTE_DB_URI=mysql://root:$MYSQL_PASSWORD@localhost/nova # Image test configuration options... 
IMAGE_HOST=${IMAGE_HOST:-127.0.0.1} @@ -177,6 +187,7 @@ sed -e " s,%COMPUTE_CREATE_IMAGE_ENABLED%,$COMPUTE_CREATE_IMAGE_ENABLED,g; s,%COMPUTE_RESIZE_AVAILABLE%,$COMPUTE_RESIZE_AVAILABLE,g; s,%COMPUTE_CHANGE_PASSWORD_AVAILABLE%,$COMPUTE_CHANGE_PASSWORD_AVAILABLE,g; + s,%COMPUTE_WHITEBOX_ENABLED%,$COMPUTE_WHITEBOX_ENABLED,g; s,%COMPUTE_LOG_LEVEL%,$COMPUTE_LOG_LEVEL,g; s,%BUILD_INTERVAL%,$BUILD_INTERVAL,g; s,%BUILD_TIMEOUT%,$BUILD_TIMEOUT,g; @@ -189,6 +200,11 @@ sed -e " s,%IMAGE_ID_ALT%,$IMAGE_UUID_ALT,g; s,%FLAVOR_REF%,$FLAVOR_REF,g; s,%FLAVOR_REF_ALT%,$FLAVOR_REF_ALT,g; + s,%COMPUTE_CONFIG_PATH%,$COMPUTE_CONFIG_PATH,g; + s,%COMPUTE_SOURCE_DIR%,$COMPUTE_SOURCE_DIR,g; + s,%COMPUTE_BIN_DIR%,$COMPUTE_BIN_DIR,g; + s,%COMPUTE_PATH_TO_PRIVATE_KEY%,$COMPUTE_PATH_TO_PRIVATE_KEY,g; + s,%COMPUTE_DB_URI%,$COMPUTE_DB_URI,g; s,%IMAGE_HOST%,$IMAGE_HOST,g; s,%IMAGE_PORT%,$IMAGE_PORT,g; s,%IMAGE_API_VERSION%,$IMAGE_API_VERSION,g; From f2a25b776bd1e7bffadf6939ffdc76384da34848 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Wed, 13 Jun 2012 17:45:43 +0200 Subject: [PATCH 619/967] Ensure that we have access to sbin binaries through sudo We need to add the sbin paths to the secure path in sudoers for the user running the script, to make sure that running sbin binaries from sudo will work. Change-Id: I7942407df768bfa8dd035f15fa8b43ba05319779 --- stack.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stack.sh b/stack.sh index c1e06cb1..3827d776 100755 --- a/stack.sh +++ b/stack.sh @@ -193,6 +193,9 @@ else # Set up devstack sudoers TEMPFILE=`mktemp` echo "`whoami` ALL=(root) NOPASSWD:ALL" >$TEMPFILE + # Some binaries might be under /sbin or /usr/sbin, so make sure sudo will + # see them by forcing PATH + echo "Defaults:`whoami` secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh From b6c87144085815c2bfabd73a670b3caf0434f821 Mon Sep 17 00:00:00 2001 From: John Garbutt Date: Thu, 2 Aug 2012 12:34:03 +0100 Subject: [PATCH 620/967] Install XenServer tools from the iso on XenServer Fixes bug 1032122 by ensuring, where possible, we use the tools shipped with XenServer rather than downloading the tools from images.ansolabs.com Change-Id: I4592eca315b4700e73097d678309d00323923c8b --- tools/xen/prepare_guest.sh | 7 +++---- tools/xen/prepare_guest_template.sh | 26 +++++++++++++++++++++++++- 2 files changed, 28 insertions(+), 5 deletions(-) diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh index 89a01694..4aa4554f 100755 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -18,6 +18,7 @@ set -o xtrace GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} STAGING_DIR=${STAGING_DIR:-stage} DO_TGZ=${DO_TGZ:-1} +XS_TOOLS_PATH=${XS_TOOLS_PATH:-"/root/xs-tools.deb"} # Install basics chroot $STAGING_DIR apt-get update @@ -26,10 +27,8 @@ chroot $STAGING_DIR apt-get install -y curl wget ssh openssh-server python-pip g chroot $STAGING_DIR pip install xenapi # Install XenServer guest utilities -XEGUEST=xe-guest-utilities_5.6.100-651_amd64.deb -wget http://images.ansolabs.com/xen/$XEGUEST -O $XEGUEST -cp $XEGUEST $STAGING_DIR/root -chroot $STAGING_DIR dpkg -i /root/$XEGUEST +cp $XS_TOOLS_PATH ${STAGING_DIR}${XS_TOOLS_PATH} +chroot $STAGING_DIR dpkg -i $XS_TOOLS_PATH chroot $STAGING_DIR update-rc.d -f xe-linux-distribution remove chroot $STAGING_DIR update-rc.d xe-linux-distribution defaults diff --git 
a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh index 7c6dec4f..60782d0e 100755 --- a/tools/xen/prepare_guest_template.sh +++ b/tools/xen/prepare_guest_template.sh @@ -44,6 +44,28 @@ if [ ! -d $STAGING_DIR/etc ]; then exit 1 fi +# Copy XenServer tools deb into the VM +ISO_DIR="/opt/xensource/packages/iso" +XS_TOOLS_FILE_NAME="xs-tools.deb" +XS_TOOLS_PATH="/root/$XS_TOOLS_FILE_NAME" +if [ -e "$ISO_DIR" ]; then + TOOLS_ISO=$(ls $ISO_DIR/xs-tools-*.iso) + TMP_DIR=/tmp/temp.$RANDOM + mkdir -p $TMP_DIR + mount -o loop $TOOLS_ISO $TMP_DIR + DEB_FILE=$(ls $TMP_DIR/Linux/*amd64.deb) + echo "Copying XenServer tools into VM from: $DEB_FILE" + cp $DEB_FILE "${STAGING_DIR}${XS_TOOLS_PATH}" + umount $TMP_DIR + rm -rf $TMP_DIR +else + echo "WARNING: no XenServer tools found, falling back to 5.6 tools" + TOOLS_URL="http://images.ansolabs.com/xen/xe-guest-utilities_5.6.100-651_amd64.deb" + wget $TOOLS_URL -O $XS_TOOLS_FILE_NAME + cp $XS_TOOLS_FILE_NAME "${STAGING_DIR}${XS_TOOLS_PATH}" + rm -rf $XS_TOOLS_FILE_NAME +fi + # Copy prepare_guest.sh to VM mkdir -p $STAGING_DIR/opt/stack/ cp $TOP_DIR/prepare_guest.sh $STAGING_DIR/opt/stack/prepare_guest.sh @@ -53,5 +75,7 @@ cp $STAGING_DIR/etc/rc.local $STAGING_DIR/etc/rc.local.preparebackup # run prepare_guest.sh on boot cat <$STAGING_DIR/etc/rc.local -GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ DO_TGZ=0 bash /opt/stack/prepare_guest.sh > /opt/stack/prepare_guest.log 2>&1 +GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ \ + DO_TGZ=0 XS_TOOLS_PATH=$XS_TOOLS_PATH \ + bash /opt/stack/prepare_guest.sh > /opt/stack/prepare_guest.log 2>&1 EOF From 2163aa18579a1a76161cc14d3d79f93c13d3d993 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Fri, 3 Aug 2012 12:56:21 -0700 Subject: [PATCH 621/967] Change glance client flag --public to --is-public The --public flag is deprecated. Let's not use it. Change-Id: Ic6785ec2d1d5d3f7ab7cf2f584010ac38e339e0a --- stack.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index c1e06cb1..093dfef4 100755 --- a/stack.sh +++ b/stack.sh @@ -2255,19 +2255,19 @@ if is_service_enabled g-reg; then esac if [ "$CONTAINER_FORMAT" = "bare" ]; then - glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}") + glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}") else # Use glance client to add the kernel the root filesystem. # We parse the results of the first upload to get the glance ID of the # kernel for use when uploading the root filesystem. 
KERNEL_ID=""; RAMDISK_ID=""; if [ -n "$KERNEL" ]; then - KERNEL_ID=$(glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --public --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2) + KERNEL_ID=$(glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --is-public=True --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2) fi if [ -n "$RAMDISK" ]; then - RAMDISK_ID=$(glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --public --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) + RAMDISK_ID=$(glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --is-public=True --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) fi - glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --public --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" + glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --is-public=True --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" fi done fi From c0c6f00698ca88f3c88f90ba211b234096eb3ac6 Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Fri, 6 Jul 2012 17:49:12 -0700 Subject: [PATCH 622/967] enable testing of openvz guests This patch adds several options to allow using OpenVZ virt layer. - modifies stack.sh and stackrc to recognize a new VIRT_TYPE option - set IMAGE_URLS to an openvz image, if VIRT_TYPE == openvz It also makes a few changes to some tests so that some implicit defaults (such as the guest user account) can be overridden. Change-Id: I0dde2dffbf3848fac1dd27eb37af84c0ac73d9aa --- exercises/boot_from_volume.sh | 10 ++++---- exercises/euca.sh | 5 +++- exercises/floating_ips.sh | 2 +- stack.sh | 21 ++++++++++++----- stackrc | 43 +++++++++++++++++++++++++---------- tools/configure_tempest.sh | 42 ++++++++++++++++++++++++++++------ 6 files changed, 92 insertions(+), 31 deletions(-) diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 6a0937ab..7fe81ba0 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -46,6 +46,8 @@ DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} # Default floating IP pool name DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova} +# Default user +DEFAULT_INSTANCE_USER=${DEFAULT_INSTANCE_USER:-cirros} # Launching servers # ================= @@ -150,7 +152,7 @@ fi # To do this, ssh to the builder instance, mount volume, and build a volume-backed image. STAGING_DIR=/tmp/stage CIRROS_DIR=/tmp/cirros -ssh -o StrictHostKeyChecking=no -i $KEY_FILE cirros@$FLOATING_IP << EOF +ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP << EOF set -o errexit set -o xtrace sudo mkdir -p $STAGING_DIR @@ -168,10 +170,10 @@ if [ ! 
-e cirros-0.3.0-x86_64-rootfs.img.gz ]; then fi # Copy cirros onto the volume -scp -o StrictHostKeyChecking=no -i $KEY_FILE cirros-0.3.0-x86_64-rootfs.img.gz cirros@$FLOATING_IP:$STAGING_DIR +scp -o StrictHostKeyChecking=no -i $KEY_FILE cirros-0.3.0-x86_64-rootfs.img.gz ${DEFAULT_INSTANCE_USER}@$FLOATING_IP:$STAGING_DIR # Unpack cirros into volume -ssh -o StrictHostKeyChecking=no -i $KEY_FILE cirros@$FLOATING_IP << EOF +ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP << EOF set -o errexit set -o xtrace cd $STAGING_DIR @@ -221,7 +223,7 @@ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sle fi # Make sure our volume-backed instance launched -ssh -o StrictHostKeyChecking=no -i $KEY_FILE cirros@$FLOATING_IP << EOF +ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP << EOF echo "success!" EOF diff --git a/exercises/euca.sh b/exercises/euca.sh index 4a538c63..9f7aed17 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -40,12 +40,15 @@ source $TOP_DIR/exerciserc # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} +# Boot this image, use first AMI-format image if unset +DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} + # Launching a server # ================== # Find a machine image to boot -IMAGE=`euca-describe-images | grep machine | cut -f2 | head -n1` +IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1` # Define secgroup SECGROUP=euca_secgroup diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 51019a34..02259c08 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -185,7 +185,7 @@ fi nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || die "Failure deleting security group rule from $SECGROUP" # FIXME (anthony): make xs support security groups -if [ "$VIRT_DRIVER" != "xenserver" ]; then +if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then # test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then print "Security group failure - ping should not be allowed!" diff --git a/stack.sh b/stack.sh index 3827d776..6b01bad7 100755 --- a/stack.sh +++ b/stack.sh @@ -276,12 +276,6 @@ VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-} -# Nova hypervisor configuration. We default to libvirt with **kvm** but will -# drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can -# also install an **LXC** based system. -VIRT_DRIVER=${VIRT_DRIVER:-libvirt} -LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} - # Nova supports pluggable schedulers. ``FilterScheduler`` should work in most # cases. SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler} @@ -1957,6 +1951,13 @@ if [ "$VIRT_DRIVER" = 'xenserver' ]; then # Need to avoid crash due to new firewall support XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} add_nova_opt "firewall_driver=$XEN_FIREWALL_DRIVER" +elif [ "$VIRT_DRIVER" = 'openvz' ]; then + # TODO(deva): OpenVZ driver does not yet work if compute_driver is set here. + # Replace connection_type when this is fixed. 
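# (Editor's note: the net effect of this openvz branch is, illustratively, two
#  entries appended to nova.conf by the add_nova_opt calls below:
#      connection_type=openvz
#      firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver )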
+ # add_nova_opt "compute_driver=openvz.connection.OpenVzConnection" + add_nova_opt "connection_type=openvz" + LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} + add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER" else add_nova_opt "compute_driver=libvirt.LibvirtDriver" LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} @@ -2212,6 +2213,14 @@ if is_service_enabled g-reg; then wget -c $image_url -O $FILES/$IMAGE_FNAME fi + # OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading + if [[ "$image_url" =~ 'openvz' ]]; then + IMAGE="$FILES/${IMAGE_FNAME}" + IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" + glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format ami --disk-format ami < "$IMAGE" + continue + fi + KERNEL="" RAMDISK="" DISK_FORMAT="" diff --git a/stackrc b/stackrc index 3bbc4755..bd4fe14c 100644 --- a/stackrc +++ b/stackrc @@ -99,6 +99,17 @@ MELANGE_BRANCH=master MELANGECLIENT_REPO=${GIT_BASE}/openstack/python-melangeclient.git MELANGECLIENT_BRANCH=master +# Nova hypervisor configuration. We default to libvirt with **kvm** but will +# drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can +# also install an **LXC** or **OpenVZ** based system. +VIRT_DRIVER=${VIRT_DRIVER:-libvirt} +LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm} + +# allow local overrides of env variables +if [ -f $RC_DIR/localrc ]; then + source $RC_DIR/localrc +fi + # Specify a comma-separated list of uec images to download and install into glance. # supported urls here are: # * "uec-style" images: @@ -114,19 +125,27 @@ MELANGECLIENT_BRANCH=master # http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz #IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image #IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img" # cirros full disk image -case "$LIBVIRT_TYPE" in - lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc - DEFAULT_IMAGE_NAME=cirros-0.3.0-x86_64-rootfs - IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz";; - *) # otherwise, use the uec style image (with kernel, ramdisk, disk) - DEFAULT_IMAGE_NAME=cirros-0.3.0-x86_64-uec - IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz";; +# +# Set default image based on LIBVIRT_TYPE or VIRT_DRIVER, which may be set in localrc +# but allow DEFAULT_IMAGE_NAME and IMAGE_URLS to be set directly in localrc, too. 
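# (Editor's sketch of a localrc override -- values are taken from the openvz
#  branch below and are otherwise illustrative; the defaults in the case
#  statement below only apply when these variables are left unset:
#      VIRT_DRIVER=openvz
#      DEFAULT_IMAGE_NAME=ubuntu-11.10-x86_64
#      IMAGE_URLS=http://download.openvz.org/template/precreated/ubuntu-11.10-x86_64.tar.gz )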
+case "$VIRT_DRIVER" in + openvz) # OpenVZ uses its own format of image, and does not support uec style images + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-11.10-x86_64} + IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-11.10-x86_64.tar.gz"};; + libvirt) + case "$LIBVIRT_TYPE" in + lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-rootfs} + IMAGE_URLS=${IMAGE_URLS:-"http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz"};; + *) # otherwise, use the uec style image (with kernel, ramdisk, disk) + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-uec} + IMAGE_URLS=${IMAGE_URLS:-"http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz"};; + esac + ;; + *) # otherwise, use the uec style image (with kernel, ramdisk, disk) + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-uec} + IMAGE_URLS=${IMAGE_URLS:-"http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz"};; esac -# allow local overrides of env variables -if [ -f $RC_DIR/localrc ]; then - source $RC_DIR/localrc -fi - # 5Gb default volume backing file size VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-5130M} diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 4d029d84..5be709aa 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -67,15 +67,20 @@ fi # Glance should already contain images to be used in tempest # testing. Here we simply look for images stored in Glance # and set the appropriate variables for use in the tempest config -# We ignore ramdisk and kernel images and set the IMAGE_UUID to -# the first image returned and set IMAGE_UUID_ALT to the second, +# We ignore ramdisk and kernel images, look for the default image +# DEFAULT_IMAGE_NAME. If not found, we set the IMAGE_UUID to the +# first image returned and set IMAGE_UUID_ALT to the second, # if there is more than one returned... # ... Also ensure we only take active images, so we don't get snapshots in process IMAGE_LINES=`glance image-list` IFS="$(echo -e "\n\r")" IMAGES="" for line in $IMAGE_LINES; do - IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | cut -d' ' -f2`" + if [ -z $DEFAULT_IMAGE_NAME ]; then + IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | cut -d' ' -f2`" + else + IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | grep "$DEFAULT_IMAGE_NAME" | cut -d' ' -f2`" + fi done # Create array of image UUIDs... IFS=" " @@ -127,9 +132,31 @@ ALT_USERNAME=${ALT_USERNAME:-alt_demo} ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo} ALT_PASSWORD=$OS_PASSWORD -# TODO(jaypipes): Support configurable flavor refs here... -FLAVOR_REF=1 -FLAVOR_REF_ALT=2 +# Check Nova for existing flavors and, if set, look for the +# DEFAULT_INSTANCE_TYPE and use that. Otherwise, just use the first flavor. 
+FLAVOR_LINES=`nova flavor-list` +IFS="$(echo -e "\n\r")" +FLAVORS="" +for line in $FLAVOR_LINES; do + if [ -z $DEFAULT_INSTANCE_TYPE ]; then + FLAVORS="$FLAVORS `echo $line | grep -v "^\(ID\|+--\)" | cut -d' ' -f2`" + else + FLAVORS="$FLAVORS `echo $line | grep -v "^\(ID\|+--\)" | grep "$DEFAULT_INSTANCE_TYPE" | cut -d' ' -f2`" + fi +done +IFS=" " +FLAVORS=($FLAVORS) +NUM_FLAVORS=${#FLAVORS[*]} +echo "Found $NUM_FLAVORS flavors" +if [[ $NUM_FLAVORS -eq 0 ]]; then + echo "Found no valid flavors to use!" + exit 1 +fi +FLAVOR_REF=${FLAVORS[0]} +FLAVOR_REF_ALT=$FLAVOR_REF +if [[ $NUM_FLAVORS -gt 1 ]]; then + FLAVOR_REF_ALT=${FLAVORS[1]} +fi # Do any of the following need to be configurable? COMPUTE_CATALOG_TYPE=compute @@ -141,7 +168,8 @@ COMPUTE_LOG_LEVEL=ERROR BUILD_INTERVAL=3 BUILD_TIMEOUT=400 RUN_SSH=True -SSH_USER=$OS_USERNAME +# Check for DEFAULT_INSTANCE_USER and try to connect with that account +SSH_USER=${DEFAULT_INSTANCE_USER:-$OS_USERNAME} NETWORK_FOR_SSH=private IP_VERSION_FOR_SSH=4 SSH_TIMEOUT=4 From 6f85ab3583b31a2f84c66331f8c091b08d2d2b78 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Mon, 6 Aug 2012 16:56:10 +0900 Subject: [PATCH 623/967] q-dhcp: When q-dhcp is enabled, dangling dnsmasq needs to be killed like n-net fixes bug 1033428 Kill dnsmasq processes on startup like n-net case. q-dhcp(quantum dhcp agent) forks dns-masq child processes. But those dns-masq processes are not killed on initialization unlike n-net. So non-first run of q-dhcp fails due to stale dns-masq process. Change-Id: I15f464e527a5258f88da573facfa2eef4c03f753 Signed-off-by: Isaku Yamahata --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 3827d776..e8d05f3f 100755 --- a/stack.sh +++ b/stack.sh @@ -1466,7 +1466,7 @@ EOF' sudo rm -rf $NOVA_DIR/instances/* fi -if is_service_enabled n-net; then +if is_service_enabled n-net q-dhcp; then # Delete traces of nova networks from prior runs sudo killall dnsmasq || true clean_iptables From f71bf1929c234f9af0c13b9d989ccb949e4ef891 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Mon, 6 Aug 2012 11:15:36 -0400 Subject: [PATCH 624/967] Enable cinder to work with qpid Fixes bug 1033544 Change-Id: I015f61e8eb3612f970b4acfa3930a2f61c6c216c --- lib/cinder | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/cinder b/lib/cinder index 49ad4afe..adcc52c6 100644 --- a/lib/cinder +++ b/lib/cinder @@ -104,11 +104,16 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s iniset $CINDER_CONF DEFAULT iscsi_helper tgtadm iniset $CINDER_CONF DEFAULT sql_connection $BASE_SQL_CONN/cinder?charset=utf8 - iniset $CINDER_CONF DEFAULT rabbit_host $RABBIT_HOST - iniset $CINDER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI iniset $CINDER_CONF DEFAULT root_helper "sudo ${CINDER_ROOTWRAP}" + if is_service_enabled qpid ; then + iniset $CINDER_CONF DEFAULT rpc_backend cinder.openstack.common.rpc.impl_qpid + elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then + iniset $CINDER_CONF DEFAULT rabbit_host $RABBIT_HOST + iniset $CINDER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD + fi + } # init_cinder() - Initialize database and volume group From d1804b32fbf462161caa314a7d20494321eb721b Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Tue, 7 Aug 2012 09:43:22 +0200 Subject: [PATCH 625/967] Add Vincent Untz to AUTHORS Several of my patches are in already. 
Change-Id: I0b96f4d262205346e112ec8ac23dd7d5a0f252dd --- AUTHORS | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS b/AUTHORS index 6141d675..ab929ca4 100644 --- a/AUTHORS +++ b/AUTHORS @@ -34,6 +34,7 @@ Scott Moser Thierry Carrez Todd Willey Tres Henry +Vincent Untz Vishvananda Ishaya Yun Mao Yong Sheng Gong From 5da67fe42f88987875bbfa40d9c81a7519e72abc Mon Sep 17 00:00:00 2001 From: Sascha Peilicke Date: Wed, 18 Jul 2012 13:27:32 +0200 Subject: [PATCH 626/967] Use right service name when stopping tgt in unstack.sh On non-deb systems, 'tgt' is 'tgtd'. Change-Id: I357b47cf117a5e615eb4af9603b7c5670e5cff1c --- unstack.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/unstack.sh b/unstack.sh index 6a55a0a1..64de9150 100755 --- a/unstack.sh +++ b/unstack.sh @@ -54,7 +54,12 @@ if is_service_enabled cinder n-vol; then echo "iSCSI target cleanup needed:" echo "$TARGETS" fi - stop_service tgt + + if [[ "$os_PACKAGE" = "deb" ]]; then + stop_service tgt + else + stop_service tgtd + fi fi if [[ -n "$UNSTACK_ALL" ]]; then From 2eb9a13cce822251fd8e3115bf71ee62b777c265 Mon Sep 17 00:00:00 2001 From: Rohit Karajgi Date: Wed, 8 Aug 2012 02:34:51 -0700 Subject: [PATCH 627/967] Splits out build config params in Tempest. The build parameters for instances and volumes should be configured separately. This patch adds the following Tempest vars: COMPUTE_BUILD_INTERVAL COMPUTE_BUILD_TIMEOUT VOLUME_BUILD_INTERVAL VOLUME_BUILD_TIMEOUT Change-Id: Ia5357114f8e4248a8de4bd0327e08323c487e897 --- tools/configure_tempest.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 5be709aa..ffb3777e 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -167,6 +167,10 @@ COMPUTE_CHANGE_PASSWORD_AVAILABLE=False # not supported with QEMU... COMPUTE_LOG_LEVEL=ERROR BUILD_INTERVAL=3 BUILD_TIMEOUT=400 +COMPUTE_BUILD_INTERVAL=3 +COMPUTE_BUILD_TIMEOUT=400 +VOLUME_BUILD_INTERVAL=3 +VOLUME_BUILD_TIMEOUT=300 RUN_SSH=True # Check for DEFAULT_INSTANCE_USER and try to connect with that account SSH_USER=${DEFAULT_INSTANCE_USER:-$OS_USERNAME} @@ -219,6 +223,8 @@ sed -e " s,%COMPUTE_LOG_LEVEL%,$COMPUTE_LOG_LEVEL,g; s,%BUILD_INTERVAL%,$BUILD_INTERVAL,g; s,%BUILD_TIMEOUT%,$BUILD_TIMEOUT,g; + s,%COMPUTE_BUILD_INTERVAL%,$COMPUTE_BUILD_INTERVAL,g; + s,%COMPUTE_BUILD_TIMEOUT%,$COMPUTE_BUILD_TIMEOUT,g; s,%RUN_SSH%,$RUN_SSH,g; s,%SSH_USER%,$SSH_USER,g; s,%NETWORK_FOR_SSH%,$NETWORK_FOR_SSH,g; @@ -246,6 +252,8 @@ sed -e " s,%NETWORK_CATALOG_TYPE%,$NETWORK_CATALOG_TYPE,g; s,%NETWORK_API_VERSION%,$NETWORK_API_VERSION,g; s,%VOLUME_CATALOG_TYPE%,$VOLUME_CATALOG_TYPE,g; + s,%VOLUME_BUILD_INTERVAL%,$VOLUME_BUILD_INTERVAL,g; + s,%VOLUME_BUILD_TIMEOUT%,$VOLUME_BUILD_TIMEOUT,g; " -i $TEMPEST_CONF echo "Created tempest configuration file:" From 396a014b6fed2b4b12f03d34ecc96147b269389e Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Sun, 29 Jul 2012 04:28:47 -0400 Subject: [PATCH 628/967] RPC support for Quantum and devstack The patch does the following: 1. RPC support 2. Enables agent and service to run on different hosts (ensures that the file quantum.conf is copied) 3. Removes sudo for the quantum file copying 4. 
Uses iniset for plugin configuration Change-Id: I0f84ec72b97f0dd8d74ac5683efa2cda5be28dd5 --- stack.sh | 34 +++++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 11 deletions(-) diff --git a/stack.sh b/stack.sh index 20e3e0c5..ac076daf 100755 --- a/stack.sh +++ b/stack.sh @@ -1158,15 +1158,15 @@ if is_service_enabled quantum; then elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then iniset /$Q_PLUGIN_CONF_FILE AGENT target_v2_api True fi + Q_CONF_FILE=/etc/quantum/quantum.conf + cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE fi # Quantum service (for controller node) if is_service_enabled q-svc; then - Q_CONF_FILE=/etc/quantum/quantum.conf Q_API_PASTE_FILE=/etc/quantum/api-paste.ini Q_POLICY_FILE=/etc/quantum/policy.json - cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE @@ -1188,8 +1188,6 @@ if is_service_enabled q-svc; then iniset $Q_API_PASTE_FILE filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $Q_API_PASTE_FILE filter:authtoken admin_user $Q_ADMIN_USERNAME iniset $Q_API_PASTE_FILE filter:authtoken admin_password $SERVICE_PASSWORD - - screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" fi # Quantum agent (for compute nodes) @@ -1213,8 +1211,6 @@ if is_service_enabled q-agt; then sudo sed -i -e "s/^physical_interface = .*$/physical_interface = $QUANTUM_LB_PRIVATE_INTERFACE/g" /$Q_PLUGIN_CONF_FILE AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/linuxbridge/agent/linuxbridge_quantum_agent.py" fi - # Start up the quantum agent - screen_it q-agt "sudo python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" fi # Quantum DHCP @@ -1223,9 +1219,7 @@ if is_service_enabled q-dhcp; then Q_DHCP_CONF_FILE=/etc/quantum/dhcp_agent.ini - if [[ -e $QUANTUM_DIR/etc/dhcp_agent.ini ]]; then - sudo cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE - fi + cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE # Set verbose iniset $Q_DHCP_CONF_FILE DEFAULT verbose True @@ -1244,10 +1238,28 @@ if is_service_enabled q-dhcp; then elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver fi - # Start up the quantum agent - screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file=$Q_DHCP_CONF_FILE" fi +# Quantum RPC support - must be updated prior to starting any of the services +if is_service_enabled quantum; then + iniset $Q_CONF_FILE DEFAULT control_exchange quantum + if is_service_enabled qpid ; then + iniset $Q_CONF_FILE DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid + elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then + iniset $Q_CONF_FILE DEFAULT rabbit_host $RABBIT_HOST + iniset $Q_CONF_FILE DEFAULT rabbit_password $RABBIT_PASSWORD + fi +fi + +# Start the Quantum services +screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" + +# Start up the quantum agent +screen_it q-agt "sudo python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" + +# Start up the quantum agent +screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file=$Q_DHCP_CONF_FILE" + # Melange service if is_service_enabled m-svc; then if is_service_enabled mysql; then From 37dda8d7a559914bc492264dddfac0dd41e7ca84 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Wed, 8 Aug 2012 03:46:33 
-0400 Subject: [PATCH 629/967] Remove Quantum V1 support The update includes the following: 1. Removal of melange support (this is replaced by Quantum) 2. Removal of Quantum V1 support. This is all now pure V2. Change-Id: Ief3982fe8fa7402eef5db3e115df741c2cc9f4d1 --- stack.sh | 139 +++++++++++-------------------------------------------- stackrc | 8 ---- 2 files changed, 28 insertions(+), 119 deletions(-) diff --git a/stack.sh b/stack.sh index ac076daf..954db591 100755 --- a/stack.sh +++ b/stack.sh @@ -2,7 +2,7 @@ # ``stack.sh`` is an opinionated OpenStack developer installation. It # installs and configures various combinations of **Glance**, **Horizon**, -# **Keystone**, **Melange**, **Nova**, **Quantum** and **Swift** +# **Keystone**, **Nova**, **Quantum** and **Swift** # This script allows you to specify configuration options of what git # repositories to use, enabled services, network configuration and various @@ -251,8 +251,6 @@ SWIFT3_DIR=$DEST/swift3 SWIFTCLIENT_DIR=$DEST/python-swiftclient QUANTUM_DIR=$DEST/quantum QUANTUM_CLIENT_DIR=$DEST/python-quantumclient -MELANGE_DIR=$DEST/melange -MELANGECLIENT_DIR=$DEST/python-melangeclient # Default Quantum Plugin Q_PLUGIN=${Q_PLUGIN:-openvswitch} @@ -261,20 +259,12 @@ Q_PORT=${Q_PORT:-9696} # Default Quantum Host Q_HOST=${Q_HOST:-localhost} # Which Quantum API nova should use -NOVA_USE_QUANTUM_API=${NOVA_USE_QUANTUM_API:-v1} # Default admin username Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum} # Default auth strategy Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} -# Default Melange Port -M_PORT=${M_PORT:-9898} -# Default Melange Host -M_HOST=${M_HOST:-localhost} -# Melange MAC Address Range -M_MAC_RANGE=${M_MAC_RANGE:-FE-EE-DD-00-00-00/24} - # Name of the lvm volume group to use/create for iscsi volumes VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} @@ -419,14 +409,6 @@ FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT} # # With Quantum networking the NET_MAN variable is ignored. -# Using Melange IPAM: -# -# Make sure that quantum and melange are enabled in ENABLED_SERVICES. -# If they are then the melange IPAM lib will be set in the QuantumManager. -# Adding m-svc to ENABLED_SERVICES will start the melange service on this -# host. 
- - # MySQL & (RabbitMQ or Qpid) # -------------------------- @@ -785,13 +767,6 @@ if is_service_enabled quantum; then # quantum git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH fi -if is_service_enabled m-svc; then - # melange - git_clone $MELANGE_REPO $MELANGE_DIR $MELANGE_BRANCH -fi -if is_service_enabled melange; then - git_clone $MELANGECLIENT_REPO $MELANGECLIENT_DIR $MELANGECLIENT_BRANCH -fi if is_service_enabled cinder; then install_cinder fi @@ -829,12 +804,6 @@ if is_service_enabled quantum; then setup_develop $QUANTUM_CLIENT_DIR setup_develop $QUANTUM_DIR fi -if is_service_enabled m-svc; then - setup_develop $MELANGE_DIR -fi -if is_service_enabled melange; then - setup_develop $MELANGECLIENT_DIR -fi if is_service_enabled cinder; then configure_cinder fi @@ -1116,20 +1085,12 @@ if is_service_enabled quantum; then Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch Q_PLUGIN_CONF_FILENAME=ovs_quantum_plugin.ini Q_DB_NAME="ovs_quantum" - if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then - Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin" - elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then - Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2" - fi + Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2" elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini Q_DB_NAME="quantum_linux_bridge" - if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then - Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.LinuxBridgePlugin.LinuxBridgePlugin" - elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then - Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2" - fi + Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2" else echo "Unknown Quantum plugin '$Q_PLUGIN'.. exiting" exit 1 @@ -1153,11 +1114,7 @@ if is_service_enabled quantum; then sudo sed -i -e "s/.*enable_tunneling = .*$/enable_tunneling = $OVS_ENABLE_TUNNELING/g" /$Q_PLUGIN_CONF_FILE fi - if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then - iniset /$Q_PLUGIN_CONF_FILE AGENT target_v2_api False - elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then - iniset /$Q_PLUGIN_CONF_FILE AGENT target_v2_api True - fi + iniset /$Q_PLUGIN_CONF_FILE AGENT target_v2_api True Q_CONF_FILE=/etc/quantum/quantum.conf cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE fi @@ -1260,29 +1217,6 @@ screen_it q-agt "sudo python $AGENT_BINARY --config-file $Q_CONF_FILE --config-f # Start up the quantum agent screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file=$Q_DHCP_CONF_FILE" -# Melange service -if is_service_enabled m-svc; then - if is_service_enabled mysql; then - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS melange;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE melange CHARACTER SET utf8;' - else - echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." 
- exit 1 - fi - MELANGE_CONFIG_FILE=$MELANGE_DIR/etc/melange/melange.conf - cp $MELANGE_CONFIG_FILE.sample $MELANGE_CONFIG_FILE - sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/melange?charset=utf8/g" $MELANGE_CONFIG_FILE - cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-manage --config-file=$MELANGE_CONFIG_FILE db_sync - screen_it m-svc "cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-server --config-file=$MELANGE_CONFIG_FILE" - echo "Waiting for melange to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9898; do sleep 1; done"; then - echo "melange-server did not start" - exit 1 - fi - melange mac_address_range create cidr=$M_MAC_RANGE -fi - - # Nova # ---- @@ -1827,28 +1761,13 @@ add_nova_opt "fixed_range=$FIXED_RANGE" add_nova_opt "s3_host=$SERVICE_HOST" add_nova_opt "s3_port=$S3_SERVICE_PORT" if is_service_enabled quantum; then - if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then - add_nova_opt "network_manager=nova.network.quantum.manager.QuantumManager" - add_nova_opt "quantum_connection_host=$Q_HOST" - add_nova_opt "quantum_connection_port=$Q_PORT" - add_nova_opt "quantum_use_dhcp=True" - - if is_service_enabled melange; then - add_nova_opt "quantum_ipam_lib=nova.network.quantum.melange_ipam_lib" - add_nova_opt "use_melange_mac_generation=True" - add_nova_opt "melange_host=$M_HOST" - add_nova_opt "melange_port=$M_PORT" - fi - - elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then - add_nova_opt "network_api_class=nova.network.quantumv2.api.API" - add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME" - add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD" - add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" - add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY" - add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME" - add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT" - fi + add_nova_opt "network_api_class=nova.network.quantumv2.api.API" + add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME" + add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD" + add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" + add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY" + add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME" + add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT" if [[ "$Q_PLUGIN" = "openvswitch" ]]; then NOVA_VIF_DRIVER="nova.virt.libvirt.vif.LibvirtOpenVswitchDriver" @@ -2148,25 +2067,23 @@ fi # If we're using Quantum (i.e. q-svc is enabled), network creation has to # happen after we've started the Quantum service. -if is_service_enabled mysql && is_service_enabled nova; then - if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then - # Create a small network - $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS - - # Create some floating ips - $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE - - # Create a second pool - $NOVA_DIR/bin/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL - elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then - TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1) - - # Create a small network - # Since quantum command is executed in admin context at this point, - # --tenant_id needs to be specified. 
- NET_ID=$(quantum net-create --tenant_id $TENANT_ID net1 | grep ' id ' | get_field 2) - quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE - fi +if is_service_enabled q-svc; then + TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1) + + # Create a small network + # Since quantum command is executed in admin context at this point, + # --tenant_id needs to be specified. + NET_ID=$(quantum net-create --tenant_id $TENANT_ID net1 | grep ' id ' | get_field 2) + quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE +elif is_service_enabled mysql && is_service_enabled nova; then + # Create a small network + $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS + + # Create some floating ips + $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE + + # Create a second pool + $NOVA_DIR/bin/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL fi # Launching nova-compute should be as simple as running ``nova-compute`` but diff --git a/stackrc b/stackrc index bd4fe14c..c906f951 100644 --- a/stackrc +++ b/stackrc @@ -91,14 +91,6 @@ QUANTUM_CLIENT_BRANCH=master TEMPEST_REPO=${GIT_BASE}/openstack/tempest.git TEMPEST_BRANCH=master -# melange service -MELANGE_REPO=${GIT_BASE}/openstack/melange.git -MELANGE_BRANCH=master - -# python melange client library -MELANGECLIENT_REPO=${GIT_BASE}/openstack/python-melangeclient.git -MELANGECLIENT_BRANCH=master - # Nova hypervisor configuration. We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can # also install an **LXC** or **OpenVZ** based system. From 84394b9f8f0894eb174b3d002ae833c33aa37fab Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Mon, 6 Aug 2012 10:07:43 +0000 Subject: [PATCH 630/967] Use keystoneauth middleware from swift. - We have moved swift to keystone middleware to swift use it. - Should be referenced now as keystoneauth. - Make swift-init silently pass in unstack even when swift was not yet installed. Change-Id: Ibf9443608b7aa81be5f48f555e95ff5f4c8065bd --- stack.sh | 29 +++++++++++++---------------- unstack.sh | 2 +- 2 files changed, 14 insertions(+), 17 deletions(-) diff --git a/stack.sh b/stack.sh index 6b8362c6..76b37829 100755 --- a/stack.sh +++ b/stack.sh @@ -1510,7 +1510,7 @@ if is_service_enabled swift; then # which has some default username and password if you have # configured keystone it will checkout the directory. 
if is_service_enabled key; then - swift_auth_server+="authtoken keystone" + swift_auth_server+="authtoken keystoneauth" else swift_auth_server=tempauth fi @@ -1540,23 +1540,20 @@ if is_service_enabled swift; then iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true - cat <>${SWIFT_CONFIG_PROXY_SERVER} + # Configure Keystone + sed -i '/^# \[filter:authtoken\]/,/^# \[filter:keystoneauth\]$/ s/^#[ \t]*//' ${SWIFT_CONFIG_PROXY_SERVER} + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_port $KEYSTONE_AUTH_PORT + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD -[filter:keystone] -paste.filter_factory = keystone.middleware.swift_auth:filter_factory -operator_roles = Member,admin + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin" -[filter:authtoken] -paste.filter_factory = keystone.middleware.auth_token:filter_factory -auth_host = ${KEYSTONE_AUTH_HOST} -auth_port = ${KEYSTONE_AUTH_PORT} -auth_protocol = ${KEYSTONE_AUTH_PROTOCOL} -auth_uri = ${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/ -admin_tenant_name = ${SERVICE_TENANT_NAME} -admin_user = swift -admin_password = ${SERVICE_PASSWORD} -delay_auth_decision = 1 -EOF if is_service_enabled swift3;then cat <>${SWIFT_CONFIG_PROXY_SERVER} # NOTE(chmou): s3token middleware is not updated yet to use only diff --git a/unstack.sh b/unstack.sh index 64de9150..17752a8b 100755 --- a/unstack.sh +++ b/unstack.sh @@ -37,7 +37,7 @@ fi # Swift runs daemons if is_service_enabled swift; then - swift-init all stop + swift-init all stop 2>/dev/null || true fi # Apache has the WSGI processes From 7b0f002b7cf38b261e645ebb0adcbd0679a213d7 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 10 Aug 2012 22:31:19 +0000 Subject: [PATCH 631/967] Turn off caching of the token by default Change-Id: I176f4595370b37e7928a96dd89629830aeae6dae --- openrc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/openrc b/openrc index 4430e829..08ef98be 100644 --- a/openrc +++ b/openrc @@ -41,6 +41,10 @@ export OS_USERNAME=${OS_USERNAME:-demo} # or NOVA_PASSWORD. export OS_PASSWORD=${ADMIN_PASSWORD:-secrete} +# Don't put the key into a keyring by default. Testing for development is much +# easier with this off. +export OS_NO_CACHE=${OS_NO_CACHE:-1} + # Set api HOST_IP endpoint. SERVICE_HOST may also be used to specify the endpoint, # which is convenient for some localrc configurations. HOST_IP=${HOST_IP:-127.0.0.1} From c29e3df0f1267f612b112e0bd23da6facafd79d6 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Sun, 12 Aug 2012 15:25:17 +1200 Subject: [PATCH 632/967] Fix for glance image-create of *.qcow2 and *.img files Currently *.qcow2 and *.img files in IMAGE_URLS are piped into glance using zcat, which fails and leaves the image in a perpetual SAVING state. This change makes only *.img.gz files use zcat. 
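As a rough sketch of the intended behaviour (the image file names below are placeholders, and $TOKEN and $GLANCE_HOSTPORT are assumed to be set as elsewhere in stack.sh):

    # *.img.gz: decompress on the fly while streaming into glance (placeholder file name)
    glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create \
        --name "example" --is-public=True --container-format=bare \
        --disk-format raw < <(zcat --force "$FILES/example.img.gz")
    # *.qcow2 and plain *.img: stream the file to glance unmodified (placeholder file name)
    glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create \
        --name "example" --is-public=True --container-format=bare \
        --disk-format qcow2 < "$FILES/example.qcow2"
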
Change-Id: I6e02ccff93a42bbc149d8f1058bba7825c910e05 --- stack.sh | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index ccbbc6b8..3221a6c4 100755 --- a/stack.sh +++ b/stack.sh @@ -2177,6 +2177,8 @@ if is_service_enabled g-reg; then RAMDISK="" DISK_FORMAT="" CONTAINER_FORMAT="" + UNPACK="" + case "$IMAGE_FNAME" in *.tar.gz|*.tgz) # Extract ami and aki files @@ -2208,6 +2210,7 @@ if is_service_enabled g-reg; then IMAGE_NAME=$(basename "$IMAGE" ".img.gz") DISK_FORMAT=raw CONTAINER_FORMAT=bare + UNPACK=zcat ;; *.qcow2) IMAGE="$FILES/${IMAGE_FNAME}" @@ -2219,7 +2222,11 @@ if is_service_enabled g-reg; then esac if [ "$CONTAINER_FORMAT" = "bare" ]; then - glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}") + if [ "$UNPACK" = "zcat" ]; then + glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}") + else + glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < ${IMAGE} + fi else # Use glance client to add the kernel the root filesystem. # We parse the results of the first upload to get the glance ID of the From 8227a7757295d98e83b272e09b071a6a75b8f446 Mon Sep 17 00:00:00 2001 From: Bob Kukura Date: Mon, 13 Aug 2012 02:29:54 -0400 Subject: [PATCH 633/967] Update stack.sh for Quantum linuxbridge plugin changes. With support for multiple physical networks being added to the Quantum linuxbridge plugin via https://review.openstack.org/#/c/10938/, the configuration of physical interfaces for the agent has changed. The physical_interface_mappings variable, a list of mappings between physical network names and physical interfaces, replaces the physical_interface variable. Also, all remnants of the V1 quantum API have been removed, so the target_v2_api variable no longer needs to be set. 
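As a sketch of the change in the plugin ini (the physical network and interface names below are placeholders, not required values):

    # old, removed setting: a single interface
    #   physical_interface = eth1
    # new setting: a mapping of physical network name to interface, e.g.
    #   [LINUX_BRIDGE]
    #   physical_interface_mappings = physnet1:eth1
    # devstack now writes this mapping with iniset, as in the hunk below.
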
Change-Id: I5f9be772f71b5ce3fd92eb258e0131705d341647 --- stack.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index ccbbc6b8..b091fe3c 100755 --- a/stack.sh +++ b/stack.sh @@ -1133,7 +1133,6 @@ if is_service_enabled quantum; then sudo sed -i -e "s/.*enable_tunneling = .*$/enable_tunneling = $OVS_ENABLE_TUNNELING/g" /$Q_PLUGIN_CONF_FILE fi - iniset /$Q_PLUGIN_CONF_FILE AGENT target_v2_api True Q_CONF_FILE=/etc/quantum/quantum.conf cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE fi @@ -1184,7 +1183,7 @@ if is_service_enabled q-agt; then # Start up the quantum <-> linuxbridge agent # set the default network interface QUANTUM_LB_PRIVATE_INTERFACE=${QUANTUM_LB_PRIVATE_INTERFACE:-$GUEST_INTERFACE_DEFAULT} - sudo sed -i -e "s/^physical_interface = .*$/physical_interface = $QUANTUM_LB_PRIVATE_INTERFACE/g" /$Q_PLUGIN_CONF_FILE + iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings default:$QUANTUM_LB_PRIVATE_INTERFACE AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/linuxbridge/agent/linuxbridge_quantum_agent.py" fi fi From 2053753baf197bd1f7f51b361eda24910b3f7c94 Mon Sep 17 00:00:00 2001 From: Mark McClain Date: Mon, 13 Aug 2012 11:34:01 -0400 Subject: [PATCH 634/967] Update stack.sh to pass quantum.conf to DHCP agent The DHCP agent now utiliizes the the main Quantum configuration file to get RPC information. Change-Id: Ia42350d7d18ff25a77712a43ff20bc4669174380 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index ccbbc6b8..7c709cb3 100755 --- a/stack.sh +++ b/stack.sh @@ -1234,7 +1234,7 @@ screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --con screen_it q-agt "sudo python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" # Start up the quantum agent -screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file=$Q_DHCP_CONF_FILE" +screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file $Q_CONF_FILE --config-file=$Q_DHCP_CONF_FILE" # Nova # ---- From 93361643c6f06319fd3b6b7bb8aff48eb0473edd Mon Sep 17 00:00:00 2001 From: "John H. Tran" Date: Thu, 26 Jul 2012 11:22:05 -0700 Subject: [PATCH 635/967] Add ceilometer Implements bug 1023972. Add mongodb and pymongo installation. Change-Id: I631dc8a37f3269e55112ec5a5f375d3a2fd3a604 --- files/apts/ceilometer-collector | 2 ++ files/rpms/ceilometer-collector | 2 ++ functions | 5 +++ lib/ceilometer | 60 +++++++++++++++++++++++++++++++++ stack.sh | 8 +++++ stackrc | 4 +++ 6 files changed, 81 insertions(+) create mode 100644 files/apts/ceilometer-collector create mode 100644 files/rpms/ceilometer-collector create mode 100644 lib/ceilometer diff --git a/files/apts/ceilometer-collector b/files/apts/ceilometer-collector new file mode 100644 index 00000000..c67ade3c --- /dev/null +++ b/files/apts/ceilometer-collector @@ -0,0 +1,2 @@ +python-pymongo +mongodb-server diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector new file mode 100644 index 00000000..c5c855c0 --- /dev/null +++ b/files/rpms/ceilometer-collector @@ -0,0 +1,2 @@ +mongodb-server +pymongo diff --git a/functions b/functions index 7a7406d4..cd062331 100644 --- a/functions +++ b/functions @@ -129,6 +129,10 @@ function get_packages() { if [[ ! $file_to_parse =~ cinder ]]; then file_to_parse="${file_to_parse} cinder" fi + elif [[ $service == ceilometer-* ]]; then + if [[ ! $file_to_parse =~ ceilometer ]]; then + file_to_parse="${file_to_parse} ceilometer" + fi elif [[ $service == n-* ]]; then if [[ ! 
$file_to_parse =~ nova ]]; then file_to_parse="${file_to_parse} nova" @@ -375,6 +379,7 @@ function is_service_enabled() { [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0 [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0 + [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0 [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0 [[ ${service} == "quantum" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 done diff --git a/lib/ceilometer b/lib/ceilometer new file mode 100644 index 00000000..069e5395 --- /dev/null +++ b/lib/ceilometer @@ -0,0 +1,60 @@ +# lib/ceilometer +# Install and start Ceilometer service + +# Dependencies: +# - functions + +# stack.sh +# --------- +# install_XXX +# configure_XXX +# init_XXX +# start_XXX +# stop_XXX +# cleanup_XXX + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following along as the install occurs. +set -o xtrace + + +# Defaults +# -------- + +# set up default directories +CEILOMETER_DIR=$DEST/ceilometer +CEILOMETER_CONF_DIR=/etc/ceilometer +CEILOMETER_AGENT_CONF=$CEILOMETER_CONF_DIR/ceilometer-agent.conf +CEILOMETER_COLLECTOR_CONF=$CEILOMETER_CONF_DIR/ceilometer-collector.conf + +# cleanup_ceilometer() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_ceilometer() { + # This function intentionally left blank + : +} + +# configure_ceilometer() - Set config files, create data dirs, etc +function configure_ceilometer() { + setup_develop $CEILOMETER_DIR + if [ ! -d $CEILOMETER_CONF_DIR ]; then + sudo mkdir -m 755 -p $CEILOMETER_CONF_DIR + fi + sudo chown `whoami` $CEILOMETER_CONF_DIR + + # ceilometer confs are copy of /etc/nova/nova.conf which must exist first + grep -v format_string $NOVA_CONF_DIR/$NOVA_CONF > $CEILOMETER_AGENT_CONF + grep -v format_string $NOVA_CONF_DIR/$NOVA_CONF > $CEILOMETER_COLLECTOR_CONF +} + +# install_ceilometer() - Collect source and prepare +function install_ceilometer() { + git_clone $CEILOMETER_REPO $CEILOMETER_DIR $CEILOMETER_BRANCH +} + +# start_ceilometer() - Start running processes, including screen +function start_ceilometer() { + screen_it ceilometer-acompute "cd $CEILOMETER_DIR && $CEILOMETER_DIR/bin/ceilometer-agent-compute --config-file $CEILOMETER_AGENT_CONF" + screen_it ceilometer-acentral "cd $CEILOMETER_DIR && $CEILOMETER_DIR/bin/ceilometer-agent-central --config-file $CEILOMETER_AGENT_CONF" + screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_DIR/bin/ceilometer-collector --config-file $CEILOMETER_COLLECTOR_CONF" +} diff --git a/stack.sh b/stack.sh index ccbbc6b8..1619dfcf 100755 --- a/stack.sh +++ b/stack.sh @@ -240,6 +240,7 @@ sudo chown `whoami` $DATA_DIR # Get project function libraries source $TOP_DIR/lib/cinder +source $TOP_DIR/lib/ceilometer # Set the destination directories for openstack projects NOVA_DIR=$DEST/nova @@ -789,6 +790,9 @@ fi if is_service_enabled cinder; then install_cinder fi +if is_service_enabled ceilometer; then + install_ceilometer +fi # Initialization # ============== @@ -2122,6 +2126,10 @@ screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" if is_service_enabled cinder; then start_cinder fi +if is_service_enabled ceilometer; then + configure_ceilometer + start_ceilometer +fi screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log" screen_it swift 
"cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v" diff --git a/stackrc b/stackrc index c906f951..cd70284d 100644 --- a/stackrc +++ b/stackrc @@ -24,6 +24,10 @@ NOVA_ENABLED_APIS=ec2,osapi_compute,osapi_volume,metadata # Another option is http://review.openstack.org/p GIT_BASE=https://github.com +# metering service +CEILOMETER_REPO=https://github.com/stackforge/ceilometer.git +CEILOMETER_BRANCH=master + # volume service CINDER_REPO=${GIT_BASE}/openstack/cinder CINDER_BRANCH=master From 5a4039a4166c1d869dd1f7bd363258a2cce3334c Mon Sep 17 00:00:00 2001 From: Renuka Apte Date: Thu, 9 Aug 2012 17:45:17 -0700 Subject: [PATCH 636/967] Fix nova volume install for devstack This changes does mkdir -p /etc/tgt/conf.d to avoid the following error, when nova volume is enabled (perhaps limited to xenapi) tee: /etc/tgt/conf.d/nova.conf: No such file or directory Change-Id: Ia5804c7ff50fbf1bc41e14ad13923c9261b000e0 --- stack.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/stack.sh b/stack.sh index b091fe3c..c7d83f55 100755 --- a/stack.sh +++ b/stack.sh @@ -1746,6 +1746,7 @@ elif is_service_enabled n-vol; then # Setup the tgt configuration file if [[ ! -f /etc/tgt/conf.d/nova.conf ]]; then + sudo mkdir -p /etc/tgt/conf.d echo "include $NOVA_DIR/volumes/*" | sudo tee /etc/tgt/conf.d/nova.conf fi From 2700492c349c32a4e3a93a327231256c8db50ec5 Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Wed, 15 Aug 2012 16:38:29 -0700 Subject: [PATCH 637/967] Fix tools/configure_tempest FLAVOR_LIST Correct the grep expression in tools/configure_tempest.sh which generates the FLAVORS that tempest will boot. Fixes bug 1037347 Change-Id: I50352fd23ca1e5332592bb343821c6355b274e9f --- tools/configure_tempest.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 5be709aa..d5022487 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -139,9 +139,9 @@ IFS="$(echo -e "\n\r")" FLAVORS="" for line in $FLAVOR_LINES; do if [ -z $DEFAULT_INSTANCE_TYPE ]; then - FLAVORS="$FLAVORS `echo $line | grep -v "^\(ID\|+--\)" | cut -d' ' -f2`" + FLAVORS="$FLAVORS `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" else - FLAVORS="$FLAVORS `echo $line | grep -v "^\(ID\|+--\)" | grep "$DEFAULT_INSTANCE_TYPE" | cut -d' ' -f2`" + FLAVORS="$FLAVORS `echo $line | grep -v "^\(|\s*ID\|+--\)" | grep "$DEFAULT_INSTANCE_TYPE" | cut -d' ' -f2`" fi done IFS=" " From 1057bffa37cd2cbda088f747cd81122b42ff22a6 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Fri, 3 Aug 2012 11:42:51 +0000 Subject: [PATCH 638/967] Add log colouring to cinder. - based on vish's log coulouring version for nova. 
Change-Id: I9d4251b4292188c0174ebac1dcd98318df44c0e3 --- lib/cinder | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/cinder b/lib/cinder index adcc52c6..796c1071 100644 --- a/lib/cinder +++ b/lib/cinder @@ -114,6 +114,13 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD fi + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + # Add color to logging output + iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s %(color)s%(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $CINDER_CONF DEFAULT logging_default_format_string "%(asctime)s %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $CINDER_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" + iniset $CINDER_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s TRACE %(name)s %(instance)s" + fi } # init_cinder() - Initialize database and volume group From ca0e3d0230879fbad55472f742d3166a5579208c Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 13 Apr 2012 15:58:37 -0500 Subject: [PATCH 639/967] Add tools/upload_image.sh * moves the image upload logic from stack.sh to functions upload_image() * tools/upload_image.sh which is a thin wrapper around upload_image() Change-Id: I8746beebf50cf623b6fe903d6497e66e3fa5dda6 --- functions | 101 +++++++++++++++++++++++++++++++++++++++++- stack.sh | 92 ++------------------------------------ tools/upload_image.sh | 42 ++++++++++++++++++ 3 files changed, 146 insertions(+), 89 deletions(-) create mode 100755 tools/upload_image.sh diff --git a/functions b/functions index f61aed5c..386af090 100644 --- a/functions +++ b/functions @@ -419,7 +419,7 @@ function is_service_enabled() { # remove extra commas from the input string (ENABLED_SERVICES) function _cleanup_service_list () { - echo "$1" | sed -e ' + echo "$1" | sed -e ' s/,,/,/g; s/^,//; s/,$// @@ -618,6 +618,105 @@ function trueorfalse() { } +# Retrieve an image from a URL and upload into Glance +# Uses the following variables: +# **FILES** must be set to the cache dir +# **GLANCE_HOSTPORT** +# upload_image image-url glance-token +function upload_image() { + local image_url=$1 + local token=$2 + + # Create a directory for the downloaded image tarballs. + mkdir -p $FILES/images + + # Downloads the image (uec ami+aki style), then extracts it. + IMAGE_FNAME=`basename "$image_url"` + if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then + wget -c $image_url -O $FILES/$IMAGE_FNAME + if [[ $? 
-ne 0 ]]; then + echo "Not found: $image_url" + return + fi + fi + + # OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading + if [[ "$image_url" =~ 'openvz' ]]; then + IMAGE="$FILES/${IMAGE_FNAME}" + IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" + glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format ami --disk-format ami < "${IMAGE}" + return + fi + + KERNEL="" + RAMDISK="" + DISK_FORMAT="" + CONTAINER_FORMAT="" + UNPACK="" + case "$IMAGE_FNAME" in + *.tar.gz|*.tgz) + # Extract ami and aki files + [ "${IMAGE_FNAME%.tar.gz}" != "$IMAGE_FNAME" ] && + IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" || + IMAGE_NAME="${IMAGE_FNAME%.tgz}" + xdir="$FILES/images/$IMAGE_NAME" + rm -Rf "$xdir"; + mkdir "$xdir" + tar -zxf $FILES/$IMAGE_FNAME -C "$xdir" + KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do + [ -f "$f" ] && echo "$f" && break; done; true) + RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do + [ -f "$f" ] && echo "$f" && break; done; true) + IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do + [ -f "$f" ] && echo "$f" && break; done; true) + if [[ -z "$IMAGE_NAME" ]]; then + IMAGE_NAME=$(basename "$IMAGE" ".img") + fi + ;; + *.img) + IMAGE="$FILES/$IMAGE_FNAME"; + IMAGE_NAME=$(basename "$IMAGE" ".img") + DISK_FORMAT=raw + CONTAINER_FORMAT=bare + ;; + *.img.gz) + IMAGE="$FILES/${IMAGE_FNAME}" + IMAGE_NAME=$(basename "$IMAGE" ".img.gz") + DISK_FORMAT=raw + CONTAINER_FORMAT=bare + UNPACK=zcat + ;; + *.qcow2) + IMAGE="$FILES/${IMAGE_FNAME}" + IMAGE_NAME=$(basename "$IMAGE" ".qcow2") + DISK_FORMAT=qcow2 + CONTAINER_FORMAT=bare + ;; + *) echo "Do not know what to do with $IMAGE_FNAME"; false;; + esac + + if [ "$CONTAINER_FORMAT" = "bare" ]; then + if [ "$UNPACK" = "zcat" ]; then + glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}") + else + glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}" + fi + else + # Use glance client to add the kernel the root filesystem. + # We parse the results of the first upload to get the glance ID of the + # kernel for use when uploading the root filesystem. + KERNEL_ID=""; RAMDISK_ID=""; + if [ -n "$KERNEL" ]; then + KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --public --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2) + fi + if [ -n "$RAMDISK" ]; then + RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --public --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) + fi + glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --public --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" + fi +} + + # yum wrapper to set arguments correctly # yum_install package [package ...] function yum_install() { diff --git a/stack.sh b/stack.sh index 2eef0c66..70685b06 100755 --- a/stack.sh +++ b/stack.sh @@ -2141,21 +2141,17 @@ is_service_enabled swift3 || \ # Upload an image to glance. 
# -# The default image is a small ***TTY*** testing image, which lets you login -# the username/password of root/password. +# The default image is cirros, a small testing image, which lets you login as root # -# TTY also uses ``cloud-init``, supporting login via keypair and sending scripts as +# cirros also uses ``cloud-init``, supporting login via keypair and sending scripts as # userdata. See https://help.ubuntu.com/community/CloudInit for more on cloud-init # # Override ``IMAGE_URLS`` with a comma-separated list of uec images. # -# * **natty**: http://uec-images.ubuntu.com/natty/current/natty-server-cloudimg-amd64.tar.gz # * **oneiric**: http://uec-images.ubuntu.com/oneiric/current/oneiric-server-cloudimg-amd64.tar.gz +# * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz if is_service_enabled g-reg; then - # Create a directory for the downloaded image tarballs. - mkdir -p $FILES/images - TOKEN=$(keystone token-get | grep ' id ' | get_field 2) # Option to upload legacy ami-tty, which works with xenserver @@ -2164,87 +2160,7 @@ if is_service_enabled g-reg; then fi for image_url in ${IMAGE_URLS//,/ }; do - # Downloads the image (uec ami+aki style), then extracts it. - IMAGE_FNAME=`basename "$image_url"` - if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then - wget -c $image_url -O $FILES/$IMAGE_FNAME - fi - - # OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading - if [[ "$image_url" =~ 'openvz' ]]; then - IMAGE="$FILES/${IMAGE_FNAME}" - IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" - glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format ami --disk-format ami < "$IMAGE" - continue - fi - - KERNEL="" - RAMDISK="" - DISK_FORMAT="" - CONTAINER_FORMAT="" - UNPACK="" - - case "$IMAGE_FNAME" in - *.tar.gz|*.tgz) - # Extract ami and aki files - [ "${IMAGE_FNAME%.tar.gz}" != "$IMAGE_FNAME" ] && - IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" || - IMAGE_NAME="${IMAGE_FNAME%.tgz}" - xdir="$FILES/images/$IMAGE_NAME" - rm -Rf "$xdir"; - mkdir "$xdir" - tar -zxf $FILES/$IMAGE_FNAME -C "$xdir" - KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) - RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) - IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do - [ -f "$f" ] && echo "$f" && break; done; true) - if [[ -z "$IMAGE_NAME" ]]; then - IMAGE_NAME=$(basename "$IMAGE" ".img") - fi - ;; - *.img) - IMAGE="$FILES/$IMAGE_FNAME"; - IMAGE_NAME=$(basename "$IMAGE" ".img") - DISK_FORMAT=raw - CONTAINER_FORMAT=bare - ;; - *.img.gz) - IMAGE="$FILES/${IMAGE_FNAME}" - IMAGE_NAME=$(basename "$IMAGE" ".img.gz") - DISK_FORMAT=raw - CONTAINER_FORMAT=bare - UNPACK=zcat - ;; - *.qcow2) - IMAGE="$FILES/${IMAGE_FNAME}" - IMAGE_NAME=$(basename "$IMAGE" ".qcow2") - DISK_FORMAT=qcow2 - CONTAINER_FORMAT=bare - ;; - *) echo "Do not know what to do with $IMAGE_FNAME"; false;; - esac - - if [ "$CONTAINER_FORMAT" = "bare" ]; then - if [ "$UNPACK" = "zcat" ]; then - glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}") - else - 
glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < ${IMAGE} - fi - else - # Use glance client to add the kernel the root filesystem. - # We parse the results of the first upload to get the glance ID of the - # kernel for use when uploading the root filesystem. - KERNEL_ID=""; RAMDISK_ID=""; - if [ -n "$KERNEL" ]; then - KERNEL_ID=$(glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --is-public=True --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2) - fi - if [ -n "$RAMDISK" ]; then - RAMDISK_ID=$(glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --is-public=True --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) - fi - glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --is-public=True --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" - fi + upload_image $image_url $TOKEN done fi diff --git a/tools/upload_image.sh b/tools/upload_image.sh new file mode 100755 index 00000000..dd21c9f2 --- /dev/null +++ b/tools/upload_image.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# upload_image.sh - Retrieve and upload an image into Glance +# +# upload_image.sh +# +# Assumes credentials are set via OS_* environment variables + +function usage { + echo "$0 - Retrieve and upload an image into Glance" + echo "" + echo "Usage: $0 [...]" + echo "" + echo "Assumes credentials are set via OS_* environment variables" + exit 1 +} + +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc "" "" "" "" + +# Find the cache dir +FILES=$TOP_DIR/files + +if [[ -z "$1" ]]; then + usage +fi + +# Get a token to authenticate to glance +TOKEN=$(keystone token-get | grep ' id ' | get_field 2) + +# Glance connection info. Note the port must be specified. +GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_HOST:9292} + +for IMAGE in "$*"; do + upload_image $IMAGE $TOKEN +done From 9fbeeddc3b6ea28c06d5468b0bc28966d4fbc6ce Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 17 Aug 2012 12:52:27 -0400 Subject: [PATCH 640/967] Support the possibility of using entry-points. If console scripts are generated via entry-points, they will go into /usr/local/bin on python setup.py develop and they will not be found in /opt/stack/$PROJECT/bin any more. This patch supports a transition to entry-points console scripts, but should still allow the old thing too. Change-Id: I816f5f796ad00ac55a8352743ba01723df140072 --- exercises/quantum.sh | 5 ++++- lib/ceilometer | 12 +++++++++--- lib/cinder | 13 +++++++++---- stack.sh | 29 ++++++++++++++++++----------- 4 files changed, 40 insertions(+), 19 deletions(-) diff --git a/exercises/quantum.sh b/exercises/quantum.sh index e19a78e8..33ae14ea 100755 --- a/exercises/quantum.sh +++ b/exercises/quantum.sh @@ -75,7 +75,10 @@ OVS_HOSTS=${DEFAULT_OVS_HOSTS:-"localhost"} #------------------------------------------------------------------------------ # Nova settings. 
#------------------------------------------------------------------------------ -NOVA_MANAGE=/opt/stack/nova/bin/nova-manage +if [ -f /opt/stack/nova/bin/nova-manage ] ; then + NOVA_MANAGE=/opt/stack/nova/bin/nova-manage +else + NOVA_MANAGE=/usr/local/bin/nova-manage NOVA=/usr/local/bin/nova NOVA_CONF=/etc/nova/nova.conf diff --git a/lib/ceilometer b/lib/ceilometer index 069e5395..4c3bb52a 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -23,6 +23,12 @@ set -o xtrace # set up default directories CEILOMETER_DIR=$DEST/ceilometer +# Support potential entry-points console scripts +if [ -d $CEILOMETER_DIR/bin ] ; then + CEILOMETER_BIN_DIR=$CEILOMETER_DIR/bin +else + CEILOMETER_BIN_DIR=/usr/local/bin +fi CEILOMETER_CONF_DIR=/etc/ceilometer CEILOMETER_AGENT_CONF=$CEILOMETER_CONF_DIR/ceilometer-agent.conf CEILOMETER_COLLECTOR_CONF=$CEILOMETER_CONF_DIR/ceilometer-collector.conf @@ -54,7 +60,7 @@ function install_ceilometer() { # start_ceilometer() - Start running processes, including screen function start_ceilometer() { - screen_it ceilometer-acompute "cd $CEILOMETER_DIR && $CEILOMETER_DIR/bin/ceilometer-agent-compute --config-file $CEILOMETER_AGENT_CONF" - screen_it ceilometer-acentral "cd $CEILOMETER_DIR && $CEILOMETER_DIR/bin/ceilometer-agent-central --config-file $CEILOMETER_AGENT_CONF" - screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_DIR/bin/ceilometer-collector --config-file $CEILOMETER_COLLECTOR_CONF" + screen_it ceilometer-acompute "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_AGENT_CONF" + screen_it ceilometer-acentral "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_AGENT_CONF" + screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_COLLECTOR_CONF" } diff --git a/lib/cinder b/lib/cinder index 796c1071..525b6c6e 100644 --- a/lib/cinder +++ b/lib/cinder @@ -25,6 +25,11 @@ set -o xtrace # set up default directories CINDER_DIR=$DEST/cinder +if [ -d $CINDER_DIR/bin ] ; then + CINDER_BIN_DIR=$CINDER_DIR/bin +else + CINDER_BIN_DIR=/usr/local/bin +fi CINDERCLIENT_DIR=$DEST/python-cinderclient CINDER_CONF_DIR=/etc/cinder CINDER_CONF=$CINDER_CONF_DIR/cinder.conf @@ -134,7 +139,7 @@ function init_cinder() { mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE cinder;' # (re)create cinder database - $CINDER_DIR/bin/cinder-manage db sync + $CINDER_BIN_DIR/cinder-manage db sync fi if is_service_enabled c-vol; then @@ -198,9 +203,9 @@ function start_cinder() { fi fi - screen_it c-api "cd $CINDER_DIR && $CINDER_DIR/bin/cinder-api --config-file $CINDER_CONF" - screen_it c-vol "cd $CINDER_DIR && $CINDER_DIR/bin/cinder-volume --config-file $CINDER_CONF" - screen_it c-sch "cd $CINDER_DIR && $CINDER_DIR/bin/cinder-scheduler --config-file $CINDER_CONF" + screen_it c-api "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" + screen_it c-vol "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" + screen_it c-sch "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF" } # stop_cinder() - Stop running processes (non-screen) diff --git a/stack.sh b/stack.sh index 2eef0c66..50cefe8d 100755 --- a/stack.sh +++ b/stack.sh @@ -1760,6 +1760,13 @@ elif is_service_enabled n-vol; then fi fi +# Support entry points installation of console scripts +if [ -d $NOVA_DIR/bin ] ; then + NOVA_BIN_DIR=$NOVA_DIR/bin +else + NOVA_BIN_DIR=/usr/local/bin +fi + NOVA_CONF=nova.conf 
function add_nova_opt { echo "$1" >> $NOVA_CONF_DIR/$NOVA_CONF @@ -1935,7 +1942,7 @@ if is_service_enabled mysql && is_service_enabled nova; then mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE nova CHARACTER SET latin1;' # (re)create nova database - $NOVA_DIR/bin/nova-manage db sync + $NOVA_BIN_DIR/nova-manage db sync fi @@ -2077,7 +2084,7 @@ fi # Launch the nova-api and wait for it to answer before continuing if is_service_enabled n-api; then add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS" - screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api" + screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api" echo "Waiting for nova-api to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then echo "nova-api did not start" @@ -2097,13 +2104,13 @@ if is_service_enabled q-svc; then quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE elif is_service_enabled mysql && is_service_enabled nova; then # Create a small network - $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS + $NOVA_BIN_DIR/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS # Create some floating ips - $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE + $NOVA_BIN_DIR/nova-manage floating create $FLOATING_RANGE # Create a second pool - $NOVA_DIR/bin/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL + $NOVA_BIN_DIR/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL fi # Launching nova-compute should be as simple as running ``nova-compute`` but @@ -2112,11 +2119,11 @@ fi # within the context of our original shell (so our groups won't be updated). # Use 'sg' to execute nova-compute as a member of the libvirtd group. # We don't check for is_service_enable as screen_it does it for us -screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_DIR/bin/nova-compute" -screen_it n-crt "cd $NOVA_DIR && $NOVA_DIR/bin/nova-cert" -screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume" -screen_it n-net "cd $NOVA_DIR && $NOVA_DIR/bin/nova-network" -screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler" +screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_BIN_DIR/nova-compute" +screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" +screen_it n-vol "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-volume" +screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network" +screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler" screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF --web ." screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF" screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" @@ -2133,7 +2140,7 @@ screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF # Starting the nova-objectstore only if swift3 service is not enabled. # Swift will act as s3 objectstore. 
is_service_enabled swift3 || \ - screen_it n-obj "cd $NOVA_DIR && $NOVA_DIR/bin/nova-objectstore" + screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore" # Install Images From f724fb784504256ef50d390b9394b36b30698bf8 Mon Sep 17 00:00:00 2001 From: Matt Joyce Date: Mon, 20 Aug 2012 14:54:58 -0700 Subject: [PATCH 641/967] BUG #1039180 - misconfiguration in horizon settings leads to stack trace Change-Id: I328df7dc91bde723acb0dcd6fa0e8986255b9c50 --- AUTHORS | 1 + files/horizon_settings.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/AUTHORS b/AUTHORS index 4f771ce1..dde2c7be 100644 --- a/AUTHORS +++ b/AUTHORS @@ -29,6 +29,7 @@ Josh Kearney Justin Shepherd Ken Pepple Kiall Mac Innes +Matt Joyce Osamu Habuka Russell Bryant Scott Moser diff --git a/files/horizon_settings.py b/files/horizon_settings.py index 487c06ea..d18fd1a5 100644 --- a/files/horizon_settings.py +++ b/files/horizon_settings.py @@ -41,7 +41,6 @@ HORIZON_CONFIG = { 'dashboards': ('nova', 'syspanel', 'settings',), 'default_dashboard': 'nova', - 'user_home': 'openstack_dashboard.views.user_home', } # TODO(tres): Remove these once Keystone has an API to identify auth backend. From 43bedda56c8a695a7362cd0ceaa499bb58bc0020 Mon Sep 17 00:00:00 2001 From: John Griffith Date: Tue, 21 Aug 2012 15:26:15 -0600 Subject: [PATCH 642/967] Add standard extensions to cinder.conf * Set the standard extensions in the default cinder.conf file Change-Id: Ib56f38b61a7a95b313031bf60bff005b17b888c4 --- lib/cinder | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/cinder b/lib/cinder index 525b6c6e..1bad5c00 100644 --- a/lib/cinder +++ b/lib/cinder @@ -111,6 +111,7 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT sql_connection $BASE_SQL_CONN/cinder?charset=utf8 iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI iniset $CINDER_CONF DEFAULT root_helper "sudo ${CINDER_ROOTWRAP}" + iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.openstack.volume.contrib.standard_extensions if is_service_enabled qpid ; then iniset $CINDER_CONF DEFAULT rpc_backend cinder.openstack.common.rpc.impl_qpid From bfdad75eda98c5eec4c7a59ad7dd7ac45a5712cf Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Sat, 18 Aug 2012 09:00:42 +1200 Subject: [PATCH 643/967] Add support to optionally launch the heat service. This allows the heat service to be started as a devstack service. Heat is disabled by default, and can be enabled with this in your localrc: ENABLED_SERVICES+=,heat There is now a repo of heat-enabled images here: https://github.com/heat-api/prebuilt-jeos-images/downloads These can be added to the IMAGE_URLS in your localrc. 
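For example, a minimal localrc sketch (the image file name is a placeholder for one of the prebuilt downloads above; the individual h-* service names match lib/heat below):

    ENABLED_SERVICES+=,heat,h-api,h-eng,h-meta
    # placeholder image name -- substitute a real file from the downloads page above
    IMAGE_URLS+=",https://github.com/heat-api/prebuilt-jeos-images/downloads/<image>.qcow2"
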
After devstack is launched, a template can be invoked with: nova keypair-add --pub_key $HOME/.ssh/id_rsa.pub heat_key heat -d create wordpress \ --template-file=../heat/templates/WordPress_Single_Instance.template \ --parameters="InstanceType=m1.tiny;DBUsername=wpuser;DBPassword=wppassword;\ KeyName=heat_key;LinuxDistribution=F16" Change-Id: I07591295eb2b9eb7868b1577dd3c24b19812a689 --- files/default_catalog.templates | 5 + files/keystone_data.sh | 25 +++++ lib/heat | 156 ++++++++++++++++++++++++++++++++ stack.sh | 18 +++- stackrc | 4 + 5 files changed, 207 insertions(+), 1 deletion(-) create mode 100644 lib/heat diff --git a/files/default_catalog.templates b/files/default_catalog.templates index 66052b6a..ceb6458f 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -34,3 +34,8 @@ catalog.RegionOne.image.publicURL = http://%SERVICE_HOST%:9292 catalog.RegionOne.image.adminURL = http://%SERVICE_HOST%:9292 catalog.RegionOne.image.internalURL = http://%SERVICE_HOST%:9292 catalog.RegionOne.image.name = Image Service + +catalog.RegionOne.heat.publicURL = http://%SERVICE_HOST%:8000/v1 +catalog.RegionOne.heat.adminURL = http://%SERVICE_HOST%:8000/v1 +catalog.RegionOne.heat.internalURL = http://%SERVICE_HOST%:8000/v1 +catalog.RegionOne.heat.name = Heat Service diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 6987797a..2a8d0703 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -10,6 +10,7 @@ # service quantum admin # if enabled # service swift admin # if enabled # service cinder admin # if enabled +# service heat admin # if enabled # demo admin admin # demo demo Member, anotherrole # invisible_to_admin demo Member @@ -154,6 +155,29 @@ if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then fi fi +# Heat +if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then + HEAT_USER=$(get_id keystone user-create --name=heat \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=heat@example.com) + keystone user-role-add --tenant_id $SERVICE_TENANT \ + --user_id $HEAT_USER \ + --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + HEAT_SERVICE=$(get_id keystone service-create \ + --name=heat \ + --type=orchestration \ + --description="Heat Service") + keystone endpoint-create \ + --region RegionOne \ + --service_id $HEAT_SERVICE \ + --publicurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1" \ + --adminurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1" \ + --internalurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1" + fi +fi + # Glance if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then GLANCE_USER=$(get_id keystone user-create \ @@ -296,3 +320,4 @@ if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then --internalurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" fi fi + diff --git a/lib/heat b/lib/heat new file mode 100644 index 00000000..6f442f87 --- /dev/null +++ b/lib/heat @@ -0,0 +1,156 @@ +# lib/heat +# Install and start Heat service +# To enable, add the following to localrc +# ENABLED_SERVICES+=,heat,h-api,h-eng,h-meta + +# Dependencies: +# - functions + +# stack.sh +# --------- +# install_XXX +# configure_XXX +# init_XXX +# start_XXX +# stop_XXX +# cleanup_XXX + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following along as the install occurs. 
+set -o xtrace + + +# Defaults +# -------- +HEAT_DIR=$DEST/heat + +# set up default directories + +# cleanup_heat() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_heat() { + # This function intentionally left blank + : +} + +# configure_heat() - Set config files, create data dirs, etc +function configure_heat() { + setup_develop $HEAT_DIR + + HEAT_CONF_DIR=/etc/heat + if [[ ! -d $HEAT_CONF_DIR ]]; then + sudo mkdir -p $HEAT_CONF_DIR + fi + sudo chown `whoami` $HEAT_CONF_DIR + + HEAT_API_HOST=${HEAT_API_HOST:-$SERVICE_HOST} + HEAT_API_PORT=${HEAT_API_PORT:-8000} + HEAT_ENGINE_HOST=${HEAT_ENGINE_HOST:-$SERVICE_HOST} + HEAT_ENGINE_PORT=${HEAT_ENGINE_PORT:-8001} + HEAT_METADATA_HOST=${HEAT_METADATA_HOST:-$SERVICE_HOST} + HEAT_METADATA_PORT=${HEAT_METADATA_PORT:-8002} + + HEAT_API_CONF=$HEAT_CONF_DIR/heat-api.conf + cp $HEAT_DIR/etc/heat-api.conf $HEAT_API_CONF + iniset $HEAT_API_CONF DEFAULT debug True + inicomment $HEAT_API_CONF DEFAULT log_file + iniset $HEAT_API_CONF DEFAULT use_syslog $SYSLOG + iniset $HEAT_API_CONF DEFAULT bind_host $HEAT_API_HOST + iniset $HEAT_API_CONF DEFAULT bind_port $HEAT_API_PORT + + if is_service_enabled rabbit; then + iniset $HEAT_API_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu + iniset $HEAT_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD + iniset $HEAT_API_CONF DEFAULT rabbit_host $RABBIT_HOST + elif is_service_enabled qpid; then + iniset $HEAT_API_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid + fi + + HEAT_API_PASTE_INI=$HEAT_CONF_DIR/heat-api-paste.ini + cp $HEAT_DIR/etc/heat-api-paste.ini $HEAT_API_PASTE_INI + iniset $HEAT_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $HEAT_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $HEAT_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $HEAT_API_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $HEAT_API_PASTE_INI filter:authtoken admin_user heat + iniset $HEAT_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD + iniset $HEAT_API_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_API_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens + + HEAT_ENGINE_CONF=$HEAT_CONF_DIR/heat-engine.conf + cp $HEAT_DIR/etc/heat-engine.conf $HEAT_ENGINE_CONF + iniset $HEAT_ENGINE_CONF DEFAULT debug True + inicomment $HEAT_ENGINE_CONF DEFAULT log_file + iniset $HEAT_ENGINE_CONF DEFAULT use_syslog $SYSLOG + iniset $HEAT_ENGINE_CONF DEFAULT bind_host $HEAT_ENGINE_HOST + iniset $HEAT_ENGINE_CONF DEFAULT bind_port $HEAT_ENGINE_PORT + iniset $HEAT_ENGINE_CONF DEFAULT sql_connection $BASE_SQL_CONN/heat?charset=utf8 + iniset $HEAT_ENGINE_CONF DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/random` + + if is_service_enabled rabbit; then + iniset $HEAT_ENGINE_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu + iniset $HEAT_ENGINE_CONF DEFAULT rabbit_password $RABBIT_PASSWORD + iniset $HEAT_ENGINE_CONF DEFAULT rabbit_host $RABBIT_HOST + elif is_service_enabled qpid; then + iniset $HEAT_ENGINE_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid + fi + + 
HEAT_ENGINE_PASTE_INI=$HEAT_CONF_DIR/heat-engine-paste.ini + cp $HEAT_DIR/etc/heat-engine-paste.ini $HEAT_ENGINE_PASTE_INI + iniset $HEAT_ENGINE_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $HEAT_ENGINE_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $HEAT_ENGINE_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $HEAT_ENGINE_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_ENGINE_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $HEAT_ENGINE_PASTE_INI filter:authtoken admin_user heat + iniset $HEAT_ENGINE_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD + + HEAT_METADATA_CONF=$HEAT_CONF_DIR/heat-metadata.conf + cp $HEAT_DIR/etc/heat-metadata.conf $HEAT_METADATA_CONF + iniset $HEAT_METADATA_CONF DEFAULT debug True + inicomment $HEAT_METADATA_CONF DEFAULT log_file + iniset $HEAT_METADATA_CONF DEFAULT use_syslog $SYSLOG + iniset $HEAT_METADATA_CONF DEFAULT bind_host $HEAT_METADATA_HOST + iniset $HEAT_METADATA_CONF DEFAULT bind_port $HEAT_METADATA_PORT + + if is_service_enabled rabbit; then + iniset $HEAT_METADATA_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu + iniset $HEAT_METADATA_CONF DEFAULT rabbit_password $RABBIT_PASSWORD + iniset $HEAT_METADATA_CONF DEFAULT rabbit_host $RABBIT_HOST + elif is_service_enabled qpid; then + iniset $HEAT_METADATA_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid + fi + + HEAT_METADATA_PASTE_INI=$HEAT_CONF_DIR/heat-metadata-paste.ini + cp $HEAT_DIR/etc/heat-metadata-paste.ini $HEAT_METADATA_PASTE_INI + +} + +# init_heat() - Initialize database +function init_heat() { + + # (re)create heat database + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS heat;' + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE heat CHARACTER SET utf8;' + + $HEAT_DIR/bin/heat-db-setup $os_PACKAGE -r $MYSQL_PASSWORD +} + +# install_heat() - Collect source and prepare +function install_heat() { + git_clone $HEAT_REPO $HEAT_DIR $HEAT_BRANCH +} + +# start_heat() - Start running processes, including screen +function start_heat() { + screen_it h-eng "cd $HEAT_DIR; bin/heat-engine --config-file=$HEAT_CONF_DIR/heat-engine.conf" + screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-dir=$HEAT_CONF_DIR/heat-api.conf" + screen_it h-meta "cd $HEAT_DIR; bin/heat-metadata --config-dir=$HEAT_CONF_DIR/heat-metadata.conf" +} + +# stop_heat() - Stop running processes (non-screen) +function stop_heat() { + # This function intentionally left blank + : +} diff --git a/stack.sh b/stack.sh index e6576e03..910c3877 100755 --- a/stack.sh +++ b/stack.sh @@ -2,7 +2,7 @@ # ``stack.sh`` is an opinionated OpenStack developer installation. 
It # installs and configures various combinations of **Glance**, **Horizon**, -# **Keystone**, **Nova**, **Quantum** and **Swift** +# **Keystone**, **Nova**, **Quantum**, **Heat** and **Swift** # This script allows you to specify configuration options of what git # repositories to use, enabled services, network configuration and various @@ -241,6 +241,7 @@ sudo chown `whoami` $DATA_DIR # Get project function libraries source $TOP_DIR/lib/cinder source $TOP_DIR/lib/ceilometer +source $TOP_DIR/lib/heat # Set the destination directories for openstack projects NOVA_DIR=$DEST/nova @@ -787,6 +788,9 @@ if is_service_enabled quantum; then # quantum git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH fi +if is_service_enabled heat; then + install_heat +fi if is_service_enabled cinder; then install_cinder fi @@ -827,6 +831,9 @@ if is_service_enabled quantum; then setup_develop $QUANTUM_CLIENT_DIR setup_develop $QUANTUM_DIR fi +if is_service_enabled heat; then + configure_heat +fi if is_service_enabled cinder; then configure_cinder fi @@ -1945,6 +1952,11 @@ if is_service_enabled mysql && is_service_enabled nova; then $NOVA_BIN_DIR/nova-manage db sync fi +# Heat +# ------ +if is_service_enabled heat; then + init_heat +fi # Launch Services # =============== @@ -2142,6 +2154,10 @@ screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF is_service_enabled swift3 || \ screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore" +# launch heat engine, api and metadata +if is_service_enabled heat; then + start_heat +fi # Install Images # ============== diff --git a/stackrc b/stackrc index cd70284d..d8d10086 100644 --- a/stackrc +++ b/stackrc @@ -95,6 +95,10 @@ QUANTUM_CLIENT_BRANCH=master TEMPEST_REPO=${GIT_BASE}/openstack/tempest.git TEMPEST_BRANCH=master +# heat service +HEAT_REPO=${GIT_BASE}/heat-api/heat.git +HEAT_BRANCH=master + # Nova hypervisor configuration. We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can # also install an **LXC** or **OpenVZ** based system. From ea23e686643778ae9e421e3772f8f93e127c383f Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Wed, 22 Aug 2012 05:53:17 -0700 Subject: [PATCH 644/967] Exercise quantum v2 api without namespace Added quantum_v2.sh for quantum v2 api exercise. Quantum exercise script requires the following settings: - Q_USE_NAMESPACE=False - LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver And the following services enabled: - quantum, q-svc, q-agt, q-dhcp Change-Id: I5767f94c94187a4ca0fd189244fa6f5781519ab4 --- exercises/quantum-adv-test.sh | 486 ++++++++++++++++++++++++++++++++++ exercises/quantum.sh | 396 --------------------------- stack.sh | 4 +- 3 files changed, 489 insertions(+), 397 deletions(-) create mode 100755 exercises/quantum-adv-test.sh delete mode 100755 exercises/quantum.sh diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh new file mode 100755 index 00000000..cff29d2e --- /dev/null +++ b/exercises/quantum-adv-test.sh @@ -0,0 +1,486 @@ +#!/usr/bin/env bash +# + +# **quantum.sh** + +# We will use this test to perform integration testing of nova and +# other components with Quantum. + +echo "*********************************************************************" +echo "Begin DevStack Exercise: $0" +echo "*********************************************************************" + +# This script exits on an error so that errors don't compound and you see +# only the first error that occured. 
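+
+# NOTE: this exercise requires Q_USE_NAMESPACE=False and
+# LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver in localrc,
+# with the quantum, q-svc, q-agt and q-dhcp services enabled.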
+ +set -o errtrace +trap failed ERR +failed() { + local r=$? + set +o errtrace + set +o xtrace + echo "Failed to execute" + echo "Starting cleanup..." + delete_all + echo "Finished cleanup" + exit $r +} + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. +set -o xtrace + +#------------------------------------------------------------------------------ +# Quantum config check +#------------------------------------------------------------------------------ +# Warn if quantum is not enabled +if [[ ! "$ENABLED_SERVICES" =~ "q-svc" ]]; then + echo "WARNING: Running quantum test without enabling quantum" +fi + +#------------------------------------------------------------------------------ +# Environment +#------------------------------------------------------------------------------ + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc + +# Import exercise configuration +source $TOP_DIR/exerciserc + +# If quantum is not enabled we exit with exitcode 55 which mean +# exercise is skipped. +is_service_enabled quantum && is_service_enabled q-agt && is_service_enabled q-dhcp || exit 55 + +#------------------------------------------------------------------------------ +# Test settings for quantum +#------------------------------------------------------------------------------ + +TENANTS="DEMO1" +# TODO (nati)_Test public network +#TENANTS="DEMO1,DEMO2" + +PUBLIC_NAME="admin" +DEMO1_NAME="demo1" +DEMO2_NAME="demo2" + +PUBLIC_NUM_NET=1 +DEMO1_NUM_NET=1 +DEMO2_NUM_NET=2 + +PUBLIC_NET1_CIDR="200.0.0.0/24" +DEMO1_NET1_CIDR="190.0.0.0/24" +DEMO2_NET1_CIDR="191.0.0.0/24" +DEMO2_NET2_CIDR="191.0.1.0/24" + +PUBLIC_NET1_GATEWAY="200.0.0.1" +DEMO1_NET1_GATEWAY="190.0.0.1" +DEMO2_NET1_GATEWAY="191.0.0.1" +DEMO2_NET2_GATEWAY="191.0.1.1" + +PUBLIC_NUM_VM=1 +DEMO1_NUM_VM=1 +DEMO2_NUM_VM=2 + +PUBLIC_VM1_NET='admin-net1' +DEMO1_VM1_NET='demo1-net1' +# Multinic settings. But this is fail without nic setting in OS image +DEMO2_VM1_NET='demo2-net1' +DEMO2_VM2_NET='demo2-net2' + +PUBLIC_NUM_ROUTER=1 +DEMO1_NUM_ROUTER=1 +DEMO2_NUM_ROUTER=1 + +PUBLIC_ROUTER1_NET="admin-net1" +DEMO1_ROUTER1_NET="demo1-net1" +DEMO2_ROUTER1_NET="demo2-net1" + +#------------------------------------------------------------------------------ +# Keystone settings. +#------------------------------------------------------------------------------ +KEYSTONE="keystone" + +#------------------------------------------------------------------------------ +# Get a token for clients that don't support service catalog +#------------------------------------------------------------------------------ + +# manually create a token by querying keystone (sending JSON data). Keystone +# returns a token and catalog of endpoints. We use python to parse the token +# and save it. + +TOKEN=`keystone token-get | grep ' id ' | awk '{print $4}'` + +#------------------------------------------------------------------------------ +# Various functions. 
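+#
+# Several helpers below iterate over the configured tenants (and per-tenant
+# resources), substituting %TENANT% and, where applicable, %NUM% into a
+# command template before eval'ing it.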
+#------------------------------------------------------------------------------ +function foreach_tenant { + COMMAND=$1 + for TENANT in ${TENANTS//,/ };do + eval ${COMMAND//%TENANT%/$TENANT} + done +} + +function foreach_tenant_resource { + COMMAND=$1 + RESOURCE=$2 + for TENANT in ${TENANTS//,/ };do + eval 'NUM=$'"${TENANT}_NUM_$RESOURCE" + for i in `seq $NUM`;do + local COMMAND_LOCAL=${COMMAND//%TENANT%/$TENANT} + COMMAND_LOCAL=${COMMAND_LOCAL//%NUM%/$i} + eval $COMMAND_LOCAL + done + done +} + +function foreach_tenant_vm { + COMMAND=$1 + foreach_tenant_resource "$COMMAND" 'VM' +} + +function foreach_tenant_net { + COMMAND=$1 + foreach_tenant_resource "$COMMAND" 'NET' +} + +function get_image_id { + local IMAGE_ID=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) + echo "$IMAGE_ID" +} + +function get_tenant_id { + local TENANT_NAME=$1 + local TENANT_ID=`keystone tenant-list | grep " $TENANT_NAME " | head -n 1 | get_field 1` + echo "$TENANT_ID" +} + +function get_user_id { + local USER_NAME=$1 + local USER_ID=`keystone user-list | grep $USER_NAME | awk '{print $2}'` + echo "$USER_ID" +} + +function get_role_id { + local ROLE_NAME=$1 + local ROLE_ID=`keystone role-list | grep $ROLE_NAME | awk '{print $2}'` + echo "$ROLE_ID" +} + +function get_network_id { + local NETWORK_NAME="$1" + local NETWORK_ID=`quantum net-list -F id -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'` + echo $NETWORK_ID +} + +function get_flavor_id { + local INSTANCE_TYPE=$1 + local FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'` + echo "$FLAVOR_ID" +} + +function confirm_server_active { + local VM_UUID=$1 + if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova --no_cache show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then + echo "server '$VM_UUID' did not become active!" 
+ false +fi + +} + +function add_tenant { + local TENANT=$1 + local USER=$2 + + $KEYSTONE tenant-create --name=$TENANT + $KEYSTONE user-create --name=$USER --pass=${ADMIN_PASSWORD} + + local USER_ID=$(get_user_id $USER) + local TENANT_ID=$(get_tenant_id $TENANT) + + $KEYSTONE user-role-add --user-id $USER_ID --role-id $(get_role_id Member) --tenant-id $TENANT_ID +} + +function remove_tenant { + local TENANT=$1 + local TENANT_ID=$(get_tenant_id $TENANT) + + $KEYSTONE tenant-delete $TENANT_ID +} + +function remove_user { + local USER=$1 + local USER_ID=$(get_user_id $USER) + + $KEYSTONE user-delete $USER_ID +} + + + +#------------------------------------------------------------------------------ +# "Create" functions +#------------------------------------------------------------------------------ + +function create_tenants { + source $TOP_DIR/openrc admin admin + add_tenant demo1 demo1 demo1 + add_tenant demo2 demo2 demo2 +} + +function delete_tenants_and_users { + source $TOP_DIR/openrc admin admin + remove_user demo1 + remove_tenant demo1 + remove_user demo2 + remove_tenant demo2 + echo "removed all tenants" +} + +function create_network { + local TENANT=$1 + local GATEWAY=$2 + local CIDR=$3 + local NUM=$4 + local EXTRA=$5 + local NET_NAME="${TENANT}-net$NUM" + local ROUTER_NAME="${TENANT}-router${NUM}" + source $TOP_DIR/openrc admin admin + local TENANT_ID=$(get_tenant_id $TENANT) + source $TOP_DIR/openrc $TENANT $TENANT + local NET_ID=$(quantum net-create --tenant_id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) + quantum subnet-create --ip_version 4 --tenant_id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR + #T0DO(nati) comment out until l3-agent is merged + #local ROUTER_ID=$($QUANTUM router-create --tenant_id $TENANT_ID $ROUTER_NAME| grep ' id ' | awk '{print $4}' ) + #for NET_NAME in ${NET_NAMES//,/ };do + # SUBNET_ID=`get_subnet_id $NET_NAME` + # $QUANTUM router-interface-create $NAME --subnet_id $SUBNET_ID + #done +} + +function create_networks { + foreach_tenant_net 'create_network ${%TENANT%_NAME} ${%TENANT%_NET%NUM%_GATEWAY} ${%TENANT%_NET%NUM%_CIDR} %NUM% ${%TENANT%_NET%NUM%_EXTRA}' + #TODO(nati) test security group function + # allow ICMP for both tenant's security groups + #source $TOP_DIR/openrc demo1 demo1 + #$NOVA secgroup-add-rule default icmp -1 -1 0.0.0.0/0 + #source $TOP_DIR/openrc demo2 demo2 + #$NOVA secgroup-add-rule default icmp -1 -1 0.0.0.0/0 +} + +function create_vm { + local TENANT=$1 + local NUM=$2 + local NET_NAMES=$3 + source $TOP_DIR/openrc $TENANT $TENANT + local NIC="" + for NET_NAME in ${NET_NAMES//,/ };do + NIC="$NIC --nic net-id="`get_network_id $NET_NAME` + done + #TODO (nati) Add multi-nic test + #TODO (nati) Add public-net test + local VM_UUID=`nova --no_cache boot --flavor $(get_flavor_id m1.tiny) \ + --image $(get_image_id) \ + $NIC \ + $TENANT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` + die_if_not_set VM_UUID "Failure launching $TENANT-server$NUM" VM_UUID + confirm_server_active $VM_UUID +} + +function create_vms { + foreach_tenant_vm 'create_vm ${%TENANT%_NAME} %NUM% ${%TENANT%_VM%NUM%_NET}' +} + +function ping_ip { + # Test agent connection. Assumes namespaces are disabled, and + # that DHCP is in use, but not L3 + local VM_NAME=$1 + IP=`nova --no_cache show $VM_NAME | grep 'network' | awk '{print $5}'` + if ! timeout $BOOT_TIMEOUT sh -c "while ! 
ping -c1 -w1 $IP; do sleep 1; done"; then + echo "Could not ping $VM_NAME" + false + fi +} + +function check_vm { + local TENANT=$1 + local NUM=$2 + local VM_NAME="$TENANT-server$NUM" + source $TOP_DIR/openrc $TENANT $TENANT + ping_ip $VM_NAME + # TODO (nati) test ssh connection + # TODO (nati) test inter connection between vm + # TODO (nati) test namespace dhcp + # TODO (nati) test dhcp host routes + # TODO (nati) test multi-nic + # TODO (nati) use test-agent + # TODO (nati) test L3 forwarding + # TODO (nati) test floating ip + # TODO (nati) test security group +} + +function check_vms { + foreach_tenant_vm 'check_vm ${%TENANT%_NAME} %NUM%' +} + +function shutdown_vm { + local TENANT=$1 + local NUM=$2 + source $TOP_DIR/openrc $TENANT $TENANT + VM_NAME=${TENANT}-server$NUM + nova --no_cache delete $VM_NAME +} + +function shutdown_vms { + foreach_tenant_vm 'shutdown_vm ${%TENANT%_NAME} %NUM%' + if ! timeout $TERMINATE_TIMEOUT sh -c "while nova --no_cache list | grep -q ACTIVE; do sleep 1; done"; then + echo "Some VMs failed to shutdown" + false + fi +} + +function delete_network { + local TENANT=$1 + source $TOP_DIR/openrc admin admin + local TENANT_ID=$(get_tenant_id $TENANT) + #TODO(nati) comment out until l3-agent merged + #for res in port subnet net router;do + for res in port subnet net;do + quantum ${res}-list -F id -F tenant_id | grep $TENANT_ID | awk '{print $2}' | xargs -I % quantum ${res}-delete % + done +} + +function delete_networks { + foreach_tenant 'delete_network ${%TENANT%_NAME}' + #TODO(nati) add secuirty group check after it is implemented + # source $TOP_DIR/openrc demo1 demo1 + # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 + # source $TOP_DIR/openrc demo2 demo2 + # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 +} + +function create_all { + create_tenants + create_networks + create_vms +} + +function delete_all { + shutdown_vms + delete_networks + delete_tenants_and_users +} + +function all { + create_all + check_vms + delete_all +} + +#------------------------------------------------------------------------------ +# Test functions. +#------------------------------------------------------------------------------ +function test_functions { + IMAGE=$(get_image_id) + echo $IMAGE + + TENANT_ID=$(get_tenant_id demo) + echo $TENANT_ID + + FLAVOR_ID=$(get_flavor_id m1.tiny) + echo $FLAVOR_ID + + NETWORK_ID=$(get_network_id admin) + echo $NETWORK_ID +} + +#------------------------------------------------------------------------------ +# Usage and main. +#------------------------------------------------------------------------------ +usage() { + echo "$0: [-h]" + echo " -h, --help Display help message" + echo " -t, --tenant Create tenants" + echo " -n, --net Create networks" + echo " -v, --vm Create vms" + echo " -c, --check Check connection" + echo " -x, --delete-tenants Delete tenants" + echo " -y, --delete-nets Delete networks" + echo " -z, --delete-vms Delete vms" + echo " -T, --test Test functions" +} + +main() { + + echo Description + echo + echo Copyright 2012, Cisco Systems + echo Copyright 2012, Nicira Networks, Inc. + echo Copyright 2012, NTT MCL, Inc. 
+ echo + echo Please direct any questions to dedutta@cisco.com, dan@nicira.com, nachi@nttmcl.com + echo + + + if [ $# -eq 0 ] ; then + # if no args are provided, run all tests + all + else + + while [ "$1" != "" ]; do + case $1 in + -h | --help ) usage + exit + ;; + -n | --net ) create_networks + exit + ;; + -v | --vm ) create_vms + exit + ;; + -t | --tenant ) create_tenants + exit + ;; + -c | --check ) check_vms + exit + ;; + -T | --test ) test_functions + exit + ;; + -x | --delete-tenants ) delete_tenants_and_users + exit + ;; + -y | --delete-nets ) delete_networks + exit + ;; + -z | --delete-vms ) shutdown_vms + exit + ;; + -a | --all ) all + exit + ;; + * ) usage + exit 1 + esac + shift + done + fi +} + + +#------------------------------------------------------------------------------- +# Kick off script. +#------------------------------------------------------------------------------- +echo $* +main $* + +set +o xtrace +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" diff --git a/exercises/quantum.sh b/exercises/quantum.sh deleted file mode 100755 index 33ae14ea..00000000 --- a/exercises/quantum.sh +++ /dev/null @@ -1,396 +0,0 @@ -#!/usr/bin/env bash -# - -# **quantum.sh** - -# We will use this test to perform integration testing of nova and -# other components with Quantum. - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occured. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - -#------------------------------------------------------------------------------ -# Quantum config check -#------------------------------------------------------------------------------ -# Warn if quantum is not enabled -if [[ ! "$ENABLED_SERVICES" =~ "q-svc" ]]; then - echo "WARNING: Running quantum test without enabling quantum" -fi - -#------------------------------------------------------------------------------ -# Environment -#------------------------------------------------------------------------------ - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -# If quantum is not enabled we exit with exitcode 55 which mean -# exercise is skipped. -is_service_enabled quantum || exit 55 - -#------------------------------------------------------------------------------ -# Various default parameters. -#------------------------------------------------------------------------------ - -# Max time to wait while vm goes from build to active state -ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30} - -# Max time till the vm is bootable -BOOT_TIMEOUT=${BOOT_TIMEOUT:-60} - -# Max time to wait for proper association and dis-association. 
-ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15} - -# Max time to wait before delete VMs and delete Networks -VM_NET_DELETE_TIMEOUT=${VM_NET_TIMEOUT:-10} - -# Instance type to create -DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} - -# Boot this image, use first AMi image if unset -DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} - -# OVS Hosts -OVS_HOSTS=${DEFAULT_OVS_HOSTS:-"localhost"} - -#------------------------------------------------------------------------------ -# Nova settings. -#------------------------------------------------------------------------------ -if [ -f /opt/stack/nova/bin/nova-manage ] ; then - NOVA_MANAGE=/opt/stack/nova/bin/nova-manage -else - NOVA_MANAGE=/usr/local/bin/nova-manage -NOVA=/usr/local/bin/nova -NOVA_CONF=/etc/nova/nova.conf - -#------------------------------------------------------------------------------ -# Mysql settings. -#------------------------------------------------------------------------------ -MYSQL="/usr/bin/mysql --skip-column-name --host=$MYSQL_HOST" - -#------------------------------------------------------------------------------ -# Keystone settings. -#------------------------------------------------------------------------------ -KEYSTONE="keystone" - -#------------------------------------------------------------------------------ -# Get a token for clients that don't support service catalog -#------------------------------------------------------------------------------ - -# manually create a token by querying keystone (sending JSON data). Keystone -# returns a token and catalog of endpoints. We use python to parse the token -# and save it. - -TOKEN=`keystone token-get | grep ' id ' | awk '{print $4}'` - -#------------------------------------------------------------------------------ -# Various functions. -#------------------------------------------------------------------------------ -function get_image_id { - local IMAGE_ID=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) - echo "$IMAGE_ID" -} - -function get_tenant_id { - local TENANT_NAME=$1 - local TENANT_ID=`keystone tenant-list | grep $TENANT_NAME | awk '{print $2}'` - echo "$TENANT_ID" -} - -function get_user_id { - local USER_NAME=$1 - local USER_ID=`keystone user-list | grep $USER_NAME | awk '{print $2}'` - echo "$USER_ID" -} - -function get_role_id { - local ROLE_NAME=$1 - local ROLE_ID=`keystone role-list | grep $ROLE_NAME | awk '{print $2}'` - echo "$ROLE_ID" -} - -# TODO: (Debo) Change Quantum client CLI and then remove the MYSQL stuff. 
-function get_network_id { - local NETWORK_NAME=$1 - local QUERY="select uuid from networks where label='$NETWORK_NAME'" - local NETWORK_ID=`echo $QUERY | $MYSQL -u root -p$MYSQL_PASSWORD nova` - echo "$NETWORK_ID" -} - -function get_flavor_id { - local INSTANCE_TYPE=$1 - local FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'` - echo "$FLAVOR_ID" -} - -function add_tenant { - local TENANT=$1 - local USER=$3 - local PASSWORD=$2 - - $KEYSTONE tenant-create --name=$TENANT - $KEYSTONE user-create --name=$USER --pass=${PASSWORD} - - local USER_ID=$(get_user_id $USER) - local TENANT_ID=$(get_tenant_id $TENANT) - - $KEYSTONE user-role-add --user $USER_ID --role $(get_role_id Member) --tenant_id $TENANT_ID - $KEYSTONE user-role-add --user $USER_ID --role $(get_role_id admin) --tenant_id $TENANT_ID - $KEYSTONE user-role-add --user $USER_ID --role $(get_role_id anotherrole) --tenant_id $TENANT_ID - #$KEYSTONE user-role-add --user $USER_ID --role $(get_role_id sysadmin) --tenant_id $TENANT_ID - #$KEYSTONE user-role-add --user $USER_ID --role $(get_role_id netadmin) --tenant_id $TENANT_ID -} - -function remove_tenant { - local TENANT=$1 - local TENANT_ID=$(get_tenant_id $TENANT) - - $KEYSTONE tenant-delete $TENANT_ID -} - -function remove_user { - local USER=$1 - local USER_ID=$(get_user_id $USER) - - $KEYSTONE user-delete $USER_ID -} - - -#------------------------------------------------------------------------------ -# "Create" functions -#------------------------------------------------------------------------------ - -function create_tenants { - add_tenant demo1 nova demo1 - add_tenant demo2 nova demo2 -} - -function delete_tenants_and_users { - remove_tenant demo1 - remove_tenant demo2 - remove_user demo1 - remove_user demo2 -} - -function create_networks { - $NOVA_MANAGE --flagfile=$NOVA_CONF network create \ - --label=public-net1 \ - --fixed_range_v4=11.0.0.0/24 - - $NOVA_MANAGE --flagfile=$NOVA_CONF network create \ - --label=demo1-net1 \ - --fixed_range_v4=12.0.0.0/24 \ - --project_id=$(get_tenant_id demo1) \ - --priority=1 - - $NOVA_MANAGE --flagfile=$NOVA_CONF network create \ - --label=demo2-net1 \ - --fixed_range_v4=13.0.0.0/24 \ - --project_id=$(get_tenant_id demo2) \ - --priority=1 -} - -function create_vms { - PUBLIC_NET1_ID=$(get_network_id public-net1) - DEMO1_NET1_ID=$(get_network_id demo1-net1) - DEMO2_NET1_ID=$(get_network_id demo2-net1) - - export OS_TENANT_NAME=demo1 - export OS_USERNAME=demo1 - export OS_PASSWORD=nova - VM_UUID1=`$NOVA boot --flavor $(get_flavor_id m1.tiny) \ - --image $(get_image_id) \ - --nic net-id=$PUBLIC_NET1_ID \ - --nic net-id=$DEMO1_NET1_ID \ - demo1-server1 | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` - die_if_not_set VM_UUID1 "Failure launching demo1-server1" - - export OS_TENANT_NAME=demo2 - export OS_USERNAME=demo2 - export OS_PASSWORD=nova - VM_UUID2=`$NOVA boot --flavor $(get_flavor_id m1.tiny) \ - --image $(get_image_id) \ - --nic net-id=$PUBLIC_NET1_ID \ - --nic net-id=$DEMO2_NET1_ID \ - demo2-server1 | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` - die_if_not_set VM_UUID2 "Failure launching demo2-server1" - - VM_UUID3=`$NOVA boot --flavor $(get_flavor_id m1.tiny) \ - --image $(get_image_id) \ - --nic net-id=$PUBLIC_NET1_ID \ - --nic net-id=$DEMO2_NET1_ID \ - demo2-server2 | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` - die_if_not_set VM_UUID3 "Failure launching demo2-server2" - -} - -function ping_vms { - - echo "Sleeping a bit let the VMs come up" - sleep $ACTIVE_TIMEOUT - - export OS_TENANT_NAME=demo1 - export 
OS_USERNAME=demo1 - export OS_PASSWORD=nova - # get the IP of the servers - PUBLIC_IP1=`nova show $VM_UUID1 | grep public-net1 | awk '{print $5}'` - export OS_TENANT_NAME=demo2 - export OS_USERNAME=demo2 - export OS_PASSWORD=nova - PUBLIC_IP2=`nova show $VM_UUID2 | grep public-net1 | awk '{print $5}'` - - MULTI_HOST=`trueorfalse False $MULTI_HOST` - if [ "$MULTI_HOST" = "False" ]; then - # sometimes the first ping fails (10 seconds isn't enough time for the VM's - # network to respond?), so let's ping for a default of 15 seconds with a - # timeout of a second for each ping. - if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $PUBLIC_IP1; do sleep 1; done"; then - echo "Couldn't ping server" - exit 1 - fi - if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $PUBLIC_IP2; do sleep 1; done"; then - echo "Couldn't ping server" - exit 1 - fi - else - # On a multi-host system, without vm net access, do a sleep to wait for the boot - sleep $BOOT_TIMEOUT - fi -} - -function shutdown_vms { - export OS_TENANT_NAME=demo1 - export OS_USERNAME=demo1 - export OS_PASSWORD=nova - nova delete $VM_UUID1 - - export OS_TENANT_NAME=demo2 - export OS_USERNAME=demo2 - export OS_PASSWORD=nova - nova delete $VM_UUID2 - nova delete $VM_UUID3 - -} - -function delete_networks { - PUBLIC_NET1_ID=$(get_network_id public-net1) - DEMO1_NET1_ID=$(get_network_id demo1-net1) - DEMO2_NET1_ID=$(get_network_id demo2-net1) - nova-manage network delete --uuid=$PUBLIC_NET1_ID - nova-manage network delete --uuid=$DEMO1_NET1_ID - nova-manage network delete --uuid=$DEMO2_NET1_ID -} - -function all { - create_tenants - create_networks - create_vms - ping_vms - shutdown_vms - delete_networks - delete_tenants_and_users -} - -#------------------------------------------------------------------------------ -# Test functions. -#------------------------------------------------------------------------------ -function test_functions { - IMAGE=$(get_image_id) - echo $IMAGE - - TENANT_ID=$(get_tenant_id demo) - echo $TENANT_ID - - FLAVOR_ID=$(get_flavor_id m1.tiny) - echo $FLAVOR_ID - - NETWORK_ID=$(get_network_id private) - echo $NETWORK_ID -} - -#------------------------------------------------------------------------------ -# Usage and main. -#------------------------------------------------------------------------------ -usage() { - echo "$0: [-h]" - echo " -h, --help Display help message" - echo " -n, --net Create networks" - echo " -v, --vm Create vms" - echo " -t, --tenant Create tenants" - echo " -T, --test Test functions" -} - -main() { - if [ $# -eq 0 ] ; then - usage - exit - fi - - echo Description - echo - echo Copyright 2012, Cisco Systems - echo Copyright 2012, Nicira Networks, Inc. - echo - echo Please direct any questions to dedutta@cisco.com, dlapsley@nicira.com - echo - - while [ "$1" != "" ]; do - case $1 in - -h | --help ) usage - exit - ;; - -n | --net ) create_networks - exit - ;; - -v | --vm ) create_vms - exit - ;; - -t | --tenant ) create_tenants - exit - ;; - -p | --ping ) ping_vms - exit - ;; - -T | --test ) test_functions - exit - ;; - -a | --all ) all - exit - ;; - * ) usage - exit 1 - esac - shift - done -} - - -#------------------------------------------------------------------------------- -# Kick off script. 
-#------------------------------------------------------------------------------- -echo $* -main -a - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/stack.sh b/stack.sh index e6576e03..dafff2b9 100755 --- a/stack.sh +++ b/stack.sh @@ -269,7 +269,8 @@ Q_HOST=${Q_HOST:-localhost} Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum} # Default auth strategy Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} - +# Use namespace or not +Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} # Name of the lvm volume group to use/create for iscsi volumes VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} @@ -1204,6 +1205,7 @@ if is_service_enabled q-dhcp; then iniset $Q_DHCP_CONF_FILE DEFAULT verbose True # Set debug iniset $Q_DHCP_CONF_FILE DEFAULT debug True + iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE # Update database iniset $Q_DHCP_CONF_FILE DEFAULT db_connection "mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/$Q_DB_NAME?charset=utf8" From b7988b61080763e6d211cf6284343f9f5acc9f4f Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Tue, 28 Aug 2012 09:20:24 -0700 Subject: [PATCH 645/967] Add quantum files for apts + rpms bug 1042446 Lists are based off of Nova packages and quantum tools/pip-requires This is needed urgently to get quantum gate up and running, which is failing because python-netaddr was not installed by the devstack gate. Change-Id: I8cdac75e577468dd826ac71b70112033d7bd9468 --- files/apts/quantum | 16 ++++++++++++++++ files/rpms/quantum | 23 +++++++++++++++++++++++ 2 files changed, 39 insertions(+) create mode 100644 files/apts/quantum create mode 100644 files/rpms/quantum diff --git a/files/apts/quantum b/files/apts/quantum new file mode 100644 index 00000000..568438f8 --- /dev/null +++ b/files/apts/quantum @@ -0,0 +1,16 @@ +iptables +mysql #NOPRIME +sudo +python-paste +python-routes +python-netaddr +python-pastedeploy +python-greenlet +python-kombu +python-eventlet +python-sqlalchemy +python-mysqldb +python-pyudev +python-qpid # dist:precise +dnsmasq-base +dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise,quantal diff --git a/files/rpms/quantum b/files/rpms/quantum new file mode 100644 index 00000000..6ca9c355 --- /dev/null +++ b/files/rpms/quantum @@ -0,0 +1,23 @@ +MySQL-python +dnsmasq-utils # for dhcp_release +ebtables +iptables +iputils +mysql-server # NOPRIME +python-boto +python-eventlet +python-greenlet +python-iso8601 +python-kombu +python-netaddr +python-paste +python-paste-deploy +python-qpid +python-routes +python-sqlalchemy +python-suds +rabbitmq-server # NOPRIME +qpid-cpp-server # NOPRIME +sqlite +sudo +vconfig From 4a43b7bd90f8e42baaf950a8177cb13fc30f5f2f Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 28 Aug 2012 17:43:40 -0500 Subject: [PATCH 646/967] Cosmetic, comment and text cleanups * functions * stack.sh * stackrc * unstack.sh A recent commit to stack.sh broke the RST formatting done by shocco to produce the HTML-formatted files on devstack.org. A bunch of comment and spacing fixes were done (ala pep8 if there were such a thing for shell scripts). The only non-comment changes made were to the content of some error messages. 
Fixes bug 1042271 Change-Id: Id1c74cf25c03c4f18ed741f8026e36b0d4a598dd --- functions | 100 +++++++++++------ stack.sh | 306 +++++++++++++++++++++++++++++------------------------ stackrc | 38 ++++--- unstack.sh | 2 +- 4 files changed, 259 insertions(+), 187 deletions(-) diff --git a/functions b/functions index 386af090..af154b0c 100644 --- a/functions +++ b/functions @@ -1,7 +1,16 @@ -# -*- mode: Shell-script -*- # functions - Common functions used by DevStack components # -# ENABLED_SERVICES is used by is_service_enabled() +# The following variables are assumed to be defined by certain functions: +# ``DISTRO`` +# ``ENABLED_SERVICES`` +# ``EROR_ON_CLONE`` +# ``FILES`` +# ``GLANCE_HOSTPORT`` +# ``OFFLINE`` +# ``PIP_DOWNLOAD_CACHE`` +# ``RECLONE`` +# ``TRACK_DEPENDS`` +# ``http_proxy``, ``https_proxy``, ``no_proxy`` # Save trace setting @@ -9,9 +18,9 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace -# Exit 0 if address is in network or 1 if -# address is not in network or netaddr library -# is not installed. +# Exit 0 if address is in network or 1 if address is not in +# network or netaddr library is not installed. +# address_in_net ip-address ip-range function address_in_net() { python -c " import netaddr @@ -21,7 +30,8 @@ sys.exit(netaddr.IPAddress('$1') not in netaddr.IPNetwork('$2')) } -# apt-get wrapper to set arguments correctly +# Wrapper for ``apt-get`` to set cache and proxy environment variables +# Uses globals ``OFFLINE``, ``*_proxy` # apt_get operation package [package ...] function apt_get() { [[ "$OFFLINE" = "True" || -z "$@" ]] && return @@ -88,15 +98,16 @@ function get_field() { # get_packages() collects a list of package names of any type from the -# prerequisite files in ``files/{apts|pips}``. The list is intended -# to be passed to a package installer such as apt or pip. +# prerequisite files in ``files/{apts|rpms}``. The list is intended +# to be passed to a package installer such as apt or yum. # -# Only packages required for the services in ENABLED_SERVICES will be +# Only packages required for the services in ``ENABLED_SERVICES`` will be # included. Two bits of metadata are recognized in the prerequisite files: # - ``# NOPRIME`` defers installation to be performed later in stack.sh # - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection # of the package to the distros listed. The distro names are case insensitive. # +# Uses globals ``DISTRO``, ``ENABLED_SERVICES`` # get_packages dir function get_packages() { local package_dir=$1 @@ -241,6 +252,7 @@ GetOSVersion() { } # git update using reference as a branch. +# git_update_branch ref function git_update_branch() { GIT_BRANCH=$1 @@ -254,6 +266,7 @@ function git_update_branch() { # git update using reference as a tag. Be careful editing source at that repo # as working copy will be in a detached mode +# git_update_tag ref function git_update_tag() { GIT_TAG=$1 @@ -289,6 +302,7 @@ function GetDistro() { # Set global RECLONE=yes to simulate a clone when dest-dir exists # Set global ERROR_ON_CLONE=True to abort execution with an error if the git repo # does not exist (default is False, meaning the repo will be cloned). +# Uses global ``OFFLINE`` # git_clone remote dest-dir branch function git_clone { [[ "$OFFLINE" = "True" ]] && return @@ -394,16 +408,20 @@ $option = $value # is_service_enabled() checks if the service(s) specified as arguments are -# enabled by the user in **ENABLED_SERVICES**. +# enabled by the user in ``ENABLED_SERVICES``. 
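+# For example, ``is_service_enabled q-svc q-agt`` succeeds if either service
+# is enabled.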
# -# If there are multiple services specified as arguments the test performs a -# boolean OR or if any of the services specified on the command line -# return true. +# Multiple services specified as arguments are ``OR``'ed together; the test +# is a short-circuit boolean, i.e it returns on the first match. # -# There is a special cases for some 'catch-all' services:: +# There are special cases for some 'catch-all' services:: # **nova** returns true if any service enabled start with **n-** +# **cinder** returns true if any service enabled start with **c-** +# **ceilometer** returns true if any service enabled start with **ceilometer** # **glance** returns true if any service enabled start with **g-** # **quantum** returns true if any service enabled start with **q-** +# +# Uses global ``ENABLED_SERVICES`` +# is_service_enabled service [service ...] function is_service_enabled() { services=$@ for service in ${services}; do @@ -417,7 +435,9 @@ function is_service_enabled() { return 1 } -# remove extra commas from the input string (ENABLED_SERVICES) + +# remove extra commas from the input string (i.e. ``ENABLED_SERVICES``) +# _cleanup_service_list service-list function _cleanup_service_list () { echo "$1" | sed -e ' s/,,/,/g; @@ -426,15 +446,17 @@ function _cleanup_service_list () { ' } + # enable_service() adds the services passed as argument to the -# **ENABLED_SERVICES** list, if they are not already present. +# ``ENABLED_SERVICES`` list, if they are not already present. # # For example: -# # enable_service n-vol # # This function does not know about the special cases # for nova, glance, and quantum built into is_service_enabled(). +# Uses global ``ENABLED_SERVICES`` +# enable_service service [service ...] function enable_service() { local tmpsvcs="${ENABLED_SERVICES}" for service in $@; do @@ -446,15 +468,17 @@ function enable_service() { disable_negated_services } + # disable_service() removes the services passed as argument to the -# **ENABLED_SERVICES** list, if they are present. +# ``ENABLED_SERVICES`` list, if they are present. # # For example: -# # disable_service n-vol # # This function does not know about the special cases # for nova, glance, and quantum built into is_service_enabled(). +# Uses global ``ENABLED_SERVICES`` +# disable_service service [service ...] function disable_service() { local tmpsvcs=",${ENABLED_SERVICES}," local service @@ -466,17 +490,22 @@ function disable_service() { ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") } + # disable_all_services() removes all current services -# from **ENABLED_SERVICES** to reset the configuration +# from ``ENABLED_SERVICES`` to reset the configuration # before a minimal installation +# Uses global ``ENABLED_SERVICES`` +# disable_all_services function disable_all_services() { ENABLED_SERVICES="" } -# We are looking for services with a - at the beginning to force -# excluding those services. For example if you want to install all the default -# services but not nova-volume (n-vol) you can have this set in your localrc : + +# Remove all services starting with '-'. For example, to install all default +# services except nova-volume (n-vol) set in ``localrc``: # ENABLED_SERVICES+=",-n-vol" +# Uses global ``ENABLED_SERVICES`` +# disable_negated_services function disable_negated_services() { local tmpsvcs="${ENABLED_SERVICES}" local service @@ -488,6 +517,7 @@ function disable_negated_services() { ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") } + # Distro-agnostic package installer # install_package package [package ...] 
function install_package() { @@ -513,7 +543,8 @@ function is_set() { } -# pip install wrapper to set cache and proxy environment variables +# Wrapper for ``pip install`` to set cache and proxy environment variables +# Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``TRACK_DEPENDES``, ``*_proxy` # pip_install package [package ...] function pip_install { [[ "$OFFLINE" = "True" || -z "$@" ]] && return @@ -554,8 +585,9 @@ function restart_service() { } -# pip install the dependencies of the package before we do the setup.py -# develop, so that pip and not distutils process the dependency chain +# ``pip install`` the dependencies of the package before ``setup.py develop`` +# so pip and not distutils processes the dependency chain +# Uses globals ``TRACK_DEPENDES``, ``*_proxy` # setup_develop directory function setup_develop() { if [[ $TRACK_DEPENDS = True ]] ; then @@ -606,7 +638,9 @@ function stop_service() { # Normalize config values to True or False -# VAR=`trueorfalse default-value test-value` +# Accepts as False: 0 no false False FALSE +# Accepts as True: 1 yes true True TRUE +# VAR=$(trueorfalse default-value test-value) function trueorfalse() { local default=$1 local testval=$2 @@ -620,8 +654,8 @@ function trueorfalse() { # Retrieve an image from a URL and upload into Glance # Uses the following variables: -# **FILES** must be set to the cache dir -# **GLANCE_HOSTPORT** +# ``FILES`` must be set to the cache dir +# ``GLANCE_HOSTPORT`` # upload_image image-url glance-token function upload_image() { local image_url=$1 @@ -717,7 +751,8 @@ function upload_image() { } -# yum wrapper to set arguments correctly +# Wrapper for ``yum`` to set proxy environment variables +# Uses globals ``OFFLINE``, ``*_proxy` # yum_install package [package ...] function yum_install() { [[ "$OFFLINE" = "True" ]] && return @@ -731,3 +766,8 @@ function yum_install() { # Restore xtrace $XTRACE + + +# Local variables: +# -*- mode: Shell-script -*- +# End: \ No newline at end of file diff --git a/stack.sh b/stack.sh index 665a3663..12fa3723 100755 --- a/stack.sh +++ b/stack.sh @@ -1,8 +1,9 @@ #!/usr/bin/env bash # ``stack.sh`` is an opinionated OpenStack developer installation. It -# installs and configures various combinations of **Glance**, **Horizon**, -# **Keystone**, **Nova**, **Quantum**, **Heat** and **Swift** +# installs and configures various combinations of **Ceilometer**, **Cinder**, +# **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Quantum** +# and **Swift** # This script allows you to specify configuration options of what git # repositories to use, enabled services, network configuration and various @@ -10,14 +11,14 @@ # shared settings for common resources (mysql, rabbitmq) and build a multi-node # developer install. -# To keep this script simple we assume you are running on an **Ubuntu 11.10 -# Oneiric** or **Ubuntu 12.04 Precise** machine. It should work in a VM or -# physical server. Additionally we put the list of ``apt`` and ``pip`` -# dependencies and other configuration files in this repo. So start by -# grabbing this script and the dependencies. +# To keep this script simple we assume you are running on a recent **Ubuntu** +# (11.10 Oneiric or 12.04 Precise) or **Fedora** (F16 or F17) machine. It +# should work in a VM or physical server. Additionally we put the list of +# ``apt`` and ``rpm`` dependencies and other configuration files in this repo. 
# Learn more and get the most recent version at http://devstack.org + # Keep track of the devstack directory TOP_DIR=$(cd $(dirname "$0") && pwd) @@ -47,25 +48,31 @@ GetDistro # MYSQL_USER=hellaroot # # We try to have sensible defaults, so you should be able to run ``./stack.sh`` -# in most cases. +# in most cases. ``localrc`` is not distributed with DevStack and will never +# be overwritten by a DevStack update. # # DevStack distributes ``stackrc`` which contains locations for the OpenStack # repositories and branches to configure. ``stackrc`` sources ``localrc`` to -# allow you to safely override those settings without being overwritten -# when updating DevStack. +# allow you to safely override those settings. + if [[ ! -r $TOP_DIR/stackrc ]]; then echo "ERROR: missing $TOP_DIR/stackrc - did you grab more than just stack.sh?" exit 1 fi source $TOP_DIR/stackrc -# HTTP and HTTPS proxy servers are supported via the usual environment variables -# ``http_proxy`` and ``https_proxy``. Additionally if you would like to access -# to specific server directly and not through the proxy server, you can use -# ``no_proxy`` environment variable. They can be set in ``localrc`` if necessary -# or on the command line:: + +# Proxy Settings +# -------------- + +# HTTP and HTTPS proxy servers are supported via the usual environment variables [1] +# ``http_proxy``, ``https_proxy`` and ``no_proxy``. They can be set in +# ``localrc`` if necessary or on the command line:: +# +# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html # # http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh + if [[ -n "$http_proxy" ]]; then export http_proxy=$http_proxy fi @@ -98,6 +105,7 @@ if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|f16|f17) ]]; then fi fi +# Disallow qpid on oneiric if [ "${DISTRO}" = "oneiric" ] && is_service_enabled qpid ; then # Qpid was introduced in precise echo "You must use Ubuntu Precise or newer for Qpid support." @@ -114,17 +122,15 @@ fi # ``stack.sh`` keeps function libraries here # Make sure ``$TOP_DIR/lib`` directory is present if [ ! -d $TOP_DIR/lib ]; then - echo "ERROR: missing devstack/lib - did you grab more than just stack.sh?" + echo "ERROR: missing devstack/lib" exit 1 fi -# stack.sh keeps the list of ``apt`` and ``pip`` dependencies in external -# files, along with config templates and other useful files. You can find these -# in the ``files`` directory (next to this script). We will reference this -# directory using the ``FILES`` variable in this script. +# ``stack.sh`` keeps the list of ``apt`` and ``rpm`` dependencies and config +# templates and other useful files in the ``files`` subdirectory FILES=$TOP_DIR/files if [ ! -d $FILES ]; then - echo "ERROR: missing devstack/files - did you grab more than just stack.sh?" + echo "ERROR: missing devstack/files" exit 1 fi @@ -132,7 +138,7 @@ fi if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].stack"; then echo "You are already running a stack.sh session." echo "To rejoin this session type 'screen -x stack'." - echo "To destroy this session, kill the running screen." + echo "To destroy this session, type './unstack.sh'." exit 1 fi @@ -142,8 +148,12 @@ if is_service_enabled cinder && is_service_enabled n-vol; then exit 1 fi -# OpenStack is designed to be run as a regular user (Horizon will fail to run -# as root, since apache refused to startup serve content from root user). 
If + +# root Access +# ----------- + +# OpenStack is designed to be run as a non-root user; Horizon will fail to run +# as **root** since Apache will not serve content from **root** user). If # ``stack.sh`` is run as **root**, it automatically creates a **stack** user with # sudo privileges and runs as that user. @@ -153,8 +163,7 @@ if [[ $EUID -eq 0 ]]; then echo "In $ROOTSLEEP seconds, we will create a user 'stack' and run as that user" sleep $ROOTSLEEP - # since this script runs as a normal user, we need to give that user - # ability to run sudo + # Give the non-root user the ability to run as **root** via ``sudo`` if [[ "$os_PACKAGE" = "deb" ]]; then dpkg -l sudo || apt_get update && install_package sudo else @@ -170,7 +179,7 @@ if [[ $EUID -eq 0 ]]; then fi echo "Giving stack user passwordless sudo priviledges" - # some uec images sudoers does not have a '#includedir'. add one. + # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || echo "#includedir /etc/sudoers.d" >> /etc/sudoers ( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \ @@ -187,7 +196,7 @@ if [[ $EUID -eq 0 ]]; then fi exit 1 else - # We're not root, make sure sudo is available + # We're not **root**, make sure ``sudo`` is available if [[ "$os_PACKAGE" = "deb" ]]; then CHECK_SUDO_CMD="dpkg -l sudo" else @@ -195,7 +204,7 @@ else fi $CHECK_SUDO_CMD || die "Sudo is required. Re-run stack.sh as root ONE TIME ONLY to set up sudo." - # UEC images /etc/sudoers does not have a '#includedir'. add one. + # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers @@ -219,14 +228,14 @@ if [ ! -w $DEST ]; then sudo chown `whoami` $DEST fi -# Set True to configure ``stack.sh`` to run cleanly without Internet access. -# ``stack.sh`` must have been previously run with Internet access to install -# prerequisites and initialize ``$DEST``. +# Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without +# Internet access. ``stack.sh`` must have been previously run with Internet +# access to install prerequisites and fetch repositories. OFFLINE=`trueorfalse False $OFFLINE` -# Set True to configure ``stack.sh`` to exit with an error code if it is asked -# to clone any git repositories. If devstack is used in a testing environment, -# this may be used to ensure that the correct code is being tested. +# Set ``ERROR_ON_CLONE`` to ``True`` to configure ``stack.sh`` to exit if +# the destination git repository does not exist during the ``git_clone`` +# operation. 
ERROR_ON_CLONE=`trueorfalse False $ERROR_ON_CLONE` # Destination path for service data @@ -235,15 +244,15 @@ sudo mkdir -p $DATA_DIR sudo chown `whoami` $DATA_DIR -# Projects -# -------- +# Configure Projects +# ================== # Get project function libraries source $TOP_DIR/lib/cinder source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat -# Set the destination directories for openstack projects +# Set the destination directories for OpenStack projects NOVA_DIR=$DEST/nova HORIZON_DIR=$DEST/horizon GLANCE_DIR=$DEST/glance @@ -273,17 +282,19 @@ Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} # Use namespace or not Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} -# Name of the lvm volume group to use/create for iscsi volumes +# Name of the LVM volume group to use/create for iscsi volumes VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-} -# Nova supports pluggable schedulers. ``FilterScheduler`` should work in most -# cases. +# Nova supports pluggable schedulers. The default ``FilterScheduler`` +# should work in most cases. SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler} # Set fixed and floating range here so we can make sure not to use addresses -# from either range when attempting to guess the ip to use for the host +# from either range when attempting to guess the IP to use for the host. +# Note that setting FIXED_RANGE may be necessary when running DevStack +# in an OpenStack cloud that uses eith of these address ranges internally. FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28} @@ -294,10 +305,12 @@ if [ -z "$HOST_IP" -o "$HOST_IP" == "dhcp" ]; then HOST_IP="" HOST_IPS=`LC_ALL=C ip -f inet addr show ${HOST_IP_IFACE} | awk '/inet/ {split($2,parts,"/"); print parts[1]}'` for IP in $HOST_IPS; do - # Attempt to filter out ip addresses that are part of the fixed and - # floating range. Note that this method only works if the 'netaddr' + # Attempt to filter out IP addresses that are part of the fixed and + # floating range. Note that this method only works if the ``netaddr`` # python library is installed. If it is not installed, an error - # will be printed and the first ip from the interface will be used. + # will be printed and the first IP from the interface will be used. + # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct + # address. if ! (address_in_net $IP $FIXED_RANGE || address_in_net $IP $FLOATING_RANGE); then HOST_IP=$IP break; @@ -318,7 +331,7 @@ SYSLOG=`trueorfalse False $SYSLOG` SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP} SYSLOG_PORT=${SYSLOG_PORT:-516} -# Use color for logging output +# Use color for logging output (only available if syslog is not used) LOG_COLOR=`trueorfalse True $LOG_COLOR` # Service startup timeout @@ -374,7 +387,7 @@ function read_password { if [ "$VIRT_DRIVER" = 'xenserver' ]; then PUBLIC_INTERFACE_DEFAULT=eth3 - # allow build_domU.sh to specify the flat network bridge via kernel args + # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u) GUEST_INTERFACE_DEFAULT=eth1 else @@ -396,19 +409,19 @@ VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT} TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29} -# **MULTI_HOST** is a mode where each compute node runs its own network node. 
This +# ``MULTI_HOST`` is a mode where each compute node runs its own network node. This # allows network operations and routing for a VM to occur on the server that is # running the VM - removing a SPOF and bandwidth bottleneck. MULTI_HOST=`trueorfalse False $MULTI_HOST` -# If you are using FlatDHCP on multiple hosts, set the ``FLAT_INTERFACE`` -# variable but make sure that the interface doesn't already have an -# ip or you risk breaking things. +# If you are using the FlatDHCP network mode on multiple hosts, set the +# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already +# have an IP or you risk breaking things. # # **DHCP Warning**: If your flat interface device uses DHCP, there will be a # hiccup while the network is moved from the flat interface to the flat network # bridge. This will happen when you launch your first instance. Upon launch -# you will lose all connectivity to the node, and the vm launch will probably +# you will lose all connectivity to the node, and the VM launch will probably # fail. # # If you are running on a single node and don't need to access the VMs from @@ -431,6 +444,7 @@ FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT} # # With Quantum networking the NET_MAN variable is ignored. + # MySQL & (RabbitMQ or Qpid) # -------------------------- @@ -446,7 +460,7 @@ MYSQL_HOST=${MYSQL_HOST:-localhost} MYSQL_USER=${MYSQL_USER:-root} read_password MYSQL_PASSWORD "ENTER A PASSWORD TO USE FOR MYSQL." -# NOTE: Don't specify /db in this string so we can use it for multiple services +# NOTE: Don't specify ``/db`` in this string so we can use it for multiple services BASE_SQL_CONN=${BASE_SQL_CONN:-mysql://$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST} # Rabbit connection info @@ -455,6 +469,10 @@ if is_service_enabled rabbit; then read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT." fi + +# Glance +# ------ + # Glance connection info. Note the port must be specified. GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292} @@ -464,19 +482,17 @@ GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292} # TODO: add logging to different location. -# By default the location of swift drives and objects is located inside -# the swift source directory. SWIFT_DATA_DIR variable allow you to redefine -# this. +# Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects. +# Default is the common DevStack data directory. SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DEST}/data/swift} -# We are going to have the configuration files inside the source -# directory, change SWIFT_CONFIG_DIR if you want to adjust that. +# Set ``SWIFT_CONFIG_DIR`` to the location of the configuration files. +# Default is ``/etc/swift``. SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift} # DevStack will create a loop-back disk formatted as XFS to store the -# swift data. By default the disk size is 1 gigabyte. The variable -# SWIFT_LOOPBACK_DISK_SIZE specified in bytes allow you to change -# that. +# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in bytes. +# Default is 1 gigabyte. SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} # The ring uses a configurable number of bits from a path’s MD5 hash as @@ -489,7 +505,7 @@ SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} # By default we define 9 for the partition count (which mean 512). 
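# That is, the ring ends up with 2**SWIFT_PARTITION_POWER_SIZE partitions; a
# quick sanity check of the default value (illustrative):
#
#   $ echo $((2**9))
#   512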
SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} -# This variable allows you to configure how many replicas you want to be +# Set ``SWIFT_REPLICAS`` to configure how many replicas are to be # configured for your Swift cluster. By default the three replicas would need a # bit of IO and Memory on a VM you may want to lower that to 1 if you want to do # only some quick testing. @@ -514,8 +530,8 @@ S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333} # Keystone # -------- -# Service Token - Openstack components need to have an admin token -# to validate user tokens. +# The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database. It is +# just a string and is not a 'real' Keystone token. read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN." # Services authenticate to Identity with servicename/SERVICE_PASSWORD read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION." @@ -547,10 +563,10 @@ APACHE_GROUP=${APACHE_GROUP:-$APACHE_USER} # Log files # --------- -# Set up logging for stack.sh -# Set LOGFILE to turn on logging -# We append '.xxxxxxxx' to the given name to maintain history -# where xxxxxxxx is a representation of the date the file was created +# Set up logging for ``stack.sh`` +# Set ``LOGFILE`` to turn on logging +# Append '.xxxxxxxx' to the given name to maintain history +# where 'xxxxxxxx' is a representation of the date the file was created if [[ -n "$LOGFILE" || -n "$SCREEN_LOGDIR" ]]; then LOGDAYS=${LOGDAYS:-7} TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"} @@ -558,7 +574,7 @@ if [[ -n "$LOGFILE" || -n "$SCREEN_LOGDIR" ]]; then fi if [[ -n "$LOGFILE" ]]; then - # First clean up old log files. Use the user-specified LOGFILE + # First clean up old log files. Use the user-specified ``LOGFILE`` # as the template to search for, appending '.*' to match the date # we added on earlier runs. LOGDIR=$(dirname "$LOGFILE") @@ -575,11 +591,11 @@ if [[ -n "$LOGFILE" ]]; then fi # Set up logging of screen windows -# Set SCREEN_LOGDIR to turn on logging of screen windows to the -# directory specified in SCREEN_LOGDIR, we will log to the the file -# screen-$SERVICE_NAME-$TIMESTAMP.log in that dir and have a link -# screen-$SERVICE_NAME.log to the latest log file. -# Logs are kept for as long specified in LOGDAYS. +# Set ``SCREEN_LOGDIR`` to turn on logging of screen windows to the +# directory specified in ``SCREEN_LOGDIR``, we will log to the the file +# ``screen-$SERVICE_NAME-$TIMESTAMP.log`` in that dir and have a link +# ``screen-$SERVICE_NAME.log`` to the latest log file. +# Logs are kept for as long specified in ``LOGDAYS``. if [[ -n "$SCREEN_LOGDIR" ]]; then # We make sure the directory is created. @@ -591,8 +607,11 @@ if [[ -n "$SCREEN_LOGDIR" ]]; then fi fi -# So that errors don't compound we exit on any errors so you see only the -# first error that occurred. + +# Set Up Script Execution +# ----------------------- + +# Exit on any errors so that errors don't compound trap failed ERR failed() { local r=$? @@ -609,7 +628,7 @@ set -o xtrace # Install Packages # ================ -# Openstack uses a fair number of other projects. +# OpenStack uses a fair number of other projects. 
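# The distro packages those projects need are kept in per-distro lists under
# ``files/`` and handed to the package manager through the helper functions,
# roughly like this (a sketch -- the real calls below differ per distro):
#
#   install_package $(get_packages $FILES/apts | sort -u)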
# Install package requirements if [[ "$os_PACKAGE" = "deb" ]]; then @@ -650,7 +669,7 @@ mysql-server-5.1 mysql-server/start_on_boot boolean true MYSQL_PRESEED fi - # while ``.my.cnf`` is not needed for openstack to function, it is useful + # while ``.my.cnf`` is not needed for OpenStack to function, it is useful # as it allows you to access the mysql databases via ``mysql nova`` instead # of having to specify the username/password each time. if [[ ! -e $HOME/.my.cnf ]]; then @@ -702,8 +721,6 @@ fi if is_service_enabled n-cpu; then - # Virtualization Configuration - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if [[ "$os_PACKAGE" = "deb" ]]; then LIBVIRT_PKG_NAME=libvirt-bin else @@ -746,7 +763,10 @@ fi # Install python requirements pip_install $(get_packages $FILES/pips | sort -u) -# Check out OpenStack sources + +# Check Out Source +# ---------------- + git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH # Check out the client libs that are used most @@ -799,6 +819,7 @@ if is_service_enabled ceilometer; then install_ceilometer fi + # Initialization # ============== @@ -822,6 +843,7 @@ if is_service_enabled g-api n-api; then fi # Do this _after_ glance is installed to override the old binary +# TODO(dtroyer): figure out when this is no longer necessary setup_develop $GLANCECLIENT_DIR setup_develop $NOVA_DIR @@ -848,6 +870,7 @@ if [[ $TRACK_DEPENDS = True ]] ; then exit 0 fi + # Syslog # ------ @@ -889,10 +912,9 @@ fi # Mysql # ----- - if is_service_enabled mysql; then - #start mysql-server + # Start mysql-server if [[ "$os_PACKAGE" = "rpm" ]]; then # RPM doesn't start the service start_service mysqld @@ -1015,7 +1037,8 @@ if is_service_enabled horizon; then APACHE_CONF=conf.d/horizon.conf sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf fi - ## Configure apache to run horizon + + # Configure apache to run horizon sudo sh -c "sed -e \" s,%USER%,$APACHE_USER,g; s,%GROUP%,$APACHE_GROUP,g; @@ -1023,6 +1046,7 @@ if is_service_enabled horizon; then s,%APACHE_NAME%,$APACHE_NAME,g; s,%DEST%,$DEST,g; \" $FILES/apache-horizon.template >/etc/$APACHE_NAME/$APACHE_CONF" + restart_service $APACHE_NAME fi @@ -1106,7 +1130,7 @@ fi # ------- if is_service_enabled quantum; then - # Put config files in /etc/quantum for everyone to find + # Put config files in ``/etc/quantum`` for everyone to find if [[ ! -d /etc/quantum ]]; then sudo mkdir -p /etc/quantum fi @@ -1127,7 +1151,7 @@ if is_service_enabled quantum; then exit 1 fi - # If needed, move config file from $QUANTUM_DIR/etc/quantum to /etc/quantum + # If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``/etc/quantum`` mkdir -p /$Q_PLUGIN_CONF_PATH Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE @@ -1248,10 +1272,11 @@ screen_it q-agt "sudo python $AGENT_BINARY --config-file $Q_CONF_FILE --config-f # Start up the quantum agent screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file $Q_CONF_FILE --config-file=$Q_DHCP_CONF_FILE" + # Nova # ---- -# Put config files in /etc/nova for everyone to find +# Put config files in ``/etc/nova`` for everyone to find NOVA_CONF_DIR=/etc/nova if [[ ! 
-d $NOVA_CONF_DIR ]]; then sudo mkdir -p $NOVA_CONF_DIR @@ -1261,7 +1286,7 @@ sudo chown `whoami` $NOVA_CONF_DIR cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR # If Nova ships the new rootwrap filters files, deploy them -# (owned by root) and add a parameter to $NOVA_ROOTWRAP +# (owned by root) and add a parameter to ``$NOVA_ROOTWRAP`` ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP" if [[ -d $NOVA_DIR/etc/nova/rootwrap.d ]]; then # Wipe any existing rootwrap.d files first @@ -1334,7 +1359,7 @@ if is_service_enabled n-cpu; then # Force IP forwarding on, just on case sudo sysctl -w net.ipv4.ip_forward=1 - # attempt to load modules: network block device - used to manage qcow images + # Attempt to load modules: network block device - used to manage qcow images sudo modprobe nbd || true # Check for kvm (hardware based virtualization). If unable to initialize @@ -1398,9 +1423,11 @@ ResultActive=yes EOF' LIBVIRT_DAEMON=libvirtd fi - # The user that nova runs as needs to be member of libvirtd group otherwise + + # The user that nova runs as needs to be member of **libvirtd** group otherwise # nova-compute will be unable to use libvirt. sudo usermod -a -G libvirtd `whoami` + # libvirt detects various settings on startup, as we potentially changed # the system configuration (modules, filesystems), we need to restart # libvirt to detect those changes. @@ -1458,17 +1485,17 @@ fi if is_service_enabled swift; then - # We make sure to kill all swift processes first + # Make sure to kill all swift processes first swift-init all stop || true - # We first do a bit of setup by creating the directories and + # First do a bit of setup by creating the directories and # changing the permissions so we can run it as our user. USER_GROUP=$(id -g) sudo mkdir -p ${SWIFT_DATA_DIR}/drives sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR} - # We then create a loopback disk and format it to XFS. + # Create a loopback disk and format it to XFS. if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 @@ -1481,24 +1508,22 @@ if is_service_enabled swift; then dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \ bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} fi + # Make a fresh XFS filesystem mkfs.xfs -f -i size=1024 ${SWIFT_DATA_DIR}/drives/images/swift.img - # After the drive being created we mount the disk with a few mount - # options to make it most efficient as possible for swift. + # Mount the disk with mount options to make it as efficient as possible mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1 if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1 fi - # We then create link to that mounted location so swift would know - # where to go. 
+ # Create a link to the above mount for x in $(seq ${SWIFT_REPLICAS}); do sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$x ${SWIFT_DATA_DIR}/$x; done - # We now have to emulate a few different servers into one we - # create all the directories needed for swift + # Create all of the directories needed to emulate a few different servers for x in $(seq ${SWIFT_REPLICAS}); do drive=${SWIFT_DATA_DIR}/drives/sdb1/${x} node=${SWIFT_DATA_DIR}/${x}/node @@ -1514,7 +1539,7 @@ if is_service_enabled swift; then sudo chown -R $USER: ${SWIFT_CONFIG_DIR} /var/run/swift if [[ "$SWIFT_CONFIG_DIR" != "/etc/swift" ]]; then - # Some swift tools are hard-coded to use /etc/swift and are apparenty not going to be fixed. + # Some swift tools are hard-coded to use ``/etc/swift`` and are apparenty not going to be fixed. # Create a symlink if the config dir is moved sudo ln -sf ${SWIFT_CONFIG_DIR} /etc/swift fi @@ -1605,9 +1630,8 @@ EOF cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONFIG_DIR}/swift.conf iniset ${SWIFT_CONFIG_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH} - # We need to generate a object/account/proxy configuration - # emulating 4 nodes on different ports we have a little function - # that help us doing that. + # This function generates an object/account/proxy configuration + # emulating 4 nodes on different ports function generate_swift_configuration() { local server_type=$1 local bind_port=$2 @@ -1650,8 +1674,8 @@ EOF generate_swift_configuration container 6011 2 generate_swift_configuration account 6012 2 - # We have some specific configuration for swift for rsyslog. See - # the file /etc/rsyslog.d/10-swift.conf for more info. + # Specific configuration for swift for rsyslog. See + # ``/etc/rsyslog.d/10-swift.conf`` for more info. swift_log_dir=${SWIFT_DATA_DIR}/logs rm -rf ${swift_log_dir} mkdir -p ${swift_log_dir}/hourly @@ -1692,7 +1716,7 @@ EOF } && popd >/dev/null - # We then can start rsync. + # Start rsync if [[ "$os_PACKAGE" = "deb" ]]; then sudo /etc/init.d/rsync restart || : else @@ -1745,7 +1769,7 @@ elif is_service_enabled n-vol; then sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true # Clean out existing volumes for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do - # VOLUME_NAME_PREFIX prefixes the LVs we want + # ``VOLUME_NAME_PREFIX`` prefixes the LVs we want if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then sudo lvremove -f $VOLUME_GROUP/$lv fi @@ -1781,10 +1805,10 @@ function add_nova_opt { echo "$1" >> $NOVA_CONF_DIR/$NOVA_CONF } -# Remove legacy nova.conf +# Remove legacy ``nova.conf`` rm -f $NOVA_DIR/bin/nova.conf -# (re)create nova.conf +# (Re)create ``nova.conf`` rm -f $NOVA_CONF_DIR/$NOVA_CONF add_nova_opt "[DEFAULT]" add_nova_opt "verbose=True" @@ -1894,13 +1918,13 @@ if is_service_enabled cinder; then add_nova_opt "volume_api_class=nova.volume.cinder.API" fi -# Provide some transition from EXTRA_FLAGS to EXTRA_OPTS +# Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS`` if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; then EXTRA_OPTS=$EXTRA_FLAGS fi -# You can define extra nova conf flags by defining the array EXTRA_OPTS, -# For Example: EXTRA_OPTS=(foo=true bar=2) +# Define extra nova conf flags by defining the array ``EXTRA_OPTS``. 
+# For Example: ``EXTRA_OPTS=(foo=true bar=2)`` for I in "${EXTRA_OPTS[@]}"; do # Attempt to convert flags to options add_nova_opt ${I//--} @@ -1937,42 +1961,46 @@ fi # Nova Database -# ~~~~~~~~~~~~~ +# ------------- # All nova components talk to a central database. We will need to do this step # only once for an entire cluster. if is_service_enabled mysql && is_service_enabled nova; then - # (re)create nova database + # (Re)create nova database mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS nova;' + # Explicitly use latin1: to avoid lp#829209, nova expects the database to # use latin1 by default, and then upgrades the database to utf8 (see the # 082_essex.py in nova) mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE nova CHARACTER SET latin1;' - # (re)create nova database + # (Re)create nova database $NOVA_BIN_DIR/nova-manage db sync fi + # Heat -# ------ +# ---- + if is_service_enabled heat; then init_heat fi + # Launch Services # =============== -# nova api crashes if we start it with a regular screen command, +# Nova api crashes if we start it with a regular screen command, # so send the start command by forcing text into the window. # Only run the services specified in ``ENABLED_SERVICES`` -# launch the glance registry service +# Launch the glance registry service if is_service_enabled g-reg; then screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" fi -# launch the glance api and wait for it to answer before continuing +# Launch the glance api and wait for it to answer before continuing if is_service_enabled g-api; then screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." 
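# Every "wait for it to answer" check in ``stack.sh`` uses the same
# timeout-wrapped polling idiom; for g-api it is roughly (illustrative --
# the keystone block below shows the pattern verbatim):
#
#   if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s http://$GLANCE_HOSTPORT; do sleep 1; done"; then
#       echo "g-api did not start"
#       exit 1
#   fi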
@@ -1983,7 +2011,7 @@ if is_service_enabled g-api; then fi if is_service_enabled key; then - # (re)create keystone database + # (Re)create keystone database mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;' mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone CHARACTER SET utf8;' @@ -2001,7 +2029,7 @@ if is_service_enabled key; then cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR fi - # Rewrite stock keystone.conf: + # Rewrite stock ``keystone.conf`` iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN" iniset $KEYSTONE_CONF sql connection "$BASE_SQL_CONN/keystone?charset=utf8" iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2" @@ -2012,12 +2040,13 @@ if is_service_enabled key; then iniset $KEYSTONE_CONF filter:s3_extension paste.filter_factory "keystone.contrib.s3:S3Extension.factory" if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then - # Configure keystone.conf to use sql + # Configure ``keystone.conf`` to use sql iniset $KEYSTONE_CONF catalog driver keystone.catalog.backends.sql.Catalog inicomment $KEYSTONE_CONF catalog template_file else KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG + # Add swift endpoints to service catalog if swift is enabled if is_service_enabled swift; then echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG @@ -2039,7 +2068,7 @@ if is_service_enabled key; then s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g; " -i $KEYSTONE_CATALOG - # Configure keystone.conf to use templates + # Configure ``keystone.conf`` to use templates iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.TemplatedCatalog" iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG" fi @@ -2056,10 +2085,11 @@ if is_service_enabled key; then # Initialize keystone database $KEYSTONE_DIR/bin/keystone-manage db_sync - # set up certificates + + # Set up certificates $KEYSTONE_DIR/bin/keystone-manage pki_setup - # launch keystone and wait for it to answer before continuing + # Launch keystone and wait for it to answer before continuing screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" echo "Waiting for keystone to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ >/dev/null; do sleep 1; done"; then @@ -2067,7 +2097,7 @@ if is_service_enabled key; then exit 1 fi - # keystone_data.sh creates services, admin and demo users, and roles. + # ``keystone_data.sh`` creates services, admin and demo users, and roles. SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ @@ -2113,7 +2143,7 @@ if is_service_enabled q-svc; then # Create a small network # Since quantum command is executed in admin context at this point, - # --tenant_id needs to be specified. + # ``--tenant_id`` needs to be specified. 
NET_ID=$(quantum net-create --tenant_id $TENANT_ID net1 | grep ' id ' | get_field 2) quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE elif is_service_enabled mysql && is_service_enabled nova; then @@ -2127,12 +2157,9 @@ elif is_service_enabled mysql && is_service_enabled nova; then $NOVA_BIN_DIR/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL fi -# Launching nova-compute should be as simple as running ``nova-compute`` but -# have to do a little more than that in our script. Since we add the group -# ``libvirtd`` to our user in this script, when nova-compute is run it is -# within the context of our original shell (so our groups won't be updated). -# Use 'sg' to execute nova-compute as a member of the libvirtd group. -# We don't check for is_service_enable as screen_it does it for us +# The group **libvirtd** is added to the current user in this script. +# Use 'sg' to execute nova-compute as a member of the **libvirtd** group. +# ``screen_it`` checks ``is_service_enabled``, it is not needed here screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_BIN_DIR/nova-compute" screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" screen_it n-vol "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-volume" @@ -2161,18 +2188,17 @@ if is_service_enabled heat; then start_heat fi + # Install Images # ============== # Upload an image to glance. # -# The default image is cirros, a small testing image, which lets you login as root -# +# The default image is cirros, a small testing image which lets you login as **root** # cirros also uses ``cloud-init``, supporting login via keypair and sending scripts as # userdata. See https://help.ubuntu.com/community/CloudInit for more on cloud-init # -# Override ``IMAGE_URLS`` with a comma-separated list of uec images. -# +# Override ``IMAGE_URLS`` with a comma-separated list of UEC images. # * **oneiric**: http://uec-images.ubuntu.com/oneiric/current/oneiric-server-cloudimg-amd64.tar.gz # * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz @@ -2207,7 +2233,7 @@ set +o xtrace # Using the cloud -# =============== +# --------------- echo "" echo "" @@ -2227,7 +2253,7 @@ if is_service_enabled key; then echo "The password: $ADMIN_PASSWORD" fi -# Echo HOST_IP - useful for build_uec.sh, which uses dhcp to give the instance an address +# Echo ``HOST_IP`` - useful for ``build_uec.sh``, which uses dhcp to give the instance an address echo "This is your host ip: $HOST_IP" # Warn that ``EXTRA_FLAGS`` needs to be converted to ``EXTRA_OPTS`` @@ -2235,5 +2261,5 @@ if [[ -n "$EXTRA_FLAGS" ]]; then echo "WARNING: EXTRA_FLAGS is defined and may need to be converted to EXTRA_OPTS" fi -# Indicate how long this took to run (bash maintained variable 'SECONDS') +# Indicate how long this took to run (bash maintained variable ``SECONDS``) echo "stack.sh completed in $SECONDS seconds." diff --git a/stackrc b/stackrc index d8d10086..3002c463 100644 --- a/stackrc +++ b/stackrc @@ -1,3 +1,5 @@ +# stackrc +# # Find the other rc files RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) @@ -5,21 +7,22 @@ RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) DEST=/opt/stack # Specify which services to launch. These generally correspond to -# screen tabs. 
If you like to add other services that are not enabled -# by default you can append them in your ENABLED_SERVICES variable in -# your localrc. For example for swift you can just add this in your -# localrc to add it with the other services: -# ENABLED_SERVICES+=,swift +# screen tabs. To change the default list, use the ``enable_service`` and +# ``disable_service`` functions in ``localrc``. +# For example, to enable Swift add this to ``localrc``: +# enable_service swift # -# If you like to explicitly remove services you can add a -$service in -# ENABLED_SERVICES, for example in your localrc to install all defaults but not -# cinder you would just need to set this : -# ENABLED_SERVICES+=,-cinder +# And to disable Cinder and use Nova Volumes instead: +# disable_service c-api c-sch c-vol cinder +# enable_service n-vol ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit # Set the default Nova APIs to enable NOVA_ENABLED_APIS=ec2,osapi_compute,osapi_volume,metadata +# Repositories +# ------------ + # Base GIT Repo URL # Another option is http://review.openstack.org/p GIT_BASE=https://github.com @@ -46,7 +49,6 @@ SWIFT_BRANCH=master SWIFT3_REPO=https://github.com/fujita/swift3.git SWIFT3_BRANCH=master - # python swift client library SWIFTCLIENT_REPO=${GIT_BASE}/openstack/python-swiftclient SWIFTCLIENT_BRANCH=master @@ -75,7 +77,7 @@ HORIZON_BRANCH=master NOVACLIENT_REPO=${GIT_BASE}/openstack/python-novaclient.git NOVACLIENT_BRANCH=master -# Shared openstack python client library +# consolidated openstack python client OPENSTACKCLIENT_REPO=${GIT_BASE}/openstack/python-openstackclient.git OPENSTACKCLIENT_BRANCH=master @@ -110,7 +112,7 @@ if [ -f $RC_DIR/localrc ]; then source $RC_DIR/localrc fi -# Specify a comma-separated list of uec images to download and install into glance. +# Specify a comma-separated list of UEC images to download and install into glance. # supported urls here are: # * "uec-style" images: # If the file ends in .tar.gz, uncompress the tarball and and select the first @@ -123,13 +125,17 @@ fi # example: # http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-armel-disk1.img # http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz +# * OpenVZ image: +# OpenVZ uses its own format of image, and does not support UEC style images + #IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image #IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img" # cirros full disk image -# -# Set default image based on LIBVIRT_TYPE or VIRT_DRIVER, which may be set in localrc -# but allow DEFAULT_IMAGE_NAME and IMAGE_URLS to be set directly in localrc, too. + +# Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of +# which may be set in ``localrc``. Also allow ``DEFAULT_IMAGE_NAME`` and +# ``IMAGE_URLS`` to be set directly in ``localrc``. 
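# For example, to force the cirros full-disk image regardless of driver, a
# ``localrc`` could contain (values are illustrative):
#
#   DEFAULT_IMAGE_NAME=cirros-0.3.0-x86_64-disk
#   IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img"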
case "$VIRT_DRIVER" in - openvz) # OpenVZ uses its own format of image, and does not support uec style images + openvz) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-11.10-x86_64} IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-11.10-x86_64.tar.gz"};; libvirt) diff --git a/unstack.sh b/unstack.sh index 17752a8b..e73cc2dc 100755 --- a/unstack.sh +++ b/unstack.sh @@ -6,7 +6,7 @@ # mysql and rabbit are left running as OpenStack code refreshes # do not require them to be restarted. # -# Stop all processes by setting UNSTACK_ALL or specifying ``--all`` +# Stop all processes by setting ``UNSTACK_ALL`` or specifying ``--all`` # on the command line # Keep track of the current devstack directory. From b26a27aef06d12d423a70eec078103aa53216fc5 Mon Sep 17 00:00:00 2001 From: Bob Kukura Date: Mon, 27 Aug 2012 01:53:11 -0400 Subject: [PATCH 647/967] Update stack.sh for Quantum openvswitch plugin changes With support for multiple physical networks being added to the Quantum openvswitch plugin via https://review.openstack.org/#/c/11388/, the configuration needed to enable either tunneling or VLANs has changed. See http://wiki.openstack.org/ConfigureOpenvswitch for configuration and usage details. Change-Id: I82ca587e097a0f9612af46f2f89a19ac27c73432 --- stack.sh | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 665a3663..6f68646a 100755 --- a/stack.sh +++ b/stack.sh @@ -1135,14 +1135,19 @@ if is_service_enabled quantum; then sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/$Q_DB_NAME?charset=utf8/g" /$Q_PLUGIN_CONF_FILE OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-True} - if [[ "$Q_PLUGIN" = "openvswitch" && $OVS_ENABLE_TUNNELING = "True" ]]; then + if [[ "$Q_PLUGIN" = "openvswitch" && "$OVS_ENABLE_TUNNELING" = "True" ]]; then OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'` if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then echo "You are running OVS version $OVS_VERSION." echo "OVS 1.4+ is required for tunneling between multiple hosts." 
exit 1 fi - sudo sed -i -e "s/.*enable_tunneling = .*$/enable_tunneling = $OVS_ENABLE_TUNNELING/g" /$Q_PLUGIN_CONF_FILE + if [[ "$OVS_DEFAULT_BRIDGE" = "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges "" + else + iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges default + fi + iniset /$Q_PLUGIN_CONF_FILE OVS tunnel_id_ranges 1:1000 fi Q_CONF_FILE=/etc/quantum/quantum.conf @@ -1189,7 +1194,19 @@ if is_service_enabled q-agt; then sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int - sudo sed -i -e "s/.*local_ip = .*/local_ip = $HOST_IP/g" /$Q_PLUGIN_CONF_FILE + if [[ "$OVS_ENABLE_TUNNELING" == "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP + else + # Need bridge if not tunneling + OVS_DEFAULT_BRIDGE=${OVS_DEFAULT_BRIDGE:-br-$GUEST_INTERFACE_DEFAULT} + fi + if [[ "$OVS_DEFAULT_BRIDGE" = "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings "" + else + # Configure bridge manually with physical interface as port for multi-node + sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_DEFAULT_BRIDGE + iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings default:$OVS_DEFAULT_BRIDGE + fi AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py" elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then # Start up the quantum <-> linuxbridge agent From 05f23656d016f99c56b1a9c193c5715d6d614529 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 29 Aug 2012 15:20:21 -0500 Subject: [PATCH 648/967] Add lib/template Add a template for the lib/* sub-scripts and a description to HACKING. Change-Id: Ia490de8e565982c517525e09d8941a847ba530aa --- HACKING.rst | 20 ++++++++++++-- lib/template | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+), 3 deletions(-) create mode 100644 lib/template diff --git a/HACKING.rst b/HACKING.rst index 7262cff6..e8f90c78 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -53,9 +53,23 @@ configuration of the user environment:: source $TOP_DIR/openrc ``stack.sh`` is a rather large monolithic script that flows through from beginning -to end. There is a proposal to segment it to put the OpenStack projects -into their own sub-scripts to better document the projects as a unit rather than -have it scattered throughout ``stack.sh``. Someday. +to end. The process of breaking it down into project-level sub-scripts has begun +with the introduction of ``lib/cinder`` and ``lib/ceilometer``. + +These library sub-scripts have a number of fixed entry points, some of which may +just be stubs. These entry points will be called by ``stack.sh`` in the +following order:: + + install_XXXX + configure_XXXX + init_XXXX + start_XXXX + stop_XXXX + cleanup_XXXX + +There is a sub-script template in ``lib/templates`` to be used in creating new +service sub-scripts. The comments in ``<>`` are meta comments describing +how to use the template and should be removed. 
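For a hypothetical service ``XXXX``, the calling side in ``stack.sh`` then
reduces to sourcing the library and invoking the entry points in order
(sketch only)::

    source $TOP_DIR/lib/XXXX

    install_XXXX
    configure_XXXX
    init_XXXX
    start_XXXX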
Documentation diff --git a/lib/template b/lib/template new file mode 100644 index 00000000..d70f2189 --- /dev/null +++ b/lib/template @@ -0,0 +1,77 @@ +# lib/template +# Functions to control the configuration and operation of the XXXX service +# + +# Dependencies: +# ``functions`` file +# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# + +# ``stack.sh`` calls the entry points in this order: +# +# install_XXXX +# configure_XXXX +# init_XXXX +# start_XXXX +# stop_XXXX +# cleanup_XXXX + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following along as the install occurs. +set -o xtrace + + +# Defaults +# -------- + +# + +# Set up default directories +XXXX_DIR=$DEST/XXXX +XXX_CONF_DIR=/etc/XXXX + + +# Entry Points +# ------------ + +# cleanup_XXXX() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_XXXX() { + # kill instances (nova) + # delete image files (glance) + # This function intentionally left blank + : +} + +# configure_XXXX() - Set config files, create data dirs, etc +function configure_XXXX() { + # sudo python setup.py deploy + # iniset $XXXX_CONF ... + # This function intentionally left blank + : +} + +# init_XXXX() - Initialize databases, etc. +function init_XXXX() { + # clean up from previous (possibly aborted) runs + # create required data files + : +} + +# install_XXXX() - Collect source and prepare +function install_XXXX() { + # git clone xxx + : +} + +# start_XXXX() - Start running processes, including screen +function start_XXXX() + # screen_it XXXX "cd $XXXX_DIR && $XXXX_DIR/bin/XXXX-bin" + : +} + +# stop_XXXX() - Stop running processes (non-screen) +function stop_XXXX() { + # FIXME(dtroyer): stop only our screen screen window? + : +} From 0fe0606a8fb6bfd01fbdbc0f81bb7f481524faac Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Wed, 29 Aug 2012 13:30:03 -0700 Subject: [PATCH 649/967] Set glance authtoken in config, not paste Glance can use the keystone_authtoken config section in glance-api-conf and glance-registry.conf instead of having to write into paste files. 
Change-Id: Iaf372f3a01558b8305b5c5f5f64113a3674e9782 --- stack.sh | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/stack.sh b/stack.sh index 665a3663..c2f384d3 100755 --- a/stack.sh +++ b/stack.sh @@ -1055,16 +1055,13 @@ if is_service_enabled g-reg; then iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $BASE_SQL_CONN/glance?charset=utf8 iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone - - GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini - cp $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI - iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ - iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken admin_user glance - iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD + iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance + iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_password $SERVICE_PASSWORD GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF @@ -1074,6 +1071,13 @@ if is_service_enabled g-reg; then iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/ iniset $GLANCE_API_CONF paste_deploy flavor keystone + iniset $GLANCE_API_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $GLANCE_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $GLANCE_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $GLANCE_API_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $GLANCE_API_CONF keystone_authtoken admin_user glance + iniset $GLANCE_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD # Store the images in swift if enabled. 
if is_service_enabled swift; then @@ -1084,15 +1088,11 @@ if is_service_enabled g-reg; then iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True fi + GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini + cp $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI + GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini cp $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI - iniset $GLANCE_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $GLANCE_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $GLANCE_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $GLANCE_API_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ - iniset $GLANCE_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $GLANCE_API_PASTE_INI filter:authtoken admin_user glance - iniset $GLANCE_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json cp $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON From ea78a6a1ed1f9b6da1716e6422a0ff676f96c9ed Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 30 Aug 2012 13:44:39 -0700 Subject: [PATCH 650/967] Configure Glance caching and cachemanagement This configures and enables Glance's image caching layer. * Set glance-api paste flavor to keystone+cachemanagement * Copy glance-cache.conf into config dir * Set appropriate values in glance-cache.conf Change-Id: I230a984a08618c554727584538ed35d72f9bfbd5 --- stack.sh | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index c2f384d3..89f8c324 100755 --- a/stack.sh +++ b/stack.sh @@ -1036,13 +1036,17 @@ if is_service_enabled g-reg; then sudo mkdir -p $GLANCE_CONF_DIR fi sudo chown `whoami` $GLANCE_CONF_DIR + GLANCE_IMAGE_DIR=$DEST/glance/images # Delete existing images rm -rf $GLANCE_IMAGE_DIR - - # Use local glance directories mkdir -p $GLANCE_IMAGE_DIR + GLANCE_CACHE_DIR=$DEST/glance/cache + # Delete existing images + rm -rf $GLANCE_CACHE_DIR + mkdir -p $GLANCE_CACHE_DIR + # (re)create glance database mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS glance;' mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE glance CHARACTER SET utf8;' @@ -1070,7 +1074,8 @@ if is_service_enabled g-reg; then iniset $GLANCE_API_CONF DEFAULT sql_connection $BASE_SQL_CONN/glance?charset=utf8 iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/ - iniset $GLANCE_API_CONF paste_deploy flavor keystone + iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ + iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement iniset $GLANCE_API_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST iniset $GLANCE_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT iniset $GLANCE_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL @@ -1094,6 +1099,23 @@ if is_service_enabled g-reg; then GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini cp $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI + GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf + cp $GLANCE_DIR/etc/glance-cache.conf $GLANCE_CACHE_CONF + iniset $GLANCE_CACHE_CONF DEFAULT debug True + inicomment $GLANCE_CACHE_CONF DEFAULT log_file + iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG + iniset $GLANCE_CACHE_CONF DEFAULT 
filesystem_store_datadir $GLANCE_IMAGE_DIR/ + iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ + iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_url + iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 + iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_tenant_name + iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_TENANT_NAME + iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_user + iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance + iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_password + iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD + + GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json cp $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON From 5bf7d5ccb297bcfe4d429be4bacce2f0b9d04687 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 17 Aug 2012 13:12:38 -0500 Subject: [PATCH 651/967] Move nova volumes to lib/n-vol The next in a line of changes to break down stack.sh and make it a bit more manageable. Part of blueprint devstack-modular Change-Id: I9f7ba23391851959412779f842934f5b26724713 --- lib/n-vol | 118 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 57 +++----------------------- 2 files changed, 123 insertions(+), 52 deletions(-) create mode 100644 lib/n-vol diff --git a/lib/n-vol b/lib/n-vol new file mode 100644 index 00000000..30be0cdd --- /dev/null +++ b/lib/n-vol @@ -0,0 +1,118 @@ +# lib/n-vol +# Install and start Nova volume service + +# Dependencies: +# - functions +# - KEYSTONE_AUTH_* must be defined +# SERVICE_{TENANT_NAME|PASSWORD} must be defined + +# stack.sh +# --------- +# install_nvol +# configure_nvol +# init_nvol +# start_nvol +# stop_nvol +# cleanup_nvol + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following along as the install occurs. +set -o xtrace + + +# Defaults +# -------- + +# Name of the LVM volume group to use/create for iscsi volumes +VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} +VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} + + +# cleanup_nvol() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_nvol() { + # kill instances (nova) + # delete image files (glance) + # This function intentionally left blank + : +} + +# configure_nvol() - Set config files, create data dirs, etc +function configure_nvol() { + # sudo python setup.py deploy + # iniset $XXX_CONF ... + # This function intentionally left blank + : +} + +# init_nvol() - Initialize databases, etc. +function init_nvol() { + # Configure a default volume group called '`stack-volumes`' for the volume + # service if it does not yet exist. If you don't wish to use a file backed + # volume group, create your own volume group called ``stack-volumes`` before + # invoking ``stack.sh``. + # + # By default, the backing file is 5G in size, and is stored in ``/opt/stack/data``. + + if ! sudo vgs $VOLUME_GROUP; then + VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file} + # Only create if the file doesn't already exists + [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE + DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` + # Only create if the loopback device doesn't contain $VOLUME_GROUP + if ! 
sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi + fi + + mkdir -p $NOVA_DIR/volumes + + if sudo vgs $VOLUME_GROUP; then + if [[ "$os_PACKAGE" = "rpm" ]]; then + # RPM doesn't start the service + start_service tgtd + fi + + # Remove nova iscsi targets + sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true + # Clean out existing volumes + for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do + # ``VOLUME_NAME_PREFIX`` prefixes the LVs we want + if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then + sudo lvremove -f $VOLUME_GROUP/$lv + fi + done + fi +} + +# install_nvol() - Collect source and prepare +function install_nvol() { + # git clone xxx + # Install is handled when installing Nova + : +} + +# start_nvol() - Start running processes, including screen +function start_nvol() { + # Setup the tgt configuration file + if [[ ! -f /etc/tgt/conf.d/nova.conf ]]; then + sudo mkdir -p /etc/tgt/conf.d + echo "include $NOVA_DIR/volumes/*" | sudo tee /etc/tgt/conf.d/nova.conf + fi + + if [[ "$os_PACKAGE" = "deb" ]]; then + # tgt in oneiric doesn't restart properly if tgtd isn't running + # do it in two steps + sudo stop tgt || true + sudo start tgt + else + restart_service tgtd + fi + + screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume" +} + +# stop_nvol() - Stop running processes (non-screen) +function stop_nvol() { + # FIXME(dtroyer): stop only the n-vol screen window? + + stop_service tgt +} diff --git a/stack.sh b/stack.sh index 91380c5b..ac675e8c 100755 --- a/stack.sh +++ b/stack.sh @@ -249,6 +249,7 @@ sudo chown `whoami` $DATA_DIR # Get project function libraries source $TOP_DIR/lib/cinder +source $TOP_DIR/lib/n-vol source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat @@ -1757,57 +1758,7 @@ fi if is_service_enabled cinder; then init_cinder elif is_service_enabled n-vol; then - # Configure a default volume group called '`stack-volumes`' for the volume - # service if it does not yet exist. If you don't wish to use a file backed - # volume group, create your own volume group called ``stack-volumes`` before - # invoking ``stack.sh``. - # - # By default, the backing file is 5G in size, and is stored in ``/opt/stack/data``. - - if ! sudo vgs $VOLUME_GROUP; then - VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file} - # Only create if the file doesn't already exists - [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE - DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` - # Only create if the loopback device doesn't contain $VOLUME_GROUP - if ! sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi - fi - - if sudo vgs $VOLUME_GROUP; then - if [[ "$os_PACKAGE" = "rpm" ]]; then - # RPM doesn't start the service - start_service tgtd - fi - - # Setup tgtd configuration files - mkdir -p $NOVA_DIR/volumes - - # Remove nova iscsi targets - sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true - # Clean out existing volumes - for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do - # ``VOLUME_NAME_PREFIX`` prefixes the LVs we want - if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then - sudo lvremove -f $VOLUME_GROUP/$lv - fi - done - fi - - if [[ "$os_PACKAGE" = "deb" ]]; then - - # Setup the tgt configuration file - if [[ ! 
-f /etc/tgt/conf.d/nova.conf ]]; then - sudo mkdir -p /etc/tgt/conf.d - echo "include $NOVA_DIR/volumes/*" | sudo tee /etc/tgt/conf.d/nova.conf - fi - - # tgt in oneiric doesn't restart properly if tgtd isn't running - # do it in two steps - sudo stop tgt || true - sudo start tgt - else - restart_service tgtd - fi + init_nvol fi # Support entry points installation of console scripts @@ -2179,12 +2130,14 @@ fi # ``screen_it`` checks ``is_service_enabled``, it is not needed here screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_BIN_DIR/nova-compute" screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" -screen_it n-vol "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-volume" screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network" screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler" screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF --web ." screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF" screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" +if is_service_enabled n-vol; then + start_nvol +fi if is_service_enabled cinder; then start_cinder fi From 3bae7c2c61ba69bb0cac05560acace943c9ebc8e Mon Sep 17 00:00:00 2001 From: ewindisch Date: Wed, 18 Jan 2012 11:18:35 -0500 Subject: [PATCH 652/967] Add ZeroMQ RPC backend Now prevents more than one RPC backend selection. (ZeroMQ, Qpid, and Rabbit are mutually exclusive) Configure quantum and cinder to use ZeroMQ Adds qpid to cinder config. Change-Id: I229c4c632213a303d097d4a029e986598073665a --- README.md | 16 ++++++++++++++++ lib/cinder | 2 ++ stack.sh | 35 ++++++++++++++++++++++++++++++++--- 3 files changed, 50 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index fd66e960..872b16b8 100644 --- a/README.md +++ b/README.md @@ -57,6 +57,22 @@ If the EC2 API is your cup-o-tea, you can create credentials and use euca2ools: You can override environment variables used in `stack.sh` by creating file name `localrc`. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host. +# RPC Backend + +Multiple RPC backends are available. Currently, this +includes RabbitMQ (default), Qpid, and ZeroMQ. Your backend of +choice may be selected via the `localrc`. + +Note that selecting more than one RPC backend will result in a failure. + +Example (ZeroMQ): + + ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-qpid,zeromq" + +Example (Qpid): + + ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-zeromq,qpid" + # Swift Swift is not installed by default, you can enable easily by adding this to your `localrc`: diff --git a/lib/cinder b/lib/cinder index 1bad5c00..5f0b2553 100644 --- a/lib/cinder +++ b/lib/cinder @@ -115,6 +115,8 @@ function configure_cinder() { if is_service_enabled qpid ; then iniset $CINDER_CONF DEFAULT rpc_backend cinder.openstack.common.rpc.impl_qpid + elif is_service_enabled zeromq; then + iniset $CINDER_CONF DEFAULT rpc_backend nova.openstack.common.rpc.impl_zmq elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then iniset $CINDER_CONF DEFAULT rabbit_host $RABBIT_HOST iniset $CINDER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD diff --git a/stack.sh b/stack.sh index 91380c5b..3e3d8cf5 100755 --- a/stack.sh +++ b/stack.sh @@ -142,6 +142,23 @@ if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].stack"; then exit 1 fi +# Make sure we only have one rpc backend enabled. 
+rpc_backend_cnt=0 +for svc in qpid zeromq rabbit; do + is_service_enabled $svc && + ((rpc_backend_cnt++)) +done +if [ "$rpc_backend_cnt" -gt 1 ]; then + echo "ERROR: only one rpc backend may be enabled," + echo " set only one of 'rabbit', 'qpid', 'zeromq'" + echo " via ENABLED_SERVICES." +elif [ "$rpc_backend_cnt" == 0 ]; then + echo "ERROR: at least one rpc backend must be enabled," + echo " set one of 'rabbit', 'qpid', 'zeromq'" + echo " via ENABLED_SERVICES." +fi +unset rpc_backend_cnt + # Make sure we only have one volume service enabled. if is_service_enabled cinder && is_service_enabled n-vol; then echo "ERROR: n-vol and cinder must not be enabled at the same time" @@ -655,6 +672,12 @@ elif is_service_enabled qpid; then else install_package qpidd fi +elif is_service_enabled zeromq; then + if [[ "$os_PACKAGE" = "rpm" ]]; then + install_package zeromq python-zmq + else + install_package libzmq1 python-zmq + fi fi if is_service_enabled mysql; then @@ -893,8 +916,8 @@ EOF fi -# Rabbit or Qpid -# -------------- +# Finalize queue instllation +# -------------------------- if is_service_enabled rabbit; then # Start rabbitmq-server @@ -1274,6 +1297,8 @@ if is_service_enabled quantum; then iniset $Q_CONF_FILE DEFAULT control_exchange quantum if is_service_enabled qpid ; then iniset $Q_CONF_FILE DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid + elif is_service_enabled zeromq; then + iniset $Q_CONF_FILE DEFAULT rpc_backend quantum.openstack.common.rpc.impl_zmq elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then iniset $Q_CONF_FILE DEFAULT rabbit_host $RABBIT_HOST iniset $Q_CONF_FILE DEFAULT rabbit_password $RABBIT_PASSWORD @@ -1898,7 +1923,9 @@ add_nova_opt "vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS" add_nova_opt "api_paste_config=$NOVA_CONF_DIR/api-paste.ini" add_nova_opt "image_service=nova.image.glance.GlanceImageService" add_nova_opt "ec2_dmz_host=$EC2_DMZ_HOST" -if is_service_enabled qpid ; then +if is_service_enabled zeromq; then + add_nova_opt "rpc_backend=nova.openstack.common.rpc.impl_zmq" +elif is_service_enabled qpid; then add_nova_opt "rpc_backend=nova.rpc.impl_qpid" elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then add_nova_opt "rabbit_host=$RABBIT_HOST" @@ -2142,6 +2169,8 @@ if is_service_enabled key; then fi fi +screen_it zeromq "cd $NOVA_DIR && $NOVA_DIR/bin/nova-rpc-zmq-receiver" + # Launch the nova-api and wait for it to answer before continuing if is_service_enabled n-api; then add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS" From 5547baa5bb26a58d0eea6cf73adfad4866ac8fde Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 31 Aug 2012 10:55:36 -0500 Subject: [PATCH 653/967] Freshen samples Tend the bit rot in the sample local.sh and localrc files. Change-Id: I58bc3929629d088ac1c3f1dc355c5bec2c6e6b98 --- samples/local.sh | 10 ++++++---- samples/localrc | 44 ++++++++++++++++++++++++++++---------------- 2 files changed, 34 insertions(+), 20 deletions(-) diff --git a/samples/local.sh b/samples/local.sh index 2c54b10f..eb9bc241 100755 --- a/samples/local.sh +++ b/samples/local.sh @@ -7,9 +7,10 @@ # work properly. # This is a collection of some of the things we have found to be useful to run -# after stack.sh to tweak the OpenStack configuration that DevStack produces. +# after ``stack.sh`` to tweak the OpenStack configuration that DevStack produces. # These should be considered as samples and are unsupported DevStack code. 
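# A typical tweak of this kind is opening up the default security group so
# newly booted guests are reachable over ssh and ping, e.g. (illustrative):
#
#   nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
#   nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0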
+ # Keep track of the devstack directory TOP_DIR=$(cd $(dirname "$0") && pwd) @@ -34,7 +35,7 @@ source $TOP_DIR/openrc # Add first keypair found in localhost:$HOME/.ssh for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do - if [[ -f $i ]]; then + if [[ -r $i ]]; then nova keypair-add --pub_key=$i `hostname` break fi @@ -55,8 +56,9 @@ MI_NAME=m1.micro if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then nova flavor-create $MI_NAME 6 128 0 1 fi + + # Other Uses # ---------- -# Add tcp/22 to default security group - +# Add tcp/22 and icmp to default security group diff --git a/samples/localrc b/samples/localrc index 4fb093dc..bcaa788a 100644 --- a/samples/localrc +++ b/samples/localrc @@ -1,9 +1,10 @@ # Sample ``localrc`` for user-configurable variables in ``stack.sh`` -# NOTE: Copy this file to the root ``devstack`` directory for it to work properly. +# NOTE: Copy this file to the root ``devstack`` directory for it to +# work properly. -# ``localrc`` is a user-maintained setings file that is sourced at the end of -# ``stackrc``. This gives it the ability to override any variables set in ``stackrc``. +# ``localrc`` is a user-maintained setings file that is sourced from ``stackrc``. +# This gives it the ability to override any variables set in ``stackrc``. # Also, most of the settings in ``stack.sh`` are written to only be set if no # value has already been set; this lets ``localrc`` effectively override the # default values. @@ -21,40 +22,51 @@ # there are a few minimal variables set: # If the ``*_PASSWORD`` variables are not set here you will be prompted to enter -# values for them by ``stack.sh``. +# values for them by ``stack.sh`` and they will be added to ``localrc``. ADMIN_PASSWORD=nomoresecrete MYSQL_PASSWORD=stackdb RABBIT_PASSWORD=stackqueue SERVICE_PASSWORD=$ADMIN_PASSWORD -# HOST_IP should be set manually for best results. It is auto-detected during the -# first run of ``stack.sh`` but often is indeterminate on later runs due to the IP -# being moved from an Ethernet interface to a bridge on the host. Setting it here -# also makes it available for ``openrc`` to include when setting ``OS_AUTH_URL``. +# ``HOST_IP`` should be set manually for best results if the NIC configuration +# of the host is unusual, i.e. ``eth1`` has the default route but ``eth0`` is the +# public interface. It is auto-detected in ``stack.sh`` but often is indeterminate +# on later runs due to the IP moving from an Ethernet interface to a bridge on +# the host. Setting it here also makes it available for ``openrc`` to include +# when setting ``OS_AUTH_URL``. # ``HOST_IP`` is not set by default. -HOST_IP=w.x.y.z +#HOST_IP=w.x.y.z -# Set DevStack Install Directory -# ------------------------------ +# Logging +# ------- -# The DevStack install directory is set by the ``DEST`` variable. By setting it -# early in ``localrc`` you can reference it in later variables. The default value -# is ``/opt/stack``. It can be useful to set it even though it is not changed from -# the default value. -DEST=/opt/stack +# By default ``stack.sh`` output only goes to the terminal where it runs. It can +# be configured to additionally log to a file by setting ``LOGFILE`` to the full +# path of the destination log file. A timestamp will be appended to the given name. +LOGFILE=$DEST/logs/stack.sh.log + +# Old log files are automatically removed after 7 days to keep things neat. Change +# the number of days by setting ``LOGDAYS``. 
+LOGDAYS=2 + +# Nova logs will be colorized if ``SYSLOG`` is not set; turn this off by setting +# ``LOG_COLOR`` false. +#LOG_COLOR=False # Using milestone-proposed branches # --------------------------------- # Uncomment these to grab the milestone-proposed branches from the repos: +#CINDER_BRANCH=milestone-proposed #GLANCE_BRANCH=milestone-proposed #HORIZON_BRANCH=milestone-proposed #KEYSTONE_BRANCH=milestone-proposed #KEYSTONECLIENT_BRANCH=milestone-proposed #NOVA_BRANCH=milestone-proposed #NOVACLIENT_BRANCH=milestone-proposed +#QUANTUM_BRANCH=milestone-proposed #SWIFT_BRANCH=milestone-proposed From 88ae3761dd370f6979047ae4fc33cf126518a3a7 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Tue, 4 Sep 2012 10:18:18 -0700 Subject: [PATCH 654/967] Support new allow_tenant_reuse parameter in tempest config This adds support for setting the above-mentioned attribute, which was added in tempest recently. Change-Id: I14fe21f97d1c57f538254fa0420e9bc56115fc3b --- tools/configure_tempest.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 0da5597c..2df0315c 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -162,6 +162,7 @@ fi COMPUTE_CATALOG_TYPE=compute COMPUTE_CREATE_IMAGE_ENABLED=True COMPUTE_ALLOW_TENANT_ISOLATION=True +COMPUTE_ALLOW_TENANT_REUSE=True COMPUTE_RESIZE_AVAILABLE=False COMPUTE_CHANGE_PASSWORD_AVAILABLE=False # not supported with QEMU... COMPUTE_LOG_LEVEL=ERROR @@ -216,6 +217,7 @@ sed -e " s,%ALT_TENANT_NAME%,$ALT_TENANT_NAME,g; s,%COMPUTE_CATALOG_TYPE%,$COMPUTE_CATALOG_TYPE,g; s,%COMPUTE_ALLOW_TENANT_ISOLATION%,$COMPUTE_ALLOW_TENANT_ISOLATION,g; + s,%COMPUTE_ALLOW_TENANT_REUSE%,$COMPUTE_ALLOW_TENANT_REUSE,g; s,%COMPUTE_CREATE_IMAGE_ENABLED%,$COMPUTE_CREATE_IMAGE_ENABLED,g; s,%COMPUTE_RESIZE_AVAILABLE%,$COMPUTE_RESIZE_AVAILABLE,g; s,%COMPUTE_CHANGE_PASSWORD_AVAILABLE%,$COMPUTE_CHANGE_PASSWORD_AVAILABLE,g; From 1de40cf69110c1ca58844c6731dd820488594fad Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 4 Sep 2012 14:19:24 -0500 Subject: [PATCH 655/967] Fix start_XXXX function header Change-Id: I14e1f80d327e723408993a955d9fce9501ba8ef5 --- lib/template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/template b/lib/template index d70f2189..78b848dc 100644 --- a/lib/template +++ b/lib/template @@ -65,7 +65,7 @@ function install_XXXX() { } # start_XXXX() - Start running processes, including screen -function start_XXXX() +function start_XXXX() { # screen_it XXXX "cd $XXXX_DIR && $XXXX_DIR/bin/XXXX-bin" : } From 694a42a02c36816db524b548e97874eb4b448e51 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Wed, 5 Sep 2012 08:19:39 +1200 Subject: [PATCH 656/967] Fixes for heat keystone registration. 
- Pass HEAT_API_PORT into keystone_data.sh from stack.sh - Use the correct service type in default_catalog.templates key Change-Id: I17dad3a0255dc0307b052927a8ad1971cb9142a8 --- files/default_catalog.templates | 8 ++++---- stack.sh | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/files/default_catalog.templates b/files/default_catalog.templates index ceb6458f..990cc0e9 100644 --- a/files/default_catalog.templates +++ b/files/default_catalog.templates @@ -35,7 +35,7 @@ catalog.RegionOne.image.adminURL = http://%SERVICE_HOST%:9292 catalog.RegionOne.image.internalURL = http://%SERVICE_HOST%:9292 catalog.RegionOne.image.name = Image Service -catalog.RegionOne.heat.publicURL = http://%SERVICE_HOST%:8000/v1 -catalog.RegionOne.heat.adminURL = http://%SERVICE_HOST%:8000/v1 -catalog.RegionOne.heat.internalURL = http://%SERVICE_HOST%:8000/v1 -catalog.RegionOne.heat.name = Heat Service +catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8000/v1 +catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8000/v1 +catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8000/v1 +catalog.RegionOne.orchestration.name = Heat Service diff --git a/stack.sh b/stack.sh index 3e3d8cf5..fd6e9eee 100755 --- a/stack.sh +++ b/stack.sh @@ -2147,7 +2147,7 @@ if is_service_enabled key; then ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \ - DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \ + DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES HEAT_API_PORT=$HEAT_API_PORT \ bash -x $FILES/keystone_data.sh # Set up auth creds now that keystone is bootstrapped From 96288ba9a9fffa0d45545d091bd9781476503f7c Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 17 Aug 2012 14:11:55 -0500 Subject: [PATCH 657/967] Clean up security groups in exercises * fix problem with deleting security groups too early in floating_ips.sh and euca.sh * create and clean up security groups * cosmetic cleanups Change-Id: Ie45e03f889c540ec83f27a02b10e787060c5d4d7 --- exercises/boot_from_volume.sh | 12 +++++++----- exercises/euca.sh | 11 +++++------ exercises/floating_ips.sh | 8 ++++---- exercises/volumes.sh | 35 +++++++++++++++++++++++++++++++++-- 4 files changed, 49 insertions(+), 17 deletions(-) diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 7fe81ba0..c967e391 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -49,6 +49,10 @@ DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova} # Default user DEFAULT_INSTANCE_USER=${DEFAULT_INSTANCE_USER:-cirros} +# Security group name +SECGROUP=${SECGROUP:-boot_secgroup} + + # Launching servers # ================= @@ -72,7 +76,6 @@ if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $INSTANCE_NAME; do sleep 1; fi # Configure Security Groups -SECGROUP=${SECGROUP:-test_secgroup} nova secgroup-delete $SECGROUP || true nova secgroup-create $SECGROUP "$SECGROUP description" nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 @@ -246,8 +249,8 @@ nova delete $INSTANCE_NAME || \ die "Failure deleting instance $INSTANCE_NAME" # Wait for termination -if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $INSTANCE_NAME; do sleep 1; done"; then - echo "server didn't terminate!" +if ! 
timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then + echo "Server $NAME not deleted" exit 1 fi @@ -256,8 +259,7 @@ nova floating-ip-delete $FLOATING_IP || \ die "Failure deleting floating IP $FLOATING_IP" # Delete a secgroup -nova secgroup-delete $SECGROUP || \ - die "Failure deleting security group $SECGROUP" +nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" set +o xtrace echo "*********************************************************************" diff --git a/exercises/euca.sh b/exercises/euca.sh index 9f7aed17..fb052dd5 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -43,6 +43,9 @@ DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} # Boot this image, use first AMI-format image if unset DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} +# Security group name +SECGROUP=${SECGROUP:-euca_secgroup} + # Launching a server # ================== @@ -50,9 +53,6 @@ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} # Find a machine image to boot IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1` -# Define secgroup -SECGROUP=euca_secgroup - # Add a secgroup if ! euca-describe-groups | grep -q $SECGROUP; then euca-add-group -d "$SECGROUP description" $SECGROUP @@ -119,14 +119,13 @@ euca-terminate-instances $INSTANCE || \ die "Failure terminating instance $INSTANCE" # Assure it has terminated within a reasonable time -if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then +if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q $INSTANCE; do sleep 1; done"; then echo "server didn't terminate within $TERMINATE_TIMEOUT seconds" exit 1 fi # Delete group -euca-delete-group $SECGROUP || \ - die "Failure deleting security group $SECGROUP" +euca-delete-group $SECGROUP || die "Failure deleting security group $SECGROUP" set +o xtrace echo "*********************************************************************" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 02259c08..77f020e2 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -200,12 +200,12 @@ nova floating-ip-delete $FLOATING_IP || die "Failure deleting floating IP $FLOAT # Delete second floating IP nova floating-ip-delete $TEST_FLOATING_IP || die "Failure deleting floating IP $TEST_FLOATING_IP" -# shutdown the server +# Shutdown the server nova delete $VM_UUID || die "Failure deleting instance $NAME" -# make sure the VM shuts down within a reasonable time -if ! timeout $TERMINATE_TIMEOUT sh -c "while nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - echo "server didn't shut down!" +# Wait for termination +if ! 
timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then + echo "Server $NAME not deleted" exit 1 fi diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 0f25355f..5db10d39 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -43,6 +43,9 @@ DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} # Boot this image, use first AMi image if unset DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} +# Security group name +SECGROUP=${SECGROUP:-vol_secgroup} + # Launching a server # ================== @@ -62,6 +65,25 @@ glance image-list # Grab the id of the image to launch IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) +# Security Groups +# --------------- + +# List of secgroups: +nova secgroup-list + +# Create a secgroup +if ! nova secgroup-list | grep -q $SECGROUP; then + nova secgroup-create $SECGROUP "$SECGROUP description" + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then + echo "Security group not created" + exit 1 + fi +fi + +# Configure Security Group Rules +nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 +nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 + # determinine instance type # ------------------------- @@ -171,8 +193,17 @@ if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME; d exit 1 fi -# shutdown the server -nova delete $NAME || die "Failure deleting instance $NAME" +# Shutdown the server +nova delete $VM_UUID || die "Failure deleting instance $NAME" + +# Wait for termination +if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then + echo "Server $NAME not deleted" + exit 1 +fi + +# Delete a secgroup +nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" set +o xtrace echo "*********************************************************************" From 178b8402d962d629dbaffea8e2c454f391a16331 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 5 Sep 2012 10:42:10 +0100 Subject: [PATCH 658/967] Fix aggregates test with multi host setup Fixes bug 1046222. The aggregate exercise assumed that you have only one compute node, thus it failed with syntax error for two hosts. With this fix, the exercise will pick the first compute host, and use that for the tests. 
Change-Id: I85d76552295d640e1a9d86fbbed781f15529d047 --- exercises/aggregates.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index 8a4f9c19..adc3393b 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -125,16 +125,16 @@ nova aggregate-details $AGGREGATE_ID | grep {} if [ "$VIRT_DRIVER" == "xenserver" ]; then echo "TODO(johngarbutt) add tests for add/remove host from pool aggregate" fi -HOST=`nova host-list | grep compute | get_field 1` +FIRST_HOST=`nova host-list | grep compute | get_field 1 | head -1` # Make sure can add two aggregates to same host -nova aggregate-add-host $AGGREGATE_ID $HOST -nova aggregate-add-host $AGGREGATE2_ID $HOST -if nova aggregate-add-host $AGGREGATE2_ID $HOST; then +nova aggregate-add-host $AGGREGATE_ID $FIRST_HOST +nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST +if nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST; then echo "ERROR could add duplicate host to single aggregate" exit -1 fi -nova aggregate-remove-host $AGGREGATE2_ID $HOST -nova aggregate-remove-host $AGGREGATE_ID $HOST +nova aggregate-remove-host $AGGREGATE2_ID $FIRST_HOST +nova aggregate-remove-host $AGGREGATE_ID $FIRST_HOST # Test aggregate-delete # ===================== From f900bd79463821c8f5a677c566f9af053d81a860 Mon Sep 17 00:00:00 2001 From: Andrew Laski Date: Wed, 5 Sep 2012 17:23:14 -0400 Subject: [PATCH 659/967] Add git_update_remote_branch to functions. When $RECLONE=yes, and the branch specified for a project in stackrc is a remote branch, the git_clone function would not recognize it as a branch. git_clone now attempts to recognize a remote branch and handle it appropriately. Change-Id: Ie23ff12eb5a7905088a9c2bb0ea2b02453ec0258 --- AUTHORS | 1 + functions | 14 +++++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/AUTHORS b/AUTHORS index 4f771ce1..22d5f32b 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,6 +1,7 @@ Aaron Lee Aaron Rosen Adam Gandelman +Andrew Laski Andy Smith Anthony Young Armando Migliaccio diff --git a/functions b/functions index af154b0c..5fdae26a 100644 --- a/functions +++ b/functions @@ -278,6 +278,16 @@ function git_update_tag() { } +# git update using reference as a branch. 
+# git_update_remote_branch ref +function git_update_remote_branch() { + + GIT_BRANCH=$1 + + git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH +} + + # Translate the OS version values into common nomenclature # Sets ``DISTRO`` from the ``os_*`` values function GetDistro() { @@ -343,6 +353,8 @@ function git_clone { git_update_tag $GIT_REF elif [[ -n "`git show-ref refs/heads/$GIT_REF`" ]]; then git_update_branch $GIT_REF + elif [[ -n "`git show-ref refs/remotes/origin/$GIT_REF`" ]]; then + git_update_remote_branch $GIT_REF else echo $GIT_REF is neither branch nor tag exit 1 @@ -770,4 +782,4 @@ $XTRACE # Local variables: # -*- mode: Shell-script -*- -# End: \ No newline at end of file +# End: From 15733351ad5650fc9ab93eb3b7b405d51ac2a3d6 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 6 Sep 2012 11:51:30 -0500 Subject: [PATCH 660/967] Move screen_it() and screen_rc() to functions Change-Id: I9b119e2c6d4d88a67d485f61662037984c2d9b15 --- functions | 46 ++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 47 ++++------------------------------------------- 2 files changed, 50 insertions(+), 43 deletions(-) diff --git a/functions b/functions index af154b0c..fa7c8058 100644 --- a/functions +++ b/functions @@ -585,6 +585,52 @@ function restart_service() { } +# Helper to launch a service in a named screen +# screen_it service "command-line" +function screen_it { + NL=`echo -ne '\015'` + SCREEN_NAME=${SCREEN_NAME:-stack} + if is_service_enabled $1; then + # Append the service to the screen rc file + screen_rc "$1" "$2" + + screen -S $SCREEN_NAME -X screen -t $1 + # sleep to allow bash to be ready to be send the command - we are + # creating a new window in screen and then sends characters, so if + # bash isn't running by the time we send the command, nothing happens + sleep 1.5 + + if [[ -n ${SCREEN_LOGDIR} ]]; then + screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log + screen -S $SCREEN_NAME -p $1 -X log on + ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log + fi + screen -S $SCREEN_NAME -p $1 -X stuff "$2$NL" + fi +} + + +# Screen rc file builder +# screen_rc service "command-line" +function screen_rc { + SCREEN_NAME=${SCREEN_NAME:-stack} + SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc + if [[ ! -e $SCREENRC ]]; then + # Name the screen session + echo "sessionname $SCREEN_NAME" > $SCREENRC + # Set a reasonable statusbar + echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC + echo "screen -t shell bash" >> $SCREENRC + fi + # If this service doesn't already exist in the screenrc file + if ! grep $1 $SCREENRC 2>&1 > /dev/null; then + NL=`echo -ne '\015'` + echo "screen -t $1 bash" >> $SCREENRC + echo "stuff \"$2$NL\"" >> $SCREENRC + fi +} + + # ``pip install`` the dependencies of the package before ``setup.py develop`` # so pip and not distutils processes the dependency chain # Uses globals ``TRACK_DEPENDES``, ``*_proxy` diff --git a/stack.sh b/stack.sh index 3e3d8cf5..2c103eab 100755 --- a/stack.sh +++ b/stack.sh @@ -134,8 +134,9 @@ if [ ! -d $FILES ]; then exit 1 fi +SCREEN_NAME=${SCREEN_NAME:-stack} # Check to see if we are already running DevStack -if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].stack"; then +if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].$SCREEN_NAME"; then echo "You are already running a stack.sh session." echo "To rejoin this session type 'screen -x stack'." echo "To destroy this session, type './unstack.sh'." 
@@ -976,51 +977,11 @@ if [ -z "$SCREEN_HARDSTATUS" ]; then SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})' fi -# Our screenrc file builder -function screen_rc { - SCREENRC=$TOP_DIR/stack-screenrc - if [[ ! -e $SCREENRC ]]; then - # Name the screen session - echo "sessionname stack" > $SCREENRC - # Set a reasonable statusbar - echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC - echo "screen -t stack bash" >> $SCREENRC - fi - # If this service doesn't already exist in the screenrc file - if ! grep $1 $SCREENRC 2>&1 > /dev/null; then - NL=`echo -ne '\015'` - echo "screen -t $1 bash" >> $SCREENRC - echo "stuff \"$2$NL\"" >> $SCREENRC - fi -} - -# Our screen helper to launch a service in a hidden named screen -function screen_it { - NL=`echo -ne '\015'` - if is_service_enabled $1; then - # Append the service to the screen rc file - screen_rc "$1" "$2" - - screen -S stack -X screen -t $1 - # sleep to allow bash to be ready to be send the command - we are - # creating a new window in screen and then sends characters, so if - # bash isn't running by the time we send the command, nothing happens - sleep 1.5 - - if [[ -n ${SCREEN_LOGDIR} ]]; then - screen -S stack -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log - screen -S stack -p $1 -X log on - ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log - fi - screen -S stack -p $1 -X stuff "$2$NL" - fi -} - # Create a new named screen to run processes in -screen -d -m -S stack -t stack -s /bin/bash +screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash sleep 1 # Set a reasonable statusbar -screen -r stack -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" +screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" # Horizon From 659eabfb6fd9896d90e0ba3267d9e3470002b0d5 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 6 Sep 2012 13:47:06 -0500 Subject: [PATCH 661/967] Shut down rabbitmq-server when shutting down all services. Fixes bug 978205. Change-Id: I688887e4b4426db36438f2bf0e537956f4f94757 --- unstack.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/unstack.sh b/unstack.sh index e73cc2dc..30ee512e 100755 --- a/unstack.sh +++ b/unstack.sh @@ -67,6 +67,11 @@ if [[ -n "$UNSTACK_ALL" ]]; then if is_service_enabled mysql; then stop_service mysql fi + + # Stop rabbitmq-server + if is_service_enabled rabbit; then + stop_service rabbitmq-server + fi fi # Quantum dhcp agent runs dnsmasq From 6c32c6edabe38b9886555c5fadf7010b1ee6b338 Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Thu, 6 Sep 2012 13:47:49 -0700 Subject: [PATCH 662/967] Fix quantum exercise script to use private CIDRs. 
* Addresses bug 1047024 Change-Id: Iba5a369b94a6c0e14edea9d13079a80a85adf907 --- exercises/quantum-adv-test.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh index cff29d2e..8f15b634 100755 --- a/exercises/quantum-adv-test.sh +++ b/exercises/quantum-adv-test.sh @@ -76,14 +76,14 @@ DEMO1_NUM_NET=1 DEMO2_NUM_NET=2 PUBLIC_NET1_CIDR="200.0.0.0/24" -DEMO1_NET1_CIDR="190.0.0.0/24" -DEMO2_NET1_CIDR="191.0.0.0/24" -DEMO2_NET2_CIDR="191.0.1.0/24" +DEMO1_NET1_CIDR="10.1.0.0/24" +DEMO2_NET1_CIDR="10.2.0.0/24" +DEMO2_NET2_CIDR="10.2.1.0/24" PUBLIC_NET1_GATEWAY="200.0.0.1" -DEMO1_NET1_GATEWAY="190.0.0.1" -DEMO2_NET1_GATEWAY="191.0.0.1" -DEMO2_NET2_GATEWAY="191.0.1.1" +DEMO1_NET1_GATEWAY="10.1.0.1" +DEMO2_NET1_GATEWAY="10.2.0.1" +DEMO2_NET2_GATEWAY="10.2.1.1" PUBLIC_NUM_VM=1 DEMO1_NUM_VM=1 From d6767d0d459466ddd84043329cfeaab0b68c2316 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Fri, 31 Aug 2012 04:55:20 -0700 Subject: [PATCH 663/967] Devstack support for Quantum L3 agent Fixes bug #1036910 Starts the L3 agent for Quantum, if the corresponding service is enabled. Supports L3 agent with or without namespaces. Available with openvswitch plugin only at this time. Change-Id: Ic2dd7a2d32e985c5df0a94eee1ecb602f555cd14 --- lib/quantum | 37 +++++++++++++++++ stack.sh | 116 +++++++++++++++++++++++++++++++++++++--------------- 2 files changed, 121 insertions(+), 32 deletions(-) create mode 100644 lib/quantum diff --git a/lib/quantum b/lib/quantum new file mode 100644 index 00000000..1025d2b4 --- /dev/null +++ b/lib/quantum @@ -0,0 +1,37 @@ +# lib/quantum +# functions - funstions specific to quantum + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Configures keystone integration for quantum service and agents +function quantum_setup_keystone() { + local conf_file=$1 + local section=$2 + local use_auth_url=$3 + if [[ -n $use_auth_url ]]; then + iniset $conf_file $section auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0" + else + iniset $conf_file $section auth_host $KEYSTONE_SERVICE_HOST + iniset $conf_file $section auth_port $KEYSTONE_AUTH_PORT + iniset $conf_file $section auth_protocol $KEYSTONE_SERVICE_PROTOCOL + fi + iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME + iniset $conf_file $section admin_user $Q_ADMIN_USERNAME + iniset $conf_file $section admin_password $SERVICE_PASSWORD +} + +function quantum_setup_ovs_bridge() { + local bridge=$1 + for PORT in `sudo ovs-vsctl --no-wait list-ports $bridge`; do + if [[ "$PORT" =~ tap* ]]; then echo `sudo ip link delete $PORT` > /dev/null; fi + sudo ovs-vsctl --no-wait del-port $bridge $PORT + done + sudo ovs-vsctl --no-wait -- --if-exists del-br $bridge + sudo ovs-vsctl --no-wait add-br $bridge + sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge +} + +# Restore xtrace +$XTRACE diff --git a/stack.sh b/stack.sh index 3e3d8cf5..72443615 100755 --- a/stack.sh +++ b/stack.sh @@ -268,6 +268,7 @@ sudo chown `whoami` $DATA_DIR source $TOP_DIR/lib/cinder source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat +source $TOP_DIR/lib/quantum # Set the destination directories for OpenStack projects NOVA_DIR=$DEST/nova @@ -298,6 +299,8 @@ Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum} Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} # Use namespace or not Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} +# Meta data IP +Q_META_DATA_IP=${Q_META_DATA_IP:-} # Name of the LVM volume group to 
use/create for iscsi volumes VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} @@ -1179,7 +1182,7 @@ if is_service_enabled quantum; then Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE - sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/$Q_DB_NAME?charset=utf8/g" /$Q_PLUGIN_CONF_FILE + iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/$Q_DB_NAME?charset=utf8 OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-True} if [[ "$Q_PLUGIN" = "openvswitch" && "$OVS_ENABLE_TUNNELING" = "True" ]]; then @@ -1221,12 +1224,7 @@ if is_service_enabled q-svc; then iniset $Q_CONF_FILE DEFAULT core_plugin $Q_PLUGIN_CLASS iniset $Q_CONF_FILE DEFAULT auth_strategy $Q_AUTH_STRATEGY - iniset $Q_API_PASTE_FILE filter:authtoken auth_host $KEYSTONE_SERVICE_HOST - iniset $Q_API_PASTE_FILE filter:authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $Q_API_PASTE_FILE filter:authtoken auth_protocol $KEYSTONE_SERVICE_PROTOCOL - iniset $Q_API_PASTE_FILE filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $Q_API_PASTE_FILE filter:authtoken admin_user $Q_ADMIN_USERNAME - iniset $Q_API_PASTE_FILE filter:authtoken admin_password $SERVICE_PASSWORD + quantum_setup_keystone $Q_API_PASTE_FILE filter:authtoken fi # Quantum agent (for compute nodes) @@ -1234,13 +1232,7 @@ if is_service_enabled q-agt; then if [[ "$Q_PLUGIN" = "openvswitch" ]]; then # Set up integration bridge OVS_BRIDGE=${OVS_BRIDGE:-br-int} - for PORT in `sudo ovs-vsctl --no-wait list-ports $OVS_BRIDGE`; do - if [[ "$PORT" =~ tap* ]]; then echo `sudo ip link delete $PORT` > /dev/null; fi - sudo ovs-vsctl --no-wait del-port $OVS_BRIDGE $PORT - done - sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE - sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE - sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int + quantum_setup_ovs_bridge $OVS_BRIDGE if [[ "$OVS_ENABLE_TUNNELING" == "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP else @@ -1280,10 +1272,7 @@ if is_service_enabled q-dhcp; then # Update database iniset $Q_DHCP_CONF_FILE DEFAULT db_connection "mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/$Q_DB_NAME?charset=utf8" - iniset $Q_DHCP_CONF_FILE DEFAULT auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0" - iniset $Q_DHCP_CONF_FILE DEFAULT admin_tenant_name $SERVICE_TENANT_NAME - iniset $Q_DHCP_CONF_FILE DEFAULT admin_user $Q_ADMIN_USERNAME - iniset $Q_DHCP_CONF_FILE DEFAULT admin_password $SERVICE_PASSWORD + quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url if [[ "$Q_PLUGIN" = "openvswitch" ]]; then iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver @@ -1292,6 +1281,45 @@ if is_service_enabled q-dhcp; then fi fi +# Quantum L3 +if is_service_enabled q-l3; then + AGENT_L3_BINARY="$QUANTUM_DIR/bin/quantum-l3-agent" + PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} + Q_L3_CONF_FILE=/etc/quantum/l3_agent.ini + + cp $QUANTUM_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE + + # Set verbose + iniset $Q_L3_CONF_FILE DEFAULT verbose True + # Set debug + iniset $Q_L3_CONF_FILE DEFAULT debug True + + iniset $Q_L3_CONF_FILE DEFAULT metadata_ip $Q_META_DATA_IP + iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE + + quantum_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url + if [[ "$Q_PLUGIN" == 
"openvswitch" ]]; then + iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver + # Set up external bridge + # Create it if it does not exist + sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE + sudo ovs-vsctl --no-wait br-set-external-id $PUBLIC_BRIDGE bridge-id $PUBLIC_BRIDGE + # remove internal ports + for PORT in `sudo ovs-vsctl --no-wait list-ports $PUBLIC_BRIDGE`; do + TYPE=$(sudo ovs-vsctl get interface $PORT type) + if [[ "$TYPE" == "internal" ]]; then + echo `sudo ip link delete $PORT` > /dev/null + sudo ovs-vsctl --no-wait del-port $bridge $PORT + fi + done + # ensure no IP is configured on the public bridge + sudo ip addr flush dev $PUBLIC_BRIDGE + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver + fi +fi + # Quantum RPC support - must be updated prior to starting any of the services if is_service_enabled quantum; then iniset $Q_CONF_FILE DEFAULT control_exchange quantum @@ -1305,16 +1333,6 @@ if is_service_enabled quantum; then fi fi -# Start the Quantum services -screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" - -# Start up the quantum agent -screen_it q-agt "sudo python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" - -# Start up the quantum agent -screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file $Q_CONF_FILE --config-file=$Q_DHCP_CONF_FILE" - - # Nova # ---- @@ -2140,7 +2158,6 @@ if is_service_enabled key; then echo "keystone did not start" exit 1 fi - # ``keystone_data.sh`` creates services, admin and demo users, and roles. SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 @@ -2182,16 +2199,51 @@ if is_service_enabled n-api; then fi fi -# If we're using Quantum (i.e. q-svc is enabled), network creation has to -# happen after we've started the Quantum service. if is_service_enabled q-svc; then + # Start the Quantum service + screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" + echo "Waiting for Quantum to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9696; do sleep 1; done"; then + echo "Quantum did not start" + exit 1 + fi + + # Configure Quantum elements + # Configure internal network & subnet + TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1) # Create a small network # Since quantum command is executed in admin context at this point, # ``--tenant_id`` needs to be specified. NET_ID=$(quantum net-create --tenant_id $TENANT_ID net1 | grep ' id ' | get_field 2) - quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE + SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + if is_service_enabled q-l3; then + # Create a router, and add the private subnet as one of its interfaces + ROUTER_ID=$(quantum router-create --tenant_id $TENANT_ID router1 | grep ' id ' | get_field 2) + quantum router-interface-add $ROUTER_ID $SUBNET_ID + # Create an external network, and a subnet. 
Configure the external network as router gw + EXT_NET_ID=$(quantum net-create ext_net -- --router:external=True | grep ' id ' | get_field 2) + EXT_GW_IP=$(quantum subnet-create --ip_version 4 $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) + quantum router-gateway-set $ROUTER_ID $EXT_NET_ID + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + CIDR_LEN=${FLOATING_RANGE#*/} + sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE + sudo ip link set $PUBLIC_BRIDGE up + fi + if [[ "$Q_USE_NAMESPACE" == "False" ]]; then + # Explicitly set router id in l3 agent configuration + iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID + fi + fi + + # Start up the quantum agent + screen_it q-agt "sudo python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" + # Start up the quantum dhcp agent + screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file $Q_CONF_FILE --config-file=$Q_DHCP_CONF_FILE" + # Start up the quantum l3 agent + screen_it q-l3 "sudo python $AGENT_L3_BINARY --config-file $Q_CONF_FILE --config-file=$Q_L3_CONF_FILE" + elif is_service_enabled mysql && is_service_enabled nova; then # Create a small network $NOVA_BIN_DIR/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS From 4797e8f7bc4fb7ac801a7fb2b99bcbd00a2da45f Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Fri, 7 Sep 2012 11:09:06 -0400 Subject: [PATCH 664/967] Set virt_use_execmem boolean if SELinux is enabled. If SELinux is enabled, this boolean is required to be able to launch VMs using qemu. Set the boolean if we're switching the libvirt_type to 'qemu' and SELinux is enabled. Change-Id: Ieead35aae94c9fa86df1f4829584f71c97dcbeb8 --- stack.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stack.sh b/stack.sh index 665a3663..5c27462a 100755 --- a/stack.sh +++ b/stack.sh @@ -1345,6 +1345,10 @@ if is_service_enabled n-cpu; then if [ ! -e /dev/kvm ]; then echo "WARNING: Switching to QEMU" LIBVIRT_TYPE=qemu + if which selinuxenabled 2>&1 > /dev/null && selinuxenabled; then + # https://bugzilla.redhat.com/show_bug.cgi?id=753589 + sudo setsebool virt_use_execmem on + fi fi fi From 20df2a839d9e890ef5dc065777dcba4642e7416d Mon Sep 17 00:00:00 2001 From: Nikola Dipanov Date: Sat, 8 Sep 2012 18:36:35 +0200 Subject: [PATCH 665/967] Changes the qpid package that gets installed on Fedora Changes the qpid package that will be installed on Fedora when running stack.sh. The original package (qpid-cpp-server) was split up into qpid-cpp-server and qpid-cpp-server-daemon. systemd unit files were moved into the second package and if only the first package is installed, qpid service fails to start. 
Change-Id: Ia7cae795d7e456f0e21e0bedaee583a9e8b35f2d --- files/rpms/nova | 2 +- files/rpms/quantum | 2 +- stack.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/files/rpms/nova b/files/rpms/nova index bb920267..88ad8c31 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -35,7 +35,7 @@ python-sqlalchemy python-suds python-tempita rabbitmq-server # NOPRIME -qpid-cpp-server # NOPRIME +qpid-cpp-server-daemon # NOPRIME sqlite sudo vconfig diff --git a/files/rpms/quantum b/files/rpms/quantum index 6ca9c355..05398fcf 100644 --- a/files/rpms/quantum +++ b/files/rpms/quantum @@ -17,7 +17,7 @@ python-routes python-sqlalchemy python-suds rabbitmq-server # NOPRIME -qpid-cpp-server # NOPRIME +qpid-cpp-server-daemon # NOPRIME sqlite sudo vconfig diff --git a/stack.sh b/stack.sh index 3e3d8cf5..87e3336d 100755 --- a/stack.sh +++ b/stack.sh @@ -668,7 +668,7 @@ if is_service_enabled rabbit; then rm -f "$tfile" elif is_service_enabled qpid; then if [[ "$os_PACKAGE" = "rpm" ]]; then - install_package qpid-cpp-server + install_package qpid-cpp-server-daemon else install_package qpidd fi From c5dfecd81829a08986ce3e5f2ab2aba4d9909886 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Sat, 8 Sep 2012 14:20:43 -0500 Subject: [PATCH 666/967] Fix RST formatting errors Clean up some shocco RST errors Change-Id: I9afa0f155f2bfcc73638ae11447c693579288355 --- exercises/client-args.sh | 2 +- exercises/client-env.sh | 2 +- exercises/sec_groups.sh | 2 +- tools/info.sh | 1 + 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/exercises/client-args.sh b/exercises/client-args.sh index 39241a29..9cbb6a66 100755 --- a/exercises/client-args.sh +++ b/exercises/client-args.sh @@ -25,7 +25,7 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc -# Unset all of the known NOVA_ vars +# Unset all of the known NOVA_* vars unset NOVA_API_KEY unset NOVA_ENDPOINT_NAME unset NOVA_PASSWORD diff --git a/exercises/client-env.sh b/exercises/client-env.sh index d242ee53..94f4a82c 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -25,7 +25,7 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc -# Unset all of the known NOVA_ vars +# Unset all of the known NOVA_* vars unset NOVA_API_KEY unset NOVA_ENDPOINT_NAME unset NOVA_PASSWORD diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh index 49cb58fd..f6810e3e 100755 --- a/exercises/sec_groups.sh +++ b/exercises/sec_groups.sh @@ -35,7 +35,7 @@ source $TOP_DIR/exerciserc # Testing Security Groups -# ============= +# ======================= # List security groups nova secgroup-list diff --git a/tools/info.sh b/tools/info.sh index bf40e827..5c9a1d3d 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -6,6 +6,7 @@ # # Output fields are separated with '|' chars # Output types are git,localrc,os,pip,pkg: +# # git||[] # localtc|= # os|= From 8dac568ad1205b4fdea7b962d056d294a07dee60 Mon Sep 17 00:00:00 2001 From: long-wang Date: Sun, 9 Sep 2012 11:19:58 +0800 Subject: [PATCH 667/967] add command for Add icmp tcp/22 to default security group Change-Id: Ic4aa7a310638dc42d77a78a521344a6c1c804191 --- samples/local.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/samples/local.sh b/samples/local.sh index eb9bc241..59015259 100755 --- a/samples/local.sh +++ b/samples/local.sh @@ -62,3 +62,6 @@ fi # ---------- # Add tcp/22 and icmp to default security group +nova secgroup-add-rule default tcp 22 22 0.0.0.0/0 +nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0 + From 
3a19d18e41487755e5fd9d48760de6451bb5e6ae Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Sat, 8 Sep 2012 23:16:40 -0700
Subject: [PATCH 668/967] Allow empty FLAT_INTERFACE for local-only access

Change-Id: Icdee4ba6419bb89fd128a1dbd9e792fef6a62f23
---
 stack.sh | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/stack.sh b/stack.sh
index 80ea271d..f2d5fd69 100755
--- a/stack.sh
+++ b/stack.sh
@@ -447,10 +447,9 @@ MULTI_HOST=`trueorfalse False $MULTI_HOST`
 # fail.
 #
 # If you are running on a single node and don't need to access the VMs from
-# devices other than that node, you can set the flat interface to the same
-# value as ``FLAT_NETWORK_BRIDGE``. This will stop the network hiccup from
-# occurring.
-FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
+# devices other than that node, you can set FLAT_INTERFACE=
+# This will stop nova from bridging any interfaces into FLAT_NETWORK_BRIDGE.
+FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT}

 ## FIXME(ja): should/can we check that FLAT_INTERFACE is sane?

From 6e77163b9d0bc2703a98f20a6964187511b1b9e3 Mon Sep 17 00:00:00 2001
From: Bob Kukura
Date: Wed, 5 Sep 2012 15:07:15 -0400
Subject: [PATCH 669/967] improved Quantum plugin configuration

The configuration defaults for the openvswitch and linuxbridge plugins
are changing in https://review.openstack.org/#/c/12362/ to address
https://bugs.launchpad.net/quantum/+bug/1045142. To summarize, with no
overriding of default configuration values, tenant networks will now
work on all systems, but are now local to the host. Using GRE tunnels
(openvswitch) or VLANs (openvswitch or linuxbridge) for external
connectivity requires additional configuration.

This patch provides and documents a set of simple shell variables that
can be set in localrc to achieve a range of quantum network
configurations.

To use GRE tunnels for remote connectivity with openvswitch, localrc
should include:

Q_PLUGIN=openvswitch
ENABLE_TENANT_TUNNELS=True

Note that OVS GRE tunnels require kernel support that is not in the
Linux kernel source tree, and is not included in all versions of Linux
on which devstack runs.

To use VLANs 1000 through 1999 on eth1 for remote connectivity with
openvswitch, localrc should include:

Q_PLUGIN=openvswitch
ENABLE_TENANT_VLANS=True
TENANT_VLAN_RANGE=1000:1999
PHYSICAL_NETWORK=default
OVS_PHYSICAL_BRIDGE=br-eth1

The OVS bridge br-eth1 must be manually created, and the physical
interface eth1 must be manually added as a port. Any needed host IP
address must be set on br-eth1 rather than eth1. Note that OVS bridges
and ports are persistent.

To use VLANs 1000 through 1999 on eth1 for remote connectivity with
linuxbridge, localrc should include:

Q_PLUGIN=linuxbridge
ENABLE_TENANT_VLANS=True
TENANT_VLAN_RANGE=1000:1999
PHYSICAL_NETWORK=default
LB_PHYSICAL_INTERFACE=eth1

The physical interface eth1 must be up, but does not have to have an IP
address. Any existing host IP address configured on eth1 will be moved
to a bridge when the network is activated by the agent, and moved back
when the network is deleted.
Change-Id: I72e9aba1335c55077f4a34495e2d2d9ec1857cd5 --- stack.sh | 205 +++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 154 insertions(+), 51 deletions(-) diff --git a/stack.sh b/stack.sh index 80ea271d..fc92c4fe 100755 --- a/stack.sh +++ b/stack.sh @@ -456,14 +456,20 @@ FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT} # Using Quantum networking: # -# Make sure that quantum is enabled in ENABLED_SERVICES. If it is the network -# manager will be set to the QuantumManager. If you want to run Quantum on -# this host, make sure that q-svc is also in ENABLED_SERVICES. -# -# If you're planning to use the Quantum openvswitch plugin, set Q_PLUGIN to -# "openvswitch" and make sure the q-agt service is enabled in +# Make sure that quantum is enabled in ENABLED_SERVICES. If you want +# to run Quantum on this host, make sure that q-svc is also in # ENABLED_SERVICES. # +# If you're planning to use the Quantum openvswitch plugin, set +# Q_PLUGIN to "openvswitch" and make sure the q-agt service is enabled +# in ENABLED_SERVICES. If you're planning to use the Quantum +# linuxbridge plugin, set Q_PLUGIN to "linuxbridge" and make sure the +# q-agt service is enabled in ENABLED_SERVICES. +# +# See "Quantum Network Configuration" below for additional variables +# that must be set in localrc for connectivity across hosts with +# Quantum. +# # With Quantum networking the NET_MAN variable is ignored. @@ -713,14 +719,6 @@ EOF install_package mysql-server fi -if is_service_enabled quantum; then - if [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - # Install deps - # FIXME add to files/apts/quantum, but don't install if not needed! - install_package python-configobj - fi -fi - if is_service_enabled horizon; then if [[ "$os_PACKAGE" = "deb" ]]; then # Install apache2, which is NOPRIME'd @@ -1140,6 +1138,66 @@ fi # ------- if is_service_enabled quantum; then + # + # Quantum Network Configuration + # + # The following variables control the Quantum openvswitch and + # linuxbridge plugins' allocation of tenant networks and + # availability of provider networks. If these are not configured + # in localrc, tenant networks will be local to the host (with no + # remote connectivity), and no physical resources will be + # available for the allocation of provider networks. + + # To use GRE tunnels for tenant networks, set to True in + # localrc. GRE tunnels are only supported by the openvswitch + # plugin, and currently only on Ubuntu. + ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False} + + # If using GRE tunnels for tenant networks, specify the range of + # tunnel IDs from which tenant networks are allocated. Can be + # overriden in localrc in necesssary. + TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGE:-1:1000} + + # To use VLANs for tenant networks, set to True in localrc. VLANs + # are supported by the openvswitch and linuxbridge plugins, each + # requiring additional configuration described below. + ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} + + # If using VLANs for tenant networks, set in localrc to specify + # the range of VLAN VIDs from which tenant networks are + # allocated. An external network switch must be configured to + # trunk these VLANs between hosts for multi-host connectivity. 
+ # + # Example: TENANT_VLAN_RANGE=1000:1999 + TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} + + # If using VLANs for tenant networks, or if using flat or VLAN + # provider networks, set in localrc to the name of the physical + # network, and also configure OVS_PHYSICAL_BRIDGE for the + # openvswitch agent or LB_PHYSICAL_INTERFACE for the linuxbridge + # agent, as described below. + # + # Example: PHYSICAL_NETWORK=default + PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} + + # With the openvswitch plugin, if using VLANs for tenant networks, + # or if using flat or VLAN provider networks, set in localrc to + # the name of the OVS bridge to use for the physical network. The + # bridge will be created if it does not already exist, but a + # physical interface must be manually added to the bridge as a + # port for external connectivity. + # + # Example: OVS_PHYSICAL_BRIDGE=br-eth1 + OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} + + # With the linuxbridge plugin, if using VLANs for tenant networks, + # or if using flat or VLAN provider networks, set in localrc to + # the name of the network interface to use for the physical + # network. + # + # Example: LB_PHYSICAL_INTERFACE=eth1 + LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} + # Put config files in ``/etc/quantum`` for everyone to find if [[ ! -d /etc/quantum ]]; then sudo mkdir -p /etc/quantum @@ -1168,22 +1226,6 @@ if is_service_enabled quantum; then iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/$Q_DB_NAME?charset=utf8 - OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-True} - if [[ "$Q_PLUGIN" = "openvswitch" && "$OVS_ENABLE_TUNNELING" = "True" ]]; then - OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'` - if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then - echo "You are running OVS version $OVS_VERSION." - echo "OVS 1.4+ is required for tunneling between multiple hosts." - exit 1 - fi - if [[ "$OVS_DEFAULT_BRIDGE" = "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges "" - else - iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges default - fi - iniset /$Q_PLUGIN_CONF_FILE OVS tunnel_id_ranges 1:1000 - fi - Q_CONF_FILE=/etc/quantum/quantum.conf cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE fi @@ -1209,33 +1251,96 @@ if is_service_enabled q-svc; then iniset $Q_CONF_FILE DEFAULT auth_strategy $Q_AUTH_STRATEGY quantum_setup_keystone $Q_API_PASTE_FILE filter:authtoken + + # Configure plugin + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type gre + iniset /$Q_PLUGIN_CONF_FILE OVS tunnel_id_ranges $TENANT_TUNNEL_RANGES + elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type vlan + else + echo "WARNING - The openvswitch plugin is using local tenant networks, with no connectivity between hosts." + fi + + # Override OVS_VLAN_RANGES and OVS_BRIDGE_MAPPINGS in localrc + # for more complex physical network configurations. 
+ if [[ "$OVS_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then + OVS_VLAN_RANGES=$PHYSICAL_NETWORK + if [[ "$TENANT_VLAN_RANGE" != "" ]]; then + OVS_VLAN_RANGES=$OVS_VLAN_RANGES:$TENANT_VLAN_RANGE + fi + fi + if [[ "$OVS_VLAN_RANGES" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges $OVS_VLAN_RANGES + fi + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE VLANS tenant_network_type vlan + else + echo "WARNING - The linuxbridge plugin is using local tenant networks, with no connectivity between hosts." + fi + + # Override LB_VLAN_RANGES and LB_INTERFACE_MAPPINGS in localrc + # for more complex physical network configurations. + if [[ "$LB_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then + LB_VLAN_RANGES=$PHYSICAL_NETWORK + if [[ "$TENANT_VLAN_RANGE" != "" ]]; then + LB_VLAN_RANGES=$LB_VLAN_RANGES:$TENANT_VLAN_RANGE + fi + fi + if [[ "$LB_VLAN_RANGES" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES + fi + fi fi # Quantum agent (for compute nodes) if is_service_enabled q-agt; then + # Configure agent for plugin if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - # Set up integration bridge + # Setup integration bridge OVS_BRIDGE=${OVS_BRIDGE:-br-int} quantum_setup_ovs_bridge $OVS_BRIDGE - if [[ "$OVS_ENABLE_TUNNELING" == "True" ]]; then + + # Setup agent for tunneling + if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then + # Verify tunnels are supported + # REVISIT - also check kernel module support for GRE and patch ports + OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'` + if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then + echo "You are running OVS version $OVS_VERSION." + echo "OVS 1.4+ is required for tunneling between multiple hosts." + exit 1 + fi iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP - else - # Need bridge if not tunneling - OVS_DEFAULT_BRIDGE=${OVS_DEFAULT_BRIDGE:-br-$GUEST_INTERFACE_DEFAULT} fi - if [[ "$OVS_DEFAULT_BRIDGE" = "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings "" - else + + # Setup physical network bridge mappings. Override + # OVS_VLAN_RANGES and OVS_BRIDGE_MAPPINGS in localrc for more + # complex physical network configurations. + if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then + OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE + # Configure bridge manually with physical interface as port for multi-node - sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_DEFAULT_BRIDGE - iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings default:$OVS_DEFAULT_BRIDGE + sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE + fi + if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS fi + AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py" elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - # Start up the quantum <-> linuxbridge agent - # set the default network interface - QUANTUM_LB_PRIVATE_INTERFACE=${QUANTUM_LB_PRIVATE_INTERFACE:-$GUEST_INTERFACE_DEFAULT} - iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings default:$QUANTUM_LB_PRIVATE_INTERFACE + # Setup physical network interface mappings. Override + # LB_VLAN_RANGES and LB_INTERFACE_MAPPINGS in localrc for more + # complex physical network configurations. 
+ if [[ "$LB_INTERFACE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then + LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE + fi + if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS + fi + AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/linuxbridge/agent/linuxbridge_quantum_agent.py" fi fi @@ -2175,13 +2280,6 @@ if is_service_enabled q-svc; then fi fi - # Start up the quantum agent - screen_it q-agt "sudo python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" - # Start up the quantum dhcp agent - screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file $Q_CONF_FILE --config-file=$Q_DHCP_CONF_FILE" - # Start up the quantum l3 agent - screen_it q-l3 "sudo python $AGENT_L3_BINARY --config-file $Q_CONF_FILE --config-file=$Q_L3_CONF_FILE" - elif is_service_enabled mysql && is_service_enabled nova; then # Create a small network $NOVA_BIN_DIR/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS @@ -2193,6 +2291,11 @@ elif is_service_enabled mysql && is_service_enabled nova; then $NOVA_BIN_DIR/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL fi +# Start up the quantum agents if enabled +screen_it q-agt "sudo python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" +screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file $Q_CONF_FILE --config-file=$Q_DHCP_CONF_FILE" +screen_it q-l3 "sudo python $AGENT_L3_BINARY --config-file $Q_CONF_FILE --config-file=$Q_L3_CONF_FILE" + # The group **libvirtd** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **libvirtd** group. 
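[Editor's note: the following cheat sheet is not part of the patch above; it only
collects, in one place, the three localrc configurations described in that patch's
commit message. It is a sketch: the VLAN range, bridge name (br-eth1) and interface
name (eth1) are examples taken from the commit message, not requirements, and only
one of the three blocks should be used at a time.]

# 1) Tenant networks over GRE tunnels (openvswitch plugin; needs OVS kernel tunnel support)
Q_PLUGIN=openvswitch
ENABLE_TENANT_TUNNELS=True

# 2) Tenant networks over VLANs 1000-1999 with the openvswitch plugin
#    (create br-eth1 manually, add eth1 as a port; host IPs go on br-eth1)
Q_PLUGIN=openvswitch
ENABLE_TENANT_VLANS=True
TENANT_VLAN_RANGE=1000:1999
PHYSICAL_NETWORK=default
OVS_PHYSICAL_BRIDGE=br-eth1

# 3) Tenant networks over VLANs 1000-1999 with the linuxbridge plugin
#    (eth1 must be up; it does not need an IP address)
Q_PLUGIN=linuxbridge
ENABLE_TENANT_VLANS=True
TENANT_VLAN_RANGE=1000:1999
PHYSICAL_NETWORK=default
LB_PHYSICAL_INTERFACE=eth1

# With none of these set, tenant networks remain local to the host, which is the
# default behavior described in the commit message above.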
# ``screen_it`` checks ``is_service_enabled``, it is not needed here From c0482e6efcc34515b9383e6a0de1e03b75d6ce62 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Tue, 12 Jun 2012 11:30:43 +0200 Subject: [PATCH 670/967] Add contitional update package repositories to install_package() This helps us ensure that we update the repositories only the first time we need to install packages Rebased and incorporated into install_package() Change-Id: Id987aa7742f5d6807bc97eb6784cf18557c919d2 --- functions | 4 ++++ stack.sh | 3 +-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/functions b/functions index c109eaea..8ed99604 100644 --- a/functions +++ b/functions @@ -536,7 +536,11 @@ function install_package() { if [[ -z "$os_PACKAGE" ]]; then GetOSVersion fi + if [[ "$os_PACKAGE" = "deb" ]]; then + [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update + NO_UPDATE_REPOS=True + apt_get install "$@" else yum_install "$@" diff --git a/stack.sh b/stack.sh index 80ea271d..90d8c62e 100755 --- a/stack.sh +++ b/stack.sh @@ -183,7 +183,7 @@ if [[ $EUID -eq 0 ]]; then # Give the non-root user the ability to run as **root** via ``sudo`` if [[ "$os_PACKAGE" = "deb" ]]; then - dpkg -l sudo || apt_get update && install_package sudo + dpkg -l sudo || install_package sudo else rpm -qa | grep sudo || install_package sudo fi @@ -654,7 +654,6 @@ set -o xtrace # Install package requirements if [[ "$os_PACKAGE" = "deb" ]]; then - apt_get update install_package $(get_packages $FILES/apts) else install_package $(get_packages $FILES/rpms) From 71ebc6ff65e6ae3982a7e8a7ecf9ff80fd18d6bb Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Tue, 12 Jun 2012 13:45:15 +0200 Subject: [PATCH 671/967] Add is_package_installed function to know if a package is installed This helps reduce the distro-dependent code in stack.sh, and also fixes the bug where "rpm -qa | grep sudo" will work if gnome-sudoku is installed. Rebased Change-Id: Ib1330b29b915b41d9724197edd791f0d4e0fe373 --- functions | 20 ++++++++++++++++++++ stack.sh | 13 ++----------- tests/functions.sh | 41 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 63 insertions(+), 11 deletions(-) diff --git a/functions b/functions index 8ed99604..664cfa0c 100644 --- a/functions +++ b/functions @@ -548,6 +548,26 @@ function install_package() { } +# Distro-agnostic function to tell if a package is installed +# is_package_installed package [package ...] +function is_package_installed() { + if [[ -z "$@" ]]; then + return 1 + fi + + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + if [[ "$os_PACKAGE" = "deb" ]]; then + dpkg -l "$@" > /dev/null + return $? + else + rpm --quiet -q "$@" + return $? + fi +} + + # Test if the named environment variable is set and not zero length # is_set env-var function is_set() { diff --git a/stack.sh b/stack.sh index 687e5bf2..2c488b4a 100755 --- a/stack.sh +++ b/stack.sh @@ -182,11 +182,7 @@ if [[ $EUID -eq 0 ]]; then sleep $ROOTSLEEP # Give the non-root user the ability to run as **root** via ``sudo`` - if [[ "$os_PACKAGE" = "deb" ]]; then - dpkg -l sudo || install_package sudo - else - rpm -qa | grep sudo || install_package sudo - fi + is_package_installed sudo || install_package sudo if ! 
getent group stack >/dev/null; then echo "Creating a group called stack" groupadd stack @@ -215,12 +211,7 @@ if [[ $EUID -eq 0 ]]; then exit 1 else # We're not **root**, make sure ``sudo`` is available - if [[ "$os_PACKAGE" = "deb" ]]; then - CHECK_SUDO_CMD="dpkg -l sudo" - else - CHECK_SUDO_CMD="rpm -q sudo" - fi - $CHECK_SUDO_CMD || die "Sudo is required. Re-run stack.sh as root ONE TIME ONLY to set up sudo." + is_package_installed sudo || die "Sudo is required. Re-run stack.sh as root ONE TIME ONLY to set up sudo." # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || diff --git a/tests/functions.sh b/tests/functions.sh index f111a48d..3a0f3199 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -239,3 +239,44 @@ test_disable_negated_services 'a,-a' '' test_disable_negated_services 'b,a,-a' 'b' test_disable_negated_services 'a,b,-a' 'b' test_disable_negated_services 'a,-a,b' 'b' + + +echo "Testing is_package_installed()" + +if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion +fi + +if [[ "$os_PACKAGE" = "deb" ]]; then + is_package_installed dpkg + VAL=$? +else + is_package_installed rpm + VAL=$? +fi +if [[ "$VAL" -eq 0 ]]; then + echo "OK" +else + echo "is_package_installed() on existing package failed" +fi + +if [[ "$os_PACKAGE" = "deb" ]]; then + is_package_installed dpkg bash + VAL=$? +else + is_package_installed rpm bash + VAL=$? +fi +if [[ "$VAL" -eq 0 ]]; then + echo "OK" +else + echo "is_package_installed() on more than one existing package failed" +fi + +is_package_installed zzzZZZzzz +VAL=$? +if [[ "$VAL" -ne 0 ]]; then + echo "OK" +else + echo "is_package_installed() on non-existing package failed" +fi From d81a0274aa083531bb70a49d77074850e1adfc48 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 31 Aug 2012 18:04:55 -0500 Subject: [PATCH 672/967] Move keystone to lib/keystone The next in a line of changes to break down stack.sh and make it a bit more manageable. Part of blueprint devstack-modular Change-Id: I40405af07b776f045d6bf801f7e4f1ad863139ae --- lib/keystone | 172 ++++++++++++++++++++++++++++++++ stack.sh | 272 +++++++++++++++++++-------------------------------- 2 files changed, 270 insertions(+), 174 deletions(-) create mode 100644 lib/keystone diff --git a/lib/keystone b/lib/keystone new file mode 100644 index 00000000..a0cc6014 --- /dev/null +++ b/lib/keystone @@ -0,0 +1,172 @@ +# lib/keystone +# Functions to control the configuration and operation of **Keystone** + +# Dependencies: +# ``functions`` file +# ``BASE_SQL_CONN`` +# ``SERVICE_HOST`` +# ``SERVICE_TOKEN`` +# ``S3_SERVICE_PORT`` (template backend only) + + +# ``stack.sh`` calls the entry points in this order: +# +# install_keystone +# configure_keystone +# init_keystone +# start_keystone +# stop_keystone +# cleanup_keystone + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following along as the install occurs. 
+set -o xtrace + + +# Defaults +# -------- + +# + +# Set up default directories +KEYSTONE_DIR=$DEST/keystone +KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} +KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf + +KEYSTONECLIENT_DIR=$DEST/python-keystoneclient + +# Select the backend for Keystopne's service catalog +KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-template} +KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates + +# Set Keystone interface configuration +KEYSTONE_API_PORT=${KEYSTONE_API_PORT:-5000} +KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} +KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357} +KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-http} +KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST} +KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} +KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-http} + + +# Entry Points +# ------------ + +# cleanup_keystone() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_keystone() { + # kill instances (nova) + # delete image files (glance) + # This function intentionally left blank + : +} + +# configure_keystoneclient() - Set config files, create data dirs, etc +function configure_keystoneclient() { + setup_develop $KEYSTONECLIENT_DIR +} + +# configure_keystone() - Set config files, create data dirs, etc +function configure_keystone() { + setup_develop $KEYSTONE_DIR + + if [[ ! -d $KEYSTONE_CONF_DIR ]]; then + sudo mkdir -p $KEYSTONE_CONF_DIR + sudo chown `whoami` $KEYSTONE_CONF_DIR + fi + + if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then + cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF + cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR + fi + + # Rewrite stock ``keystone.conf`` + iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN" + iniset $KEYSTONE_CONF sql connection "$BASE_SQL_CONN/keystone?charset=utf8" + iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2" + sed -e " + /^pipeline.*ec2_extension crud_/s|ec2_extension crud_extension|ec2_extension s3_extension crud_extension|; + " -i $KEYSTONE_CONF + + # Append the S3 bits + iniset $KEYSTONE_CONF filter:s3_extension paste.filter_factory "keystone.contrib.s3:S3Extension.factory" + + if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then + # Configure ``keystone.conf`` to use sql + iniset $KEYSTONE_CONF catalog driver keystone.catalog.backends.sql.Catalog + inicomment $KEYSTONE_CONF catalog template_file + else + cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG + + # Add swift endpoints to service catalog if swift is enabled + if is_service_enabled swift; then + echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG + fi + + # Add quantum endpoints to service catalog if quantum is enabled + if is_service_enabled quantum; then + echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG + echo 
"catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG + fi + + sudo sed -e " + s,%SERVICE_HOST%,$SERVICE_HOST,g; + s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g; + " -i $KEYSTONE_CATALOG + + # Configure ``keystone.conf`` to use templates + iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.TemplatedCatalog" + iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG" + fi + + # Set up logging + LOGGING_ROOT="devel" + if [ "$SYSLOG" != "False" ]; then + LOGGING_ROOT="$LOGGING_ROOT,production" + fi + KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_CONF_DIR/logging.conf" + cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_CONF_DIR/logging.conf + iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG" + iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production" + +} + +# init_keystone() - Initialize databases, etc. +function init_keystone() { + # (Re)create keystone database + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;' + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone CHARACTER SET utf8;' + + # Initialize keystone database + $KEYSTONE_DIR/bin/keystone-manage db_sync + + # Set up certificates + $KEYSTONE_DIR/bin/keystone-manage pki_setup +} + +# install_keystoneclient() - Collect source and prepare +function install_keystoneclient() { + git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH +} + +# install_keystone() - Collect source and prepare +function install_keystone() { + git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH +} + +# start_keystone() - Start running processes, including screen +function start_keystone() { + # Start Keystone in a screen window + screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" +} + +# stop_keystone() - Stop running processes +function stop_keystone() { + # Kill the Keystone screen window + screen -S $SCREEN_NAME -p key -X kill +} diff --git a/stack.sh b/stack.sh index 687e5bf2..1c85e6ce 100755 --- a/stack.sh +++ b/stack.sh @@ -262,10 +262,63 @@ sudo mkdir -p $DATA_DIR sudo chown `whoami` $DATA_DIR +# Common Configuration +# ==================== + +# Set fixed and floating range here so we can make sure not to use addresses +# from either range when attempting to guess the IP to use for the host. +# Note that setting FIXED_RANGE may be necessary when running DevStack +# in an OpenStack cloud that uses either of these address ranges internally. +FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28} +FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} +FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} +NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} + +# Find the interface used for the default route +HOST_IP_IFACE=${HOST_IP_IFACE:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }')} +# Search for an IP unless an explicit is set by ``HOST_IP`` environment variable +if [ -z "$HOST_IP" -o "$HOST_IP" == "dhcp" ]; then + HOST_IP="" + HOST_IPS=`LC_ALL=C ip -f inet addr show ${HOST_IP_IFACE} | awk '/inet/ {split($2,parts,"/"); print parts[1]}'` + for IP in $HOST_IPS; do + # Attempt to filter out IP addresses that are part of the fixed and + # floating range. Note that this method only works if the ``netaddr`` + # python library is installed. If it is not installed, an error + # will be printed and the first IP from the interface will be used. + # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct + # address. + if ! 
(address_in_net $IP $FIXED_RANGE || address_in_net $IP $FLOATING_RANGE); then + HOST_IP=$IP + break; + fi + done + if [ "$HOST_IP" == "" ]; then + echo "Could not determine host ip address." + echo "Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted" + exit 1 + fi +fi + +# Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints. +SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} + +# Configure services to use syslog instead of writing to individual log files +SYSLOG=`trueorfalse False $SYSLOG` +SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP} +SYSLOG_PORT=${SYSLOG_PORT:-516} + +# Use color for logging output (only available if syslog is not used) +LOG_COLOR=`trueorfalse True $LOG_COLOR` + +# Service startup timeout +SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} + + # Configure Projects # ================== # Get project function libraries +source $TOP_DIR/lib/keystone source $TOP_DIR/lib/cinder source $TOP_DIR/lib/n-vol source $TOP_DIR/lib/ceilometer @@ -277,9 +330,7 @@ NOVA_DIR=$DEST/nova HORIZON_DIR=$DEST/horizon GLANCE_DIR=$DEST/glance GLANCECLIENT_DIR=$DEST/python-glanceclient -KEYSTONE_DIR=$DEST/keystone NOVACLIENT_DIR=$DEST/python-novaclient -KEYSTONECLIENT_DIR=$DEST/python-keystoneclient OPENSTACKCLIENT_DIR=$DEST/python-openstackclient NOVNC_DIR=$DEST/noVNC SWIFT_DIR=$DEST/swift @@ -313,52 +364,6 @@ INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-} # should work in most cases. SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler} -# Set fixed and floating range here so we can make sure not to use addresses -# from either range when attempting to guess the IP to use for the host. -# Note that setting FIXED_RANGE may be necessary when running DevStack -# in an OpenStack cloud that uses eith of these address ranges internally. -FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} -FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28} - -# Find the interface used for the default route -HOST_IP_IFACE=${HOST_IP_IFACE:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }')} -# Search for an IP unless an explicit is set by ``HOST_IP`` environment variable -if [ -z "$HOST_IP" -o "$HOST_IP" == "dhcp" ]; then - HOST_IP="" - HOST_IPS=`LC_ALL=C ip -f inet addr show ${HOST_IP_IFACE} | awk '/inet/ {split($2,parts,"/"); print parts[1]}'` - for IP in $HOST_IPS; do - # Attempt to filter out IP addresses that are part of the fixed and - # floating range. Note that this method only works if the ``netaddr`` - # python library is installed. If it is not installed, an error - # will be printed and the first IP from the interface will be used. - # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct - # address. - if ! (address_in_net $IP $FIXED_RANGE || address_in_net $IP $FLOATING_RANGE); then - HOST_IP=$IP - break; - fi - done - if [ "$HOST_IP" == "" ]; then - echo "Could not determine host ip address." - echo "Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted" - exit 1 - fi -fi - -# Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints. 
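# The host-IP guessing loop above hinges on the ``address_in_net`` helper from
# ``functions`` (which needs the ``netaddr`` python library); a hedged,
# standalone sketch of the same filtering idea -- the candidate addresses are
# illustrative, the ranges are the defaults set above:
#
#     for IP in 192.168.1.10 10.0.0.5 172.24.4.225; do
#         if ! (address_in_net $IP 10.0.0.0/24 || address_in_net $IP 172.24.4.224/28); then
#             HOST_IP=$IP   # first address outside the fixed/floating ranges wins
#             break
#         fi
#     done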
-SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} - -# Configure services to use syslog instead of writing to individual log files -SYSLOG=`trueorfalse False $SYSLOG` -SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP} -SYSLOG_PORT=${SYSLOG_PORT:-516} - -# Use color for logging output (only available if syslog is not used) -LOG_COLOR=`trueorfalse True $LOG_COLOR` - -# Service startup timeout -SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} - # Generic helper to configure passwords function read_password { set +o xtrace @@ -419,8 +424,6 @@ else fi PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT} -FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} -NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} NET_MAN=${NET_MAN:-FlatDHCPManager} EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST} FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT} @@ -568,14 +571,6 @@ read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE ( # Set the tenant for service accounts in Keystone SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} -# Set Keystone interface configuration -KEYSTONE_API_PORT=${KEYSTONE_API_PORT:-5000} -KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} -KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357} -KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-http} -KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST} -KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} -KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-http} # Horizon @@ -791,10 +786,11 @@ pip_install $(get_packages $FILES/pips | sort -u) # Check Out Source # ---------------- +install_keystoneclient + git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH # Check out the client libs that are used most -git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH git_clone $GLANCECLIENT_REPO $GLANCECLIENT_DIR $GLANCECLIENT_BRANCH @@ -802,7 +798,7 @@ git_clone $GLANCECLIENT_REPO $GLANCECLIENT_DIR $GLANCECLIENT_BRANCH # glance, swift middleware and nova api needs keystone middleware if is_service_enabled key g-api n-api swift; then # unified auth system (manages accounts/tokens) - git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH + install_keystone fi if is_service_enabled swift; then # storage service @@ -849,11 +845,11 @@ fi # Set up our checkouts so they are installed into python path # allowing ``import nova`` or ``import glance.client`` -setup_develop $KEYSTONECLIENT_DIR +configure_keystoneclient setup_develop $NOVACLIENT_DIR setup_develop $OPENSTACKCLIENT_DIR if is_service_enabled key g-api n-api swift; then - setup_develop $KEYSTONE_DIR + configure_keystone fi if is_service_enabled swift; then setup_develop $SWIFT_DIR @@ -984,6 +980,36 @@ sleep 1 screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" +# Keystone +# -------- + +if is_service_enabled key; then + configure_keystone + init_keystone + start_keystone + echo "Waiting for keystone to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ >/dev/null; do sleep 1; done"; then + echo "keystone did not start" + exit 1 + fi + + # ``keystone_data.sh`` creates services, admin and demo users, and roles. 
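# The curl/timeout polling just above is the generic "wait until the API
# answers" idiom stack.sh uses for its services; a hedged sketch of the same
# pattern as a reusable helper (the function name is illustrative, not
# existing DevStack code):
#
#     wait_for_http() {
#         local url=$1
#         timeout ${SERVICE_TIMEOUT:-60} sh -c \
#             "while ! http_proxy= curl -s $url >/dev/null; do sleep 1; done"
#     }
#     wait_for_http "$KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/" \
#         || { echo "keystone did not start"; exit 1; }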
+ SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 + + ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ + SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ + S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \ + DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES HEAT_API_PORT=$HEAT_API_PORT \ + bash -x $FILES/keystone_data.sh + + # Set up auth creds now that keystone is bootstrapped + export OS_AUTH_URL=$SERVICE_ENDPOINT + export OS_TENANT_NAME=admin + export OS_USERNAME=admin + export OS_PASSWORD=$ADMIN_PASSWORD +fi + + # Horizon # ------- @@ -2113,118 +2139,16 @@ if is_service_enabled g-api; then fi fi -if is_service_enabled key; then - # (Re)create keystone database - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone CHARACTER SET utf8;' - - KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} - KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf - KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-template} - - if [[ ! -d $KEYSTONE_CONF_DIR ]]; then - sudo mkdir -p $KEYSTONE_CONF_DIR - sudo chown `whoami` $KEYSTONE_CONF_DIR - fi - - if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then - cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF - cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR - fi - - # Rewrite stock ``keystone.conf`` - iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN" - iniset $KEYSTONE_CONF sql connection "$BASE_SQL_CONN/keystone?charset=utf8" - iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2" - sed -e " - /^pipeline.*ec2_extension crud_/s|ec2_extension crud_extension|ec2_extension s3_extension crud_extension|; - " -i $KEYSTONE_CONF - # Append the S3 bits - iniset $KEYSTONE_CONF filter:s3_extension paste.filter_factory "keystone.contrib.s3:S3Extension.factory" - - if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then - # Configure ``keystone.conf`` to use sql - iniset $KEYSTONE_CONF catalog driver keystone.catalog.backends.sql.Catalog - inicomment $KEYSTONE_CONF catalog template_file - else - KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates - cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG - - # Add swift endpoints to service catalog if swift is enabled - if is_service_enabled swift; then - echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG - echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG - echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG - echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG - fi - - # Add quantum endpoints to service catalog if quantum is enabled - if is_service_enabled quantum; then - echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG - echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG - echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG - echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG - fi - - sudo sed -e " - s,%SERVICE_HOST%,$SERVICE_HOST,g; - s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g; - " -i $KEYSTONE_CATALOG - - # Configure 
``keystone.conf`` to use templates - iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.TemplatedCatalog" - iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG" - fi - - # Set up logging - LOGGING_ROOT="devel" - if [ "$SYSLOG" != "False" ]; then - LOGGING_ROOT="$LOGGING_ROOT,production" - fi - KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_CONF_DIR/logging.conf" - cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_CONF_DIR/logging.conf - iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG" - iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production" - - # Initialize keystone database - $KEYSTONE_DIR/bin/keystone-manage db_sync - - # Set up certificates - $KEYSTONE_DIR/bin/keystone-manage pki_setup - - # Launch keystone and wait for it to answer before continuing - screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" - echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ >/dev/null; do sleep 1; done"; then - echo "keystone did not start" - exit 1 - fi - # ``keystone_data.sh`` creates services, admin and demo users, and roles. - SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 - - ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ - SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ - S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \ - DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES HEAT_API_PORT=$HEAT_API_PORT \ - bash -x $FILES/keystone_data.sh - - # Set up auth creds now that keystone is bootstrapped - export OS_AUTH_URL=$SERVICE_ENDPOINT - export OS_TENANT_NAME=admin - export OS_USERNAME=admin - export OS_PASSWORD=$ADMIN_PASSWORD - - # Create an access key and secret key for nova ec2 register image - if is_service_enabled swift3 && is_service_enabled nova; then - NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1) - NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1) - CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID) - ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') - SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') - add_nova_opt "s3_access_key=$ACCESS_KEY" - add_nova_opt "s3_secret_key=$SECRET_KEY" - add_nova_opt "s3_affix_tenant=True" - fi +# Create an access key and secret key for nova ec2 register image +if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then + NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1) + NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1) + CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID) + ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') + SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') + add_nova_opt "s3_access_key=$ACCESS_KEY" + add_nova_opt "s3_secret_key=$SECRET_KEY" + add_nova_opt "s3_affix_tenant=True" fi screen_it zeromq "cd $NOVA_DIR && $NOVA_DIR/bin/nova-rpc-zmq-receiver" From a39caacad341234e5d9e86ae2b7cf8c2ecbbc190 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Mon, 3 Sep 2012 15:45:53 +0100 Subject: [PATCH 673/967] Make sure tgt is using config.d approach 
Fixes bug 1045358. On ubuntu Oneiric, the tgt configuration was not using the config.d approach. tgt was unable to find the config files, so no volumes could be created. This fix makes sure, that the config.d directory is there, and the configuration includes files from there. Was Cinder only, added same fix for Nova volumes Change-Id: I6752cb628dd22e91e640f0f584fafefa4cd6d0f1 --- lib/cinder | 9 +++++++++ lib/n-vol | 2 ++ 2 files changed, 11 insertions(+) diff --git a/lib/cinder b/lib/cinder index 5f0b2553..ec491dda 100644 --- a/lib/cinder +++ b/lib/cinder @@ -189,10 +189,19 @@ function install_cinder() { git_clone $CINDERCLIENT_REPO $CINDERCLIENT_DIR $CINDERCLIENT_BRANCH } +# apply config.d approach (e.g. Oneiric does not have this) +function _configure_tgt_for_config_d() { + if [[ ! -d /etc/tgt/conf.d/ ]]; then + sudo mkdir /etc/tgt/conf.d + echo "include /etc/tgt/conf.d/*.conf" | sudo tee -a /etc/tgt/targets.conf + fi +} + # start_cinder() - Start running processes, including screen function start_cinder() { if is_service_enabled c-vol; then if [[ "$os_PACKAGE" = "deb" ]]; then + _configure_tgt_for_config_d if [[ ! -f /etc/tgt/conf.d/cinder.conf ]]; then echo "include $CINDER_DIR/volumes/*" | sudo tee /etc/tgt/conf.d/cinder.conf fi diff --git a/lib/n-vol b/lib/n-vol index 30be0cdd..a9d1c7d1 100644 --- a/lib/n-vol +++ b/lib/n-vol @@ -5,6 +5,7 @@ # - functions # - KEYSTONE_AUTH_* must be defined # SERVICE_{TENANT_NAME|PASSWORD} must be defined +# _configure_tgt_for_config_d() from lib/cinder # stack.sh # --------- @@ -94,6 +95,7 @@ function install_nvol() { function start_nvol() { # Setup the tgt configuration file if [[ ! -f /etc/tgt/conf.d/nova.conf ]]; then + _configure_tgt_for_config_d sudo mkdir -p /etc/tgt/conf.d echo "include $NOVA_DIR/volumes/*" | sudo tee /etc/tgt/conf.d/nova.conf fi From fddd8f8f05f52cea220369235c80a84019d1f995 Mon Sep 17 00:00:00 2001 From: Bob Kukura Date: Mon, 10 Sep 2012 00:59:24 -0400 Subject: [PATCH 674/967] Support enable_tunneling openvswitch configuration variable Adds support for setting the new openvswitch configuration variable added in https://review.openstack.org/#/c/12686/. Change-Id: Ic599de0fbdc922160580189b94c666a597abe182 --- stack.sh | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 687e5bf2..6d7a21e6 100755 --- a/stack.sh +++ b/stack.sh @@ -1196,6 +1196,12 @@ if is_service_enabled quantum; then # Example: LB_PHYSICAL_INTERFACE=eth1 LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} + # With the openvswitch plugin, set to True in localrc to enable + # provider GRE tunnels when ENABLE_TENANT_TUNNELS is False. + # + # Example: OVS_ENABLE_TUNNELING=True + OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} + # Put config files in ``/etc/quantum`` for everyone to find if [[ ! 
-d /etc/quantum ]]; then sudo mkdir -p /etc/quantum @@ -1272,6 +1278,11 @@ if is_service_enabled q-svc; then if [[ "$OVS_VLAN_RANGES" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges $OVS_VLAN_RANGES fi + + # Enable tunnel networks if selected + if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True + fi elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then iniset /$Q_PLUGIN_CONF_FILE VLANS tenant_network_type vlan @@ -1302,7 +1313,7 @@ if is_service_enabled q-agt; then quantum_setup_ovs_bridge $OVS_BRIDGE # Setup agent for tunneling - if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then + if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then # Verify tunnels are supported # REVISIT - also check kernel module support for GRE and patch ports OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'` @@ -1311,6 +1322,7 @@ if is_service_enabled q-agt; then echo "OVS 1.4+ is required for tunneling between multiple hosts." exit 1 fi + iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP fi From 4e30ba886f577f53a9a340c5195d2e40625a6f24 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 6 Sep 2012 17:41:54 -0700 Subject: [PATCH 675/967] Switch from root_helper to rootwrap_config root_helper is deprecated, and has been replaced by rootwrap_config. Change-Id: I088ab1c6054fe5bbe0ad3c430432b84c8702fba8 --- stack.sh | 41 ++++++++++++++++++----------------------- 1 file changed, 18 insertions(+), 23 deletions(-) diff --git a/stack.sh b/stack.sh index 3e3d8cf5..8887678e 100755 --- a/stack.sh +++ b/stack.sh @@ -1327,28 +1327,23 @@ sudo chown `whoami` $NOVA_CONF_DIR cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR -# If Nova ships the new rootwrap filters files, deploy them -# (owned by root) and add a parameter to ``$NOVA_ROOTWRAP`` -ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP" -if [[ -d $NOVA_DIR/etc/nova/rootwrap.d ]]; then - # Wipe any existing rootwrap.d files first - if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then - sudo rm -rf $NOVA_CONF_DIR/rootwrap.d - fi - # Deploy filters to /etc/nova/rootwrap.d - sudo mkdir -m 755 $NOVA_CONF_DIR/rootwrap.d - sudo cp $NOVA_DIR/etc/nova/rootwrap.d/*.filters $NOVA_CONF_DIR/rootwrap.d - sudo chown -R root:root $NOVA_CONF_DIR/rootwrap.d - sudo chmod 644 $NOVA_CONF_DIR/rootwrap.d/* - # Set up rootwrap.conf, pointing to /etc/nova/rootwrap.d - sudo cp $NOVA_DIR/etc/nova/rootwrap.conf $NOVA_CONF_DIR/ - sudo sed -e "s:^filters_path=.*$:filters_path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf - sudo chown root:root $NOVA_CONF_DIR/rootwrap.conf - sudo chmod 0644 $NOVA_CONF_DIR/rootwrap.conf - # Specify rootwrap.conf as first parameter to nova-rootwrap - NOVA_ROOTWRAP="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf" - ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP *" -fi +# Deploy new rootwrap filters files (owned by root). 
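# A hedged sanity check of what the block below produces (paths follow the
# comments in this patch; the commands are illustrative only):
#
#     grep '^filters_path=' /etc/nova/rootwrap.conf   # -> filters_path=/etc/nova/rootwrap.d
#     ls -l /etc/nova/rootwrap.d/*.filters            # root-owned, mode 644 filter files
#     # nova.conf then points at it via: rootwrap_config=/etc/nova/rootwrap.conf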
+# Wipe any existing rootwrap.d files first +if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then + sudo rm -rf $NOVA_CONF_DIR/rootwrap.d +fi +# Deploy filters to /etc/nova/rootwrap.d +sudo mkdir -m 755 $NOVA_CONF_DIR/rootwrap.d +sudo cp $NOVA_DIR/etc/nova/rootwrap.d/*.filters $NOVA_CONF_DIR/rootwrap.d +sudo chown -R root:root $NOVA_CONF_DIR/rootwrap.d +sudo chmod 644 $NOVA_CONF_DIR/rootwrap.d/* +# Set up rootwrap.conf, pointing to /etc/nova/rootwrap.d +sudo cp $NOVA_DIR/etc/nova/rootwrap.conf $NOVA_CONF_DIR/ +sudo sed -e "s:^filters_path=.*$:filters_path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf +sudo chown root:root $NOVA_CONF_DIR/rootwrap.conf +sudo chmod 0644 $NOVA_CONF_DIR/rootwrap.conf +# Specify rootwrap.conf as first parameter to nova-rootwrap +ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf *" # Set up the rootwrap sudoers for nova TEMPFILE=`mktemp` @@ -1856,7 +1851,7 @@ add_nova_opt "[DEFAULT]" add_nova_opt "verbose=True" add_nova_opt "auth_strategy=keystone" add_nova_opt "allow_resize_to_same_host=True" -add_nova_opt "root_helper=sudo $NOVA_ROOTWRAP" +add_nova_opt "rootwrap_config=$NOVA_CONF_DIR/rootwrap.conf" add_nova_opt "compute_scheduler_driver=$SCHEDULER" add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF_DIR/$NOVA_CONF" add_nova_opt "fixed_range=$FIXED_RANGE" From 8d6c9bcabbd5cd4c4b278a2d541a6569d72af960 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Tue, 11 Sep 2012 10:05:14 +1200 Subject: [PATCH 676/967] Fix keystone_data.sh to match default_catalog.templates Endpoint creating in keystone_data.sh tends to drift because KEYSTONE_CATALOG_BACKEND=sql is not the default. This patch should bring them closer together, and fix a problem I was having with keystone auth Change-Id: Ifac57b8e69234975d1ff65ace72d46d3a5808119 --- files/keystone_data.sh | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 2a8d0703..37919174 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -101,7 +101,7 @@ if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then --service_id $KEYSTONE_SERVICE \ --publicurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" \ --adminurl "http://$SERVICE_HOST:\$(admin_port)s/v2.0" \ - --internalurl "http://$SERVICE_HOST:\$(admin_port)s/v2.0" + --internalurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" fi # Nova @@ -123,9 +123,9 @@ if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then keystone endpoint-create \ --region RegionOne \ --service_id $NOVA_SERVICE \ - --publicurl "http://$SERVICE_HOST:\$(compute_port)s/v1.1/\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:\$(compute_port)s/v1.1/\$(tenant_id)s" \ - --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v1.1/\$(tenant_id)s" + --publicurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \ + --adminurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \ + --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" fi # Nova needs ResellerAdmin role to download images when accessing # swift through the s3 api. 
The admin role in swift allows a user @@ -197,9 +197,9 @@ if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then keystone endpoint-create \ --region RegionOne \ --service_id $GLANCE_SERVICE \ - --publicurl "http://$SERVICE_HOST:9292/v1" \ - --adminurl "http://$SERVICE_HOST:9292/v1" \ - --internalurl "http://$SERVICE_HOST:9292/v1" + --publicurl "http://$SERVICE_HOST:9292" \ + --adminurl "http://$SERVICE_HOST:9292" \ + --internalurl "http://$SERVICE_HOST:9292" fi fi @@ -223,7 +223,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then --region RegionOne \ --service_id $SWIFT_SERVICE \ --publicurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:8080/v1" \ + --adminurl "http://$SERVICE_HOST:8080" \ --internalurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" fi fi From b80379c3fc85d04619bfa81c559a3917eaeb23ce Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 10 Sep 2012 18:30:37 -0500 Subject: [PATCH 677/967] Change default keystone backend to sql The templated backend for Keystone is limited and does not support the CRUD operations so does not fully exercise the Identity API. Change the default to SQL but leave the templated back-end in place for now. Set KEYSTONE_CATALOG_BACKEND=template in localrc to restore the old behaviour. Change-Id: Id4490194d49b8004583016a9666cb9439cd4700a --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index a0cc6014..a6ab5a36 100644 --- a/lib/keystone +++ b/lib/keystone @@ -36,7 +36,7 @@ KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf KEYSTONECLIENT_DIR=$DEST/python-keystoneclient # Select the backend for Keystopne's service catalog -KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-template} +KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql} KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates # Set Keystone interface configuration From e2790210108cf808bcf317aa8fc3bfe926a655a5 Mon Sep 17 00:00:00 2001 From: Angus Salkeld Date: Tue, 11 Sep 2012 11:24:09 +1000 Subject: [PATCH 678/967] heat had it's api split into two binaries (cfn & cloudwatch) - Rename heat-api to heat-api-cfn - Add heat-api-cloudwatch - Also removed unused heat-engine-paste.ini file. 
- Fix the path to the conf dir (etc/heat not etc/) Change-Id: I9b2c7c5cd7052d5eb6d730833c65812c2f8a0ee1 Signed-off-by: Angus Salkeld --- files/keystone_data.sh | 8 ++-- lib/heat | 106 +++++++++++++++++++++++++---------------- stack.sh | 2 +- 3 files changed, 70 insertions(+), 46 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 37919174..e0d5c63a 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -171,10 +171,10 @@ if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then --description="Heat Service") keystone endpoint-create \ --region RegionOne \ - --service_id $HEAT_SERVICE \ - --publicurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1" \ - --adminurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1" \ - --internalurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1" + --service_id $HEAT_CFN_SERVICE \ + --publicurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \ + --adminurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \ + --internalurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" fi fi diff --git a/lib/heat b/lib/heat index 6f442f87..0b234c4b 100644 --- a/lib/heat +++ b/lib/heat @@ -1,7 +1,7 @@ # lib/heat # Install and start Heat service # To enable, add the following to localrc -# ENABLED_SERVICES+=,heat,h-api,h-eng,h-meta +# ENABLED_SERVICES+=,heat,h-api-cfn,h-api-cw,h-eng,h-meta # Dependencies: # - functions @@ -43,43 +43,47 @@ function configure_heat() { fi sudo chown `whoami` $HEAT_CONF_DIR - HEAT_API_HOST=${HEAT_API_HOST:-$SERVICE_HOST} - HEAT_API_PORT=${HEAT_API_PORT:-8000} + HEAT_API_CFN_HOST=${HEAT_API_CFN_HOST:-$SERVICE_HOST} + HEAT_API_CFN_PORT=${HEAT_API_CFN_PORT:-8000} HEAT_ENGINE_HOST=${HEAT_ENGINE_HOST:-$SERVICE_HOST} HEAT_ENGINE_PORT=${HEAT_ENGINE_PORT:-8001} HEAT_METADATA_HOST=${HEAT_METADATA_HOST:-$SERVICE_HOST} HEAT_METADATA_PORT=${HEAT_METADATA_PORT:-8002} - - HEAT_API_CONF=$HEAT_CONF_DIR/heat-api.conf - cp $HEAT_DIR/etc/heat-api.conf $HEAT_API_CONF - iniset $HEAT_API_CONF DEFAULT debug True - inicomment $HEAT_API_CONF DEFAULT log_file - iniset $HEAT_API_CONF DEFAULT use_syslog $SYSLOG - iniset $HEAT_API_CONF DEFAULT bind_host $HEAT_API_HOST - iniset $HEAT_API_CONF DEFAULT bind_port $HEAT_API_PORT + HEAT_API_CW_HOST=${HEAT_API_CW_HOST:-$SERVICE_HOST} + HEAT_API_CW_PORT=${HEAT_API_CW_PORT:-8003} + + # cloudformation api + HEAT_API_CFN_CONF=$HEAT_CONF_DIR/heat-api-cfn.conf + cp $HEAT_DIR/etc/heat/heat-api-cfn.conf $HEAT_API_CFN_CONF + iniset $HEAT_API_CFN_CONF DEFAULT debug True + inicomment $HEAT_API_CFN_CONF DEFAULT log_file + iniset $HEAT_API_CFN_CONF DEFAULT use_syslog $SYSLOG + iniset $HEAT_API_CFN_CONF DEFAULT bind_host $HEAT_API_CFN_HOST + iniset $HEAT_API_CFN_CONF DEFAULT bind_port $HEAT_API_CFN_PORT if is_service_enabled rabbit; then - iniset $HEAT_API_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu - iniset $HEAT_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $HEAT_API_CONF DEFAULT rabbit_host $RABBIT_HOST + iniset $HEAT_API_CFN_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu + iniset $HEAT_API_CFN_CONF DEFAULT rabbit_password $RABBIT_PASSWORD + iniset $HEAT_API_CFN_CONF DEFAULT rabbit_host $RABBIT_HOST elif is_service_enabled qpid; then - iniset $HEAT_API_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid + iniset $HEAT_API_CFN_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid fi - HEAT_API_PASTE_INI=$HEAT_CONF_DIR/heat-api-paste.ini - cp $HEAT_DIR/etc/heat-api-paste.ini $HEAT_API_PASTE_INI - iniset $HEAT_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST - iniset 
$HEAT_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $HEAT_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $HEAT_API_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $HEAT_API_PASTE_INI filter:authtoken admin_user heat - iniset $HEAT_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD - iniset $HEAT_API_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_API_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens - + HEAT_API_CFN_PASTE_INI=$HEAT_CONF_DIR/heat-api-cfn-paste.ini + cp $HEAT_DIR/etc/heat/heat-api-cfn-paste.ini $HEAT_API_CFN_PASTE_INI + iniset $HEAT_API_CFN_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $HEAT_API_CFN_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $HEAT_API_CFN_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $HEAT_API_CFN_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_API_CFN_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $HEAT_API_CFN_PASTE_INI filter:authtoken admin_user heat + iniset $HEAT_API_CFN_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD + iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens + + # engine HEAT_ENGINE_CONF=$HEAT_CONF_DIR/heat-engine.conf - cp $HEAT_DIR/etc/heat-engine.conf $HEAT_ENGINE_CONF + cp $HEAT_DIR/etc/heat/heat-engine.conf $HEAT_ENGINE_CONF iniset $HEAT_ENGINE_CONF DEFAULT debug True inicomment $HEAT_ENGINE_CONF DEFAULT log_file iniset $HEAT_ENGINE_CONF DEFAULT use_syslog $SYSLOG @@ -96,18 +100,9 @@ function configure_heat() { iniset $HEAT_ENGINE_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid fi - HEAT_ENGINE_PASTE_INI=$HEAT_CONF_DIR/heat-engine-paste.ini - cp $HEAT_DIR/etc/heat-engine-paste.ini $HEAT_ENGINE_PASTE_INI - iniset $HEAT_ENGINE_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $HEAT_ENGINE_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $HEAT_ENGINE_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $HEAT_ENGINE_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 - iniset $HEAT_ENGINE_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $HEAT_ENGINE_PASTE_INI filter:authtoken admin_user heat - iniset $HEAT_ENGINE_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD - + # metadata api HEAT_METADATA_CONF=$HEAT_CONF_DIR/heat-metadata.conf - cp $HEAT_DIR/etc/heat-metadata.conf $HEAT_METADATA_CONF + cp $HEAT_DIR/etc/heat/heat-metadata.conf $HEAT_METADATA_CONF iniset $HEAT_METADATA_CONF DEFAULT debug True inicomment $HEAT_METADATA_CONF DEFAULT log_file iniset $HEAT_METADATA_CONF DEFAULT use_syslog $SYSLOG @@ -123,8 +118,36 @@ function configure_heat() { fi HEAT_METADATA_PASTE_INI=$HEAT_CONF_DIR/heat-metadata-paste.ini - cp 
$HEAT_DIR/etc/heat-metadata-paste.ini $HEAT_METADATA_PASTE_INI + cp $HEAT_DIR/etc/heat/heat-metadata-paste.ini $HEAT_METADATA_PASTE_INI + + # cloudwatch api + HEAT_API_CW_CONF=$HEAT_CONF_DIR/heat-api-cloudwatch.conf + cp $HEAT_DIR/etc/heat/heat-api-cloudwatch.conf $HEAT_API_CW_CONF + iniset $HEAT_API_CW_CONF DEFAULT debug True + inicomment $HEAT_API_CW_CONF DEFAULT log_file + iniset $HEAT_API_CW_CONF DEFAULT use_syslog $SYSLOG + iniset $HEAT_API_CW_CONF DEFAULT bind_host $HEAT_API_CW_HOST + iniset $HEAT_API_CW_CONF DEFAULT bind_port $HEAT_API_CW_PORT + + if is_service_enabled rabbit; then + iniset $HEAT_API_CW_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu + iniset $HEAT_API_CW_CONF DEFAULT rabbit_password $RABBIT_PASSWORD + iniset $HEAT_API_CW_CONF DEFAULT rabbit_host $RABBIT_HOST + elif is_service_enabled qpid; then + iniset $HEAT_API_CW_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid + fi + HEAT_API_CW_PASTE_INI=$HEAT_CONF_DIR/heat-api-cloudwatch-paste.ini + cp $HEAT_DIR/etc/heat/heat-api-cloudwatch-paste.ini $HEAT_API_CW_PASTE_INI + iniset $HEAT_API_CW_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $HEAT_API_CW_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $HEAT_API_CW_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $HEAT_API_CW_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_API_CW_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $HEAT_API_CW_PASTE_INI filter:authtoken admin_user heat + iniset $HEAT_API_CW_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD + iniset $HEAT_API_CW_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_API_CW_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens } # init_heat() - Initialize database @@ -145,7 +168,8 @@ function install_heat() { # start_heat() - Start running processes, including screen function start_heat() { screen_it h-eng "cd $HEAT_DIR; bin/heat-engine --config-file=$HEAT_CONF_DIR/heat-engine.conf" - screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-dir=$HEAT_CONF_DIR/heat-api.conf" + screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-dir=$HEAT_CONF_DIR/heat-api-cfn.conf" + screen_it h-api-cw "cd $HEAT_DIR; bin/heat-api-cloudwatch --config-dir=$HEAT_CONF_DIR/heat-api-cloudwatch.conf" screen_it h-meta "cd $HEAT_DIR; bin/heat-metadata --config-dir=$HEAT_CONF_DIR/heat-metadata.conf" } diff --git a/stack.sh b/stack.sh index a3160d58..efbf38c5 100755 --- a/stack.sh +++ b/stack.sh @@ -999,7 +999,7 @@ if is_service_enabled key; then ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \ - DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES HEAT_API_PORT=$HEAT_API_PORT \ + DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES HEAT_API_CFN_PORT=$HEAT_API_CFN_PORT \ bash -x $FILES/keystone_data.sh # Set up auth creds now that keystone is bootstrapped From 37258958ce7550c0662cf104b007b79e3dc76003 Mon Sep 17 00:00:00 2001 From: Chuck Short Date: Tue, 7 Aug 2012 10:38:44 -0500 Subject: [PATCH 679/967] Add volume tests to 
exercises/euca.sh Excercise euca2ools volumes commands when exercising the other euca2ools as well. Change-Id: Ia43bd233c63224eac5e851b3b8a3dbdbf3b5e1f0 Signed-off-by: Chuck Short --- exercises/euca.sh | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/exercises/euca.sh b/exercises/euca.sh index fb052dd5..79405c20 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -72,6 +72,48 @@ if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-instances $INSTANCE | exit 1 fi +# Volumes +# ------- +if [[ "$ENABLED_SERVICES" =~ "n-vol" || "$ENABLED_SERVICES" =~ "c-vol" ]]; then + VOLUME=`euca-create-volume -s 1 -z $VOLUME_ZONE | cut -f2` + die_if_not_set VOLUME "Failure to create volume" + + # Test that volume has been created + VOLUME=`euca-describe-volumes | cut -f2` + + # Test volume has become available + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then + echo "volume didnt become available within $RUNNING_TIMEOUT seconds" + exit 1 + fi + + # Attach volume to an instance + euca-attach-volume -i $INSTANCE -d $ATTACH_DEVICE $VOLUME || \ + die "Failure attaching volume $VOLUME to $INSTANCE" + if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q in-use; do sleep 1; done"; then + echo "Could not attach $VOLUME to $INSTANCE" + exit 1 + fi + + # Detach volume from an instance + euca-detach-volume $VOLUME || \ + die "Failure detaching volume $VOLUME to $INSTANCE" + if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then + echo "Could not detach $VOLUME to $INSTANCE" + exit 1 + fi + + # Remove volume + euca-delete-volume $VOLUME || \ + die "Failure to delete volume" + if ! timeout $ACTIVE_TIMEOUT sh -c "while euca-describe-volumes | grep $VOLUME; do sleep 1; done"; then + echo "Could not delete $VOLUME" + exit 1 + fi +else + echo "Volume Tests Skipped" +fi + # Allocate floating address FLOATING_IP=`euca-allocate-address | cut -f2` die_if_not_set FLOATING_IP "Failure allocating floating IP" From 5bc706268f1f1dab489293a6ea9538bf825e0799 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 12 Sep 2012 12:34:50 -0500 Subject: [PATCH 680/967] Add python-netaddr to horizon requirements Fixes bug 1036416 Change-Id: I97c1f193d30ad43e434ff33e4aacd1230be0341c --- files/apts/horizon | 1 + files/rpms/horizon | 1 + 2 files changed, 2 insertions(+) diff --git a/files/apts/horizon b/files/apts/horizon index 53bddf09..2161ccd3 100644 --- a/files/apts/horizon +++ b/files/apts/horizon @@ -21,3 +21,4 @@ python-coverage python-cherrypy3 # why? python-migrate nodejs +python-netaddr diff --git a/files/rpms/horizon b/files/rpms/horizon index 5e368208..12f75ba5 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -15,6 +15,7 @@ python-httplib2 python-kombu python-migrate python-mox +python-netaddr python-nose python-paste python-paste-deploy From 98ab500c2c9f1bd98309915643a44bf140a40391 Mon Sep 17 00:00:00 2001 From: Andrew Melton Date: Thu, 6 Sep 2012 15:18:11 -0400 Subject: [PATCH 681/967] prepare_guest_template.sh fails when there are multiple xs-tools iso's in ISO_DIR In the case of two iso's ('xs-tools-6.0.2.iso' and 'xs-tools-6.0.2-1111.iso'), TOOLS_ISO gets set to 'xs-tools-6.0.2-1111.iso xs-tools-6.0.2.iso' This causes 'mount -o loop xs-tools-6.0.2-1111.iso xs-tools-6.0.2.iso $TMP_DIR' to get called, which fails as it's meant to be called with only one file. 
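For example, the command substitution captures both filenames, so the unquoted
$TOOLS_ISO word-splits and mount receives two ISO paths plus the mount point
(a hedged illustration, not output captured from the patch):

    TOOLS_ISO=$(ls /opt/xensource/packages/iso/xs-tools-*.iso)
    # -> ".../xs-tools-6.0.2-1111.iso .../xs-tools-6.0.2.iso"
    mount -o loop $TOOLS_ISO $TMP_DIR   # splits into two ISOs, mount fails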
This fix simply sets TOOLS_ISO to the first iso that ls returns. Change-Id: Id4d883e2b1e33b233c9ee907016b4ce117c28021 --- tools/xen/prepare_guest_template.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh index 60782d0e..baf9c3a2 100755 --- a/tools/xen/prepare_guest_template.sh +++ b/tools/xen/prepare_guest_template.sh @@ -49,7 +49,7 @@ ISO_DIR="/opt/xensource/packages/iso" XS_TOOLS_FILE_NAME="xs-tools.deb" XS_TOOLS_PATH="/root/$XS_TOOLS_FILE_NAME" if [ -e "$ISO_DIR" ]; then - TOOLS_ISO=$(ls $ISO_DIR/xs-tools-*.iso) + TOOLS_ISO=$(ls -1 $ISO_DIR/xs-tools-*.iso | head -1) TMP_DIR=/tmp/temp.$RANDOM mkdir -p $TMP_DIR mount -o loop $TOOLS_ISO $TMP_DIR From 834805350a1f75b6a301cc9e6ffb2d6222e423be Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 12 Sep 2012 14:45:48 -0500 Subject: [PATCH 682/967] Fix errors in client-* exercises Change-Id: Ib100c7a2a4bb7f7c02cf4a66cb8bcf506aa11339 --- exercises/client-args.sh | 10 ++++++---- exercises/client-env.sh | 10 ++++++---- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/exercises/client-args.sh b/exercises/client-args.sh index 9cbb6a66..b3e2ad8d 100755 --- a/exercises/client-args.sh +++ b/exercises/client-args.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -**client-args.sh** +# **client-args.sh** # Test OpenStack client authentication aguemnts handling @@ -140,8 +140,10 @@ report "Nova" $STATUS_NOVA report "Glance" $STATUS_GLANCE report "Swift" $STATUS_SWIFT -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" +if (( $RETURN == 0 )); then + echo "*********************************************************************" + echo "SUCCESS: End DevStack Exercise: $0" + echo "*********************************************************************" +fi exit $RETURN diff --git a/exercises/client-env.sh b/exercises/client-env.sh index 94f4a82c..68c0e5ad 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -**client-env.sh** +# **client-env.sh** # Test OpenStack client enviroment variable handling @@ -149,8 +149,10 @@ report "EC2" $STATUS_EC2 report "Glance" $STATUS_GLANCE report "Swift" $STATUS_SWIFT -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" +if (( $RETURN == 0 )); then + echo "*********************************************************************" + echo "SUCCESS: End DevStack Exercise: $0" + echo "*********************************************************************" +fi exit $RETURN From cea6c51251d9614d878a490775612cdeb888e397 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Mon, 20 Aug 2012 09:09:25 -0400 Subject: [PATCH 683/967] Quantum enhancements 1. Calls agent binaries instead of agent files directly 2. Updates for l3 agent with linuxbridge 3. 
Uses LibvirtHybridOVSBridgeDriver for OVS Change-Id: I442aee913e515a8af75ac3539be4937c5d9da9fb --- stack.sh | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/stack.sh b/stack.sh index 1151f7f1..9d4b2388 100755 --- a/stack.sh +++ b/stack.sh @@ -1355,8 +1355,7 @@ if is_service_enabled q-agt; then if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS fi - - AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py" + AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent" elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then # Setup physical network interface mappings. Override # LB_VLAN_RANGES and LB_INTERFACE_MAPPINGS in localrc for more @@ -1367,8 +1366,7 @@ if is_service_enabled q-agt; then if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS fi - - AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/linuxbridge/agent/linuxbridge_quantum_agent.py" + AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" fi fi @@ -1386,8 +1384,6 @@ if is_service_enabled q-dhcp; then iniset $Q_DHCP_CONF_FILE DEFAULT debug True iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - # Update database - iniset $Q_DHCP_CONF_FILE DEFAULT db_connection "mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/$Q_DB_NAME?charset=utf8" quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url if [[ "$Q_PLUGIN" = "openvswitch" ]]; then @@ -1412,15 +1408,14 @@ if is_service_enabled q-l3; then iniset $Q_L3_CONF_FILE DEFAULT metadata_ip $Q_META_DATA_IP iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE quantum_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url if [[ "$Q_PLUGIN" == "openvswitch" ]]; then iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver + iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE # Set up external bridge # Create it if it does not exist sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE - sudo ovs-vsctl --no-wait br-set-external-id $PUBLIC_BRIDGE bridge-id $PUBLIC_BRIDGE # remove internal ports for PORT in `sudo ovs-vsctl --no-wait list-ports $PUBLIC_BRIDGE`; do TYPE=$(sudo ovs-vsctl get interface $PORT type) @@ -1433,6 +1428,7 @@ if is_service_enabled q-l3; then sudo ip addr flush dev $PUBLIC_BRIDGE elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver + iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge '' fi fi @@ -1955,17 +1951,20 @@ if is_service_enabled quantum; then add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT" if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - NOVA_VIF_DRIVER="nova.virt.libvirt.vif.LibvirtOpenVswitchDriver" - LINUXNET_VIF_DRIVER="nova.network.linux_net.LinuxOVSInterfaceDriver" + NOVA_VIF_DRIVER="nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver" elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then NOVA_VIF_DRIVER="nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver" - LINUXNET_VIF_DRIVER="nova.network.linux_net.QuantumLinuxBridgeInterfaceDriver" fi - add_nova_opt "libvirt_vif_type=ethernet" add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER" add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER" else add_nova_opt "network_manager=nova.network.manager.$NET_MAN" + add_nova_opt "public_interface=$PUBLIC_INTERFACE" + 
add_nova_opt "vlan_interface=$VLAN_INTERFACE" + add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE" + if [ -n "$FLAT_INTERFACE" ]; then + add_nova_opt "flat_interface=$FLAT_INTERFACE" + fi fi if is_service_enabled n-vol; then add_nova_opt "volume_group=$VOLUME_GROUP" @@ -1975,12 +1974,6 @@ if is_service_enabled n-vol; then fi add_nova_opt "osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions" add_nova_opt "my_ip=$HOST_IP" -add_nova_opt "public_interface=$PUBLIC_INTERFACE" -add_nova_opt "vlan_interface=$VLAN_INTERFACE" -add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE" -if [ -n "$FLAT_INTERFACE" ]; then - add_nova_opt "flat_interface=$FLAT_INTERFACE" -fi add_nova_opt "sql_connection=$BASE_SQL_CONN/nova?charset=utf8" add_nova_opt "libvirt_type=$LIBVIRT_TYPE" add_nova_opt "libvirt_cpu_mode=none" From 50ac792157fa2d86c6bc2cb5866f6767aa861b02 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 13 Sep 2012 14:02:01 -0500 Subject: [PATCH 684/967] Move Glance data and Nova state dirs out of source dir * allow NOVA_STATE_PATH to be overridden, default is now /opt/stack/data/nova * add NOVA_INSTANCES_PATH to move the instances dir separately from the state dir * allow GLANCE_CACHE_DIR to be overridden, default is now /opt/stack/data/glance/cache * allow GLANCE_IMAGE_DIR to be overridden, default is inow /opt/stack/data/glance/images * set GLANCE_BIN_DIR to support entry points (future) * allow CINDER_STATE_PATH to be overridden, default is now /opt/stack/data/cinder Change-Id: If95dc19b957ef5b9b14397835cd0543f82717f50 --- lib/cinder | 15 ++++++++---- lib/n-vol | 8 ++++--- stack.sh | 69 ++++++++++++++++++++++++++++++++++-------------------- 3 files changed, 58 insertions(+), 34 deletions(-) diff --git a/lib/cinder b/lib/cinder index ec491dda..250c0291 100644 --- a/lib/cinder +++ b/lib/cinder @@ -3,6 +3,7 @@ # Dependencies: # - functions +# - DEST, DATA_DIR must be defined # - KEYSTONE_AUTH_* must be defined # SERVICE_{TENANT_NAME|PASSWORD} must be defined @@ -25,14 +26,17 @@ set -o xtrace # set up default directories CINDER_DIR=$DEST/cinder -if [ -d $CINDER_DIR/bin ] ; then +CINDERCLIENT_DIR=$DEST/python-cinderclient +CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder} +CINDER_CONF_DIR=/etc/cinder +CINDER_CONF=$CINDER_CONF_DIR/cinder.conf + +# Support entry points installation of console scripts +if [[ -d $CINDER_DIR/bin ]]; then CINDER_BIN_DIR=$CINDER_DIR/bin else CINDER_BIN_DIR=/usr/local/bin fi -CINDERCLIENT_DIR=$DEST/python-cinderclient -CINDER_CONF_DIR=/etc/cinder -CINDER_CONF=$CINDER_CONF_DIR/cinder.conf # Name of the lvm volume group to use/create for iscsi volumes VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} @@ -112,6 +116,7 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI iniset $CINDER_CONF DEFAULT root_helper "sudo ${CINDER_ROOTWRAP}" iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.openstack.volume.contrib.standard_extensions + iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH if is_service_enabled qpid ; then iniset $CINDER_CONF DEFAULT rpc_backend cinder.openstack.common.rpc.impl_qpid @@ -162,7 +167,7 @@ function init_cinder() { if ! 
sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi fi - mkdir -p $CINDER_DIR/volumes + mkdir -p $CINDER_STATE_PATH/volumes if sudo vgs $VOLUME_GROUP; then if [[ "$os_PACKAGE" = "rpm" ]]; then diff --git a/lib/n-vol b/lib/n-vol index a9d1c7d1..99b8cb17 100644 --- a/lib/n-vol +++ b/lib/n-vol @@ -3,7 +3,9 @@ # Dependencies: # - functions +# - DATA_DIR must be defined # - KEYSTONE_AUTH_* must be defined +# - NOVA_DIR, NOVA_BIN_DIR, NOVA_STATE_PATH must be defined # SERVICE_{TENANT_NAME|PASSWORD} must be defined # _configure_tgt_for_config_d() from lib/cinder @@ -64,7 +66,7 @@ function init_nvol() { if ! sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi fi - mkdir -p $NOVA_DIR/volumes + mkdir -p $NOVA_STATE_PATH/volumes if sudo vgs $VOLUME_GROUP; then if [[ "$os_PACKAGE" = "rpm" ]]; then @@ -97,7 +99,7 @@ function start_nvol() { if [[ ! -f /etc/tgt/conf.d/nova.conf ]]; then _configure_tgt_for_config_d sudo mkdir -p /etc/tgt/conf.d - echo "include $NOVA_DIR/volumes/*" | sudo tee /etc/tgt/conf.d/nova.conf + echo "include $NOVA_STATE_PATH/volumes/*" | sudo tee /etc/tgt/conf.d/nova.conf fi if [[ "$os_PACKAGE" = "deb" ]]; then @@ -109,7 +111,7 @@ function start_nvol() { restart_service tgtd fi - screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume" + screen_it n-vol "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-volume" } # stop_nvol() - Stop running processes (non-screen) diff --git a/stack.sh b/stack.sh index c352a275..fa1bf46e 100755 --- a/stack.sh +++ b/stack.sh @@ -317,11 +317,7 @@ source $TOP_DIR/lib/heat source $TOP_DIR/lib/quantum # Set the destination directories for OpenStack projects -NOVA_DIR=$DEST/nova HORIZON_DIR=$DEST/horizon -GLANCE_DIR=$DEST/glance -GLANCECLIENT_DIR=$DEST/python-glanceclient -NOVACLIENT_DIR=$DEST/python-novaclient OPENSTACKCLIENT_DIR=$DEST/python-openstackclient NOVNC_DIR=$DEST/noVNC SWIFT_DIR=$DEST/swift @@ -330,6 +326,33 @@ SWIFTCLIENT_DIR=$DEST/python-swiftclient QUANTUM_DIR=$DEST/quantum QUANTUM_CLIENT_DIR=$DEST/python-quantumclient +# Nova defaults +NOVA_DIR=$DEST/nova +NOVACLIENT_DIR=$DEST/python-novaclient +NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova} +# INSTANCES_PATH is the previous name for this +NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}} + +# Support entry points installation of console scripts +if [[ -d $NOVA_DIR/bin ]]; then + NOVA_BIN_DIR=$NOVA_DIR/bin +else + NOVA_BIN_DIR=/usr/local/bin +fi + +# Glance defaults +GLANCE_DIR=$DEST/glance +GLANCECLIENT_DIR=$DEST/python-glanceclient +GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} +GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images} + +# Support entry points installation of console scripts +if [[ -d $GLANCE_DIR/bin ]]; then + GLANCE_BIN_DIR=$GLANCE_DIR/bin +else + GLANCE_BIN_DIR=/usr/local/bin +fi + # Default Quantum Plugin Q_PLUGIN=${Q_PLUGIN:-openvswitch} # Default Quantum Port @@ -1062,13 +1085,11 @@ if is_service_enabled g-reg; then fi sudo chown `whoami` $GLANCE_CONF_DIR - GLANCE_IMAGE_DIR=$DEST/glance/images # Delete existing images rm -rf $GLANCE_IMAGE_DIR mkdir -p $GLANCE_IMAGE_DIR - GLANCE_CACHE_DIR=$DEST/glance/cache - # Delete existing images + # Delete existing cache rm -rf $GLANCE_CACHE_DIR mkdir -p $GLANCE_CACHE_DIR @@ -1144,7 +1165,7 @@ if is_service_enabled g-reg; then GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json cp $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON - $GLANCE_DIR/bin/glance-manage db_sync + $GLANCE_BIN_DIR/glance-manage db_sync fi @@ -1613,15 +1634,15 @@ EOF' # 
~~~~~~~~~~~~~~~~ # Nova stores each instance in its own directory. - mkdir -p $NOVA_DIR/instances + mkdir -p $NOVA_INSTANCES_PATH # You can specify a different disk to be mounted and used for backing the # virtual machines. If there is a partition labeled nova-instances we # mount it (ext filesystems can be labeled via e2label). if [ -L /dev/disk/by-label/nova-instances ]; then - if ! mount -n | grep -q $NOVA_DIR/instances; then - sudo mount -L nova-instances $NOVA_DIR/instances - sudo chown -R `whoami` $NOVA_DIR/instances + if ! mount -n | grep -q $NOVA_INSTANCES_PATH; then + sudo mount -L nova-instances $NOVA_INSTANCES_PATH + sudo chown -R `whoami` $NOVA_INSTANCES_PATH fi fi @@ -1640,15 +1661,15 @@ EOF' sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | sudo iscsiadm --mode node --op delete || true # Clean out the instances directory. - sudo rm -rf $NOVA_DIR/instances/* + sudo rm -rf $NOVA_INSTANCES_PATH/* fi if is_service_enabled n-net q-dhcp; then # Delete traces of nova networks from prior runs sudo killall dnsmasq || true clean_iptables - rm -rf $NOVA_DIR/networks - mkdir -p $NOVA_DIR/networks + rm -rf $NOVA_STATE_PATH/networks + mkdir -p $NOVA_STATE_PATH/networks # Force IP forwarding on, just on case sudo sysctl -w net.ipv4.ip_forward=1 @@ -1918,13 +1939,6 @@ elif is_service_enabled n-vol; then init_nvol fi -# Support entry points installation of console scripts -if [ -d $NOVA_DIR/bin ] ; then - NOVA_BIN_DIR=$NOVA_DIR/bin -else - NOVA_BIN_DIR=/usr/local/bin -fi - NOVA_CONF=nova.conf function add_nova_opt { echo "$1" >> $NOVA_CONF_DIR/$NOVA_CONF @@ -2016,8 +2030,11 @@ elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then fi add_nova_opt "glance_api_servers=$GLANCE_HOSTPORT" add_nova_opt "force_dhcp_release=True" -if [ -n "$INSTANCES_PATH" ]; then - add_nova_opt "instances_path=$INSTANCES_PATH" +if [ -n "$NOVA_STATE_PATH" ]; then + add_nova_opt "state_path=$NOVA_STATE_PATH" +fi +if [ -n "$NOVA_INSTANCES_PATH" ]; then + add_nova_opt "instances_path=$NOVA_INSTANCES_PATH" fi if [ "$MULTI_HOST" != "False" ]; then add_nova_opt "multi_host=True" @@ -2124,12 +2141,12 @@ fi # Launch the glance registry service if is_service_enabled g-reg; then - screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" + screen_it g-reg "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" fi # Launch the glance api and wait for it to answer before continuing if is_service_enabled g-api; then - screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" + screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
http_proxy= wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then echo "g-api did not start" From 636a3ff4294a6841f6076283bbed561dc2676e30 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 14 Sep 2012 11:36:07 -0500 Subject: [PATCH 685/967] Spiff up the upload_image() format handling * attempt to detect format of *.img files automatically, recognizing: qcow2,raw,vdi,vmdk,vpc Change-Id: I92ec141584ba8237b67ca640e401a1b88860747e --- functions | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 664cfa0c..0cfffb87 100644 --- a/functions +++ b/functions @@ -792,7 +792,12 @@ function upload_image() { *.img) IMAGE="$FILES/$IMAGE_FNAME"; IMAGE_NAME=$(basename "$IMAGE" ".img") - DISK_FORMAT=raw + format=$(qemu-img info ${IMAGE} | awk '/^file format/ { print $3; exit }') + if [[ ",qcow2,raw,vdi,vmdk,vpc," =~ ",$format," ]]; then + DISK_FORMAT=$format + else + DISK_FORMAT=raw + fi CONTAINER_FORMAT=bare ;; *.img.gz) From e5d923808b4f23f82830232914b8c962ddc5cf5a Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 13 Sep 2012 17:19:03 -0700 Subject: [PATCH 686/967] Spelling fixes Change-Id: Ia3c494be4460dc95f1119492dfef730c62467f34 --- stack.sh | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/stack.sh b/stack.sh index 1151f7f1..86643d58 100755 --- a/stack.sh +++ b/stack.sh @@ -192,7 +192,7 @@ if [[ $EUID -eq 0 ]]; then useradd -g stack -s /bin/bash -d $DEST -m stack fi - echo "Giving stack user passwordless sudo priviledges" + echo "Giving stack user passwordless sudo privileges" # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || echo "#includedir /etc/sudoers.d" >> /etc/sudoers @@ -904,8 +904,8 @@ EOF fi -# Finalize queue instllation -# -------------------------- +# Finalize queue installation +# ---------------------------- if is_service_enabled rabbit; then # Start rabbitmq-server @@ -967,7 +967,7 @@ fi # Create a new named screen to run processes in screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash sleep 1 -# Set a reasonable statusbar +# Set a reasonable status bar screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" @@ -1501,8 +1501,8 @@ if is_service_enabled n-api; then # Get the sample configuration file in place cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR - # Rewrite the authtoken configration for our Keystone service. - # This is a bit defensive to allow the sample file some varaince. + # Rewrite the authtoken configuration for our Keystone service. + # This is a bit defensive to allow the sample file some variance. sed -e " /^admin_token/i admin_tenant_name = $SERVICE_TENANT_NAME /admin_tenant_name/s/^.*$/admin_tenant_name = $SERVICE_TENANT_NAME/; @@ -1714,12 +1714,12 @@ if is_service_enabled swift; then sudo chown -R $USER: ${SWIFT_CONFIG_DIR} /var/run/swift if [[ "$SWIFT_CONFIG_DIR" != "/etc/swift" ]]; then - # Some swift tools are hard-coded to use ``/etc/swift`` and are apparenty not going to be fixed. + # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed. # Create a symlink if the config dir is moved sudo ln -sf ${SWIFT_CONFIG_DIR} /etc/swift fi - # Swift use rsync to syncronize between all the different + # Swift use rsync to synchronize between all the different # partitions (which make more sense when you have a multi-node # setup) we configure it with our version of rsync. 
sed -e " From d53bedc6bbe4de8daabca2015d05ddda50574ca8 Mon Sep 17 00:00:00 2001 From: John Griffith Date: Tue, 11 Sep 2012 14:15:54 -0600 Subject: [PATCH 687/967] Enhance tgt cleanup in unstack.sh * Check that the tgt driver is in a good state when we start; if not, try to restart it * Remove targets based on iqn entries in the persist files * Remove the persist files themselves * Stop the tgt service Updated to use the *_STATE_PATH vars Change-Id: I3ba9f2b8c099c7f290696760527fe2147d9b1d38 --- unstack.sh | 42 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 38 insertions(+), 4 deletions(-) diff --git a/unstack.sh b/unstack.sh index 30ee512e..1bace156 100755 --- a/unstack.sh +++ b/unstack.sh @@ -18,6 +18,13 @@ source $TOP_DIR/functions # Load local configuration source $TOP_DIR/stackrc +# Destination path for service data +DATA_DIR=${DATA_DIR:-${DEST}/data} + +# Get project function libraries +source $TOP_DIR/lib/cinder +source $TOP_DIR/lib/n-vol + # Determine what system we are running on. This provides ``os_VENDOR``, # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` GetOSVersion @@ -45,14 +52,41 @@ if is_service_enabled horizon; then stop_service apache2 fi +SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* + # Get the iSCSI volumes if is_service_enabled cinder n-vol; then + if is_service_enabled n-vol; then + SCSI_PERSIST_DIR=$NOVA_STATE_PATH/volumes/* + fi + TARGETS=$(sudo tgtadm --op show --mode target) + if [ $? -ne 0 ]; then + # If tgt driver isn't running this won't work obviously + # So check the response and restart if need be + echo "tgtd seems to be in a bad state, restarting..." + if [[ "$os_PACKAGE" = "deb" ]]; then + restart_service tgt + else + restart_service tgtd + fi + TARGETS=$(sudo tgtadm --op show --mode target) + fi + if [[ -n "$TARGETS" ]]; then - # FIXME(dtroyer): this could very well require more here to - # clean up left-over volumes - echo "iSCSI target cleanup needed:" - echo "$TARGETS" + iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's/<target //' | sed 's/>//') ) + for i in "${iqn_list[@]}"; do + echo removing iSCSI target: $i + sudo tgt-admin --delete $i + done + fi + + if is_service_enabled cinder; then + sudo rm -rf $CINDER_STATE_PATH/volumes/* + fi + + if is_service_enabled n-vol; then + sudo rm -rf $NOVA_STATE_PATH/volumes/* fi if [[ "$os_PACKAGE" = "deb" ]]; then From 31d31855feb25c23664c191b05f94db626f6495d Mon Sep 17 00:00:00 2001 From: Surya Prabhakar Date: Mon, 17 Sep 2012 20:25:41 +0530 Subject: [PATCH 688/967] Adding ceilometer-api for devstack The Ubuntu repos (apt) cannot be used since they ship an older Flask version.
Adding it in files/pips/ceilometer-api Change-Id: Ia397468eacd3cd2b4233617e1ffeb763ac207478 --- lib/ceilometer | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 4c3bb52a..3ef4e067 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -1,5 +1,8 @@ # lib/ceilometer # Install and start Ceilometer service +# To enable, add the following to localrc +# ENABLED_SERVICES+=ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api + # Dependencies: # - functions @@ -32,6 +35,7 @@ fi CEILOMETER_CONF_DIR=/etc/ceilometer CEILOMETER_AGENT_CONF=$CEILOMETER_CONF_DIR/ceilometer-agent.conf CEILOMETER_COLLECTOR_CONF=$CEILOMETER_CONF_DIR/ceilometer-collector.conf +CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api # cleanup_ceilometer() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up @@ -43,10 +47,12 @@ function cleanup_ceilometer() { # configure_ceilometer() - Set config files, create data dirs, etc function configure_ceilometer() { setup_develop $CEILOMETER_DIR - if [ ! -d $CEILOMETER_CONF_DIR ]; then - sudo mkdir -m 755 -p $CEILOMETER_CONF_DIR - fi - sudo chown `whoami` $CEILOMETER_CONF_DIR + + [ -d $CEILOMETER_CONF_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_CONF_DIR + sudo chown $USER $CEILOMETER_CONF_DIR + + [ ! -d $CEILOMETER_API_LOG_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_API_LOG_DIR + sudo chown $USER $CEILOMETER_API_LOG_DIR # ceilometer confs are copy of /etc/nova/nova.conf which must exist first grep -v format_string $NOVA_CONF_DIR/$NOVA_CONF > $CEILOMETER_AGENT_CONF @@ -63,4 +69,5 @@ function start_ceilometer() { screen_it ceilometer-acompute "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_AGENT_CONF" screen_it ceilometer-acentral "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_AGENT_CONF" screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_COLLECTOR_CONF" + screen_it ceilometer-api "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR" } From 7903b795dd4be552e4d21c879958fadc82472259 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 13 Sep 2012 17:16:12 -0500 Subject: [PATCH 689/967] Add non-verbose output mode Set VERBOSE=False to turn off the noise of stack.sh output. All output still is written to the logfile if LOGFILE is set. Rebased Change-Id: I316bc4d68c997ec907a48e720e2f7778428d935b --- lib/ceilometer | 9 +++-- lib/cinder | 9 +++-- lib/heat | 9 +++-- lib/keystone | 9 +++-- lib/n-vol | 9 +++-- lib/template | 9 +++-- stack.sh | 98 ++++++++++++++++++++++++++++++++++++++++++++++---- 7 files changed, 127 insertions(+), 25 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 3ef4e067..02087537 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -16,9 +16,9 @@ # stop_XXX # cleanup_XXX -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following along as the install occurs. 
-set -o xtrace +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace # Defaults @@ -71,3 +71,6 @@ function start_ceilometer() { screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_COLLECTOR_CONF" screen_it ceilometer-api "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR" } + +# Restore xtrace +$XTRACE diff --git a/lib/cinder b/lib/cinder index 250c0291..de2debce 100644 --- a/lib/cinder +++ b/lib/cinder @@ -16,9 +16,9 @@ # stop_XXX # cleanup_XXX -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following along as the install occurs. -set -o xtrace +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace # Defaults @@ -233,3 +233,6 @@ function stop_cinder() { stop_service tgt fi } + +# Restore xtrace +$XTRACE diff --git a/lib/heat b/lib/heat index 0b234c4b..e713b399 100644 --- a/lib/heat +++ b/lib/heat @@ -15,9 +15,9 @@ # stop_XXX # cleanup_XXX -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following along as the install occurs. -set -o xtrace +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace # Defaults @@ -178,3 +178,6 @@ function stop_heat() { # This function intentionally left blank : } + +# Restore xtrace +$XTRACE diff --git a/lib/keystone b/lib/keystone index a6ab5a36..06920f84 100644 --- a/lib/keystone +++ b/lib/keystone @@ -18,9 +18,9 @@ # stop_keystone # cleanup_keystone -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following along as the install occurs. -set -o xtrace +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace # Defaults @@ -170,3 +170,6 @@ function stop_keystone() { # Kill the Keystone screen window screen -S $SCREEN_NAME -p key -X kill } + +# Restore xtrace +$XTRACE diff --git a/lib/n-vol b/lib/n-vol index 99b8cb17..cc669cbd 100644 --- a/lib/n-vol +++ b/lib/n-vol @@ -18,9 +18,9 @@ # stop_nvol # cleanup_nvol -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following along as the install occurs. -set -o xtrace +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace # Defaults @@ -120,3 +120,6 @@ function stop_nvol() { stop_service tgt } + +# Restore xtrace +$XTRACE diff --git a/lib/template b/lib/template index 78b848dc..02de5cef 100644 --- a/lib/template +++ b/lib/template @@ -16,9 +16,9 @@ # stop_XXXX # cleanup_XXXX -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following along as the install occurs. -set -o xtrace +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace # Defaults @@ -75,3 +75,6 @@ function stop_XXXX() { # FIXME(dtroyer): stop only our screen screen window? 
: } + +# Restore xtrace +$XTRACE diff --git a/stack.sh b/stack.sh index d7e951c2..9184c29e 100755 --- a/stack.sh +++ b/stack.sh @@ -166,6 +166,9 @@ if is_service_enabled cinder && is_service_enabled n-vol; then exit 1 fi +# Set up logging level +VERBOSE=$(trueorfalse True $VERBOSE) + # root Access # ----------- @@ -380,6 +383,7 @@ SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler} # Generic helper to configure passwords function read_password { + XTRACE=$(set +o | grep xtrace) set +o xtrace var=$1; msg=$2 pw=${!var} @@ -416,7 +420,7 @@ function read_password { eval "$var=$pw" echo "$var=$pw" >> $localrc fi - set -o xtrace + $XTRACE } @@ -599,6 +603,18 @@ APACHE_GROUP=${APACHE_GROUP:-$APACHE_USER} # Log files # --------- +# Echo text to the log file, summary log file and stdout +# echo_summary "something to say" +function echo_summary() { + echo $@ >&6 +} + +# Echo text only to stdout, no log files +# echo_nolog "something not for the logs" +function echo_nolog() { + echo $@ >&3 +} + # Set up logging for ``stack.sh`` # Set ``LOGFILE`` to turn on logging # Append '.xxxxxxxx' to the given name to maintain history @@ -617,13 +633,38 @@ if [[ -n "$LOGFILE" ]]; then LOGNAME=$(basename "$LOGFILE") mkdir -p $LOGDIR find $LOGDIR -maxdepth 1 -name $LOGNAME.\* -mtime +$LOGDAYS -exec rm {} \; - LOGFILE=$LOGFILE.${CURRENT_LOG_TIME} - # Redirect stdout/stderr to tee to write the log file - exec 1> >( tee "${LOGFILE}" ) 2>&1 - echo "stack.sh log $LOGFILE" + SUMFILE=$LOGFILE.${CURRENT_LOG_TIME}.summary + + # Redirect output according to config + # Copy stdout to fd 3 + exec 3>&1 + if [[ "$VERBOSE" == "True" ]]; then + # Redirect stdout/stderr to tee to write the log file + exec 1> >( tee "${LOGFILE}" ) 2>&1 + # Set up a second fd for output + exec 6> >( tee "${SUMFILE}" ) + else + # Set fd 1 and 2 to primary logfile + exec 1> "${LOGFILE}" 2>&1 + # Set fd 6 to summary logfile and stdout + exec 6> >( tee "${SUMFILE}" /dev/fd/3 ) + fi + + echo_summary "stack.sh log $LOGFILE" # Specified logfile name always links to the most recent log ln -sf $LOGFILE $LOGDIR/$LOGNAME + ln -sf $SUMFILE $LOGDIR/$LOGNAME.summary +else + # Set up output redirection without log files + # Copy stdout to fd 3 + exec 3>&1 + if [[ "$VERBOSE" != "yes" ]]; then + # Throw away stdout and stderr + exec 1>/dev/null 2>&1 + fi + # Always send summary fd to original stdout + exec 6>&3 fi # Set up logging of screen windows @@ -667,6 +708,7 @@ set -o xtrace # OpenStack uses a fair number of other projects. 
# Install package requirements +echo_summary "Installing package prerequisites" if [[ "$os_PACKAGE" = "deb" ]]; then install_package $(get_packages $FILES/apts) else @@ -785,6 +827,7 @@ TRACK_DEPENDS=${TRACK_DEPENDS:-False} # Install python packages into a virtualenv so that we can track them if [[ $TRACK_DEPENDS = True ]] ; then + echo_summary "Installing Python packages into a virtualenv $DEST/.venv" install_package python-virtualenv rm -rf $DEST/.venv @@ -794,12 +837,15 @@ if [[ $TRACK_DEPENDS = True ]] ; then fi # Install python requirements +echo_summary "Installing Python prerequisites" pip_install $(get_packages $FILES/pips | sort -u) # Check Out Source # ---------------- +echo_summary "Installing OpenStack project source" + install_keystoneclient git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH @@ -857,6 +903,8 @@ fi # Initialization # ============== +echo_summary "Configuring OpenStack projects" + # Set up our checkouts so they are installed into python path # allowing ``import nova`` or ``import glance.client`` configure_keystoneclient @@ -923,6 +971,7 @@ EOF EOF sudo mv /tmp/90-stack-s.conf /etc/rsyslog.d fi + echo_summary "Starting rsyslog" restart_service rsyslog fi @@ -932,6 +981,7 @@ fi if is_service_enabled rabbit; then # Start rabbitmq-server + echo_summary "Starting RabbitMQ" if [[ "$os_PACKAGE" = "rpm" ]]; then # RPM doesn't start the service restart_service rabbitmq-server @@ -939,6 +989,7 @@ if is_service_enabled rabbit; then # change the rabbit password since the default is "guest" sudo rabbitmqctl change_password guest $RABBIT_PASSWORD elif is_service_enabled qpid; then + echo_summary "Starting qpid" restart_service qpidd fi @@ -947,6 +998,7 @@ fi # ----- if is_service_enabled mysql; then + echo_summary "Configuring and starting MySQL" # Start mysql-server if [[ "$os_PACKAGE" = "rpm" ]]; then @@ -998,6 +1050,7 @@ screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" # -------- if is_service_enabled key; then + echo_summary "Starting Keystone" configure_keystone init_keystone start_keystone @@ -1030,6 +1083,7 @@ fi # Set up the django horizon application to serve via apache/wsgi if is_service_enabled horizon; then + echo_summary "Configuring and starting Horizon" # Remove stale session database. rm -f $HORIZON_DIR/openstack_dashboard/local/dashboard_openstack.sqlite3 @@ -1079,6 +1133,8 @@ fi # ------ if is_service_enabled g-reg; then + echo_summary "Configuring Glance" + GLANCE_CONF_DIR=/etc/glance if [[ ! -d $GLANCE_CONF_DIR ]]; then sudo mkdir -p $GLANCE_CONF_DIR @@ -1174,6 +1230,7 @@ fi # ------- if is_service_enabled quantum; then + echo_summary "Configuring Quantum" # # Quantum Network Configuration # @@ -1469,6 +1526,8 @@ fi # Nova # ---- +echo_summary "Configuring Nova" + # Put config files in ``/etc/nova`` for everyone to find NOVA_CONF_DIR=/etc/nova if [[ ! -d $NOVA_CONF_DIR ]]; then @@ -1676,6 +1735,7 @@ fi # --------------- if is_service_enabled swift; then + echo_summary "Configuring Swift" # Make sure to kill all swift processes first swift-init all stop || true @@ -1930,8 +1990,10 @@ fi # -------------- if is_service_enabled cinder; then + echo_summary "Configuring Cinder" init_cinder elif is_service_enabled n-vol; then + echo_summary "Configuring Nova volumes" init_nvol fi @@ -2072,6 +2134,7 @@ done # --------- if [ "$VIRT_DRIVER" = 'xenserver' ]; then + echo_summary "Using XenServer virtualization driver" read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." 
add_nova_opt "compute_driver=xenapi.XenAPIDriver" XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"} @@ -2084,6 +2147,7 @@ if [ "$VIRT_DRIVER" = 'xenserver' ]; then XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} add_nova_opt "firewall_driver=$XEN_FIREWALL_DRIVER" elif [ "$VIRT_DRIVER" = 'openvz' ]; then + echo_summary "Using OpenVZ virtualization driver" # TODO(deva): OpenVZ driver does not yet work if compute_driver is set here. # Replace connection_type when this is fixed. # add_nova_opt "compute_driver=openvz.connection.OpenVzConnection" @@ -2091,6 +2155,7 @@ elif [ "$VIRT_DRIVER" = 'openvz' ]; then LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER" else + echo_summary "Using libvirt virtualization driver" add_nova_opt "compute_driver=libvirt.LibvirtDriver" LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER" @@ -2121,6 +2186,7 @@ fi # ---- if is_service_enabled heat; then + echo_summary "Configuring Heat" init_heat fi @@ -2134,6 +2200,7 @@ fi # Launch the glance registry service if is_service_enabled g-reg; then + echo_summary "Starting Glance" screen_it g-reg "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" fi @@ -2163,6 +2230,7 @@ screen_it zeromq "cd $NOVA_DIR && $NOVA_DIR/bin/nova-rpc-zmq-receiver" # Launch the nova-api and wait for it to answer before continuing if is_service_enabled n-api; then + echo_summary "Starting Nova API" add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS" screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api" echo "Waiting for nova-api to start..." @@ -2173,6 +2241,7 @@ if is_service_enabled n-api; then fi if is_service_enabled q-svc; then + echo_summary "Starting Quantum" # Start the Quantum service screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" echo "Waiting for Quantum to start..." @@ -2226,6 +2295,7 @@ screen_it q-agt "sudo python $AGENT_BINARY --config-file $Q_CONF_FILE --config-f screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file $Q_CONF_FILE --config-file=$Q_DHCP_CONF_FILE" screen_it q-l3 "sudo python $AGENT_L3_BINARY --config-file $Q_CONF_FILE --config-file=$Q_L3_CONF_FILE" +echo_summary "Starting Nova" # The group **libvirtd** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **libvirtd** group. 
# ``screen_it`` checks ``is_service_enabled``, it is not needed here @@ -2237,12 +2307,15 @@ screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_ screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF" screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" if is_service_enabled n-vol; then + echo_summary "Starting Nova volumes" start_nvol fi if is_service_enabled cinder; then + echo_summary "Starting Cinder" start_cinder fi if is_service_enabled ceilometer; then + echo_summary "Starting Ceilometer" configure_ceilometer start_ceilometer fi @@ -2256,6 +2329,7 @@ is_service_enabled swift3 || \ # launch heat engine, api and metadata if is_service_enabled heat; then + echo_summary "Starting Heat" start_heat fi @@ -2274,6 +2348,7 @@ fi # * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz if is_service_enabled g-reg; then + echo_summary "Uploading images" TOKEN=$(keystone token-get | grep ' id ' | get_field 2) # Option to upload legacy ami-tty, which works with xenserver @@ -2302,6 +2377,15 @@ fi set +o xtrace +if [[ -n "$LOGFILE" ]]; then + exec 1>&3 + # Force all output to stdout and logs now + exec 1> >( tee "${LOGFILE}" ) 2>&1 +else + # Force all output to stdout now + exec 1>&3 +fi + # Using the cloud # --------------- @@ -2329,8 +2413,8 @@ echo "This is your host ip: $HOST_IP" # Warn that ``EXTRA_FLAGS`` needs to be converted to ``EXTRA_OPTS`` if [[ -n "$EXTRA_FLAGS" ]]; then - echo "WARNING: EXTRA_FLAGS is defined and may need to be converted to EXTRA_OPTS" + echo_summary "WARNING: EXTRA_FLAGS is defined and may need to be converted to EXTRA_OPTS" fi # Indicate how long this took to run (bash maintained variable ``SECONDS``) -echo "stack.sh completed in $SECONDS seconds." +echo_summary "stack.sh completed in $SECONDS seconds." From 36cf7ee141204c392499cdd351307d1d7838f087 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Mon, 17 Sep 2012 16:49:24 +1200 Subject: [PATCH 690/967] Fix the variable used for the service_id of the heat endpoint Change-Id: I0c8190713722e3bcef19e94629d4f0d4fbfaa8fa --- files/keystone_data.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index e0d5c63a..17549101 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -165,7 +165,7 @@ if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then --user_id $HEAT_USER \ --role_id $ADMIN_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - HEAT_SERVICE=$(get_id keystone service-create \ + HEAT_CFN_SERVICE=$(get_id keystone service-create \ --name=heat \ --type=orchestration \ --description="Heat Service") From 73f6f25b87d0f06276540b5ea67dc924bebf0581 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 17 Sep 2012 11:22:21 -0500 Subject: [PATCH 691/967] Move glance to lib/glance The next in a line of changes to break down stack.sh and make it a bit more manageable. 
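(For orientation, the lib/* files introduced in this series all expose the same entry points, which stack.sh calls in a fixed order; a condensed sketch using the Glance functions from the diff below, with the ordering taken from how stack.sh invokes them:)

    source $TOP_DIR/lib/glance   # defaults such as GLANCE_CONF_DIR and GLANCE_BIN_DIR
    install_glance               # git_clone the project source
    configure_glance             # write the /etc/glance config files
    init_glance                  # (re)create the glance database and image/cache dirs
    start_glance                 # launch g-api/g-reg in screen and wait for the API
    stop_glance                  # kill the g-api/g-reg screen windows again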
Part of blueprint devstack-modular Change-Id: Ie0104f0de281497f2c10f653aebb8e7cbedc4204 --- lib/glance | 180 +++++++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 125 +++---------------------------------- 2 files changed, 189 insertions(+), 116 deletions(-) create mode 100644 lib/glance diff --git a/lib/glance b/lib/glance new file mode 100644 index 00000000..44990f14 --- /dev/null +++ b/lib/glance @@ -0,0 +1,180 @@ +# lib/glance +# Functions to control the configuration and operation of the Glance service + +# Dependencies: +# ``functions`` file +# ``DEST``, ``DATA_DIR`` must be defined +# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# ``SERVICE_HOST`` + +# ``stack.sh`` calls the entry points in this order: +# +# install_glance +# configure_glance +# init_glance +# start_glance +# stop_glance +# cleanup_glance + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# + +# Set up default directories +GLANCE_DIR=$DEST/glance +GLANCECLIENT_DIR=$DEST/python-glanceclient +GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} +GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images} + +GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} +GLANCE_REGISTRY_CONF=$GLANCE_CONF_DIR/glance-registry.conf +GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf +GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini +GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini +GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf +GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json + +# Support entry points installation of console scripts +if [[ -d $GLANCE_DIR/bin ]]; then + GLANCE_BIN_DIR=$GLANCE_DIR/bin +else + GLANCE_BIN_DIR=/usr/local/bin +fi + +# Glance connection info. Note the port must be specified. +GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292} + + +# Entry Points +# ------------ + +# cleanup_glance() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_glance() { + # kill instances (nova) + # delete image files (glance) + # This function intentionally left blank + : +} + +# configure_glanceclient() - Set config files, create data dirs, etc +function configure_glanceclient() { + setup_develop $GLANCECLIENT_DIR +} + +# configure_glance() - Set config files, create data dirs, etc +function configure_glance() { + setup_develop $GLANCE_DIR + + if [[ ! 
-d $GLANCE_CONF_DIR ]]; then + sudo mkdir -p $GLANCE_CONF_DIR + fi + sudo chown `whoami` $GLANCE_CONF_DIR + + # Copy over our glance configurations and update them + cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF + iniset $GLANCE_REGISTRY_CONF DEFAULT debug True + inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file + iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $BASE_SQL_CONN/glance?charset=utf8 + iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG + iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone + iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance + iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + + cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF + iniset $GLANCE_API_CONF DEFAULT debug True + inicomment $GLANCE_API_CONF DEFAULT log_file + iniset $GLANCE_API_CONF DEFAULT sql_connection $BASE_SQL_CONN/glance?charset=utf8 + iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG + iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/ + iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ + iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement + iniset $GLANCE_API_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $GLANCE_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $GLANCE_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $GLANCE_API_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $GLANCE_API_CONF keystone_authtoken admin_user glance + iniset $GLANCE_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + + cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI + + cp -p $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI + + cp $GLANCE_DIR/etc/glance-cache.conf $GLANCE_CACHE_CONF + iniset $GLANCE_CACHE_CONF DEFAULT debug True + inicomment $GLANCE_CACHE_CONF DEFAULT log_file + iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG + iniset $GLANCE_CACHE_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/ + iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ + iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_url + iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 + iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_tenant_name + iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_TENANT_NAME + iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_user + iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance + iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_password + iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD + + cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON + +} + +# init_glance() - Initialize databases, etc. 
+function init_glance() { + # Delete existing images + rm -rf $GLANCE_IMAGE_DIR + mkdir -p $GLANCE_IMAGE_DIR + + # Delete existing cache + rm -rf $GLANCE_CACHE_DIR + mkdir -p $GLANCE_CACHE_DIR + + # (re)create glance database + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS glance;' + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE glance CHARACTER SET utf8;' + + $GLANCE_BIN_DIR/glance-manage db_sync +} + +# install_glanceclient() - Collect source and prepare +function install_glanceclient() { + git_clone $GLANCECLIENT_REPO $GLANCECLIENT_DIR $GLANCECLIENT_BRANCH +} + +# install_glance() - Collect source and prepare +function install_glance() { + git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH +} + +# start_glance() - Start running processes, including screen +function start_glance() { + screen_it g-reg "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" + screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" + echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then + echo "g-api did not start" + exit 1 + fi +} + +# stop_glance() - Stop running processes (non-screen) +function stop_glance() { + # Kill the Glance screen windows + screen -S $SCREEN_NAME -p g-api -X kill + screen -S $SCREEN_NAME -p g-reg -X kill +} + +# Restore xtrace +$XTRACE diff --git a/stack.sh b/stack.sh index 9184c29e..66b7dda2 100755 --- a/stack.sh +++ b/stack.sh @@ -313,6 +313,7 @@ SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} # Get project function libraries source $TOP_DIR/lib/keystone +source $TOP_DIR/lib/glance source $TOP_DIR/lib/cinder source $TOP_DIR/lib/n-vol source $TOP_DIR/lib/ceilometer @@ -343,19 +344,6 @@ else NOVA_BIN_DIR=/usr/local/bin fi -# Glance defaults -GLANCE_DIR=$DEST/glance -GLANCECLIENT_DIR=$DEST/python-glanceclient -GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} -GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images} - -# Support entry points installation of console scripts -if [[ -d $GLANCE_DIR/bin ]]; then - GLANCE_BIN_DIR=$GLANCE_DIR/bin -else - GLANCE_BIN_DIR=/usr/local/bin -fi - # Default Quantum Plugin Q_PLUGIN=${Q_PLUGIN:-openvswitch} # Default Quantum Port @@ -518,13 +506,6 @@ if is_service_enabled rabbit; then fi -# Glance -# ------ - -# Glance connection info. Note the port must be specified. 
-GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292} - - # Swift # ----- @@ -847,13 +828,13 @@ pip_install $(get_packages $FILES/pips | sort -u) echo_summary "Installing OpenStack project source" install_keystoneclient +install_glanceclient git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH # Check out the client libs that are used most git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH -git_clone $GLANCECLIENT_REPO $GLANCECLIENT_DIR $GLANCECLIENT_BRANCH # glance, swift middleware and nova api needs keystone middleware if is_service_enabled key g-api n-api swift; then @@ -872,7 +853,7 @@ if is_service_enabled swift; then fi if is_service_enabled g-api n-api; then # image catalog service - git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH + install_glance fi if is_service_enabled n-novnc; then # a websockets/html5 or flash powered VNC console for vm instances @@ -921,12 +902,12 @@ if is_service_enabled swift3; then setup_develop $SWIFT3_DIR fi if is_service_enabled g-api n-api; then - setup_develop $GLANCE_DIR + configure_glance fi # Do this _after_ glance is installed to override the old binary # TODO(dtroyer): figure out when this is no longer necessary -setup_develop $GLANCECLIENT_DIR +configure_glanceclient setup_develop $NOVA_DIR if is_service_enabled horizon; then @@ -1135,56 +1116,7 @@ fi if is_service_enabled g-reg; then echo_summary "Configuring Glance" - GLANCE_CONF_DIR=/etc/glance - if [[ ! -d $GLANCE_CONF_DIR ]]; then - sudo mkdir -p $GLANCE_CONF_DIR - fi - sudo chown `whoami` $GLANCE_CONF_DIR - - # Delete existing images - rm -rf $GLANCE_IMAGE_DIR - mkdir -p $GLANCE_IMAGE_DIR - - # Delete existing cache - rm -rf $GLANCE_CACHE_DIR - mkdir -p $GLANCE_CACHE_DIR - - # (re)create glance database - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS glance;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE glance CHARACTER SET utf8;' - - # Copy over our glance configurations and update them - GLANCE_REGISTRY_CONF=$GLANCE_CONF_DIR/glance-registry.conf - cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF - iniset $GLANCE_REGISTRY_CONF DEFAULT debug True - inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file - iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $BASE_SQL_CONN/glance?charset=utf8 - iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG - iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone - iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ - iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance - iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - - GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf - cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF - iniset $GLANCE_API_CONF DEFAULT debug True - inicomment $GLANCE_API_CONF DEFAULT log_file - iniset $GLANCE_API_CONF DEFAULT sql_connection $BASE_SQL_CONN/glance?charset=utf8 - iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG - iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/ - iniset $GLANCE_API_CONF DEFAULT 
image_cache_dir $GLANCE_CACHE_DIR/ - iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement - iniset $GLANCE_API_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST - iniset $GLANCE_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT - iniset $GLANCE_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset $GLANCE_API_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ - iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $GLANCE_API_CONF keystone_authtoken admin_user glance - iniset $GLANCE_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + init_glance # Store the images in swift if enabled. if is_service_enabled swift; then @@ -1194,35 +1126,6 @@ if is_service_enabled g-reg; then iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True fi - - GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini - cp $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI - - GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini - cp $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI - - GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf - cp $GLANCE_DIR/etc/glance-cache.conf $GLANCE_CACHE_CONF - iniset $GLANCE_CACHE_CONF DEFAULT debug True - inicomment $GLANCE_CACHE_CONF DEFAULT log_file - iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG - iniset $GLANCE_CACHE_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/ - iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ - iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_url - iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 - iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_tenant_name - iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_TENANT_NAME - iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_user - iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance - iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_password - iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD - - - GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json - cp $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON - - $GLANCE_BIN_DIR/glance-manage db_sync - fi @@ -2198,20 +2101,10 @@ fi # so send the start command by forcing text into the window. # Only run the services specified in ``ENABLED_SERVICES`` -# Launch the glance registry service -if is_service_enabled g-reg; then +# Launch the Glance services +if is_service_enabled g-api g-reg; then echo_summary "Starting Glance" - screen_it g-reg "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" -fi - -# Launch the glance api and wait for it to answer before continuing -if is_service_enabled g-api; then - screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" - echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
http_proxy= wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then - echo "g-api did not start" - exit 1 - fi + start_glance fi # Create an access key and secret key for nova ec2 register image From b782a2c0f329653a94e9fcbb9de62659b47cd663 Mon Sep 17 00:00:00 2001 From: John Dunning Date: Tue, 11 Sep 2012 16:13:37 -0400 Subject: [PATCH 692/967] Update stack.sh to track quantum rootwrap Resubmit of https://review.openstack.org/12822 Fix bugs 1044084 and 1048483 Copy new conf files into /etc at stack time. iniset the agents' init files to include new rootwrap conf Launch agents as regular user, not root Fix service launch of ovs Correctly handle qemu.conf permissions Change-Id: Ib6b8a97698df1b816eecc18d1df11267cb027a3d --- stack.sh | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/stack.sh b/stack.sh index 66b7dda2..545c0714 100755 --- a/stack.sh +++ b/stack.sh @@ -769,6 +769,8 @@ if is_service_enabled q-agt; then else ### FIXME(dtroyer): Find RPMs for OpenVSwitch echo "OpenVSwitch packages need to be located" + # Fedora does not started OVS by default + restart_service openvswitch fi elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then install_package bridge-utils @@ -1230,6 +1232,13 @@ if is_service_enabled quantum; then Q_CONF_FILE=/etc/quantum/quantum.conf cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE + Q_RR_CONF_FILE=/etc/quantum/rootwrap.conf + cp -p $QUANTUM_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE + + # Copy over the config and filter bits + Q_CONF_ROOTWRAP_D=/etc/quantum/rootwrap.d + mkdir -p $Q_CONF_ROOTWRAP_D + cp -pr $QUANTUM_DIR/etc/quantum/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ fi # Quantum service (for controller node) @@ -1336,6 +1345,8 @@ if is_service_enabled q-agt; then if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS fi + # Update config w/rootwrap + iniset /$Q_PLUGIN_CONF_FILE OVS root_helper #Q_RR_CONF_FILE AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent" elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then # Setup physical network interface mappings. Override @@ -1347,6 +1358,8 @@ if is_service_enabled q-agt; then if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS fi + # Update config w/rootwrap + iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE root_helper #Q_RR_CONF_FILE AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" fi fi @@ -1367,6 +1380,9 @@ if is_service_enabled q-dhcp; then quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url + # Update config w/rootwrap + iniset /$Q_DHCP_CONF_FILE DEFAULT root_helper #Q_RR_CONF_FILE + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then @@ -1548,8 +1564,7 @@ if is_service_enabled n-cpu; then QEMU_CONF=/etc/libvirt/qemu.conf if is_service_enabled quantum && [[ $Q_PLUGIN = "openvswitch" ]] && ! 
sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces - sudo chmod 666 $QEMU_CONF - sudo cat <> /etc/libvirt/qemu.conf + cat < Date: Fri, 21 Sep 2012 00:23:41 +0000 Subject: [PATCH 693/967] Change default zone back to nova Change-Id: I9a097ab77c6b9fab3f33a245bca3bc0037f67fd1 --- exercises/euca.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index 79405c20..fd116227 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -24,7 +24,7 @@ set -o xtrace # Keep track of the current directory EXERCISE_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) -VOLUME_ZONE=cinder +VOLUME_ZONE=nova VOLUME_SIZE=1 ATTACH_DEVICE=/dev/vdc From 699a29f72decbfc1edabde3042a236ba0ae5760f Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 10 Sep 2012 14:10:27 -0500 Subject: [PATCH 694/967] Implement screen process stop() function Implements screen process stop in ceilometer, cinder, glance, heat, n-vol Change-Id: Ic5e02926f026d0e6f4b39846bfe77634dd414c60 --- lib/ceilometer | 8 ++++++++ lib/cinder | 7 +++++-- lib/glance | 2 +- lib/heat | 8 +++++--- lib/n-vol | 5 +++-- 5 files changed, 22 insertions(+), 8 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 02087537..972621d6 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -72,5 +72,13 @@ function start_ceilometer() { screen_it ceilometer-api "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR" } +# stop_ceilometer() - Stop running processes +function stop_ceilometer() { + # Kill the ceilometer screen windows + for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api; do + screen -S $SCREEN_NAME -p $serv -X kill + done +} + # Restore xtrace $XTRACE diff --git a/lib/cinder b/lib/cinder index de2debce..14c2df8b 100644 --- a/lib/cinder +++ b/lib/cinder @@ -225,9 +225,12 @@ function start_cinder() { screen_it c-sch "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF" } -# stop_cinder() - Stop running processes (non-screen) +# stop_cinder() - Stop running processes function stop_cinder() { - # FIXME(dtroyer): stop only the cinder screen window? 
+ # Kill the cinder screen windows + for serv in c-api c-sch c-vol; do + screen -S $SCREEN_NAME -p $serv -X kill + done if is_service_enabled c-vol; then stop_service tgt diff --git a/lib/glance b/lib/glance index 44990f14..4cc6253e 100644 --- a/lib/glance +++ b/lib/glance @@ -169,7 +169,7 @@ function start_glance() { fi } -# stop_glance() - Stop running processes (non-screen) +# stop_glance() - Stop running processes function stop_glance() { # Kill the Glance screen windows screen -S $SCREEN_NAME -p g-api -X kill diff --git a/lib/heat b/lib/heat index e713b399..6e823f2f 100644 --- a/lib/heat +++ b/lib/heat @@ -173,10 +173,12 @@ function start_heat() { screen_it h-meta "cd $HEAT_DIR; bin/heat-metadata --config-dir=$HEAT_CONF_DIR/heat-metadata.conf" } -# stop_heat() - Stop running processes (non-screen) +# stop_heat() - Stop running processes function stop_heat() { - # This function intentionally left blank - : + # Kill the heat screen windows + for serv in h-eng h-api-cfn h-api-cw h-meta; do + screen -S $SCREEN_NAME -p $serv -X kill + done } # Restore xtrace diff --git a/lib/n-vol b/lib/n-vol index cc669cbd..db53582b 100644 --- a/lib/n-vol +++ b/lib/n-vol @@ -114,9 +114,10 @@ function start_nvol() { screen_it n-vol "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-volume" } -# stop_nvol() - Stop running processes (non-screen) +# stop_nvol() - Stop running processes function stop_nvol() { - # FIXME(dtroyer): stop only the n-vol screen window? + # Kill the nova volume screen window + screen -S $SCREEN_NAME -p n-vol -X kill stop_service tgt } From dc9e2880a3510205f3e2d0a7db3185005303b42d Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 21 Sep 2012 23:20:06 +0000 Subject: [PATCH 695/967] Modify euca test to get zone using describe This modifies the euca test to include describe-availability-zones so that it is covered. This allows us to not need to hard-code a zone name for the volume tests. Change-Id: Iaae0589d4338d948981ca6e2229d2ceb73ff38ef --- exercises/euca.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index fd116227..58b5d914 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -24,7 +24,6 @@ set -o xtrace # Keep track of the current directory EXERCISE_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) -VOLUME_ZONE=nova VOLUME_SIZE=1 ATTACH_DEVICE=/dev/vdc @@ -75,11 +74,15 @@ fi # Volumes # ------- if [[ "$ENABLED_SERVICES" =~ "n-vol" || "$ENABLED_SERVICES" =~ "c-vol" ]]; then + VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2` + die_if_not_set VOLUME_ZONE "Failure to find zone for volume" + VOLUME=`euca-create-volume -s 1 -z $VOLUME_ZONE | cut -f2` die_if_not_set VOLUME "Failure to create volume" # Test that volume has been created VOLUME=`euca-describe-volumes | cut -f2` + die_if_not_set VOLUME "Failure to get volume" # Test volume has become available if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then From 918360ee87d37f232868ce4c9fd4ff5340ffbeff Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Sat, 22 Sep 2012 10:51:31 -0400 Subject: [PATCH 696/967] Ignore .pem files Ignore certificate files created by running stack.sh.
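(A quick way to see what the new ignore pattern buys you from a devstack checkout; the cert.pem name below is only an example of the kind of file a stack.sh run leaves behind:)

    git status --short            # before: leftover certs show up as untracked, e.g. '?? cert.pem'
    git status --short --ignored  # with *.pem ignored they only appear here, flagged '!! cert.pem'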
Change-Id: Ib11e87261dad5985e7852c339243035d715924ed Signed-off-by: Doug Hellmann --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index c5744b3b..17cb38c8 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,4 @@ local.sh files/*.gz files/images stack-screenrc +*.pem From c5259b4ab54841b8f9c4761d17b1c564fb88768c Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Sat, 22 Sep 2012 10:52:31 -0400 Subject: [PATCH 697/967] Fix test for creating ceilometer conf dir Fix the test run before the ceilometer configuration directory is created so that the dir is made if it does not exist. Change-Id: I2d6acd4fe7959f976ce99582aed69a49fc3f212e Signed-off-by: Doug Hellmann --- lib/ceilometer | 4 ++-- stack.sh | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 02087537..568a544b 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -48,10 +48,10 @@ function cleanup_ceilometer() { function configure_ceilometer() { setup_develop $CEILOMETER_DIR - [ -d $CEILOMETER_CONF_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_CONF_DIR + [ ! -d $CEILOMETER_CONF_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_CONF_DIR sudo chown $USER $CEILOMETER_CONF_DIR - [ ! -d $CEILOMETER_API_LOG_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_API_LOG_DIR + [ ! -d $CEILOMETER_API_LOG_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_API_LOG_DIR sudo chown $USER $CEILOMETER_API_LOG_DIR # ceilometer confs are copy of /etc/nova/nova.conf which must exist first diff --git a/stack.sh b/stack.sh index 545c0714..e0df6ec1 100755 --- a/stack.sh +++ b/stack.sh @@ -2222,8 +2222,9 @@ if is_service_enabled cinder; then start_cinder fi if is_service_enabled ceilometer; then - echo_summary "Starting Ceilometer" + echo_summary "Configuring Ceilometer" configure_ceilometer + echo_summary "Starting Ceilometer" start_ceilometer fi screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log" From cbeeccbb8530fdbf00091a11893454fe318b0398 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Sun, 23 Sep 2012 08:18:10 +0000 Subject: [PATCH 698/967] Fixes Quantum rootwrap configuration The patch enables the user to configure the quantum rootwrap status (Q_USE_ROOTWRAP). This is enabled by default. 
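(A minimal localrc sketch of how this switch is intended to be used; Q_USE_ROOTWRAP itself comes from the diff below, everything else here is illustrative:)

    # localrc: run the Quantum agents through plain sudo instead of the
    # quantum-rootwrap filter; leave it unset (default True) to keep rootwrap enabled
    Q_USE_ROOTWRAP=False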
Change-Id: I2513caef972f20faa1e4fc8b3905a1f0ea0ba2db --- stack.sh | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/stack.sh b/stack.sh index 545c0714..01e7bdef 100755 --- a/stack.sh +++ b/stack.sh @@ -357,6 +357,7 @@ Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum} Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} # Use namespace or not Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} +Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP=:-True} # Meta data IP Q_META_DATA_IP=${Q_META_DATA_IP:-} @@ -1233,6 +1234,11 @@ if is_service_enabled quantum; then Q_CONF_FILE=/etc/quantum/quantum.conf cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE Q_RR_CONF_FILE=/etc/quantum/rootwrap.conf + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + Q_RR_COMMAND="sudo" + else + Q_RR_COMMAND="sudo $QUANTUM_DIR/bin/quantum-rootwrap $Q_RR_CONF_FILE" + fi cp -p $QUANTUM_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE # Copy over the config and filter bits @@ -1345,8 +1351,6 @@ if is_service_enabled q-agt; then if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS fi - # Update config w/rootwrap - iniset /$Q_PLUGIN_CONF_FILE OVS root_helper #Q_RR_CONF_FILE AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent" elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then # Setup physical network interface mappings. Override @@ -1358,10 +1362,10 @@ if is_service_enabled q-agt; then if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS fi - # Update config w/rootwrap - iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE root_helper #Q_RR_CONF_FILE AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" fi + # Update config w/rootwrap + iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" fi # Quantum DHCP @@ -1381,7 +1385,7 @@ if is_service_enabled q-dhcp; then quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url # Update config w/rootwrap - iniset /$Q_DHCP_CONF_FILE DEFAULT root_helper #Q_RR_CONF_FILE + iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" if [[ "$Q_PLUGIN" = "openvswitch" ]]; then iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver @@ -1406,6 +1410,8 @@ if is_service_enabled q-l3; then iniset $Q_L3_CONF_FILE DEFAULT metadata_ip $Q_META_DATA_IP iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + quantum_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url if [[ "$Q_PLUGIN" == "openvswitch" ]]; then iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver From efdf3ffc9fafe24feb885399807e2bad29b60caa Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Mon, 24 Sep 2012 12:47:44 -0400 Subject: [PATCH 699/967] VERBOSE is True if true, not 'yes' fixes bug #1052901 Change-Id: I77b08cbc713cc62808c1ea17aa7f766a56783587 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 545c0714..36c4b9b5 100755 --- a/stack.sh +++ b/stack.sh @@ -640,7 +640,7 @@ else # Set up output redirection without log files # Copy stdout to fd 3 exec 3>&1 - if [[ "$VERBOSE" != "yes" ]]; then + if [[ "$VERBOSE" != "True" ]]; then # Throw away stdout and stderr exec 1>/dev/null 2>&1 fi From baa8b42af8d128d145d7a0d2b100bf8577007145 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 24 Sep 2012 15:02:05 -0500 Subject: [PATCH 700/967] Fix logfile output getting stomped The LOGFILE would get stomped due to 
a non-appending tee command. Change-Id: I851ee83171c27fc425c0d666fcf84437f0d118b2 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 545c0714..4dfa9aa3 100755 --- a/stack.sh +++ b/stack.sh @@ -2287,7 +2287,7 @@ set +o xtrace if [[ -n "$LOGFILE" ]]; then exec 1>&3 # Force all output to stdout and logs now - exec 1> >( tee "${LOGFILE}" ) 2>&1 + exec 1> >( tee -a "${LOGFILE}" ) 2>&1 else # Force all output to stdout now exec 1>&3 From 7879a3cecc6bd46f86f440e44f6ef4515680240c Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 21 Sep 2012 15:36:06 -0700 Subject: [PATCH 701/967] Add a spinner so users know the script is going The spinner will appear if VERBOSE=False. Change-Id: I6413decbab2da3996dce5150a4600701eadd6f82 --- stack.sh | 37 ++++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 545c0714..23c728ba 100755 --- a/stack.sh +++ b/stack.sh @@ -584,10 +584,35 @@ APACHE_GROUP=${APACHE_GROUP:-$APACHE_USER} # Log files # --------- +# Draw a spinner so the user knows something is happening +function spinner() +{ + local delay=0.75 + local spinstr='|/-\' + printf "..." >&3 + while [ true ]; do + local temp=${spinstr#?} + printf "[%c]" "$spinstr" >&3 + local spinstr=$temp${spinstr%"$temp"} + sleep $delay + printf "\b\b\b" >&3 + done +} + # Echo text to the log file, summary log file and stdout # echo_summary "something to say" function echo_summary() { - echo $@ >&6 + if [[ -t 3 && "$VERBOSE" != "True" ]]; then + kill >/dev/null 2>&1 $LAST_SPINNER_PID + if [ ! -z "$LAST_SPINNER_PID" ]; then + printf "\b\b\bdone\n" >&3 + fi + echo -n $@ >&6 + spinner & + LAST_SPINNER_PID=$! + else + echo $@ >&6 + fi } # Echo text only to stdout, no log files @@ -669,10 +694,20 @@ fi # Set Up Script Execution # ----------------------- +# Kill background processes on exit +trap clean EXIT +clean() { + local r=$? + kill >/dev/null 2>&1 $(jobs -p) + exit $r +} + + # Exit on any errors so that errors don't compound trap failed ERR failed() { local r=$? + kill >/dev/null 2>&1 $(jobs -p) set +o xtrace [ -n "$LOGFILE" ] && echo "${0##*/} failed: full log in $LOGFILE" exit $r From c6cc585f9740fdf98d7068e0269f7ba8276cb24a Mon Sep 17 00:00:00 2001 From: Eoghan Glynn Date: Tue, 25 Sep 2012 18:16:59 +0100 Subject: [PATCH 702/967] Ensure correct cinder dir is written to tgt config The wrong directory was being placed in the tgtd config. This change will allow https://review.openstack.org/13633 to gate. Change-Id: Icbf7b5ecc9bc53ccc2aed0cacb9f5f61abe8f882 --- lib/cinder | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index 14c2df8b..08c840e8 100644 --- a/lib/cinder +++ b/lib/cinder @@ -208,7 +208,7 @@ function start_cinder() { if [[ "$os_PACKAGE" = "deb" ]]; then _configure_tgt_for_config_d if [[ ! 
-f /etc/tgt/conf.d/cinder.conf ]]; then - echo "include $CINDER_DIR/volumes/*" | sudo tee /etc/tgt/conf.d/cinder.conf + echo "include $CINDER_STATE_PATH/volumes/*" | sudo tee /etc/tgt/conf.d/cinder.conf fi # tgt in oneiric doesn't restart properly if tgtd isn't running # do it in two steps From c8dc1f363ae8e0265bd945ebdf3516bb3e278871 Mon Sep 17 00:00:00 2001 From: Dan Wendlandt Date: Wed, 26 Sep 2012 01:04:55 -0700 Subject: [PATCH 703/967] update quantum setup to automatically add route to fixed_range - allows metadata service to work out of the box for VMs in fixed_range - allows direct access to VMs via their fixed_ips from the devstack host. Change-Id: I24da91fdf184e195185462554c044ee6f65d58ce --- stack.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 5ea1b9b7..fe6c896e 100755 --- a/stack.sh +++ b/stack.sh @@ -359,7 +359,7 @@ Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP=:-True} # Meta data IP -Q_META_DATA_IP=${Q_META_DATA_IP:-} +Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP} # Name of the LVM volume group to use/create for iscsi volumes VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} @@ -2216,10 +2216,12 @@ if is_service_enabled q-svc; then EXT_NET_ID=$(quantum net-create ext_net -- --router:external=True | grep ' id ' | get_field 2) EXT_GW_IP=$(quantum subnet-create --ip_version 4 $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) quantum router-gateway-set $ROUTER_ID $EXT_NET_ID - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + if [[ "$Q_PLUGIN" = "openvswitch" ]] && [[ "$Q_USE_NAMESPACE" = "True" ]]; then CIDR_LEN=${FLOATING_RANGE#*/} sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE sudo ip link set $PUBLIC_BRIDGE up + ROUTER_GW_IP=`quantum port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' '{ print $8; }'` + sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP fi if [[ "$Q_USE_NAMESPACE" == "False" ]]; then # Explicitly set router id in l3 agent configuration From 496ffc74bb255efce367033d56f23f626efa4db4 Mon Sep 17 00:00:00 2001 From: John Griffith Date: Wed, 26 Sep 2012 15:09:52 -0600 Subject: [PATCH 704/967] Add timing info to volume exercises. Looking at some failures lately in Jenkins/Devstack runs and it would be handy to see if failures were time-out related versus flat out failed operations. More interestingly it might be worthwile to harvest the completion time info from the jenkins logs and keep track of any significant deviations introduced by code changes. Change-Id: I3bbcc5b9f8a4da2fcdb9f6f70913c2d6bc6e2b9b --- exercises/volumes.sh | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 5db10d39..1c6320c0 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -154,10 +154,16 @@ if [[ $? != 0 ]]; then echo "Failure creating volume $VOL_NAME" exit 1 fi + +start_time=`date +%s` if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then echo "Volume $VOL_NAME not created" + end_time=`date +%s` + echo "Failed volume-create after $((end_time - start_time)) seconds" exit 1 fi +end_time=`date +%s` +echo "Completed volume-create in $((end_time - start_time)) seconds" # Get volume ID VOL_ID=`nova volume-list | grep $VOL_NAME | head -1 | get_field 1` @@ -165,12 +171,17 @@ die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME" # Attach to server DEVICE=/dev/vdb +start_time=`date +%s` nova volume-attach $VM_UUID $VOL_ID $DEVICE || \ die "Failure attaching volume $VOL_NAME to $NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then echo "Volume $VOL_NAME not attached to $NAME" + end_time=`date +%s` + echo "Failed volume-attach after $((end_time - start_time)) seconds" exit 1 fi +end_time=`date +%s` +echo "Completed volume-attach in $((end_time - start_time)) seconds" VOL_ATTACH=`nova volume-list | grep $VOL_NAME | head -1 | get_field -1` die_if_not_set VOL_ATTACH "Failure retrieving $VOL_NAME status" @@ -180,18 +191,28 @@ if [[ "$VOL_ATTACH" != $VM_UUID ]]; then fi # Detach volume +start_time=`date +%s` nova volume-detach $VM_UUID $VOL_ID || die "Failure detaching volume $VOL_NAME from $NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then echo "Volume $VOL_NAME not detached from $NAME" + end_time=`date +%s` + echo "Failed volume-detach after $((end_time - start_time)) seconds" exit 1 fi +end_time=`date +%s` +echo "Completed volume-detach in $((end_time - start_time)) seconds" # Delete volume +start_time=`date +%s` nova volume-delete $VOL_ID || die "Failure deleting volume $VOL_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
nova volume-list | grep $VOL_NAME; do sleep 1; done"; then echo "Volume $VOL_NAME not deleted" + end_time=`date +%s` + echo "Failed volume-delete after $((end_time - start_time)) seconds" exit 1 fi +end_time=`date +%s` +echo "Completed volume-delete in $((end_time - start_time)) seconds" # Shutdown the server nova delete $VM_UUID || die "Failure deleting instance $NAME" From ad80eadb084b476ca5df67c968cb22df722d8cfc Mon Sep 17 00:00:00 2001 From: Eoghan Glynn Date: Thu, 27 Sep 2012 09:36:33 +0100 Subject: [PATCH 705/967] Config for ceilometer gathering from glance Some changes are required so that ceilometer can gather usage data from glance (notification & polling) out-of-the-box in devstack: - configure glance to emit notifications if rabbitmq or qpid is enabled - configure the ceilometer collector to consume notifications on the default glance topic (glance_notifications.*) - pass credentials to ceilometer central agent so that it authtenticate polling calls to glance Change-Id: I0eac223eddb615266e28447b18fcaaadcd40dddf --- lib/ceilometer | 6 +++++- lib/glance | 7 +++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/lib/ceilometer b/lib/ceilometer index 35d25079..bea68ed0 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -6,6 +6,9 @@ # Dependencies: # - functions +# - OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL set for admin credentials +# - DEST set to the destination directory +# - NOVA_CONF_DIR, NOVA_CONF set to the nova configuration directory & file # stack.sh # --------- @@ -57,6 +60,7 @@ function configure_ceilometer() { # ceilometer confs are copy of /etc/nova/nova.conf which must exist first grep -v format_string $NOVA_CONF_DIR/$NOVA_CONF > $CEILOMETER_AGENT_CONF grep -v format_string $NOVA_CONF_DIR/$NOVA_CONF > $CEILOMETER_COLLECTOR_CONF + iniset $CEILOMETER_COLLECTOR_CONF DEFAULT notification_topics 'notifications,glance_notifications' } # install_ceilometer() - Collect source and prepare @@ -67,7 +71,7 @@ function install_ceilometer() { # start_ceilometer() - Start running processes, including screen function start_ceilometer() { screen_it ceilometer-acompute "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_AGENT_CONF" - screen_it ceilometer-acentral "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_AGENT_CONF" + screen_it ceilometer-acentral "export OS_USERNAME=$OS_USERNAME OS_PASSWORD=$OS_PASSWORD OS_TENANT_NAME=$OS_TENANT_NAME OS_AUTH_URL=$OS_AUTH_URL && cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_AGENT_CONF" screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_COLLECTOR_CONF" screen_it ceilometer-api "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR" } diff --git a/lib/glance b/lib/glance index 4cc6253e..070c80d1 100644 --- a/lib/glance +++ b/lib/glance @@ -107,6 +107,13 @@ function configure_glance() { iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $GLANCE_API_CONF keystone_authtoken admin_user glance iniset $GLANCE_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + if is_service_enabled qpid; then + iniset $GLANCE_API_CONF DEFAULT notifier_strategy qpid + elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then + iniset $GLANCE_API_CONF DEFAULT notifier_strategy rabbit + iniset $GLANCE_API_CONF DEFAULT rabbit_host $RABBIT_HOST + 
iniset $GLANCE_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD + fi cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI From bf67c19c30ec0f14034e74a86c57f7f9396a9b4d Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 21 Sep 2012 15:09:37 -0500 Subject: [PATCH 706/967] Move Nova to lib/nova The next in a line of changes to break down stack.sh and make it a bit more manageable. Part of blueprint devstack-modular Change-Id: I3fae739996aad0b340dae72ef51acd669a3ab893 --- lib/nova | 433 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 377 +++--------------------------------------------- 2 files changed, 455 insertions(+), 355 deletions(-) create mode 100644 lib/nova diff --git a/lib/nova b/lib/nova new file mode 100644 index 00000000..8308f05b --- /dev/null +++ b/lib/nova @@ -0,0 +1,433 @@ +# lib/nova +# Functions to control the configuration and operation of the XXXX service + +# Dependencies: +# ``functions`` file +# ``DEST``, ``DATA_DIR`` must be defined +# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# ``LIBVIRT_TYPE`` must be defined +# ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# install_nova +# configure_nova +# init_nova +# start_nova +# stop_nova +# cleanup_nova + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories +NOVA_DIR=$DEST/nova +NOVACLIENT_DIR=$DEST/python-novaclient +NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova} +# INSTANCES_PATH is the previous name for this +NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}} + +NOVA_CONF_DIR=/etc/nova +NOVA_CONF=$NOVA_CONF_DIR/nova.conf +NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} + +# Support entry points installation of console scripts +if [[ -d $NOVA_DIR/bin ]]; then + NOVA_BIN_DIR=$NOVA_DIR/bin +else + NOVA_BIN_DIR=/usr/local/bin +fi + +# Set the paths of certain binaries +if [[ "$os_PACKAGE" = "deb" ]]; then + NOVA_ROOTWRAP=/usr/local/bin/nova-rootwrap +else + NOVA_ROOTWRAP=/usr/bin/nova-rootwrap +fi + +# Allow rate limiting to be turned off for testing, like for Tempest +# NOTE: Set API_RATE_LIMIT="False" to turn OFF rate limiting +API_RATE_LIMIT=${API_RATE_LIMIT:-"True"} + +# Nova supports pluggable schedulers. The default ``FilterScheduler`` +# should work in most cases. 
+SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler} + +QEMU_CONF=/etc/libvirt/qemu.conf + + +# Entry Points +# ------------ + +function add_nova_opt { + echo "$1" >>$NOVA_CONF +} + +# Helper to clean iptables rules +function clean_iptables() { + # Delete rules + sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables",$0}' | bash + # Delete nat rules + sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables -t nat",$0}' | bash + # Delete chains + sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables",$0}' | bash + # Delete nat chains + sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables -t nat",$0}' | bash +} + +# cleanup_nova() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_nova() { + if is_service_enabled n-cpu; then + # Clean iptables from previous runs + clean_iptables + + # Destroy old instances + instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"` + if [ ! "$instances" = "" ]; then + echo $instances | xargs -n1 sudo virsh destroy || true + echo $instances | xargs -n1 sudo virsh undefine || true + fi + + # Logout and delete iscsi sessions + sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | xargs sudo iscsiadm --mode node --logout || true + sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | sudo iscsiadm --mode node --op delete || true + + # Clean out the instances directory. + sudo rm -rf $NOVA_INSTANCES_PATH/* + fi +} + +# configure_novaclient() - Set config files, create data dirs, etc +function configure_novaclient() { + setup_develop $NOVACLIENT_DIR +} + +# configure_nova_rootwrap() - configure Nova's rootwrap +function configure_nova_rootwrap() { + # Deploy new rootwrap filters files (owned by root). + # Wipe any existing rootwrap.d files first + if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then + sudo rm -rf $NOVA_CONF_DIR/rootwrap.d + fi + # Deploy filters to /etc/nova/rootwrap.d + sudo mkdir -m 755 $NOVA_CONF_DIR/rootwrap.d + sudo cp $NOVA_DIR/etc/nova/rootwrap.d/*.filters $NOVA_CONF_DIR/rootwrap.d + sudo chown -R root:root $NOVA_CONF_DIR/rootwrap.d + sudo chmod 644 $NOVA_CONF_DIR/rootwrap.d/* + # Set up rootwrap.conf, pointing to /etc/nova/rootwrap.d + sudo cp $NOVA_DIR/etc/nova/rootwrap.conf $NOVA_CONF_DIR/ + sudo sed -e "s:^filters_path=.*$:filters_path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf + sudo chown root:root $NOVA_CONF_DIR/rootwrap.conf + sudo chmod 0644 $NOVA_CONF_DIR/rootwrap.conf + # Specify rootwrap.conf as first parameter to nova-rootwrap + ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf *" + + # Set up the rootwrap sudoers for nova + TEMPFILE=`mktemp` + echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE + chmod 0440 $TEMPFILE + sudo chown root:root $TEMPFILE + sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap +} + +# configure_nova() - Set config files, create data dirs, etc +function configure_nova() { + setup_develop $NOVA_DIR + + # Put config files in ``/etc/nova`` for everyone to find + if [[ ! 
-d $NOVA_CONF_DIR ]]; then + sudo mkdir -p $NOVA_CONF_DIR + fi + sudo chown `whoami` $NOVA_CONF_DIR + + cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR + + configure_nova_rootwrap + + if is_service_enabled n-api; then + # Use the sample http middleware configuration supplied in the + # Nova sources. This paste config adds the configuration required + # for Nova to validate Keystone tokens. + + # Remove legacy paste config if present + rm -f $NOVA_DIR/bin/nova-api-paste.ini + + # Get the sample configuration file in place + cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR + + # Rewrite the authtoken configuration for our Keystone service. + # This is a bit defensive to allow the sample file some variance. + sed -e " + /^admin_token/i admin_tenant_name = $SERVICE_TENANT_NAME + /admin_tenant_name/s/^.*$/admin_tenant_name = $SERVICE_TENANT_NAME/; + /admin_user/s/^.*$/admin_user = nova/; + /admin_password/s/^.*$/admin_password = $SERVICE_PASSWORD/; + s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g; + s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g; + " -i $NOVA_API_PASTE_INI + fi + + if is_service_enabled n-cpu; then + # Force IP forwarding on, just on case + sudo sysctl -w net.ipv4.ip_forward=1 + + # Attempt to load modules: network block device - used to manage qcow images + sudo modprobe nbd || true + + # Check for kvm (hardware based virtualization). If unable to initialize + # kvm, we drop back to the slower emulation mode (qemu). Note: many systems + # come with hardware virtualization disabled in BIOS. + if [[ "$LIBVIRT_TYPE" == "kvm" ]]; then + sudo modprobe kvm || true + if [ ! -e /dev/kvm ]; then + echo "WARNING: Switching to QEMU" + LIBVIRT_TYPE=qemu + if which selinuxenabled 2>&1 > /dev/null && selinuxenabled; then + # https://bugzilla.redhat.com/show_bug.cgi?id=753589 + sudo setsebool virt_use_execmem on + fi + fi + fi + + # Install and configure **LXC** if specified. LXC is another approach to + # splitting a system into many smaller parts. LXC uses cgroups and chroot + # to simulate multiple systems. + if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then + if [[ "$os_PACKAGE" = "deb" ]]; then + if [[ ! "$DISTRO" > natty ]]; then + cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0" + sudo mkdir -p /cgroup + if ! grep -q cgroup /etc/fstab; then + echo "$cgline" | sudo tee -a /etc/fstab + fi + if ! mount -n | grep -q cgroup; then + sudo mount /cgroup + fi + fi + fi + fi + + if is_service_enabled quantum && [[ $Q_PLUGIN = "openvswitch" ]] && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then + # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces + cat </dev/null; then + sudo groupadd libvirtd + fi + sudo bash -c 'cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla +[libvirt Management Access] +Identity=unix-group:libvirtd +Action=org.libvirt.unix.manage +ResultAny=yes +ResultInactive=yes +ResultActive=yes +EOF' + LIBVIRT_DAEMON=libvirtd + fi + + # The user that nova runs as needs to be member of **libvirtd** group otherwise + # nova-compute will be unable to use libvirt. + sudo usermod -a -G libvirtd `whoami` + + # libvirt detects various settings on startup, as we potentially changed + # the system configuration (modules, filesystems), we need to restart + # libvirt to detect those changes. + restart_service $LIBVIRT_DAEMON + + + # Instance Storage + # ---------------- + + # Nova stores each instance in its own directory. 
+ mkdir -p $NOVA_INSTANCES_PATH + + # You can specify a different disk to be mounted and used for backing the + # virtual machines. If there is a partition labeled nova-instances we + # mount it (ext filesystems can be labeled via e2label). + if [ -L /dev/disk/by-label/nova-instances ]; then + if ! mount -n | grep -q $NOVA_INSTANCES_PATH; then + sudo mount -L nova-instances $NOVA_INSTANCES_PATH + sudo chown -R `whoami` $NOVA_INSTANCES_PATH + fi + fi + + # Clean up old instances + cleanup_nova + fi +} + +# init_nova() - Initialize databases, etc. +function init_nova() { + # Remove legacy ``nova.conf`` + rm -f $NOVA_DIR/bin/nova.conf + + # (Re)create ``nova.conf`` + rm -f $NOVA_CONF_DIR/$NOVA_CONF + add_nova_opt "[DEFAULT]" + add_nova_opt "verbose=True" + add_nova_opt "auth_strategy=keystone" + add_nova_opt "allow_resize_to_same_host=True" + add_nova_opt "api_paste_config=$NOVA_API_PASTE_INI" + add_nova_opt "rootwrap_config=$NOVA_CONF_DIR/rootwrap.conf" + add_nova_opt "compute_scheduler_driver=$SCHEDULER" + add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF_DIR/$NOVA_CONF" + add_nova_opt "force_dhcp_release=True" + add_nova_opt "fixed_range=$FIXED_RANGE" + add_nova_opt "s3_host=$SERVICE_HOST" + add_nova_opt "s3_port=$S3_SERVICE_PORT" + add_nova_opt "osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions" + add_nova_opt "my_ip=$HOST_IP" + add_nova_opt "sql_connection=$BASE_SQL_CONN/nova?charset=utf8" + add_nova_opt "libvirt_type=$LIBVIRT_TYPE" + add_nova_opt "libvirt_cpu_mode=none" + add_nova_opt "instance_name_template=${INSTANCE_NAME_PREFIX}%08x" + add_nova_opt "image_service=nova.image.glance.GlanceImageService" + + if is_service_enabled n-api; then + add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS" + fi + if is_service_enabled n-vol; then + add_nova_opt "volume_api_class=nova.volume.api.API" + add_nova_opt "volume_group=$VOLUME_GROUP" + add_nova_opt "volume_name_template=${VOLUME_NAME_PREFIX}%s" + # oneiric no longer supports ietadm + add_nova_opt "iscsi_helper=tgtadm" + fi + if is_service_enabled cinder; then + add_nova_opt "volume_api_class=nova.volume.cinder.API" + fi + if [ -n "$NOVA_STATE_PATH" ]; then + add_nova_opt "state_path=$NOVA_STATE_PATH" + fi + if [ -n "$NOVA_INSTANCES_PATH" ]; then + add_nova_opt "instances_path=$NOVA_INSTANCES_PATH" + fi + if [ "$MULTI_HOST" != "False" ]; then + add_nova_opt "multi_host=True" + add_nova_opt "send_arp_for_ha=True" + fi + if [ "$SYSLOG" != "False" ]; then + add_nova_opt "use_syslog=True" + fi + if [ "$API_RATE_LIMIT" != "True" ]; then + add_nova_opt "api_rate_limit=False" + fi + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + # Add color to logging output + add_nova_opt "logging_context_format_string=%(asctime)s %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" + add_nova_opt "logging_default_format_string=%(asctime)s %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + add_nova_opt "logging_debug_format_suffix=from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" + add_nova_opt "logging_exception_prefix=%(color)s%(asctime)s TRACE %(name)s %(instance)s" + else + # Show user_name and project_name instead of user_id and project_id + add_nova_opt "logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" + fi + + # Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS`` + if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; 
then + EXTRA_OPTS=$EXTRA_FLAGS + fi + + # Define extra nova conf flags by defining the array ``EXTRA_OPTS``. + # For Example: ``EXTRA_OPTS=(foo=true bar=2)`` + for I in "${EXTRA_OPTS[@]}"; do + # Attempt to convert flags to options + add_nova_opt ${I//--} + done + + # Nova Database + # ------------- + + # All nova components talk to a central database. We will need to do this step + # only once for an entire cluster. + + if is_service_enabled mysql && is_service_enabled nova; then + # (Re)create nova database + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS nova;' + + # Explicitly use latin1: to avoid lp#829209, nova expects the database to + # use latin1 by default, and then upgrades the database to utf8 (see the + # 082_essex.py in nova) + mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE nova CHARACTER SET latin1;' + + # (Re)create nova database + $NOVA_BIN_DIR/nova-manage db sync + fi + +} + +# install_novaclient() - Collect source and prepare +function install_novaclient() { + git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH +} + +# install_nova() - Collect source and prepare +function install_nova() { + if is_service_enabled n-cpu; then + if [[ "$os_PACKAGE" = "deb" ]]; then + LIBVIRT_PKG_NAME=libvirt-bin + else + LIBVIRT_PKG_NAME=libvirt + fi + install_package $LIBVIRT_PKG_NAME + # Install and configure **LXC** if specified. LXC is another approach to + # splitting a system into many smaller parts. LXC uses cgroups and chroot + # to simulate multiple systems. + if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then + if [[ "$os_PACKAGE" = "deb" ]]; then + if [[ "$DISTRO" > natty ]]; then + install_package cgroup-lite + fi + else + ### FIXME(dtroyer): figure this out + echo "RPM-based cgroup not implemented yet" + yum_install libcgroup-tools + fi + fi + fi + + git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH +} + +# start_nova() - Start running processes, including screen +function start_nova() { + # The group **libvirtd** is added to the current user in this script. + # Use 'sg' to execute nova-compute as a member of the **libvirtd** group. + # ``screen_it`` checks ``is_service_enabled``, it is not needed here + screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_BIN_DIR/nova-compute" + screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" + screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network" + screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler" + screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF --web ." + screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF" + screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" +} + +# stop_nova() - Stop running processes (non-screen) +function stop_nova() { + # Kill the nova screen windows + for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth; do + screen -S $SCREEN_NAME -p $serv -X kill + done +} + +# Restore xtrace +$XTRACE diff --git a/stack.sh b/stack.sh index 5ea1b9b7..9e6ca756 100755 --- a/stack.sh +++ b/stack.sh @@ -112,13 +112,6 @@ if [ "${DISTRO}" = "oneiric" ] && is_service_enabled qpid ; then exit 1 fi -# Set the paths of certain binaries -if [[ "$os_PACKAGE" = "deb" ]]; then - NOVA_ROOTWRAP=/usr/local/bin/nova-rootwrap -else - NOVA_ROOTWRAP=/usr/bin/nova-rootwrap -fi - # ``stack.sh`` keeps function libraries here # Make sure ``$TOP_DIR/lib`` directory is present if [ ! 
-d $TOP_DIR/lib ]; then @@ -314,6 +307,7 @@ SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} # Get project function libraries source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance +source $TOP_DIR/lib/nova source $TOP_DIR/lib/cinder source $TOP_DIR/lib/n-vol source $TOP_DIR/lib/ceilometer @@ -330,20 +324,6 @@ SWIFTCLIENT_DIR=$DEST/python-swiftclient QUANTUM_DIR=$DEST/quantum QUANTUM_CLIENT_DIR=$DEST/python-quantumclient -# Nova defaults -NOVA_DIR=$DEST/nova -NOVACLIENT_DIR=$DEST/python-novaclient -NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova} -# INSTANCES_PATH is the previous name for this -NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}} - -# Support entry points installation of console scripts -if [[ -d $NOVA_DIR/bin ]]; then - NOVA_BIN_DIR=$NOVA_DIR/bin -else - NOVA_BIN_DIR=/usr/local/bin -fi - # Default Quantum Plugin Q_PLUGIN=${Q_PLUGIN:-openvswitch} # Default Quantum Port @@ -366,10 +346,6 @@ VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-} -# Nova supports pluggable schedulers. The default ``FilterScheduler`` -# should work in most cases. -SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler} - # Generic helper to configure passwords function read_password { XTRACE=$(set +o | grep xtrace) @@ -813,30 +789,6 @@ if is_service_enabled q-agt; then fi fi -if is_service_enabled n-cpu; then - - if [[ "$os_PACKAGE" = "deb" ]]; then - LIBVIRT_PKG_NAME=libvirt-bin - else - LIBVIRT_PKG_NAME=libvirt - fi - install_package $LIBVIRT_PKG_NAME - # Install and configure **LXC** if specified. LXC is another approach to - # splitting a system into many smaller parts. LXC uses cgroups and chroot - # to simulate multiple systems. - if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then - if [[ "$os_PACKAGE" = "deb" ]]; then - if [[ "$DISTRO" > natty ]]; then - install_package cgroup-lite - fi - else - ### FIXME(dtroyer): figure this out - echo "RPM-based cgroup not implemented yet" - yum_install libcgroup-tools - fi - fi -fi - if is_service_enabled swift; then # Install memcached for swift. 
install_package memcached @@ -867,11 +819,9 @@ echo_summary "Installing OpenStack project source" install_keystoneclient install_glanceclient - -git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH +install_novaclient # Check out the client libs that are used most -git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH # glance, swift middleware and nova api needs keystone middleware @@ -893,6 +843,10 @@ if is_service_enabled g-api n-api; then # image catalog service install_glance fi +if is_service_enabled nova; then + # compute service + install_nova +fi if is_service_enabled n-novnc; then # a websockets/html5 or flash powered VNC console for vm instances git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH @@ -927,7 +881,7 @@ echo_summary "Configuring OpenStack projects" # Set up our checkouts so they are installed into python path # allowing ``import nova`` or ``import glance.client`` configure_keystoneclient -setup_develop $NOVACLIENT_DIR +configure_novaclient setup_develop $OPENSTACKCLIENT_DIR if is_service_enabled key g-api n-api swift; then configure_keystone @@ -947,7 +901,9 @@ fi # TODO(dtroyer): figure out when this is no longer necessary configure_glanceclient -setup_develop $NOVA_DIR +if is_service_enabled nova; then + configure_nova +fi if is_service_enabled horizon; then setup_develop $HORIZON_DIR fi @@ -1486,195 +1442,9 @@ fi # Nova # ---- -echo_summary "Configuring Nova" - -# Put config files in ``/etc/nova`` for everyone to find -NOVA_CONF_DIR=/etc/nova -if [[ ! -d $NOVA_CONF_DIR ]]; then - sudo mkdir -p $NOVA_CONF_DIR -fi -sudo chown `whoami` $NOVA_CONF_DIR - -cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR - -# Deploy new rootwrap filters files (owned by root). -# Wipe any existing rootwrap.d files first -if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then - sudo rm -rf $NOVA_CONF_DIR/rootwrap.d -fi -# Deploy filters to /etc/nova/rootwrap.d -sudo mkdir -m 755 $NOVA_CONF_DIR/rootwrap.d -sudo cp $NOVA_DIR/etc/nova/rootwrap.d/*.filters $NOVA_CONF_DIR/rootwrap.d -sudo chown -R root:root $NOVA_CONF_DIR/rootwrap.d -sudo chmod 644 $NOVA_CONF_DIR/rootwrap.d/* -# Set up rootwrap.conf, pointing to /etc/nova/rootwrap.d -sudo cp $NOVA_DIR/etc/nova/rootwrap.conf $NOVA_CONF_DIR/ -sudo sed -e "s:^filters_path=.*$:filters_path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf -sudo chown root:root $NOVA_CONF_DIR/rootwrap.conf -sudo chmod 0644 $NOVA_CONF_DIR/rootwrap.conf -# Specify rootwrap.conf as first parameter to nova-rootwrap -ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf *" - -# Set up the rootwrap sudoers for nova -TEMPFILE=`mktemp` -echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE -chmod 0440 $TEMPFILE -sudo chown root:root $TEMPFILE -sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap - -if is_service_enabled n-api; then - # Use the sample http middleware configuration supplied in the - # Nova sources. This paste config adds the configuration required - # for Nova to validate Keystone tokens. - - # Allow rate limiting to be turned off for testing, like for Tempest - # NOTE: Set API_RATE_LIMIT="False" to turn OFF rate limiting - API_RATE_LIMIT=${API_RATE_LIMIT:-"True"} - - # Remove legacy paste config if present - rm -f $NOVA_DIR/bin/nova-api-paste.ini - - # Get the sample configuration file in place - cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR - - # Rewrite the authtoken configuration for our Keystone service. 
- # This is a bit defensive to allow the sample file some variance. - sed -e " - /^admin_token/i admin_tenant_name = $SERVICE_TENANT_NAME - /admin_tenant_name/s/^.*$/admin_tenant_name = $SERVICE_TENANT_NAME/; - /admin_user/s/^.*$/admin_user = nova/; - /admin_password/s/^.*$/admin_password = $SERVICE_PASSWORD/; - s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g; - s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g; - " -i $NOVA_CONF_DIR/api-paste.ini -fi - -# Helper to clean iptables rules -function clean_iptables() { - # Delete rules - sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables",$0}' | bash - # Delete nat rules - sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables -t nat",$0}' | bash - # Delete chains - sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables",$0}' | bash - # Delete nat chains - sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables -t nat",$0}' | bash -} - -if is_service_enabled n-cpu; then - - # Force IP forwarding on, just on case - sudo sysctl -w net.ipv4.ip_forward=1 - - # Attempt to load modules: network block device - used to manage qcow images - sudo modprobe nbd || true - - # Check for kvm (hardware based virtualization). If unable to initialize - # kvm, we drop back to the slower emulation mode (qemu). Note: many systems - # come with hardware virtualization disabled in BIOS. - if [[ "$LIBVIRT_TYPE" == "kvm" ]]; then - sudo modprobe kvm || true - if [ ! -e /dev/kvm ]; then - echo "WARNING: Switching to QEMU" - LIBVIRT_TYPE=qemu - if which selinuxenabled 2>&1 > /dev/null && selinuxenabled; then - # https://bugzilla.redhat.com/show_bug.cgi?id=753589 - sudo setsebool virt_use_execmem on - fi - fi - fi - - # Install and configure **LXC** if specified. LXC is another approach to - # splitting a system into many smaller parts. LXC uses cgroups and chroot - # to simulate multiple systems. - if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then - if [[ "$os_PACKAGE" = "deb" ]]; then - if [[ ! "$DISTRO" > natty ]]; then - cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0" - sudo mkdir -p /cgroup - if ! grep -q cgroup /etc/fstab; then - echo "$cgline" | sudo tee -a /etc/fstab - fi - if ! mount -n | grep -q cgroup; then - sudo mount /cgroup - fi - fi - fi - fi - - QEMU_CONF=/etc/libvirt/qemu.conf - if is_service_enabled quantum && [[ $Q_PLUGIN = "openvswitch" ]] && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then - # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces - cat </dev/null; then - sudo groupadd libvirtd - fi - sudo bash -c 'cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla -[libvirt Management Access] -Identity=unix-group:libvirtd -Action=org.libvirt.unix.manage -ResultAny=yes -ResultInactive=yes -ResultActive=yes -EOF' - LIBVIRT_DAEMON=libvirtd - fi - - # The user that nova runs as needs to be member of **libvirtd** group otherwise - # nova-compute will be unable to use libvirt. - sudo usermod -a -G libvirtd `whoami` - - # libvirt detects various settings on startup, as we potentially changed - # the system configuration (modules, filesystems), we need to restart - # libvirt to detect those changes. 
- restart_service $LIBVIRT_DAEMON - - - # Instance Storage - # ~~~~~~~~~~~~~~~~ - - # Nova stores each instance in its own directory. - mkdir -p $NOVA_INSTANCES_PATH - - # You can specify a different disk to be mounted and used for backing the - # virtual machines. If there is a partition labeled nova-instances we - # mount it (ext filesystems can be labeled via e2label). - if [ -L /dev/disk/by-label/nova-instances ]; then - if ! mount -n | grep -q $NOVA_INSTANCES_PATH; then - sudo mount -L nova-instances $NOVA_INSTANCES_PATH - sudo chown -R `whoami` $NOVA_INSTANCES_PATH - fi - fi - - # Clean iptables from previous runs - clean_iptables - - # Destroy old instances - instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"` - if [ ! "$instances" = "" ]; then - echo $instances | xargs -n1 sudo virsh destroy || true - echo $instances | xargs -n1 sudo virsh undefine || true - fi - - # Logout and delete iscsi sessions - sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | xargs sudo iscsiadm --mode node --logout || true - sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | sudo iscsiadm --mode node --op delete || true - - # Clean out the instances directory. - sudo rm -rf $NOVA_INSTANCES_PATH/* +if is_service_enabled nova; then + echo_summary "Configuring Nova" + configure_nova fi if is_service_enabled n-net q-dhcp; then @@ -1955,26 +1725,12 @@ elif is_service_enabled n-vol; then init_nvol fi -NOVA_CONF=nova.conf -function add_nova_opt { - echo "$1" >> $NOVA_CONF_DIR/$NOVA_CONF -} +if is_service_enabled nova; then + echo_summary "Configuring Nova" + init_nova +fi -# Remove legacy ``nova.conf`` -rm -f $NOVA_DIR/bin/nova.conf - -# (Re)create ``nova.conf`` -rm -f $NOVA_CONF_DIR/$NOVA_CONF -add_nova_opt "[DEFAULT]" -add_nova_opt "verbose=True" -add_nova_opt "auth_strategy=keystone" -add_nova_opt "allow_resize_to_same_host=True" -add_nova_opt "rootwrap_config=$NOVA_CONF_DIR/rootwrap.conf" -add_nova_opt "compute_scheduler_driver=$SCHEDULER" -add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF_DIR/$NOVA_CONF" -add_nova_opt "fixed_range=$FIXED_RANGE" -add_nova_opt "s3_host=$SERVICE_HOST" -add_nova_opt "s3_port=$S3_SERVICE_PORT" +# Additional Nova configuration that is dependent on other services if is_service_enabled quantum; then add_nova_opt "network_api_class=nova.network.quantumv2.api.API" add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME" @@ -2000,18 +1756,6 @@ else add_nova_opt "flat_interface=$FLAT_INTERFACE" fi fi -if is_service_enabled n-vol; then - add_nova_opt "volume_group=$VOLUME_GROUP" - add_nova_opt "volume_name_template=${VOLUME_NAME_PREFIX}%s" - # oneiric no longer supports ietadm - add_nova_opt "iscsi_helper=tgtadm" -fi -add_nova_opt "osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions" -add_nova_opt "my_ip=$HOST_IP" -add_nova_opt "sql_connection=$BASE_SQL_CONN/nova?charset=utf8" -add_nova_opt "libvirt_type=$LIBVIRT_TYPE" -add_nova_opt "libvirt_cpu_mode=none" -add_nova_opt "instance_name_template=${INSTANCE_NAME_PREFIX}%08x" # All nova-compute workers need to know the vnc configuration options # These settings don't hurt anything if n-xvnc and n-novnc are disabled if is_service_enabled n-cpu; then @@ -2030,8 +1774,6 @@ fi VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} add_nova_opt "vncserver_listen=$VNCSERVER_LISTEN" add_nova_opt "vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS" -add_nova_opt "api_paste_config=$NOVA_CONF_DIR/api-paste.ini" 
-add_nova_opt "image_service=nova.image.glance.GlanceImageService" add_nova_opt "ec2_dmz_host=$EC2_DMZ_HOST" if is_service_enabled zeromq; then add_nova_opt "rpc_backend=nova.openstack.common.rpc.impl_zmq" @@ -2042,51 +1784,6 @@ elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then add_nova_opt "rabbit_password=$RABBIT_PASSWORD" fi add_nova_opt "glance_api_servers=$GLANCE_HOSTPORT" -add_nova_opt "force_dhcp_release=True" -if [ -n "$NOVA_STATE_PATH" ]; then - add_nova_opt "state_path=$NOVA_STATE_PATH" -fi -if [ -n "$NOVA_INSTANCES_PATH" ]; then - add_nova_opt "instances_path=$NOVA_INSTANCES_PATH" -fi -if [ "$MULTI_HOST" != "False" ]; then - add_nova_opt "multi_host=True" - add_nova_opt "send_arp_for_ha=True" -fi -if [ "$SYSLOG" != "False" ]; then - add_nova_opt "use_syslog=True" -fi -if [ "$API_RATE_LIMIT" != "True" ]; then - add_nova_opt "api_rate_limit=False" -fi -if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - # Add color to logging output - add_nova_opt "logging_context_format_string=%(asctime)s %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" - add_nova_opt "logging_default_format_string=%(asctime)s %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" - add_nova_opt "logging_debug_format_suffix=from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - add_nova_opt "logging_exception_prefix=%(color)s%(asctime)s TRACE %(name)s %(instance)s" -else - # Show user_name and project_name instead of user_id and project_id - add_nova_opt "logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" -fi - -# If cinder is enabled, use the cinder volume driver -if is_service_enabled cinder; then - add_nova_opt "volume_api_class=nova.volume.cinder.API" -fi - -# Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS`` -if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; then - EXTRA_OPTS=$EXTRA_FLAGS -fi - -# Define extra nova conf flags by defining the array ``EXTRA_OPTS``. -# For Example: ``EXTRA_OPTS=(foo=true bar=2)`` -for I in "${EXTRA_OPTS[@]}"; do - # Attempt to convert flags to options - add_nova_opt ${I//--} -done - # XenServer # --------- @@ -2120,26 +1817,6 @@ else fi -# Nova Database -# ------------- - -# All nova components talk to a central database. We will need to do this step -# only once for an entire cluster. - -if is_service_enabled mysql && is_service_enabled nova; then - # (Re)create nova database - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS nova;' - - # Explicitly use latin1: to avoid lp#829209, nova expects the database to - # use latin1 by default, and then upgrades the database to utf8 (see the - # 082_essex.py in nova) - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE nova CHARACTER SET latin1;' - - # (Re)create nova database - $NOVA_BIN_DIR/nova-manage db sync -fi - - # Heat # ---- @@ -2152,8 +1829,6 @@ fi # Launch Services # =============== -# Nova api crashes if we start it with a regular screen command, -# so send the start command by forcing text into the window. 
# Only run the services specified in ``ENABLED_SERVICES`` # Launch the Glance services @@ -2179,7 +1854,6 @@ screen_it zeromq "cd $NOVA_DIR && $NOVA_DIR/bin/nova-rpc-zmq-receiver" # Launch the nova-api and wait for it to answer before continuing if is_service_enabled n-api; then echo_summary "Starting Nova API" - add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS" screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api" echo "Waiting for nova-api to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then @@ -2243,17 +1917,10 @@ screen_it q-agt "python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file / screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $Q_CONF_FILE --config-file=$Q_DHCP_CONF_FILE" screen_it q-l3 "python $AGENT_L3_BINARY --config-file $Q_CONF_FILE --config-file=$Q_L3_CONF_FILE" -echo_summary "Starting Nova" -# The group **libvirtd** is added to the current user in this script. -# Use 'sg' to execute nova-compute as a member of the **libvirtd** group. -# ``screen_it`` checks ``is_service_enabled``, it is not needed here -screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_BIN_DIR/nova-compute" -screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" -screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network" -screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler" -screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF --web ." -screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF" -screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" +if is_service_enabled nova; then + echo_summary "Starting Nova" + start_nova +fi if is_service_enabled n-vol; then echo_summary "Starting Nova volumes" start_nvol From 1a3c9fe41b9b20d98b0b7afe6dbec9fcb025d551 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Sat, 29 Sep 2012 17:25:02 -0500 Subject: [PATCH 707/967] Fix setup_develop Change-Id: I64324436eebebfb05ad724335eece10cba107d91 --- functions | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 0cfffb87..677621f2 100644 --- a/functions +++ b/functions @@ -681,7 +681,8 @@ function setup_develop() { python setup.py egg_info; \ raw_links=$(awk '/^.+/ {print "-f " $1}' *.egg-info/dependency_links.txt); \ depend_links=$(echo $raw_links | xargs); \ - pip_install -r *-info/requires.txt $depend_links; \ + require_file=$([ ! -r *-info/requires.txt ] || echo "-r *-info/requires.txt"); \ + pip_install $require_file $depend_links; \ $SUDO_CMD \ HTTP_PROXY=$http_proxy \ HTTPS_PROXY=$https_proxy \ From 7e3c3f8fe9e705a518b62e0b67adba585f9d8414 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Tue, 2 Oct 2012 12:13:56 +0200 Subject: [PATCH 708/967] Do not hardcode mysql service name when we have a variable for it This is useful in case a different service name will be used (when porting to another distribution, for instance). 
Change-Id: I5b66ada02f3c4424384c728f1dadb4872bf4d490 --- stack.sh | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/stack.sh b/stack.sh index d05de6a4..118c2ef8 100755 --- a/stack.sh +++ b/stack.sh @@ -975,24 +975,25 @@ fi if is_service_enabled mysql; then echo_summary "Configuring and starting MySQL" + if [[ "$os_PACKAGE" = "deb" ]]; then + MY_CONF=/etc/mysql/my.cnf + MYSQL=mysql + else + MY_CONF=/etc/my.cnf + MYSQL=mysqld + fi + # Start mysql-server if [[ "$os_PACKAGE" = "rpm" ]]; then # RPM doesn't start the service - start_service mysqld + start_service $MYSQL # Set the root password - only works the first time sudo mysqladmin -u root password $MYSQL_PASSWORD || true fi # Update the DB to give user ‘$MYSQL_USER’@’%’ full control of the all databases: sudo mysql -uroot -p$MYSQL_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$MYSQL_USER'@'%' identified by '$MYSQL_PASSWORD';" - # Update ``my.cnf`` for some local needs and restart the mysql service - if [[ "$os_PACKAGE" = "deb" ]]; then - MY_CONF=/etc/mysql/my.cnf - MYSQL=mysql - else - MY_CONF=/etc/my.cnf - MYSQL=mysqld - fi + # Now update ``my.cnf`` for some local needs and restart the mysql service # Change ‘bind-address’ from localhost (127.0.0.1) to any (0.0.0.0) sudo sed -i '/^bind-address/s/127.0.0.1/0.0.0.0/g' $MY_CONF From 53a5f42e780535e05c24c75801e122b72339201c Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Tue, 2 Oct 2012 17:29:23 -0400 Subject: [PATCH 709/967] Fix ceilometer configuration Correct the use of NOVA_CONF to find the source file for the ceilometer configuration files. Set up notifications and RPC to use the classes from ceilometer.openstack.common instead of nova.openstack.common. Run the ceilometer compute agent under "sg libvirtd" so it has permission to talk to libvirt. 
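As background on the "sg libvirtd" wrapper used below: stack.sh adds the current user to the libvirtd group via usermod, but new group membership only takes effect in a fresh login session, so sg is used to run a single command with the group applied immediately. A minimal illustration (the virsh command is only an arbitrary placeholder, not part of this patch):

# Run one command with libvirtd as the effective group, without
# logging out and back in after the earlier usermod -a -G libvirtd.
sg libvirtd -c "virsh list --all"
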
Change-Id: I75b2e563d654f4f89b182e146e54572618f25261 Signed-off-by: Doug Hellmann --- lib/ceilometer | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index bea68ed0..10ceb457 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -58,8 +58,11 @@ function configure_ceilometer() { sudo chown $USER $CEILOMETER_API_LOG_DIR # ceilometer confs are copy of /etc/nova/nova.conf which must exist first - grep -v format_string $NOVA_CONF_DIR/$NOVA_CONF > $CEILOMETER_AGENT_CONF - grep -v format_string $NOVA_CONF_DIR/$NOVA_CONF > $CEILOMETER_COLLECTOR_CONF + grep -v format_string $NOVA_CONF > $CEILOMETER_AGENT_CONF + iniset $CEILOMETER_AGENT_CONF DEFAULT rpc_backend 'ceilometer.openstack.common.rpc.impl_kombu' + + grep -v format_string $NOVA_CONF > $CEILOMETER_COLLECTOR_CONF + iniset $CEILOMETER_COLLECTOR_CONF DEFAULT rpc_backend 'ceilometer.openstack.common.rpc.impl_kombu' iniset $CEILOMETER_COLLECTOR_CONF DEFAULT notification_topics 'notifications,glance_notifications' } @@ -70,7 +73,7 @@ function install_ceilometer() { # start_ceilometer() - Start running processes, including screen function start_ceilometer() { - screen_it ceilometer-acompute "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_AGENT_CONF" + screen_it ceilometer-acompute "cd $CEILOMETER_DIR && sg libvirtd \"$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_AGENT_CONF\"" screen_it ceilometer-acentral "export OS_USERNAME=$OS_USERNAME OS_PASSWORD=$OS_PASSWORD OS_TENANT_NAME=$OS_TENANT_NAME OS_AUTH_URL=$OS_AUTH_URL && cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_AGENT_CONF" screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_COLLECTOR_CONF" screen_it ceilometer-api "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR" From 3cf1ffbcdda2f74a7f0d57eb2b8bac8d90af108f Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 2 Oct 2012 11:51:27 -0500 Subject: [PATCH 710/967] Fix NOVA_CONF usage NOVA_CONF now includes NOVA_CONF_DIR, fix remaining usage in lib/nova Change-Id: I750d6e3ad73bed5c8a911f6dfe61770bbc804704 --- lib/ceilometer | 2 +- lib/nova | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 10ceb457..7154ccb4 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -8,7 +8,7 @@ # - functions # - OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL set for admin credentials # - DEST set to the destination directory -# - NOVA_CONF_DIR, NOVA_CONF set to the nova configuration directory & file +# - NOVA_CONF set to the nova configuration file # stack.sh # --------- diff --git a/lib/nova b/lib/nova index 8308f05b..333695ea 100644 --- a/lib/nova +++ b/lib/nova @@ -280,7 +280,7 @@ function init_nova() { rm -f $NOVA_DIR/bin/nova.conf # (Re)create ``nova.conf`` - rm -f $NOVA_CONF_DIR/$NOVA_CONF + rm -f $NOVA_CONF add_nova_opt "[DEFAULT]" add_nova_opt "verbose=True" add_nova_opt "auth_strategy=keystone" @@ -288,7 +288,7 @@ function init_nova() { add_nova_opt "api_paste_config=$NOVA_API_PASTE_INI" add_nova_opt "rootwrap_config=$NOVA_CONF_DIR/rootwrap.conf" add_nova_opt "compute_scheduler_driver=$SCHEDULER" - add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF_DIR/$NOVA_CONF" + add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF" add_nova_opt "force_dhcp_release=True" add_nova_opt "fixed_range=$FIXED_RANGE" add_nova_opt 
"s3_host=$SERVICE_HOST" @@ -416,8 +416,8 @@ function start_nova() { screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network" screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler" - screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF --web ." - screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF" + screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF --web ." + screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF" screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" } From 855c5875c7756db192fe7078bed207ab280f7780 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 4 Oct 2012 13:36:46 +0200 Subject: [PATCH 711/967] Directly use GetDistro instead of failing if $DISTRO is not set Change-Id: I81d73a767e1c7f5e83eb535b2e1645e6ab29f347 --- functions | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/functions b/functions index 677621f2..0d0df51f 100644 --- a/functions +++ b/functions @@ -1,7 +1,6 @@ # functions - Common functions used by DevStack components # # The following variables are assumed to be defined by certain functions: -# ``DISTRO`` # ``ENABLED_SERVICES`` # ``EROR_ON_CLONE`` # ``FILES`` @@ -107,7 +106,7 @@ function get_field() { # - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection # of the package to the distros listed. The distro names are case insensitive. # -# Uses globals ``DISTRO``, ``ENABLED_SERVICES`` +# Uses globals ``ENABLED_SERVICES`` # get_packages dir function get_packages() { local package_dir=$1 @@ -119,8 +118,7 @@ function get_packages() { return 1 fi if [[ -z "$DISTRO" ]]; then - echo "No distro set in DISTRO" - return 1 + GetDistro fi for service in general ${ENABLED_SERVICES//,/ }; do # Allow individual services to specify dependencies From 1b23d7cd6e49b124f77765bc2e1e324357f8812c Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 4 Oct 2012 13:52:22 -0500 Subject: [PATCH 712/967] Update horizon config to current example from horizon repo includes https://review.openstack.org/14048 Change-Id: I49952dd34408d2e94bcecd9063c21633f4139a99 --- files/horizon_settings.py | 96 +++++++++++++++++++++++++++++++-------- 1 file changed, 78 insertions(+), 18 deletions(-) diff --git a/files/horizon_settings.py b/files/horizon_settings.py index d18fd1a5..ce92e2c9 100644 --- a/files/horizon_settings.py +++ b/files/horizon_settings.py @@ -1,10 +1,28 @@ import os +from django.utils.translation import ugettext_lazy as _ + DEBUG = True TEMPLATE_DEBUG = DEBUG PROD = False USE_SSL = False +# Set SSL proxy settings: +# For Django 1.4+ pass this header from the proxy after terminating the SSL, +# and don't forget to strip it from the client's request. +# For more information see: +# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header +# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https') + +# Specify a regular expression to validate user passwords. +# HORIZON_CONFIG = { +# "password_validator": { +# "regex": '.*', +# "help_text": _("Your password does not meet the requirements.") +# }, +# 'help_url': "http://docs.openstack.org" +# } + LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) # FIXME: We need to change this to mysql, instead of sqlite. 
@@ -16,14 +34,24 @@ }, } -# The default values for these two settings seem to cause issues with apache -CACHE_BACKEND = 'dummy://' -SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db' - -# Set a secure and unique SECRET_KEY (the Django default is '') +# Set custom secret key: +# You can either set it to a specific value or you can let horizion generate a +# default secret key that is unique on this machine, e.i. regardless of the +# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, there +# may be situations where you would want to set this explicitly, e.g. when +# multiple dashboard instances are distributed on different machines (usually +# behind a load-balancer). Either you have to make sure that a session gets all +# requests routed to the same dashboard instance or you set the same SECRET_KEY +# for all of them. from horizon.utils import secret_key SECRET_KEY = secret_key.generate_or_read_from_file(os.path.join(LOCAL_PATH, '.secret_key_store')) +# We recommend you use memcached for development; otherwise after every reload +# of the django development server, you will have to login again. To use +# memcached set CACHE_BACKED to something like 'memcached://127.0.0.1:11211/' +CACHE_BACKEND = 'dummy://' +SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db' + # Send email to the console by default EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # Or send them to /dev/null @@ -38,31 +66,55 @@ # EMAIL_HOST_USER = 'djangomail' # EMAIL_HOST_PASSWORD = 'top-secret!' +# For multiple regions uncomment this configuration, and add (endpoint, title). +# AVAILABLE_REGIONS = [ +# ('http://cluster1.example.com:5000/v2.0', 'cluster1'), +# ('http://cluster2.example.com:5000/v2.0', 'cluster2'), +# ] + +OPENSTACK_HOST = "127.0.0.1" +OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST +OPENSTACK_KEYSTONE_DEFAULT_ROLE = "Member" + +# Disable SSL certificate checks (useful for self-signed certificates): +# OPENSTACK_SSL_NO_VERIFY = True + HORIZON_CONFIG = { - 'dashboards': ('nova', 'syspanel', 'settings',), - 'default_dashboard': 'nova', + 'dashboards': ('project', 'admin', 'settings',), + 'default_dashboard': 'project', } +# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the +# capabilities of the auth backend for Keystone. +# If Keystone has been configured to use LDAP as the auth backend then set +# can_edit_user to False and name to 'ldap'. +# # TODO(tres): Remove these once Keystone has an API to identify auth backend. OPENSTACK_KEYSTONE_BACKEND = { 'name': 'native', 'can_edit_user': True } -OPENSTACK_HOST = "127.0.0.1" -OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST -# FIXME: this is only needed until keystone fixes its GET /tenants call -# so that it doesn't return everything for admins -OPENSTACK_KEYSTONE_ADMIN_URL = "http://%s:35357/v2.0" % OPENSTACK_HOST -OPENSTACK_KEYSTONE_DEFAULT_ROLE = "Member" +OPENSTACK_HYPERVISOR_FEATURES = { + 'can_set_mount_point': True +} + +# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints +# in the Keystone service catalog. Use this setting when Horizon is running +# external to the OpenStack environment. The default is 'internalURL'. +#OPENSTACK_ENDPOINT_TYPE = "publicURL" + +# The number of objects (Swift containers/objects or images) to display +# on a single page before providing a paging element (a "more" link) +# to paginate results. 
+API_RESULT_LIMIT = 1000 +API_RESULT_PAGE_SIZE = 20 SWIFT_PAGINATE_LIMIT = 100 -# If you have external monitoring links, eg: -# EXTERNAL_MONITORING = [ -# ['Nagios','http://foo.com'], -# ['Ganglia','http://bar.com'], -# ] +# The timezone of the server. This should correspond with the timezone +# of your entire OpenStack installation, and hopefully be in UTC. +TIME_ZONE = "UTC" #LOGGING = { # 'version': 1, @@ -93,6 +145,10 @@ # 'handlers': ['console'], # 'propagate': False, # }, +# 'openstack_dashboard': { +# 'handlers': ['console'], +# 'propagate': False, +# }, # 'novaclient': { # 'handlers': ['console'], # 'propagate': False, @@ -101,6 +157,10 @@ # 'handlers': ['console'], # 'propagate': False, # }, +# 'glanceclient': { +# 'handlers': ['console'], +# 'propagate': False, +# }, # 'nose.plugins.manager': { # 'handlers': ['console'], # 'propagate': False, From 32cce9ef3e28309f97416e935aa2c90dbbd40fe6 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Fri, 5 Oct 2012 12:27:51 +0100 Subject: [PATCH 713/967] Default values for live migration tempest tests Related to https://review.openstack.org/#/c/13101/ Add tempest test variables, so tempest tests will be configured with proper default values. Change-Id: Iec13ec3492cbfa6dcce665a4e0723f1b941ae88a --- tools/configure_tempest.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 2df0315c..1e35036b 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -201,6 +201,10 @@ NETWORK_API_VERSION=2.0 # Volume API test configuration VOLUME_CATALOG_TYPE=volume +# Live migration +LIVE_MIGRATION_AVAILABLE=${LIVE_MIGRATION_AVAILABLE:-False} +USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION=${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} + sed -e " s,%IDENTITY_USE_SSL%,$IDENTITY_USE_SSL,g; s,%IDENTITY_HOST%,$IDENTITY_HOST,g; @@ -256,6 +260,8 @@ sed -e " s,%VOLUME_CATALOG_TYPE%,$VOLUME_CATALOG_TYPE,g; s,%VOLUME_BUILD_INTERVAL%,$VOLUME_BUILD_INTERVAL,g; s,%VOLUME_BUILD_TIMEOUT%,$VOLUME_BUILD_TIMEOUT,g; + s,%LIVE_MIGRATION_AVAILABLE%,$LIVE_MIGRATION_AVAILABLE,g; + s,%USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION%,$USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION,g; " -i $TEMPEST_CONF echo "Created tempest configuration file:" From d093121f3a605b6b8373e3a061e25c1d101c3bae Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 4 Oct 2012 16:06:44 -0400 Subject: [PATCH 714/967] clone tempest if enabled add auto cloning of the tempest tree if it's an enabled service just reduces one step in getting tempest up in a new environment Change-Id: Ia8a2feee96f26dffe96c87d572a31735d90cdabb --- lib/tempest | 56 +++++++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 7 +++++++ 2 files changed, 63 insertions(+) create mode 100644 lib/tempest diff --git a/lib/tempest b/lib/tempest new file mode 100644 index 00000000..115c9118 --- /dev/null +++ b/lib/tempest @@ -0,0 +1,56 @@ +# lib/tempest + +# Dependencies: +# ``functions`` file +# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# + +# ``stack.sh`` calls the entry points in this order: +# +# install_XXXX +# configure_XXXX +# init_XXXX +# start_XXXX +# stop_XXXX +# cleanup_XXXX + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# + +# Set up default directories +TEMPEST_DIR=$DEST/tempest +TEMPEST_CONF_DIR=$DEST/tempest/etc + +# Entry Points +# ------------ + + +# configure_tempest() - 
Set config files, create data dirs, etc +function configure_tempest() { + # sudo python setup.py deploy + # iniset $tempest_CONF ... + # This function intentionally left blank + # + # TODO(sdague) actually move the guts of configure tempest + # into this function + cd tools + ./configure_tempest.sh + cd .. +} + + +# install_tempest() - Collect source and prepare +function install_tempest() { + git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH +} + + +# Restore xtrace +$XTRACE diff --git a/stack.sh b/stack.sh index 118c2ef8..957bbd63 100755 --- a/stack.sh +++ b/stack.sh @@ -313,6 +313,7 @@ source $TOP_DIR/lib/n-vol source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat source $TOP_DIR/lib/quantum +source $TOP_DIR/lib/tempest # Set the destination directories for OpenStack projects HORIZON_DIR=$DEST/horizon @@ -871,6 +872,9 @@ fi if is_service_enabled ceilometer; then install_ceilometer fi +if is_service_enabled tempest; then + install_tempest +fi # Initialization @@ -917,6 +921,9 @@ fi if is_service_enabled cinder; then configure_cinder fi +if is_service_enabled tempest; then + configure_tempest +fi if [[ $TRACK_DEPENDS = True ]] ; then $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip From a9c4a8aff202225cd02239c9c76e36ef76f88de4 Mon Sep 17 00:00:00 2001 From: Surya Prabhakar Date: Sat, 6 Oct 2012 19:35:56 +0530 Subject: [PATCH 715/967] kill throws an error while finding pid in quantum dhcp agent section Change-Id: I602df7875710336125cdbb8407ff293a8e7d448e --- unstack.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/unstack.sh b/unstack.sh index 1bace156..49f1e8bb 100755 --- a/unstack.sh +++ b/unstack.sh @@ -110,5 +110,6 @@ fi # Quantum dhcp agent runs dnsmasq if is_service_enabled q-dhcp; then - sudo kill -9 $(ps aux | awk '/[d]nsmasq.+interface=tap/ { print $2 }') + pid=$(ps aux | awk '/[d]nsmasq.+interface=tap/ { print $2 }') + [ ! -z $pid ] && sudo kill -9 $pid fi From 082a3da01f01de6b71e24710b4201f969e69d1c9 Mon Sep 17 00:00:00 2001 From: John Griffith Date: Sat, 6 Oct 2012 22:19:33 -0600 Subject: [PATCH 716/967] Remove extraneous timing messages from volumes.sh Change: I3bbcc5b9f8a4da2fcdb9f6f70913c2d6bc6e2b9b added some timing messages around the volume operations in exercises/volumes.sh. This was a good idea, but some useless timing info was added to failed cases, this patch pulls that back out. The only parameter used to detect failure is a timeout, so outputing the time elapses in these cases is useless. Change-Id: I609a803a7293aa8a8e3cec186984de59bfe9b409 --- exercises/volumes.sh | 8 -------- 1 file changed, 8 deletions(-) diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 1c6320c0..ffa12c46 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -158,8 +158,6 @@ fi start_time=`date +%s` if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then echo "Volume $VOL_NAME not created" - end_time=`date +%s` - echo "Failed volume-create after $((end_time - start_time)) seconds" exit 1 fi end_time=`date +%s` @@ -176,8 +174,6 @@ nova volume-attach $VM_UUID $VOL_ID $DEVICE || \ die "Failure attaching volume $VOL_NAME to $NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
nova volume-list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then echo "Volume $VOL_NAME not attached to $NAME" - end_time=`date +%s` - echo "Failed volume-attach after $((end_time - start_time)) seconds" exit 1 fi end_time=`date +%s` @@ -195,8 +191,6 @@ start_time=`date +%s` nova volume-detach $VM_UUID $VOL_ID || die "Failure detaching volume $VOL_NAME from $NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then echo "Volume $VOL_NAME not detached from $NAME" - end_time=`date +%s` - echo "Failed volume-detach after $((end_time - start_time)) seconds" exit 1 fi end_time=`date +%s` @@ -207,8 +201,6 @@ start_time=`date +%s` nova volume-delete $VOL_ID || die "Failure deleting volume $VOL_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME; do sleep 1; done"; then echo "Volume $VOL_NAME not deleted" - end_time=`date +%s` - echo "Failed volume-delete after $((end_time - start_time)) seconds" exit 1 fi end_time=`date +%s` From bb421bed58bc8a4b3f21ee3cba719e5d46c4c33a Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Wed, 10 Oct 2012 13:19:10 +1300 Subject: [PATCH 717/967] Configure and launch Heat REST API. This is a REST API in the same style as other OpenStack APIs. This also creates a new endpoint for the REST API which uses the serivce type 'orchestration'. The old endpoint now has the service type 'cloudformation'. This matches the pattern where the native openstack API gets a generic service type while the emulated EC2 API gets a specific type (eg, object-store, s3). There will be breakage for the time period where only one of this change and https://review.openstack.org/#/c/14263/ are approved, since keystone will have the incorrect service type for that period. 
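As an illustrative check only (not part of this change, and assuming the
service names created in files/keystone_data.sh below), the resulting
catalog should end up with both entries, roughly:

    $ keystone service-list | grep -i heat
    ... heat-cfn | cloudformation | Heat CloudFormation Service ...
    ... heat     | orchestration  | Heat Service ...
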
Change-Id: I6a0d51a63da8017d375b4c065c4c9079dfca8fe3 --- files/keystone_data.sh | 16 +++++++++++++--- lib/heat | 32 ++++++++++++++++++++++++++++++++ stack.sh | 1 + 3 files changed, 46 insertions(+), 3 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 17549101..7da07aaa 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -166,15 +166,25 @@ if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then --role_id $ADMIN_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then HEAT_CFN_SERVICE=$(get_id keystone service-create \ - --name=heat \ - --type=orchestration \ - --description="Heat Service") + --name=heat-cfn \ + --type=cloudformation \ + --description="Heat CloudFormation Service") keystone endpoint-create \ --region RegionOne \ --service_id $HEAT_CFN_SERVICE \ --publicurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \ --adminurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \ --internalurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" + HEAT_SERVICE=$(get_id keystone service-create \ + --name=heat \ + --type=orchestration \ + --description="Heat Service") + keystone endpoint-create \ + --region RegionOne \ + --service_id $HEAT_SERVICE \ + --publicurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ + --adminurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ + --internalurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" fi fi diff --git a/lib/heat b/lib/heat index 6e823f2f..80e3f7ab 100644 --- a/lib/heat +++ b/lib/heat @@ -51,6 +51,8 @@ function configure_heat() { HEAT_METADATA_PORT=${HEAT_METADATA_PORT:-8002} HEAT_API_CW_HOST=${HEAT_API_CW_HOST:-$SERVICE_HOST} HEAT_API_CW_PORT=${HEAT_API_CW_PORT:-8003} + HEAT_API_HOST=${HEAT_API_HOST:-$SERVICE_HOST} + HEAT_API_PORT=${HEAT_API_PORT:-8004} # cloudformation api HEAT_API_CFN_CONF=$HEAT_CONF_DIR/heat-api-cfn.conf @@ -81,6 +83,35 @@ function configure_heat() { iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens + # openstack api + HEAT_API_CONF=$HEAT_CONF_DIR/heat-api.conf + cp $HEAT_DIR/etc/heat/heat-api.conf $HEAT_API_CONF + iniset $HEAT_API_CONF DEFAULT debug True + inicomment $HEAT_API_CONF DEFAULT log_file + iniset $HEAT_API_CONF DEFAULT use_syslog $SYSLOG + iniset $HEAT_API_CONF DEFAULT bind_host $HEAT_API_HOST + iniset $HEAT_API_CONF DEFAULT bind_port $HEAT_API_PORT + + if is_service_enabled rabbit; then + iniset $HEAT_API_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu + iniset $HEAT_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD + iniset $HEAT_API_CONF DEFAULT rabbit_host $RABBIT_HOST + elif is_service_enabled qpid; then + iniset $HEAT_API_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid + fi + + HEAT_API_PASTE_INI=$HEAT_CONF_DIR/heat-api-paste.ini + cp $HEAT_DIR/etc/heat/heat-api-paste.ini $HEAT_API_PASTE_INI + iniset $HEAT_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $HEAT_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $HEAT_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $HEAT_API_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $HEAT_API_PASTE_INI filter:authtoken 
admin_user heat + iniset $HEAT_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD + iniset $HEAT_API_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_API_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens + # engine HEAT_ENGINE_CONF=$HEAT_CONF_DIR/heat-engine.conf cp $HEAT_DIR/etc/heat/heat-engine.conf $HEAT_ENGINE_CONF @@ -168,6 +199,7 @@ function install_heat() { # start_heat() - Start running processes, including screen function start_heat() { screen_it h-eng "cd $HEAT_DIR; bin/heat-engine --config-file=$HEAT_CONF_DIR/heat-engine.conf" + screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-dir=$HEAT_CONF_DIR/heat-api.conf" screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-dir=$HEAT_CONF_DIR/heat-api-cfn.conf" screen_it h-api-cw "cd $HEAT_DIR; bin/heat-api-cloudwatch --config-dir=$HEAT_CONF_DIR/heat-api-cloudwatch.conf" screen_it h-meta "cd $HEAT_DIR; bin/heat-metadata --config-dir=$HEAT_CONF_DIR/heat-metadata.conf" diff --git a/stack.sh b/stack.sh index 957bbd63..774c454c 100755 --- a/stack.sh +++ b/stack.sh @@ -1050,6 +1050,7 @@ if is_service_enabled key; then SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \ DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES HEAT_API_CFN_PORT=$HEAT_API_CFN_PORT \ + HEAT_API_PORT=$HEAT_API_PORT \ bash -x $FILES/keystone_data.sh # Set up auth creds now that keystone is bootstrapped From 9dae3bd1e53daa886d535cb3cb7fafcbb41546e8 Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Tue, 9 Oct 2012 17:54:48 -0700 Subject: [PATCH 718/967] Change mysql -> mysql-server in quantum. There is no package called mysql in precise. Change-Id: If047cb8d03f51c2f56e5da43573af596a8aca367 --- files/apts/quantum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/apts/quantum b/files/apts/quantum index 568438f8..ed3887b4 100644 --- a/files/apts/quantum +++ b/files/apts/quantum @@ -1,5 +1,5 @@ iptables -mysql #NOPRIME +mysql-server #NOPRIME sudo python-paste python-routes From 602cf9bd68399e55448da001eb823be31d03030a Mon Sep 17 00:00:00 2001 From: Yoshihiro Kaneko Date: Mon, 23 Jul 2012 06:27:36 +0000 Subject: [PATCH 719/967] Add support for the Quantum Ryu plugin. This patch allows using the Quantum Ryu plugin. Ryu plugin lets Quantum link Open vSwitch and Ryu OpenFlow controller[1]. Ryu OpenFlow controller is not Openstack component, but I added some processing that is related with Ryu to stack.sh for the convenience of the person who intend to try the plugin. Instructions for using Ryu plugin: 1. Enable services: "q-svc", "q-agt", "q-dhcp", "q-l3", "quantum", "ryu" 2. Set Q_PLUGIN to "ryu" 3. 
Set an internal network interface name to connect br-int on plural hosts to RYU_INTERNAL_INTERFACE (optional) Example localrc: disable_service n-net enable_service q-svc q-agt q-dhcp q-l3 quantum ryu Q_PLUGIN=ryu RYU_INTERNAL_INTERFACE=eth1 [1] http://osrg.github.com/ryu/ Change-Id: Ic1da132fa421f1c70c10a319ee3239831b0f956f --- files/apts/ryu | 4 +++ files/rpms/ryu | 4 +++ lib/nova | 2 +- lib/quantum | 24 +++++++++++++ stack.sh | 95 +++++++++++++++++++++++++++++++++++++++++--------- stackrc | 4 +++ unstack.sh | 2 +- 7 files changed, 116 insertions(+), 19 deletions(-) create mode 100644 files/apts/ryu create mode 100644 files/rpms/ryu diff --git a/files/apts/ryu b/files/apts/ryu new file mode 100644 index 00000000..1e8f2d2b --- /dev/null +++ b/files/apts/ryu @@ -0,0 +1,4 @@ +python-setuptools +python-gevent +python-gflags +python-sphinx diff --git a/files/rpms/ryu b/files/rpms/ryu new file mode 100644 index 00000000..1e8f2d2b --- /dev/null +++ b/files/rpms/ryu @@ -0,0 +1,4 @@ +python-setuptools +python-gevent +python-gflags +python-sphinx diff --git a/lib/nova b/lib/nova index 333695ea..dbfc2947 100644 --- a/lib/nova +++ b/lib/nova @@ -213,7 +213,7 @@ function configure_nova() { fi fi - if is_service_enabled quantum && [[ $Q_PLUGIN = "openvswitch" ]] && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then + if is_service_enabled quantum && is_quantum_ovs_base_plugin "$Q_PLUGIN" && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces cat < /dev/null + sudo ovs-vsctl --no-wait del-port $bridge $PORT + fi + done + # ensure no IP is configured on the public bridge + sudo ip addr flush dev $bridge +} + +function is_quantum_ovs_base_plugin() { + local plguin=$1 + if [[ ",openvswitch,ryu," =~ ,${plugin}, ]]; then + return 0 + fi + return 1 +} + # Restore xtrace $XTRACE diff --git a/stack.sh b/stack.sh index 774c454c..3fc3204d 100755 --- a/stack.sh +++ b/stack.sh @@ -342,6 +342,18 @@ Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP=:-True} # Meta data IP Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP} +RYU_DIR=$DEST/ryu +# Ryu API Host +RYU_API_HOST=${RYU_API_HOST:-127.0.0.1} +# Ryu API Port +RYU_API_PORT=${RYU_API_PORT:-8080} +# Ryu OFP Host +RYU_OFP_HOST=${RYU_OFP_HOST:-127.0.0.1} +# Ryu OFP Port +RYU_OFP_PORT=${RYU_OFP_PORT:-6633} +# Ryu Applications +RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest} + # Name of the LVM volume group to use/create for iscsi volumes VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} @@ -773,7 +785,7 @@ if is_service_enabled horizon; then fi if is_service_enabled q-agt; then - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then # Install deps # FIXME add to files/apts/quantum, but don't install if not needed! 
if [[ "$os_PACKAGE" = "deb" ]]; then @@ -875,7 +887,9 @@ fi if is_service_enabled tempest; then install_tempest fi - +if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then + git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH +fi # Initialization # ============== @@ -924,6 +938,9 @@ fi if is_service_enabled tempest; then configure_tempest fi +if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then + setup_develop $RYU_DIR +fi if [[ $TRACK_DEPENDS = True ]] ; then $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip @@ -1132,6 +1149,31 @@ if is_service_enabled g-reg; then fi +# Ryu +# --- +# Ryu is not a part of OpenStack project. Please ignore following block if +# you are not interested in Ryu. +# launch ryu manager +if is_service_enabled ryu; then + RYU_CONF_DIR=/etc/ryu + if [[ ! -d $RYU_CONF_DIR ]]; then + sudo mkdir -p $RYU_CONF_DIR + fi + sudo chown `whoami` $RYU_CONF_DIR + RYU_CONF=$RYU_CONF_DIR/ryu.conf + sudo rm -rf $RYU_CONF + + cat < $RYU_CONF +--app_lists=$RYU_APPS +--wsapi_host=$RYU_API_HOST +--wsapi_port=$RYU_API_PORT +--ofp_listen_host=$RYU_OFP_HOST +--ofp_tcp_listen_port=$RYU_OFP_PORT +EOF + screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --flagfile $RYU_CONF" +fi + + # Quantum # ------- @@ -1219,6 +1261,11 @@ if is_service_enabled quantum; then Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini Q_DB_NAME="quantum_linux_bridge" Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2" + elif [[ "$Q_PLUGIN" = "ryu" ]]; then + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/ryu + Q_PLUGIN_CONF_FILENAME=ryu.ini + Q_DB_NAME="ovs_quantum" + Q_PLUGIN_CLASS="quantum.plugins.ryu.ryu_quantum_plugin.RyuQuantumPluginV2" else echo "Unknown Quantum plugin '$Q_PLUGIN'.. 
exiting" exit 1 @@ -1314,6 +1361,9 @@ if is_service_enabled q-svc; then if [[ "$LB_VLAN_RANGES" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES fi + elif [[ "$Q_PLUGIN" = "ryu" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS openflow_controller $RYU_OFP_HOST:$RYU_OFP_PORT + iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT fi fi @@ -1363,6 +1413,14 @@ if is_service_enabled q-agt; then iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS fi AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" + elif [[ "$Q_PLUGIN" = "ryu" ]]; then + # Set up integration bridge + OVS_BRIDGE=${OVS_BRIDGE:-br-int} + quantum_setup_ovs_bridge $OVS_BRIDGE + if [ -n "$RYU_INTERNAL_INTERFACE" ]; then + sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $RYU_INTERNAL_INTERFACE + fi + AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py" fi # Update config w/rootwrap iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" @@ -1391,6 +1449,9 @@ if is_service_enabled q-dhcp; then iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver + elif [[ "$Q_PLUGIN" = "ryu" ]]; then + iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver + iniset $Q_DHCP_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT fi fi @@ -1417,21 +1478,16 @@ if is_service_enabled q-l3; then iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE # Set up external bridge - # Create it if it does not exist - sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE - # remove internal ports - for PORT in `sudo ovs-vsctl --no-wait list-ports $PUBLIC_BRIDGE`; do - TYPE=$(sudo ovs-vsctl get interface $PORT type) - if [[ "$TYPE" == "internal" ]]; then - echo `sudo ip link delete $PORT` > /dev/null - sudo ovs-vsctl --no-wait del-port $bridge $PORT - fi - done - # ensure no IP is configured on the public bridge - sudo ip addr flush dev $PUBLIC_BRIDGE + quantum_setup_external_bridge $PUBLIC_BRIDGE elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge '' + elif [[ "$Q_PLUGIN" = "ryu" ]]; then + iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver + iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE + iniset $Q_L3_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT + # Set up external bridge + quantum_setup_external_bridge $PUBLIC_BRIDGE fi fi @@ -1599,8 +1655,8 @@ if is_service_enabled swift; then iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin" - if is_service_enabled swift3;then - cat <>${SWIFT_CONFIG_PROXY_SERVER} + if is_service_enabled swift3; then + cat <>${SWIFT_CONFIG_PROXY_SERVER} # NOTE(chmou): s3token middleware is not updated yet to use only # username and password. 
[filter:s3token] @@ -1753,6 +1809,11 @@ if is_service_enabled quantum; then NOVA_VIF_DRIVER="nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver" elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then NOVA_VIF_DRIVER="nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver" + elif [[ "$Q_PLUGIN" = "ryu" ]]; then + NOVA_VIF_DRIVER="quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver" + add_nova_opt "libvirt_ovs_integration_bridge=$OVS_BRIDGE" + add_nova_opt "linuxnet_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" + add_nova_opt "libvirt_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" fi add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER" add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER" @@ -1899,7 +1960,7 @@ if is_service_enabled q-svc; then EXT_NET_ID=$(quantum net-create ext_net -- --router:external=True | grep ' id ' | get_field 2) EXT_GW_IP=$(quantum subnet-create --ip_version 4 $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) quantum router-gateway-set $ROUTER_ID $EXT_NET_ID - if [[ "$Q_PLUGIN" = "openvswitch" ]] && [[ "$Q_USE_NAMESPACE" = "True" ]]; then + if is_quantum_ovs_base_plugin "$Q_PLUGIN" && [[ "$Q_USE_NAMESPACE" = "True" ]]; then CIDR_LEN=${FLOATING_RANGE#*/} sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE sudo ip link set $PUBLIC_BRIDGE up diff --git a/stackrc b/stackrc index 3002c463..f9a41bd7 100644 --- a/stackrc +++ b/stackrc @@ -101,6 +101,10 @@ TEMPEST_BRANCH=master HEAT_REPO=${GIT_BASE}/heat-api/heat.git HEAT_BRANCH=master +# ryu service +RYU_REPO=https://github.com/osrg/ryu.git +RYU_BRANCH=master + # Nova hypervisor configuration. We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can # also install an **LXC** or **OpenVZ** based system. diff --git a/unstack.sh b/unstack.sh index 49f1e8bb..42cb7aff 100755 --- a/unstack.sh +++ b/unstack.sh @@ -111,5 +111,5 @@ fi # Quantum dhcp agent runs dnsmasq if is_service_enabled q-dhcp; then pid=$(ps aux | awk '/[d]nsmasq.+interface=tap/ { print $2 }') - [ ! -z $pid ] && sudo kill -9 $pid + [ ! -z "$pid" ] && sudo kill -9 $pid fi From fc65cfed553372152ffe8a6c0e4229607706ef8d Mon Sep 17 00:00:00 2001 From: Eoghan Glynn Date: Fri, 19 Oct 2012 21:26:41 +0100 Subject: [PATCH 720/967] Directly create bootable volume based on image ID. Now that a bootable volume can be created directly based on image ID, we can dispense with the complexity around using a builder instance to acheive the same effect. Change-Id: Ied1f6863a4dd21685e2f135841b9e2c4d499675f --- exercises/boot_from_volume.sh | 119 +++++----------------------------- 1 file changed, 16 insertions(+), 103 deletions(-) diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index c967e391..183efa63 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -3,10 +3,8 @@ # **boot_from_volume.sh** # This script demonstrates how to boot from a volume. 
It does the following: -# * Create a 'builder' instance -# * Attach a volume to the instance -# * Format and install an os onto the volume -# * Detach volume from builder, and then boot volume-backed instance +# * Create a bootable volume +# * Boot a volume-backed instance echo "*********************************************************************" echo "Begin DevStack Exercise: $0" @@ -37,6 +35,10 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc +# If cinder or n-vol are not enabled we exit with exitcode 55 so that +# the exercise is skipped +is_service_enabled cinder n-vol || exit 55 + # Boot this image, use first AMI image if unset DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} @@ -61,16 +63,13 @@ IMAGE=`glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1` die_if_not_set IMAGE "Failure getting image" # Instance and volume names -INSTANCE_NAME=${INSTANCE_NAME:-test_instance} VOL_INSTANCE_NAME=${VOL_INSTANCE_NAME:-test_vol_instance} VOL_NAME=${VOL_NAME:-test_volume} # Clean-up from previous runs nova delete $VOL_INSTANCE_NAME || true -nova delete $INSTANCE_NAME || true -# Wait till server is gone -if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $INSTANCE_NAME; do sleep 1; done"; then +if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VOL_INSTANCE_NAME; do sleep 1; done"; then echo "server didn't terminate!" exit 1 fi @@ -95,16 +94,6 @@ nova keypair-delete $KEY_NAME || true nova keypair-add $KEY_NAME > $KEY_FILE chmod 600 $KEY_FILE -# Boot our instance -VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP --key_name $KEY_NAME $INSTANCE_NAME | grep ' id ' | get_field 2` -die_if_not_set VM_UUID "Failure launching $INSTANCE_NAME" - -# check that the status is active within ACTIVE_TIMEOUT seconds -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - echo "server didn't become active!" - exit 1 -fi - # Delete the old volume nova volume-delete $VOL_NAME || true @@ -122,17 +111,8 @@ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $ exit 1 fi -# Add floating ip to our server -nova add-floating-ip $VM_UUID $FLOATING_IP - -# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds -if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then - echo "Couldn't ping server with floating ip" - exit 1 -fi - -# Create our volume -nova volume-create --display_name=$VOL_NAME 1 +# Create the bootable volume +nova volume-create --display_name=$VOL_NAME --image-id $IMAGE 1 # Wait for volume to activate if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then @@ -140,62 +120,7 @@ if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | exit 1 fi -# FIXME (anthony) - python-novaclient should accept a volume_name for the attachment param? -DEVICE=/dev/vdb VOLUME_ID=`nova volume-list | grep $VOL_NAME | get_field 1` -nova volume-attach $INSTANCE_NAME $VOLUME_ID $DEVICE - -# Wait till volume is attached -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then - echo "Volume $VOL_NAME not created" - exit 1 -fi - -# The following script builds our bootable volume. -# To do this, ssh to the builder instance, mount volume, and build a volume-backed image. 
-STAGING_DIR=/tmp/stage -CIRROS_DIR=/tmp/cirros -ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP << EOF -set -o errexit -set -o xtrace -sudo mkdir -p $STAGING_DIR -sudo mkfs.ext3 -b 1024 $DEVICE 1048576 -sudo mount $DEVICE $STAGING_DIR -# The following lines create a writable empty file so that we can scp -# the actual file -sudo touch $STAGING_DIR/cirros-0.3.0-x86_64-rootfs.img.gz -sudo chown cirros $STAGING_DIR/cirros-0.3.0-x86_64-rootfs.img.gz -EOF - -# Download cirros -if [ ! -e cirros-0.3.0-x86_64-rootfs.img.gz ]; then - wget http://images.ansolabs.com/cirros-0.3.0-x86_64-rootfs.img.gz -fi - -# Copy cirros onto the volume -scp -o StrictHostKeyChecking=no -i $KEY_FILE cirros-0.3.0-x86_64-rootfs.img.gz ${DEFAULT_INSTANCE_USER}@$FLOATING_IP:$STAGING_DIR - -# Unpack cirros into volume -ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP << EOF -set -o errexit -set -o xtrace -cd $STAGING_DIR -sudo mkdir -p $CIRROS_DIR -sudo gunzip cirros-0.3.0-x86_64-rootfs.img.gz -sudo mount cirros-0.3.0-x86_64-rootfs.img $CIRROS_DIR - -# Copy cirros into our volume -sudo cp -pr $CIRROS_DIR/* $STAGING_DIR/ - -cd -sync -sudo umount $CIRROS_DIR -# The following typically fails. Don't know why. -sudo umount $STAGING_DIR || true -EOF - -# Detach the volume from the builder instance -nova volume-detach $INSTANCE_NAME $VOLUME_ID # Boot instance from volume! This is done with the --block_device_mapping param. # The format of mapping is: @@ -210,12 +135,6 @@ if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VOL_VM_UUID | grep status exit 1 fi -# Add floating ip to our server -nova remove-floating-ip $VM_UUID $FLOATING_IP - -# Gratuitous sleep, probably hiding a race condition :/ -sleep 1 - # Add floating ip to our server nova add-floating-ip $VOL_VM_UUID $FLOATING_IP @@ -226,9 +145,13 @@ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sle fi # Make sure our volume-backed instance launched -ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP << EOF -echo "success!" -EOF +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success ; do sleep 1; done"; then + echo "server didn't become ssh-able!" + exit 1 +fi + +# Remove floating ip from volume-backed instance +nova remove-floating-ip $VOL_VM_UUID $FLOATING_IP # Delete volume backed instance nova delete $VOL_INSTANCE_NAME || \ @@ -244,16 +167,6 @@ fi nova volume-delete $VOL_NAME || \ die "Failure deleting volume $VOLUME_NAME" -# Delete instance -nova delete $INSTANCE_NAME || \ - die "Failure deleting instance $INSTANCE_NAME" - -# Wait for termination -if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then - echo "Server $NAME not deleted" - exit 1 -fi - # De-allocate the floating ip nova floating-ip-delete $FLOATING_IP || \ die "Failure deleting floating IP $FLOATING_IP" From 782c00b1f54a3b9d21d393b460d5db8b0d5c2328 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 4 Oct 2012 11:57:37 +0200 Subject: [PATCH 721/967] Add missing debian packages for quantum The rpm list is more complete than the debian list, so complete the debian one based on the rpm one. 
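A quick, illustrative way to sanity-check that the expanded list resolves on
precise (not part of this change; it strips the #NOPRIME and dist:
annotations before querying apt):

    $ sed 's/#.*$//' files/apts/quantum | xargs -r apt-cache policy
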
Change-Id: I707a16d3d2646b4b3d0bc200fed62e5e6743c030 --- files/apts/quantum | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/files/apts/quantum b/files/apts/quantum index ed3887b4..39f45618 100644 --- a/files/apts/quantum +++ b/files/apts/quantum @@ -1,8 +1,13 @@ +ebtables iptables +iputils-ping mysql-server #NOPRIME sudo +python-boto +python-iso8601 python-paste python-routes +python-suds python-netaddr python-pastedeploy python-greenlet @@ -14,3 +19,7 @@ python-pyudev python-qpid # dist:precise dnsmasq-base dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise,quantal +rabbitmq-server # NOPRIME +qpid # NOPRIME +sqlite3 +vlan From bbc54635c81db7cef552b34855d27e982f35d7e0 Mon Sep 17 00:00:00 2001 From: David Ripton Date: Mon, 8 Oct 2012 15:36:22 -0400 Subject: [PATCH 722/967] Install the python-netaddr package before we use it. It's used if installed and not used if not installed, which is confusing. So trying to install it should make things a bit less confusing. It's available under the same name in both Fedora and Ubuntu. Change-Id: I6785dd5e831e9cf2da2eae5b116a9f5859c68c6d --- files/apts/general | 1 + files/rpms/general | 1 + 2 files changed, 2 insertions(+) diff --git a/files/apts/general b/files/apts/general index be7bf98c..12a92e0c 100644 --- a/files/apts/general +++ b/files/apts/general @@ -20,3 +20,4 @@ tcpdump euca2ools # only for testing client tar python-cmd2 # dist:precise +python-netaddr diff --git a/files/rpms/general b/files/rpms/general index 6d89d2ef..e4f143d1 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -5,6 +5,7 @@ git-core openssh-server psmisc pylint +python-netaddr python-pep8 python-pip python-unittest2 From da7b8091b9f62efaa79473e22ad7ea50736dded1 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 8 Oct 2012 18:12:14 -0500 Subject: [PATCH 723/967] Extract create_nova_conf() from init_nova() Allows creating a new nova.conf without wiping the database, say, during an upgrade. Change-Id: I7703b54bc3a32485b3cdab8d6185f93a6675040d --- lib/nova | 8 ++++++-- stack.sh | 2 ++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index 333695ea..378b1db2 100644 --- a/lib/nova +++ b/lib/nova @@ -12,6 +12,7 @@ # # install_nova # configure_nova +# create_nova_conf # init_nova # start_nova # stop_nova @@ -274,8 +275,8 @@ EOF' fi } -# init_nova() - Initialize databases, etc. -function init_nova() { +# create_nova_conf() - Create a new nova.conf file +function create_nova_conf() { # Remove legacy ``nova.conf`` rm -f $NOVA_DIR/bin/nova.conf @@ -352,7 +353,10 @@ function init_nova() { # Attempt to convert flags to options add_nova_opt ${I//--} done +} +# init_nova() - Initialize databases, etc. +function init_nova() { # Nova Database # ------------- diff --git a/stack.sh b/stack.sh index 118c2ef8..35da9354 100755 --- a/stack.sh +++ b/stack.sh @@ -1728,6 +1728,8 @@ fi if is_service_enabled nova; then echo_summary "Configuring Nova" + # Rebuild the config file from scratch + create_nova_conf init_nova fi From 59d602e743b312cfa73b8c1ec9421a91f4867358 Mon Sep 17 00:00:00 2001 From: Chuck Short Date: Tue, 23 Oct 2012 13:03:38 -0500 Subject: [PATCH 724/967] Add new ubuntu release Raring is the new codename for the next Ubuntu release. 
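For reference (assuming the usual lsb_release-based detection in functions),
DISTRO on Ubuntu is the release codename, which is what the updated pattern
below has to match:

    $ lsb_release -cs
    raring
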
Change-Id: I53b5b4d23c0974427fbf0026a55f7b98b0fc3c76 Signed-off-by: Chuck Short --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 3fc3204d..8fb219c9 100755 --- a/stack.sh +++ b/stack.sh @@ -97,7 +97,7 @@ disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|f16|f17) ]]; then +if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then echo "If you wish to run this script anyway run with FORCE=yes" From 5a4148d33adbbae7fc178ba79ee86927a6d7f1c7 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 23 Oct 2012 15:47:01 -0500 Subject: [PATCH 725/967] Fix quantum typo Change-Id: Ib6343f5c6b7dedb8bda33110460372bc52b6c6a7 --- lib/quantum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/quantum b/lib/quantum index 0b266224..f9e17825 100644 --- a/lib/quantum +++ b/lib/quantum @@ -50,7 +50,7 @@ function quantum_setup_external_bridge() { } function is_quantum_ovs_base_plugin() { - local plguin=$1 + local plugin=$1 if [[ ",openvswitch,ryu," =~ ,${plugin}, ]]; then return 0 fi From 1331445b6f360f975e2304a2e063737cc1db4036 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 23 Oct 2012 15:09:50 -0500 Subject: [PATCH 726/967] Clean up local state paths Puts additional state and lock paths for swift, quantum and nova into $DATA_DIR/project that can be set independently of $DEST. Change-Id: I2cca701856d3caa7fe472fefdd8b070a3c7f3adf --- lib/nova | 1 + stack.sh | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index dbfc2947..f4288955 100644 --- a/lib/nova +++ b/lib/nova @@ -316,6 +316,7 @@ function init_nova() { fi if [ -n "$NOVA_STATE_PATH" ]; then add_nova_opt "state_path=$NOVA_STATE_PATH" + add_nova_opt "lock_path=$NOVA_STATE_PATH" fi if [ -n "$NOVA_INSTANCES_PATH" ]; then add_nova_opt "instances_path=$NOVA_INSTANCES_PATH" diff --git a/stack.sh b/stack.sh index 3fc3204d..17025092 100755 --- a/stack.sh +++ b/stack.sh @@ -503,7 +503,7 @@ fi # Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects. # Default is the common DevStack data directory. -SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DEST}/data/swift} +SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift} # Set ``SWIFT_CONFIG_DIR`` to the location of the configuration files. # Default is ``/etc/swift``. 
@@ -1439,6 +1439,7 @@ if is_service_enabled q-dhcp; then # Set debug iniset $Q_DHCP_CONF_FILE DEFAULT debug True iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $Q_DHCP_CONF_FILE DEFAULT state_path $DATA_DIR/quantum quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url From 0921c453e4cf3bb3bde3133220bbe1b46ca50b98 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Tue, 23 Oct 2012 17:10:56 -0700 Subject: [PATCH 727/967] Remove unused nova.conf option image_service FLAGS.image_service was removed in: [3aaa0b103447d56f8d3b259c693cd9a3a8dcbe36] Cleanup of image service code Change-Id: Ie566f972e2c196228cd83edda0924b5a679a63a6 --- lib/nova | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/nova b/lib/nova index dbfc2947..dbdf2c2a 100644 --- a/lib/nova +++ b/lib/nova @@ -299,7 +299,6 @@ function init_nova() { add_nova_opt "libvirt_type=$LIBVIRT_TYPE" add_nova_opt "libvirt_cpu_mode=none" add_nova_opt "instance_name_template=${INSTANCE_NAME_PREFIX}%08x" - add_nova_opt "image_service=nova.image.glance.GlanceImageService" if is_service_enabled n-api; then add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS" From 61bb2c1b5c5fe406300fd184a2249a0b0c9bb622 Mon Sep 17 00:00:00 2001 From: Jiajun Liu Date: Fri, 19 Oct 2012 09:48:30 +0800 Subject: [PATCH 728/967] clear screen rc file every time you run stack.sh fixes bug 1032022. Devstack will not update service's start up command if the command already exists in screen rc files due to previous deployment. There is no way to clear a service's start up command so this would be a problem if the service's start up command changes between different deployment. Clear the content of screen rc file every time you run stack.sh to deploy openstack can solve this problem. Change-Id: I18d0700bb2169bdb0dadfa7982168aef2eff8b22 --- stack.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/stack.sh b/stack.sh index 774c454c..4904576a 100755 --- a/stack.sh +++ b/stack.sh @@ -1022,6 +1022,11 @@ if [ -z "$SCREEN_HARDSTATUS" ]; then SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})' fi +# Clear screen rc file +SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc +if [[ -e $SCREENRC ]]; then + echo -n > $SCREENRC +fi # Create a new named screen to run processes in screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash sleep 1 From f36afe587b1cdef02797cc5fb36b395b3ff682aa Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 4 Oct 2012 18:00:10 +0200 Subject: [PATCH 729/967] Add ceilometer-api to service catalog This fixes bug #1060344 Change-Id: I0fee6b4660f564c4f8d62274ed52fa4b367f67da Signed-off-by: Julien Danjou --- files/keystone_data.sh | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 7da07aaa..9520b177 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -262,6 +262,21 @@ if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then fi fi +if [[ "$ENABLED_SERVICES" =~ "ceilometer-api" ]]; then + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + CEILOMETER_SERVICE=$(get_id keystone service-create \ + --name=ceilometer \ + --type=metering \ + --description="Ceilometer Service") + keystone endpoint-create \ + --region RegionOne \ + --service_id $CEILOMETER_SERVICE \ + --publicurl "http://$SERVICE_HOST:8777/" \ + --adminurl "http://$SERVICE_HOST:8777/" \ + --internalurl "http://$SERVICE_HOST:8777/" + fi +fi + # EC2 if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then From 
4b3e4e529f4c2b78064f6153e690babff77abeaa Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 24 Oct 2012 16:32:01 +0200 Subject: [PATCH 730/967] ceilometer: build the configuration file from scratch Stop using the one from nova. Fix the keystone_authtoken default auth_protocol. Change-Id: Id9b30a4105a7187966c3953958477967cf58fe9b --- lib/ceilometer | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 7154ccb4..043f4814 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -8,7 +8,6 @@ # - functions # - OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL set for admin credentials # - DEST set to the destination directory -# - NOVA_CONF set to the nova configuration file # stack.sh # --------- @@ -36,8 +35,7 @@ else CEILOMETER_BIN_DIR=/usr/local/bin fi CEILOMETER_CONF_DIR=/etc/ceilometer -CEILOMETER_AGENT_CONF=$CEILOMETER_CONF_DIR/ceilometer-agent.conf -CEILOMETER_COLLECTOR_CONF=$CEILOMETER_CONF_DIR/ceilometer-collector.conf +CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api # cleanup_ceilometer() - Remove residual data files, anything left over from previous @@ -57,13 +55,14 @@ function configure_ceilometer() { [ ! -d $CEILOMETER_API_LOG_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_API_LOG_DIR sudo chown $USER $CEILOMETER_API_LOG_DIR - # ceilometer confs are copy of /etc/nova/nova.conf which must exist first - grep -v format_string $NOVA_CONF > $CEILOMETER_AGENT_CONF - iniset $CEILOMETER_AGENT_CONF DEFAULT rpc_backend 'ceilometer.openstack.common.rpc.impl_kombu' + iniset $CEILOMETER_CONF DEFAULT rpc_backend 'ceilometer.openstack.common.rpc.impl_kombu' + iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications,glance_notifications' + iniset $CEILOMETER_CONF DEFAULT verbose True + iniset $CEILOMETER_CONF DEFAULT rabbit_host $RABBIT_HOST + iniset $CEILOMETER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD + iniset $CEILOMETER_CONF DEFAULT sql_connection $BASE_SQL_CONN/nova?charset=utf8 - grep -v format_string $NOVA_CONF > $CEILOMETER_COLLECTOR_CONF - iniset $CEILOMETER_COLLECTOR_CONF DEFAULT rpc_backend 'ceilometer.openstack.common.rpc.impl_kombu' - iniset $CEILOMETER_COLLECTOR_CONF DEFAULT notification_topics 'notifications,glance_notifications' + iniset $CEILOMETER_CONF keystone_authtoken auth_protocol http } # install_ceilometer() - Collect source and prepare @@ -73,10 +72,10 @@ function install_ceilometer() { # start_ceilometer() - Start running processes, including screen function start_ceilometer() { - screen_it ceilometer-acompute "cd $CEILOMETER_DIR && sg libvirtd \"$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_AGENT_CONF\"" - screen_it ceilometer-acentral "export OS_USERNAME=$OS_USERNAME OS_PASSWORD=$OS_PASSWORD OS_TENANT_NAME=$OS_TENANT_NAME OS_AUTH_URL=$OS_AUTH_URL && cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_AGENT_CONF" - screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_COLLECTOR_CONF" - screen_it ceilometer-api "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR" + screen_it ceilometer-acompute "cd $CEILOMETER_DIR && sg libvirtd \"$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" + screen_it ceilometer-acentral "export OS_USERNAME=$OS_USERNAME OS_PASSWORD=$OS_PASSWORD OS_TENANT_NAME=$OS_TENANT_NAME OS_AUTH_URL=$OS_AUTH_URL && 
cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF" + screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF" + screen_it ceilometer-api "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" } # stop_ceilometer() - Stop running processes From 1fcc6a1fc9e58cda0501c2bc99d9cc996ce8681a Mon Sep 17 00:00:00 2001 From: Eoghan Glynn Date: Thu, 25 Oct 2012 14:57:14 +0000 Subject: [PATCH 731/967] Modify nova config for ceilometer if enabled If ceilometer is enabled, nova instance usage auditing and the appropriate notification drivers should be automatically enabled, as opposed to relying on manual reconfiguration and service restart. Change-Id: I41643a1418a87942be7b2b7979797ff5eb7e5479 --- lib/nova | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/nova b/lib/nova index 297a3380..95d5d87c 100644 --- a/lib/nova +++ b/lib/nova @@ -341,6 +341,13 @@ function create_nova_conf() { # Show user_name and project_name instead of user_id and project_id add_nova_opt "logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" fi + if is_service_enabled ceilometer; then + add_nova_opt "instance_usage_audit=True" + add_nova_opt "instance_usage_audit_period=hour" + add_nova_opt "notification_driver=nova.openstack.common.notifier.rabbit_notifier" + add_nova_opt "notification_driver=ceilometer.compute.nova_notifier" + fi + # Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS`` if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; then From adeefbf3e704522b1d2959b28112aab9f049ffbf Mon Sep 17 00:00:00 2001 From: Terry Wilson Date: Thu, 25 Oct 2012 22:20:54 -0400 Subject: [PATCH 732/967] Correct misspelled OVS plugin variable Fixes bug 1071567, the broken setup of external gateway IP/routes when using quantum with the openvswitch plugin due to a misspelled variable name. Change-Id: I950701a55f770cf075bca500e5a935628f6bf14a --- lib/quantum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/quantum b/lib/quantum index 0b266224..f9e17825 100644 --- a/lib/quantum +++ b/lib/quantum @@ -50,7 +50,7 @@ function quantum_setup_external_bridge() { } function is_quantum_ovs_base_plugin() { - local plguin=$1 + local plugin=$1 if [[ ",openvswitch,ryu," =~ ,${plugin}, ]]; then return 0 fi From b4907b2eeef3372d8f709c4640573d1656739def Mon Sep 17 00:00:00 2001 From: Takaaki Suzuki Date: Thu, 25 Oct 2012 14:23:58 +0900 Subject: [PATCH 733/967] change Quantum network plugin set part Currently in devstack, Q_PLUGIN can be set to only one of well-known quantum plugins ("openvswitch", "linuxbridge", "ryu"). Setting it to a custom plugin causes stack.sh to exit with an error. This commit lets you run devstack with Q_PLUGIN set to a custom Quantum plugin. Change-Id: Iafe07cf162b5b07bee120b0c571149c374004bb3 Signed-off-by: Takaaki Suzuki --- stack.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 9db1262e..f0f94107 100755 --- a/stack.sh +++ b/stack.sh @@ -1266,8 +1266,10 @@ if is_service_enabled quantum; then Q_PLUGIN_CONF_FILENAME=ryu.ini Q_DB_NAME="ovs_quantum" Q_PLUGIN_CLASS="quantum.plugins.ryu.ryu_quantum_plugin.RyuQuantumPluginV2" - else - echo "Unknown Quantum plugin '$Q_PLUGIN'.. 
exiting" + fi + + if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then + echo "Quantum plugin not set.. exiting" exit 1 fi From fda946e3eb46fc59ff79f83e68a8488141f3444e Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Wed, 24 Oct 2012 17:26:02 -0700 Subject: [PATCH 734/967] Added ping_check and ssh_check functions and use it in exercise.sh Change-Id: I69d41c9db527f60f250b6af36b2d8e9d0dd39684 --- exercises/boot_from_volume.sh | 10 ++-------- exercises/euca.sh | 5 +---- exercises/floating_ips.sh | 35 ++++++++-------------------------- exercises/volumes.sh | 16 ++-------------- functions | 36 +++++++++++++++++++++++++++++++++++ openrc | 3 +++ stack.sh | 8 ++++---- stackrc | 7 +++++-- 8 files changed, 61 insertions(+), 59 deletions(-) diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 183efa63..460b50cf 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -139,16 +139,10 @@ fi nova add-floating-ip $VOL_VM_UUID $FLOATING_IP # Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds -if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then - echo "Couldn't ping volume-backed server with floating ip" - exit 1 -fi +ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT # Make sure our volume-backed instance launched -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success ; do sleep 1; done"; then - echo "server didn't become ssh-able!" - exit 1 -fi +ssh_check "$PUBLIC_NETWORK_NAME" $KEY_FILE $FLOATING_IP $DEFAULT_INSTANCE_USER $ACTIVE_TIMEOUT # Remove floating ip from volume-backed instance nova remove-floating-ip $VOL_VM_UUID $FLOATING_IP diff --git a/exercises/euca.sh b/exercises/euca.sh index 58b5d914..b1214930 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -130,10 +130,7 @@ euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \ die "Failure authorizing rule in $SECGROUP" # Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds -if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then - echo "Couldn't ping server with floating ip" - exit 1 -fi +ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT # Revoke pinging euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \ diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 77f020e2..67878787 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -118,23 +118,10 @@ if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | g fi # get the IP of the server -IP=`nova show $VM_UUID | grep "private network" | get_field 2` +IP=`nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2` die_if_not_set IP "Failure retrieving IP address" -# for single node deployments, we can ping private ips -MULTI_HOST=`trueorfalse False $MULTI_HOST` -if [ "$MULTI_HOST" = "False" ]; then - # sometimes the first ping fails (10 seconds isn't enough time for the VM's - # network to respond?), so let's ping for a default of 15 seconds with a - # timeout of a second for each ping. - if ! timeout $BOOT_TIMEOUT sh -c "while ! 
ping -c1 -w1 $IP; do sleep 1; done"; then - echo "Couldn't ping server" - exit 1 - fi -else - # On a multi-host system, without vm net access, do a sleep to wait for the boot - sleep $BOOT_TIMEOUT -fi +ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT # Security Groups & Floating IPs # ------------------------------ @@ -166,10 +153,7 @@ nova add-floating-ip $VM_UUID $FLOATING_IP || \ die "Failure adding floating IP $FLOATING_IP to $NAME" # test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds -if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then - echo "Couldn't ping server with floating ip" - exit 1 -fi +ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT # Allocate an IP from second floating pool TEST_FLOATING_IP=`nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1` @@ -187,19 +171,16 @@ nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || die "Failure deletin # FIXME (anthony): make xs support security groups if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then # test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then - print "Security group failure - ping should not be allowed!" - echo "Couldn't ping server with floating ip" - exit 1 - fi + ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT fi -# de-allocate the floating ip -nova floating-ip-delete $FLOATING_IP || die "Failure deleting floating IP $FLOATING_IP" - # Delete second floating IP nova floating-ip-delete $TEST_FLOATING_IP || die "Failure deleting floating IP $TEST_FLOATING_IP" + +# de-allocate the floating ip +nova floating-ip-delete $FLOATING_IP || die "Failure deleting floating IP $FLOATING_IP" + # Shutdown the server nova delete $VM_UUID || die "Failure deleting instance $NAME" diff --git a/exercises/volumes.sh b/exercises/volumes.sh index ffa12c46..1c73786e 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -119,23 +119,11 @@ if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | g fi # get the IP of the server -IP=`nova show $VM_UUID | grep "private network" | get_field 2` +IP=`nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2` die_if_not_set IP "Failure retrieving IP address" # for single node deployments, we can ping private ips -MULTI_HOST=`trueorfalse False $MULTI_HOST` -if [ "$MULTI_HOST" = "False" ]; then - # sometimes the first ping fails (10 seconds isn't enough time for the VM's - # network to respond?), so let's ping for a default of 15 seconds with a - # timeout of a second for each ping. - if ! timeout $BOOT_TIMEOUT sh -c "while ! 
ping -c1 -w1 $IP; do sleep 1; done"; then - echo "Couldn't ping server" - exit 1 - fi -else - # On a multi-host system, without vm net access, do a sleep to wait for the boot - sleep $BOOT_TIMEOUT -fi +ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT # Volumes # ------- diff --git a/functions b/functions index 0d0df51f..dbe9d30a 100644 --- a/functions +++ b/functions @@ -849,6 +849,42 @@ function yum_install() { yum install -y "$@" } +# ping check +# Uses globals ``ENABLED_SERVICES`` +function ping_check() { + _ping_check_novanet "$1" $2 $3 +} + +# ping check for nova +# Uses globals ``MULTI_HOST``, ``PRIVATE_NETWORK`` +function _ping_check_novanet() { + local from_net=$1 + local ip=$2 + local boot_timeout=$3 + MULTI_HOST=`trueorfalse False $MULTI_HOST` + if [[ "$MULTI_HOST" = "True" && "$from_net" = "$PRIVATE_NETWORK_NAME" ]]; then + sleep $boot_timeout + return + fi + if ! timeout $boot_timeout sh -c "while ! ping -c1 -w1 $ip; do sleep 1; done"; then + echo "Couldn't ping server" + exit 1 + fi +} + +# ssh check +function ssh_check() { + local NET_NAME=$1 + local KEY_FILE=$2 + local FLOATING_IP=$3 + local DEFAULT_INSTANCE_USER=$4 + local ACTIVE_TIMEOUT=$5 + local probe_cmd = "" + if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success ; do sleep 1; done"; then + echo "server didn't become ssh-able!" + exit 1 + fi +} # Restore xtrace $XTRACE diff --git a/openrc b/openrc index 08ef98be..0a6a2150 100644 --- a/openrc +++ b/openrc @@ -72,3 +72,6 @@ export COMPUTE_API_VERSION=${COMPUTE_API_VERSION:-$NOVA_VERSION} # set log level to DEBUG (helps debug issues) # export KEYSTONECLIENT_DEBUG=1 # export NOVACLIENT_DEBUG=1 + +# set qunatum debug command +export TEST_CONFIG_FILE=/etc/quantum/debug.ini diff --git a/stack.sh b/stack.sh index 3fc3204d..8797a63e 100755 --- a/stack.sh +++ b/stack.sh @@ -1950,14 +1950,14 @@ if is_service_enabled q-svc; then # Create a small network # Since quantum command is executed in admin context at this point, # ``--tenant_id`` needs to be specified. - NET_ID=$(quantum net-create --tenant_id $TENANT_ID net1 | grep ' id ' | get_field 2) + NET_ID=$(quantum net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) if is_service_enabled q-l3; then # Create a router, and add the private subnet as one of its interfaces ROUTER_ID=$(quantum router-create --tenant_id $TENANT_ID router1 | grep ' id ' | get_field 2) quantum router-interface-add $ROUTER_ID $SUBNET_ID # Create an external network, and a subnet. 
Configure the external network as router gw - EXT_NET_ID=$(quantum net-create ext_net -- --router:external=True | grep ' id ' | get_field 2) + EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2) EXT_GW_IP=$(quantum subnet-create --ip_version 4 $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) quantum router-gateway-set $ROUTER_ID $EXT_NET_ID if is_quantum_ovs_base_plugin "$Q_PLUGIN" && [[ "$Q_USE_NAMESPACE" = "True" ]]; then @@ -1975,10 +1975,10 @@ if is_service_enabled q-svc; then elif is_service_enabled mysql && is_service_enabled nova; then # Create a small network - $NOVA_BIN_DIR/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS + $NOVA_BIN_DIR/nova-manage network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS # Create some floating ips - $NOVA_BIN_DIR/nova-manage floating create $FLOATING_RANGE + $NOVA_BIN_DIR/nova-manage floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK # Create a second pool $NOVA_BIN_DIR/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL diff --git a/stackrc b/stackrc index f9a41bd7..e587efa8 100644 --- a/stackrc +++ b/stackrc @@ -136,10 +136,10 @@ fi #IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img" # cirros full disk image # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of -# which may be set in ``localrc``. Also allow ``DEFAULT_IMAGE_NAME`` and +# which may be set in ``localrc``. Also allow ``DEFAULT_IMAGE_NAME`` and # ``IMAGE_URLS`` to be set directly in ``localrc``. case "$VIRT_DRIVER" in - openvz) + openvz) DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-11.10-x86_64} IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-11.10-x86_64.tar.gz"};; libvirt) @@ -159,3 +159,6 @@ esac # 5Gb default volume backing file size VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-5130M} + +PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"} +PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"nova"} From e747efb3f6420f16028c3ed2a3ed731c77ab3f33 Mon Sep 17 00:00:00 2001 From: zhang-hare Date: Fri, 26 Oct 2012 15:32:48 +0800 Subject: [PATCH 735/967] Fix the typo of Q_USE_ROOTWRAP expression. Fixes bug 1071636 Change-Id: If384d1d9205300e11722e5280107b773368825ba --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index e58c4392..80860abb 100755 --- a/stack.sh +++ b/stack.sh @@ -338,7 +338,7 @@ Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum} Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} # Use namespace or not Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} -Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP=:-True} +Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} # Meta data IP Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP} From 4de55e9be40795b053ee658a3a06408d0f0e144e Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Fri, 26 Oct 2012 12:24:28 -0400 Subject: [PATCH 736/967] Add a variable to control keystone token format This change lets the developer running devstack control the token format used by keystone through setting KEYSTONE_TOKEN_FORMAT in their localrc file. 
Change-Id: Ic1265fcb10b8de112891f61d5e07312322148ec2 Signed-off-by: Doug Hellmann --- lib/keystone | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/keystone b/lib/keystone index 06920f84..73d82c58 100644 --- a/lib/keystone +++ b/lib/keystone @@ -47,6 +47,7 @@ KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-http} KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST} KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-http} +KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-PKI} # Entry Points @@ -82,6 +83,7 @@ function configure_keystone() { # Rewrite stock ``keystone.conf`` iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN" + iniset $KEYSTONE_CONF signing token_format "$KEYSTONE_TOKEN_FORMAT" iniset $KEYSTONE_CONF sql connection "$BASE_SQL_CONN/keystone?charset=utf8" iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2" sed -e " From 867cf42ebdc7e99405615ba455621b00125c46ed Mon Sep 17 00:00:00 2001 From: David Kranz Date: Fri, 26 Oct 2012 13:25:19 -0400 Subject: [PATCH 737/967] Clean database when configuring ceilmeter. Change-Id: If9e35f645b35fbe2e2550930da6b909a40f309b1 --- lib/ceilometer | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 043f4814..b0f03778 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -41,8 +41,7 @@ CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api # cleanup_ceilometer() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_ceilometer() { - # This function intentionally left blank - : + mongo ceilometer --eval "db.dropDatabase();" } # configure_ceilometer() - Set config files, create data dirs, etc @@ -63,6 +62,7 @@ function configure_ceilometer() { iniset $CEILOMETER_CONF DEFAULT sql_connection $BASE_SQL_CONN/nova?charset=utf8 iniset $CEILOMETER_CONF keystone_authtoken auth_protocol http + cleanup_ceilometer } # install_ceilometer() - Collect source and prepare From bad9d89fa0a785a965abaafd423f1e6b9f47ebd3 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Thu, 25 Oct 2012 14:49:47 +1300 Subject: [PATCH 738/967] If heat is enabled, replace nova flavors. All heat users will need to run heat/tools/nova_create_flavors.sh as an admin user. This change runs nova_create_flavors.sh if heat is enabled. This saves the hassle of switching to an admin user and running this every time devstack is started. Flavors are changed in heat_init, so heat_init is deferred until after nova is running. 
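Once the stack is up, the effect can be spot-checked with the same command the summary output below suggests (a sketch; it assumes admin credentials have already been sourced from openrc):

    # List the flavors installed by heat/tools/nova_create_flavors.sh
    nova flavor-list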
Change-Id: I4576c83f5ef55809567e40e56c25eb0e1bbe6d45 --- lib/heat | 1 + stack.sh | 20 ++++++++++---------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/lib/heat b/lib/heat index 80e3f7ab..7fb5fcc5 100644 --- a/lib/heat +++ b/lib/heat @@ -189,6 +189,7 @@ function init_heat() { mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE heat CHARACTER SET utf8;' $HEAT_DIR/bin/heat-db-setup $os_PACKAGE -r $MYSQL_PASSWORD + $HEAT_DIR/tools/nova_create_flavors.sh } # install_heat() - Collect source and prepare diff --git a/stack.sh b/stack.sh index 9db1262e..90e612cb 100755 --- a/stack.sh +++ b/stack.sh @@ -1889,15 +1889,6 @@ else fi -# Heat -# ---- - -if is_service_enabled heat; then - echo_summary "Configuring Heat" - init_heat -fi - - # Launch Services # =============== @@ -2017,8 +2008,12 @@ screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF is_service_enabled swift3 || \ screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore" -# launch heat engine, api and metadata + +# Configure and launch heat engine, api and metadata if is_service_enabled heat; then + # Initialize heat, including replacing nova flavors + echo_summary "Configuring Heat" + init_heat echo_summary "Starting Heat" start_heat fi @@ -2090,6 +2085,11 @@ if is_service_enabled horizon; then echo "Horizon is now available at http://$SERVICE_HOST/" fi +# Warn that the default flavors have been changed by Heat +if is_service_enabled heat; then + echo "Heat has replaced the default flavors. View by running: nova flavor-list" +fi + # If Keystone is present you can point ``nova`` cli to this server if is_service_enabled key; then echo "Keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/" From bc071bcef0bcb726e49f9ccaa2063f58b7eaf96d Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 1 Oct 2012 14:06:44 -0500 Subject: [PATCH 739/967] Configure PKI cache dirs * Configure Cinder, Glance, Keystone, Nova to put cached credentials from keystone.auth_token into /var/cache/ It is not obvious to me that having each of these service share a credentials cache is a good idea. It does appear to work but this patch takes the conservative approach of putting each service's cache in a distinct directory. More importantly it gets them out of $HOME! 
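The per-service hunks below all follow the same shape; a condensed sketch of the recurring pattern is shown here with placeholder names ($API_PASTE_INI, $SERVICE_AUTH_CACHE_DIR) -- the exact config file and section vary per service, as the individual hunks show:

    # Only needed when Keystone issues PKI tokens
    if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then
        # Point the auth_token middleware at a per-service signing dir...
        iniset $API_PASTE_INI filter:authtoken signing_dir $SERVICE_AUTH_CACHE_DIR
        # ...and make sure the directory exists and is writable by the stack user
        sudo mkdir -p $SERVICE_AUTH_CACHE_DIR
        sudo chown `whoami` $SERVICE_AUTH_CACHE_DIR
    fi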
Change-Id: If88088fc287a2f2f4f3e34f6d9be9de3da7ee00d --- lib/cinder | 13 ++++++++++++- lib/glance | 16 ++++++++++++++++ lib/keystone | 18 ++++++++++++++---- lib/nova | 11 +++++++++++ stack.sh | 2 +- 5 files changed, 54 insertions(+), 6 deletions(-) diff --git a/lib/cinder b/lib/cinder index 08c840e8..578e2ad7 100644 --- a/lib/cinder +++ b/lib/cinder @@ -4,8 +4,8 @@ # Dependencies: # - functions # - DEST, DATA_DIR must be defined -# - KEYSTONE_AUTH_* must be defined # SERVICE_{TENANT_NAME|PASSWORD} must be defined +# ``KEYSTONE_TOKEN_FORMAT`` must be defined # stack.sh # --------- @@ -30,6 +30,7 @@ CINDERCLIENT_DIR=$DEST/python-cinderclient CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder} CINDER_CONF_DIR=/etc/cinder CINDER_CONF=$CINDER_CONF_DIR/cinder.conf +CINDER_AUTH_CACHE_DIR=${CINDER_AUTH_CACHE_DIR:-/var/cache/cinder} # Support entry points installation of console scripts if [[ -d $CINDER_DIR/bin ]]; then @@ -106,6 +107,10 @@ function configure_cinder() { iniset $CINDER_API_PASTE_INI filter:authtoken admin_user cinder iniset $CINDER_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD + if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then + iniset $CINDER_API_PASTE_INI filter:authtoken signing_dir $CINDER_AUTH_CACHE_DIR + fi + cp $CINDER_DIR/etc/cinder/cinder.conf.sample $CINDER_CONF iniset $CINDER_CONF DEFAULT auth_strategy keystone iniset $CINDER_CONF DEFAULT verbose True @@ -186,6 +191,12 @@ function init_cinder() { done fi fi + + if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then + # Create cache dir + sudo mkdir -p $CINDER_AUTH_CACHE_DIR + sudo chown `whoami` $CINDER_AUTH_CACHE_DIR + fi } # install_cinder() - Collect source and prepare diff --git a/lib/glance b/lib/glance index 070c80d1..468d9e96 100644 --- a/lib/glance +++ b/lib/glance @@ -6,6 +6,7 @@ # ``DEST``, ``DATA_DIR`` must be defined # ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # ``SERVICE_HOST`` +# ``KEYSTONE_TOKEN_FORMAT`` must be defined # ``stack.sh`` calls the entry points in this order: # @@ -31,6 +32,7 @@ GLANCE_DIR=$DEST/glance GLANCECLIENT_DIR=$DEST/python-glanceclient GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images} +GLANCE_AUTH_CACHE_DIR=${GLANCE_AUTH_CACHE_DIR:-/var/cache/glance} GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} GLANCE_REGISTRY_CONF=$GLANCE_CONF_DIR/glance-registry.conf @@ -91,6 +93,9 @@ function configure_glance() { iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then + iniset $GLANCE_REGISTRY_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/registry + fi cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF iniset $GLANCE_API_CONF DEFAULT debug True @@ -114,6 +119,9 @@ function configure_glance() { iniset $GLANCE_API_CONF DEFAULT rabbit_host $RABBIT_HOST iniset $GLANCE_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD fi + if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then + iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api + fi cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI @@ -153,6 +161,14 @@ function init_glance() { mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE glance CHARACTER SET utf8;' $GLANCE_BIN_DIR/glance-manage db_sync + + if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then + # Create 
cache dir + sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api + sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/api + sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/registry + sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/registry + fi } # install_glanceclient() - Collect source and prepare diff --git a/lib/keystone b/lib/keystone index 73d82c58..36a0e664 100644 --- a/lib/keystone +++ b/lib/keystone @@ -32,13 +32,18 @@ set +o xtrace KEYSTONE_DIR=$DEST/keystone KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf +KEYSTONE_AUTH_CACHE_DIR=${KEYSTONE_AUTH_CACHE_DIR:-/var/cache/keystone} KEYSTONECLIENT_DIR=$DEST/python-keystoneclient -# Select the backend for Keystopne's service catalog +# Select the backend for Keystone's service catalog KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql} KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates +# Select Keystone's token format +# Choose from 'UUID' and 'PKI' +KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-PKI} + # Set Keystone interface configuration KEYSTONE_API_PORT=${KEYSTONE_API_PORT:-5000} KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} @@ -47,7 +52,6 @@ KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-http} KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST} KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-http} -KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-PKI} # Entry Points @@ -147,8 +151,14 @@ function init_keystone() { # Initialize keystone database $KEYSTONE_DIR/bin/keystone-manage db_sync - # Set up certificates - $KEYSTONE_DIR/bin/keystone-manage pki_setup + if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then + # Set up certificates + $KEYSTONE_DIR/bin/keystone-manage pki_setup + + # Create cache dir + sudo mkdir -p $KEYSTONE_AUTH_CACHE_DIR + sudo chown `whoami` $KEYSTONE_AUTH_CACHE_DIR + fi } # install_keystoneclient() - Collect source and prepare diff --git a/lib/nova b/lib/nova index 95d5d87c..b9afa3dc 100644 --- a/lib/nova +++ b/lib/nova @@ -7,6 +7,7 @@ # ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # ``LIBVIRT_TYPE`` must be defined # ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined +# ``KEYSTONE_TOKEN_FORMAT`` must be defined # ``stack.sh`` calls the entry points in this order: # @@ -32,6 +33,7 @@ NOVACLIENT_DIR=$DEST/python-novaclient NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova} # INSTANCES_PATH is the previous name for this NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}} +NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova} NOVA_CONF_DIR=/etc/nova NOVA_CONF=$NOVA_CONF_DIR/nova.conf @@ -174,6 +176,10 @@ function configure_nova() { " -i $NOVA_API_PASTE_INI fi + if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then + iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR + fi + if is_service_enabled n-cpu; then # Force IP forwarding on, just on case sudo sysctl -w net.ipv4.ip_forward=1 @@ -383,6 +389,11 @@ function init_nova() { $NOVA_BIN_DIR/nova-manage db sync fi + if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then + # Create cache dir + sudo mkdir -p $NOVA_AUTH_CACHE_DIR + sudo chown `whoami` $NOVA_AUTH_CACHE_DIR + fi } # install_novaclient() - Collect source and prepare diff --git a/stack.sh b/stack.sh index 7272ec0a..af8fbf62 100755 --- a/stack.sh +++ b/stack.sh @@ -2042,7 +2042,7 @@ fi if is_service_enabled g-reg; then echo_summary "Uploading images" - TOKEN=$(keystone token-get | grep ' id ' | get_field 2) + 
TOKEN=$(keystone token-get | grep ' id ' | get_field 2) # Option to upload legacy ami-tty, which works with xenserver if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then From 6e7e1c941a3ea9e2d30a6b59ccce2c5efa44b349 Mon Sep 17 00:00:00 2001 From: Andrew Laski Date: Wed, 31 Oct 2012 16:11:37 -0400 Subject: [PATCH 740/967] Don't enable osapi_volume if n-vol not enabled. Enabling cinder removes osapi_volume from enabled_apis in nova.conf but if neither cinder or n-vol are enabled it should not be there. It seems that volume code is being removed from nova so osapi_volume should not be enabled by default. Fixes bug #1073701 Change-Id: I626a941f434b1c8c1a73b32318e21c99445b5541 --- lib/nova | 2 ++ stackrc | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 95d5d87c..77979276 100644 --- a/lib/nova +++ b/lib/nova @@ -305,6 +305,8 @@ function create_nova_conf() { add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS" fi if is_service_enabled n-vol; then + NOVA_ENABLED_APIS="${NOVA_ENABLED_APIS},osapi_volume" + iniset $NOVA_CONF DEFAULT enabled_apis $NOVA_ENABLED_APIS add_nova_opt "volume_api_class=nova.volume.api.API" add_nova_opt "volume_group=$VOLUME_GROUP" add_nova_opt "volume_name_template=${VOLUME_NAME_PREFIX}%s" diff --git a/stackrc b/stackrc index e587efa8..283b2712 100644 --- a/stackrc +++ b/stackrc @@ -18,7 +18,7 @@ DEST=/opt/stack ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit # Set the default Nova APIs to enable -NOVA_ENABLED_APIS=ec2,osapi_compute,osapi_volume,metadata +NOVA_ENABLED_APIS=ec2,osapi_compute,metadata # Repositories # ------------ From ec0865127e6e8c772dcb0838d6ee5f113161c2f3 Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Thu, 1 Nov 2012 23:44:57 +0000 Subject: [PATCH 741/967] Ensure that tempest configuration happens last. * Tempest configuration relies on having other services such as Glance and Nova running and accepting requests. Previously, configuration was happening before these services were started. This change ensures that Tempest configuration is performed after the necessary services have been started. Change-Id: If0b6753dd51671fbc2a2cf6ad32ce9303d0a0479 --- stack.sh | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 29e49537..b148d534 100755 --- a/stack.sh +++ b/stack.sh @@ -935,9 +935,6 @@ fi if is_service_enabled cinder; then configure_cinder fi -if is_service_enabled tempest; then - configure_tempest -fi if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then setup_develop $RYU_DIR fi @@ -2055,6 +2052,13 @@ if is_service_enabled g-reg; then fi +# Configure Tempest last to ensure that the runtime configuration of +# the various OpenStack services can be queried. 
+if is_service_enabled tempest; then + configure_tempest +fi + + # Run local script # ================ From e88c0a20360ac0b9cef6ab5dde2101127a6c2da7 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 2 Nov 2012 16:59:03 -0500 Subject: [PATCH 742/967] Fix errors when commands return error text rather than data I'm looking at you euca2ools Change-Id: I82bfb8a3ee58fdc54ead6a285c9415593e741892 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index dbe9d30a..9500b2c7 100644 --- a/functions +++ b/functions @@ -570,7 +570,7 @@ function is_package_installed() { # is_set env-var function is_set() { local var=\$"$1" - if eval "[ -z $var ]"; then + if eval "[ -z \"$var\" ]"; then return 1 fi return 0 From 428af5a257310dbbae9bd7d3b9cd81f92ef7ba9a Mon Sep 17 00:00:00 2001 From: Terry Wilson Date: Thu, 1 Nov 2012 16:12:39 -0400 Subject: [PATCH 743/967] Add PostgreSQL support to devstack This patch adds an interface for supporting multiple database backend types and implemnts support for PostgreSQL. It also adds a function, use_exclusive_service, which serves as a base for enabling a service that conflicts with other services. The use_database function uses it, and it might also be useful for selecting messaging backends. MySQL is still selected by default. Tested on Fedora 17 and Ubuntu 12.04 with MySQL and PostgreSQL. Implements blueprint postgresql-support Change-Id: I4b1373e25676fd9a9809fe70cb4a6450a2479174 --- README.md | 9 +++ files/apts/postgresql | 1 + files/rpms/postgresql | 1 + functions | 15 +++++ lib/cinder | 9 +-- lib/database | 103 ++++++++++++++++++++++++++++++++ lib/databases/mysql | 93 +++++++++++++++++++++++++++++ lib/databases/postgresql | 70 ++++++++++++++++++++++ lib/glance | 9 +-- lib/heat | 7 ++- lib/keystone | 7 ++- lib/nova | 10 ++-- stack.sh | 126 ++++++++++----------------------------- unstack.sh | 7 +++ 14 files changed, 355 insertions(+), 112 deletions(-) create mode 100644 files/apts/postgresql create mode 100644 files/rpms/postgresql create mode 100644 lib/database create mode 100644 lib/databases/mysql create mode 100644 lib/databases/postgresql diff --git a/README.md b/README.md index 872b16b8..93107588 100644 --- a/README.md +++ b/README.md @@ -57,6 +57,15 @@ If the EC2 API is your cup-o-tea, you can create credentials and use euca2ools: You can override environment variables used in `stack.sh` by creating file name `localrc`. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host. +# Database Backend + +Multiple database backends are available. The available databases are defined in the lib/databases directory. +To choose a database backend, add a line to your `localrc` like: + + use_database postgresql + +By default, the mysql database backend is used. + # RPC Backend Multiple RPC backends are available. 
Currently, this diff --git a/files/apts/postgresql b/files/apts/postgresql new file mode 100644 index 00000000..bf19d397 --- /dev/null +++ b/files/apts/postgresql @@ -0,0 +1 @@ +python-psycopg2 diff --git a/files/rpms/postgresql b/files/rpms/postgresql new file mode 100644 index 00000000..bf19d397 --- /dev/null +++ b/files/rpms/postgresql @@ -0,0 +1 @@ +python-psycopg2 diff --git a/functions b/functions index dbe9d30a..917727d7 100644 --- a/functions +++ b/functions @@ -836,6 +836,21 @@ function upload_image() { fi } +# Toggle enable/disable_service for services that must run exclusive of each other +# $1 The name of a variable containing a space-separated list of services +# $2 The name of a variable in which to store the enabled service's name +# $3 The name of the service to enable +function use_exclusive_service { + local options=${!1} + local selection=$3 + out=$2 + [ -z $selection ] || [[ ! "$options" =~ "$selection" ]] && return 1 + for opt in $options;do + [[ "$opt" = "$selection" ]] && enable_service $opt || disable_service $opt + done + eval "$out=$selection" + return 0 +} # Wrapper for ``yum`` to set proxy environment variables # Uses globals ``OFFLINE``, ``*_proxy` diff --git a/lib/cinder b/lib/cinder index 08c840e8..51acfa56 100644 --- a/lib/cinder +++ b/lib/cinder @@ -112,7 +112,9 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT volume_group $VOLUME_GROUP iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s iniset $CINDER_CONF DEFAULT iscsi_helper tgtadm - iniset $CINDER_CONF DEFAULT sql_connection $BASE_SQL_CONN/cinder?charset=utf8 + local dburl + database_connection_url dburl cinder + iniset $CINDER_CONF DEFAULT sql_connection $dburl iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI iniset $CINDER_CONF DEFAULT root_helper "sudo ${CINDER_ROOTWRAP}" iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.openstack.volume.contrib.standard_extensions @@ -141,10 +143,9 @@ function init_cinder() { # Force nova volumes off NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/osapi_volume,//") - if is_service_enabled mysql; then + if is_service_enabled $DATABASE_BACKENDS; then # (re)create cinder database - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS cinder;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE cinder;' + recreate_database cinder utf8 # (re)create cinder database $CINDER_BIN_DIR/cinder-manage db sync diff --git a/lib/database b/lib/database new file mode 100644 index 00000000..66fb36fb --- /dev/null +++ b/lib/database @@ -0,0 +1,103 @@ +# lib/database +# Interface for interacting with different database backends + +# Dependencies: +# DATABASE_BACKENDS variable must contain a list of available database backends +# DATABASE_TYPE variable must be set + +# Each database must implement four functions: +# recreate_database_$DATABASE_TYPE +# install_database_$DATABASE_TYPE +# configure_database_$DATABASE_TYPE +# database_connection_url_$DATABASE_TYPE +# +# and call register_database $DATABASE_TYPE + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Register a database backend +# $1 The name of the database backend +function register_database { + [ -z "$DATABASE_BACKENDS" ] && DATABASE_BACKENDS=$1 || DATABASE_BACKENDS+=" $1" +} + +for f in $TOP_DIR/lib/databases/*; do source $f; done + +# Set the database type based on the configuration +function initialize_database_backends { + for backend in $DATABASE_BACKENDS; do + is_service_enabled $backend && 
DATABASE_TYPE=$backend + done + + [ -z "$DATABASE_TYPE" ] && return 1 + + # For backward-compatibility, read in the MYSQL_HOST/USER variables and use + # them as the default values for the DATABASE_HOST/USER variables. + MYSQL_HOST=${MYSQL_HOST:-localhost} + MYSQL_USER=${MYSQL_USER:-root} + + DATABASE_HOST=${DATABASE_HOST:-${MYSQL_HOST}} + DATABASE_USER=${DATABASE_USER:-${MYSQL_USER}} + + if [ -n "$MYSQL_PASSWORD" ]; then + DATABASE_PASSWORD=$MYSQL_PASSWORD + else + read_password DATABASE_PASSWORD "ENTER A PASSWORD TO USE FOR THE DATABASE." + fi + + # We configure Nova, Horizon, Glance and Keystone to use MySQL as their + # database server. While they share a single server, each has their own + # database and tables. + + # By default this script will install and configure MySQL. If you want to + # use an existing server, you can pass in the user/password/host parameters. + # You will need to send the same ``DATABASE_PASSWORD`` to every host if you are doing + # a multi-node DevStack installation. + + # NOTE: Don't specify ``/db`` in this string so we can use it for multiple services + BASE_SQL_CONN=${BASE_SQL_CONN:-${DATABASE_TYPE}://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOST} + + return 0 +} + +# Set the database backend to use +# $1 The name of the database backend to use (mysql, postgresql, ...) +function use_database { + use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1 && return 0 + ret=$? + echo "Invalid database '$1'" + return $ret +} + +# Recreate a given database +# $1 The name of the database +# $2 The character set/encoding of the database +function recreate_database { + local db=$1 + local charset=$2 + recreate_database_$DATABASE_TYPE $db $charset +} + +# Install the database +function install_database { + install_database_$DATABASE_TYPE +} + +# Configure and start the database +function configure_database { + configure_database_$DATABASE_TYPE +} + +# Generate an SQLAlchemy connection URL and store it in a variable +# $1 The variable name in which to store the connection URL +# $2 The name of the database +function database_connection_url { + local var=$1 + local db=$2 + database_connection_url_$DATABASE_TYPE $var $db +} + +# Restore xtrace +$XTRACE diff --git a/lib/databases/mysql b/lib/databases/mysql new file mode 100644 index 00000000..ed59290a --- /dev/null +++ b/lib/databases/mysql @@ -0,0 +1,93 @@ +# lib/mysql +# Functions to control the configuration and operation of the MySQL database backend + +# Dependencies: +# DATABASE_{HOST,USER,PASSWORD} must be defined + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +register_database mysql + +function recreate_database_mysql { + local db=$1 + local charset=$2 + mysql -u$DATABASE_USER -p$DATABASE_PASSWORD -e "DROP DATABASE IF EXISTS $db;" + mysql -u$DATABASE_USER -p$DATABASE_PASSWORD -e "CREATE DATABASE $db CHARACTER SET $charset;" +} + +function configure_database_mysql { + echo_summary "Configuring and starting MySQL" + + if [[ "$os_PACKAGE" = "deb" ]]; then + MY_CONF=/etc/mysql/my.cnf + MYSQL=mysql + else + MY_CONF=/etc/my.cnf + MYSQL=mysqld + fi + + # Start mysql-server + if [[ "$os_PACKAGE" = "rpm" ]]; then + # RPM doesn't start the service + start_service $MYSQL + # Set the root password - only works the first time + sudo mysqladmin -u root password $DATABASE_PASSWORD || true + fi + # Update the DB to give user ‘$DATABASE_USER’@’%’ full control of the all databases: + sudo mysql -uroot -p$DATABASE_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%' identified 
by '$DATABASE_PASSWORD';" + + # Now update ``my.cnf`` for some local needs and restart the mysql service + + # Change ‘bind-address’ from localhost (127.0.0.1) to any (0.0.0.0) + sudo sed -i '/^bind-address/s/127.0.0.1/0.0.0.0/g' $MY_CONF + + # Set default db type to InnoDB + if sudo grep -q "default-storage-engine" $MY_CONF; then + # Change it + sudo bash -c "source $TOP_DIR/functions; iniset $MY_CONF mysqld default-storage-engine InnoDB" + else + # Add it + sudo sed -i -e "/^\[mysqld\]/ a \ +default-storage-engine = InnoDB" $MY_CONF + fi + + restart_service $MYSQL +} + +function install_database_mysql { + if [[ "$os_PACKAGE" = "deb" ]]; then + # Seed configuration with mysql password so that apt-get install doesn't + # prompt us for a password upon install. + cat <$HOME/.my.cnf +[client] +user=$DATABASE_USER +password=$DATABASE_PASSWORD +host=$DATABASE_HOST +EOF + chmod 0600 $HOME/.my.cnf + fi + # Install mysql-server + install_package mysql-server +} + +function database_connection_url_mysql { + local output=$1 + local db=$2 + eval "$output=$BASE_SQL_CONN/$db?charset=utf8" +} + +# Restore xtrace +$XTRACE diff --git a/lib/databases/postgresql b/lib/databases/postgresql new file mode 100644 index 00000000..81989f2e --- /dev/null +++ b/lib/databases/postgresql @@ -0,0 +1,70 @@ +# lib/postgresql +# Functions to control the configuration and operation of the PostgreSQL database backend + +# Dependencies: +# DATABASE_{HOST,USER,PASSWORD} must be defined + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +register_database postgresql + +function recreate_database_postgresql { + local db=$1 + local charset=$2 + # Avoid unsightly error when calling dropdb when the database doesn't exist + psql -h$DATABASE_HOST -U$DATABASE_USER -dtemplate1 -c "DROP DATABASE IF EXISTS $db" + createdb -h $DATABASE_HOST -U$DATABASE_USER -l C -T template0 -E $charset $db +} + +function configure_database_postgresql { + echo_summary "Configuring and starting PostgreSQL" + if [[ "$os_PACKAGE" = "rpm" ]]; then + PG_HBA=/var/lib/pgsql/data/pg_hba.conf + PG_CONF=/var/lib/pgsql/data/postgresql.conf + else + PG_DIR=`find /etc/postgresql -name pg_hba.conf|xargs dirname` + PG_HBA=$PG_DIR/pg_hba.conf + PG_CONF=$PG_DIR/postgresql.conf + fi + sudo [ -e /var/lib/pgsql/data ] || sudo postgresql-setup initdb + # Listen on all addresses + sudo sed -i "/listen_addresses/s/.*/listen_addresses = '*'/" $PG_CONF + # Do password auth from all IPv4 clients + sudo sed -i "/^host/s/all\s\+127.0.0.1\/32\s\+ident/$DATABASE_USER\t0.0.0.0\/0\tpassword/" $PG_HBA + # Do password auth for all IPv6 clients + sudo sed -i "/^host/s/all\s\+::1\/128\s\+ident/$DATABASE_USER\t::0\/0\tpassword/" $PG_HBA + start_service postgresql + + # If creating the role fails, chances are it already existed. Try to alter it. + sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" || \ + sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" +} + +function install_database_postgresql { + echo_summary "Installing postgresql" + PGPASS=$HOME/.pgpass + if [[ ! 
-e $PGPASS ]]; then + cat < $PGPASS +*:*:*:$DATABASE_USER:$DATABASE_PASSWORD +EOF + chmod 0600 $PGPASS + else + sed -i "s/:root:\w\+/:root:$DATABASE_PASSWORD/" $PGPASS + fi + if [[ "$os_PACKAGE" = "rpm" ]]; then + install_package postgresql-server + else + install_package postgresql + fi +} + +function database_connection_url_postgresql { + local output=$1 + local db=$2 + eval "$output=$BASE_SQL_CONN/$db?client_encoding=utf8" +} + +# Restore xtrace +$XTRACE diff --git a/lib/glance b/lib/glance index 070c80d1..afddcd21 100644 --- a/lib/glance +++ b/lib/glance @@ -81,7 +81,9 @@ function configure_glance() { cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF iniset $GLANCE_REGISTRY_CONF DEFAULT debug True inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file - iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $BASE_SQL_CONN/glance?charset=utf8 + local dburl + database_connection_url dburl glance + iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $dburl iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST @@ -95,7 +97,7 @@ function configure_glance() { cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF iniset $GLANCE_API_CONF DEFAULT debug True inicomment $GLANCE_API_CONF DEFAULT log_file - iniset $GLANCE_API_CONF DEFAULT sql_connection $BASE_SQL_CONN/glance?charset=utf8 + iniset $GLANCE_API_CONF DEFAULT sql_connection $dburl iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/ iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ @@ -149,8 +151,7 @@ function init_glance() { mkdir -p $GLANCE_CACHE_DIR # (re)create glance database - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS glance;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE glance CHARACTER SET utf8;' + recreate_database glance utf8 $GLANCE_BIN_DIR/glance-manage db_sync } diff --git a/lib/heat b/lib/heat index 7fb5fcc5..d1f1c7cf 100644 --- a/lib/heat +++ b/lib/heat @@ -120,7 +120,9 @@ function configure_heat() { iniset $HEAT_ENGINE_CONF DEFAULT use_syslog $SYSLOG iniset $HEAT_ENGINE_CONF DEFAULT bind_host $HEAT_ENGINE_HOST iniset $HEAT_ENGINE_CONF DEFAULT bind_port $HEAT_ENGINE_PORT - iniset $HEAT_ENGINE_CONF DEFAULT sql_connection $BASE_SQL_CONN/heat?charset=utf8 + local dburl + database_connection_url dburl heat + iniset $HEAT_ENGINE_CONF DEFAULT sql_connection $dburl iniset $HEAT_ENGINE_CONF DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/random` if is_service_enabled rabbit; then @@ -185,8 +187,7 @@ function configure_heat() { function init_heat() { # (re)create heat database - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS heat;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE heat CHARACTER SET utf8;' + recreate_database heat utf8 $HEAT_DIR/bin/heat-db-setup $os_PACKAGE -r $MYSQL_PASSWORD $HEAT_DIR/tools/nova_create_flavors.sh diff --git a/lib/keystone b/lib/keystone index 73d82c58..ac15cbd0 100644 --- a/lib/keystone +++ b/lib/keystone @@ -82,9 +82,11 @@ function configure_keystone() { fi # Rewrite stock ``keystone.conf`` + local dburl + database_connection_url dburl keystone iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN" iniset $KEYSTONE_CONF signing token_format "$KEYSTONE_TOKEN_FORMAT" - iniset $KEYSTONE_CONF sql connection "$BASE_SQL_CONN/keystone?charset=utf8" + iniset $KEYSTONE_CONF sql connection 
$dburl iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2" sed -e " /^pipeline.*ec2_extension crud_/s|ec2_extension crud_extension|ec2_extension s3_extension crud_extension|; @@ -141,8 +143,7 @@ function configure_keystone() { # init_keystone() - Initialize databases, etc. function init_keystone() { # (Re)create keystone database - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone CHARACTER SET utf8;' + recreate_database keystone utf8 # Initialize keystone database $KEYSTONE_DIR/bin/keystone-manage db_sync diff --git a/lib/nova b/lib/nova index 77979276..49971754 100644 --- a/lib/nova +++ b/lib/nova @@ -296,7 +296,9 @@ function create_nova_conf() { add_nova_opt "s3_port=$S3_SERVICE_PORT" add_nova_opt "osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions" add_nova_opt "my_ip=$HOST_IP" - add_nova_opt "sql_connection=$BASE_SQL_CONN/nova?charset=utf8" + local dburl + database_connection_url dburl nova + add_nova_opt "sql_connection=$dburl" add_nova_opt "libvirt_type=$LIBVIRT_TYPE" add_nova_opt "libvirt_cpu_mode=none" add_nova_opt "instance_name_template=${INSTANCE_NAME_PREFIX}%08x" @@ -372,14 +374,12 @@ function init_nova() { # All nova components talk to a central database. We will need to do this step # only once for an entire cluster. - if is_service_enabled mysql && is_service_enabled nova; then + if is_service_enabled $DATABASE_BACKENDS && is_service_enabled nova; then # (Re)create nova database - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS nova;' - # Explicitly use latin1: to avoid lp#829209, nova expects the database to # use latin1 by default, and then upgrades the database to utf8 (see the # 082_essex.py in nova) - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE nova CHARACTER SET latin1;' + recreate_database nova latin1 # (Re)create nova database $NOVA_BIN_DIR/nova-manage db sync diff --git a/stack.sh b/stack.sh index 29e49537..b140385e 100755 --- a/stack.sh +++ b/stack.sh @@ -30,6 +30,8 @@ source $TOP_DIR/functions # and ``DISTRO`` GetDistro +# Import database library (must be loaded before stackrc which sources localrc) +source $TOP_DIR/lib/database # Settings # ======== @@ -37,15 +39,15 @@ GetDistro # ``stack.sh`` is customizable through setting environment variables. If you # want to override a setting you can set and export it:: # -# export MYSQL_PASSWORD=anothersecret +# export DATABASE_PASSWORD=anothersecret # ./stack.sh # -# You can also pass options on a single line ``MYSQL_PASSWORD=simple ./stack.sh`` +# You can also pass options on a single line ``DATABASE_PASSWORD=simple ./stack.sh`` # # Additionally, you can put any local variables into a ``localrc`` file:: # -# MYSQL_PASSWORD=anothersecret -# MYSQL_USER=hellaroot +# DATABASE_PASSWORD=anothersecret +# DATABASE_USER=hellaroot # # We try to have sensible defaults, so you should be able to run ``./stack.sh`` # in most cases. ``localrc`` is not distributed with DevStack and will never @@ -471,23 +473,20 @@ FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT} # With Quantum networking the NET_MAN variable is ignored. -# MySQL & (RabbitMQ or Qpid) -# -------------------------- - -# We configure Nova, Horizon, Glance and Keystone to use MySQL as their -# database server. While they share a single server, each has their own -# database and tables. 
+# Database configuration +# ---------------------- +# To select between database backends, add a line to localrc like: +# +# use_database postgresql +# +# The available database backends are defined in the DATABASE_BACKENDS +# variable defined in stackrc. By default, MySQL is enabled as the database +# backend. -# By default this script will install and configure MySQL. If you want to -# use an existing server, you can pass in the user/password/host parameters. -# You will need to send the same ``MYSQL_PASSWORD`` to every host if you are doing -# a multi-node DevStack installation. -MYSQL_HOST=${MYSQL_HOST:-localhost} -MYSQL_USER=${MYSQL_USER:-root} -read_password MYSQL_PASSWORD "ENTER A PASSWORD TO USE FOR MYSQL." +initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || echo "No database enabled" -# NOTE: Don't specify ``/db`` in this string so we can use it for multiple services -BASE_SQL_CONN=${BASE_SQL_CONN:-mysql://$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST} +# RabbitMQ or Qpid +# -------------------------- # Rabbit connection info if is_service_enabled rabbit; then @@ -746,32 +745,8 @@ elif is_service_enabled zeromq; then fi fi -if is_service_enabled mysql; then - - if [[ "$os_PACKAGE" = "deb" ]]; then - # Seed configuration with mysql password so that apt-get install doesn't - # prompt us for a password upon install. - cat <$HOME/.my.cnf -[client] -user=$MYSQL_USER -password=$MYSQL_PASSWORD -host=$MYSQL_HOST -EOF - chmod 0600 $HOME/.my.cnf - fi - # Install mysql-server - install_package mysql-server +if is_service_enabled $DATABASE_BACKENDS; then + install_database fi if is_service_enabled horizon; then @@ -993,46 +968,10 @@ elif is_service_enabled qpid; then fi -# Mysql -# ----- - -if is_service_enabled mysql; then - echo_summary "Configuring and starting MySQL" - - if [[ "$os_PACKAGE" = "deb" ]]; then - MY_CONF=/etc/mysql/my.cnf - MYSQL=mysql - else - MY_CONF=/etc/my.cnf - MYSQL=mysqld - fi - - # Start mysql-server - if [[ "$os_PACKAGE" = "rpm" ]]; then - # RPM doesn't start the service - start_service $MYSQL - # Set the root password - only works the first time - sudo mysqladmin -u root password $MYSQL_PASSWORD || true - fi - # Update the DB to give user ‘$MYSQL_USER’@’%’ full control of the all databases: - sudo mysql -uroot -p$MYSQL_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$MYSQL_USER'@'%' identified by '$MYSQL_PASSWORD';" - - # Now update ``my.cnf`` for some local needs and restart the mysql service - - # Change ‘bind-address’ from localhost (127.0.0.1) to any (0.0.0.0) - sudo sed -i '/^bind-address/s/127.0.0.1/0.0.0.0/g' $MY_CONF - - # Set default db type to InnoDB - if sudo grep -q "default-storage-engine" $MY_CONF; then - # Change it - sudo bash -c "source $TOP_DIR/functions; iniset $MY_CONF mysqld default-storage-engine InnoDB" - else - # Add it - sudo sed -i -e "/^\[mysqld\]/ a \ -default-storage-engine = InnoDB" $MY_CONF - fi - - restart_service $MYSQL +# Configure database +# ------------------ +if is_service_enabled $DATABASE_BACKENDS; then + configure_database fi if [ -z "$SCREEN_HARDSTATUS" ]; then @@ -1283,7 +1222,9 @@ if is_service_enabled quantum; then Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE - iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/$Q_DB_NAME?charset=utf8 + database_connection_url dburl $Q_DB_NAME + iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection $dburl + unset dburl 
Q_CONF_FILE=/etc/quantum/quantum.conf cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE @@ -1309,12 +1250,11 @@ if is_service_enabled q-svc; then cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE - if is_service_enabled mysql; then - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e "DROP DATABASE IF EXISTS $Q_DB_NAME;" - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e "CREATE DATABASE IF NOT EXISTS $Q_DB_NAME CHARACTER SET utf8;" - else - echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." - exit 1 + if is_service_enabled $DATABASE_BACKENDS; then + recreate_database $Q_DB_NAME utf8 + else + echo "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin." + exit 1 fi # Update either configuration file with plugin @@ -1974,7 +1914,7 @@ if is_service_enabled q-svc; then fi fi -elif is_service_enabled mysql && is_service_enabled nova; then +elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled nova; then # Create a small network $NOVA_BIN_DIR/nova-manage network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS diff --git a/unstack.sh b/unstack.sh index 42cb7aff..6b34aa3a 100755 --- a/unstack.sh +++ b/unstack.sh @@ -15,6 +15,9 @@ TOP_DIR=$(cd $(dirname "$0") && pwd) # Import common functions source $TOP_DIR/functions +# Import database library +source $TOP_DIR/lib/database + # Load local configuration source $TOP_DIR/stackrc @@ -102,6 +105,10 @@ if [[ -n "$UNSTACK_ALL" ]]; then stop_service mysql fi + if is_service_enabled postgresql; then + stop_service postgresql + fi + # Stop rabbitmq-server if is_service_enabled rabbit; then stop_service rabbitmq-server From 0c49539288ddfc6446f2f198a4b1b76355a4c52b Mon Sep 17 00:00:00 2001 From: dmitriybudnik Date: Sun, 21 Oct 2012 02:00:07 +0300 Subject: [PATCH 744/967] Few changes to get_uec_image.sh: Fixed typo in usage message Fixed usage message to represent actual code Set default minimum image size to 2000MB from 2000*BS fixed bug not creating directory for downoladed files Change-Id: Id736ac7984f88e61b685569b6ba5e9158bea5889 added percise and quantal Change-Id: Id9cb74dded044ec998700c1b456a8077f76c96e8 --- tools/get_uec_image.sh | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh index ca74a030..156fd439 100755 --- a/tools/get_uec_image.sh +++ b/tools/get_uec_image.sh @@ -5,7 +5,7 @@ # Download and prepare Ubuntu UEC images CACHEDIR=${CACHEDIR:-/opt/stack/cache} -ROOTSIZE=${ROOTSIZE:-2000} +ROOTSIZE=${ROOTSIZE:-2000M} # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) @@ -24,7 +24,7 @@ usage() { echo "$0 [-r rootsize] release imagefile [kernel]" echo "" echo "-r size - root fs size (min 2000MB)" - echo "release - Ubuntu release: jaunty - oneric" + echo "release - Ubuntu release: lucid - quantal" echo "imagefile - output image file" echo "kernel - output kernel" exit 1 @@ -64,6 +64,8 @@ IMG_FILE_TMP=`mktemp $IMG_FILE.XXXXXX` KERNEL=$3 case $DIST_NAME in + quantal) ;; + percise) ;; oneiric) ;; natty) ;; maverick) ;; @@ -90,7 +92,7 @@ fi # Get the UEC image UEC_NAME=$DIST_NAME-server-cloudimg-amd64 -if [ ! -d $CACHEDIR ]; then +if [ ! -d $CACHEDIR/$DIST_NAME ]; then mkdir -p $CACHEDIR/$DIST_NAME fi if [ ! -e $CACHEDIR/$DIST_NAME/$UEC_NAME.tar.gz ]; then From 32761a4961857bd7f46b31866edc24ec5ef2f614 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Mon, 5 Nov 2012 09:57:57 +1300 Subject: [PATCH 745/967] Install and configure python-heatclient. 
This uses the new REST API. It installs a client executable called 'heat'. The old heat client in the heat repo has been renamed to heat-cfn. Change-Id: I7bc8662c531e3639cc940a44df96ff426ac3aada --- lib/heat | 12 +++++++++++- stack.sh | 2 ++ stackrc | 4 ++++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index d1f1c7cf..efdcfad3 100644 --- a/lib/heat +++ b/lib/heat @@ -23,7 +23,7 @@ set +o xtrace # Defaults # -------- HEAT_DIR=$DEST/heat - +HEATCLIENT_DIR=$DEST/python-heatclient # set up default directories # cleanup_heat() - Remove residual data files, anything left over from previous @@ -33,6 +33,11 @@ function cleanup_heat() { : } +# configure_heatclient() - Set config files, create data dirs, etc +function configure_heatclient() { + setup_develop $HEATCLIENT_DIR +} + # configure_heat() - Set config files, create data dirs, etc function configure_heat() { setup_develop $HEAT_DIR @@ -193,6 +198,11 @@ function init_heat() { $HEAT_DIR/tools/nova_create_flavors.sh } +# install_heatclient() - Collect source and prepare +function install_heatclient() { + git_clone $HEATCLIENT_REPO $HEATCLIENT_DIR $HEATCLIENT_BRANCH +} + # install_heat() - Collect source and prepare function install_heat() { git_clone $HEAT_REPO $HEAT_DIR $HEAT_BRANCH diff --git a/stack.sh b/stack.sh index 1cd1e1a0..4d769496 100755 --- a/stack.sh +++ b/stack.sh @@ -852,6 +852,7 @@ if is_service_enabled quantum; then fi if is_service_enabled heat; then install_heat + install_heatclient fi if is_service_enabled cinder; then install_cinder @@ -906,6 +907,7 @@ if is_service_enabled quantum; then fi if is_service_enabled heat; then configure_heat + configure_heatclient fi if is_service_enabled cinder; then configure_cinder diff --git a/stackrc b/stackrc index 283b2712..5be872ba 100644 --- a/stackrc +++ b/stackrc @@ -101,6 +101,10 @@ TEMPEST_BRANCH=master HEAT_REPO=${GIT_BASE}/heat-api/heat.git HEAT_BRANCH=master +# python heat client library +HEATCLIENT_REPO=${GIT_BASE}/heat-api/python-heatclient.git +HEATCLIENT_BRANCH=master + # ryu service RYU_REPO=https://github.com/osrg/ryu.git RYU_BRANCH=master From 4a2b1c66118c8397930489b9cfb9247e8160b3ca Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Thu, 1 Nov 2012 16:23:52 -0400 Subject: [PATCH 746/967] Finish configuring ceilometer authentication Copy the policy.json file for ceilometer into place and configure the API service to find it. Create a service user for ceilometer when the service is enabled. Use the service user for the admin_user and admin_password in the ceilometer config file so the middleware can verify tokens. 
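For orientation, the iniset calls in this change amount to a ceilometer.conf roughly like the following; the values are shown symbolically (the $... variables are expanded to their configured values when iniset runs):

    [DEFAULT]
    policy_file = $CEILOMETER_CONF_DIR/policy.json

    [keystone_authtoken]
    auth_protocol = http
    admin_user = ceilometer
    admin_password = $SERVICE_PASSWORD
    admin_tenant_name = $SERVICE_TENANT_NAME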
Change-Id: I39be13da0c86704d35e0ce3dc3d27fd38d787058 Signed-off-by: Doug Hellmann --- files/keystone_data.sh | 33 ++++++++++++++++++++------------- lib/ceilometer | 13 +++++++++++-- 2 files changed, 31 insertions(+), 15 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 9520b177..3da11bf0 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -2,18 +2,19 @@ # # Initial data for Keystone using python-keystoneclient # -# Tenant User Roles +# Tenant User Roles # ------------------------------------------------------------------ -# admin admin admin -# service glance admin -# service nova admin, [ResellerAdmin (swift only)] -# service quantum admin # if enabled -# service swift admin # if enabled -# service cinder admin # if enabled -# service heat admin # if enabled -# demo admin admin -# demo demo Member, anotherrole -# invisible_to_admin demo Member +# admin admin admin +# service glance admin +# service nova admin, [ResellerAdmin (swift only)] +# service quantum admin # if enabled +# service swift admin # if enabled +# service cinder admin # if enabled +# service heat admin # if enabled +# service ceilometer admin # if enabled +# demo admin admin +# demo demo Member, anotherrole +# invisible_to_admin demo Member # Tempest Only: # alt_demo alt_demo Member # @@ -262,7 +263,14 @@ if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then fi fi -if [[ "$ENABLED_SERVICES" =~ "ceilometer-api" ]]; then +if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then + CEILOMETER_USER=$(get_id keystone user-create --name=ceilometer \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=ceilometer@example.com) + keystone user-role-add --tenant_id $SERVICE_TENANT \ + --user_id $CEILOMETER_USER \ + --role_id $ADMIN_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then CEILOMETER_SERVICE=$(get_id keystone service-create \ --name=ceilometer \ @@ -345,4 +353,3 @@ if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then --internalurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" fi fi - diff --git a/lib/ceilometer b/lib/ceilometer index b0f03778..2b014b05 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -6,8 +6,9 @@ # Dependencies: # - functions -# - OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL set for admin credentials +# - OS_AUTH_URL for auth in api # - DEST set to the destination directory +# - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api # stack.sh # --------- @@ -61,7 +62,15 @@ function configure_ceilometer() { iniset $CEILOMETER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD iniset $CEILOMETER_CONF DEFAULT sql_connection $BASE_SQL_CONN/nova?charset=utf8 + # Install the policy file for the API server + cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR + iniset $CEILOMETER_CONF DEFAULT policy_file $CEILOMETER_CONF_DIR/policy.json + iniset $CEILOMETER_CONF keystone_authtoken auth_protocol http + iniset $CEILOMETER_CONF keystone_authtoken admin_user ceilometer + iniset $CEILOMETER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + cleanup_ceilometer } @@ -73,7 +82,7 @@ function install_ceilometer() { # start_ceilometer() - Start running processes, including screen function start_ceilometer() { screen_it ceilometer-acompute "cd $CEILOMETER_DIR && sg libvirtd \"$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" - screen_it ceilometer-acentral "export OS_USERNAME=$OS_USERNAME OS_PASSWORD=$OS_PASSWORD 
OS_TENANT_NAME=$OS_TENANT_NAME OS_AUTH_URL=$OS_AUTH_URL && cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF" + screen_it ceilometer-acentral "export OS_USERNAME=ceilometer OS_PASSWORD=$SERVICE_PASSWORD OS_TENANT_NAME=$SERVICE_TENANT_NAME OS_AUTH_URL=$OS_AUTH_URL && cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF" screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF" screen_it ceilometer-api "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" } From 161e2807a9f530988e21b9b723fb43dbc992496c Mon Sep 17 00:00:00 2001 From: John Griffith Date: Mon, 5 Nov 2012 13:59:49 -0700 Subject: [PATCH 747/967] Update devstack exercises to call cinderclient Exercises use nova volume-* commands still. This works fine, however moving forward now that cinder is the default volume service we should use the cinderclient explicitly for the applicable calls. Attach/Detach are still novaclient commands, however the others (create, delete, list, etc) should be cinderclient. Change-Id: I336de1b69a32eee6c91655b0a5bf8541b243f2f0 --- exercises/boot_from_volume.sh | 12 ++++++------ exercises/volumes.sh | 24 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 460b50cf..b06c8ddb 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -95,7 +95,7 @@ nova keypair-add $KEY_NAME > $KEY_FILE chmod 600 $KEY_FILE # Delete the old volume -nova volume-delete $VOL_NAME || true +cinder delete $VOL_NAME || true # Free every floating ips - setting FREE_ALL_FLOATING_IPS=True in localrc will make life easier for testers if [ "$FREE_ALL_FLOATING_IPS" = "True" ]; then @@ -112,15 +112,15 @@ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $ fi # Create the bootable volume -nova volume-create --display_name=$VOL_NAME --image-id $IMAGE 1 +cinder create --display_name=$VOL_NAME --image-id $IMAGE 1 # Wait for volume to activate -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then echo "Volume $VOL_NAME not created" exit 1 fi -VOLUME_ID=`nova volume-list | grep $VOL_NAME | get_field 1` +VOLUME_ID=`cinder list | grep $VOL_NAME | get_field 1` # Boot instance from volume! This is done with the --block_device_mapping param. # The format of mapping is: @@ -152,13 +152,13 @@ nova delete $VOL_INSTANCE_NAME || \ die "Failure deleting instance volume $VOL_INSTANCE_NAME" # Wait till our volume is no longer in-use -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then echo "Volume $VOL_NAME not created" exit 1 fi # Delete the volume -nova volume-delete $VOL_NAME || \ +cinder delete $VOL_NAME || \ die "Failure deleting volume $VOLUME_NAME" # De-allocate the floating ip diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 1c73786e..72c8729e 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -2,7 +2,7 @@ # **volumes.sh** -# Test nova volumes with the nova command from python-novaclient +# Test cinder volumes with the cinder command from python-cinderclient echo "*********************************************************************" echo "Begin DevStack Exercise: $0" @@ -131,28 +131,28 @@ ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT VOL_NAME="myvol-$(openssl rand -hex 4)" # Verify it doesn't exist -if [[ -n "`nova volume-list | grep $VOL_NAME | head -1 | get_field 2`" ]]; then +if [[ -n "`cinder list | grep $VOL_NAME | head -1 | get_field 2`" ]]; then echo "Volume $VOL_NAME already exists" exit 1 fi # Create a new volume -nova volume-create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" 1 +cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" 1 if [[ $? != 0 ]]; then echo "Failure creating volume $VOL_NAME" exit 1 fi start_time=`date +%s` -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then echo "Volume $VOL_NAME not created" exit 1 fi end_time=`date +%s` -echo "Completed volume-create in $((end_time - start_time)) seconds" +echo "Completed cinder create in $((end_time - start_time)) seconds" # Get volume ID -VOL_ID=`nova volume-list | grep $VOL_NAME | head -1 | get_field 1` +VOL_ID=`cinder list | grep $VOL_NAME | head -1 | get_field 1` die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME" # Attach to server @@ -160,14 +160,14 @@ DEVICE=/dev/vdb start_time=`date +%s` nova volume-attach $VM_UUID $VOL_ID $DEVICE || \ die "Failure attaching volume $VOL_NAME to $NAME" -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then echo "Volume $VOL_NAME not attached to $NAME" exit 1 fi end_time=`date +%s` echo "Completed volume-attach in $((end_time - start_time)) seconds" -VOL_ATTACH=`nova volume-list | grep $VOL_NAME | head -1 | get_field -1` +VOL_ATTACH=`cinder list | grep $VOL_NAME | head -1 | get_field -1` die_if_not_set VOL_ATTACH "Failure retrieving $VOL_NAME status" if [[ "$VOL_ATTACH" != $VM_UUID ]]; then echo "Volume not attached to correct instance" @@ -177,7 +177,7 @@ fi # Detach volume start_time=`date +%s` nova volume-detach $VM_UUID $VOL_ID || die "Failure detaching volume $VOL_NAME from $NAME" -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then echo "Volume $VOL_NAME not detached from $NAME" exit 1 fi @@ -186,13 +186,13 @@ echo "Completed volume-detach in $((end_time - start_time)) seconds" # Delete volume start_time=`date +%s` -nova volume-delete $VOL_ID || die "Failure deleting volume $VOL_NAME" -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
nova volume-list | grep $VOL_NAME; do sleep 1; done"; then +cinder delete $VOL_ID || die "Failure deleting volume $VOL_NAME" +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME; do sleep 1; done"; then echo "Volume $VOL_NAME not deleted" exit 1 fi end_time=`date +%s` -echo "Completed volume-delete in $((end_time - start_time)) seconds" +echo "Completed cinder delete in $((end_time - start_time)) seconds" # Shutdown the server nova delete $VM_UUID || die "Failure deleting instance $NAME" From 3edca54923495cf48186808dd2ed788315c29126 Mon Sep 17 00:00:00 2001 From: Dmitriy Budnik Date: Tue, 6 Nov 2012 09:58:24 +0200 Subject: [PATCH 748/967] Fixed path for calling build_uec_ramdisk.sh from build_usb_boot.sh Bug desc: build_usb_boot.sh is unable to find build_uec_ramdisk.sh while being called from tools directory. Variable with correct path wasn't been used at all. Change-Id: I5ddff3d587a495e6768163f6b282e5b5f7a40849 --- tools/build_usb_boot.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/build_usb_boot.sh b/tools/build_usb_boot.sh index f64b7b68..85662298 100755 --- a/tools/build_usb_boot.sh +++ b/tools/build_usb_boot.sh @@ -11,7 +11,6 @@ DEST_DIR=${1:-/tmp/syslinux-boot} PXEDIR=${PXEDIR:-/opt/ramstack/pxe} -PROGDIR=`dirname $0` # Clean up any resources that may be in use cleanup() { @@ -81,7 +80,7 @@ fi # Get image into place if [ ! -r $PXEDIR/stack-initrd.img ]; then cd $TOP_DIR - $PROGDIR/build_uec_ramdisk.sh $PXEDIR/stack-initrd.img + $TOOLS_DIR/build_uec_ramdisk.sh $PXEDIR/stack-initrd.img fi if [ ! -r $PXEDIR/stack-initrd.gz ]; then gzip -1 -c $PXEDIR/stack-initrd.img >$PXEDIR/stack-initrd.gz From 213c416878eaf42aae3a0e4343c220a72951d14c Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Tue, 6 Nov 2012 09:38:36 +0100 Subject: [PATCH 749/967] Add CINDER_SECURE_DELETE flag. Defaults to true which means no change in behavoir. Adds option to disable secure delete as in https://review.openstack.org/15477. Change-Id: I2b19ed4223460b550026ad4975080c0079c8b433 --- lib/cinder | 4 ++++ stack.sh | 5 +++++ 2 files changed, 9 insertions(+) diff --git a/lib/cinder b/lib/cinder index 81bfbfe3..d24212d7 100644 --- a/lib/cinder +++ b/lib/cinder @@ -134,6 +134,10 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD fi + if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then + iniset $CINDER_CONF DEFAULT secure_delete False + fi + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then # Add color to logging output iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s %(color)s%(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s%(color)s] %(instance)s%(color)s%(message)s" diff --git a/stack.sh b/stack.sh index 1cd1e1a0..366d752a 100755 --- a/stack.sh +++ b/stack.sh @@ -356,6 +356,11 @@ RYU_OFP_PORT=${RYU_OFP_PORT:-6633} # Ryu Applications RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest} +# Should cinder perform secure deletion of volumes? 
+# Defaults to true, can be set to False to avoid this bug when testing: +# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755 +CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE` + # Name of the LVM volume group to use/create for iscsi volumes VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} From 8ec719b4d43a2c234442256df1ade62cc26755d6 Mon Sep 17 00:00:00 2001 From: Aaron Rosen Date: Tue, 30 Oct 2012 12:57:47 -0700 Subject: [PATCH 750/967] nova-manage network commands only when n-net enabled This patch checks if n-net is enabled rather than any nova service before creating networks through nova. Fixes bug 1073313 Change-Id: I8810737ddbb26a5e281060f5395cfad5d186b6d3 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 366d752a..7df32da1 100755 --- a/stack.sh +++ b/stack.sh @@ -1916,7 +1916,7 @@ if is_service_enabled q-svc; then fi fi -elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled nova; then +elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then # Create a small network $NOVA_BIN_DIR/nova-manage network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS From 4d3049e7cd8571aac26bd9aec6cfda9b6933433d Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 6 Nov 2012 20:38:14 -0600 Subject: [PATCH 751/967] Configure Cinder for syslog if enabled Change-Id: I3d2770cd2c3e3b93c0f26de359e5b0cb166826c1 --- lib/cinder | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/cinder b/lib/cinder index d24212d7..c2cf15bf 100644 --- a/lib/cinder +++ b/lib/cinder @@ -125,6 +125,10 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.openstack.volume.contrib.standard_extensions iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH + if [ "$SYSLOG" != "False" ]; then + iniset $CINDER_CONF DEFAULT use_syslog True + fi + if is_service_enabled qpid ; then iniset $CINDER_CONF DEFAULT rpc_backend cinder.openstack.common.rpc.impl_qpid elif is_service_enabled zeromq; then From 3a87eddc7fb83c5cdbbc231f0fd2a94e92d958d7 Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Thu, 25 Oct 2012 23:01:06 +0000 Subject: [PATCH 752/967] Added options to improve offline usage. * NOVA_ZIPBALL_URL can be set for a XS/XCP install to point to a non-standard zipball location. * PIP_USE_MIRRORS (default True) can be set to False to stop pip from attempting to use mirrors (useful for when using an offline eggproxy). Change-Id: I01a9944fe4c13c8cc8323a703f598bc498491b73 --- functions | 9 +++++++-- tools/xen/install_os_domU.sh | 4 ++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/functions b/functions index dbe9d30a..80085004 100644 --- a/functions +++ b/functions @@ -7,6 +7,7 @@ # ``GLANCE_HOSTPORT`` # ``OFFLINE`` # ``PIP_DOWNLOAD_CACHE`` +# ``PIP_USE_MIRRORS`` # ``RECLONE`` # ``TRACK_DEPENDS`` # ``http_proxy``, ``https_proxy``, ``no_proxy`` @@ -578,7 +579,8 @@ function is_set() { # Wrapper for ``pip install`` to set cache and proxy environment variables -# Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``TRACK_DEPENDES``, ``*_proxy` +# Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``PIP_USE_MIRRORS``, +# ``TRACK_DEPENDS``, ``*_proxy` # pip_install package [package ...] 
function pip_install { [[ "$OFFLINE" = "True" || -z "$@" ]] && return @@ -597,11 +599,14 @@ function pip_install { CMD_PIP=/usr/bin/pip-python fi fi + if [[ "$PIP_USE_MIRRORS" != "False" ]]; then + PIP_MIRROR_OPT="--use-mirrors" + fi $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \ HTTP_PROXY=$http_proxy \ HTTPS_PROXY=$https_proxy \ NO_PROXY=$no_proxy \ - $CMD_PIP install --use-mirrors $@ + $CMD_PIP install $PIP_MIRROR_OPT $@ } diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 0bb6ac8a..c78c6f2e 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -57,8 +57,8 @@ then fi # get nova -nova_zipball=$(echo $NOVA_REPO | sed "s:\.git$::;s:$:/zipball/$NOVA_BRANCH:g") -wget $nova_zipball -O nova-zipball --no-check-certificate +NOVA_ZIPBALL_URL=${NOVA_ZIPBALL_URL:-$(echo $NOVA_REPO | sed "s:\.git$::;s:$:/zipball/$NOVA_BRANCH:g")} +wget $NOVA_ZIPBALL_URL -O nova-zipball --no-check-certificate unzip -o nova-zipball -d ./nova # install xapi plugins From b9182d65ccf2e560975c2bc0b64d143ac8f29e54 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 7 Nov 2012 12:31:34 -0600 Subject: [PATCH 753/967] Documentation and formatting cleanup * No functional changes to see here Change-Id: I68b7abb3a30effc9e76c6e7deb1e3c6ef140f6bb --- stack.sh | 73 +++++++++++++++++++++++++++++++++----------------------- 1 file changed, 43 insertions(+), 30 deletions(-) diff --git a/stack.sh b/stack.sh index 7df32da1..04037e81 100755 --- a/stack.sh +++ b/stack.sh @@ -12,13 +12,12 @@ # developer install. # To keep this script simple we assume you are running on a recent **Ubuntu** -# (11.10 Oneiric or 12.04 Precise) or **Fedora** (F16 or F17) machine. It +# (11.10 Oneiric or newer) or **Fedora** (F16 or newer) machine. It # should work in a VM or physical server. Additionally we put the list of # ``apt`` and ``rpm`` dependencies and other configuration files in this repo. # Learn more and get the most recent version at http://devstack.org - # Keep track of the devstack directory TOP_DIR=$(cd $(dirname "$0") && pwd) @@ -33,6 +32,7 @@ GetDistro # Import database library (must be loaded before stackrc which sources localrc) source $TOP_DIR/lib/database + # Settings # ======== @@ -107,9 +107,8 @@ if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17) ]]; then fi fi -# Disallow qpid on oneiric +# Qpid was introduced to Ubuntu in precise, disallow it on oneiric if [ "${DISTRO}" = "oneiric" ] && is_service_enabled qpid ; then - # Qpid was introduced in precise echo "You must use Ubuntu Precise or newer for Qpid support." exit 1 fi @@ -453,14 +452,16 @@ MULTI_HOST=`trueorfalse False $MULTI_HOST` # fail. # # If you are running on a single node and don't need to access the VMs from -# devices other than that node, you can set FLAT_INTERFACE= -# This will stop nova from bridging any interfaces into FLAT_NETWORK_BRIDGE. +# devices other than that node, you can set ``FLAT_INTERFACE=`` +# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``. FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT} ## FIXME(ja): should/can we check that FLAT_INTERFACE is sane? -# Using Quantum networking: -# + +# Quantum Networking +# ------------------ + # Make sure that quantum is enabled in ENABLED_SERVICES. If you want # to run Quantum on this host, make sure that q-svc is also in # ENABLED_SERVICES. 
@@ -478,18 +479,20 @@ FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT} # With Quantum networking the NET_MAN variable is ignored. -# Database configuration +# Database Configuration # ---------------------- + # To select between database backends, add a line to localrc like: # # use_database postgresql # -# The available database backends are defined in the DATABASE_BACKENDS +# The available database backends are defined in the ``DATABASE_BACKENDS`` # variable defined in stackrc. By default, MySQL is enabled as the database # backend. initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || echo "No database enabled" + # RabbitMQ or Qpid # -------------------------- @@ -541,7 +544,7 @@ if is_service_enabled swift; then S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080} fi # We only ask for Swift Hash if we have enabled swift service. - # SWIFT_HASH is a random unique string for a swift cluster that + # ``SWIFT_HASH`` is a random unique string for a swift cluster that # can never change. read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH." fi @@ -556,7 +559,7 @@ S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333} # The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database. It is # just a string and is not a 'real' Keystone token. read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN." -# Services authenticate to Identity with servicename/SERVICE_PASSWORD +# Services authenticate to Identity with servicename/``SERVICE_PASSWORD`` read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION." # Horizon currently truncates usernames and passwords at 20 characters read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)." @@ -565,7 +568,6 @@ read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE ( SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} - # Horizon # ------- @@ -579,10 +581,9 @@ APACHE_GROUP=${APACHE_GROUP:-$APACHE_USER} # --------- # Draw a spinner so the user knows something is happening -function spinner() -{ +function spinner() { local delay=0.75 - local spinstr='|/-\' + local spinstr='/-\|' printf "..." >&3 while [ true ]; do local temp=${spinstr#?} @@ -637,6 +638,7 @@ if [[ -n "$LOGFILE" ]]; then SUMFILE=$LOGFILE.${CURRENT_LOG_TIME}.summary # Redirect output according to config + # Copy stdout to fd 3 exec 3>&1 if [[ "$VERBOSE" == "True" ]]; then @@ -767,7 +769,7 @@ fi if is_service_enabled q-agt; then if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then # Install deps - # FIXME add to files/apts/quantum, but don't install if not needed! + # FIXME add to ``files/apts/quantum``, but don't install if not needed! 
if [[ "$os_PACKAGE" = "deb" ]]; then kernel_version=`cat /proc/version | cut -d " " -f3` install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version @@ -810,6 +812,7 @@ pip_install $(get_packages $FILES/pips | sort -u) echo_summary "Installing OpenStack project source" +# Grab clients first install_keystoneclient install_glanceclient install_novaclient @@ -871,6 +874,7 @@ if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ry git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH fi + # Initialization # ============== @@ -972,10 +976,15 @@ fi # Configure database # ------------------ + if is_service_enabled $DATABASE_BACKENDS; then configure_database fi + +# Configure screen +# ---------------- + if [ -z "$SCREEN_HARDSTATUS" ]; then SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})' fi @@ -985,9 +994,11 @@ SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc if [[ -e $SCREENRC ]]; then echo -n > $SCREENRC fi + # Create a new named screen to run processes in screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash sleep 1 + # Set a reasonable status bar screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" @@ -1097,6 +1108,7 @@ fi # Ryu # --- + # Ryu is not a part of OpenStack project. Please ignore following block if # you are not interested in Ryu. # launch ryu manager @@ -1123,11 +1135,10 @@ fi # Quantum # ------- +# Quantum Network Configuration if is_service_enabled quantum; then echo_summary "Configuring Quantum" - # - # Quantum Network Configuration - # + # The following variables control the Quantum openvswitch and # linuxbridge plugins' allocation of tenant networks and # availability of provider networks. If these are not configured @@ -1155,7 +1166,7 @@ if is_service_enabled quantum; then # allocated. An external network switch must be configured to # trunk these VLANs between hosts for multi-host connectivity. # - # Example: TENANT_VLAN_RANGE=1000:1999 + # Example: ``TENANT_VLAN_RANGE=1000:1999`` TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} # If using VLANs for tenant networks, or if using flat or VLAN @@ -1164,7 +1175,7 @@ if is_service_enabled quantum; then # openvswitch agent or LB_PHYSICAL_INTERFACE for the linuxbridge # agent, as described below. # - # Example: PHYSICAL_NETWORK=default + # Example: ``PHYSICAL_NETWORK=default`` PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} # With the openvswitch plugin, if using VLANs for tenant networks, @@ -1174,7 +1185,7 @@ if is_service_enabled quantum; then # physical interface must be manually added to the bridge as a # port for external connectivity. # - # Example: OVS_PHYSICAL_BRIDGE=br-eth1 + # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} # With the linuxbridge plugin, if using VLANs for tenant networks, @@ -1182,13 +1193,13 @@ if is_service_enabled quantum; then # the name of the network interface to use for the physical # network. # - # Example: LB_PHYSICAL_INTERFACE=eth1 + # Example: ``LB_PHYSICAL_INTERFACE=eth1`` LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} # With the openvswitch plugin, set to True in localrc to enable - # provider GRE tunnels when ENABLE_TENANT_TUNNELS is False. + # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. 
# - # Example: OVS_ENABLE_TUNNELING=True + # Example: ``OVS_ENABLE_TUNNELING=True`` OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} # Put config files in ``/etc/quantum`` for everyone to find @@ -1276,7 +1287,7 @@ if is_service_enabled q-svc; then echo "WARNING - The openvswitch plugin is using local tenant networks, with no connectivity between hosts." fi - # Override OVS_VLAN_RANGES and OVS_BRIDGE_MAPPINGS in localrc + # Override ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` # for more complex physical network configurations. if [[ "$OVS_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then OVS_VLAN_RANGES=$PHYSICAL_NETWORK @@ -1299,7 +1310,7 @@ if is_service_enabled q-svc; then echo "WARNING - The linuxbridge plugin is using local tenant networks, with no connectivity between hosts." fi - # Override LB_VLAN_RANGES and LB_INTERFACE_MAPPINGS in localrc + # Override ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` # for more complex physical network configurations. if [[ "$LB_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then LB_VLAN_RANGES=$PHYSICAL_NETWORK @@ -1339,7 +1350,7 @@ if is_service_enabled q-agt; then fi # Setup physical network bridge mappings. Override - # OVS_VLAN_RANGES and OVS_BRIDGE_MAPPINGS in localrc for more + # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more # complex physical network configurations. if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE @@ -1353,7 +1364,7 @@ if is_service_enabled q-agt; then AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent" elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then # Setup physical network interface mappings. Override - # LB_VLAN_RANGES and LB_INTERFACE_MAPPINGS in localrc for more + # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more # complex physical network configurations. if [[ "$LB_INTERFACE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE @@ -1454,6 +1465,7 @@ if is_service_enabled quantum; then fi fi + # Nova # ---- @@ -1807,6 +1819,7 @@ elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then fi add_nova_opt "glance_api_servers=$GLANCE_HOSTPORT" + # XenServer # --------- From 5233064713b94ca56137bbcee6f8d5c2cbbd48e3 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Wed, 7 Nov 2012 15:00:01 -0500 Subject: [PATCH 754/967] Add multiple database support to configure_tempest After support for multiple databases were added to devstack, configure tempest would not work with update_database() in localrc. This sources lib/database before localrc is loaded so that update_database is defined. Also, COMPUTE_DB_URI is now replaced by BASE_SQL_CONN from lib/database. Change-Id: Ifd791e5732898a661cc1237839bd18cef2f36f60 Signed-off-by: Matthew Treinish --- tools/configure_tempest.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 1e35036b..070bc0bd 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -47,6 +47,7 @@ if [ ! -e $TOP_DIR/openrc ]; then fi # Source params +source $TOP_DIR/lib/database source $TOP_DIR/openrc # Where Openstack code lives @@ -186,7 +187,7 @@ COMPUTE_CONFIG_PATH=/etc/nova/nova.conf # TODO(jaypipes): Create the key file here... 
right now, no whitebox # tests actually use a key. COMPUTE_PATH_TO_PRIVATE_KEY=$TEMPEST_DIR/id_rsa -COMPUTE_DB_URI=mysql://root:$MYSQL_PASSWORD@localhost/nova +COMPUTE_DB_URI=$BASE_SQL_CONN/nova # Image test configuration options... IMAGE_HOST=${IMAGE_HOST:-127.0.0.1} From 6931c137b9f214a2ac3bdda42904952f9aa8b7ca Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 7 Nov 2012 16:51:21 -0600 Subject: [PATCH 755/967] Fix spacing issue in ssh_check() Change-Id: Ia5bd7b678b86f2e3e3b1fa04e628096feb1ace81 --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 0da8299d..c0c9197a 100644 --- a/functions +++ b/functions @@ -894,7 +894,7 @@ function ssh_check() { local FLOATING_IP=$3 local DEFAULT_INSTANCE_USER=$4 local ACTIVE_TIMEOUT=$5 - local probe_cmd = "" + local probe_cmd="" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success ; do sleep 1; done"; then echo "server didn't become ssh-able!" exit 1 From ba05621c6ffa1adb9fe2772a2e5d0ea94043950c Mon Sep 17 00:00:00 2001 From: Tomoe Sugihara Date: Wed, 7 Nov 2012 20:10:57 +0900 Subject: [PATCH 756/967] Add iputils-arping package for quantum l3 agent. This is for Ubuntu only since arping command is packeged in iputils on Fedora, which is already in files/rpms/quantum. Change-Id: I8318929fb35a12e0fb7a6a148d8ef0554cf3bc30 Signed-off-by: Tomoe Sugihara --- files/apts/quantum | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apts/quantum b/files/apts/quantum index 39f45618..64fc1bfb 100644 --- a/files/apts/quantum +++ b/files/apts/quantum @@ -1,6 +1,7 @@ ebtables iptables iputils-ping +iputils-arping mysql-server #NOPRIME sudo python-boto From f993b2353fdf3fc643afa78df9b64af446352220 Mon Sep 17 00:00:00 2001 From: Stef T Date: Thu, 8 Nov 2012 10:46:48 -0500 Subject: [PATCH 757/967] Enable Xen/DevStackDomU to have larger disk * Size of xvda can be specified via xenrc * Fixes bug 1076430 Change-Id: Ia4ffef98b01fa9572e43c46275a132b2b1e5f689 --- tools/xen/scripts/install_ubuntu_template.sh | 2 ++ tools/xen/xenrc | 1 + 2 files changed, 3 insertions(+) diff --git a/tools/xen/scripts/install_ubuntu_template.sh b/tools/xen/scripts/install_ubuntu_template.sh index f67547b0..43b6decd 100755 --- a/tools/xen/scripts/install_ubuntu_template.sh +++ b/tools/xen/scripts/install_ubuntu_template.sh @@ -45,6 +45,7 @@ fi # Clone built-in template to create new template new_uuid=$(xe vm-clone uuid=$builtin_uuid \ new-name-label="$UBUNTU_INST_TEMPLATE_NAME") +disk_size=$(($OSDOMU_VDI_GB * 1024 * 1024 * 1024)) # Some of these settings can be found in example preseed files # however these need to be answered before the netinstall @@ -73,6 +74,7 @@ xe template-param-set uuid=$new_uuid \ PV-args="$pvargs" \ other-config:debian-release="$UBUNTU_INST_RELEASE" \ other-config:default_template=true \ + other-config:disks='' \ other-config:install-arch="$UBUNTU_INST_ARCH" echo "Ubuntu template installed uuid:$new_uuid" diff --git a/tools/xen/xenrc b/tools/xen/xenrc index 0365a25e..1a5a2a93 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -11,6 +11,7 @@ GUEST_NAME=${GUEST_NAME:-DevStackOSDomU} # Size of image VDI_MB=${VDI_MB:-5000} OSDOMU_MEM_MB=1024 +OSDOMU_VDI_GB=8 # VM Password GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} From 86a796941e3a6b29c95581dcaba45bf16f9e9fe2 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 22 Oct 2012 15:24:46 -0500 Subject: [PATCH 758/967] Clean up operation with no Nova services enabled Change-Id: 
Ib9054bacc34e923c05f0bc699afd514eaa3cad01 --- stack.sh | 178 +++++++++++++++++++++++++++---------------------------- 1 file changed, 89 insertions(+), 89 deletions(-) diff --git a/stack.sh b/stack.sh index 04037e81..084f276d 100755 --- a/stack.sh +++ b/stack.sh @@ -1757,98 +1757,98 @@ if is_service_enabled nova; then # Rebuild the config file from scratch create_nova_conf init_nova -fi - -# Additional Nova configuration that is dependent on other services -if is_service_enabled quantum; then - add_nova_opt "network_api_class=nova.network.quantumv2.api.API" - add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME" - add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD" - add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" - add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY" - add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME" - add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT" - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - NOVA_VIF_DRIVER="nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver" - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - NOVA_VIF_DRIVER="nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver" - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - NOVA_VIF_DRIVER="quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver" - add_nova_opt "libvirt_ovs_integration_bridge=$OVS_BRIDGE" - add_nova_opt "linuxnet_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" - add_nova_opt "libvirt_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" + # Additional Nova configuration that is dependent on other services + if is_service_enabled quantum; then + add_nova_opt "network_api_class=nova.network.quantumv2.api.API" + add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME" + add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD" + add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" + add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY" + add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME" + add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT" + + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + NOVA_VIF_DRIVER="nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver" + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + NOVA_VIF_DRIVER="nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver" + elif [[ "$Q_PLUGIN" = "ryu" ]]; then + NOVA_VIF_DRIVER="quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver" + add_nova_opt "libvirt_ovs_integration_bridge=$OVS_BRIDGE" + add_nova_opt "linuxnet_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" + add_nova_opt "libvirt_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" + fi + add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER" + add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER" + elif is_service_enabled n-net; then + add_nova_opt "network_manager=nova.network.manager.$NET_MAN" + add_nova_opt "public_interface=$PUBLIC_INTERFACE" + add_nova_opt "vlan_interface=$VLAN_INTERFACE" + add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE" + if [ -n "$FLAT_INTERFACE" ]; then + add_nova_opt "flat_interface=$FLAT_INTERFACE" + fi fi - add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER" - add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER" -else - add_nova_opt "network_manager=nova.network.manager.$NET_MAN" - add_nova_opt "public_interface=$PUBLIC_INTERFACE" - add_nova_opt "vlan_interface=$VLAN_INTERFACE" - add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE" - if [ -n "$FLAT_INTERFACE" ]; then - add_nova_opt 
"flat_interface=$FLAT_INTERFACE" + # All nova-compute workers need to know the vnc configuration options + # These settings don't hurt anything if n-xvnc and n-novnc are disabled + if is_service_enabled n-cpu; then + NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} + add_nova_opt "novncproxy_base_url=$NOVNCPROXY_URL" + XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"} + add_nova_opt "xvpvncproxy_base_url=$XVPVNCPROXY_URL" + fi + if [ "$VIRT_DRIVER" = 'xenserver' ]; then + VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1} + else + VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} + fi + # Address on which instance vncservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. + VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} + add_nova_opt "vncserver_listen=$VNCSERVER_LISTEN" + add_nova_opt "vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS" + add_nova_opt "ec2_dmz_host=$EC2_DMZ_HOST" + if is_service_enabled zeromq; then + add_nova_opt "rpc_backend=nova.openstack.common.rpc.impl_zmq" + elif is_service_enabled qpid; then + add_nova_opt "rpc_backend=nova.rpc.impl_qpid" + elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then + add_nova_opt "rabbit_host=$RABBIT_HOST" + add_nova_opt "rabbit_password=$RABBIT_PASSWORD" + fi + add_nova_opt "glance_api_servers=$GLANCE_HOSTPORT" + + + # XenServer + # --------- + + if [ "$VIRT_DRIVER" = 'xenserver' ]; then + echo_summary "Using XenServer virtualization driver" + read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." + add_nova_opt "compute_driver=xenapi.XenAPIDriver" + XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"} + XENAPI_USER=${XENAPI_USER:-"root"} + add_nova_opt "xenapi_connection_url=$XENAPI_CONNECTION_URL" + add_nova_opt "xenapi_connection_username=$XENAPI_USER" + add_nova_opt "xenapi_connection_password=$XENAPI_PASSWORD" + add_nova_opt "flat_injected=False" + # Need to avoid crash due to new firewall support + XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} + add_nova_opt "firewall_driver=$XEN_FIREWALL_DRIVER" + elif [ "$VIRT_DRIVER" = 'openvz' ]; then + echo_summary "Using OpenVZ virtualization driver" + # TODO(deva): OpenVZ driver does not yet work if compute_driver is set here. + # Replace connection_type when this is fixed. 
+ # add_nova_opt "compute_driver=openvz.connection.OpenVzConnection" + add_nova_opt "connection_type=openvz" + LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} + add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER" + else + echo_summary "Using libvirt virtualization driver" + add_nova_opt "compute_driver=libvirt.LibvirtDriver" + LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} + add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER" fi -fi -# All nova-compute workers need to know the vnc configuration options -# These settings don't hurt anything if n-xvnc and n-novnc are disabled -if is_service_enabled n-cpu; then - NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} - add_nova_opt "novncproxy_base_url=$NOVNCPROXY_URL" - XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"} - add_nova_opt "xvpvncproxy_base_url=$XVPVNCPROXY_URL" -fi -if [ "$VIRT_DRIVER" = 'xenserver' ]; then - VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1} -else - VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} -fi -# Address on which instance vncservers will listen on compute hosts. -# For multi-host, this should be the management ip of the compute host. -VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} -add_nova_opt "vncserver_listen=$VNCSERVER_LISTEN" -add_nova_opt "vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS" -add_nova_opt "ec2_dmz_host=$EC2_DMZ_HOST" -if is_service_enabled zeromq; then - add_nova_opt "rpc_backend=nova.openstack.common.rpc.impl_zmq" -elif is_service_enabled qpid; then - add_nova_opt "rpc_backend=nova.rpc.impl_qpid" -elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - add_nova_opt "rabbit_host=$RABBIT_HOST" - add_nova_opt "rabbit_password=$RABBIT_PASSWORD" -fi -add_nova_opt "glance_api_servers=$GLANCE_HOSTPORT" - - -# XenServer -# --------- - -if [ "$VIRT_DRIVER" = 'xenserver' ]; then - echo_summary "Using XenServer virtualization driver" - read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." - add_nova_opt "compute_driver=xenapi.XenAPIDriver" - XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"} - XENAPI_USER=${XENAPI_USER:-"root"} - add_nova_opt "xenapi_connection_url=$XENAPI_CONNECTION_URL" - add_nova_opt "xenapi_connection_username=$XENAPI_USER" - add_nova_opt "xenapi_connection_password=$XENAPI_PASSWORD" - add_nova_opt "flat_injected=False" - # Need to avoid crash due to new firewall support - XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} - add_nova_opt "firewall_driver=$XEN_FIREWALL_DRIVER" -elif [ "$VIRT_DRIVER" = 'openvz' ]; then - echo_summary "Using OpenVZ virtualization driver" - # TODO(deva): OpenVZ driver does not yet work if compute_driver is set here. - # Replace connection_type when this is fixed. 
- # add_nova_opt "compute_driver=openvz.connection.OpenVzConnection" - add_nova_opt "connection_type=openvz" - LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} - add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER" -else - echo_summary "Using libvirt virtualization driver" - add_nova_opt "compute_driver=libvirt.LibvirtDriver" - LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} - add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER" fi From 8263c22cd952ea24e7190dd54727d07a1d2494d4 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 8 Nov 2012 19:39:03 -0800 Subject: [PATCH 759/967] Add genisoimage as dependency Fixes bug 1076854 Change-Id: I51cd293898f40edaf241a2fb7aff27e73f3a8e07 --- files/apts/n-cpu | 1 + files/rpms/n-cpu | 1 + 2 files changed, 2 insertions(+) diff --git a/files/apts/n-cpu b/files/apts/n-cpu index 06c21a23..a40b6590 100644 --- a/files/apts/n-cpu +++ b/files/apts/n-cpu @@ -2,3 +2,4 @@ lvm2 open-iscsi open-iscsi-utils +genisoimage diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index 1996a986..f7054e82 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -1,3 +1,4 @@ # Stuff for diablo volumes iscsi-initiator-utils lvm2 +genisoimage From 71cf53a9f60176419732f3ecbbce11c75190c059 Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Fri, 9 Nov 2012 10:38:49 -0500 Subject: [PATCH 760/967] Set the rabbit_durable_queues to match local consumers Due to a problematic bug in Glance (https://bugs.launchpad.net/glance/+bug/1074132), I was unable to get stack.sh to complete successfully. The workaround on the Glance bug was to set the rabbit_durable_queues value to match the setting of the local Rabbit consumers and exchanges. This patch merely looks for any consumer or exchange that is durable and ensures that the default rabbit_durable_queues config option of False is set to True in that case. Change-Id: Ia5a165a5a06d11d1fe6492ca32139972d49d3a1e --- lib/glance | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/lib/glance b/lib/glance index b02a4b63..60026d54 100644 --- a/lib/glance +++ b/lib/glance @@ -70,6 +70,13 @@ function configure_glanceclient() { setup_develop $GLANCECLIENT_DIR } +# durable_glance_queues() - Determine if RabbitMQ queues are durable or not +function durable_glance_queues() { + test `rabbitmqctl list_queues name durable | grep true | wc -l` -gt 0 && return 0 + test `rabbitmqctl list_exchanges name durable | grep true | wc -l` -gt 0 && return 0 + return 1 +} + # configure_glance() - Set config files, create data dirs, etc function configure_glance() { setup_develop $GLANCE_DIR @@ -120,6 +127,12 @@ function configure_glance() { iniset $GLANCE_API_CONF DEFAULT notifier_strategy rabbit iniset $GLANCE_API_CONF DEFAULT rabbit_host $RABBIT_HOST iniset $GLANCE_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD + if [[ durable_glance_queues -eq 0 ]]; then + # This gets around https://bugs.launchpad.net/glance/+bug/1074132 + # that results in a g-api server becoming unresponsive during + # startup...
+ iniset $GLANCE_API_CONF DEFAULT rabbit_durable_queues True + fi fi if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api From efcda85b7ddfe045c5e7f340977031416d64491f Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Fri, 9 Nov 2012 12:22:16 -0500 Subject: [PATCH 761/967] Ensure BASE_SQL_CONN is set properly in tools/configure_tempest.sh Recent changes to enable more than just MySQL to be used in devstack moved some setup stuff to lib/database. While this file was source'd in tools/configure_tempest.sh, the BASE_SQL_CONN variable was not being set because lib/database/initialize_database_backends was not called. Change-Id: I53c3302097a94c842860fcb781e21383150bde7f fixes: LP bug#1077056 --- tools/configure_tempest.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 070bc0bd..b48680c9 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -58,6 +58,9 @@ TEMPEST_DIR=$DEST/tempest CONFIG_DIR=$TEMPEST_DIR/etc TEMPEST_CONF=$CONFIG_DIR/tempest.conf +DATABASE_TYPE=${DATABASE_TYPE:-mysql} +initialize_database_backends + # Use the GUEST_IP unless an explicit IP is set by ``HOST_IP`` HOST_IP=${HOST_IP:-$GUEST_IP} # Use the first IP if HOST_IP still is not set From af5cd77e180bad81aadc51e3f224910a8fe02141 Mon Sep 17 00:00:00 2001 From: Lianhao Lu Date: Mon, 12 Nov 2012 16:36:42 +0800 Subject: [PATCH 762/967] Change ceilometer's git repository. Changed ceilometer's git repository from stackforge/ceilometer.git to openstack/ceilometer.git. Change-Id: I8cf1854641fc7df318f42a56ba061c93614728aa --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 5be872ba..a02bdc01 100644 --- a/stackrc +++ b/stackrc @@ -28,7 +28,7 @@ NOVA_ENABLED_APIS=ec2,osapi_compute,metadata GIT_BASE=https://github.com # metering service -CEILOMETER_REPO=https://github.com/stackforge/ceilometer.git +CEILOMETER_REPO=${GIT_BASE}/openstack/ceilometer.git CEILOMETER_BRANCH=master # volume service From 5db5bfa28f48b0524db6d25d340d12c96270ac0e Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Mon, 29 Oct 2012 11:25:29 -0700 Subject: [PATCH 763/967] Make exercise.sh work with quantum - added support for quantum-debug command - added ping and ssh method for quantum Change-Id: Iebf8a0e9e2ed2bb56bee6533e69827e6caa2bc82 --- exercises/boot_from_volume.sh | 10 +++++ exercises/euca.sh | 10 +++++ exercises/floating_ips.sh | 35 ++++++++++++------ exercises/quantum-adv-test.sh | 70 ++++++++++++++++++----------------- exercises/volumes.sh | 10 +++++ functions | 30 +++++++++++++-- lib/quantum | 69 ++++++++++++++++++++++++++++++++++ openrc | 4 +- stack.sh | 11 ++++++ 9 files changed, 199 insertions(+), 50 deletions(-) diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 460b50cf..4c7890bb 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -32,6 +32,12 @@ source $TOP_DIR/functions # Import configuration source $TOP_DIR/openrc +# Import quantum functions if needed +if is_service_enabled quantum; then + source $TOP_DIR/lib/quantum + setup_quantum +fi + # Import exercise configuration source $TOP_DIR/exerciserc @@ -168,6 +174,10 @@ nova floating-ip-delete $FLOATING_IP || \ # Delete a secgroup nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" +if is_service_enabled quantum;
then + teardown_quantum +fi + set +o xtrace echo "*********************************************************************" echo "SUCCESS: End DevStack Exercise: $0" diff --git a/exercises/euca.sh b/exercises/euca.sh index b1214930..29141ec5 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -33,6 +33,12 @@ source $TOP_DIR/functions # Import EC2 configuration source $TOP_DIR/eucarc +# Import quantum functions if needed +if is_service_enabled quantum; then + source $TOP_DIR/lib/quantum + setup_quantum +fi + # Import exercise configuration source $TOP_DIR/exerciserc @@ -169,6 +175,10 @@ fi # Delete group euca-delete-group $SECGROUP || die "Failure deleting security group $SECGROUP" +if is_service_enabled quantum; then + teardown_quantum +fi + set +o xtrace echo "*********************************************************************" echo "SUCCESS: End DevStack Exercise: $0" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 67878787..ae5691f4 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -31,6 +31,12 @@ source $TOP_DIR/functions # Import configuration source $TOP_DIR/openrc +# Import quantum functions if needed +if is_service_enabled quantum; then + source $TOP_DIR/lib/quantum + setup_quantum +fi + # Import exercise configuration source $TOP_DIR/exerciserc @@ -155,14 +161,16 @@ nova add-floating-ip $VM_UUID $FLOATING_IP || \ # test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT -# Allocate an IP from second floating pool -TEST_FLOATING_IP=`nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1` -die_if_not_set TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL" +if ! is_service_enabled quantum; then + # Allocate an IP from second floating pool + TEST_FLOATING_IP=`nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1` + die_if_not_set TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL" -# list floating addresses -if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then - echo "Floating IP not allocated" - exit 1 + # list floating addresses + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then + echo "Floating IP not allocated" + exit 1 + fi fi # dis-allow icmp traffic (ping) @@ -171,12 +179,13 @@ nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || die "Failure deletin # FIXME (anthony): make xs support security groups if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then # test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds - ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT + ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT Fail fi -# Delete second floating IP -nova floating-ip-delete $TEST_FLOATING_IP || die "Failure deleting floating IP $TEST_FLOATING_IP" - +if ! 
is_service_enabled quantum; then + # Delete second floating IP + nova floating-ip-delete $TEST_FLOATING_IP || die "Failure deleting floating IP $TEST_FLOATING_IP" +fi # de-allocate the floating ip nova floating-ip-delete $FLOATING_IP || die "Failure deleting floating IP $FLOATING_IP" @@ -193,6 +202,10 @@ fi # Delete a secgroup nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" +if is_service_enabled quantum; then + teardown_quantum +fi + set +o xtrace echo "*********************************************************************" echo "SUCCESS: End DevStack Exercise: $0" diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh index 8f15b634..2ee82ff2 100755 --- a/exercises/quantum-adv-test.sh +++ b/exercises/quantum-adv-test.sh @@ -52,13 +52,17 @@ source $TOP_DIR/functions # Import configuration source $TOP_DIR/openrc -# Import exercise configuration -source $TOP_DIR/exerciserc - # If quantum is not enabled we exit with exitcode 55 which mean # exercise is skipped. is_service_enabled quantum && is_service_enabled q-agt && is_service_enabled q-dhcp || exit 55 +# Import quantum fucntions +source $TOP_DIR/lib/quantum +setup_quantum + +# Import exercise configuration +source $TOP_DIR/exerciserc + #------------------------------------------------------------------------------ # Test settings for quantum #------------------------------------------------------------------------------ @@ -76,14 +80,14 @@ DEMO1_NUM_NET=1 DEMO2_NUM_NET=2 PUBLIC_NET1_CIDR="200.0.0.0/24" -DEMO1_NET1_CIDR="10.1.0.0/24" -DEMO2_NET1_CIDR="10.2.0.0/24" -DEMO2_NET2_CIDR="10.2.1.0/24" +DEMO1_NET1_CIDR="10.10.0.0/24" +DEMO2_NET1_CIDR="10.20.0.0/24" +DEMO2_NET2_CIDR="10.20.1.0/24" PUBLIC_NET1_GATEWAY="200.0.0.1" -DEMO1_NET1_GATEWAY="10.1.0.1" -DEMO2_NET1_GATEWAY="10.2.0.1" -DEMO2_NET2_GATEWAY="10.2.1.1" +DEMO1_NET1_GATEWAY="10.10.0.1" +DEMO2_NET1_GATEWAY="10.20.0.1" +DEMO2_NET2_GATEWAY="10.20.1.1" PUBLIC_NUM_VM=1 DEMO1_NUM_VM=1 @@ -188,7 +192,7 @@ function get_flavor_id { function confirm_server_active { local VM_UUID=$1 - if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova --no_cache show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then + if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then echo "server '$VM_UUID' did not become active!" 
false fi @@ -232,6 +236,7 @@ function create_tenants { source $TOP_DIR/openrc admin admin add_tenant demo1 demo1 demo1 add_tenant demo2 demo2 demo2 + source $TOP_DIR/openrc demo demo } function delete_tenants_and_users { @@ -241,6 +246,7 @@ function delete_tenants_and_users { remove_user demo2 remove_tenant demo2 echo "removed all tenants" + source $TOP_DIR/openrc demo demo } function create_network { @@ -256,12 +262,8 @@ function create_network { source $TOP_DIR/openrc $TENANT $TENANT local NET_ID=$(quantum net-create --tenant_id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) quantum subnet-create --ip_version 4 --tenant_id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR - #T0DO(nati) comment out until l3-agent is merged - #local ROUTER_ID=$($QUANTUM router-create --tenant_id $TENANT_ID $ROUTER_NAME| grep ' id ' | awk '{print $4}' ) - #for NET_NAME in ${NET_NAMES//,/ };do - # SUBNET_ID=`get_subnet_id $NET_NAME` - # $QUANTUM router-interface-create $NAME --subnet_id $SUBNET_ID - #done + quantum-debug probe-create $NET_ID + source $TOP_DIR/openrc demo demo } function create_networks { @@ -285,7 +287,7 @@ function create_vm { done #TODO (nati) Add multi-nic test #TODO (nati) Add public-net test - local VM_UUID=`nova --no_cache boot --flavor $(get_flavor_id m1.tiny) \ + local VM_UUID=`nova boot --flavor $(get_flavor_id m1.tiny) \ --image $(get_image_id) \ $NIC \ $TENANT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` @@ -301,32 +303,26 @@ function ping_ip { # Test agent connection. Assumes namespaces are disabled, and # that DHCP is in use, but not L3 local VM_NAME=$1 - IP=`nova --no_cache show $VM_NAME | grep 'network' | awk '{print $5}'` - if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $IP; do sleep 1; done"; then - echo "Could not ping $VM_NAME" - false - fi + local NET_NAME=$2 + IP=`nova show $VM_NAME | grep 'network' | awk '{print $5}'` + ping_check $NET_NAME $IP $BOOT_TIMEOUT } function check_vm { local TENANT=$1 local NUM=$2 local VM_NAME="$TENANT-server$NUM" + local NET_NAME=$3 source $TOP_DIR/openrc $TENANT $TENANT - ping_ip $VM_NAME + ping_ip $VM_NAME $NET_NAME # TODO (nati) test ssh connection # TODO (nati) test inter connection between vm - # TODO (nati) test namespace dhcp # TODO (nati) test dhcp host routes # TODO (nati) test multi-nic - # TODO (nati) use test-agent - # TODO (nati) test L3 forwarding - # TODO (nati) test floating ip - # TODO (nati) test security group } function check_vms { - foreach_tenant_vm 'check_vm ${%TENANT%_NAME} %NUM%' + foreach_tenant_vm 'check_vm ${%TENANT%_NAME} %NUM% ${%TENANT%_VM%NUM%_NET}' } function shutdown_vm { @@ -334,12 +330,12 @@ function shutdown_vm { local NUM=$2 source $TOP_DIR/openrc $TENANT $TENANT VM_NAME=${TENANT}-server$NUM - nova --no_cache delete $VM_NAME + nova delete $VM_NAME } function shutdown_vms { foreach_tenant_vm 'shutdown_vm ${%TENANT%_NAME} %NUM%' - if ! timeout $TERMINATE_TIMEOUT sh -c "while nova --no_cache list | grep -q ACTIVE; do sleep 1; done"; then + if ! 
timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q ACTIVE; do sleep 1; done"; then echo "Some VMs failed to shutdown" false fi @@ -347,17 +343,22 @@ function shutdown_vms { function delete_network { local TENANT=$1 + local NUM=$2 + local NET_NAME="${TENANT}-net$NUM" source $TOP_DIR/openrc admin admin local TENANT_ID=$(get_tenant_id $TENANT) #TODO(nati) comment out until l3-agent merged #for res in port subnet net router;do - for res in port subnet net;do - quantum ${res}-list -F id -F tenant_id | grep $TENANT_ID | awk '{print $2}' | xargs -I % quantum ${res}-delete % + for net_id in `quantum net-list -c id -c name | grep $NET_NAME | awk '{print $2}'`;do + delete_probe $net_id + quantum subnet-list | grep $net_id | awk '{print $2}' | xargs -I% quantum subnet-delete % + quantum net-delete $net_id done + source $TOP_DIR/openrc demo demo } function delete_networks { - foreach_tenant 'delete_network ${%TENANT%_NAME}' + foreach_tenant_net 'delete_network ${%TENANT%_NAME} ${%NUM%}' #TODO(nati) add secuirty group check after it is implemented # source $TOP_DIR/openrc demo1 demo1 # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 @@ -474,6 +475,7 @@ main() { } +teardown_quantum #------------------------------------------------------------------------------- # Kick off script. #------------------------------------------------------------------------------- diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 1c73786e..8533993d 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -30,6 +30,12 @@ source $TOP_DIR/functions # Import configuration source $TOP_DIR/openrc +# Import quantum functions if needed +if is_service_enabled quantum; then + source $TOP_DIR/lib/quantum + setup_quantum +fi + # Import exercise configuration source $TOP_DIR/exerciserc @@ -206,6 +212,10 @@ fi # Delete a secgroup nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" +if is_service_enabled quantum; then + teardown_quantum +fi + set +o xtrace echo "*********************************************************************" echo "SUCCESS: End DevStack Exercise: $0" diff --git a/functions b/functions index dbe9d30a..f806b5a3 100644 --- a/functions +++ b/functions @@ -852,7 +852,11 @@ function yum_install() { # ping check # Uses globals ``ENABLED_SERVICES`` function ping_check() { - _ping_check_novanet "$1" $2 $3 + if is_service_enabled quantum; then + _ping_check_quantum "$1" $2 $3 $4 + return + fi + _ping_check_novanet "$1" $2 $3 $4 } # ping check for nova @@ -861,19 +865,39 @@ function _ping_check_novanet() { local from_net=$1 local ip=$2 local boot_timeout=$3 + local expected=${4:-"True"} + local check_command="" MULTI_HOST=`trueorfalse False $MULTI_HOST` if [[ "$MULTI_HOST" = "True" && "$from_net" = "$PRIVATE_NETWORK_NAME" ]]; then sleep $boot_timeout return fi - if ! timeout $boot_timeout sh -c "while ! ping -c1 -w1 $ip; do sleep 1; done"; then - echo "Couldn't ping server" + if [[ "$expected" = "True" ]]; then + check_command="while ! ping -c1 -w1 $ip; do sleep 1; done" + else + check_command="while ping -c1 -w1 $ip; do sleep 1; done" + fi + if ! 
timeout $boot_timeout sh -c "$check_command"; then + if [[ "$expected" = "True" ]]; then + echo "[Fail] Couldn't ping server" + else + echo "[Fail] Could ping server" + fi exit 1 fi } # ssh check + function ssh_check() { + if is_service_enabled quantum; then + _ssh_check_quantum "$1" $2 $3 $4 $5 + return + fi + _ssh_check_novanet "$1" $2 $3 $4 $5 +} + +function _ssh_check_novanet() { local NET_NAME=$1 local KEY_FILE=$2 local FLOATING_IP=$3 diff --git a/lib/quantum b/lib/quantum index f9e17825..ba98b646 100644 --- a/lib/quantum +++ b/lib/quantum @@ -5,6 +5,8 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace +export QUANTUM_TEST_CONFIG_FILE=${QUANTUM_TEST_CONFIG_FILE:-"/etc/quantum/debug.ini"} + # Configures keystone integration for quantum service and agents function quantum_setup_keystone() { local conf_file=$1 @@ -57,5 +59,72 @@ function is_quantum_ovs_base_plugin() { return 1 } +function _get_net_id() { + quantum --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD net-list | grep $1 | awk '{print $2}' +} + +function _get_probe_cmd_prefix() { + local from_net="$1" + net_id=`_get_net_id $from_net` + probe_id=`quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` + echo "sudo ip netns exec qprobe-$probe_id" +} + +function delete_probe() { + local from_net="$1" + net_id=`_get_net_id $from_net` + probe_id=`quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` + quantum-debug --os-tenant-name admin --os-username admin probe-delete $probe_id +} + +function _ping_check_quantum() { + local from_net=$1 + local ip=$2 + local timeout_sec=$3 + local expected=${4:-"True"} + local check_command="" + probe_cmd=`_get_probe_cmd_prefix $from_net` + if [[ "$expected" = "True" ]]; then + check_command="while ! $probe_cmd ping -c1 -w1 $ip; do sleep 1; done" + else + check_command="while $probe_cmd ping -c1 -w1 $ip; do sleep 1; done" + fi + if ! timeout $timeout_sec sh -c "$check_command"; then + if [[ "$expected" = "True" ]]; then + echo "[Fail] Couldn't ping server" + else + echo "[Fail] Could ping server" + fi + exit 1 + fi +} + +# ssh check +function _ssh_check_quantum() { + local from_net=$1 + local key_file=$2 + local ip=$3 + local user=$4 + local timeout_sec=$5 + local probe_cmd = "" + probe_cmd=`_get_probe_cmd_prefix $from_net` + if ! timeout $timeout_sec sh -c "while ! $probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success ; do sleep 1; done"; then + echo "server didn't become ssh-able!" 
+ exit 1 + fi +} + +function setup_quantum() { + public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME` + quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $public_net_id + private_net_id=`_get_net_id $PRIVATE_NETWORK_NAME` + quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $private_net_id +} + +function teardown_quantum() { + delete_probe $PUBLIC_NETWORK_NAME + delete_probe $PRIVATE_NETWORK_NAME +} + # Restore xtrace $XTRACE diff --git a/openrc b/openrc index 0a6a2150..4b6b9b2b 100644 --- a/openrc +++ b/openrc @@ -73,5 +73,5 @@ export COMPUTE_API_VERSION=${COMPUTE_API_VERSION:-$NOVA_VERSION} # export KEYSTONECLIENT_DEBUG=1 # export NOVACLIENT_DEBUG=1 -# set qunatum debug command -export TEST_CONFIG_FILE=/etc/quantum/debug.ini +# set quantum debug command +export QUANTUM_TEST_CONFIG_FILE=${QUANTUM_TEST_CONFIG_FILE:-"/etc/quantum/debug.ini"} diff --git a/stack.sh b/stack.sh index 59b21670..d15d7e7d 100755 --- a/stack.sh +++ b/stack.sh @@ -341,6 +341,8 @@ Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP=:-True} # Meta data IP Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP} +# Use quantum-debug command +Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} RYU_DIR=$DEST/ryu # Ryu API Host @@ -1503,6 +1505,15 @@ if is_service_enabled quantum; then iniset $Q_CONF_FILE DEFAULT rabbit_host $RABBIT_HOST iniset $Q_CONF_FILE DEFAULT rabbit_password $RABBIT_PASSWORD fi + if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then + Q_DEBUG_CONF_FILE=/etc/quantum/debug.ini + cp $QUANTUM_DIR/etc/l3_agent.ini $Q_DEBUG_CONF_FILE + iniset $Q_L3_CONF_FILE DEFAULT verbose False + iniset $Q_L3_CONF_FILE DEFAULT debug False + iniset $Q_L3_CONF_FILE DEFAULT metadata_ip $Q_META_DATA_IP + iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $Q_L3_CONF_FILE DEFAULT root_helper "sudo" + fi fi # Nova From c6d54c10dff89c515e567b49247c7f13eaf4c2e8 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 13 Nov 2012 15:08:26 -0500 Subject: [PATCH 764/967] install memcached for swift via files when swift was enabled we were installing memcached via stack.sh, after marking it optional in files. Just use files instead. Change-Id: Ib8ee2d1f47254e805f4747b8aff6e89baa66913c --- files/apts/swift | 2 +- files/rpms/swift | 2 +- stack.sh | 5 ----- 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/files/apts/swift b/files/apts/swift index f2983778..c52c68b7 100644 --- a/files/apts/swift +++ b/files/apts/swift @@ -1,6 +1,6 @@ curl gcc -memcached # NOPRIME +memcached python-configobj python-coverage python-dev diff --git a/files/rpms/swift b/files/rpms/swift index c9d49e92..ce41ceb8 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -1,6 +1,6 @@ curl gcc -memcached # NOPRIME +memcached python-configobj python-coverage python-devel diff --git a/stack.sh b/stack.sh index 084f276d..8947382d 100755 --- a/stack.sh +++ b/stack.sh @@ -784,11 +784,6 @@ if is_service_enabled q-agt; then fi fi -if is_service_enabled swift; then - # Install memcached for swift. - install_package memcached -fi - TRACK_DEPENDS=${TRACK_DEPENDS:-False} # Install python packages into a virtualenv so that we can track them From da339829472ffcdc3044f79b76b6cd03608db191 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 14 Nov 2012 12:45:10 +0000 Subject: [PATCH 765/967] Remove hardwired ansolabs urls Fixes bug 1078618. The files are no longer available on ansolabs' servers. 
The files were put on github, and this change modifies the location used in devstack. Change-Id: I1f512ad3b52d6b04d0e28ce6a532e11bfede1462 --- stack.sh | 2 +- tools/xen/prepare_guest_template.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 40eab36e..0a9e4d47 100755 --- a/stack.sh +++ b/stack.sh @@ -2003,7 +2003,7 @@ if is_service_enabled g-reg; then # Option to upload legacy ami-tty, which works with xenserver if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then - IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}http://images.ansolabs.com/tty.tgz" + IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz" fi for image_url in ${IMAGE_URLS//,/ }; do diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh index baf9c3a2..19bd2f84 100755 --- a/tools/xen/prepare_guest_template.sh +++ b/tools/xen/prepare_guest_template.sh @@ -60,7 +60,7 @@ if [ -e "$ISO_DIR" ]; then rm -rf $TMP_DIR else echo "WARNING: no XenServer tools found, falling back to 5.6 tools" - TOOLS_URL="http://images.ansolabs.com/xen/xe-guest-utilities_5.6.100-651_amd64.deb" + TOOLS_URL="https://github.com/downloads/citrix-openstack/warehouse/xe-guest-utilities_5.6.100-651_amd64.deb" wget $TOOLS_URL -O $XS_TOOLS_FILE_NAME cp $XS_TOOLS_FILE_NAME "${STAGING_DIR}${XS_TOOLS_PATH}" rm -rf $XS_TOOLS_FILE_NAME From 14246ac16b1c7ba02c7ca40c416ac50a44bc9af4 Mon Sep 17 00:00:00 2001 From: Eoghan Glynn Date: Wed, 14 Nov 2012 16:23:04 +0000 Subject: [PATCH 766/967] Provide credentials via ceilometer config file Fixes bug 1076831 Previously we passed these credentials to the ceilometer central agent via the OS_* environment variables. Since these credentials are now needed by the compute agent also, and have already leaked into the config file to enable the keystone auth token middleware, we now switch over to using the config file only and drop the environment variable usage. 
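For reference, a minimal sketch of the section the agents now read from ceilometer.conf (placeholder values only; configure_ceilometer() fills them in from SERVICE_PASSWORD, SERVICE_TENANT_NAME and OS_AUTH_URL at run time):

[DEFAULT]
os_username = ceilometer
os_password = $SERVICE_PASSWORD
os_tenant_name = $SERVICE_TENANT_NAME
os_auth_url = $OS_AUTH_URL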
Change-Id: I0298d711905a99aa5355fe034bb0e51e53b3be21 --- lib/ceilometer | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/ceilometer b/lib/ceilometer index 2b014b05..aa1b3960 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -66,6 +66,13 @@ function configure_ceilometer() { cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR iniset $CEILOMETER_CONF DEFAULT policy_file $CEILOMETER_CONF_DIR/policy.json + # the compute and central agents need these credentials in order to + # call out to the public nova and glance APIs + iniset $CEILOMETER_CONF DEFAULT os_username ceilometer + iniset $CEILOMETER_CONF DEFAULT os_password $SERVICE_PASSWORD + iniset $CEILOMETER_CONF DEFAULT os_tenant_name $SERVICE_TENANT_NAME + iniset $CEILOMETER_CONF DEFAULT os_auth_url $OS_AUTH_URL + iniset $CEILOMETER_CONF keystone_authtoken auth_protocol http iniset $CEILOMETER_CONF keystone_authtoken admin_user ceilometer iniset $CEILOMETER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD @@ -82,7 +89,7 @@ function install_ceilometer() { # start_ceilometer() - Start running processes, including screen function start_ceilometer() { screen_it ceilometer-acompute "cd $CEILOMETER_DIR && sg libvirtd \"$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" - screen_it ceilometer-acentral "export OS_USERNAME=ceilometer OS_PASSWORD=$SERVICE_PASSWORD OS_TENANT_NAME=$SERVICE_TENANT_NAME OS_AUTH_URL=$OS_AUTH_URL && cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF" + screen_it ceilometer-acentral "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF" screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF" screen_it ceilometer-api "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" } From 6fd2811726c098e0311bc22c84c5da0d6aa89c62 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Tue, 13 Nov 2012 16:55:41 -0800 Subject: [PATCH 767/967] Remove support for nova-volume * nova-volume has been removed in Grizzly * part of delete-nova-volume Change-Id: Iba91d69950767823d77aaaa93243b0f476dbb04d --- exercises/boot_from_volume.sh | 4 +- exercises/euca.sh | 2 +- exercises/volumes.sh | 6 +- files/keystone_data.sh | 16 ----- functions | 8 +-- lib/n-vol | 126 ---------------------------------- lib/nova | 9 --- stack.sh | 16 +---- stackrc | 4 -- unstack.sh | 11 +-- 10 files changed, 12 insertions(+), 190 deletions(-) delete mode 100644 lib/n-vol diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index b06c8ddb..4562ac0b 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -35,9 +35,9 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc -# If cinder or n-vol are not enabled we exit with exitcode 55 so that +# If cinder is not enabled we exit with exitcode 55 so that # the exercise is skipped -is_service_enabled cinder n-vol || exit 55 +is_service_enabled cinder || exit 55 # Boot this image, use first AMI image if unset DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} diff --git a/exercises/euca.sh b/exercises/euca.sh index b1214930..5480b76f 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -73,7 +73,7 @@ fi # Volumes # ------- -if [[ "$ENABLED_SERVICES" =~ "n-vol" || "$ENABLED_SERVICES" =~ "c-vol" ]]; then +if [[ "$ENABLED_SERVICES" =~ "c-vol" ]]; then 
VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2` die_if_not_set VOLUME_ZONE "Failure to find zone for volume" diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 72c8729e..68927393 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -9,7 +9,7 @@ echo "Begin DevStack Exercise: $0" echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see -# only the first error that occured. +# only the first error that occurred. set -o errexit # Print the commands being run so that we can see the command that triggers @@ -33,9 +33,9 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc -# If cinder or n-vol are not enabled we exit with exitcode 55 which mean +# If cinder is not enabled we exit with exitcode 55 which mean # exercise is skipped. -is_service_enabled cinder n-vol || exit 55 +is_service_enabled cinder || exit 55 # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 3da11bf0..71994a81 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -140,22 +140,6 @@ if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then --role_id $RESELLER_ROLE fi -# Volume -if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - VOLUME_SERVICE=$(get_id keystone service-create \ - --name=volume \ - --type=volume \ - --description="Volume Service") - keystone endpoint-create \ - --region RegionOne \ - --service_id $VOLUME_SERVICE \ - --publicurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ - --internalurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" - fi -fi - # Heat if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then HEAT_USER=$(get_id keystone user-create --name=heat \ diff --git a/functions b/functions index c7f65dbd..90b3b784 100644 --- a/functions +++ b/functions @@ -462,7 +462,7 @@ function _cleanup_service_list () { # ``ENABLED_SERVICES`` list, if they are not already present. # # For example: -# enable_service n-vol +# enable_service qpid # # This function does not know about the special cases # for nova, glance, and quantum built into is_service_enabled(). @@ -484,7 +484,7 @@ function enable_service() { # ``ENABLED_SERVICES`` list, if they are present. # # For example: -# disable_service n-vol +# disable_service rabbit # # This function does not know about the special cases # for nova, glance, and quantum built into is_service_enabled(). @@ -513,8 +513,8 @@ function disable_all_services() { # Remove all services starting with '-'. 
For example, to install all default -# services except nova-volume (n-vol) set in ``localrc``: -# ENABLED_SERVICES+=",-n-vol" +# services except rabbit (rabbit) set in ``localrc``: +# ENABLED_SERVICES+=",-rabbit" # Uses global ``ENABLED_SERVICES`` # disable_negated_services function disable_negated_services() { diff --git a/lib/n-vol b/lib/n-vol deleted file mode 100644 index db53582b..00000000 --- a/lib/n-vol +++ /dev/null @@ -1,126 +0,0 @@ -# lib/n-vol -# Install and start Nova volume service - -# Dependencies: -# - functions -# - DATA_DIR must be defined -# - KEYSTONE_AUTH_* must be defined -# - NOVA_DIR, NOVA_BIN_DIR, NOVA_STATE_PATH must be defined -# SERVICE_{TENANT_NAME|PASSWORD} must be defined -# _configure_tgt_for_config_d() from lib/cinder - -# stack.sh -# --------- -# install_nvol -# configure_nvol -# init_nvol -# start_nvol -# stop_nvol -# cleanup_nvol - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Name of the LVM volume group to use/create for iscsi volumes -VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} -VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} - - -# cleanup_nvol() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_nvol() { - # kill instances (nova) - # delete image files (glance) - # This function intentionally left blank - : -} - -# configure_nvol() - Set config files, create data dirs, etc -function configure_nvol() { - # sudo python setup.py deploy - # iniset $XXX_CONF ... - # This function intentionally left blank - : -} - -# init_nvol() - Initialize databases, etc. -function init_nvol() { - # Configure a default volume group called '`stack-volumes`' for the volume - # service if it does not yet exist. If you don't wish to use a file backed - # volume group, create your own volume group called ``stack-volumes`` before - # invoking ``stack.sh``. - # - # By default, the backing file is 5G in size, and is stored in ``/opt/stack/data``. - - if ! sudo vgs $VOLUME_GROUP; then - VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file} - # Only create if the file doesn't already exists - [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE - DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` - # Only create if the loopback device doesn't contain $VOLUME_GROUP - if ! sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi - fi - - mkdir -p $NOVA_STATE_PATH/volumes - - if sudo vgs $VOLUME_GROUP; then - if [[ "$os_PACKAGE" = "rpm" ]]; then - # RPM doesn't start the service - start_service tgtd - fi - - # Remove nova iscsi targets - sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true - # Clean out existing volumes - for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do - # ``VOLUME_NAME_PREFIX`` prefixes the LVs we want - if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then - sudo lvremove -f $VOLUME_GROUP/$lv - fi - done - fi -} - -# install_nvol() - Collect source and prepare -function install_nvol() { - # git clone xxx - # Install is handled when installing Nova - : -} - -# start_nvol() - Start running processes, including screen -function start_nvol() { - # Setup the tgt configuration file - if [[ ! 
-f /etc/tgt/conf.d/nova.conf ]]; then - _configure_tgt_for_config_d - sudo mkdir -p /etc/tgt/conf.d - echo "include $NOVA_STATE_PATH/volumes/*" | sudo tee /etc/tgt/conf.d/nova.conf - fi - - if [[ "$os_PACKAGE" = "deb" ]]; then - # tgt in oneiric doesn't restart properly if tgtd isn't running - # do it in two steps - sudo stop tgt || true - sudo start tgt - else - restart_service tgtd - fi - - screen_it n-vol "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-volume" -} - -# stop_nvol() - Stop running processes -function stop_nvol() { - # Kill the nova volume screen window - screen -S $SCREEN_NAME -p n-vol -X kill - - stop_service tgt -} - -# Restore xtrace -$XTRACE diff --git a/lib/nova b/lib/nova index 2c1413d3..fbb5a012 100644 --- a/lib/nova +++ b/lib/nova @@ -312,15 +312,6 @@ function create_nova_conf() { if is_service_enabled n-api; then add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS" fi - if is_service_enabled n-vol; then - NOVA_ENABLED_APIS="${NOVA_ENABLED_APIS},osapi_volume" - iniset $NOVA_CONF DEFAULT enabled_apis $NOVA_ENABLED_APIS - add_nova_opt "volume_api_class=nova.volume.api.API" - add_nova_opt "volume_group=$VOLUME_GROUP" - add_nova_opt "volume_name_template=${VOLUME_NAME_PREFIX}%s" - # oneiric no longer supports ietadm - add_nova_opt "iscsi_helper=tgtadm" - fi if is_service_enabled cinder; then add_nova_opt "volume_api_class=nova.volume.cinder.API" fi diff --git a/stack.sh b/stack.sh index 40eab36e..75e0244b 100755 --- a/stack.sh +++ b/stack.sh @@ -93,7 +93,7 @@ DEST=${DEST:-/opt/stack} # ============ # Remove services which were negated in ENABLED_SERVICES -# using the "-" prefix (e.g., "-n-vol") instead of +# using the "-" prefix (e.g., "-rabbit") instead of # calling disable_service(). disable_negated_services @@ -154,12 +154,6 @@ elif [ "$rpc_backend_cnt" == 0 ]; then fi unset rpc_backend_cnt -# Make sure we only have one volume service enabled. -if is_service_enabled cinder && is_service_enabled n-vol; then - echo "ERROR: n-vol and cinder must not be enabled at the same time" - exit 1 -fi - # Set up logging level VERBOSE=$(trueorfalse True $VERBOSE) @@ -310,7 +304,6 @@ source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova source $TOP_DIR/lib/cinder -source $TOP_DIR/lib/n-vol source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat source $TOP_DIR/lib/quantum @@ -1749,9 +1742,6 @@ fi if is_service_enabled cinder; then echo_summary "Configuring Cinder" init_cinder -elif is_service_enabled n-vol; then - echo_summary "Configuring Nova volumes" - init_nvol fi if is_service_enabled nova; then @@ -1951,10 +1941,6 @@ if is_service_enabled nova; then echo_summary "Starting Nova" start_nova fi -if is_service_enabled n-vol; then - echo_summary "Starting Nova volumes" - start_nvol -fi if is_service_enabled cinder; then echo_summary "Starting Cinder" start_cinder diff --git a/stackrc b/stackrc index a02bdc01..56897798 100644 --- a/stackrc +++ b/stackrc @@ -11,10 +11,6 @@ DEST=/opt/stack # ``disable_service`` functions in ``localrc``. 
# For example, to enable Swift add this to ``localrc``: # enable_service swift -# -# And to disable Cinder and use Nova Volumes instead: -# disable_service c-api c-sch c-vol cinder -# enable_service n-vol ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit # Set the default Nova APIs to enable diff --git a/unstack.sh b/unstack.sh index 6b34aa3a..1a2cad83 100755 --- a/unstack.sh +++ b/unstack.sh @@ -26,7 +26,6 @@ DATA_DIR=${DATA_DIR:-${DEST}/data} # Get project function libraries source $TOP_DIR/lib/cinder -source $TOP_DIR/lib/n-vol # Determine what system we are running on. This provides ``os_VENDOR``, # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` @@ -58,11 +57,7 @@ fi SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* # Get the iSCSI volumes -if is_service_enabled cinder n-vol; then - if is_service_enabled n-vol; then - SCSI_PERSIST_DIR=$NOVA_STATE_PATH/volumes/* - fi - +if is_service_enabled cinder; then TARGETS=$(sudo tgtadm --op show --mode target) if [ $? -ne 0 ]; then # If tgt driver isn't running this won't work obviously @@ -88,10 +83,6 @@ if is_service_enabled cinder n-vol; then sudo rm -rf $CINDER_STATE_PATH/volumes/* fi - if is_service_enabled n-vol; then - sudo rm -rf $NOVA_STATE_PATH/volumes/* - fi - if [[ "$os_PACKAGE" = "deb" ]]; then stop_service tgt else From 203edc569bec0fe845ab1d64388c53aaedc256f0 Mon Sep 17 00:00:00 2001 From: jiajun xu Date: Thu, 15 Nov 2012 10:45:44 +0800 Subject: [PATCH 768/967] Fix the parameter expansion issue in configure_tempest.sh We need follow the syntax like ${parameter:-word} for OS_PASSWORD set. Change-Id: I44f630007b578779658ddcd68417a778b242ed4d --- tools/configure_tempest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index b48680c9..6493822e 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -128,7 +128,7 @@ IDENTITY_CATALOG_TYPE=identity # OS_USERNAME et all should be defined in openrc. OS_USERNAME=${OS_USERNAME:-demo} OS_TENANT_NAME=${OS_TENANT_NAME:-demo} -OS_PASSWORD=${OS_PASSWORD:$ADMIN_PASSWORD} +OS_PASSWORD=${OS_PASSWORD:-$ADMIN_PASSWORD} # See files/keystone_data.sh where alt_demo user # and tenant are set up... From c1b486a520dd3c2c9596244a0aa899f2e35ec3bf Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 5 Nov 2012 14:26:09 -0600 Subject: [PATCH 769/967] Simplify database selection Do not require every script that sources stackrc to also source lib/databases. * Move use_databases() to functions * Set DATABASE_TYPE in stackrc * Allow setting DATABASE_TYPE in localrc to work (use_database() essentially just sets DATABASE_TYPE at this stage so continuing to use it is equivalent) * Validate DATABASE_TYPE in stack.sh. * Change sudo to postgresql user to go through root to eliminate password prompt * fix use_database error condition Change-Id: Ibb080c76e6cd7c6eebbb641a894d54b1dde78ca6 --- functions | 16 ++++++++++++++++ lib/database | 9 --------- lib/databases/postgresql | 4 ++-- stack.sh | 10 ++++++++-- stackrc | 5 ++++- 5 files changed, 30 insertions(+), 14 deletions(-) diff --git a/functions b/functions index 92c8a5f1..8ab3eefc 100644 --- a/functions +++ b/functions @@ -841,6 +841,22 @@ function upload_image() { fi } +# Set the database backend to use +# When called from stackrc/localrc DATABASE_BACKENDS has not been +# initialized yet, just save the configuration selection and call back later +# to validate it. 
+# $1 The name of the database backend to use (mysql, postgresql, ...) +function use_database { + if [[ -z "$DATABASE_BACKENDS" ]]; then + # The backends haven't initialized yet, just save the selection for now + DATABASE_TYPE=$1 + return + fi + use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1 && return 0 + ret=$? + return $ret +} + # Toggle enable/disable_service for services that must run exclusive of each other # $1 The name of a variable containing a space-separated list of services # $2 The name of a variable in which to store the enabled service's name diff --git a/lib/database b/lib/database index 66fb36fb..07e37aef 100644 --- a/lib/database +++ b/lib/database @@ -62,15 +62,6 @@ function initialize_database_backends { return 0 } -# Set the database backend to use -# $1 The name of the database backend to use (mysql, postgresql, ...) -function use_database { - use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1 && return 0 - ret=$? - echo "Invalid database '$1'" - return $ret -} - # Recreate a given database # $1 The name of the database # $2 The character set/encoding of the database diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 81989f2e..ee24c8b5 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -38,8 +38,8 @@ function configure_database_postgresql { start_service postgresql # If creating the role fails, chances are it already existed. Try to alter it. - sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" || \ - sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" + sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" || \ + sudo -u root sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" } function install_database_postgresql { diff --git a/stack.sh b/stack.sh index f250c6bf..ec10b110 100755 --- a/stack.sh +++ b/stack.sh @@ -29,8 +29,6 @@ source $TOP_DIR/functions # and ``DISTRO`` GetDistro -# Import database library (must be loaded before stackrc which sources localrc) -source $TOP_DIR/lib/database # Settings @@ -92,6 +90,14 @@ DEST=${DEST:-/opt/stack} # Sanity Check # ============ +# Import database configuration +source $TOP_DIR/lib/database + +# Validate database selection +# Since DATABASE_BACKENDS is now set, this also gets ENABLED_SERVICES +# properly configured for the database selection. +use_database $DATABASE_TYPE || echo "Invalid database '$DATABASE_TYPE'" + # Remove services which were negated in ENABLED_SERVICES # using the "-" prefix (e.g., "-rabbit") instead of # calling disable_service(). diff --git a/stackrc b/stackrc index 56897798..01e95561 100644 --- a/stackrc +++ b/stackrc @@ -6,12 +6,15 @@ RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) # Destination path for installation DEST=/opt/stack +# Select the default database +DATABASE_TYPE=mysql + # Specify which services to launch. These generally correspond to # screen tabs. To change the default list, use the ``enable_service`` and # ``disable_service`` functions in ``localrc``. 
# For example, to enable Swift add this to ``localrc``: # enable_service swift -ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit +ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,$DATABASE_TYPE # Set the default Nova APIs to enable NOVA_ENABLED_APIS=ec2,osapi_compute,metadata From c3fca0814984daaf52a2356c4ed12c495e6bf436 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Thu, 15 Nov 2012 14:14:30 -0500 Subject: [PATCH 770/967] Remove use of nonexistent postgresql-setup. On Ubuntu the default postgresql data directory is not /var/lib/pgsql/data so the check to see if that directory exists is not needed. On Fedora we can assume that the rpm will create it and initialize it properly. So this line can safely removed without any issues. Change-Id: If949f0580eb139f3803b698ee88fceebf958448e --- lib/databases/postgresql | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/databases/postgresql b/lib/databases/postgresql index ee24c8b5..10ab7219 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -28,7 +28,6 @@ function configure_database_postgresql { PG_HBA=$PG_DIR/pg_hba.conf PG_CONF=$PG_DIR/postgresql.conf fi - sudo [ -e /var/lib/pgsql/data ] || sudo postgresql-setup initdb # Listen on all addresses sudo sed -i "/listen_addresses/s/.*/listen_addresses = '*'/" $PG_CONF # Do password auth from all IPv4 clients From 6e3330967c5c7be73a8ffee3779c214768683c56 Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Fri, 16 Nov 2012 16:41:26 -0800 Subject: [PATCH 771/967] Remove python-openstackclient. It's not used. Change-Id: I00deaa9ebcd844dd9c3c9d2560d11ad37589d847 --- stack.sh | 5 ----- stackrc | 4 ---- 2 files changed, 9 deletions(-) diff --git a/stack.sh b/stack.sh index ec10b110..8df03953 100755 --- a/stack.sh +++ b/stack.sh @@ -317,7 +317,6 @@ source $TOP_DIR/lib/tempest # Set the destination directories for OpenStack projects HORIZON_DIR=$DEST/horizon -OPENSTACKCLIENT_DIR=$DEST/python-openstackclient NOVNC_DIR=$DEST/noVNC SWIFT_DIR=$DEST/swift SWIFT3_DIR=$DEST/swift3 @@ -813,9 +812,6 @@ install_keystoneclient install_glanceclient install_novaclient -# Check out the client libs that are used most -git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH - # glance, swift middleware and nova api needs keystone middleware if is_service_enabled key g-api n-api swift; then # unified auth system (manages accounts/tokens) @@ -881,7 +877,6 @@ echo_summary "Configuring OpenStack projects" # allowing ``import nova`` or ``import glance.client`` configure_keystoneclient configure_novaclient -setup_develop $OPENSTACKCLIENT_DIR if is_service_enabled key g-api n-api swift; then configure_keystone fi diff --git a/stackrc b/stackrc index 01e95561..e0c69cab 100644 --- a/stackrc +++ b/stackrc @@ -76,10 +76,6 @@ HORIZON_BRANCH=master NOVACLIENT_REPO=${GIT_BASE}/openstack/python-novaclient.git NOVACLIENT_BRANCH=master -# consolidated openstack python client -OPENSTACKCLIENT_REPO=${GIT_BASE}/openstack/python-openstackclient.git -OPENSTACKCLIENT_BRANCH=master - # python keystone client library to nova that horizon uses KEYSTONECLIENT_REPO=${GIT_BASE}/openstack/python-keystoneclient KEYSTONECLIENT_BRANCH=master From 07ccefd6bee75dc6df1d6544b92682f65aa0202f Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Mon, 19 Nov 2012 18:55:33 +1300 Subject: [PATCH 772/967] Default Q_HOST to HOST_IP rather than 
localhost. This makes it less likely to interact with e.g. http_proxy settings. I filed this as bug 1080561. Change-Id: If97459a28f2d2a77cd322bb3f6024d11fbb8fcd4 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index ec10b110..70c9e040 100755 --- a/stack.sh +++ b/stack.sh @@ -330,7 +330,7 @@ Q_PLUGIN=${Q_PLUGIN:-openvswitch} # Default Quantum Port Q_PORT=${Q_PORT:-9696} # Default Quantum Host -Q_HOST=${Q_HOST:-localhost} +Q_HOST=${Q_HOST:-$HOST_IP} # Which Quantum API nova should use # Default admin username Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum} From 443ac48fdef510835bf2de1ba27f0b6baac8f5b8 Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Mon, 19 Nov 2012 18:59:04 +1300 Subject: [PATCH 773/967] Make it possible to choose a different VIF driver. This is useful when working with baremetal which uses openvswitch quantum plugin, but baremetal-vif vif driver. bug 1080562 Change-Id: I9f94a8f2d7f11fa0771a5304b0aed1d0de5a3db7 --- stack.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index ec10b110..57049d9c 100755 --- a/stack.sh +++ b/stack.sh @@ -1773,11 +1773,11 @@ if is_service_enabled nova; then add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT" if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - NOVA_VIF_DRIVER="nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver" + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - NOVA_VIF_DRIVER="nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver" + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"} elif [[ "$Q_PLUGIN" = "ryu" ]]; then - NOVA_VIF_DRIVER="quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver" + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver"} add_nova_opt "libvirt_ovs_integration_bridge=$OVS_BRIDGE" add_nova_opt "linuxnet_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" add_nova_opt "libvirt_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" From 596b906b63e2f60a185ae969e35f58c6318480e7 Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Mon, 19 Nov 2012 10:58:50 -0500 Subject: [PATCH 774/967] Adds requisite changes to configure_tempest.sh for EC2/S3 tests * Adds all the BOTO_XXX variables to the configuration file setup that are needed by https://review.openstack.org/#/c/14689/15 Change-Id: I44b2950705807fcfd026f1069fbe0d2727632760 --- tools/configure_tempest.sh | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 6493822e..9b543ab0 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -209,6 +209,21 @@ VOLUME_CATALOG_TYPE=volume LIVE_MIGRATION_AVAILABLE=${LIVE_MIGRATION_AVAILABLE:-False} USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION=${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} +# EC2 and S3 test configuration +BOTO_EC2_URL="http://$IDENTITY_HOST:8773/services/Cloud" +BOTO_S3_URL="http://$IDENTITY_HOST:3333" +BOTO_AWS_ACCESS="" # Created in tempest... +BOTO_AWS_SECRET="" # Created in tempest... 
+BOTO_AWS_REGION="RegionOne" +BOTO_S3_MATERIALS_PATH=$DEST/devstack/files/images/s3-materials/cirros-0.3.0 +BOTO_ARI_MANIFEST=cirros-0.3.0-x86_64-initrd.manifest.xml +BOTO_AMI_MANIFEST=cirros-0.3.0-x86_64-blank.img.manifest.xml +BOTO_AKI_MANIFEST=cirros-0.3.0-x86_64-vmlinuz.manifest.xml +BOTO_FLAVOR_NAME=m1.tiny +BOTO_SOCKET_TIMEOUT=5 +BOTO_BUILD_TIMEOUT=${COMPUTE_BUILD_TIMEOUT:-400} +BOTO_BUILD_INTERVAL=${COMPUTE_BUILD_INTERVAL:-3} + sed -e " s,%IDENTITY_USE_SSL%,$IDENTITY_USE_SSL,g; s,%IDENTITY_HOST%,$IDENTITY_HOST,g; @@ -266,6 +281,19 @@ sed -e " s,%VOLUME_BUILD_TIMEOUT%,$VOLUME_BUILD_TIMEOUT,g; s,%LIVE_MIGRATION_AVAILABLE%,$LIVE_MIGRATION_AVAILABLE,g; s,%USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION%,$USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION,g; + s,%BOTO_EC2_URL%,$BOTO_EC2_URL,g; + s,%BOTO_S3_URL%,$BOTO_S3_URL,g; + s,%BOTO_AWS_ACCESS%,$BOTO_AWS_ACCESS,g; + s,%BOTO_AWS_SECRET%,$BOTO_AWS_SECRET,g; + s,%BOTO_AWS_REGION%,$BOTO_AWS_REGION,g; + s,%BOTO_S3_MATERIALS_PATH%,$BOTO_S3_MATERIALS_PATH,g; + s,%BOTO_ARI_MANIFEST%,$BOTO_ARI_MANIFEST,g; + s,%BOTO_AMI_MANIFEST%,$BOTO_AMI_MANIFEST,g; + s,%BOTO_AKI_MANIFEST%,$BOTO_AKI_MANIFEST,g; + s,%BOTO_FLAVOR_NAME%,$BOTO_FLAVOR_NAME,g; + s,%BOTO_SOCKET_TIMEOUT%,$BOTO_SOCKET_TIMEOUT,g; + s,%BOTO_BUILD_TIMEOUT%,$BOTO_BUILD_TIMEOUT,g; + s,%BOTO_BUILD_INTERVAL%,$BOTO_BUILD_INTERVAL,g; " -i $TEMPEST_CONF echo "Created tempest configuration file:" From d57ccf0271e2d416fb0fc73b5ab96f342eae7f28 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 15 Nov 2012 10:09:33 -0800 Subject: [PATCH 775/967] Add nova-conductor service This is a new service for nova that will soon be required for n-cpu to function. Change-Id: I9a2e62f25200a47233a7796084ad8ebabc852c59 --- lib/nova | 3 ++- stackrc | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index fbb5a012..b5efce96 100644 --- a/lib/nova +++ b/lib/nova @@ -434,12 +434,13 @@ function start_nova() { screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF --web ." screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF" screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" + screen_it n-cond "cd $NOVA_DIR && ./bin/nova-conductor" } # stop_nova() - Stop running processes (non-screen) function stop_nova() { # Kill the nova screen windows - for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth; do + for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-cond; do screen -S $SCREEN_NAME -p $serv -X kill done } diff --git a/stackrc b/stackrc index 01e95561..9588cf99 100644 --- a/stackrc +++ b/stackrc @@ -14,7 +14,7 @@ DATABASE_TYPE=mysql # ``disable_service`` functions in ``localrc``. # For example, to enable Swift add this to ``localrc``: # enable_service swift -ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,$DATABASE_TYPE +ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,$DATABASE_TYPE # Set the default Nova APIs to enable NOVA_ENABLED_APIS=ec2,osapi_compute,metadata From 818a048afc2ae0935f487dec7107237c7fba2f28 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 19 Nov 2012 15:05:31 -0500 Subject: [PATCH 776/967] install nodejs-legacy on quantal quantal changed the name of the node binary on disk, which breaks horizon on 12.10 installs. Provide a work around for installing the legacy package on that environment. 
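For reference, the work-around leans on the per-distro annotation used in DevStack's package lists: a trailing "# dist:<release>" comment on a package line is intended to limit that entry to the named release (the rpms-suse lists added later in this series carry the same tag), so the legacy package is only pulled in where the binary was renamed. Illustrative lines, matching the hunk below:

    nodejs
    nodejs-legacy # dist:quantal
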
Fixes bug #1070083 Change-Id: If8ef211d12451ef4e1df0d2398cf18a3b2c46da3 --- files/apts/horizon | 1 + 1 file changed, 1 insertion(+) diff --git a/files/apts/horizon b/files/apts/horizon index 2161ccd3..2c2faf1a 100644 --- a/files/apts/horizon +++ b/files/apts/horizon @@ -21,4 +21,5 @@ python-coverage python-cherrypy3 # why? python-migrate nodejs +nodejs-legacy # dist:quantal python-netaddr From adfc7a3c0aec56030da5369e3598520ba18b3e9c Mon Sep 17 00:00:00 2001 From: Terry Wilson Date: Tue, 20 Nov 2012 13:08:13 -0500 Subject: [PATCH 777/967] Re-add postgresql-setup initdb for Fedora The Fedora RPM does not set up the postgresql data directory. postgresql-setup initdb must be run after installing the RPM. Change-Id: I5e5ab659e83f4ee6a024f74a23bf4562ea0065ce --- lib/databases/postgresql | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 10ab7219..d9c2f00c 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -23,6 +23,7 @@ function configure_database_postgresql { if [[ "$os_PACKAGE" = "rpm" ]]; then PG_HBA=/var/lib/pgsql/data/pg_hba.conf PG_CONF=/var/lib/pgsql/data/postgresql.conf + sudo [ -e $PG_HBA ] || sudo postgresql-setup initdb else PG_DIR=`find /etc/postgresql -name pg_hba.conf|xargs dirname` PG_HBA=$PG_DIR/pg_hba.conf From b592b29f923b4ea137d8efd4bb5f0a6dde356075 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Wed, 21 Nov 2012 14:20:12 +1300 Subject: [PATCH 778/967] Configure heat engine server URLs Wait conditions do not work without them. Change-Id: I64ed75e4b84c73678af11182ac951cb1da561428 --- lib/heat | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/heat b/lib/heat index efdcfad3..396c8a05 100644 --- a/lib/heat +++ b/lib/heat @@ -125,6 +125,9 @@ function configure_heat() { iniset $HEAT_ENGINE_CONF DEFAULT use_syslog $SYSLOG iniset $HEAT_ENGINE_CONF DEFAULT bind_host $HEAT_ENGINE_HOST iniset $HEAT_ENGINE_CONF DEFAULT bind_port $HEAT_ENGINE_PORT + iniset $HEAT_ENGINE_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT + iniset $HEAT_ENGINE_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_METADATA_HOST:$HEAT_METADATA_PORT + iniset $HEAT_ENGINE_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT local dburl database_connection_url dburl heat iniset $HEAT_ENGINE_CONF DEFAULT sql_connection $dburl From 766ae34261a33fbf7661e63f30b227dd9177bd71 Mon Sep 17 00:00:00 2001 From: Akihiro MOTOKI Date: Thu, 22 Nov 2012 20:04:02 +0900 Subject: [PATCH 779/967] Creates nova endpoint when n-api is enabled. Fixes bug 1081975 Nova endpoint in keystone was registered if n-cpu is enabled. However it is a usual case where n-cpu runs on a different host in multi-node setup and it results in no endpoint for nova. n-api is a better condition since nova-api and keystone usually run on a same host. 
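The guard being changed is the plain substring match against $ENABLED_SERVICES that files/keystone_data.sh uses throughout; stack.sh expresses essentially the same test through the is_service_enabled helper defined in functions. A minimal sketch of the two equivalent forms of the new check (assuming functions has been sourced so the helper is available):

    # substring match against the comma-separated service list, as in keystone_data.sh
    if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
        echo "register the nova service and endpoint"
    fi

    # the same test via the helper used by stack.sh
    if is_service_enabled n-api; then
        echo "register the nova service and endpoint"
    fi
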
Change-Id: Ic097e1c3bd30798d9d3c5fb76023fbdb3ae189d9 --- files/keystone_data.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 71994a81..f75d24a6 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -106,7 +106,7 @@ if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then fi # Nova -if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then +if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then NOVA_USER=$(get_id keystone user-create \ --name=nova \ --pass="$SERVICE_PASSWORD" \ From b562e6a710b34609f95bcc46e2ae50e7812aa103 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 19 Nov 2012 16:00:01 -0500 Subject: [PATCH 780/967] move horizon logic to lib to clean up stack.sh pre holiday refactor extrodinare, get the horizon code over fully into lib/horizon so that all these fixes aren't scattered through stack.sh Change-Id: I7f26c5c6708d5693048eb7b1ce792122adbc7351 --- lib/horizon | 133 ++++++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 72 +++------------------------- unstack.sh | 3 +- 3 files changed, 142 insertions(+), 66 deletions(-) create mode 100644 lib/horizon diff --git a/lib/horizon b/lib/horizon new file mode 100644 index 00000000..c6c96dae --- /dev/null +++ b/lib/horizon @@ -0,0 +1,133 @@ +# lib/horizon +# Functions to control the configuration and operation of the horizon service +# + +# Dependencies: +# ``functions`` file +# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# + +# ``stack.sh`` calls the entry points in this order: +# +# install_horizon +# configure_horizon +# init_horizon +# start_horizon +# stop_horizon +# cleanup_horizon + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# + +# Set up default directories +HORIZON_DIR=$DEST/horizon + +# Allow overriding the default Apache user and group, default both to +# current user. +APACHE_USER=${APACHE_USER:-$USER} +APACHE_GROUP=${APACHE_GROUP:-$APACHE_USER} + + +# Entry Points +# ------------ + +# cleanup_horizon() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_horizon() { + # kill instances (nova) + # delete image files (glance) + # This function intentionally left blank + : +} + +# configure_horizon() - Set config files, create data dirs, etc +function configure_horizon() { + setup_develop $HORIZON_DIR +} + +# init_horizon() - Initialize databases, etc. +function init_horizon() { + # Remove stale session database. + rm -f $HORIZON_DIR/openstack_dashboard/local/dashboard_openstack.sqlite3 + + # ``local_settings.py`` is used to override horizon default settings. + local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py + cp $FILES/horizon_settings.py $local_settings + + # Initialize the horizon database (it stores sessions and notices shown to + # users). The user system is external (keystone). 
+ cd $HORIZON_DIR + python manage.py syncdb --noinput + cd $TOP_DIR + + # Create an empty directory that apache uses as docroot + sudo mkdir -p $HORIZON_DIR/.blackhole + + + if [[ "$os_PACKAGE" = "deb" ]]; then + APACHE_NAME=apache2 + APACHE_CONF=sites-available/horizon + # Clean up the old config name + sudo rm -f /etc/apache2/sites-enabled/000-default + # Be a good citizen and use the distro tools here + sudo touch /etc/$APACHE_NAME/$APACHE_CONF + sudo a2ensite horizon + else + # Install httpd, which is NOPRIME'd + APACHE_NAME=httpd + APACHE_CONF=conf.d/horizon.conf + sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf + fi + + # Configure apache to run horizon + sudo sh -c "sed -e \" + s,%USER%,$APACHE_USER,g; + s,%GROUP%,$APACHE_GROUP,g; + s,%HORIZON_DIR%,$HORIZON_DIR,g; + s,%APACHE_NAME%,$APACHE_NAME,g; + s,%DEST%,$DEST,g; + \" $FILES/apache-horizon.template >/etc/$APACHE_NAME/$APACHE_CONF" + +} + +# install_horizon() - Collect source and prepare +function install_horizon() { + # Apache installation, because we mark it NOPRIME + if [[ "$os_PACKAGE" = "deb" ]]; then + # Install apache2, which is NOPRIME'd + install_package apache2 libapache2-mod-wsgi + else + sudo rm -f /etc/httpd/conf.d/000-* + install_package httpd mod_wsgi + fi + + # NOTE(sdague) quantal changed the name of the node binary + if [[ "$os_PACKAGE" = "deb" ]]; then + if [[ ! -e "/usr/bin/node" ]]; then + install_package nodejs-legacy + fi + fi + + git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG +} + +# start_horizon() - Start running processes, including screen +function start_horizon() { + restart_service $APACHE_NAME + screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log" +} + +# stop_horizon() - Stop running processes (non-screen) +function stop_horizon() { + stop_service apache2 +} + +# Restore xtrace +$XTRACE diff --git a/stack.sh b/stack.sh index 9b830b13..9ecc7499 100755 --- a/stack.sh +++ b/stack.sh @@ -306,6 +306,7 @@ SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} # ================== # Get project function libraries +source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova @@ -568,15 +569,6 @@ read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE ( SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} -# Horizon -# ------- - -# Allow overriding the default Apache user and group, default both to -# current user. 
-APACHE_USER=${APACHE_USER:-$USER} -APACHE_GROUP=${APACHE_GROUP:-$APACHE_USER} - - # Log files # --------- @@ -756,16 +748,6 @@ if is_service_enabled $DATABASE_BACKENDS; then install_database fi -if is_service_enabled horizon; then - if [[ "$os_PACKAGE" = "deb" ]]; then - # Install apache2, which is NOPRIME'd - install_package apache2 libapache2-mod-wsgi - else - sudo rm -f /etc/httpd/conf.d/000-* - install_package httpd mod_wsgi - fi -fi - if is_service_enabled q-agt; then if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then # Install deps @@ -840,8 +822,8 @@ if is_service_enabled n-novnc; then git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH fi if is_service_enabled horizon; then - # django powered web control panel for openstack - git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG + # dashboard + install_horizon fi if is_service_enabled quantum; then git_clone $QUANTUM_CLIENT_REPO $QUANTUM_CLIENT_DIR $QUANTUM_CLIENT_BRANCH @@ -899,7 +881,7 @@ if is_service_enabled nova; then configure_nova fi if is_service_enabled horizon; then - setup_develop $HORIZON_DIR + configure_horizon fi if is_service_enabled quantum; then setup_develop $QUANTUM_CLIENT_DIR @@ -1035,48 +1017,8 @@ fi if is_service_enabled horizon; then echo_summary "Configuring and starting Horizon" - - # Remove stale session database. - rm -f $HORIZON_DIR/openstack_dashboard/local/dashboard_openstack.sqlite3 - - # ``local_settings.py`` is used to override horizon default settings. - local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py - cp $FILES/horizon_settings.py $local_settings - - # Initialize the horizon database (it stores sessions and notices shown to - # users). The user system is external (keystone). - cd $HORIZON_DIR - python manage.py syncdb --noinput - cd $TOP_DIR - - # Create an empty directory that apache uses as docroot - sudo mkdir -p $HORIZON_DIR/.blackhole - - if [[ "$os_PACKAGE" = "deb" ]]; then - APACHE_NAME=apache2 - APACHE_CONF=sites-available/horizon - # Clean up the old config name - sudo rm -f /etc/apache2/sites-enabled/000-default - # Be a good citizen and use the distro tools here - sudo touch /etc/$APACHE_NAME/$APACHE_CONF - sudo a2ensite horizon - else - # Install httpd, which is NOPRIME'd - APACHE_NAME=httpd - APACHE_CONF=conf.d/horizon.conf - sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf - fi - - # Configure apache to run horizon - sudo sh -c "sed -e \" - s,%USER%,$APACHE_USER,g; - s,%GROUP%,$APACHE_GROUP,g; - s,%HORIZON_DIR%,$HORIZON_DIR,g; - s,%APACHE_NAME%,$APACHE_NAME,g; - s,%DEST%,$DEST,g; - \" $FILES/apache-horizon.template >/etc/$APACHE_NAME/$APACHE_CONF" - - restart_service $APACHE_NAME + init_horizon + start_horizon fi @@ -1958,7 +1900,7 @@ if is_service_enabled ceilometer; then echo_summary "Starting Ceilometer" start_ceilometer fi -screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log" + screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v" # Starting the nova-objectstore only if swift3 service is not enabled. diff --git a/unstack.sh b/unstack.sh index 1a2cad83..0040cf1e 100755 --- a/unstack.sh +++ b/unstack.sh @@ -26,6 +26,7 @@ DATA_DIR=${DATA_DIR:-${DEST}/data} # Get project function libraries source $TOP_DIR/lib/cinder +source $TOP_DIR/lib/horizon # Determine what system we are running on. 
This provides ``os_VENDOR``, # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` @@ -51,7 +52,7 @@ fi # Apache has the WSGI processes if is_service_enabled horizon; then - stop_service apache2 + stop_horizon fi SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* From 9f61d29e66433eac5c657f6d3a3903b35ecfb7d1 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 26 Nov 2012 18:56:20 +0000 Subject: [PATCH 781/967] Revert "Remove python-openstackclient." This reverts commit 6e3330967c5c7be73a8ffee3779c214768683c56 This is in fact useful to an admittedly small population. And if I had not been on vacation I'd have -2'd it... --- stack.sh | 5 +++++ stackrc | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/stack.sh b/stack.sh index 8df03953..ec10b110 100755 --- a/stack.sh +++ b/stack.sh @@ -317,6 +317,7 @@ source $TOP_DIR/lib/tempest # Set the destination directories for OpenStack projects HORIZON_DIR=$DEST/horizon +OPENSTACKCLIENT_DIR=$DEST/python-openstackclient NOVNC_DIR=$DEST/noVNC SWIFT_DIR=$DEST/swift SWIFT3_DIR=$DEST/swift3 @@ -812,6 +813,9 @@ install_keystoneclient install_glanceclient install_novaclient +# Check out the client libs that are used most +git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH + # glance, swift middleware and nova api needs keystone middleware if is_service_enabled key g-api n-api swift; then # unified auth system (manages accounts/tokens) @@ -877,6 +881,7 @@ echo_summary "Configuring OpenStack projects" # allowing ``import nova`` or ``import glance.client`` configure_keystoneclient configure_novaclient +setup_develop $OPENSTACKCLIENT_DIR if is_service_enabled key g-api n-api swift; then configure_keystone fi diff --git a/stackrc b/stackrc index e0c69cab..01e95561 100644 --- a/stackrc +++ b/stackrc @@ -76,6 +76,10 @@ HORIZON_BRANCH=master NOVACLIENT_REPO=${GIT_BASE}/openstack/python-novaclient.git NOVACLIENT_BRANCH=master +# consolidated openstack python client +OPENSTACKCLIENT_REPO=${GIT_BASE}/openstack/python-openstackclient.git +OPENSTACKCLIENT_BRANCH=master + # python keystone client library to nova that horizon uses KEYSTONECLIENT_REPO=${GIT_BASE}/openstack/python-keystoneclient KEYSTONECLIENT_BRANCH=master From 0a9954f2c251c68a8261a5ed6999c1585e48de67 Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Tue, 20 Nov 2012 11:34:25 +1300 Subject: [PATCH 782/967] Also pickup quantum distro dependencies (bug 1080886). Change-Id: Ic0fc0b03dc01782d0d85d98de765f04fcbcacd74 --- functions | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/functions b/functions index 8ab3eefc..9f540492 100644 --- a/functions +++ b/functions @@ -155,6 +155,10 @@ function get_packages() { if [[ ! $file_to_parse =~ keystone ]]; then file_to_parse="${file_to_parse} keystone" fi + elif [[ $service == q-* ]]; then + if [[ ! $file_to_parse =~ quantum ]]; then + file_to_parse="${file_to_parse} quantum" + fi fi done From 0edfd6f6e39e01b6acf29be32b2cb18a0c4f4482 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 23 Nov 2012 15:00:38 -0800 Subject: [PATCH 783/967] Use NOVA_BIN_DIR for newer binaries. We added a variable a while ago, NOVA_BIN_DIR which is set properly to the location of the binaries. Rather than using the in-tree bin-dir, which is going away in favor of entrypoints console_scripts. 
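Using $NOVA_BIN_DIR keeps these screen_it lines working whether the console scripts are still shipped in the source tree or have been installed onto the system path as console_scripts entry points. A minimal sketch of the pattern, with a purely illustrative fallback (lib/nova owns the real assignment, outside this excerpt):

    # Hypothetical default for illustration only; lib/nova sets the real value.
    NOVA_BIN_DIR=${NOVA_BIN_DIR:-$NOVA_DIR/bin}
    screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor"
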
Change-Id: I65040cfe8321d49595a909353870f981bbd6a480 --- lib/nova | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/nova b/lib/nova index b5efce96..3ea2f2af 100644 --- a/lib/nova +++ b/lib/nova @@ -432,9 +432,9 @@ function start_nova() { screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network" screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler" screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF --web ." - screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF" - screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" - screen_it n-cond "cd $NOVA_DIR && ./bin/nova-conductor" + screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF" + screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth" + screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor" } # stop_nova() - Stop running processes (non-screen) From e83356217b48308b3a4dc975940c79a22e159238 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 27 Nov 2012 17:00:11 -0600 Subject: [PATCH 784/967] Fix ini functions to handle spaces in section names This allows section names to look like: [ default ] OpenSSL is the primary offender for this usage. Change-Id: If5c711107e73cebab9d4a26ca02a7ce572224377 --- functions | 12 ++++++------ tests/functions.sh | 12 +++++++++++- 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/functions b/functions index 8ab3eefc..cdb982d5 100644 --- a/functions +++ b/functions @@ -370,7 +370,7 @@ function inicomment() { local file=$1 local section=$2 local option=$3 - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" $file + sed -ie "/^\[ *$section *\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" $file } # Uncomment an option in an INI file @@ -379,7 +379,7 @@ function iniuncomment() { local file=$1 local section=$2 local option=$3 - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" $file + sed -ie "/^\[ *$section *\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" $file } @@ -390,7 +390,7 @@ function iniget() { local section=$2 local option=$3 local line - line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" $file) + line=$(sed -ne "/^\[ *$section *\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" $file) echo ${line#*=} } @@ -402,18 +402,18 @@ function iniset() { local section=$2 local option=$3 local value=$4 - if ! grep -q "^\[$section\]" $file; then + if ! 
grep -q "^\[ *$section *\]" $file; then # Add section at the end echo -e "\n[$section]" >>$file fi if [[ -z "$(iniget $file $section $option)" ]]; then # Add it - sed -i -e "/^\[$section\]/ a\\ + sed -ie "/^\[ *$section *\]/ a\\ $option = $value " $file else # Replace it - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" $file + sed -ie "/^\[ *$section *\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" $file fi } diff --git a/tests/functions.sh b/tests/functions.sh index 3a0f3199..d2cc5c44 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -54,6 +54,9 @@ handlers = aa, bb [bbb] handlers=ee,ff + +[ ccc ] +spaces = yes EOF # Test with spaces @@ -74,6 +77,14 @@ else echo "iniget failed: $VAL" fi +# Test with spaces in section header + +VAL=$(iniget test.ini ccc spaces) +if [[ "$VAL" == "yes" ]]; then + echo "OK: $VAL" +else + echo "iniget failed: $VAL" +fi # Test without spaces, end of file @@ -112,7 +123,6 @@ else echo "iniget failed: $VAL" fi - # Test option not exist VAL=$(iniget test.ini aaa debug) From 856a11e0e42c253b84b6074c620d54e620cd17c6 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Wed, 21 Nov 2012 16:04:12 +0100 Subject: [PATCH 785/967] Add partial openSUSE/SLE support Note that this is the first part of the support. A second part involves dealing with the package names. Among the changes: - add several functions to determine some distro-specific behavior (how to call usermod, if some features are available on the distro, etc.) - correctly detect openSUSE and SLE in GetOSVersion, and set DISTRO accordingly - new is_suse() function to check if running on a SUSE-based distro - use zypper to install packages - adapt apache virtual host configuration for openSUSE - some simple fixes (path to pip, mysql service name) Change-Id: Id2f7c9e18a1c4a7b7cea262ea7959d183e4b0cf0 --- functions | 115 +++++++++++++++++++++++++++++++++++++++++++- lib/cinder | 6 +-- lib/databases/mysql | 6 ++- lib/horizon | 14 ++++-- lib/nova | 8 +-- stack.sh | 5 +- 6 files changed, 134 insertions(+), 20 deletions(-) diff --git a/functions b/functions index 8ab3eefc..16664d6d 100644 --- a/functions +++ b/functions @@ -223,6 +223,12 @@ GetOSVersion() { os_UPDATE="" if [[ "Debian,Ubuntu" =~ $os_VENDOR ]]; then os_PACKAGE="deb" + elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then + lsb_release -d -s | grep -q openSUSE + if [[ $? 
-eq 0 ]]; then + os_VENDOR="openSUSE" + fi + os_PACKAGE="rpm" else os_PACKAGE="rpm" fi @@ -246,6 +252,23 @@ GetOSVersion() { os_VENDOR="" done os_PACKAGE="rpm" + elif [[ -r /etc/SuSE-release ]]; then + for r in openSUSE "SUSE Linux"; do + if [[ "$r" = "SUSE Linux" ]]; then + os_VENDOR="SUSE LINUX" + else + os_VENDOR=$r + fi + + if [[ -n "`grep \"$r\" /etc/SuSE-release`" ]]; then + os_CODENAME=`grep "CODENAME = " /etc/SuSE-release | sed 's:.* = ::g'` + os_RELEASE=`grep "VERSION = " /etc/SuSE-release | sed 's:.* = ::g'` + os_UPDATE=`grep "PATCHLEVEL = " /etc/SuSE-release | sed 's:.* = ::g'` + break + fi + os_VENDOR="" + done + os_PACKAGE="rpm" fi export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME } @@ -297,6 +320,15 @@ function GetDistro() { elif [[ "$os_VENDOR" =~ (Fedora) ]]; then # For Fedora, just use 'f' and the release DISTRO="f$os_RELEASE" + elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then + DISTRO="opensuse-$os_RELEASE" + elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then + # For SLE, also use the service pack + if [[ -z "$os_UPDATE" ]]; then + DISTRO="sle${os_RELEASE}" + else + DISTRO="sle${os_RELEASE}sp${os_UPDATE}" + fi else # Catch-all for now is Vendor + Release + Update DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE" @@ -305,6 +337,19 @@ function GetDistro() { } +# Determine if current distribution is a SUSE-based distribution +# (openSUSE, SLE). +# is_suse +function is_suse { + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + [[ "$os_VENDOR" = "openSUSE" || "$os_VENDOR" = "SUSE LINUX" ]] + return $? +} + + # git clone only if directory doesn't exist already. Since ``DEST`` might not # be owned by the installation user, we create the directory and change the # ownership to the proper user. @@ -542,7 +587,11 @@ function install_package() { apt_get install "$@" else - yum_install "$@" + if is_suse; then + zypper_install "$@" + else + yum_install "$@" + fi fi } @@ -593,7 +642,7 @@ function pip_install { SUDO_PIP="env" else SUDO_PIP="sudo" - if [[ "$os_PACKAGE" = "deb" ]]; then + if [[ "$os_PACKAGE" = "deb" || is_suse ]]; then CMD_PIP=/usr/bin/pip else CMD_PIP=/usr/bin/pip-python @@ -946,6 +995,68 @@ function _ssh_check_novanet() { fi } + +# zypper wrapper to set arguments correctly +# zypper_install package [package ...] +function zypper_install() { + [[ "$OFFLINE" = "True" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ + zypper --non-interactive install --auto-agree-with-licenses "$@" +} + + +# Add a user to a group. +# add_user_to_group user group +function add_user_to_group() { + local user=$1 + local group=$2 + + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + # SLE11 and openSUSE 12.2 don't have the usual usermod + if ! is_suse || [[ "$os_VENDOR" = "openSUSE" && "$os_RELEASE" != "12.2" ]]; then + sudo usermod -a -G "$group" "$user" + else + sudo usermod -A "$group" "$user" + fi +} + + +# Get the location of the $module-rootwrap executables, where module is cinder +# or nova. +# get_rootwrap_location module +function get_rootwrap_location() { + local module=$1 + + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + + if [[ "$os_PACKAGE" = "deb" || is_suse ]]; then + echo "/usr/local/bin/$module-rootwrap" + else + echo "/usr/bin/$module-rootwrap" + fi +} + + +# Check if qpid can be used on the current distro. 
+# qpid_is_supported +function qpid_is_supported() { + if [[ -z "$DISTRO" ]]; then + GetDistro + fi + + # Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is + # not in openSUSE either right now. + [[ "$DISTRO" = "oneiric" || is_suse ]] + return $? +} + # Restore xtrace $XTRACE diff --git a/lib/cinder b/lib/cinder index c2cf15bf..058fcc23 100644 --- a/lib/cinder +++ b/lib/cinder @@ -63,11 +63,7 @@ function configure_cinder() { cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR # Set the paths of certain binaries - if [[ "$os_PACKAGE" = "deb" ]]; then - CINDER_ROOTWRAP=/usr/local/bin/cinder-rootwrap - else - CINDER_ROOTWRAP=/usr/bin/cinder-rootwrap - fi + CINDER_ROOTWRAP=$(get_rootwrap_location cinder) # If Cinder ships the new rootwrap filters files, deploy them # (owned by root) and add a parameter to $CINDER_ROOTWRAP diff --git a/lib/databases/mysql b/lib/databases/mysql index ed59290a..fc6a3b7a 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -25,7 +25,11 @@ function configure_database_mysql { MYSQL=mysql else MY_CONF=/etc/my.cnf - MYSQL=mysqld + if is_suse; then + MYSQL=mysql + else + MYSQL=mysqld + fi fi # Start mysql-server diff --git a/lib/horizon b/lib/horizon index c6c96dae..af09f770 100644 --- a/lib/horizon +++ b/lib/horizon @@ -81,9 +81,17 @@ function init_horizon() { sudo a2ensite horizon else # Install httpd, which is NOPRIME'd - APACHE_NAME=httpd - APACHE_CONF=conf.d/horizon.conf - sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf + if is_suse; then + APACHE_NAME=apache2 + APACHE_CONF=vhosts.d/horizon.conf + # Append wsgi to the list of modules to load + grep -q "^APACHE_MODULES=.*wsgi" /etc/sysconfig/apache2 || + sudo sed '/^APACHE_MODULES=/s/^\(.*\)"$/\1 wsgi"/' -i /etc/sysconfig/apache2 + else + APACHE_NAME=httpd + APACHE_CONF=conf.d/horizon.conf + sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf + fi fi # Configure apache to run horizon diff --git a/lib/nova b/lib/nova index 3ea2f2af..d15d9e31 100644 --- a/lib/nova +++ b/lib/nova @@ -47,11 +47,7 @@ else fi # Set the paths of certain binaries -if [[ "$os_PACKAGE" = "deb" ]]; then - NOVA_ROOTWRAP=/usr/local/bin/nova-rootwrap -else - NOVA_ROOTWRAP=/usr/bin/nova-rootwrap -fi +NOVA_ROOTWRAP=$(get_rootwrap_location nova) # Allow rate limiting to be turned off for testing, like for Tempest # NOTE: Set API_RATE_LIMIT="False" to turn OFF rate limiting @@ -252,7 +248,7 @@ EOF' # The user that nova runs as needs to be member of **libvirtd** group otherwise # nova-compute will be unable to use libvirt. - sudo usermod -a -G libvirtd `whoami` + add_user_to_group `whoami` libvirtd # libvirt detects various settings on startup, as we potentially changed # the system configuration (modules, filesystems), we need to restart diff --git a/stack.sh b/stack.sh index 570fc688..70f46104 100755 --- a/stack.sh +++ b/stack.sh @@ -113,9 +113,8 @@ if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17) ]]; then fi fi -# Qpid was introduced to Ubuntu in precise, disallow it on oneiric -if [ "${DISTRO}" = "oneiric" ] && is_service_enabled qpid ; then - echo "You must use Ubuntu Precise or newer for Qpid support." +if is_service_enabled qpid && ! qpid_is_supported; then + echo "Qpid support is not available for this version of your distribution." 
exit 1 fi From ca5c4713869fb88c2e8753039f80f1f8bf1d8fef Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Wed, 21 Nov 2012 17:45:49 +0100 Subject: [PATCH 786/967] Add package names for openSUSE/SLE Change-Id: I487cc7b8bd228ff77c9881528e3395cbe3c43d4a --- files/rpms-suse/ceilometer-collector | 4 +++ files/rpms-suse/cinder | 2 ++ files/rpms-suse/general | 23 +++++++++++++ files/rpms-suse/glance | 12 +++++++ files/rpms-suse/horizon | 23 +++++++++++++ files/rpms-suse/keystone | 17 ++++++++++ files/rpms-suse/n-api | 2 ++ files/rpms-suse/n-cpu | 4 +++ files/rpms-suse/n-novnc | 1 + files/rpms-suse/n-vol | 2 ++ files/rpms-suse/nova | 48 ++++++++++++++++++++++++++++ files/rpms-suse/postgresql | 1 + files/rpms-suse/quantum | 27 ++++++++++++++++ files/rpms-suse/ryu | 5 +++ files/rpms-suse/swift | 19 +++++++++++ lib/databases/mysql | 6 +++- lib/horizon | 2 ++ stack.sh | 14 ++++++-- 18 files changed, 209 insertions(+), 3 deletions(-) create mode 100644 files/rpms-suse/ceilometer-collector create mode 100644 files/rpms-suse/cinder create mode 100644 files/rpms-suse/general create mode 100644 files/rpms-suse/glance create mode 100644 files/rpms-suse/horizon create mode 100644 files/rpms-suse/keystone create mode 100644 files/rpms-suse/n-api create mode 100644 files/rpms-suse/n-cpu create mode 100644 files/rpms-suse/n-novnc create mode 100644 files/rpms-suse/n-vol create mode 100644 files/rpms-suse/nova create mode 100644 files/rpms-suse/postgresql create mode 100644 files/rpms-suse/quantum create mode 100644 files/rpms-suse/ryu create mode 100644 files/rpms-suse/swift diff --git a/files/rpms-suse/ceilometer-collector b/files/rpms-suse/ceilometer-collector new file mode 100644 index 00000000..c76454fd --- /dev/null +++ b/files/rpms-suse/ceilometer-collector @@ -0,0 +1,4 @@ +# Not available in openSUSE main repositories, but can be fetched from OBS +# (devel:languages:python and server:database projects) +mongodb +python-pymongo diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder new file mode 100644 index 00000000..e5b47274 --- /dev/null +++ b/files/rpms-suse/cinder @@ -0,0 +1,2 @@ +lvm2 +tgt diff --git a/files/rpms-suse/general b/files/rpms-suse/general new file mode 100644 index 00000000..8ed74ec0 --- /dev/null +++ b/files/rpms-suse/general @@ -0,0 +1,23 @@ +bridge-utils +curl +euca2ools +git-core +iputils +openssh +psmisc +python-cmd2 # dist:opensuse-12.3 +python-netaddr +python-pep8 +python-pip +python-pylint +python-unittest2 +python-virtualenv +screen +tar +tcpdump +unzip +vim-enhanced +wget + +findutils-locate # useful when debugging +lsof # useful when debugging diff --git a/files/rpms-suse/glance b/files/rpms-suse/glance new file mode 100644 index 00000000..dd68ac08 --- /dev/null +++ b/files/rpms-suse/glance @@ -0,0 +1,12 @@ +gcc +libxml2-devel +python-PasteDeploy +python-Routes +python-SQLAlchemy +python-argparse +python-devel +python-eventlet +python-greenlet +python-iso8601 +python-wsgiref +python-xattr diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon new file mode 100644 index 00000000..7e46ffe0 --- /dev/null +++ b/files/rpms-suse/horizon @@ -0,0 +1,23 @@ +apache2 # NOPRIME +apache2-mod_wsgi # NOPRIME +nodejs +python-CherryPy # why? 
(coming from apts) +python-Paste +python-PasteDeploy +python-Routes +python-Sphinx +python-SQLAlchemy +python-WebOb +python-anyjson +python-beautifulsoup +python-coverage +python-dateutil +python-eventlet +python-kombu +python-mox +python-netaddr +python-nose +python-pep8 +python-pylint +python-sqlalchemy-migrate +python-xattr diff --git a/files/rpms-suse/keystone b/files/rpms-suse/keystone new file mode 100644 index 00000000..b3c876ad --- /dev/null +++ b/files/rpms-suse/keystone @@ -0,0 +1,17 @@ +cyrus-sasl-devel +openldap2-devel +python-Paste +python-PasteDeploy +python-PasteScript +python-Routes +python-SQLAlchemy +python-WebOb +python-devel +python-distribute +python-setuptools # instead of python-distribute; dist:sle11sp2 +python-greenlet +python-lxml +python-mysql +python-py-bcrypt +python-pysqlite +sqlite3 diff --git a/files/rpms-suse/n-api b/files/rpms-suse/n-api new file mode 100644 index 00000000..ad943ffd --- /dev/null +++ b/files/rpms-suse/n-api @@ -0,0 +1,2 @@ +gcc # temporary because this pulls in glance to get the client without running the glance prereqs +python-dateutil diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu new file mode 100644 index 00000000..27d3254f --- /dev/null +++ b/files/rpms-suse/n-cpu @@ -0,0 +1,4 @@ +# Stuff for diablo volumes +genisoimage +lvm2 +open-iscsi diff --git a/files/rpms-suse/n-novnc b/files/rpms-suse/n-novnc new file mode 100644 index 00000000..c8722b9f --- /dev/null +++ b/files/rpms-suse/n-novnc @@ -0,0 +1 @@ +python-numpy diff --git a/files/rpms-suse/n-vol b/files/rpms-suse/n-vol new file mode 100644 index 00000000..e5b47274 --- /dev/null +++ b/files/rpms-suse/n-vol @@ -0,0 +1,2 @@ +lvm2 +tgt diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova new file mode 100644 index 00000000..0c036786 --- /dev/null +++ b/files/rpms-suse/nova @@ -0,0 +1,48 @@ +curl +# Note: we need to package dhcp_release in dnsmasq! +dnsmasq +ebtables +gawk +iptables +iputils +kpartx +kvm +libvirt # NOPRIME +libvirt-python +libxml2-python +mysql-community-server # NOPRIME +parted +python-M2Crypto +python-m2crypto # dist:sle11sp2 +python-Paste +python-PasteDeploy +python-Routes +python-SQLAlchemy +python-Tempita +python-boto +python-carrot +python-cheetah +python-eventlet +python-feedparser +python-greenlet +python-iso8601 +python-kombu +python-lockfile +python-lxml # needed for glance which is needed for nova --- this shouldn't be here +python-mox +python-mysql +python-netaddr +python-paramiko +python-python-gflags +python-sqlalchemy-migrate +python-suds +python-xattr # needed for glance which is needed for nova --- this shouldn't be here +rabbitmq-server # NOPRIME +socat +sqlite3 +sudo +vlan + +# FIXME: qpid is not part of openSUSE, those names are tentative +python-qpid # NOPRIME +qpidd # NOPRIME diff --git a/files/rpms-suse/postgresql b/files/rpms-suse/postgresql new file mode 100644 index 00000000..bf19d397 --- /dev/null +++ b/files/rpms-suse/postgresql @@ -0,0 +1 @@ +python-psycopg2 diff --git a/files/rpms-suse/quantum b/files/rpms-suse/quantum new file mode 100644 index 00000000..068c15c2 --- /dev/null +++ b/files/rpms-suse/quantum @@ -0,0 +1,27 @@ +# Note: we need to package dhcp_release in dnsmasq! 
+dnsmasq +ebtables +iptables +iputils +mysql-community-server # NOPRIME +python-boto +python-eventlet +python-greenlet +python-iso8601 +python-kombu +python-mysql +python-netaddr +python-Paste +python-PasteDeploy +python-pyudev +python-Routes +python-SQLAlchemy +python-suds +rabbitmq-server # NOPRIME +sqlite3 +sudo +vlan + +# FIXME: qpid is not part of openSUSE, those names are tentative +python-qpid # NOPRIME +qpidd # NOPRIME diff --git a/files/rpms-suse/ryu b/files/rpms-suse/ryu new file mode 100644 index 00000000..763fd24c --- /dev/null +++ b/files/rpms-suse/ryu @@ -0,0 +1,5 @@ +python-distribute +python-setuptools # instead of python-distribute; dist:sle11sp2 +python-Sphinx +python-gevent +python-python-gflags diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift new file mode 100644 index 00000000..db379bbc --- /dev/null +++ b/files/rpms-suse/swift @@ -0,0 +1,19 @@ +curl +gcc +memcached +python-PasteDeploy +python-WebOb +python-configobj +python-coverage +python-devel +python-distribute +python-setuptools # instead of python-distribute; dist:sle11sp2 +python-eventlet +python-greenlet +python-netifaces +python-nose +python-simplejson +python-xattr +sqlite3 +xfsprogs +xinetd diff --git a/lib/databases/mysql b/lib/databases/mysql index fc6a3b7a..eb84f2ca 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -84,7 +84,11 @@ EOF chmod 0600 $HOME/.my.cnf fi # Install mysql-server - install_package mysql-server + if is_suse; then + install_package mysql-community-server + else + install_package mysql-server + fi } function database_connection_url_mysql { diff --git a/lib/horizon b/lib/horizon index af09f770..a378baf2 100644 --- a/lib/horizon +++ b/lib/horizon @@ -111,6 +111,8 @@ function install_horizon() { if [[ "$os_PACKAGE" = "deb" ]]; then # Install apache2, which is NOPRIME'd install_package apache2 libapache2-mod-wsgi + elif is_suse; then + install_package apache2 apache2-mod_wsgi else sudo rm -f /etc/httpd/conf.d/000-* install_package httpd mod_wsgi diff --git a/stack.sh b/stack.sh index 70f46104..0e3a3b18 100755 --- a/stack.sh +++ b/stack.sh @@ -715,12 +715,18 @@ set -o xtrace echo_summary "Installing package prerequisites" if [[ "$os_PACKAGE" = "deb" ]]; then install_package $(get_packages $FILES/apts) +elif is_suse; then + install_package $(get_packages $FILES/rpms-suse) else install_package $(get_packages $FILES/rpms) fi if [[ $SYSLOG != "False" ]]; then - install_package rsyslog-relp + if is_suse; then + install_package rsyslog-module-relp + else + install_package rsyslog-relp + fi fi if is_service_enabled rabbit; then @@ -738,7 +744,11 @@ elif is_service_enabled qpid; then fi elif is_service_enabled zeromq; then if [[ "$os_PACKAGE" = "rpm" ]]; then - install_package zeromq python-zmq + if is_suse; then + install_package libzmq1 python-pyzmq + else + install_package zeromq python-zmq + fi else install_package libzmq1 python-zmq fi From afd472cb30ba90611b3b3907ad2570f26905532c Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 28 Nov 2012 11:54:45 -0600 Subject: [PATCH 787/967] Don't combine sed options Combining '-i -e' into '-ie' changes behaviour, don't do that Change-Id: Ice46c6b4f899b4c76f355cc88241dd33bc60f459 --- functions | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/functions b/functions index 9dab759a..f2b12e21 100644 --- a/functions +++ b/functions @@ -419,7 +419,7 @@ function inicomment() { local file=$1 local section=$2 local option=$3 - sed -ie "/^\[ *$section *\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" $file + sed -i -e 
"/^\[ *$section *\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" $file } # Uncomment an option in an INI file @@ -428,7 +428,7 @@ function iniuncomment() { local file=$1 local section=$2 local option=$3 - sed -ie "/^\[ *$section *\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" $file + sed -i -e "/^\[ *$section *\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" $file } @@ -457,12 +457,12 @@ function iniset() { fi if [[ -z "$(iniget $file $section $option)" ]]; then # Add it - sed -ie "/^\[ *$section *\]/ a\\ + sed -i -e "/^\[ *$section *\]/ a\\ $option = $value " $file else # Replace it - sed -ie "/^\[ *$section *\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" $file + sed -i -e "/^\[ *$section *\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" $file fi } From 9a352daf892a78bcef232e2da32b0d46c4c10fe7 Mon Sep 17 00:00:00 2001 From: Jeremy Stanley Date: Wed, 28 Nov 2012 17:22:39 +0000 Subject: [PATCH 788/967] Install Tempest's dependencies along with it. * lib/tempest(install_tempest): Directly install Tempest's tools/pip-requires list after cloning the repo. Change-Id: I5c508faab8756d5cdfec53193e08e3440fda1b2c --- lib/tempest | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/tempest b/lib/tempest index 115c9118..871e9e73 100644 --- a/lib/tempest +++ b/lib/tempest @@ -49,6 +49,10 @@ function configure_tempest() { # install_tempest() - Collect source and prepare function install_tempest() { git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH + + # Tempest doesn't satisfy its dependencies on its own, so + # install them here instead. + sudo pip install -r $TEMPEST_DIR/tools/pip-requires } From 9343df160e29a4a5193503ed6cd0e35d1e590e59 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Wed, 28 Nov 2012 10:05:53 +0000 Subject: [PATCH 789/967] Ensures that Quantum sets the correct signing directory Change-Id: I4f01a171f0ced73ba6b6000d225c8f5811f1874a --- lib/quantum | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/quantum b/lib/quantum index ba98b646..373d5217 100644 --- a/lib/quantum +++ b/lib/quantum @@ -6,6 +6,7 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace export QUANTUM_TEST_CONFIG_FILE=${QUANTUM_TEST_CONFIG_FILE:-"/etc/quantum/debug.ini"} +QUANTUM_AUTH_CACHE_DIR=${QUANTUM_AUTH_CACHE_DIR:-/var/cache/quantum} # Configures keystone integration for quantum service and agents function quantum_setup_keystone() { @@ -22,6 +23,12 @@ function quantum_setup_keystone() { iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME iniset $conf_file $section admin_user $Q_ADMIN_USERNAME iniset $conf_file $section admin_password $SERVICE_PASSWORD + if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then + iniset $conf_file $section signing_dir $QUANTUM_AUTH_CACHE_DIR + # Create cache dir + sudo mkdir -p $QUANTUM_AUTH_CACHE_DIR + sudo chown `whoami` $QUANTUM_AUTH_CACHE_DIR + fi } function quantum_setup_ovs_bridge() { From 7c3053da69681ed5a57729812d4a357ac1c23b17 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 29 Nov 2012 09:19:16 +0100 Subject: [PATCH 790/967] Remove leftover references to files/pips/ The files/pips/* files were removed a while ago (replaced by pip-requires in individual projects). So remove leftover code that was dealing with that. 
Change-Id: Id521a3365ab018193607389f022a25acddb49714 --- stack.sh | 4 ---- tools/build_ramdisk.sh | 3 +-- tools/build_tempest.sh | 2 -- tools/build_uec.sh | 2 +- tools/build_uec_ramdisk.sh | 2 +- tools/{warm_apts_and_pips_for_uec.sh => warm_apts_for_uec.sh} | 4 +--- 6 files changed, 4 insertions(+), 13 deletions(-) rename tools/{warm_apts_and_pips_for_uec.sh => warm_apts_for_uec.sh} (88%) diff --git a/stack.sh b/stack.sh index 70f46104..5c5ad2a0 100755 --- a/stack.sh +++ b/stack.sh @@ -779,10 +779,6 @@ if [[ $TRACK_DEPENDS = True ]] ; then $DEST/.venv/bin/pip freeze > $DEST/requires-pre-pip fi -# Install python requirements -echo_summary "Installing Python prerequisites" -pip_install $(get_packages $FILES/pips | sort -u) - # Check Out Source # ---------------- diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh index 8e2c0be9..5ff05b08 100755 --- a/tools/build_ramdisk.sh +++ b/tools/build_ramdisk.sh @@ -108,7 +108,7 @@ function map_nbd { echo $NBD } -# Prime image with as many apt/pips as we can +# Prime image with as many apt as we can DEV_FILE=$CACHEDIR/$DIST_NAME-dev.img DEV_FILE_TMP=`mktemp $DEV_FILE.XXXXXX` if [ ! -r $DEV_FILE ]; then @@ -121,7 +121,6 @@ if [ ! -r $DEV_FILE ]; then chroot $MNTDIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1` chroot $MNTDIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` - chroot $MNTDIR pip install `cat files/pips/*` # Create a stack user that is a member of the libvirtd group so that stack # is able to interact with libvirt. diff --git a/tools/build_tempest.sh b/tools/build_tempest.sh index e72355c9..1758e7da 100755 --- a/tools/build_tempest.sh +++ b/tools/build_tempest.sh @@ -48,8 +48,6 @@ DEST=${DEST:-/opt/stack} TEMPEST_DIR=$DEST/tempest # Install tests and prerequisites -pip_install `cat $TOP_DIR/files/pips/tempest` - git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 48819c95..58c54258 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -68,7 +68,7 @@ fi # Option to warm the base image with software requirements. if [ $WARM_CACHE ]; then cd $TOOLS_DIR - ./warm_apts_and_pips_for_uec.sh $image_dir/disk + ./warm_apts_for_uec.sh $image_dir/disk fi # Name of our instance, used by libvirt diff --git a/tools/build_uec_ramdisk.sh b/tools/build_uec_ramdisk.sh index 150ecabd..3ab5dafd 100755 --- a/tools/build_uec_ramdisk.sh +++ b/tools/build_uec_ramdisk.sh @@ -98,7 +98,7 @@ GUEST_NAME=${GUEST_NAME:-devstack} # Pre-load the image with basic environment if [ ! 
-e $image_dir/disk-primed ]; then cp $image_dir/disk $image_dir/disk-primed - $TOOLS_DIR/warm_apts_and_pips_for_uec.sh $image_dir/disk-primed + $TOOLS_DIR/warm_apts_for_uec.sh $image_dir/disk-primed $TOOLS_DIR/copy_dev_environment_to_uec.sh $image_dir/disk-primed fi diff --git a/tools/warm_apts_and_pips_for_uec.sh b/tools/warm_apts_for_uec.sh similarity index 88% rename from tools/warm_apts_and_pips_for_uec.sh rename to tools/warm_apts_for_uec.sh index fe389ffe..3c15f52e 100755 --- a/tools/warm_apts_and_pips_for_uec.sh +++ b/tools/warm_apts_for_uec.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# **warm_apts_and_pips_for_uec.sh** +# **warm_apts_for_uec.sh** # Echo commands set -o xtrace @@ -48,8 +48,6 @@ cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf chroot $STAGING_DIR apt-get update chroot $STAGING_DIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1` chroot $STAGING_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` || true -mkdir -p $STAGING_DIR/var/cache/pip -PIP_DOWNLOAD_CACHE=/var/cache/pip chroot $STAGING_DIR pip install `cat files/pips/*` || true # Unmount umount $STAGING_DIR From b2fdafead20f5b11e7d53406db2ddb28b518f391 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Tue, 20 Nov 2012 15:52:21 +0000 Subject: [PATCH 791/967] Additional options for XenAPINFS cinder driver Related to bp xenapi-storage-manager-nfs Add configuration options to devstack scripts, so it is easier to set up a system with a XenAPINFS volume backend. It makes possible to test this configuration with exercises. To enable the XenAPINFS driver, specify: CINDER_DRIVER=XenAPINFS CINDER_XENAPI_CONNECTION_URL= CINDER_XENAPI_CONNECTION_USERNAME= CINDER_XENAPI_CONNECTION_PASSWORD= CINDER_XENAPI_NFS_SERVER= CINDER_XENAPI_NFS_SERVERPATH= in your localrc Change-Id: Ia214172aac377d273a03849c8cc2adcbf5b8f607 --- lib/cinder | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/lib/cinder b/lib/cinder index 058fcc23..1aa34cd2 100644 --- a/lib/cinder +++ b/lib/cinder @@ -24,6 +24,9 @@ set +o xtrace # Defaults # -------- +# set up default driver +CINDER_DRIVER=${CINDER_DRIVER:-default} + # set up default directories CINDER_DIR=$DEST/cinder CINDERCLIENT_DIR=$DEST/python-cinderclient @@ -145,6 +148,19 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" iniset $CINDER_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s TRACE %(name)s %(instance)s" fi + + if [ "$CINDER_DRIVER" == "XenAPINFS" ]; then + ( + set -u + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.xenapi_sm.XenAPINFSDriver" + iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL" + iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME" + iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD" + iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" + iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" + ) + [ $? -ne 0 ] && exit 1 + fi } # init_cinder() - Initialize database and volume group From 901eed70b4b6257ad3a9192c0d0522969ef67509 Mon Sep 17 00:00:00 2001 From: guillaume pernot Date: Thu, 29 Nov 2012 08:44:58 +0100 Subject: [PATCH 792/967] Add ResellerAdmin role to ceilometer user. For the sake of swift metering, 'ceilometer' user needs to be a ResellerAdmin for tenant 'service'. 
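Before moving on, the XenAPINFS options introduced in the previous patch are easier to read as a filled-in localrc snippet; every value below is a placeholder, not a real host or credential:

    CINDER_DRIVER=XenAPINFS
    CINDER_XENAPI_CONNECTION_URL=https://xenserver.example.com
    CINDER_XENAPI_CONNECTION_USERNAME=root
    CINDER_XENAPI_CONNECTION_PASSWORD=changeme
    CINDER_XENAPI_NFS_SERVER=nfs.example.com
    CINDER_XENAPI_NFS_SERVERPATH=/export/cinder

With these set, configure_cinder() switches volume_driver to cinder.volume.xenapi_sm.XenAPINFSDriver and copies each value into cinder.conf, bailing out early (via the set -u subshell in the diff) if any of them is left unset.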
Change-Id: I65b3bdedddded9d5f3bac5c5d714288800ffa8b6 --- files/keystone_data.sh | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index f75d24a6..35793d84 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -87,6 +87,11 @@ MEMBER_ROLE=$(get_id keystone role-create --name=Member) keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $DEMO_TENANT keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $INVIS_TENANT +# The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it. +# The admin role in swift allows a user to act as an admin for their tenant, +# but ResellerAdmin is needed for a user to act as any tenant. The name of this +# role is also configurable in swift-proxy.conf +RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin) # Services # -------- @@ -129,11 +134,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" fi # Nova needs ResellerAdmin role to download images when accessing - # swift through the s3 api. The admin role in swift allows a user - # to act as an admin for their tenant, but ResellerAdmin is needed - # for a user to act as any tenant. The name of this role is also - # configurable in swift-proxy.conf - RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin) + # swift through the s3 api. keystone user-role-add \ --tenant_id $SERVICE_TENANT \ --user_id $NOVA_USER \ @@ -255,6 +256,10 @@ if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then keystone user-role-add --tenant_id $SERVICE_TENANT \ --user_id $CEILOMETER_USER \ --role_id $ADMIN_ROLE + # Ceilometer needs ResellerAdmin role to access swift account stats. 
+ keystone user-role-add --tenant_id $SERVICE_TENANT \ + --user_id $CEILOMETER_USER \ + --role_id $RESELLER_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then CEILOMETER_SERVICE=$(get_id keystone service-create \ --name=ceilometer \ From ed30160c0454bcd7c203db0f331e2adfcbd62ea3 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 29 Nov 2012 16:52:59 +0100 Subject: [PATCH 793/967] Add OBJECT_CATALOG_TYPE to tempest config * add OBJECT_CATALOG_TYPE Change-Id: I776f7ce65e44ceef139e34a1b1aff52e069b90e6 --- tools/configure_tempest.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 9b543ab0..03dc6839 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -209,6 +209,9 @@ VOLUME_CATALOG_TYPE=volume LIVE_MIGRATION_AVAILABLE=${LIVE_MIGRATION_AVAILABLE:-False} USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION=${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} +# Object Storage +OBJECT_CATALOG_TYPE="object-store" + # EC2 and S3 test configuration BOTO_EC2_URL="http://$IDENTITY_HOST:8773/services/Cloud" BOTO_S3_URL="http://$IDENTITY_HOST:3333" @@ -281,6 +284,7 @@ sed -e " s,%VOLUME_BUILD_TIMEOUT%,$VOLUME_BUILD_TIMEOUT,g; s,%LIVE_MIGRATION_AVAILABLE%,$LIVE_MIGRATION_AVAILABLE,g; s,%USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION%,$USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION,g; + s,%OBJECT_CATALOG_TYPE%,$OBJECT_CATALOG_TYPE,g; s,%BOTO_EC2_URL%,$BOTO_EC2_URL,g; s,%BOTO_S3_URL%,$BOTO_S3_URL,g; s,%BOTO_AWS_ACCESS%,$BOTO_AWS_ACCESS,g; From e1864c37f31a6e4d8680148c35ffc9f1bcc5d54a Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 29 Nov 2012 14:20:34 -0500 Subject: [PATCH 794/967] enable mod_wsgi on horizon_init on a clean ubuntu environment wsgi wasn't getting enabled ensure that it actually turns on wsgi to that apache can start Change-Id: I9c74f7c5d5d2f995843b2a649a52f7159c7de314 --- lib/horizon | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/horizon b/lib/horizon index af09f770..6173042f 100644 --- a/lib/horizon +++ b/lib/horizon @@ -79,6 +79,8 @@ function init_horizon() { # Be a good citizen and use the distro tools here sudo touch /etc/$APACHE_NAME/$APACHE_CONF sudo a2ensite horizon + # WSGI doesn't enable by default, enable it + sudo a2enmod wsgi else # Install httpd, which is NOPRIME'd if is_suse; then From a61eb6af5d4fe7affa3a8c8da6d3b4126e7764bf Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 29 Nov 2012 14:51:34 -0500 Subject: [PATCH 795/967] remove hard tabs from keystone_data.sh hard tabs somehow snuck into keystone_data.sh, noticed in an unrelated review. Remove for consistency. 
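One way to catch this kind of regression before review (a suggestion, not part of the patch) is to grep for literal tabs, using bash's $'\t' quoting so no special grep flags are needed:

    grep -n $'\t' files/keystone_data.sh || echo "no hard tabs"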
Change-Id: I04f3b4597fd3629c7f123588c512832a67228597 --- files/keystone_data.sh | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 7da07aaa..9b07d0b3 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -92,16 +92,16 @@ keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $ # Keystone if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - KEYSTONE_SERVICE=$(get_id keystone service-create \ - --name=keystone \ - --type=identity \ - --description="Keystone Identity Service") - keystone endpoint-create \ - --region RegionOne \ - --service_id $KEYSTONE_SERVICE \ - --publicurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" \ - --adminurl "http://$SERVICE_HOST:\$(admin_port)s/v2.0" \ - --internalurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" + KEYSTONE_SERVICE=$(get_id keystone service-create \ + --name=keystone \ + --type=identity \ + --description="Keystone Identity Service") + keystone endpoint-create \ + --region RegionOne \ + --service_id $KEYSTONE_SERVICE \ + --publicurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" \ + --adminurl "http://$SERVICE_HOST:\$(admin_port)s/v2.0" \ + --internalurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" fi # Nova From ff7f308e9cbdaf69fa116a628ed3114bb7aad54e Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Thu, 29 Nov 2012 22:00:51 -0500 Subject: [PATCH 796/967] Start nova-conductor before nova-compute. nova-compute is going to need to talk to nova-conductor during startup, so go ahead and start it conductor before compute. Change-Id: I565436e06b5bf4189ead0a57d57ec2ce4cf79bd8 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index d15d9e31..6445a073 100644 --- a/lib/nova +++ b/lib/nova @@ -423,6 +423,7 @@ function start_nova() { # The group **libvirtd** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **libvirtd** group. # ``screen_it`` checks ``is_service_enabled``, it is not needed here + screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor" screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_BIN_DIR/nova-compute" screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network" @@ -430,7 +431,6 @@ function start_nova() { screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF --web ." 
screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF" screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth" - screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor" } # stop_nova() - Stop running processes (non-screen) From 08b4e9b445f460d36a78a68b5273aee8155e4839 Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Fri, 30 Nov 2012 13:31:49 +0000 Subject: [PATCH 797/967] heat : Remove heat-metadata as it has been removed in heat master The heat-metadata service has been removed as of the following commit in heat master: 6ae3ff0 Remove heat-metadata service So remove the heat-metadata service and related config-file items Change-Id: If36efe5924e9e0a7697f51dd3c9fc140fed8090b Signed-off-by: Steven Hardy --- lib/heat | 29 +++-------------------------- 1 file changed, 3 insertions(+), 26 deletions(-) diff --git a/lib/heat b/lib/heat index 396c8a05..b640fbca 100644 --- a/lib/heat +++ b/lib/heat @@ -1,7 +1,7 @@ # lib/heat # Install and start Heat service # To enable, add the following to localrc -# ENABLED_SERVICES+=,heat,h-api-cfn,h-api-cw,h-eng,h-meta +# ENABLED_SERVICES+=,heat,h-api-cfn,h-api-cw,h-eng # Dependencies: # - functions @@ -52,8 +52,6 @@ function configure_heat() { HEAT_API_CFN_PORT=${HEAT_API_CFN_PORT:-8000} HEAT_ENGINE_HOST=${HEAT_ENGINE_HOST:-$SERVICE_HOST} HEAT_ENGINE_PORT=${HEAT_ENGINE_PORT:-8001} - HEAT_METADATA_HOST=${HEAT_METADATA_HOST:-$SERVICE_HOST} - HEAT_METADATA_PORT=${HEAT_METADATA_PORT:-8002} HEAT_API_CW_HOST=${HEAT_API_CW_HOST:-$SERVICE_HOST} HEAT_API_CW_PORT=${HEAT_API_CW_PORT:-8003} HEAT_API_HOST=${HEAT_API_HOST:-$SERVICE_HOST} @@ -126,7 +124,7 @@ function configure_heat() { iniset $HEAT_ENGINE_CONF DEFAULT bind_host $HEAT_ENGINE_HOST iniset $HEAT_ENGINE_CONF DEFAULT bind_port $HEAT_ENGINE_PORT iniset $HEAT_ENGINE_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT - iniset $HEAT_ENGINE_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_METADATA_HOST:$HEAT_METADATA_PORT + iniset $HEAT_ENGINE_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_CFN_HOST:$HEAT_CFN_PORT/v1/waitcondition iniset $HEAT_ENGINE_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT local dburl database_connection_url dburl heat @@ -141,26 +139,6 @@ function configure_heat() { iniset $HEAT_ENGINE_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid fi - # metadata api - HEAT_METADATA_CONF=$HEAT_CONF_DIR/heat-metadata.conf - cp $HEAT_DIR/etc/heat/heat-metadata.conf $HEAT_METADATA_CONF - iniset $HEAT_METADATA_CONF DEFAULT debug True - inicomment $HEAT_METADATA_CONF DEFAULT log_file - iniset $HEAT_METADATA_CONF DEFAULT use_syslog $SYSLOG - iniset $HEAT_METADATA_CONF DEFAULT bind_host $HEAT_METADATA_HOST - iniset $HEAT_METADATA_CONF DEFAULT bind_port $HEAT_METADATA_PORT - - if is_service_enabled rabbit; then - iniset $HEAT_METADATA_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu - iniset $HEAT_METADATA_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $HEAT_METADATA_CONF DEFAULT rabbit_host $RABBIT_HOST - elif is_service_enabled qpid; then - iniset $HEAT_METADATA_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid - fi - - HEAT_METADATA_PASTE_INI=$HEAT_CONF_DIR/heat-metadata-paste.ini - cp $HEAT_DIR/etc/heat/heat-metadata-paste.ini $HEAT_METADATA_PASTE_INI - # cloudwatch api HEAT_API_CW_CONF=$HEAT_CONF_DIR/heat-api-cloudwatch.conf cp $HEAT_DIR/etc/heat/heat-api-cloudwatch.conf $HEAT_API_CW_CONF @@ -217,13 +195,12 @@ function start_heat() { 
screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-dir=$HEAT_CONF_DIR/heat-api.conf" screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-dir=$HEAT_CONF_DIR/heat-api-cfn.conf" screen_it h-api-cw "cd $HEAT_DIR; bin/heat-api-cloudwatch --config-dir=$HEAT_CONF_DIR/heat-api-cloudwatch.conf" - screen_it h-meta "cd $HEAT_DIR; bin/heat-metadata --config-dir=$HEAT_CONF_DIR/heat-metadata.conf" } # stop_heat() - Stop running processes function stop_heat() { # Kill the cinder screen windows - for serv in h-eng h-api-cfn h-api-cw h-meta; do + for serv in h-eng h-api-cfn h-api-cw; do screen -S $SCREEN_NAME -p $serv -X kill done } From ece6a332b7d5791c73071fbfea5723d4991c6c85 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 29 Nov 2012 14:19:41 +0100 Subject: [PATCH 798/967] Refactor swift installation * Optimize loops * Move install steps to the lib/swift Change-Id: Ie8a74b2627395620ccb0501171fa0150ee7497f2 --- lib/swift | 364 +++++++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 315 +++------------------------------------------- unstack.sh | 3 +- 3 files changed, 384 insertions(+), 298 deletions(-) create mode 100644 lib/swift diff --git a/lib/swift b/lib/swift new file mode 100644 index 00000000..7acb1dfe --- /dev/null +++ b/lib/swift @@ -0,0 +1,364 @@ +# lib/swift +# Functions to control the configuration and operation of the swift service + +# Dependencies: +# ``functions`` file +# ``DEST``, ``SCREEN_NAME``, `SWIFT_HASH` must be defined +# ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined +# ``lib/keystone`` file +# ``stack.sh`` calls the entry points in this order: +# +# install_swift +# configure_swift +# init_swift +# start_swift +# stop_swift +# cleanup_swift + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# + +# Set up default directories + +SWIFT_DIR=$DEST/swift +SWIFTCLIENT_DIR=$DEST/python-swiftclient + +# TODO: add logging to different location. + +# Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects. +# Default is the common DevStack data directory. +SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift} + +# Set ``SWIFT_CONFIG_DIR`` to the location of the configuration files. +# Default is ``/etc/swift``. +SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift} + +# DevStack will create a loop-back disk formatted as XFS to store the +# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in bytes. +# Default is 1 gigabyte. +SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} + +# The ring uses a configurable number of bits from a path’s MD5 hash as +# a partition index that designates a device. The number of bits kept +# from the hash is known as the partition power, and 2 to the partition +# power indicates the partition count. Partitioning the full MD5 hash +# ring allows other parts of the cluster to work in batches of items at +# once which ends up either more efficient or at least less complex than +# working with each item separately or the entire cluster all at once. +# By default we define 9 for the partition count (which mean 512). +SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} + +# Set ``SWIFT_REPLICAS`` to configure how many replicas are to be +# configured for your Swift cluster. By default the three replicas would need a +# bit of IO and Memory on a VM you may want to lower that to 1 if you want to do +# only some quick testing. 
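# (Annotation, not part of the patch above.) The partition-power comment is easy to
# sanity-check: a power of 9 means the ring is cut into 2**9 partitions, which is the
# 512 the comment refers to.
echo $(( 2 ** 9 ))    # -> 512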
+SWIFT_REPLICAS=${SWIFT_REPLICAS:-3} +SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS}) + +# Set ``OBJECT_PORT_BASE``, ``CONTAINER_PORT_BASE``, ``ACCOUNT_PORT_BASE`` +# Port bases used in port number calclution for the service "nodes" +# The specified port number will be used, the additinal ports calculated by +# base_port + node_num * 10 +OBJECT_PORT_BASE=6010 +CONTAINER_PORT_BASE=6011 +ACCOUNT_PORT_BASE=6012 + +# Entry Points +# ------------ + +# cleanup_swift() - Remove residual data files +function cleanup_swift() { + rm -f ${SWIFT_CONFIG_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} + if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then + sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 + fi + if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then + rm ${SWIFT_DATA_DIR}/drives/images/swift.img + fi +} + +# configure_swift() - Set config files, create data dirs and loop image +function configure_swift() { + local swift_auth_server + local node_number + local swift_node_config + local swift_log_dir + + setup_develop $SWIFT_DIR + + # Make sure to kill all swift processes first + swift-init all stop || true + + # First do a bit of setup by creating the directories and + # changing the permissions so we can run it as our user. + + USER_GROUP=$(id -g) + sudo mkdir -p ${SWIFT_DATA_DIR}/drives + sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR} + + # Create a loopback disk and format it to XFS. + if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then + if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then + sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 + fi + else + mkdir -p ${SWIFT_DATA_DIR}/drives/images + sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img + sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img + + dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \ + bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} + fi + + # Make a fresh XFS filesystem + mkfs.xfs -f -i size=1024 ${SWIFT_DATA_DIR}/drives/images/swift.img + + # Mount the disk with mount options to make it as efficient as possible + mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1 + if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then + sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ + ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1 + fi + + # Create a link to the above mount and + # create all of the directories needed to emulate a few different servers + for node_number in ${SWIFT_REPLICAS_SEQ}; do + sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$node_number ${SWIFT_DATA_DIR}/$node_number; + drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number} + node=${SWIFT_DATA_DIR}/${node_number}/node + node_device=${node}/sdb1 + [[ -d $node ]] && continue + [[ -d $drive ]] && continue + sudo install -o ${USER} -g $USER_GROUP -d $drive + sudo install -o ${USER} -g $USER_GROUP -d $node_device + sudo chown -R $USER: ${node} + done + + sudo mkdir -p ${SWIFT_CONFIG_DIR}/{object,container,account}-server /var/run/swift + sudo chown -R $USER: ${SWIFT_CONFIG_DIR} /var/run/swift + + if [[ "$SWIFT_CONFIG_DIR" != "/etc/swift" ]]; then + # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed. + # Create a symlink if the config dir is moved + sudo ln -sf ${SWIFT_CONFIG_DIR} /etc/swift + fi + + # Swift use rsync to synchronize between all the different + # partitions (which make more sense when you have a multi-node + # setup) we configure it with our version of rsync. 
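# (Annotation, not part of the patch.) The sed pipeline below is plain template
# expansion: the %USER%, %GROUP% and %SWIFT_DATA_DIR% placeholders in
# files/swift/rsyncd.conf are rewritten to local values before the result is written
# to /etc/rsyncd.conf. The same idea on a made-up template line:
echo "uid = %USER%" | sed -e "s/%USER%/$USER/"    # -> uid = <your user>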
+ sed -e " + s/%GROUP%/${USER_GROUP}/; + s/%USER%/$USER/; + s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,; + " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf + # rsyncd.conf just prepared for 4 nodes + if [[ "$os_PACKAGE" = "deb" ]]; then + sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync + else + sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync + fi + + if is_service_enabled swift3;then + swift_auth_server="s3token " + fi + + # By default Swift will be installed with the tempauth middleware + # which has some default username and password if you have + # configured keystone it will checkout the directory. + if is_service_enabled key; then + swift_auth_server+="authtoken keystoneauth" + else + swift_auth_server=tempauth + fi + + SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONFIG_DIR}/proxy-server.conf + cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER} + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${USER} + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONFIG_DIR} + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1 + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level DEBUG + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080} + + # Only enable Swift3 if we have it enabled in ENABLED_SERVICES + is_service_enabled swift3 && swift3=swift3 || swift3="" + + iniset ${SWIFT_CONFIG_PROXY_SERVER} pipeline:main pipeline "catch_errors healthcheck cache ratelimit ${swift3} ${swift_auth_server} proxy-logging proxy-server" + + iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true + + # Configure Keystone + sed -i '/^# \[filter:authtoken\]/,/^# \[filter:keystoneauth\]$/ s/^#[ \t]*//' ${SWIFT_CONFIG_PROXY_SERVER} + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_port $KEYSTONE_AUTH_PORT + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD + + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin" + + if is_service_enabled swift3; then + cat <>${SWIFT_CONFIG_PROXY_SERVER} +# NOTE(chmou): s3token middleware is not updated yet to use only +# username and password. 
+[filter:s3token] +paste.filter_factory = keystone.middleware.s3_token:filter_factory +auth_port = ${KEYSTONE_AUTH_PORT} +auth_host = ${KEYSTONE_AUTH_HOST} +auth_protocol = ${KEYSTONE_AUTH_PROTOCOL} +auth_token = ${SERVICE_TOKEN} +admin_token = ${SERVICE_TOKEN} + +[filter:swift3] +use = egg:swift3#swift3 +EOF + fi + + cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONFIG_DIR}/swift.conf + iniset ${SWIFT_CONFIG_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH} + + # This function generates an object/account/proxy configuration + # emulating 4 nodes on different ports + function generate_swift_config() { + local swift_node_config=$1 + local node_id=$2 + local bind_port=$3 + + log_facility=$[ node_id - 1 ] + node_path=${SWIFT_DATA_DIR}/${node_number} + + iniuncomment ${swift_node_config} DEFAULT user + iniset ${swift_node_config} DEFAULT user ${USER} + + iniuncomment ${swift_node_config} DEFAULT bind_port + iniset ${swift_node_config} DEFAULT bind_port ${bind_port} + + iniuncomment ${swift_node_config} DEFAULT swift_dir + iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONFIG_DIR} + + iniuncomment ${swift_node_config} DEFAULT devices + iniset ${swift_node_config} DEFAULT devices ${node_path} + + iniuncomment ${swift_node_config} DEFAULT log_facility + iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility} + + iniuncomment ${swift_node_config} DEFAULT mount_check + iniset ${swift_node_config} DEFAULT mount_check false + + iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode + iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes + } + + for node_number in ${SWIFT_REPLICAS_SEQ}; do + swift_node_config=${SWIFT_CONFIG_DIR}/object-server/${node_number}.conf + cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config} + generate_swift_config ${swift_node_config} ${node_number} $[OBJECT_PORT_BASE + 10 * (node_number - 1)] + + swift_node_config=${SWIFT_CONFIG_DIR}/container-server/${node_number}.conf + cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config} + generate_swift_config ${swift_node_config} ${node_number} $[CONTAINER_PORT_BASE + 10 * (node_number - 1)] + + swift_node_config=${SWIFT_CONFIG_DIR}/account-server/${node_number}.conf + cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config} + generate_swift_config ${swift_node_config} ${node_number} $[ACCOUNT_PORT_BASE + 10 * (node_number - 1)] + done + + swift_log_dir=${SWIFT_DATA_DIR}/logs + rm -rf ${swift_log_dir} + mkdir -p ${swift_log_dir}/hourly + sudo chown -R $USER:adm ${swift_log_dir} + sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ + tee /etc/rsyslog.d/10-swift.conf + +} + +# configure_swiftclient() - Set config files, create data dirs, etc +function configure_swiftclient() { + setup_develop $SWIFTCLIENT_DIR +} + +# init_swift() - Initialize rings +function init_swift() { + local node_number + # Make sure to kill all swift processes first + swift-init all stop || true + + # This is where we create three different rings for swift with + # different object servers binding on different ports. 
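# (Annotation, not part of the patch.) The $[ ... ] arithmetic used below spaces each
# emulated node's ports 10 apart from the service base port, so with the defaults the
# three object servers land on 6010/6020/6030 (container and account start at 6011/6012):
OBJECT_PORT_BASE=6010
for node_number in 1 2 3; do
    echo "object node $node_number -> port $(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))"
done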
+ pushd ${SWIFT_CONFIG_DIR} >/dev/null && { + + rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz + + swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + + for node_number in ${SWIFT_REPLICAS_SEQ}; do + swift-ring-builder object.builder add z${node_number}-127.0.0.1:$[OBJECT_PORT_BASE + 10 * (node_number - 1)]/sdb1 1 + swift-ring-builder container.builder add z${node_number}-127.0.0.1:$[CONTAINER_PORT_BASE + 10 * (node_number - 1)]/sdb1 1 + swift-ring-builder account.builder add z${node_number}-127.0.0.1:$[ACCOUNT_PORT_BASE + 10 * (node_number - 1)]/sdb1 1 + done + swift-ring-builder object.builder rebalance + swift-ring-builder container.builder rebalance + swift-ring-builder account.builder rebalance + } && popd >/dev/null + +} + +function install_swift() { + git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH +} + +function install_swiftclient() { + git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH +} + + +# start_swift() - Start running processes, including screen +function start_swift() { + # (re)start rsyslog + restart_service rsyslog + # Start rsync + if [[ "$os_PACKAGE" = "deb" ]]; then + sudo /etc/init.d/rsync restart || : + else + sudo systemctl start xinetd.service + fi + + # First spawn all the swift services then kill the + # proxy service so we can run it in foreground in screen. + # ``swift-init ... {stop|restart}`` exits with '1' if no servers are running, + # ignore it just in case + swift-init all restart || true + swift-init proxy stop || true + screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v" +} + +# stop_swift() - Stop running processes (non-screen) +function stop_swift() { + # screen normally killed by unstack.sh + swift-init all stop || true +} + +# Restore xtrace +$XTRACE diff --git a/stack.sh b/stack.sh index 70f46104..dbb53ecb 100755 --- a/stack.sh +++ b/stack.sh @@ -105,7 +105,7 @@ disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17) ]]; then +if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then echo "If you wish to run this script anyway run with FORCE=yes" @@ -310,6 +310,7 @@ source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova source $TOP_DIR/lib/cinder +source $TOP_DIR/lib/swift source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat source $TOP_DIR/lib/quantum @@ -319,9 +320,7 @@ source $TOP_DIR/lib/tempest HORIZON_DIR=$DEST/horizon OPENSTACKCLIENT_DIR=$DEST/python-openstackclient NOVNC_DIR=$DEST/noVNC -SWIFT_DIR=$DEST/swift SWIFT3_DIR=$DEST/swift3 -SWIFTCLIENT_DIR=$DEST/python-swiftclient QUANTUM_DIR=$DEST/quantum QUANTUM_CLIENT_DIR=$DEST/python-quantumclient @@ -503,41 +502,6 @@ if is_service_enabled rabbit; then read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT." fi - -# Swift -# ----- - -# TODO: add logging to different location. - -# Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects. -# Default is the common DevStack data directory. 
-SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift} - -# Set ``SWIFT_CONFIG_DIR`` to the location of the configuration files. -# Default is ``/etc/swift``. -SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift} - -# DevStack will create a loop-back disk formatted as XFS to store the -# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in bytes. -# Default is 1 gigabyte. -SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} - -# The ring uses a configurable number of bits from a path’s MD5 hash as -# a partition index that designates a device. The number of bits kept -# from the hash is known as the partition power, and 2 to the partition -# power indicates the partition count. Partitioning the full MD5 hash -# ring allows other parts of the cluster to work in batches of items at -# once which ends up either more efficient or at least less complex than -# working with each item separately or the entire cluster all at once. -# By default we define 9 for the partition count (which mean 512). -SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} - -# Set ``SWIFT_REPLICAS`` to configure how many replicas are to be -# configured for your Swift cluster. By default the three replicas would need a -# bit of IO and Memory on a VM you may want to lower that to 1 if you want to do -# only some quick testing. -SWIFT_REPLICAS=${SWIFT_REPLICAS:-3} - if is_service_enabled swift; then # If we are using swift3, we can default the s3 port to swift instead # of nova-objectstore @@ -793,7 +757,6 @@ echo_summary "Installing OpenStack project source" install_keystoneclient install_glanceclient install_novaclient - # Check out the client libs that are used most git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH @@ -802,16 +765,16 @@ if is_service_enabled key g-api n-api swift; then # unified auth system (manages accounts/tokens) install_keystone fi + if is_service_enabled swift; then - # storage service - git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH - # storage service client and and Library - git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH + install_swiftclient + install_swift if is_service_enabled swift3; then # swift3 middleware to provide S3 emulation to Swift git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH fi fi + if is_service_enabled g-api n-api; then # image catalog service install_glance @@ -867,11 +830,11 @@ if is_service_enabled key g-api n-api swift; then configure_keystone fi if is_service_enabled swift; then - setup_develop $SWIFT_DIR - setup_develop $SWIFTCLIENT_DIR -fi -if is_service_enabled swift3; then - setup_develop $SWIFT3_DIR + configure_swift + configure_swiftclient + if is_service_enabled swift3; then + setup_develop $SWIFT3_DIR + fi fi if is_service_enabled g-api n-api; then configure_glance @@ -1439,253 +1402,7 @@ fi if is_service_enabled swift; then echo_summary "Configuring Swift" - - # Make sure to kill all swift processes first - swift-init all stop || true - - # First do a bit of setup by creating the directories and - # changing the permissions so we can run it as our user. - - USER_GROUP=$(id -g) - sudo mkdir -p ${SWIFT_DATA_DIR}/drives - sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR} - - # Create a loopback disk and format it to XFS. 
- if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then - if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 - fi - else - mkdir -p ${SWIFT_DATA_DIR}/drives/images - sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img - sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img - - dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \ - bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} - fi - - # Make a fresh XFS filesystem - mkfs.xfs -f -i size=1024 ${SWIFT_DATA_DIR}/drives/images/swift.img - - # Mount the disk with mount options to make it as efficient as possible - mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1 - if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ - ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1 - fi - - # Create a link to the above mount - for x in $(seq ${SWIFT_REPLICAS}); do - sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$x ${SWIFT_DATA_DIR}/$x; done - - # Create all of the directories needed to emulate a few different servers - for x in $(seq ${SWIFT_REPLICAS}); do - drive=${SWIFT_DATA_DIR}/drives/sdb1/${x} - node=${SWIFT_DATA_DIR}/${x}/node - node_device=${node}/sdb1 - [[ -d $node ]] && continue - [[ -d $drive ]] && continue - sudo install -o ${USER} -g $USER_GROUP -d $drive - sudo install -o ${USER} -g $USER_GROUP -d $node_device - sudo chown -R $USER: ${node} - done - - sudo mkdir -p ${SWIFT_CONFIG_DIR}/{object,container,account}-server /var/run/swift - sudo chown -R $USER: ${SWIFT_CONFIG_DIR} /var/run/swift - - if [[ "$SWIFT_CONFIG_DIR" != "/etc/swift" ]]; then - # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed. - # Create a symlink if the config dir is moved - sudo ln -sf ${SWIFT_CONFIG_DIR} /etc/swift - fi - - # Swift use rsync to synchronize between all the different - # partitions (which make more sense when you have a multi-node - # setup) we configure it with our version of rsync. - sed -e " - s/%GROUP%/${USER_GROUP}/; - s/%USER%/$USER/; - s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,; - " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf - if [[ "$os_PACKAGE" = "deb" ]]; then - sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync - else - sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync - fi - - if is_service_enabled swift3;then - swift_auth_server="s3token " - fi - - # By default Swift will be installed with the tempauth middleware - # which has some default username and password if you have - # configured keystone it will checkout the directory. 
- if is_service_enabled key; then - swift_auth_server+="authtoken keystoneauth" - else - swift_auth_server=tempauth - fi - - SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONFIG_DIR}/proxy-server.conf - cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER} - - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${USER} - - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONFIG_DIR} - - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1 - - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level DEBUG - - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080} - - # Only enable Swift3 if we have it enabled in ENABLED_SERVICES - is_service_enabled swift3 && swift3=swift3 || swift3="" - - iniset ${SWIFT_CONFIG_PROXY_SERVER} pipeline:main pipeline "catch_errors healthcheck cache ratelimit ${swift3} ${swift_auth_server} proxy-logging proxy-server" - - iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true - - # Configure Keystone - sed -i '/^# \[filter:authtoken\]/,/^# \[filter:keystoneauth\]$/ s/^#[ \t]*//' ${SWIFT_CONFIG_PROXY_SERVER} - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_port $KEYSTONE_AUTH_PORT - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD - - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin" - - if is_service_enabled swift3; then - cat <>${SWIFT_CONFIG_PROXY_SERVER} -# NOTE(chmou): s3token middleware is not updated yet to use only -# username and password. 
-[filter:s3token] -paste.filter_factory = keystone.middleware.s3_token:filter_factory -auth_port = ${KEYSTONE_AUTH_PORT} -auth_host = ${KEYSTONE_AUTH_HOST} -auth_protocol = ${KEYSTONE_AUTH_PROTOCOL} -auth_token = ${SERVICE_TOKEN} -admin_token = ${SERVICE_TOKEN} - -[filter:swift3] -use = egg:swift3#swift3 -EOF - fi - - cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONFIG_DIR}/swift.conf - iniset ${SWIFT_CONFIG_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH} - - # This function generates an object/account/proxy configuration - # emulating 4 nodes on different ports - function generate_swift_configuration() { - local server_type=$1 - local bind_port=$2 - local log_facility=$3 - local node_number - local swift_node_config - - for node_number in $(seq ${SWIFT_REPLICAS}); do - node_path=${SWIFT_DATA_DIR}/${node_number} - swift_node_config=${SWIFT_CONFIG_DIR}/${server_type}-server/${node_number}.conf - - cp ${SWIFT_DIR}/etc/${server_type}-server.conf-sample ${swift_node_config} - - iniuncomment ${swift_node_config} DEFAULT user - iniset ${swift_node_config} DEFAULT user ${USER} - - iniuncomment ${swift_node_config} DEFAULT bind_port - iniset ${swift_node_config} DEFAULT bind_port ${bind_port} - - iniuncomment ${swift_node_config} DEFAULT swift_dir - iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONFIG_DIR} - - iniuncomment ${swift_node_config} DEFAULT devices - iniset ${swift_node_config} DEFAULT devices ${node_path} - - iniuncomment ${swift_node_config} DEFAULT log_facility - iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility} - - iniuncomment ${swift_node_config} DEFAULT mount_check - iniset ${swift_node_config} DEFAULT mount_check false - - iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode - iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes - - bind_port=$(( ${bind_port} + 10 )) - log_facility=$(( ${log_facility} + 1 )) - done - } - generate_swift_configuration object 6010 2 - generate_swift_configuration container 6011 2 - generate_swift_configuration account 6012 2 - - # Specific configuration for swift for rsyslog. See - # ``/etc/rsyslog.d/10-swift.conf`` for more info. - swift_log_dir=${SWIFT_DATA_DIR}/logs - rm -rf ${swift_log_dir} - mkdir -p ${swift_log_dir}/hourly - sudo chown -R $USER:adm ${swift_log_dir} - sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ - tee /etc/rsyslog.d/10-swift.conf - restart_service rsyslog - - # This is where we create three different rings for swift with - # different object servers binding on different ports. 
- pushd ${SWIFT_CONFIG_DIR} >/dev/null && { - - rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz - - port_number=6010 - swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 - for x in $(seq ${SWIFT_REPLICAS}); do - swift-ring-builder object.builder add z${x}-127.0.0.1:${port_number}/sdb1 1 - port_number=$[port_number + 10] - done - swift-ring-builder object.builder rebalance - - port_number=6011 - swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 - for x in $(seq ${SWIFT_REPLICAS}); do - swift-ring-builder container.builder add z${x}-127.0.0.1:${port_number}/sdb1 1 - port_number=$[port_number + 10] - done - swift-ring-builder container.builder rebalance - - port_number=6012 - swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 - for x in $(seq ${SWIFT_REPLICAS}); do - swift-ring-builder account.builder add z${x}-127.0.0.1:${port_number}/sdb1 1 - port_number=$[port_number + 10] - done - swift-ring-builder account.builder rebalance - - } && popd >/dev/null - - # Start rsync - if [[ "$os_PACKAGE" = "deb" ]]; then - sudo /etc/init.d/rsync restart || : - else - sudo systemctl start xinetd.service - fi - - # First spawn all the swift services then kill the - # proxy service so we can run it in foreground in screen. - # ``swift-init ... {stop|restart}`` exits with '1' if no servers are running, - # ignore it just in case - swift-init all restart || true - swift-init proxy stop || true - - unset s swift_hash swift_auth_server + init_swift fi @@ -1802,6 +1519,12 @@ fi # Only run the services specified in ``ENABLED_SERVICES`` +# Launch Swift Services +if is_service_enabled swift; then + echo_summary "Starting Swift" + start_swift +fi + # Launch the Glance services if is_service_enabled g-api g-reg; then echo_summary "Starting Glance" @@ -1905,8 +1628,6 @@ if is_service_enabled ceilometer; then start_ceilometer fi -screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v" - # Starting the nova-objectstore only if swift3 service is not enabled. # Swift will act as s3 objectstore. is_service_enabled swift3 || \ diff --git a/unstack.sh b/unstack.sh index 0040cf1e..20ba17b6 100755 --- a/unstack.sh +++ b/unstack.sh @@ -27,6 +27,7 @@ DATA_DIR=${DATA_DIR:-${DEST}/data} # Get project function libraries source $TOP_DIR/lib/cinder source $TOP_DIR/lib/horizon +source $TOP_DIR/lib/swift # Determine what system we are running on. This provides ``os_VENDOR``, # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` @@ -47,7 +48,7 @@ fi # Swift runs daemons if is_service_enabled swift; then - swift-init all stop 2>/dev/null || true + stop_swift fi # Apache has the WSGI processes From 0da8dbd0a369d90e7eafd88e54a1cff91f09448d Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 29 Nov 2012 14:37:14 -0500 Subject: [PATCH 799/967] create a horizon exercise for sanity check from time to time horizon gets broken in devstack and it takes a while to figure out why. Put a sanity check into devstack exercises that checks for horizon front page being up and not just a stack trace to use as a simple gate. 
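The new exercise greps the rendered front page for its "Log In" text; a blunter probe (an alternative, not what the patch does) would be to lean on curl's exit status alone:

    if ! curl -sf -o /dev/null "http://$SERVICE_HOST/"; then
        echo "Horizon is not answering on $SERVICE_HOST"
        exit 1
    fi

Checking the page body, as the patch does, is the stricter test: a server that answers 200 with the wrong content would still slip past a status-only check.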
Change-Id: I13a6c59881f618d5194b1625b67115013c9cb6c2 --- exercises/horizon.sh | 45 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100755 exercises/horizon.sh diff --git a/exercises/horizon.sh b/exercises/horizon.sh new file mode 100755 index 00000000..c5dae3ab --- /dev/null +++ b/exercises/horizon.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +# **horizon.sh** + +# Sanity check that horizon started if enabled + +echo "*********************************************************************" +echo "Begin DevStack Exercise: $0" +echo "*********************************************************************" + +# This script exits on an error so that errors don't compound and you see +# only the first error that occured. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. +set -o xtrace + + +# Settings +# ======== + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc + +# Import exercise configuration +source $TOP_DIR/exerciserc + +is_service_enabled horizon || exit 55 + +# can we get the front page +curl http://$SERVICE_HOST 2>/dev/null | grep -q '
Log In
' || die "Horizon front page not functioning!" + +set +o xtrace +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" + From 83e109571ec2c2a08a378d9c3a69bfbf3f5ec1ba Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Fri, 30 Nov 2012 23:28:07 +0100 Subject: [PATCH 800/967] Allow Versioning with swift * add the allow_versions to the container configs Change-Id: I2d39ba7c60f5f1c4cd4f80ed61a02a64979e8f19 --- lib/swift | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/swift b/lib/swift index 7acb1dfe..366c467b 100644 --- a/lib/swift +++ b/lib/swift @@ -277,6 +277,8 @@ EOF swift_node_config=${SWIFT_CONFIG_DIR}/container-server/${node_number}.conf cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config} generate_swift_config ${swift_node_config} ${node_number} $[CONTAINER_PORT_BASE + 10 * (node_number - 1)] + iniuncomment ${swift_node_config} app:container-server allow_versions + iniset ${swift_node_config} app:container-server allow_versions "true" swift_node_config=${SWIFT_CONFIG_DIR}/account-server/${node_number}.conf cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config} From 93923ebeed2e5ef4f8b9733f5457f97615f29477 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 30 Nov 2012 17:51:12 -0500 Subject: [PATCH 801/967] fix typo in get_uec_image.sh it's precise, not percise. Fixes bug #1085233 Change-Id: I857c233c9b461b7cc6e2ac356aa6f2cd6be93ff5 --- tools/get_uec_image.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh index 156fd439..3c62064a 100755 --- a/tools/get_uec_image.sh +++ b/tools/get_uec_image.sh @@ -65,7 +65,7 @@ KERNEL=$3 case $DIST_NAME in quantal) ;; - percise) ;; + precise) ;; oneiric) ;; natty) ;; maverick) ;; From 99fcd8184d078efb9f8fcbe6d714b04ea44259e1 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sun, 2 Dec 2012 13:07:39 -0800 Subject: [PATCH 802/967] A few more NOVA_BIN_DIR cleanups. Change-Id: I1d1225c894f1857a3723e01f18d0f0dade670dc9 --- stack.sh | 2 +- tools/configure_tempest.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index dbb53ecb..aca4cff0 100755 --- a/stack.sh +++ b/stack.sh @@ -1543,7 +1543,7 @@ if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nov add_nova_opt "s3_affix_tenant=True" fi -screen_it zeromq "cd $NOVA_DIR && $NOVA_DIR/bin/nova-rpc-zmq-receiver" +screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver" # Launch the nova-api and wait for it to answer before continuing if is_service_enabled n-api; then diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 03dc6839..298fa9ba 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -185,7 +185,7 @@ SSH_TIMEOUT=4 # Whitebox testing configuration for Compute... COMPUTE_WHITEBOX_ENABLED=True COMPUTE_SOURCE_DIR=$NOVA_SOURCE_DIR -COMPUTE_BIN_DIR=/usr/bin/nova +COMPUTE_BIN_DIR=$NOVA_BIN_DIR COMPUTE_CONFIG_PATH=/etc/nova/nova.conf # TODO(jaypipes): Create the key file here... right now, no whitebox # tests actually use a key. From 1a7bbd255fd02fbfee6a26b990d15d38402f1992 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Mon, 3 Dec 2012 17:04:02 +1300 Subject: [PATCH 803/967] is_suse false positives on Fedora Also, uses of is_suse were also always evaluating to true on Fedora. 
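The false positive fixed here is a classic bash trap worth spelling out: inside [[ ]], a bare word such as is_suse is treated as a (non-empty, hence true) string, not run as a command. A self-contained sketch of the broken and fixed forms:

    function is_suse { return 1; }     # pretend we are not on SUSE
    os_VENDOR="Fedora"

    [[ "$os_VENDOR" = "openSUSE" || is_suse ]] && echo "old form: wrongly true"
    [[ "$os_VENDOR" = "openSUSE" ]] || is_suse || echo "new form: correctly false"

Moving is_suse outside the brackets, as the diff below does, makes the function call actually happen.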
Change-Id: I068f3179edbfb295163a4e4faa4998f2f7b2c124 --- functions | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/functions b/functions index f2b12e21..9022675a 100644 --- a/functions +++ b/functions @@ -349,8 +349,7 @@ function is_suse { GetOSVersion fi - [[ "$os_VENDOR" = "openSUSE" || "$os_VENDOR" = "SUSE LINUX" ]] - return $? + [ "$os_VENDOR" = "openSUSE" ] || [ "$os_VENDOR" = "SUSE LINUX" ] } @@ -646,7 +645,7 @@ function pip_install { SUDO_PIP="env" else SUDO_PIP="sudo" - if [[ "$os_PACKAGE" = "deb" || is_suse ]]; then + if [[ "$os_PACKAGE" = "deb" ]] || is_suse; then CMD_PIP=/usr/bin/pip else CMD_PIP=/usr/bin/pip-python @@ -1040,7 +1039,7 @@ function get_rootwrap_location() { GetOSVersion fi - if [[ "$os_PACKAGE" = "deb" || is_suse ]]; then + if [[ "$os_PACKAGE" = "deb" ]] || is_suse; then echo "/usr/local/bin/$module-rootwrap" else echo "/usr/bin/$module-rootwrap" @@ -1057,7 +1056,7 @@ function qpid_is_supported() { # Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is # not in openSUSE either right now. - [[ "$DISTRO" = "oneiric" || is_suse ]] + [[ "$DISTRO" = "oneiric" ]] || is_suse return $? } From 8f393df3524563813068ac0a646b86bbcfa28d30 Mon Sep 17 00:00:00 2001 From: Sean Gallagher Date: Mon, 3 Dec 2012 00:17:38 -0800 Subject: [PATCH 804/967] Use TOP_DIR to find devstack directory Change two statements to use TOP_DIR instead of PWD Change-Id: I6d3d16ce853493a06850b078d39e964f873c16fe Fixes: bug #1085819 --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index dbb53ecb..e94c1082 100755 --- a/stack.sh +++ b/stack.sh @@ -196,8 +196,8 @@ if [[ $EUID -eq 0 ]]; then > /etc/sudoers.d/50_stack_sh ) echo "Copying files to stack user" - STACK_DIR="$DEST/${PWD##*/}" - cp -r -f -T "$PWD" "$STACK_DIR" + STACK_DIR="$DEST/${TOP_DIR##*/}" + cp -r -f -T "$TOP_DIR" "$STACK_DIR" chown -R stack "$STACK_DIR" if [[ "$SHELL_AFTER_RUN" != "no" ]]; then exec su -c "set -e; cd $STACK_DIR; bash stack.sh; bash" stack From 9a27dd8dd557c9bf64bfaf4965eff7a8f75fb1af Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Mon, 3 Dec 2012 12:41:02 +0000 Subject: [PATCH 805/967] heat : heat repo moved to openstack Main heat repo has now moved under the github openstack project Note the old checkout will require removal to trigger stack.sh to re-clone it from the new location Change-Id: I4163e35cad7c319961d42f0c53a68ec6244508ed Signed-off-by: Steven Hardy --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 9588cf99..39d34b0b 100644 --- a/stackrc +++ b/stackrc @@ -97,7 +97,7 @@ TEMPEST_REPO=${GIT_BASE}/openstack/tempest.git TEMPEST_BRANCH=master # heat service -HEAT_REPO=${GIT_BASE}/heat-api/heat.git +HEAT_REPO=${GIT_BASE}/openstack/heat.git HEAT_BRANCH=master # python heat client library From c2d2f52bbdf7b83fbd74a7396c7380b6da9b2ae3 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Mon, 3 Dec 2012 10:02:40 -0500 Subject: [PATCH 806/967] Fix qpid support on Fedora. The new qpid_is_supported function returned the opposite value from what it was supposed to. It returned success for the platforms where qpid is not supported. 
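Again for illustration only, not part of the patch: a sketch of the inverted exit status corrected below. The helper should succeed (return 0) only on platforms where qpid can actually be used; the names here are placeholders.

is_suse() { false; }   # hypothetical stand-in
DISTRO=oneiric         # a platform on which qpid is not available

old_check() { [[ "$DISTRO" = "oneiric" ]] || is_suse; }
new_check() { ! ( [[ "$DISTRO" = "oneiric" ]] || is_suse ); }

old_check && echo "old: succeeds on oneiric, so callers think qpid is supported (wrong)"
new_check || echo "new: fails on oneiric, correctly reporting qpid as unsupported"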
Change-Id: I0ceaae1dddaa6192657926834c6eb8006925f0cf --- functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions b/functions index 9022675a..aaa00a49 100644 --- a/functions +++ b/functions @@ -1056,7 +1056,7 @@ function qpid_is_supported() { # Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is # not in openSUSE either right now. - [[ "$DISTRO" = "oneiric" ]] || is_suse + ( ! ([[ "$DISTRO" = "oneiric" ]] || is_suse) ) return $? } From 205bc49ef5fd642fe83fddd07cc2578ed7c6f165 Mon Sep 17 00:00:00 2001 From: Mark McClain Date: Fri, 16 Nov 2012 00:15:28 -0500 Subject: [PATCH 807/967] adding Quantum metadata service support Change-Id: I8985c169401eee7a435b99293bdf6d42f28ab66c --- stack.sh | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index dbb53ecb..6d8e1f67 100755 --- a/stack.sh +++ b/stack.sh @@ -1331,9 +1331,10 @@ if is_service_enabled q-l3; then # Set debug iniset $Q_L3_CONF_FILE DEFAULT debug True - iniset $Q_L3_CONF_FILE DEFAULT metadata_ip $Q_META_DATA_IP iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $Q_L3_CONF_FILE DEFAULT state_path $DATA_DIR/quantum + iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" quantum_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url @@ -1354,6 +1355,27 @@ if is_service_enabled q-l3; then fi fi +#Quantum Metadata +if is_service_enabled q-meta; then + AGENT_META_BINARY="$QUANTUM_DIR/bin/quantum-metadata-agent" + Q_META_CONF_FILE=/etc/quantum/metadata_agent.ini + + cp $QUANTUM_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE + + # Set verbose + iniset $Q_META_CONF_FILE DEFAULT verbose True + # Set debug + iniset $Q_META_CONF_FILE DEFAULT debug True + + iniset $Q_META_CONF_FILE DEFAULT state_path $DATA_DIR/quantum + + iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP + + iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + + quantum_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url +fi + # Quantum RPC support - must be updated prior to starting any of the services if is_service_enabled quantum; then iniset $Q_CONF_FILE DEFAULT control_exchange quantum @@ -1442,6 +1464,9 @@ if is_service_enabled nova; then fi add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER" add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER" + if is_service_enabled q-meta; then + add_nova_opt "service_quantum_metadata_proxy=True" + fi elif is_service_enabled n-net; then add_nova_opt "network_manager=nova.network.manager.$NET_MAN" add_nova_opt "public_interface=$PUBLIC_INTERFACE" @@ -1611,6 +1636,7 @@ fi # Start up the quantum agents if enabled screen_it q-agt "python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $Q_CONF_FILE --config-file=$Q_DHCP_CONF_FILE" +screen_it q-meta "python $AGENT_META_BINARY --config-file $Q_CONF_FILE --config-file=$Q_META_CONF_FILE" screen_it q-l3 "python $AGENT_L3_BINARY --config-file $Q_CONF_FILE --config-file=$Q_L3_CONF_FILE" if is_service_enabled nova; then From 8ec27220c5c63de59f129c839eddf5380efe46a4 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 29 Nov 2012 09:25:31 +0100 Subject: [PATCH 808/967] Add a get_pip_command function There are two places where we need to find the right command for pip, so instead of having one version we fix and a buggy version we forget, simply use a function :-) Change-Id: I728c17ad7be5c86690c4d7907f77f1f98ec2b815 --- functions | 19 
++++++++++++++----- tools/info.sh | 6 +----- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/functions b/functions index 9022675a..bc6fdc35 100644 --- a/functions +++ b/functions @@ -645,11 +645,7 @@ function pip_install { SUDO_PIP="env" else SUDO_PIP="sudo" - if [[ "$os_PACKAGE" = "deb" ]] || is_suse; then - CMD_PIP=/usr/bin/pip - else - CMD_PIP=/usr/bin/pip-python - fi + CMD_PIP=$(get_pip_command) fi if [[ "$PIP_USE_MIRRORS" != "False" ]]; then PIP_MIRROR_OPT="--use-mirrors" @@ -1046,6 +1042,19 @@ function get_rootwrap_location() { fi } +# Get the path to the pip command. +# get_pip_command +function get_pip_command() { + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + + if [[ "$os_PACKAGE" = "deb" ]] || is_suse; then + echo "/usr/bin/pip" + else + echo "/usr/bin/pip-python" + fi +} # Check if qpid can be used on the current distro. # qpid_is_supported diff --git a/tools/info.sh b/tools/info.sh index 5c9a1d3d..a872d59d 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -107,11 +107,7 @@ done # Pips # ---- -if [[ "$os_PACKAGE" = "deb" ]]; then - CMD_PIP=/usr/bin/pip -else - CMD_PIP=/usr/bin/pip-python -fi +CMD_PIP=$(get_pip_command) # Pip tells us what is currently installed FREEZE_FILE=$(mktemp --tmpdir freeze.XXXXXX) From d835de892a9426a96f16e187d23eff715311d492 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 29 Nov 2012 17:11:35 -0600 Subject: [PATCH 809/967] Move keystone account creation out of keystone_data.sh keystone_data.sh is getting unwieldly and increasingly needs configuration information for services. Also need the ability to manipulate HOST/IP information for hosts to handle service HA/proxy configurations. Begin moving the creation of service account information into the service lib files, starting with the common accounts and keystone itself. 
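The account creation moved into lib/keystone below leans heavily on parsing the keystone CLI's table output with grep and get_field. As a reading aid only, a simplified sketch of that idiom; get_field is assumed to be the devstack helper that prints the Nth |-separated column of each input row.

# Simplified sketch of the assumed helper: print column N of a |-separated
# table row, with surrounding whitespace trimmed.
get_field() {
    local n=$(( $1 + 1 ))
    awk -F'|' -v n=$n '{ gsub(/^ +| +$/, "", $n); print $n }'
}

# Mirrors the usage in the diff below, e.g.:
#   ADMIN_TENANT=$(keystone tenant-create --name admin | grep " id " | get_field 2)
echo "| id | abcd1234 |" | get_field 2    # prints abcd1234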
Change-Id: Ie259f7b71983c4f4a2e33ab9c8a8e2b00238ba38 --- files/keystone_data.sh | 63 ++----------------------- lib/keystone | 101 ++++++++++++++++++++++++++++++++++++++++- stack.sh | 16 ++++--- 3 files changed, 112 insertions(+), 68 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 20749bc6..c8e68dd6 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -4,7 +4,6 @@ # # Tenant User Roles # ------------------------------------------------------------------ -# admin admin admin # service glance admin # service nova admin, [ResellerAdmin (swift only)] # service quantum admin # if enabled @@ -12,9 +11,6 @@ # service cinder admin # if enabled # service heat admin # if enabled # service ceilometer admin # if enabled -# demo admin admin -# demo demo Member, anotherrole -# invisible_to_admin demo Member # Tempest Only: # alt_demo alt_demo Member # @@ -40,53 +36,14 @@ function get_id () { echo `"$@" | awk '/ id / { print $4 }'` } - -# Tenants -# ------- - -ADMIN_TENANT=$(get_id keystone tenant-create --name=admin) -SERVICE_TENANT=$(get_id keystone tenant-create --name=$SERVICE_TENANT_NAME) -DEMO_TENANT=$(get_id keystone tenant-create --name=demo) -INVIS_TENANT=$(get_id keystone tenant-create --name=invisible_to_admin) - - -# Users -# ----- - -ADMIN_USER=$(get_id keystone user-create --name=admin \ - --pass="$ADMIN_PASSWORD" \ - --email=admin@example.com) -DEMO_USER=$(get_id keystone user-create --name=demo \ - --pass="$ADMIN_PASSWORD" \ - --email=demo@example.com) +# Lookups +SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") +ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") # Roles # ----- -ADMIN_ROLE=$(get_id keystone role-create --name=admin) -KEYSTONEADMIN_ROLE=$(get_id keystone role-create --name=KeystoneAdmin) -KEYSTONESERVICE_ROLE=$(get_id keystone role-create --name=KeystoneServiceAdmin) -# ANOTHER_ROLE demonstrates that an arbitrary role may be created and used -# TODO(sleepsonthefloor): show how this can be used for rbac in the future! -ANOTHER_ROLE=$(get_id keystone role-create --name=anotherrole) - - -# Add Roles to Users in Tenants -keystone user-role-add --user_id $ADMIN_USER --role_id $ADMIN_ROLE --tenant_id $ADMIN_TENANT -keystone user-role-add --user_id $ADMIN_USER --role_id $ADMIN_ROLE --tenant_id $DEMO_TENANT -keystone user-role-add --user_id $DEMO_USER --role_id $ANOTHER_ROLE --tenant_id $DEMO_TENANT - -# TODO(termie): these two might be dubious -keystone user-role-add --user_id $ADMIN_USER --role_id $KEYSTONEADMIN_ROLE --tenant_id $ADMIN_TENANT -keystone user-role-add --user_id $ADMIN_USER --role_id $KEYSTONESERVICE_ROLE --tenant_id $ADMIN_TENANT - - -# The Member role is used by Horizon and Swift so we need to keep it: -MEMBER_ROLE=$(get_id keystone role-create --name=Member) -keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $DEMO_TENANT -keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $INVIS_TENANT - # The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it. # The admin role in swift allows a user to act as an admin for their tenant, # but ResellerAdmin is needed for a user to act as any tenant. 
The name of this @@ -96,20 +53,6 @@ RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin) # Services # -------- -# Keystone -if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - KEYSTONE_SERVICE=$(get_id keystone service-create \ - --name=keystone \ - --type=identity \ - --description="Keystone Identity Service") - keystone endpoint-create \ - --region RegionOne \ - --service_id $KEYSTONE_SERVICE \ - --publicurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" \ - --adminurl "http://$SERVICE_HOST:\$(admin_port)s/v2.0" \ - --internalurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" -fi - # Nova if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then NOVA_USER=$(get_id keystone user-create \ diff --git a/lib/keystone b/lib/keystone index ae890567..f6a6d667 100644 --- a/lib/keystone +++ b/lib/keystone @@ -15,6 +15,7 @@ # configure_keystone # init_keystone # start_keystone +# create_keystone_accounts # stop_keystone # cleanup_keystone @@ -45,7 +46,6 @@ KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-PKI} # Set Keystone interface configuration -KEYSTONE_API_PORT=${KEYSTONE_API_PORT:-5000} KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357} KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-http} @@ -144,6 +144,100 @@ function configure_keystone() { } +# create_keystone_accounts() - Sets up common required keystone accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service -- -- +# -- -- Member +# admin admin admin +# demo admin admin +# demo demo Member, anotherrole +# invisible_to_admin demo Member + +# Migrated from keystone_data.sh +create_keystone_accounts() { + + # admin + ADMIN_TENANT=$(keystone tenant-create \ + --name admin \ + | grep " id " | get_field 2) + ADMIN_USER=$(keystone user-create \ + --name admin \ + --pass "$ADMIN_PASSWORD" \ + --email admin@example.com \ + | grep " id " | get_field 2) + ADMIN_ROLE=$(keystone role-create \ + --name admin \ + | grep " id " | get_field 2) + keystone user-role-add \ + --user_id $ADMIN_USER \ + --role_id $ADMIN_ROLE \ + --tenant_id $ADMIN_TENANT + + # service + SERVICE_TENANT=$(keystone tenant-create \ + --name $SERVICE_TENANT_NAME \ + | grep " id " | get_field 2) + + # The Member role is used by Horizon and Swift so we need to keep it: + MEMBER_ROLE=$(keystone role-create --name=Member | grep " id " | get_field 2) + # ANOTHER_ROLE demonstrates that an arbitrary role may be created and used + # TODO(sleepsonthefloor): show how this can be used for rbac in the future! 
+ ANOTHER_ROLE=$(keystone role-create --name=anotherrole | grep " id " | get_field 2) + + # invisible tenant - admin can't see this one + INVIS_TENANT=$(keystone tenant-create --name=invisible_to_admin | grep " id " | get_field 2) + + # demo + DEMO_TENANT=$(keystone tenant-create \ + --name=demo \ + | grep " id " | get_field 2) + DEMO_USER=$(keystone user-create \ + --name demo \ + --pass "$ADMIN_PASSWORD" \ + --email demo@example.com \ + | grep " id " | get_field 2) + keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $DEMO_TENANT + keystone user-role-add --user_id $ADMIN_USER --role_id $ADMIN_ROLE --tenant_id $DEMO_TENANT + keystone user-role-add --user_id $DEMO_USER --role_id $ANOTHER_ROLE --tenant_id $DEMO_TENANT + keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $INVIS_TENANT + + # Keystone + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + KEYSTONE_SERVICE=$(keystone service-create \ + --name keystone \ + --type identity \ + --description "Keystone Identity Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $KEYSTONE_SERVICE \ + --publicurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:\$(public_port)s/v2.0" \ + --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:\$(admin_port)s/v2.0" \ + --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:\$(public_port)s/v2.0" + fi + + # TODO(dtroyer): This is part of a series of changes...remove these when + # complete if they are really unused +# KEYSTONEADMIN_ROLE=$(keystone role-create \ +# --name KeystoneAdmin \ +# | grep " id " | get_field 2) +# KEYSTONESERVICE_ROLE=$(keystone role-create \ +# --name KeystoneServiceAdmin \ +# | grep " id " | get_field 2) + + # TODO(termie): these two might be dubious +# keystone user-role-add \ +# --user_id $ADMIN_USER \ +# --role_id $KEYSTONEADMIN_ROLE \ +# --tenant_id $ADMIN_TENANT +# keystone user-role-add \ +# --user_id $ADMIN_USER \ +# --role_id $KEYSTONESERVICE_ROLE \ +# --tenant_id $ADMIN_TENANT +} + # init_keystone() - Initialize databases, etc. function init_keystone() { # (Re)create keystone database @@ -176,6 +270,11 @@ function install_keystone() { function start_keystone() { # Start Keystone in a screen window screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" + echo "Waiting for keystone to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ >/dev/null; do sleep 1; done"; then + echo "keystone did not start" + exit 1 + fi } # stop_keystone() - Stop running processes diff --git a/stack.sh b/stack.sh index 8e8c5199..5ab0f8e7 100755 --- a/stack.sh +++ b/stack.sh @@ -953,15 +953,16 @@ if is_service_enabled key; then configure_keystone init_keystone start_keystone - echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ >/dev/null; do sleep 1; done"; then - echo "keystone did not start" - exit 1 - fi - # ``keystone_data.sh`` creates services, admin and demo users, and roles. 
+ # Set up a temporary admin URI for Keystone SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 + # Do the keystone-specific bits from keystone_data.sh + export OS_SERVICE_TOKEN=$SERVICE_TOKEN + export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT + create_keystone_accounts + + # ``keystone_data.sh`` creates services, admin and demo users, and roles. ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \ @@ -974,6 +975,7 @@ if is_service_enabled key; then export OS_TENANT_NAME=admin export OS_USERNAME=admin export OS_PASSWORD=$ADMIN_PASSWORD + unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT fi @@ -1750,7 +1752,7 @@ fi # If Keystone is present you can point ``nova`` cli to this server if is_service_enabled key; then - echo "Keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/" + echo "Keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/" echo "Examples on using novaclient command line is in exercise.sh" echo "The default users are: admin and demo" echo "The password: $ADMIN_PASSWORD" From 2ed63f4f8ede2a5819eb76f109a947f6bab24d0d Mon Sep 17 00:00:00 2001 From: Martin Vidner Date: Tue, 4 Dec 2012 10:33:49 +0100 Subject: [PATCH 810/967] Fix the default for APACHE_GROUP It should be the group of the effective apache user. For example, on openSUSE, we use wwwrun:www for apache and $USER:users for users. Change-Id: I8e12a8d90d45cfd18e67a41cf5462216ae404733 --- lib/horizon | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/horizon b/lib/horizon index 6173042f..1f68d795 100644 --- a/lib/horizon +++ b/lib/horizon @@ -29,10 +29,10 @@ set +o xtrace # Set up default directories HORIZON_DIR=$DEST/horizon -# Allow overriding the default Apache user and group, default both to -# current user. +# Allow overriding the default Apache user and group, default to +# current user and his default group. APACHE_USER=${APACHE_USER:-$USER} -APACHE_GROUP=${APACHE_GROUP:-$APACHE_USER} +APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)} # Entry Points From c18b96515279064c85cb7a71939d9e9de961d905 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Tue, 4 Dec 2012 12:36:34 +0100 Subject: [PATCH 811/967] Add is_ubuntu function This replaces all of the [[ "$os_PACKAGE" = "deb" ]] tests, except when those tests are before straight calls to dpkg. Change-Id: I8a3ebf1b1bc5a55d736f9258d5ba1d24dabf04ea --- functions | 47 +++++++++++++++++++-------------------------- lib/cinder | 2 +- lib/databases/mysql | 4 ++-- lib/horizon | 6 +++--- lib/nova | 8 ++++---- lib/swift | 4 ++-- stack.sh | 4 ++-- tools/info.sh | 2 +- unstack.sh | 4 ++-- 9 files changed, 37 insertions(+), 44 deletions(-) diff --git a/functions b/functions index 794e4747..0911557f 100644 --- a/functions +++ b/functions @@ -341,6 +341,19 @@ function GetDistro() { } +# Determine if current distribution is an Ubuntu-based distribution. +# It will also detect non-Ubuntu but Debian-based distros; this is not an issue +# since Debian and Ubuntu should be compatible. +# is_ubuntu +function is_ubuntu { + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + + [ "$os_PACKAGE" = "deb" ] +} + + # Determine if current distribution is a SUSE-based distribution # (openSUSE, SLE). 
# is_suse @@ -580,11 +593,7 @@ function disable_negated_services() { # Distro-agnostic package installer # install_package package [package ...] function install_package() { - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update NO_UPDATE_REPOS=True @@ -609,6 +618,7 @@ function is_package_installed() { if [[ -z "$os_PACKAGE" ]]; then GetOSVersion fi + if [[ "$os_PACKAGE" = "deb" ]]; then dpkg -l "$@" > /dev/null return $? @@ -661,10 +671,7 @@ function pip_install { # Service wrapper to restart services # restart_service service-name function restart_service() { - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then sudo /usr/sbin/service $1 restart else sudo /sbin/service $1 restart @@ -746,10 +753,7 @@ function setup_develop() { # Service wrapper to start services # start_service service-name function start_service() { - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then sudo /usr/sbin/service $1 start else sudo /sbin/service $1 start @@ -760,10 +764,7 @@ function start_service() { # Service wrapper to stop services # stop_service service-name function stop_service() { - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then sudo /usr/sbin/service $1 stop else sudo /sbin/service $1 stop @@ -1031,11 +1032,7 @@ function add_user_to_group() { function get_rootwrap_location() { local module=$1 - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - - if [[ "$os_PACKAGE" = "deb" ]] || is_suse; then + if is_ubuntu || is_suse; then echo "/usr/local/bin/$module-rootwrap" else echo "/usr/bin/$module-rootwrap" @@ -1045,11 +1042,7 @@ function get_rootwrap_location() { # Get the path to the pip command. # get_pip_command function get_pip_command() { - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - - if [[ "$os_PACKAGE" = "deb" ]] || is_suse; then + if is_ubuntu || is_suse; then echo "/usr/bin/pip" else echo "/usr/bin/pip-python" diff --git a/lib/cinder b/lib/cinder index 1aa34cd2..ce160bf0 100644 --- a/lib/cinder +++ b/lib/cinder @@ -237,7 +237,7 @@ function _configure_tgt_for_config_d() { # start_cinder() - Start running processes, including screen function start_cinder() { if is_service_enabled c-vol; then - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then _configure_tgt_for_config_d if [[ ! -f /etc/tgt/conf.d/cinder.conf ]]; then echo "include $CINDER_STATE_PATH/volumes/*" | sudo tee /etc/tgt/conf.d/cinder.conf diff --git a/lib/databases/mysql b/lib/databases/mysql index eb84f2ca..60ea143f 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -20,7 +20,7 @@ function recreate_database_mysql { function configure_database_mysql { echo_summary "Configuring and starting MySQL" - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then MY_CONF=/etc/mysql/my.cnf MYSQL=mysql else @@ -61,7 +61,7 @@ default-storage-engine = InnoDB" $MY_CONF } function install_database_mysql { - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then # Seed configuration with mysql password so that apt-get install doesn't # prompt us for a password upon install. 
cat < natty ]]; then cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0" sudo mkdir -p /cgroup @@ -228,7 +228,7 @@ cgroup_device_acl = [ EOF fi - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then LIBVIRT_DAEMON=libvirt-bin else # http://wiki.libvirt.org/page/SSHPolicyKitSetup @@ -393,7 +393,7 @@ function install_novaclient() { # install_nova() - Collect source and prepare function install_nova() { if is_service_enabled n-cpu; then - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then LIBVIRT_PKG_NAME=libvirt-bin else LIBVIRT_PKG_NAME=libvirt @@ -403,7 +403,7 @@ function install_nova() { # splitting a system into many smaller parts. LXC uses cgroups and chroot # to simulate multiple systems. if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then if [[ "$DISTRO" > natty ]]; then install_package cgroup-lite fi diff --git a/lib/swift b/lib/swift index 366c467b..140e5e9b 100644 --- a/lib/swift +++ b/lib/swift @@ -159,7 +159,7 @@ function configure_swift() { s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,; " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf # rsyncd.conf just prepared for 4 nodes - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync else sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync @@ -341,7 +341,7 @@ function start_swift() { # (re)start rsyslog restart_service rsyslog # Start rsync - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then sudo /etc/init.d/rsync restart || : else sudo systemctl start xinetd.service diff --git a/stack.sh b/stack.sh index 55eafa82..94283563 100755 --- a/stack.sh +++ b/stack.sh @@ -677,7 +677,7 @@ set -o xtrace # Install package requirements echo_summary "Installing package prerequisites" -if [[ "$os_PACKAGE" = "deb" ]]; then +if is_ubuntu; then install_package $(get_packages $FILES/apts) elif is_suse; then install_package $(get_packages $FILES/rpms-suse) @@ -726,7 +726,7 @@ if is_service_enabled q-agt; then if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then # Install deps # FIXME add to ``files/apts/quantum``, but don't install if not needed! - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then kernel_version=`cat /proc/version | cut -d " " -f3` install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version else diff --git a/tools/info.sh b/tools/info.sh index a872d59d..583a9949 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -88,7 +88,7 @@ done # - We are going to check packages only for the services needed. # - We are parsing the packages files and detecting metadatas. -if [[ "$os_PACKAGE" = "deb" ]]; then +if is_ubuntu; then PKG_DIR=$FILES/apts else PKG_DIR=$FILES/rpms diff --git a/unstack.sh b/unstack.sh index 20ba17b6..81ce088a 100755 --- a/unstack.sh +++ b/unstack.sh @@ -65,7 +65,7 @@ if is_service_enabled cinder; then # If tgt driver isn't running this won't work obviously # So check the response and restart if need be echo "tgtd seems to be in a bad state, restarting..." 
- if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then restart_service tgt else restart_service tgtd @@ -85,7 +85,7 @@ if is_service_enabled cinder; then sudo rm -rf $CINDER_STATE_PATH/volumes/* fi - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then stop_service tgt else stop_service tgtd From ec903059d5ab4c387f49f429976da9c147fab808 Mon Sep 17 00:00:00 2001 From: Mark McClain Date: Tue, 4 Dec 2012 10:32:41 -0500 Subject: [PATCH 812/967] Revert "Set the rabbit_durable_queues to match local consumers" This reverts commit 71cf53a9f60176419732f3ecbbce11c75190c059. The attempt to set the queue durability for Glance notifications always sets the queues to durable. We are reverting this until a refined approach is available. Change-Id: I469e5149d21e3fcdd409da8114d5ccef1ff1243c --- lib/glance | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/lib/glance b/lib/glance index 60026d54..b02a4b63 100644 --- a/lib/glance +++ b/lib/glance @@ -70,13 +70,6 @@ function configure_glanceclient() { setup_develop $GLANCECLIENT_DIR } -# durable_glance_queues() - Determine if RabbitMQ queues are durable or not -function durable_glance_queues() { - test `rabbitmqctl list_queues name durable | grep true | wc -l` -gt 0 && return 0 - test `rabbitmqctl list_exchanges name durable | grep true | wc -l` -gt 0 && return 0 - return 1 -} - # configure_glance() - Set config files, create data dirs, etc function configure_glance() { setup_develop $GLANCE_DIR @@ -127,12 +120,6 @@ function configure_glance() { iniset $GLANCE_API_CONF DEFAULT notifier_strategy rabbit iniset $GLANCE_API_CONF DEFAULT rabbit_host $RABBIT_HOST iniset $GLANCE_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - if [[ durable_glance_queues -eq 0 ]]; then - # This gets around https://bugs.launchpad.net/glance/+bug/1074132 - # that results in a g-api server becoming unresponsive during - # startup... - iniset $GLANCE_API_CONF DEFAULT rabbit_durable_queues True - fi fi if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api From d75bc1f3c19fb8eb2c79434e2fc9307dc83bff26 Mon Sep 17 00:00:00 2001 From: jiajun xu Date: Tue, 4 Dec 2012 08:51:35 +0800 Subject: [PATCH 813/967] use pip_install to install packages for tempest Change-Id: Ia892e0bc3c8c16119686daf26595c2b82d7e27b0 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 871e9e73..4bfdc50a 100644 --- a/lib/tempest +++ b/lib/tempest @@ -52,7 +52,7 @@ function install_tempest() { # Tempest doesn't satisfy its dependencies on its own, so # install them here instead. - sudo pip install -r $TEMPEST_DIR/tools/pip-requires + pip_install -r $TEMPEST_DIR/tools/pip-requires } From 3ab927c9b04e372b66196d9b253fe32cbdf75a65 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Wed, 5 Dec 2012 16:15:26 +0100 Subject: [PATCH 814/967] Use getent to find if a group exists, not grep Change-Id: Ia181f41ae122dc5e5d3d33633dfd1d0e7420f8a9 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 6445a073..29238d43 100644 --- a/lib/nova +++ b/lib/nova @@ -232,7 +232,7 @@ EOF LIBVIRT_DAEMON=libvirt-bin else # http://wiki.libvirt.org/page/SSHPolicyKitSetup - if ! grep ^libvirtd: /etc/group >/dev/null; then + if ! 
getent group libvirtd >/dev/null; then sudo groupadd libvirtd fi sudo bash -c 'cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla From 818a9fa327414ec6123d572755cecb12ca483922 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Wed, 5 Dec 2012 17:39:33 +0100 Subject: [PATCH 815/967] Install qemu on openSUSE qemu is used as a fallback when kvm cannot be used. On Debian and Fedora, the binaries are there when kvm is installed, but this is not the case on openSUSE. Change-Id: I96592d105428acde9636608002109c166ac7a56a --- files/rpms-suse/nova | 2 ++ 1 file changed, 2 insertions(+) diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova index 0c036786..03067162 100644 --- a/files/rpms-suse/nova +++ b/files/rpms-suse/nova @@ -7,6 +7,8 @@ iptables iputils kpartx kvm +# qemu as fallback if kvm cannot be used +qemu libvirt # NOPRIME libvirt-python libxml2-python From 8bc21f6476304ca319489612867109d43d44cb6f Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Mon, 19 Nov 2012 22:04:28 -0800 Subject: [PATCH 816/967] move setup_quantum to stack.sh sudo is only allowed in stack.sh on the CI, so move setup_quantum code to the stack.sh. also fixes quantum debug command setup for linuxbridge and ryu Change-Id: I11bc0aa242a690e25acc088b3e9f483ceab38f26 --- exercises/boot_from_volume.sh | 5 ----- exercises/euca.sh | 5 ----- exercises/floating_ips.sh | 5 ----- exercises/quantum-adv-test.sh | 2 -- exercises/volumes.sh | 5 ----- lib/quantum | 17 ++++++++++++++--- openrc | 3 --- stack.sh | 35 +++++++++++++++++++---------------- unstack.sh | 6 ++++++ 9 files changed, 39 insertions(+), 44 deletions(-) diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 4c2f279e..5ebdecc7 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -35,7 +35,6 @@ source $TOP_DIR/openrc # Import quantum functions if needed if is_service_enabled quantum; then source $TOP_DIR/lib/quantum - setup_quantum fi # Import exercise configuration @@ -174,10 +173,6 @@ nova floating-ip-delete $FLOATING_IP || \ # Delete a secgroup nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" -if is_service_enabled quantum; then - teardown_quantum -fi - set +o xtrace echo "*********************************************************************" echo "SUCCESS: End DevStack Exercise: $0" diff --git a/exercises/euca.sh b/exercises/euca.sh index c307a064..67da1bee 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -36,7 +36,6 @@ source $TOP_DIR/eucarc # Import quantum functions if needed if is_service_enabled quantum; then source $TOP_DIR/lib/quantum - setup_quantum fi # Import exercise configuration @@ -175,10 +174,6 @@ fi # Delete group euca-delete-group $SECGROUP || die "Failure deleting security group $SECGROUP" -if is_service_enabled quantum; then - teardown_quantum -fi - set +o xtrace echo "*********************************************************************" echo "SUCCESS: End DevStack Exercise: $0" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index ae5691f4..8b18e6f4 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -34,7 +34,6 @@ source $TOP_DIR/openrc # Import quantum functions if needed if is_service_enabled quantum; then source $TOP_DIR/lib/quantum - setup_quantum fi # Import exercise configuration @@ -202,10 +201,6 @@ fi # Delete a secgroup nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" -if is_service_enabled quantum; then - teardown_quantum -fi - set +o xtrace 
echo "*********************************************************************" echo "SUCCESS: End DevStack Exercise: $0" diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh index 2ee82ff2..493e2239 100755 --- a/exercises/quantum-adv-test.sh +++ b/exercises/quantum-adv-test.sh @@ -58,7 +58,6 @@ is_service_enabled quantum && is_service_enabled q-agt && is_service_enabled q-d # Import quantum fucntions source $TOP_DIR/lib/quantum -setup_quantum # Import exercise configuration source $TOP_DIR/exerciserc @@ -475,7 +474,6 @@ main() { } -teardown_quantum #------------------------------------------------------------------------------- # Kick off script. #------------------------------------------------------------------------------- diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 3432763f..42f9cb4e 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -33,7 +33,6 @@ source $TOP_DIR/openrc # Import quantum functions if needed if is_service_enabled quantum; then source $TOP_DIR/lib/quantum - setup_quantum fi # Import exercise configuration @@ -212,10 +211,6 @@ fi # Delete a secgroup nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" -if is_service_enabled quantum; then - teardown_quantum -fi - set +o xtrace echo "*********************************************************************" echo "SUCCESS: End DevStack Exercise: $0" diff --git a/lib/quantum b/lib/quantum index 373d5217..14a3a4ad 100644 --- a/lib/quantum +++ b/lib/quantum @@ -5,9 +5,20 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace +QUANTUM_DIR=$DEST/quantum export QUANTUM_TEST_CONFIG_FILE=${QUANTUM_TEST_CONFIG_FILE:-"/etc/quantum/debug.ini"} QUANTUM_AUTH_CACHE_DIR=${QUANTUM_AUTH_CACHE_DIR:-/var/cache/quantum} +if is_service_enabled quantum; then + Q_CONF_FILE=/etc/quantum/quantum.conf + Q_RR_CONF_FILE=/etc/quantum/rootwrap.conf + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + Q_RR_COMMAND="sudo" + else + Q_RR_COMMAND="sudo $QUANTUM_DIR/bin/quantum-rootwrap $Q_RR_CONF_FILE" + fi +fi + # Configures keystone integration for quantum service and agents function quantum_setup_keystone() { local conf_file=$1 @@ -74,7 +85,7 @@ function _get_probe_cmd_prefix() { local from_net="$1" net_id=`_get_net_id $from_net` probe_id=`quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` - echo "sudo ip netns exec qprobe-$probe_id" + echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" } function delete_probe() { @@ -92,9 +103,9 @@ function _ping_check_quantum() { local check_command="" probe_cmd=`_get_probe_cmd_prefix $from_net` if [[ "$expected" = "True" ]]; then - check_command="while ! $probe_cmd ping -c1 -w1 $ip; do sleep 1; done" + check_command="while ! $probe_cmd ping -w 1 -c 1 $ip; do sleep 1; done" else - check_command="while $probe_cmd ping -c1 -w1 $ip; do sleep 1; done" + check_command="while $probe_cmd ping -w 1 -c 1 $ip; do sleep 1; done" fi if ! 
timeout $timeout_sec sh -c "$check_command"; then if [[ "$expected" = "True" ]]; then diff --git a/openrc b/openrc index 4b6b9b2b..08ef98be 100644 --- a/openrc +++ b/openrc @@ -72,6 +72,3 @@ export COMPUTE_API_VERSION=${COMPUTE_API_VERSION:-$NOVA_VERSION} # set log level to DEBUG (helps debug issues) # export KEYSTONECLIENT_DEBUG=1 # export NOVACLIENT_DEBUG=1 - -# set quantum debug command -export QUANTUM_TEST_CONFIG_FILE=${QUANTUM_TEST_CONFIG_FILE:-"/etc/quantum/debug.ini"} diff --git a/stack.sh b/stack.sh index 55eafa82..1d1ad636 100755 --- a/stack.sh +++ b/stack.sh @@ -321,7 +321,6 @@ HORIZON_DIR=$DEST/horizon OPENSTACKCLIENT_DIR=$DEST/python-openstackclient NOVNC_DIR=$DEST/noVNC SWIFT3_DIR=$DEST/swift3 -QUANTUM_DIR=$DEST/quantum QUANTUM_CLIENT_DIR=$DEST/python-quantumclient # Default Quantum Plugin @@ -1153,14 +1152,7 @@ if is_service_enabled quantum; then iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection $dburl unset dburl - Q_CONF_FILE=/etc/quantum/quantum.conf cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE - Q_RR_CONF_FILE=/etc/quantum/rootwrap.conf - if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - Q_RR_COMMAND="sudo" - else - Q_RR_COMMAND="sudo $QUANTUM_DIR/bin/quantum-rootwrap $Q_RR_CONF_FILE" - fi cp -p $QUANTUM_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE # Copy over the config and filter bits @@ -1400,13 +1392,22 @@ if is_service_enabled quantum; then iniset $Q_CONF_FILE DEFAULT rabbit_password $RABBIT_PASSWORD fi if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then - Q_DEBUG_CONF_FILE=/etc/quantum/debug.ini - cp $QUANTUM_DIR/etc/l3_agent.ini $Q_DEBUG_CONF_FILE - iniset $Q_L3_CONF_FILE DEFAULT verbose False - iniset $Q_L3_CONF_FILE DEFAULT debug False - iniset $Q_L3_CONF_FILE DEFAULT metadata_ip $Q_META_DATA_IP - iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset $Q_L3_CONF_FILE DEFAULT root_helper "sudo" + cp $QUANTUM_DIR/etc/l3_agent.ini $QUANTUM_TEST_CONFIG_FILE + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT verbose False + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT debug False + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + quantum_setup_keystone $QUANTUM_TEST_CONFIG_FILE DEFAULT set_auth_url + if [[ "$Q_PLUGIN" == "openvswitch" ]]; then + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge '' + elif [[ "$Q_PLUGIN" = "ryu" ]]; then + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT + fi fi fi @@ -1633,7 +1634,9 @@ if is_service_enabled q-svc; then iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID fi fi - + if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then + setup_quantum + fi elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then # Create a small network $NOVA_BIN_DIR/nova-manage network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS diff --git a/unstack.sh b/unstack.sh index 20ba17b6..a01ed6d1 100755 --- a/unstack.sh +++ b/unstack.sh @@ -37,6 +37,12 @@ if [[ "$1" == "--all" ]]; then 
UNSTACK_ALL=${UNSTACK_ALL:-1} fi +if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then + source $TOP_DIR/openrc + source $TOP_DIR/lib/quantum + teardown_quantum +fi + # Shut down devstack's screen to get the bulk of OpenStack services in one shot SCREEN=$(which screen) if [[ -n "$SCREEN" ]]; then From 2aa35174b0f99b1b7ea95af474ae1807542b74c6 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Wed, 5 Dec 2012 20:03:40 +0100 Subject: [PATCH 817/967] Move tempest config to lib/tempest * Using iniset * Config based on the tempest.config.sample * tools/configure_tempest.sh is pending for removal Change-Id: Ia42e98ba4b640b89bcd2674008090909d88a2efb --- lib/tempest | 208 ++++++++++++++++++++++--- stack.sh | 4 + tools/configure_tempest.sh | 308 +------------------------------------ 3 files changed, 194 insertions(+), 326 deletions(-) diff --git a/lib/tempest b/lib/tempest index 4bfdc50a..606f05ec 100644 --- a/lib/tempest +++ b/lib/tempest @@ -2,31 +2,49 @@ # Dependencies: # ``functions`` file -# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# ``lib/nova`` service is runing # - +# - DEST +# - ADMIN_PASSWORD +# - OS_USERNAME +# - DEFAULT_IMAGE_NAME +# - S3_SERVICE_PORT +# - SERVICE_HOST +# - BASE_SQL_CONN ``lib/database`` declares +# Optional Dependencies: +# IDENTITY_* +# ALT_* (similar vars exists in keystone_data.sh) +# IMAGE_* +# LIVE_MIGRATION_AVAILABLE +# DEFAULT_INSTANCE_TYPE +# DEFAULT_INSTANCE_USER +# USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION # ``stack.sh`` calls the entry points in this order: # -# install_XXXX -# configure_XXXX -# init_XXXX -# start_XXXX -# stop_XXXX -# cleanup_XXXX +# install_tempest +# configure_tempest +# init_tempest +## start_tempest +## stop_tempest +## cleanup_tempest # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace - # Defaults # -------- # # Set up default directories +NOVA_SOURCE_DIR=$DEST/nova TEMPEST_DIR=$DEST/tempest -TEMPEST_CONF_DIR=$DEST/tempest/etc +TEMPEST_CONF_DIR=$TEMPEST_DIR/etc +TEMPEST_CONF=$TEMPEST_CONF_DIR/tempest.conf + +BUILD_INTERVAL=3 +BUILD_TIMEOUT=400 # Entry Points # ------------ @@ -34,15 +52,168 @@ TEMPEST_CONF_DIR=$DEST/tempest/etc # configure_tempest() - Set config files, create data dirs, etc function configure_tempest() { + local IMAGE_LINES + local IMAGES + local NUM_IMAGES + local IMAGE_UUID + local IMAGE_UUID_ALT + local errexit + + #TODO(afazekas): # sudo python setup.py deploy - # iniset $tempest_CONF ... - # This function intentionally left blank - # - # TODO(sdague) actually move the guts of configure tempest - # into this function - cd tools - ./configure_tempest.sh - cd .. + + # This function exits on an error so that errors don't compound and you see + # only the first error that occured. + errexit=$(set +o | grep errexit) + set -o errexit + + #Save IFS + ifs=$IFS + + # Glance should already contain images to be used in tempest + # testing. Here we simply look for images stored in Glance + # and set the appropriate variables for use in the tempest config + # We ignore ramdisk and kernel images, look for the default image + # DEFAULT_IMAGE_NAME. If not found, we set the IMAGE_UUID to the + # first image returned and set IMAGE_UUID_ALT to the second, + # if there is more than one returned... + # ... 
Also ensure we only take active images, so we don't get snapshots in process + IMAGE_LINES=`glance image-list` + IFS=$'\n\r' + IMAGES="" + for line in $IMAGE_LINES; do + if [ -z $DEFAULT_IMAGE_NAME ]; then + IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | cut -d' ' -f2`" + else + IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | grep "$DEFAULT_IMAGE_NAME" | cut -d' ' -f2`" + fi + done + # Create array of image UUIDs... + IFS=" " + IMAGES=($IMAGES) + NUM_IMAGES=${#IMAGES[*]} + echo "Found $NUM_IMAGES images" + if [[ $NUM_IMAGES -eq 0 ]]; then + echo "Found no valid images to use!" + exit 1 + fi + IMAGE_UUID=${IMAGES[0]} + IMAGE_UUID_ALT=$IMAGE_UUID + if [[ $NUM_IMAGES -gt 1 ]]; then + IMAGE_UUID_ALT=${IMAGES[1]} + fi + + # Create tempest.conf from tempest.conf.sample + # copy every time, because the image UUIDS are going to change + cp $TEMPEST_CONF.sample $TEMPEST_CONF + + IDENTITY_USE_SSL=${IDENTITY_USE_SSL:-False} + IDENTITY_HOST=${IDENTITY_HOST:-127.0.0.1} + IDENTITY_PORT=${IDENTITY_PORT:-5000} + # TODO(jaypipes): This is dumb and needs to be removed + # from the Tempest configuration file entirely... + IDENTITY_PATH=${IDENTITY_PATH:-tokens} + + PASSWORD=${ADMIN_PASSWORD:-secrete} + + # See files/keystone_data.sh where alt_demo user + # and tenant are set up... + ALT_USERNAME=${ALT_USERNAME:-alt_demo} + ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo} + + # Check Nova for existing flavors and, if set, look for the + # DEFAULT_INSTANCE_TYPE and use that. Otherwise, just use the first flavor. + FLAVOR_LINES=`nova flavor-list` + IFS="$(echo -e "\n\r")" + FLAVORS="" + for line in $FLAVOR_LINES; do + if [ -z $DEFAULT_INSTANCE_TYPE ]; then + FLAVORS="$FLAVORS `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" + else + FLAVORS="$FLAVORS `echo $line | grep -v "^\(|\s*ID\|+--\)" | grep "$DEFAULT_INSTANCE_TYPE" | cut -d' ' -f2`" + fi + done + + IFS=" " + FLAVORS=($FLAVORS) + NUM_FLAVORS=${#FLAVORS[*]} + echo "Found $NUM_FLAVORS flavors" + if [[ $NUM_FLAVORS -eq 0 ]]; then + echo "Found no valid flavors to use!" 
+ exit 1 + fi + FLAVOR_REF=${FLAVORS[0]} + FLAVOR_REF_ALT=$FLAVOR_REF + if [[ $NUM_FLAVORS -gt 1 ]]; then + FLAVOR_REF_ALT=${FLAVORS[1]} + fi + + # Timeouts + iniset $TEMPEST_CONF compute build_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONF volume build_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONF boto build_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONF compute build_interval $BUILD_INTERVAL + iniset $TEMPEST_CONF volume build_interval $BUILD_INTERVAL + iniset $TEMPEST_CONF boto build_interval $BUILD_INTERVAL + iniset $TEMPEST_CONF boto http_socket_timeout 5 + + iniset $TEMPEST_CONF identity use_ssl $IDENTITY_USE_SSL + iniset $TEMPEST_CONF identity host $IDENTITY_HOST + iniset $TEMPEST_CONF identity port $IDENTITY_PORT + iniset $TEMPEST_CONF identity path $IDENTITY_PATH + + iniset $TEMPEST_CONF compute password "$PASSWORD" + iniset $TEMPEST_CONF compute alt_username $ALT_USERNAME + iniset $TEMPEST_CONF compute alt_password "$PASSWORD" + iniset $TEMPEST_CONF compute alt_tenant_name $ALT_TENANT_NAME + iniset $TEMPEST_CONF compute resize_available False + iniset $TEMPEST_CONF compute change_password_available False + iniset $TEMPEST_CONF compute compute_log_level ERROR + #Skip until #1074039 is fixed + iniset $TEMPEST_CONF compute run_ssh False + iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-$OS_USERNAME} + iniset $TEMPEST_CONF compute network_for_ssh private + iniset $TEMPEST_CONF compute ip_version_for_ssh 4 + iniset $TEMPEST_CONF compute ssh_timeout 4 + iniset $TEMPEST_CONF compute image_ref $IMAGE_UUID + iniset $TEMPEST_CONF compute image_ref_alt $IMAGE_UUID_ALT + iniset $TEMPEST_CONF compute flavor_ref $FLAVOR_REF + iniset $TEMPEST_CONF compute flavor_ref_alt $FLAVOR_REF_ALT + iniset $TEMPEST_CONF compute source_dir $NOVA_SOURCE_DIR + iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} + iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} + # Inherited behavior, might be wrong + iniset $TEMPEST_CONF compute bin_dir $NOVA_BIN_DIR + # TODO(jaypipes): Create the key file here... right now, no whitebox + # tests actually use a key. + iniset $TEMPEST_CONF compute path_to_private_key $TEMPEST_DIR/id_rsa + iniset $TEMPEST_CONF compute db_uri $BASE_SQL_CONN/nova + + # image + iniset $TEMPEST_CONF image host ${IMAGE_HOST:-127.0.0.1} + iniset $TEMPEST_CONF image port ${IMAGE_PORT:-9292} + iniset $TEMPEST_CONF image password "$PASSWORD" + + # identity-admin + iniset $TEMPEST_CONF "identity-admin" password "$PASSWORD" + + # compute admin + iniset $TEMPEST_CONF "compute-admin" password "$PASSWORD" + + # network + iniset $TEMPEST_CONF network api_version 2.0 + + #boto + iniset $TEMPEST_CONF boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" + iniset $TEMPEST_CONF boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}" + + echo "Created tempest configuration file:" + cat $TEMPEST_CONF + + # Restore IFS + IFS=$ifs + #Restore errexit + $errexit } @@ -55,6 +226,5 @@ function install_tempest() { pip_install -r $TEMPEST_DIR/tools/pip-requires } - # Restore xtrace $XTRACE diff --git a/stack.sh b/stack.sh index 94283563..c4f26f42 100755 --- a/stack.sh +++ b/stack.sh @@ -1713,7 +1713,11 @@ fi # Configure Tempest last to ensure that the runtime configuration of # the various OpenStack services can be queried. 
if is_service_enabled tempest; then + echo_summary "Configuring Tempest" configure_tempest + echo '**************************************************' + echo_summary "Finished Configuring Tempest" + echo '**************************************************' fi diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh index 298fa9ba..09241808 100755 --- a/tools/configure_tempest.sh +++ b/tools/configure_tempest.sh @@ -1,309 +1,3 @@ #!/usr/bin/env bash -# -# **configure_tempest.sh** -# Build a tempest configuration file from devstack - -echo "**************************************************" -echo "Configuring Tempest" -echo "**************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occured. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - -function usage { - echo "$0 - Build tempest.conf" - echo "" - echo "Usage: $0" - exit 1 -} - -if [ "$1" = "-h" ]; then - usage -fi - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $TOOLS_DIR/..; pwd) - -# Import common functions -. $TOP_DIR/functions - -# Abort if localrc is not set -if [ ! -e $TOP_DIR/localrc ]; then - echo "You must have a localrc with necessary basic configuration defined before proceeding." - exit 1 -fi - -# Abort if openrc is not set -if [ ! -e $TOP_DIR/openrc ]; then - echo "You must have an openrc with ALL necessary passwords and credentials defined before proceeding." - exit 1 -fi - -# Source params -source $TOP_DIR/lib/database -source $TOP_DIR/openrc - -# Where Openstack code lives -DEST=${DEST:-/opt/stack} - -NOVA_SOURCE_DIR=$DEST/nova -TEMPEST_DIR=$DEST/tempest -CONFIG_DIR=$TEMPEST_DIR/etc -TEMPEST_CONF=$CONFIG_DIR/tempest.conf - -DATABASE_TYPE=${DATABASE_TYPE:-mysql} -initialize_database_backends - -# Use the GUEST_IP unless an explicit IP is set by ``HOST_IP`` -HOST_IP=${HOST_IP:-$GUEST_IP} -# Use the first IP if HOST_IP still is not set -if [ ! -n "$HOST_IP" ]; then - HOST_IP=`LC_ALL=C /sbin/ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` -fi - -# Glance should already contain images to be used in tempest -# testing. Here we simply look for images stored in Glance -# and set the appropriate variables for use in the tempest config -# We ignore ramdisk and kernel images, look for the default image -# DEFAULT_IMAGE_NAME. If not found, we set the IMAGE_UUID to the -# first image returned and set IMAGE_UUID_ALT to the second, -# if there is more than one returned... -# ... Also ensure we only take active images, so we don't get snapshots in process -IMAGE_LINES=`glance image-list` -IFS="$(echo -e "\n\r")" -IMAGES="" -for line in $IMAGE_LINES; do - if [ -z $DEFAULT_IMAGE_NAME ]; then - IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | cut -d' ' -f2`" - else - IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | grep "$DEFAULT_IMAGE_NAME" | cut -d' ' -f2`" - fi -done -# Create array of image UUIDs... -IFS=" " -IMAGES=($IMAGES) -NUM_IMAGES=${#IMAGES[*]} -echo "Found $NUM_IMAGES images" -if [[ $NUM_IMAGES -eq 0 ]]; then - echo "Found no valid images to use!" 
- exit 1 -fi -IMAGE_UUID=${IMAGES[0]} -IMAGE_UUID_ALT=$IMAGE_UUID -if [[ $NUM_IMAGES -gt 1 ]]; then - IMAGE_UUID_ALT=${IMAGES[1]} -fi - -# Create tempest.conf from tempest.conf.tpl -# copy every time, because the image UUIDS are going to change -cp $TEMPEST_CONF.tpl $TEMPEST_CONF - -COMPUTE_ADMIN_USERNAME=${ADMIN_USERNAME:-admin} -COMPUTE_ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete} -COMPUTE_ADMIN_TENANT_NAME=${ADMIN_TENANT:-admin} - -IDENTITY_ADMIN_USERNAME=${ADMIN_USERNAME:-admin} -IDENTITY_ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete} -IDENTITY_ADMIN_TENANT_NAME=${ADMIN_TENANT:-admin} - -IDENTITY_USE_SSL=${IDENTITY_USE_SSL:-False} -IDENTITY_HOST=${IDENTITY_HOST:-127.0.0.1} -IDENTITY_PORT=${IDENTITY_PORT:-5000} -IDENTITY_API_VERSION="v2.0" # Note: need v for now... -# TODO(jaypipes): This is dumb and needs to be removed -# from the Tempest configuration file entirely... -IDENTITY_PATH=${IDENTITY_PATH:-tokens} -IDENTITY_STRATEGY=${IDENTITY_STRATEGY:-keystone} -IDENTITY_CATALOG_TYPE=identity - -# We use regular, non-admin users in Tempest for the USERNAME -# substitutions and use ADMIN_USERNAME et al for the admin stuff. -# OS_USERNAME et all should be defined in openrc. -OS_USERNAME=${OS_USERNAME:-demo} -OS_TENANT_NAME=${OS_TENANT_NAME:-demo} -OS_PASSWORD=${OS_PASSWORD:-$ADMIN_PASSWORD} - -# See files/keystone_data.sh where alt_demo user -# and tenant are set up... -ALT_USERNAME=${ALT_USERNAME:-alt_demo} -ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo} -ALT_PASSWORD=$OS_PASSWORD - -# Check Nova for existing flavors and, if set, look for the -# DEFAULT_INSTANCE_TYPE and use that. Otherwise, just use the first flavor. -FLAVOR_LINES=`nova flavor-list` -IFS="$(echo -e "\n\r")" -FLAVORS="" -for line in $FLAVOR_LINES; do - if [ -z $DEFAULT_INSTANCE_TYPE ]; then - FLAVORS="$FLAVORS `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" - else - FLAVORS="$FLAVORS `echo $line | grep -v "^\(|\s*ID\|+--\)" | grep "$DEFAULT_INSTANCE_TYPE" | cut -d' ' -f2`" - fi -done -IFS=" " -FLAVORS=($FLAVORS) -NUM_FLAVORS=${#FLAVORS[*]} -echo "Found $NUM_FLAVORS flavors" -if [[ $NUM_FLAVORS -eq 0 ]]; then - echo "Found no valid flavors to use!" - exit 1 -fi -FLAVOR_REF=${FLAVORS[0]} -FLAVOR_REF_ALT=$FLAVOR_REF -if [[ $NUM_FLAVORS -gt 1 ]]; then - FLAVOR_REF_ALT=${FLAVORS[1]} -fi - -# Do any of the following need to be configurable? -COMPUTE_CATALOG_TYPE=compute -COMPUTE_CREATE_IMAGE_ENABLED=True -COMPUTE_ALLOW_TENANT_ISOLATION=True -COMPUTE_ALLOW_TENANT_REUSE=True -COMPUTE_RESIZE_AVAILABLE=False -COMPUTE_CHANGE_PASSWORD_AVAILABLE=False # not supported with QEMU... -COMPUTE_LOG_LEVEL=ERROR -BUILD_INTERVAL=3 -BUILD_TIMEOUT=400 -COMPUTE_BUILD_INTERVAL=3 -COMPUTE_BUILD_TIMEOUT=400 -VOLUME_BUILD_INTERVAL=3 -VOLUME_BUILD_TIMEOUT=300 -RUN_SSH=True -# Check for DEFAULT_INSTANCE_USER and try to connect with that account -SSH_USER=${DEFAULT_INSTANCE_USER:-$OS_USERNAME} -NETWORK_FOR_SSH=private -IP_VERSION_FOR_SSH=4 -SSH_TIMEOUT=4 -# Whitebox testing configuration for Compute... -COMPUTE_WHITEBOX_ENABLED=True -COMPUTE_SOURCE_DIR=$NOVA_SOURCE_DIR -COMPUTE_BIN_DIR=$NOVA_BIN_DIR -COMPUTE_CONFIG_PATH=/etc/nova/nova.conf -# TODO(jaypipes): Create the key file here... right now, no whitebox -# tests actually use a key. -COMPUTE_PATH_TO_PRIVATE_KEY=$TEMPEST_DIR/id_rsa -COMPUTE_DB_URI=$BASE_SQL_CONN/nova - -# Image test configuration options... 
-IMAGE_HOST=${IMAGE_HOST:-127.0.0.1} -IMAGE_PORT=${IMAGE_PORT:-9292} -IMAGE_API_VERSION=1 -IMAGE_CATALOG_TYPE=image - -# Network API test configuration -NETWORK_CATALOG_TYPE=network -NETWORK_API_VERSION=2.0 - -# Volume API test configuration -VOLUME_CATALOG_TYPE=volume - -# Live migration -LIVE_MIGRATION_AVAILABLE=${LIVE_MIGRATION_AVAILABLE:-False} -USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION=${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} - -# Object Storage -OBJECT_CATALOG_TYPE="object-store" - -# EC2 and S3 test configuration -BOTO_EC2_URL="http://$IDENTITY_HOST:8773/services/Cloud" -BOTO_S3_URL="http://$IDENTITY_HOST:3333" -BOTO_AWS_ACCESS="" # Created in tempest... -BOTO_AWS_SECRET="" # Created in tempest... -BOTO_AWS_REGION="RegionOne" -BOTO_S3_MATERIALS_PATH=$DEST/devstack/files/images/s3-materials/cirros-0.3.0 -BOTO_ARI_MANIFEST=cirros-0.3.0-x86_64-initrd.manifest.xml -BOTO_AMI_MANIFEST=cirros-0.3.0-x86_64-blank.img.manifest.xml -BOTO_AKI_MANIFEST=cirros-0.3.0-x86_64-vmlinuz.manifest.xml -BOTO_FLAVOR_NAME=m1.tiny -BOTO_SOCKET_TIMEOUT=5 -BOTO_BUILD_TIMEOUT=${COMPUTE_BUILD_TIMEOUT:-400} -BOTO_BUILD_INTERVAL=${COMPUTE_BUILD_INTERVAL:-3} - -sed -e " - s,%IDENTITY_USE_SSL%,$IDENTITY_USE_SSL,g; - s,%IDENTITY_HOST%,$IDENTITY_HOST,g; - s,%IDENTITY_PORT%,$IDENTITY_PORT,g; - s,%IDENTITY_API_VERSION%,$IDENTITY_API_VERSION,g; - s,%IDENTITY_PATH%,$IDENTITY_PATH,g; - s,%IDENTITY_STRATEGY%,$IDENTITY_STRATEGY,g; - s,%IDENTITY_CATALOG_TYPE%,$IDENTITY_CATALOG_TYPE,g; - s,%USERNAME%,$OS_USERNAME,g; - s,%PASSWORD%,$OS_PASSWORD,g; - s,%TENANT_NAME%,$OS_TENANT_NAME,g; - s,%ALT_USERNAME%,$ALT_USERNAME,g; - s,%ALT_PASSWORD%,$ALT_PASSWORD,g; - s,%ALT_TENANT_NAME%,$ALT_TENANT_NAME,g; - s,%COMPUTE_CATALOG_TYPE%,$COMPUTE_CATALOG_TYPE,g; - s,%COMPUTE_ALLOW_TENANT_ISOLATION%,$COMPUTE_ALLOW_TENANT_ISOLATION,g; - s,%COMPUTE_ALLOW_TENANT_REUSE%,$COMPUTE_ALLOW_TENANT_REUSE,g; - s,%COMPUTE_CREATE_IMAGE_ENABLED%,$COMPUTE_CREATE_IMAGE_ENABLED,g; - s,%COMPUTE_RESIZE_AVAILABLE%,$COMPUTE_RESIZE_AVAILABLE,g; - s,%COMPUTE_CHANGE_PASSWORD_AVAILABLE%,$COMPUTE_CHANGE_PASSWORD_AVAILABLE,g; - s,%COMPUTE_WHITEBOX_ENABLED%,$COMPUTE_WHITEBOX_ENABLED,g; - s,%COMPUTE_LOG_LEVEL%,$COMPUTE_LOG_LEVEL,g; - s,%BUILD_INTERVAL%,$BUILD_INTERVAL,g; - s,%BUILD_TIMEOUT%,$BUILD_TIMEOUT,g; - s,%COMPUTE_BUILD_INTERVAL%,$COMPUTE_BUILD_INTERVAL,g; - s,%COMPUTE_BUILD_TIMEOUT%,$COMPUTE_BUILD_TIMEOUT,g; - s,%RUN_SSH%,$RUN_SSH,g; - s,%SSH_USER%,$SSH_USER,g; - s,%NETWORK_FOR_SSH%,$NETWORK_FOR_SSH,g; - s,%IP_VERSION_FOR_SSH%,$IP_VERSION_FOR_SSH,g; - s,%SSH_TIMEOUT%,$SSH_TIMEOUT,g; - s,%IMAGE_ID%,$IMAGE_UUID,g; - s,%IMAGE_ID_ALT%,$IMAGE_UUID_ALT,g; - s,%FLAVOR_REF%,$FLAVOR_REF,g; - s,%FLAVOR_REF_ALT%,$FLAVOR_REF_ALT,g; - s,%COMPUTE_CONFIG_PATH%,$COMPUTE_CONFIG_PATH,g; - s,%COMPUTE_SOURCE_DIR%,$COMPUTE_SOURCE_DIR,g; - s,%COMPUTE_BIN_DIR%,$COMPUTE_BIN_DIR,g; - s,%COMPUTE_PATH_TO_PRIVATE_KEY%,$COMPUTE_PATH_TO_PRIVATE_KEY,g; - s,%COMPUTE_DB_URI%,$COMPUTE_DB_URI,g; - s,%IMAGE_HOST%,$IMAGE_HOST,g; - s,%IMAGE_PORT%,$IMAGE_PORT,g; - s,%IMAGE_API_VERSION%,$IMAGE_API_VERSION,g; - s,%IMAGE_CATALOG_TYPE%,$IMAGE_CATALOG_TYPE,g; - s,%COMPUTE_ADMIN_USERNAME%,$COMPUTE_ADMIN_USERNAME,g; - s,%COMPUTE_ADMIN_PASSWORD%,$COMPUTE_ADMIN_PASSWORD,g; - s,%COMPUTE_ADMIN_TENANT_NAME%,$COMPUTE_ADMIN_TENANT_NAME,g; - s,%IDENTITY_ADMIN_USERNAME%,$IDENTITY_ADMIN_USERNAME,g; - s,%IDENTITY_ADMIN_PASSWORD%,$IDENTITY_ADMIN_PASSWORD,g; - s,%IDENTITY_ADMIN_TENANT_NAME%,$IDENTITY_ADMIN_TENANT_NAME,g; - s,%NETWORK_CATALOG_TYPE%,$NETWORK_CATALOG_TYPE,g; - 
s,%NETWORK_API_VERSION%,$NETWORK_API_VERSION,g; - s,%VOLUME_CATALOG_TYPE%,$VOLUME_CATALOG_TYPE,g; - s,%VOLUME_BUILD_INTERVAL%,$VOLUME_BUILD_INTERVAL,g; - s,%VOLUME_BUILD_TIMEOUT%,$VOLUME_BUILD_TIMEOUT,g; - s,%LIVE_MIGRATION_AVAILABLE%,$LIVE_MIGRATION_AVAILABLE,g; - s,%USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION%,$USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION,g; - s,%OBJECT_CATALOG_TYPE%,$OBJECT_CATALOG_TYPE,g; - s,%BOTO_EC2_URL%,$BOTO_EC2_URL,g; - s,%BOTO_S3_URL%,$BOTO_S3_URL,g; - s,%BOTO_AWS_ACCESS%,$BOTO_AWS_ACCESS,g; - s,%BOTO_AWS_SECRET%,$BOTO_AWS_SECRET,g; - s,%BOTO_AWS_REGION%,$BOTO_AWS_REGION,g; - s,%BOTO_S3_MATERIALS_PATH%,$BOTO_S3_MATERIALS_PATH,g; - s,%BOTO_ARI_MANIFEST%,$BOTO_ARI_MANIFEST,g; - s,%BOTO_AMI_MANIFEST%,$BOTO_AMI_MANIFEST,g; - s,%BOTO_AKI_MANIFEST%,$BOTO_AKI_MANIFEST,g; - s,%BOTO_FLAVOR_NAME%,$BOTO_FLAVOR_NAME,g; - s,%BOTO_SOCKET_TIMEOUT%,$BOTO_SOCKET_TIMEOUT,g; - s,%BOTO_BUILD_TIMEOUT%,$BOTO_BUILD_TIMEOUT,g; - s,%BOTO_BUILD_INTERVAL%,$BOTO_BUILD_INTERVAL,g; -" -i $TEMPEST_CONF - -echo "Created tempest configuration file:" -cat $TEMPEST_CONF - -echo "\n" -echo "**************************************************" -echo "Finished Configuring Tempest" -echo "**************************************************" +echo "$0 is scheduled for delete!!" >&2 From 53d3d6baf93572618633ac79a6d1f594bc199837 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Thu, 6 Dec 2012 15:49:17 +0000 Subject: [PATCH 818/967] Fix XenAPINFS configuration fixes bug 1087272 Trying to configure XenAPINFS volume driver through localrc options failed. This fix removes the extra check, as lib/cinder already exits on error. Change-Id: I874b7cee44861244cb7a340cc4094ef3f8b48a5a --- lib/cinder | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index ce160bf0..d47c83a4 100644 --- a/lib/cinder +++ b/lib/cinder @@ -159,7 +159,6 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" ) - [ $? -ne 0 ] && exit 1 fi } From b79574b4954406d6d9e65ce5b1fb8d07678e7128 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sat, 1 Dec 2012 10:42:46 +0100 Subject: [PATCH 819/967] Name the tgt/conf.d enties based on the vg name On one system multiple volume and volume manger could be installed and needs dedicated tgt config entries. cinder-volumes, stack-volumes, nova-volumes are the default volume group names. /etc/tgt/conf.d/ files should be named based on the volume-group name. The vg name is uniq on one system. In devstack case the stack.conf is usable. Changes: * Rename conf.d/cinder.conf to conf.d/stack.conf * Handle conf.d similary on all distribution Change-Id: I856cdf4a21a414d2940d8f9d8b0b0368b1fad887 --- lib/cinder | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/lib/cinder b/lib/cinder index ce160bf0..039c5cba 100644 --- a/lib/cinder +++ b/lib/cinder @@ -9,12 +9,12 @@ # stack.sh # --------- -# install_XXX -# configure_XXX -# init_XXX -# start_XXX -# stop_XXX -# cleanup_XXX +# install_cinder +# configure_cinder +# init_cinder +# start_cinder +# stop_cinder +# cleanup_cinder # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -229,7 +229,7 @@ function install_cinder() { # apply config.d approach (e.g. Oneiric does not have this) function _configure_tgt_for_config_d() { if [[ ! 
-d /etc/tgt/conf.d/ ]]; then - sudo mkdir /etc/tgt/conf.d + sudo mkdir -p /etc/tgt/conf.d echo "include /etc/tgt/conf.d/*.conf" | sudo tee -a /etc/tgt/targets.conf fi } @@ -237,11 +237,11 @@ function _configure_tgt_for_config_d() { # start_cinder() - Start running processes, including screen function start_cinder() { if is_service_enabled c-vol; then + _configure_tgt_for_config_d + if [[ ! -f /etc/tgt/conf.d/stack.conf ]]; then + echo "include $CINDER_STATE_PATH/volumes/*" | sudo tee /etc/tgt/conf.d/stack.conf + fi if is_ubuntu; then - _configure_tgt_for_config_d - if [[ ! -f /etc/tgt/conf.d/cinder.conf ]]; then - echo "include $CINDER_STATE_PATH/volumes/*" | sudo tee /etc/tgt/conf.d/cinder.conf - fi # tgt in oneiric doesn't restart properly if tgtd isn't running # do it in two steps sudo stop tgt || true From e5eee5834b4ae62857830a7a0266df76ec640b2d Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 6 Dec 2012 09:47:53 -0500 Subject: [PATCH 820/967] Add a timestamp to the log output for better diagnosis when we are looking at logs generated by the gate jobs we need timestamps on the output of stack.sh so we can figure out what was being executed around the time when there was a problem in say nova-network Change-Id: I203e8dae97715d6ee46a4088c7577b9be66cf09d --- stack.sh | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index b38c5791..154fd76f 100755 --- a/stack.sh +++ b/stack.sh @@ -598,7 +598,15 @@ if [[ -n "$LOGFILE" ]]; then exec 3>&1 if [[ "$VERBOSE" == "True" ]]; then # Redirect stdout/stderr to tee to write the log file - exec 1> >( tee "${LOGFILE}" ) 2>&1 + exec 1> >( awk ' + { + cmd ="date +\"%Y-%m-%d %H:%M:%S \"" + cmd | getline now + close("date +\"%Y-%m-%d %H:%M:%S \"") + sub(/^/, now) + print + fflush() + }' | tee "${LOGFILE}" ) 2>&1 # Set up a second fd for output exec 6> >( tee "${SUMFILE}" ) else From a0ca45f17379b76aaa8d58cb3bc26b2c64dba689 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Thu, 6 Dec 2012 17:45:49 +0000 Subject: [PATCH 821/967] Fix XenAPINFSDriver's path Fixes bug 1087329 As the driver was moved to a different location, devstack script needed an update. 
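For reference, a minimal localrc sketch for exercising this driver; only the variable names come from lib/cinder, while the XenServer URL, credentials and NFS export below are placeholders:

CINDER_DRIVER=XenAPINFS
CINDER_XENAPI_CONNECTION_URL=http://xenserver.example.org
CINDER_XENAPI_CONNECTION_USERNAME=root
CINDER_XENAPI_CONNECTION_PASSWORD=secrete
CINDER_XENAPI_NFS_SERVER=nfs.example.org
CINDER_XENAPI_NFS_SERVERPATH=/export/cinder

configure_cinder() copies these into cinder.conf with iniset when CINDER_DRIVER is set to XenAPINFS.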
Change-Id: Iaa1db94a84b6e9cb99514ce886025600809e9f29 --- lib/cinder | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index d47c83a4..5d2c5961 100644 --- a/lib/cinder +++ b/lib/cinder @@ -152,7 +152,7 @@ function configure_cinder() { if [ "$CINDER_DRIVER" == "XenAPINFS" ]; then ( set -u - iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.xenapi_sm.XenAPINFSDriver" + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver" iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL" iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME" iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD" From 72cffd56bf68afcb4ca087f6ed852f875b3bc90e Mon Sep 17 00:00:00 2001 From: John Garbutt Date: Tue, 4 Dec 2012 16:14:04 +0000 Subject: [PATCH 822/967] Make the standard devstack logging work with XenServer Fixes bug 1087387 Stop redirecting the output in devstack to ensure when you configure LOGFILE that the devstack scripts keep working Change-Id: I00dce315f5f79c4fc351d9ab11c504274d998fce --- tools/xen/build_xva.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index 9eae1903..c359c558 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -66,7 +66,7 @@ cat <$STAGING_DIR/etc/rc.local # network restart required for getting the right gateway /etc/init.d/networking restart chown -R stack /opt/stack -su -c "/opt/stack/run.sh > /opt/stack/run.sh.log 2>&1" stack +su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" stack exit 0 EOF From a784748527404f59e2c920c889e7958c6532f408 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Tue, 4 Dec 2012 16:06:52 -0500 Subject: [PATCH 823/967] Enable the correct instance managers for instance/floating ips. Currently the default is nova.network.dns_driver.DNSDriver for both. We need to switch to nova.network.minidns.MiniDNS for both instance_dns_manager and floating_ip_dns_manager. nova.network.dns_driver.DNSDriver is just the interface, we need a good implementation as the default Fixes LP #1040236 Change-Id: If6e65cb1c7802b1ba0c1e64d4c06185cabf9eeca --- stack.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stack.sh b/stack.sh index cddb64e0..b3e858f1 100755 --- a/stack.sh +++ b/stack.sh @@ -1478,6 +1478,8 @@ if is_service_enabled nova; then fi elif is_service_enabled n-net; then add_nova_opt "network_manager=nova.network.manager.$NET_MAN" + add_nova_opt "instance_dns_manager=nova.network.minidns.MiniDNS" + add_nova_opt "floating_ip_dns_manager=nova.network.minidns.MiniDNS" add_nova_opt "public_interface=$PUBLIC_INTERFACE" add_nova_opt "vlan_interface=$VLAN_INTERFACE" add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE" From 6994296bf64f8b07db7e970b53691502d5341298 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Fri, 7 Dec 2012 08:36:14 +0100 Subject: [PATCH 824/967] Remove tools/configure_tempest.sh Related bug #1087203 Change-Id: I2df4601197214d9d50d86876d4a2892b3421217a --- tools/configure_tempest.sh | 3 --- 1 file changed, 3 deletions(-) delete mode 100755 tools/configure_tempest.sh diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh deleted file mode 100755 index 09241808..00000000 --- a/tools/configure_tempest.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash - -echo "$0 is scheduled for delete!!" 
>&2 From 00011c0847a9972b78051954e272f54e9d07ef51 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 6 Dec 2012 09:56:32 +0100 Subject: [PATCH 825/967] Add is_fedora and exit_distro_not_supported functions Between is_fedora, is_ubuntu and is_suse, we can make the code a bit simpler to read. We also use exit_distro_not_supported to identify places where we need implementation details for new distros. As "/sbin/service --skip-redirect" is Fedora-specific, guard this with a is_fedora test too. Change-Id: Ic77c0697ed9be0dbb5df8e73da93463e76025f0c --- functions | 55 +++++++++++++++++++++++++++++++--------- lib/cinder | 12 ++++++--- lib/databases/mysql | 30 ++++++++++++++-------- lib/databases/postgresql | 8 +++--- lib/horizon | 29 +++++++++++---------- lib/nova | 8 +++--- stack.sh | 34 +++++++++++++++---------- tests/functions.sh | 8 ++++-- tools/info.sh | 8 ++++-- 9 files changed, 128 insertions(+), 64 deletions(-) diff --git a/functions b/functions index 0911557f..3ee43d3d 100644 --- a/functions +++ b/functions @@ -354,6 +354,18 @@ function is_ubuntu { } +# Determine if current distribution is a Fedora-based distribution +# (Fedora, RHEL, CentOS). +# is_fedora +function is_fedora { + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || [ "$os_VENDOR" = "CentOS" ] +} + + # Determine if current distribution is a SUSE-based distribution # (openSUSE, SLE). # is_suse @@ -366,6 +378,23 @@ function is_suse { } +# Exit after outputting a message about the distribution not being supported. +# exit_distro_not_supported [optional-string-telling-what-is-missing] +function exit_distro_not_supported { + if [[ -z "$DISTRO" ]]; then + GetDistro + fi + + if [ $# -gt 0 ]; then + echo "Support for $DISTRO is incomplete: no support for $@" + else + echo "Support for $DISTRO is incomplete." + fi + + exit 1 +} + + # git clone only if directory doesn't exist already. Since ``DEST`` might not # be owned by the installation user, we create the directory and change the # ownership to the proper user. @@ -598,12 +627,12 @@ function install_package() { NO_UPDATE_REPOS=True apt_get install "$@" + elif is_fedora; then + yum_install "$@" + elif is_suse; then + zypper_install "$@" else - if is_suse; then - zypper_install "$@" - else - yum_install "$@" - fi + exit_distro_not_supported "installing packages" fi } @@ -622,9 +651,11 @@ function is_package_installed() { if [[ "$os_PACKAGE" = "deb" ]]; then dpkg -l "$@" > /dev/null return $? - else + elif [[ "$os_PACKAGE" = "rpm" ]]; then rpm --quiet -q "$@" return $? + else + exit_distro_not_supported "finding if a package is installed" fi } @@ -1032,20 +1063,20 @@ function add_user_to_group() { function get_rootwrap_location() { local module=$1 - if is_ubuntu || is_suse; then - echo "/usr/local/bin/$module-rootwrap" - else + if is_fedora; then echo "/usr/bin/$module-rootwrap" + else + echo "/usr/local/bin/$module-rootwrap" fi } # Get the path to the pip command. 
# get_pip_command function get_pip_command() { - if is_ubuntu || is_suse; then - echo "/usr/bin/pip" - else + if is_fedora; then echo "/usr/bin/pip-python" + else + echo "/usr/bin/pip" fi } diff --git a/lib/cinder b/lib/cinder index 9b9d50d1..a43f0a16 100644 --- a/lib/cinder +++ b/lib/cinder @@ -195,8 +195,8 @@ function init_cinder() { mkdir -p $CINDER_STATE_PATH/volumes if sudo vgs $VOLUME_GROUP; then - if [[ "$os_PACKAGE" = "rpm" ]]; then - # RPM doesn't start the service + if is_fedora || is_suse; then + # service is not started by default start_service tgtd fi @@ -245,9 +245,15 @@ function start_cinder() { # do it in two steps sudo stop tgt || true sudo start tgt - else + elif is_fedora; then # bypass redirection to systemctl during restart sudo /sbin/service --skip-redirect tgtd restart + elif is_suse; then + restart_service tgtd + else + # note for other distros: unstack.sh also uses the tgt/tgtd service + # name, and would need to be adjusted too + exit_distro_not_supported "restarting tgt" fi fi diff --git a/lib/databases/mysql b/lib/databases/mysql index 60ea143f..68e9adc5 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -23,22 +23,28 @@ function configure_database_mysql { if is_ubuntu; then MY_CONF=/etc/mysql/my.cnf MYSQL=mysql - else + elif is_fedora; then + MY_CONF=/etc/my.cnf + MYSQL=mysqld + elif is_suse; then MY_CONF=/etc/my.cnf - if is_suse; then - MYSQL=mysql - else - MYSQL=mysqld - fi + MYSQL=mysql + else + exit_distro_not_supported "mysql configuration" fi # Start mysql-server - if [[ "$os_PACKAGE" = "rpm" ]]; then - # RPM doesn't start the service + if is_fedora || is_suse; then + # service is not started by default start_service $MYSQL - # Set the root password - only works the first time + fi + + # Set the root password - only works the first time. For Ubuntu, we already + # did that with debconf before installing the package. + if ! 
is_ubuntu; then sudo mysqladmin -u root password $DATABASE_PASSWORD || true fi + # Update the DB to give user ‘$DATABASE_USER’@’%’ full control of the all databases: sudo mysql -uroot -p$DATABASE_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" @@ -84,10 +90,12 @@ EOF chmod 0600 $HOME/.my.cnf fi # Install mysql-server - if is_suse; then + if is_ubuntu || is_fedora; then + install_package mysql-server + elif is_suse; then install_package mysql-community-server else - install_package mysql-server + exit_distro_not_supported "mysql installation" fi } diff --git a/lib/databases/postgresql b/lib/databases/postgresql index d9c2f00c..20ade857 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -20,7 +20,7 @@ function recreate_database_postgresql { function configure_database_postgresql { echo_summary "Configuring and starting PostgreSQL" - if [[ "$os_PACKAGE" = "rpm" ]]; then + if is_fedora || is_suse; then PG_HBA=/var/lib/pgsql/data/pg_hba.conf PG_CONF=/var/lib/pgsql/data/postgresql.conf sudo [ -e $PG_HBA ] || sudo postgresql-setup initdb @@ -53,10 +53,12 @@ EOF else sed -i "s/:root:\w\+/:root:$DATABASE_PASSWORD/" $PGPASS fi - if [[ "$os_PACKAGE" = "rpm" ]]; then + if is_ubuntu; then + install_package postgresql + elif is_fedora || is_suse; then install_package postgresql-server else - install_package postgresql + exit_distro_not_supported "postgresql installation" fi } diff --git a/lib/horizon b/lib/horizon index 7321cbcc..68337ab8 100644 --- a/lib/horizon +++ b/lib/horizon @@ -81,19 +81,18 @@ function init_horizon() { sudo a2ensite horizon # WSGI doesn't enable by default, enable it sudo a2enmod wsgi + elif is_fedora; then + APACHE_NAME=httpd + APACHE_CONF=conf.d/horizon.conf + sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf + elif is_suse; then + APACHE_NAME=apache2 + APACHE_CONF=vhosts.d/horizon.conf + # Append wsgi to the list of modules to load + grep -q "^APACHE_MODULES=.*wsgi" /etc/sysconfig/apache2 || + sudo sed '/^APACHE_MODULES=/s/^\(.*\)"$/\1 wsgi"/' -i /etc/sysconfig/apache2 else - # Install httpd, which is NOPRIME'd - if is_suse; then - APACHE_NAME=apache2 - APACHE_CONF=vhosts.d/horizon.conf - # Append wsgi to the list of modules to load - grep -q "^APACHE_MODULES=.*wsgi" /etc/sysconfig/apache2 || - sudo sed '/^APACHE_MODULES=/s/^\(.*\)"$/\1 wsgi"/' -i /etc/sysconfig/apache2 - else - APACHE_NAME=httpd - APACHE_CONF=conf.d/horizon.conf - sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf - fi + exit_distro_not_supported "apache configuration" fi # Configure apache to run horizon @@ -113,11 +112,13 @@ function install_horizon() { if is_ubuntu; then # Install apache2, which is NOPRIME'd install_package apache2 libapache2-mod-wsgi + elif is_fedora; then + sudo rm -f /etc/httpd/conf.d/000-* + install_package httpd mod_wsgi elif is_suse; then install_package apache2 apache2-mod_wsgi else - sudo rm -f /etc/httpd/conf.d/000-* - install_package httpd mod_wsgi + exit_distro_not_supported "apache installation" fi # NOTE(sdague) quantal changed the name of the node binary diff --git a/lib/nova b/lib/nova index 3a4d34d8..8272ef0d 100644 --- a/lib/nova +++ b/lib/nova @@ -394,11 +394,13 @@ function install_novaclient() { function install_nova() { if is_service_enabled n-cpu; then if is_ubuntu; then - LIBVIRT_PKG_NAME=libvirt-bin + install_package libvirt-bin + elif is_fedora || is_suse; then + install_package libvirt else - LIBVIRT_PKG_NAME=libvirt + 
exit_distro_not_supported "libvirt installation" fi - install_package $LIBVIRT_PKG_NAME + # Install and configure **LXC** if specified. LXC is another approach to # splitting a system into many smaller parts. LXC uses cgroups and chroot # to simulate multiple systems. diff --git a/stack.sh b/stack.sh index cddb64e0..6483de3b 100755 --- a/stack.sh +++ b/stack.sh @@ -678,17 +678,21 @@ set -o xtrace echo_summary "Installing package prerequisites" if is_ubuntu; then install_package $(get_packages $FILES/apts) +elif is_fedora; then + install_package $(get_packages $FILES/rpms) elif is_suse; then install_package $(get_packages $FILES/rpms-suse) else - install_package $(get_packages $FILES/rpms) + exit_distro_not_supported "list of packages" fi if [[ $SYSLOG != "False" ]]; then - if is_suse; then + if is_ubuntu || is_fedora; then + install_package rsyslog-relp + elif is_suse; then install_package rsyslog-module-relp else - install_package rsyslog-relp + exit_distro_not_supported "rsyslog-relp installation" fi fi @@ -700,20 +704,22 @@ if is_service_enabled rabbit; then cat "$tfile" rm -f "$tfile" elif is_service_enabled qpid; then - if [[ "$os_PACKAGE" = "rpm" ]]; then + if is_fedora; then install_package qpid-cpp-server-daemon - else + elif is_ubuntu; then install_package qpidd + else + exit_distro_not_supported "qpid installation" fi elif is_service_enabled zeromq; then - if [[ "$os_PACKAGE" = "rpm" ]]; then - if is_suse; then - install_package libzmq1 python-pyzmq - else - install_package zeromq python-zmq - fi - else + if is_fedora; then + install_package zeromq python-zmq + elif is_ubuntu; then install_package libzmq1 python-zmq + elif is_suse; then + install_package libzmq1 python-pyzmq + else + exit_distro_not_supported "zeromq installation" fi fi @@ -909,8 +915,8 @@ fi if is_service_enabled rabbit; then # Start rabbitmq-server echo_summary "Starting RabbitMQ" - if [[ "$os_PACKAGE" = "rpm" ]]; then - # RPM doesn't start the service + if is_fedora || is_suse; then + # service is not started by default restart_service rabbitmq-server fi # change the rabbit password since the default is "guest" diff --git a/tests/functions.sh b/tests/functions.sh index d2cc5c44..be48729f 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -260,9 +260,11 @@ fi if [[ "$os_PACKAGE" = "deb" ]]; then is_package_installed dpkg VAL=$? -else +elif [[ "$os_PACKAGE" = "rpm" ]]; then is_package_installed rpm VAL=$? +else + VAL=1 fi if [[ "$VAL" -eq 0 ]]; then echo "OK" @@ -273,9 +275,11 @@ fi if [[ "$os_PACKAGE" = "deb" ]]; then is_package_installed dpkg bash VAL=$? -else +elif [[ "$os_PACKAGE" = "rpm" ]]; then is_package_installed rpm bash VAL=$? 
+else + VAL=1 fi if [[ "$VAL" -eq 0 ]]; then echo "OK" diff --git a/tools/info.sh b/tools/info.sh index 583a9949..f01dbea0 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -90,15 +90,19 @@ done if is_ubuntu; then PKG_DIR=$FILES/apts -else +elif is_fedora; then PKG_DIR=$FILES/rpms +else + exit_distro_not_supported "list of packages" fi for p in $(get_packages $PKG_DIR); do if [[ "$os_PACKAGE" = "deb" ]]; then ver=$(dpkg -s $p 2>/dev/null | grep '^Version: ' | cut -d' ' -f2) - else + elif [[ "$os_PACKAGE" = "rpm" ]]; then ver=$(rpm -q --queryformat "%{VERSION}-%{RELEASE}\n" $p) + else + exit_distro_not_supported "finding version of a package" fi echo "pkg|${p}|${ver}" done From 1e32d0ab191bfe8a8c89580b9f84fe38ded7af0a Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Fri, 7 Dec 2012 12:46:15 +0000 Subject: [PATCH 826/967] exercises/euca: Fix volume timeout Fixes bug 1087656 In euca exercise, the timeout for one of the volume operations was specified as ASSOCIATE_TIMEOUT, whereas the timeout error message was mentioning RUNNING_TIMEOUT. This fix changes the timeout to RUNNING_TIMEOUT so that it is consistent with the error message. As RUNNING is usually larger than ASSOCIATE, it leaves more time for the volume operation. Change-Id: Ic016c7920ae6e4ec9a476bb5612b7df9eed01c75 --- exercises/euca.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index 67da1bee..982653ef 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -90,7 +90,7 @@ if [[ "$ENABLED_SERVICES" =~ "c-vol" ]]; then die_if_not_set VOLUME "Failure to get volume" # Test volume has become available - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then + if ! timeout $RUNNING_TIMEOUT sh -c "while ! 
euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then echo "volume didnt become available within $RUNNING_TIMEOUT seconds" exit 1 fi From 65c0846e379ba629fcc389486057322d5e30b34a Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Fri, 7 Dec 2012 14:20:51 +0100 Subject: [PATCH 827/967] Local variable cosmetic changes in lib/tempest Change-Id: I5e83531c32968bc734abb0f9a8d03e2f9500a074 --- lib/tempest | 121 +++++++++++++++++++++++++++------------------------- 1 file changed, 63 insertions(+), 58 deletions(-) diff --git a/lib/tempest b/lib/tempest index 606f05ec..7fa15df0 100644 --- a/lib/tempest +++ b/lib/tempest @@ -4,21 +4,21 @@ # ``functions`` file # ``lib/nova`` service is runing # -# - DEST -# - ADMIN_PASSWORD -# - OS_USERNAME -# - DEFAULT_IMAGE_NAME -# - S3_SERVICE_PORT -# - SERVICE_HOST -# - BASE_SQL_CONN ``lib/database`` declares +# - ``DEST`` +# - ``ADMIN_PASSWORD`` +# - ``DEFAULT_IMAGE_NAME`` +# - ``S3_SERVICE_PORT`` +# - ``SERVICE_HOST`` +# - ``BASE_SQL_CONN`` ``lib/database`` declares # Optional Dependencies: -# IDENTITY_* +# IDENTITY_USE_SSL, IDENTITY_HOST, IDENTITY_PORT, IDENTITY_PATH # ALT_* (similar vars exists in keystone_data.sh) -# IMAGE_* -# LIVE_MIGRATION_AVAILABLE -# DEFAULT_INSTANCE_TYPE -# DEFAULT_INSTANCE_USER -# USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION +# ``OS_USERNAME`` +# ``IMAGE_PORT``, ``IMAGE_HOST`` +# ``LIVE_MIGRATION_AVAILABLE`` +# ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION`` +# ``DEFAULT_INSTANCE_TYPE`` +# ``DEFAULT_INSTANCE_USER`` # ``stack.sh`` calls the entry points in this order: # # install_tempest @@ -52,12 +52,17 @@ BUILD_TIMEOUT=400 # configure_tempest() - Set config files, create data dirs, etc function configure_tempest() { - local IMAGE_LINES - local IMAGES - local NUM_IMAGES - local IMAGE_UUID - local IMAGE_UUID_ALT + local image_lines + local images + local num_images + local image_uuid + local image_uuid_alt local errexit + local password + local line + local flavors + local flavors_ref + local flavor_lines #TODO(afazekas): # sudo python setup.py deploy @@ -74,33 +79,33 @@ function configure_tempest() { # testing. Here we simply look for images stored in Glance # and set the appropriate variables for use in the tempest config # We ignore ramdisk and kernel images, look for the default image - # DEFAULT_IMAGE_NAME. If not found, we set the IMAGE_UUID to the - # first image returned and set IMAGE_UUID_ALT to the second, + # ``DEFAULT_IMAGE_NAME``. If not found, we set the ``image_uuid`` to the + # first image returned and set ``image_uuid_alt`` to the second, # if there is more than one returned... # ... Also ensure we only take active images, so we don't get snapshots in process - IMAGE_LINES=`glance image-list` + image_lines=`glance image-list` IFS=$'\n\r' - IMAGES="" - for line in $IMAGE_LINES; do + images="" + for line in $image_lines; do if [ -z $DEFAULT_IMAGE_NAME ]; then - IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | cut -d' ' -f2`" + images="$images `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | cut -d' ' -f2`" else - IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | grep "$DEFAULT_IMAGE_NAME" | cut -d' ' -f2`" + images="$images `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | grep "$DEFAULT_IMAGE_NAME" | cut -d' ' -f2`" fi done # Create array of image UUIDs... 
IFS=" " - IMAGES=($IMAGES) - NUM_IMAGES=${#IMAGES[*]} - echo "Found $NUM_IMAGES images" - if [[ $NUM_IMAGES -eq 0 ]]; then + images=($images) + num_images=${#images[*]} + echo "Found $num_images images" + if [[ $num_images -eq 0 ]]; then echo "Found no valid images to use!" exit 1 fi - IMAGE_UUID=${IMAGES[0]} - IMAGE_UUID_ALT=$IMAGE_UUID - if [[ $NUM_IMAGES -gt 1 ]]; then - IMAGE_UUID_ALT=${IMAGES[1]} + image_uuid=${images[0]} + image_uuid_alt=$image_uuid + if [[ $num_images -gt 1 ]]; then + image_uuid_alt=${images[1]} fi # Create tempest.conf from tempest.conf.sample @@ -114,7 +119,7 @@ function configure_tempest() { # from the Tempest configuration file entirely... IDENTITY_PATH=${IDENTITY_PATH:-tokens} - PASSWORD=${ADMIN_PASSWORD:-secrete} + password=${ADMIN_PASSWORD:-secrete} # See files/keystone_data.sh where alt_demo user # and tenant are set up... @@ -122,30 +127,30 @@ function configure_tempest() { ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo} # Check Nova for existing flavors and, if set, look for the - # DEFAULT_INSTANCE_TYPE and use that. Otherwise, just use the first flavor. - FLAVOR_LINES=`nova flavor-list` - IFS="$(echo -e "\n\r")" - FLAVORS="" - for line in $FLAVOR_LINES; do + # ``DEFAULT_INSTANCE_TYPE`` and use that. Otherwise, just use the first flavor. + flavor_lines=`nova flavor-list` + IFS=$'\r\n' + flavors="" + for line in $flavor_lines; do if [ -z $DEFAULT_INSTANCE_TYPE ]; then - FLAVORS="$FLAVORS `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" + flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" else - FLAVORS="$FLAVORS `echo $line | grep -v "^\(|\s*ID\|+--\)" | grep "$DEFAULT_INSTANCE_TYPE" | cut -d' ' -f2`" + flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | grep "$DEFAULT_INSTANCE_TYPE" | cut -d' ' -f2`" fi done IFS=" " - FLAVORS=($FLAVORS) - NUM_FLAVORS=${#FLAVORS[*]} - echo "Found $NUM_FLAVORS flavors" - if [[ $NUM_FLAVORS -eq 0 ]]; then + flavors=($flavors) + num_flavors=${#flavors[*]} + echo "Found $num_flavors flavors" + if [[ $num_flavors -eq 0 ]]; then echo "Found no valid flavors to use!" 
exit 1 fi - FLAVOR_REF=${FLAVORS[0]} - FLAVOR_REF_ALT=$FLAVOR_REF - if [[ $NUM_FLAVORS -gt 1 ]]; then - FLAVOR_REF_ALT=${FLAVORS[1]} + flavor_ref=${flavors[0]} + flavor_ref_alt=$flavor_ref + if [[ $num_flavors -gt 1 ]]; then + flavor_ref_alt=${flavors[1]} fi # Timeouts @@ -162,9 +167,9 @@ function configure_tempest() { iniset $TEMPEST_CONF identity port $IDENTITY_PORT iniset $TEMPEST_CONF identity path $IDENTITY_PATH - iniset $TEMPEST_CONF compute password "$PASSWORD" + iniset $TEMPEST_CONF compute password "$password" iniset $TEMPEST_CONF compute alt_username $ALT_USERNAME - iniset $TEMPEST_CONF compute alt_password "$PASSWORD" + iniset $TEMPEST_CONF compute alt_password "$password" iniset $TEMPEST_CONF compute alt_tenant_name $ALT_TENANT_NAME iniset $TEMPEST_CONF compute resize_available False iniset $TEMPEST_CONF compute change_password_available False @@ -175,10 +180,10 @@ function configure_tempest() { iniset $TEMPEST_CONF compute network_for_ssh private iniset $TEMPEST_CONF compute ip_version_for_ssh 4 iniset $TEMPEST_CONF compute ssh_timeout 4 - iniset $TEMPEST_CONF compute image_ref $IMAGE_UUID - iniset $TEMPEST_CONF compute image_ref_alt $IMAGE_UUID_ALT - iniset $TEMPEST_CONF compute flavor_ref $FLAVOR_REF - iniset $TEMPEST_CONF compute flavor_ref_alt $FLAVOR_REF_ALT + iniset $TEMPEST_CONF compute image_ref $image_uuid + iniset $TEMPEST_CONF compute image_ref_alt $image_uuid_alt + iniset $TEMPEST_CONF compute flavor_ref $flavor_ref + iniset $TEMPEST_CONF compute flavor_ref_alt $flavor_ref_alt iniset $TEMPEST_CONF compute source_dir $NOVA_SOURCE_DIR iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} @@ -192,13 +197,13 @@ function configure_tempest() { # image iniset $TEMPEST_CONF image host ${IMAGE_HOST:-127.0.0.1} iniset $TEMPEST_CONF image port ${IMAGE_PORT:-9292} - iniset $TEMPEST_CONF image password "$PASSWORD" + iniset $TEMPEST_CONF image password "$password" # identity-admin - iniset $TEMPEST_CONF "identity-admin" password "$PASSWORD" + iniset $TEMPEST_CONF "identity-admin" password "$password" # compute admin - iniset $TEMPEST_CONF "compute-admin" password "$PASSWORD" + iniset $TEMPEST_CONF "compute-admin" password "$password" # network iniset $TEMPEST_CONF network api_version 2.0 From a9414249af522324c68e4d8fe1656283162e5738 Mon Sep 17 00:00:00 2001 From: jiajun xu Date: Thu, 6 Dec 2012 16:30:57 +0800 Subject: [PATCH 828/967] Add a service_check function There is no function to check if the services invoked by devstack are running well or not. We could use the function to check their status and print them at the end of devstack running. 
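The mechanism is a simple failure-flag idiom: screen_it appends an '|| touch <service>.failure' fallback to every service command, and service_check later lists any flag files found under $SERVICE_DIR/$SCREEN_NAME. A standalone sketch of the same idea, using an illustrative /tmp path instead of devstack's $DEST/status directory:

SERVICE_DIR=/tmp/status
mkdir -p "$SERVICE_DIR"
rm -f "$SERVICE_DIR"/*.failure

# a failing service command drops a flag file named after the service
/bin/false || touch "$SERVICE_DIR/n-api.failure"

# report anything that recorded a failure
for f in "$SERVICE_DIR"/*.failure; do
    [ -e "$f" ] || continue
    echo "Error: Service $(basename "$f" .failure) is not running"
done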
Change-Id: I845f6b5dddce5cffa7165ec58517f9ae5d8632a6 --- functions | 45 ++++++++++++++++++++++++++++++++++++++++++++- stack.sh | 4 ++++ 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/functions b/functions index 0911557f..85ff4202 100644 --- a/functions +++ b/functions @@ -684,6 +684,8 @@ function restart_service() { function screen_it { NL=`echo -ne '\015'` SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + if is_service_enabled $1; then # Append the service to the screen rc file screen_rc "$1" "$2" @@ -699,7 +701,7 @@ function screen_it { screen -S $SCREEN_NAME -p $1 -X log on ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log fi - screen -S $SCREEN_NAME -p $1 -X stuff "$2$NL" + screen -S $SCREEN_NAME -p $1 -X stuff "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" fi } @@ -724,6 +726,47 @@ function screen_rc { fi } +# Helper to remove the *.failure files under $SERVICE_DIR/$SCREEN_NAME +# This is used for service_check when all the screen_it are called finished +# init_service_check +function init_service_check() { + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + + if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then + mkdir -p "$SERVICE_DIR/$SCREEN_NAME" + fi + + rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure +} + +# Helper to get the status of each running service +# service_check +function service_check() { + local service + local failures + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + + + if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then + echo "No service status directory found" + return + fi + + # Check if there is any falure flag file under $SERVICE_DIR/$SCREEN_NAME + failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null` + + for service in $failures; do + service=`basename $service` + service=${service::-8} + echo "Error: Service $service is not running" + done + + if [ -n "$failures" ]; then + echo "More details about the above errors can be found with screen, with ./rejoin-stack.sh" + fi +} # ``pip install`` the dependencies of the package before ``setup.py develop`` # so pip and not distutils processes the dependency chain diff --git a/stack.sh b/stack.sh index 94283563..40708afd 100755 --- a/stack.sh +++ b/stack.sh @@ -954,6 +954,8 @@ sleep 1 # Set a reasonable status bar screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" +# Initialize the directory for service status check +init_service_check # Keystone # -------- @@ -1726,6 +1728,8 @@ if [[ -x $TOP_DIR/local.sh ]]; then $TOP_DIR/local.sh fi +# Check the status of running services +service_check # Fin # === From eb1aa3d5ed4388119fac56038b4655648bca7e76 Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Thu, 6 Dec 2012 11:55:29 -0800 Subject: [PATCH 829/967] setup quantum-rootrwapper Add quantum-rootwrapper for /etc/sudoers.d This is needed to run quantum in CI env Change-Id: Ib59351c106f0a45bb45476edf032c97744873923 --- lib/quantum | 36 +++++++++++++++++++++++++++++++++++- stack.sh | 7 +------ 2 files changed, 36 insertions(+), 7 deletions(-) diff --git a/lib/quantum b/lib/quantum index 14a3a4ad..cb683398 100644 --- a/lib/quantum +++ b/lib/quantum @@ -15,10 +15,44 @@ if is_service_enabled quantum; then if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then Q_RR_COMMAND="sudo" else - Q_RR_COMMAND="sudo $QUANTUM_DIR/bin/quantum-rootwrap $Q_RR_CONF_FILE" + QUANTUM_ROOTWRAP=$(get_rootwrap_location quantum) + Q_RR_COMMAND="sudo $QUANTUM_ROOTWRAP 
$Q_RR_CONF_FILE" fi fi +# configure_quantum_rootwrap() - configure Quantum's rootwrap +function configure_quantum_rootwrap() { + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + return + fi + # Deploy new rootwrap filters files (owned by root). + # Wipe any existing rootwrap.d files first + Q_CONF_ROOTWRAP_D=/etc/quantum/rootwrap.d + if [[ -d $Q_CONF_ROOTWRAP_D ]]; then + sudo rm -rf $Q_CONF_ROOTWRAP_D + fi + # Deploy filters to /etc/quantum/rootwrap.d + mkdir -p -m 755 $Q_CONF_ROOTWRAP_D + cp -pr $QUANTUM_DIR/etc/quantum/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ + sudo chown -R root:root $Q_CONF_ROOTWRAP_D + sudo chmod 644 $Q_CONF_ROOTWRAP_D/* + # Set up rootwrap.conf, pointing to /etc/quantum/rootwrap.d + sudo cp -p $QUANTUM_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE + sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE + sudo chown root:root $Q_RR_CONF_FILE + sudo chmod 0644 $Q_RR_CONF_FILE + # Specify rootwrap.conf as first parameter to quantum-rootwrap + ROOTWRAP_SUDOER_CMD="$QUANTUM_ROOTWRAP $Q_RR_CONF_FILE *" + + # Set up the rootwrap sudoers for quantum + TEMPFILE=`mktemp` + echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE + chmod 0440 $TEMPFILE + sudo chown root:root $TEMPFILE + sudo mv $TEMPFILE /etc/sudoers.d/quantum-rootwrap +} + + # Configures keystone integration for quantum service and agents function quantum_setup_keystone() { local conf_file=$1 diff --git a/stack.sh b/stack.sh index cddb64e0..33da2cff 100755 --- a/stack.sh +++ b/stack.sh @@ -1149,12 +1149,7 @@ if is_service_enabled quantum; then unset dburl cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE - cp -p $QUANTUM_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE - - # Copy over the config and filter bits - Q_CONF_ROOTWRAP_D=/etc/quantum/rootwrap.d - mkdir -p $Q_CONF_ROOTWRAP_D - cp -pr $QUANTUM_DIR/etc/quantum/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ + configure_quantum_rootwrap fi # Quantum service (for controller node) From 3c6a57a3c4a668b5f1522bd42ca79cdb05360fc2 Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Sat, 8 Dec 2012 22:07:11 -0800 Subject: [PATCH 830/967] Setup rootwrapper for quantum-debug command Setup rootwrapper for quantum-debug command This change is needed to quantum-gating Change-Id: I032f26c0c020374ac978e00bdf72856da795096d --- stack.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/stack.sh b/stack.sh index 7b87cd67..772e1142 100755 --- a/stack.sh +++ b/stack.sh @@ -1387,6 +1387,7 @@ if is_service_enabled quantum; then iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT verbose False iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT debug False iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND" quantum_setup_keystone $QUANTUM_TEST_CONFIG_FILE DEFAULT set_auth_url if [[ "$Q_PLUGIN" == "openvswitch" ]]; then iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver From 60e9c0ab22309d1b0b857761be16d4d58a1b251e Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 6 Dec 2012 15:52:52 -0600 Subject: [PATCH 831/967] Quantum cleanups * Set base default env vars in lib/quantum * Rename common dirs to match devstack conventions Q_CONF_FILE -> QUANTUM_CONF QUANTUM_CLIENT_* -> QUANTUMCLIENT_* Change-Id: I7a2a92b50ef953195f078ac62cb975f28892c05c --- lib/quantum | 69 +++++++++++++++++++++++++++++++++++----- stack.sh | 90 +++++++++++++++-------------------------------------- stackrc | 4 +-- 3 files changed, 89 insertions(+), 74 deletions(-) diff --git a/lib/quantum 
b/lib/quantum index cb683398..4e9f2987 100644 --- a/lib/quantum +++ b/lib/quantum @@ -1,17 +1,69 @@ # lib/quantum # functions - funstions specific to quantum +# Dependencies: +# ``functions`` file +# ``DEST`` must be defined + + +# Quantum Networking +# ------------------ + +# Make sure that quantum is enabled in ``ENABLED_SERVICES``. If you want +# to run Quantum on this host, make sure that q-svc is also in +# ``ENABLED_SERVICES``. +# +# If you're planning to use the Quantum openvswitch plugin, set +# ``Q_PLUGIN`` to "openvswitch" and make sure the q-agt service is enabled +# in ``ENABLED_SERVICES``. If you're planning to use the Quantum +# linuxbridge plugin, set ``Q_PLUGIN`` to "linuxbridge" and make sure the +# q-agt service is enabled in ``ENABLED_SERVICES``. +# +# See "Quantum Network Configuration" below for additional variables +# that must be set in localrc for connectivity across hosts with +# Quantum. +# +# With Quantum networking the NET_MAN variable is ignored. + + # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace + +# Defaults +# -------- + +# Set up default directories QUANTUM_DIR=$DEST/quantum -export QUANTUM_TEST_CONFIG_FILE=${QUANTUM_TEST_CONFIG_FILE:-"/etc/quantum/debug.ini"} +QUANTUMCLIENT_DIR=$DEST/python-quantumclient QUANTUM_AUTH_CACHE_DIR=${QUANTUM_AUTH_CACHE_DIR:-/var/cache/quantum} +QUANTUM_CONF_DIR=/etc/quantum +QUANTUM_CONF=$QUANTUM_CONF_DIR/quantum.conf +export QUANTUM_TEST_CONFIG_FILE=${QUANTUM_TEST_CONFIG_FILE:-"$QUANTUM_CONF_DIR/debug.ini"} + +# Default Quantum Plugin +Q_PLUGIN=${Q_PLUGIN:-openvswitch} +# Default Quantum Port +Q_PORT=${Q_PORT:-9696} +# Default Quantum Host +Q_HOST=${Q_HOST:-$HOST_IP} +# Which Quantum API nova should use +# Default admin username +Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum} +# Default auth strategy +Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} +# Use namespace or not +Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} +Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} +# Meta data IP +Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP} +# Use quantum-debug command +Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} + if is_service_enabled quantum; then - Q_CONF_FILE=/etc/quantum/quantum.conf - Q_RR_CONF_FILE=/etc/quantum/rootwrap.conf + Q_RR_CONF_FILE=$QUANTUM_CONF_DIR/rootwrap.conf if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then Q_RR_COMMAND="sudo" else @@ -20,6 +72,10 @@ if is_service_enabled quantum; then fi fi + +# Entry Points +# ------------ + # configure_quantum_rootwrap() - configure Quantum's rootwrap function configure_quantum_rootwrap() { if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then @@ -27,16 +83,16 @@ function configure_quantum_rootwrap() { fi # Deploy new rootwrap filters files (owned by root). 
# Wipe any existing rootwrap.d files first - Q_CONF_ROOTWRAP_D=/etc/quantum/rootwrap.d + Q_CONF_ROOTWRAP_D=$QUANTUM_CONF_DIR/rootwrap.d if [[ -d $Q_CONF_ROOTWRAP_D ]]; then sudo rm -rf $Q_CONF_ROOTWRAP_D fi - # Deploy filters to /etc/quantum/rootwrap.d + # Deploy filters to $QUANTUM_CONF_DIR/rootwrap.d mkdir -p -m 755 $Q_CONF_ROOTWRAP_D cp -pr $QUANTUM_DIR/etc/quantum/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ sudo chown -R root:root $Q_CONF_ROOTWRAP_D sudo chmod 644 $Q_CONF_ROOTWRAP_D/* - # Set up rootwrap.conf, pointing to /etc/quantum/rootwrap.d + # Set up rootwrap.conf, pointing to $QUANTUM_CONF_DIR/rootwrap.d sudo cp -p $QUANTUM_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE sudo chown root:root $Q_RR_CONF_FILE @@ -52,7 +108,6 @@ function configure_quantum_rootwrap() { sudo mv $TEMPFILE /etc/sudoers.d/quantum-rootwrap } - # Configures keystone integration for quantum service and agents function quantum_setup_keystone() { local conf_file=$1 diff --git a/stack.sh b/stack.sh index 48071828..d58f5f5c 100755 --- a/stack.sh +++ b/stack.sh @@ -321,26 +321,6 @@ HORIZON_DIR=$DEST/horizon OPENSTACKCLIENT_DIR=$DEST/python-openstackclient NOVNC_DIR=$DEST/noVNC SWIFT3_DIR=$DEST/swift3 -QUANTUM_CLIENT_DIR=$DEST/python-quantumclient - -# Default Quantum Plugin -Q_PLUGIN=${Q_PLUGIN:-openvswitch} -# Default Quantum Port -Q_PORT=${Q_PORT:-9696} -# Default Quantum Host -Q_HOST=${Q_HOST:-$HOST_IP} -# Which Quantum API nova should use -# Default admin username -Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum} -# Default auth strategy -Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} -# Use namespace or not -Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} -Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} -# Meta data IP -Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP} -# Use quantum-debug command -Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} RYU_DIR=$DEST/ryu # Ryu API Host @@ -458,26 +438,6 @@ FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT} ## FIXME(ja): should/can we check that FLAT_INTERFACE is sane? -# Quantum Networking -# ------------------ - -# Make sure that quantum is enabled in ENABLED_SERVICES. If you want -# to run Quantum on this host, make sure that q-svc is also in -# ENABLED_SERVICES. -# -# If you're planning to use the Quantum openvswitch plugin, set -# Q_PLUGIN to "openvswitch" and make sure the q-agt service is enabled -# in ENABLED_SERVICES. If you're planning to use the Quantum -# linuxbridge plugin, set Q_PLUGIN to "linuxbridge" and make sure the -# q-agt service is enabled in ENABLED_SERVICES. -# -# See "Quantum Network Configuration" below for additional variables -# that must be set in localrc for connectivity across hosts with -# Quantum. -# -# With Quantum networking the NET_MAN variable is ignored. 
- - # Database Configuration # ---------------------- @@ -805,7 +765,7 @@ if is_service_enabled horizon; then install_horizon fi if is_service_enabled quantum; then - git_clone $QUANTUM_CLIENT_REPO $QUANTUM_CLIENT_DIR $QUANTUM_CLIENT_BRANCH + git_clone $QUANTUMCLIENT_REPO $QUANTUMCLIENT_DIR $QUANTUMCLIENT_BRANCH fi if is_service_enabled quantum; then # quantum @@ -864,7 +824,7 @@ if is_service_enabled horizon; then configure_horizon fi if is_service_enabled quantum; then - setup_develop $QUANTUM_CLIENT_DIR + setup_develop $QUANTUMCLIENT_DIR setup_develop $QUANTUM_DIR fi if is_service_enabled heat; then @@ -1119,11 +1079,11 @@ if is_service_enabled quantum; then # Example: ``OVS_ENABLE_TUNNELING=True`` OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} - # Put config files in ``/etc/quantum`` for everyone to find - if [[ ! -d /etc/quantum ]]; then - sudo mkdir -p /etc/quantum + # Put config files in ``QUANTUM_CONF_DIR`` for everyone to find + if [[ ! -d $QUANTUM_CONF_DIR ]]; then + sudo mkdir -p $QUANTUM_CONF_DIR fi - sudo chown `whoami` /etc/quantum + sudo chown `whoami` $QUANTUM_CONF_DIR if [[ "$Q_PLUGIN" = "openvswitch" ]]; then Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch @@ -1147,7 +1107,7 @@ if is_service_enabled quantum; then exit 1 fi - # If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``/etc/quantum`` + # If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``QUANTUM_CONF_DIR`` mkdir -p /$Q_PLUGIN_CONF_PATH Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE @@ -1156,14 +1116,14 @@ if is_service_enabled quantum; then iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection $dburl unset dburl - cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE + cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF configure_quantum_rootwrap fi # Quantum service (for controller node) if is_service_enabled q-svc; then - Q_API_PASTE_FILE=/etc/quantum/api-paste.ini - Q_POLICY_FILE=/etc/quantum/policy.json + Q_API_PASTE_FILE=$QUANTUM_CONF_DIR/api-paste.ini + Q_POLICY_FILE=$QUANTUM_CONF_DIR/policy.json cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE @@ -1176,9 +1136,9 @@ if is_service_enabled q-svc; then fi # Update either configuration file with plugin - iniset $Q_CONF_FILE DEFAULT core_plugin $Q_PLUGIN_CLASS + iniset $QUANTUM_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS - iniset $Q_CONF_FILE DEFAULT auth_strategy $Q_AUTH_STRATEGY + iniset $QUANTUM_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY quantum_setup_keystone $Q_API_PASTE_FILE filter:authtoken # Configure plugin @@ -1295,7 +1255,7 @@ fi if is_service_enabled q-dhcp; then AGENT_DHCP_BINARY="$QUANTUM_DIR/bin/quantum-dhcp-agent" - Q_DHCP_CONF_FILE=/etc/quantum/dhcp_agent.ini + Q_DHCP_CONF_FILE=$QUANTUM_CONF_DIR/dhcp_agent.ini cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE @@ -1325,7 +1285,7 @@ fi if is_service_enabled q-l3; then AGENT_L3_BINARY="$QUANTUM_DIR/bin/quantum-l3-agent" PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} - Q_L3_CONF_FILE=/etc/quantum/l3_agent.ini + Q_L3_CONF_FILE=$QUANTUM_CONF_DIR/l3_agent.ini cp $QUANTUM_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE @@ -1361,7 +1321,7 @@ fi #Quantum Metadata if is_service_enabled q-meta; then AGENT_META_BINARY="$QUANTUM_DIR/bin/quantum-metadata-agent" - Q_META_CONF_FILE=/etc/quantum/metadata_agent.ini + Q_META_CONF_FILE=$QUANTUM_CONF_DIR/metadata_agent.ini cp $QUANTUM_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE @@ -1381,14 +1341,14 @@ fi # Quantum RPC 
support - must be updated prior to starting any of the services if is_service_enabled quantum; then - iniset $Q_CONF_FILE DEFAULT control_exchange quantum + iniset $QUANTUM_CONF DEFAULT control_exchange quantum if is_service_enabled qpid ; then - iniset $Q_CONF_FILE DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid + iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid elif is_service_enabled zeromq; then - iniset $Q_CONF_FILE DEFAULT rpc_backend quantum.openstack.common.rpc.impl_zmq + iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_zmq elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $Q_CONF_FILE DEFAULT rabbit_host $RABBIT_HOST - iniset $Q_CONF_FILE DEFAULT rabbit_password $RABBIT_PASSWORD + iniset $QUANTUM_CONF DEFAULT rabbit_host $RABBIT_HOST + iniset $QUANTUM_CONF DEFAULT rabbit_password $RABBIT_PASSWORD fi if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then cp $QUANTUM_DIR/etc/l3_agent.ini $QUANTUM_TEST_CONFIG_FILE @@ -1598,7 +1558,7 @@ fi if is_service_enabled q-svc; then echo_summary "Starting Quantum" # Start the Quantum service - screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" + screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" echo "Waiting for Quantum to start..." if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9696; do sleep 1; done"; then echo "Quantum did not start" @@ -1650,10 +1610,10 @@ elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then fi # Start up the quantum agents if enabled -screen_it q-agt "python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" -screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $Q_CONF_FILE --config-file=$Q_DHCP_CONF_FILE" -screen_it q-meta "python $AGENT_META_BINARY --config-file $Q_CONF_FILE --config-file=$Q_META_CONF_FILE" -screen_it q-l3 "python $AGENT_L3_BINARY --config-file $Q_CONF_FILE --config-file=$Q_L3_CONF_FILE" +screen_it q-agt "python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" +screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE" +screen_it q-meta "python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE" +screen_it q-l3 "python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE" if is_service_enabled nova; then echo_summary "Starting Nova" diff --git a/stackrc b/stackrc index 39d34b0b..8ac6ec59 100644 --- a/stackrc +++ b/stackrc @@ -89,8 +89,8 @@ QUANTUM_REPO=${GIT_BASE}/openstack/quantum QUANTUM_BRANCH=master # quantum client -QUANTUM_CLIENT_REPO=${GIT_BASE}/openstack/python-quantumclient -QUANTUM_CLIENT_BRANCH=master +QUANTUMCLIENT_REPO=${GIT_BASE}/openstack/python-quantumclient +QUANTUMCLIENT_BRANCH=master # Tempest test suite TEMPEST_REPO=${GIT_BASE}/openstack/tempest.git From c50a86e917a4bbc9f9f6affeaae94ff4e80b556a Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Mon, 10 Dec 2012 15:10:23 -0500 Subject: [PATCH 832/967] Don't set the dns managers Revert to previous behavior, pick up whatever is set as default in nova. This was causing an issue in a tempest run. Tempest has a test where multiple servers can be spun up with the same name this test failed. 
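Deployments that still want the MiniDNS drivers can set them by hand; the option names below are exactly the ones this change stops writing into nova.conf, so enabling them becomes an operator choice rather than a devstack default:

instance_dns_manager=nova.network.minidns.MiniDNS
floating_ip_dns_manager=nova.network.minidns.MiniDNS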
Change-Id: Ie71eda94caf38db0489d6b2385dc80808a39864d --- stack.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/stack.sh b/stack.sh index 700b520c..05f5f35d 100755 --- a/stack.sh +++ b/stack.sh @@ -1482,8 +1482,6 @@ if is_service_enabled nova; then fi elif is_service_enabled n-net; then add_nova_opt "network_manager=nova.network.manager.$NET_MAN" - add_nova_opt "instance_dns_manager=nova.network.minidns.MiniDNS" - add_nova_opt "floating_ip_dns_manager=nova.network.minidns.MiniDNS" add_nova_opt "public_interface=$PUBLIC_INTERFACE" add_nova_opt "vlan_interface=$VLAN_INTERFACE" add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE" From 77f076a56d511378eb1ba3ab1267f54a291996e9 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 10 Dec 2012 16:49:20 -0500 Subject: [PATCH 833/967] enable tempest by default turn on tempest by default, as that will provide the end users with a testing environment to use with openstack out of the box. Change-Id: I74160a25cfbc6325eea30c81df36e6acbb938bfd --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 39d34b0b..1d2cf8d9 100644 --- a/stackrc +++ b/stackrc @@ -14,7 +14,7 @@ DATABASE_TYPE=mysql # ``disable_service`` functions in ``localrc``. # For example, to enable Swift add this to ``localrc``: # enable_service swift -ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,$DATABASE_TYPE +ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,$DATABASE_TYPE # Set the default Nova APIs to enable NOVA_ENABLED_APIS=ec2,osapi_compute,metadata From 9ec34214fce505892937b1cb91c5ece60cdd7882 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Tue, 11 Dec 2012 14:18:02 +1300 Subject: [PATCH 834/967] Fix Heat waitcondition URL configuration Change-Id: I32fb7f5ef91aebdf574a98845988b3a2a91d5550 --- lib/heat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index b640fbca..feaadec2 100644 --- a/lib/heat +++ b/lib/heat @@ -124,7 +124,7 @@ function configure_heat() { iniset $HEAT_ENGINE_CONF DEFAULT bind_host $HEAT_ENGINE_HOST iniset $HEAT_ENGINE_CONF DEFAULT bind_port $HEAT_ENGINE_PORT iniset $HEAT_ENGINE_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT - iniset $HEAT_ENGINE_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_CFN_HOST:$HEAT_CFN_PORT/v1/waitcondition + iniset $HEAT_ENGINE_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition iniset $HEAT_ENGINE_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT local dburl database_connection_url dburl heat From 053a5f8425395efb7b2b7111120fa92c6134fc0b Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Tue, 11 Dec 2012 17:08:48 +1300 Subject: [PATCH 835/967] Add the role heat_stack_user for heat Change-Id: I0c3ac92d222ff746baca817002821f109815fee9 --- files/keystone_data.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index c8e68dd6..a4f08e42 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -93,6 +93,8 @@ if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then keystone user-role-add --tenant_id $SERVICE_TENANT \ --user_id $HEAT_USER \ --role_id $ADMIN_ROLE + # heat_stack_user role is for users created by Heat + keystone role-create --name heat_stack_user if [[ "$KEYSTONE_CATALOG_BACKEND" = 
'sql' ]]; then HEAT_CFN_SERVICE=$(get_id keystone service-create \ --name=heat-cfn \ From 5e3deb678e95737e05f43832d07a37d74c4e8aca Mon Sep 17 00:00:00 2001 From: Akihiro MOTOKI Date: Tue, 11 Dec 2012 17:09:02 +0900 Subject: [PATCH 836/967] Always create signing_dir regardless of token format Fixes bug 1088801 devstack does not create signing_dir when keystone token format is UUID. If the default value of signing_dir is read-only, OpenStack services such as Quantum server failed to start due to permission denied. On the keystone client cannot know which token_format is used in keystone in advance, so signing_dir should be created regardless of the token format. Change-Id: I1b0d25c1ac4d22d9fb2c5443d15b96fdaa5a4c81 --- lib/cinder | 13 ++++--------- lib/glance | 20 +++++++------------- lib/nova | 12 ++++-------- lib/quantum | 10 ++++------ 4 files changed, 19 insertions(+), 36 deletions(-) diff --git a/lib/cinder b/lib/cinder index 9b9d50d1..0dc86cad 100644 --- a/lib/cinder +++ b/lib/cinder @@ -105,10 +105,7 @@ function configure_cinder() { iniset $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $CINDER_API_PASTE_INI filter:authtoken admin_user cinder iniset $CINDER_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD - - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - iniset $CINDER_API_PASTE_INI filter:authtoken signing_dir $CINDER_AUTH_CACHE_DIR - fi + iniset $CINDER_API_PASTE_INI filter:authtoken signing_dir $CINDER_AUTH_CACHE_DIR cp $CINDER_DIR/etc/cinder/cinder.conf.sample $CINDER_CONF iniset $CINDER_CONF DEFAULT auth_strategy keystone @@ -212,11 +209,9 @@ function init_cinder() { fi fi - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - # Create cache dir - sudo mkdir -p $CINDER_AUTH_CACHE_DIR - sudo chown `whoami` $CINDER_AUTH_CACHE_DIR - fi + # Create cache dir + sudo mkdir -p $CINDER_AUTH_CACHE_DIR + sudo chown `whoami` $CINDER_AUTH_CACHE_DIR } # install_cinder() - Collect source and prepare diff --git a/lib/glance b/lib/glance index b02a4b63..4f631b2c 100644 --- a/lib/glance +++ b/lib/glance @@ -95,9 +95,7 @@ function configure_glance() { iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - iniset $GLANCE_REGISTRY_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/registry - fi + iniset $GLANCE_REGISTRY_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/registry cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF iniset $GLANCE_API_CONF DEFAULT debug True @@ -121,9 +119,7 @@ function configure_glance() { iniset $GLANCE_API_CONF DEFAULT rabbit_host $RABBIT_HOST iniset $GLANCE_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD fi - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api - fi + iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI @@ -163,13 +159,11 @@ function init_glance() { $GLANCE_BIN_DIR/glance-manage db_sync - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - # Create cache dir - sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api - sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/api - sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/registry - sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/registry - fi + # Create cache 
dir + sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api + sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/api + sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/registry + sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/registry } # install_glanceclient() - Collect source and prepare diff --git a/lib/nova b/lib/nova index 3a4d34d8..f059576d 100644 --- a/lib/nova +++ b/lib/nova @@ -172,9 +172,7 @@ function configure_nova() { " -i $NOVA_API_PASTE_INI fi - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR - fi + iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR if is_service_enabled n-cpu; then # Force IP forwarding on, just on case @@ -378,11 +376,9 @@ function init_nova() { $NOVA_BIN_DIR/nova-manage db sync fi - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - # Create cache dir - sudo mkdir -p $NOVA_AUTH_CACHE_DIR - sudo chown `whoami` $NOVA_AUTH_CACHE_DIR - fi + # Create cache dir + sudo mkdir -p $NOVA_AUTH_CACHE_DIR + sudo chown `whoami` $NOVA_AUTH_CACHE_DIR } # install_novaclient() - Collect source and prepare diff --git a/lib/quantum b/lib/quantum index cb683398..f7fe90a0 100644 --- a/lib/quantum +++ b/lib/quantum @@ -68,12 +68,10 @@ function quantum_setup_keystone() { iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME iniset $conf_file $section admin_user $Q_ADMIN_USERNAME iniset $conf_file $section admin_password $SERVICE_PASSWORD - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - iniset $conf_file $section signing_dir $QUANTUM_AUTH_CACHE_DIR - # Create cache dir - sudo mkdir -p $QUANTUM_AUTH_CACHE_DIR - sudo chown `whoami` $QUANTUM_AUTH_CACHE_DIR - fi + iniset $conf_file $section signing_dir $QUANTUM_AUTH_CACHE_DIR + # Create cache dir + sudo mkdir -p $QUANTUM_AUTH_CACHE_DIR + sudo chown `whoami` $QUANTUM_AUTH_CACHE_DIR } function quantum_setup_ovs_bridge() { From c83a7e125fc1fea0370fffed37435097346befa6 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 29 Nov 2012 11:47:58 -0600 Subject: [PATCH 837/967] Add TLS support for keystone via proxy * Adds lib/tls to create test CA/certs * Start proxy if 'tls-proxy' is enabled * Configure keystone service catalog for TLS * Tear down proxy in unstack.sh * Set auth protocol and ca-cert chain in openrc * Add DATA_DIR to stackrc This is the first in a series of patches to enable TLS support for the service API endpoints. 
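In practice the proxy layering is simple: stud terminates TLS on the advertised Keystone ports and forwards plain HTTP to the relocated internal ports. A rough sketch of the default layout from lib/keystone follows (only the final curl check is an illustrative addition, using the OS_CACERT variable that openrc now exports):

    #   client --https--> stud :5000   --http--> keystone-all :5001    (public API)
    #   client --https--> stud :35357  --http--> keystone-all :35358   (admin API)
    #
    # After stack.sh completes, a client can verify an endpoint against the
    # generated CA chain:
    source openrc
    curl --cacert $OS_CACERT https://$SERVICE_HOST:5000/v2.0/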
Change-Id: Ia1c91dc8f1aaf94fbec9dc71da322559a83d14b6 --- files/apts/tls-proxy | 1 + lib/keystone | 37 ++++- lib/tls | 314 +++++++++++++++++++++++++++++++++++++++++++ openrc | 9 +- stack.sh | 17 ++- stackrc | 3 + unstack.sh | 5 + 7 files changed, 376 insertions(+), 10 deletions(-) create mode 100644 files/apts/tls-proxy create mode 100644 lib/tls diff --git a/files/apts/tls-proxy b/files/apts/tls-proxy new file mode 100644 index 00000000..dce9c07d --- /dev/null +++ b/files/apts/tls-proxy @@ -0,0 +1 @@ +stud diff --git a/lib/keystone b/lib/keystone index f6a6d667..2d21c2c3 100644 --- a/lib/keystone +++ b/lib/keystone @@ -4,7 +4,7 @@ # Dependencies: # ``functions`` file # ``BASE_SQL_CONN`` -# ``SERVICE_HOST`` +# ``SERVICE_HOST``, ``SERVICE_PROTOCOL`` # ``SERVICE_TOKEN`` # ``S3_SERVICE_PORT`` (template backend only) @@ -48,10 +48,14 @@ KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-PKI} # Set Keystone interface configuration KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357} -KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-http} +KEYSTONE_AUTH_PORT_INT=${KEYSTONE_AUTH_PORT_INT:-35358} +KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL} + +# Public facing bits KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST} KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} -KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-http} +KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001} +KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} # Entry Points @@ -88,6 +92,13 @@ function configure_keystone() { # Rewrite stock ``keystone.conf`` local dburl database_connection_url dburl keystone + + if is_service_enabled tls-proxy; then + # Set the service ports for a proxy to take the originals + iniset $KEYSTONE_CONF DEFAULT public_port $KEYSTONE_SERVICE_PORT_INT + iniset $KEYSTONE_CONF DEFAULT admin_port $KEYSTONE_AUTH_PORT_INT + fi + iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN" iniset $KEYSTONE_CONF signing token_format "$KEYSTONE_TOKEN_FORMAT" iniset $KEYSTONE_CONF sql connection $dburl @@ -213,9 +224,9 @@ create_keystone_accounts() { keystone endpoint-create \ --region RegionOne \ --service_id $KEYSTONE_SERVICE \ - --publicurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:\$(public_port)s/v2.0" \ - --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:\$(admin_port)s/v2.0" \ - --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:\$(public_port)s/v2.0" + --publicurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0" \ + --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0" \ + --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0" fi # TODO(dtroyer): This is part of a series of changes...remove these when @@ -268,13 +279,25 @@ function install_keystone() { # start_keystone() - Start running processes, including screen function start_keystone() { + # Get right service port for testing + local service_port=$KEYSTONE_SERVICE_PORT + if is_service_enabled tls-proxy; then + service_port=$KEYSTONE_SERVICE_PORT_INT + fi + # Start Keystone in a screen window screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
http_proxy= curl -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ >/dev/null; do sleep 1; done"; then + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s http://$SERVICE_HOST:$service_port/v2.0/ >/dev/null; do sleep 1; done"; then echo "keystone did not start" exit 1 fi + + # Start proxies if enabled + if is_service_enabled tls-proxy; then + start_tls_proxy '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT & + start_tls_proxy '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT & + fi } # stop_keystone() - Stop running processes diff --git a/lib/tls b/lib/tls new file mode 100644 index 00000000..1e2a8993 --- /dev/null +++ b/lib/tls @@ -0,0 +1,314 @@ +# lib/tls +# Functions to control the configuration and operation of the TLS proxy service + +# Dependencies: +# !! source _before_ any services that use ``SERVICE_HOST`` +# ``functions`` file +# ``DEST``, ``DATA_DIR`` must be defined +# ``HOST_IP``, ``SERVICE_HOST`` +# ``KEYSTONE_TOKEN_FORMAT`` must be defined + +# Entry points: +# configure_CA +# init_CA + +# configure_proxy +# start_tls_proxy + +# make_root_ca +# make_int_ca +# new_cert $INT_CA_DIR int-server "abc" +# start_tls_proxy HOST_IP 5000 localhost 5000 + + +if is_service_enabled tls-proxy; then + # TODO(dtroyer): revisit this below after the search for HOST_IP has been done + TLS_IP=${TLS_IP:-$SERVICE_IP} + + # Set the default ``SERVICE_PROTOCOL`` for TLS + SERVICE_PROTOCOL=https +fi + +# Make up a hostname for cert purposes +# will be added to /etc/hosts? +DEVSTACK_HOSTNAME=secure.devstack.org +DEVSTACK_CERT_NAME=devstack-cert +DEVSTACK_CERT=$DATA_DIR/$DEVSTACK_CERT_NAME.pem + +# CA configuration +ROOT_CA_DIR=${ROOT_CA_DIR:-$DATA_DIR/CA/root-ca} +INT_CA_DIR=${INT_CA_DIR:-$DATA_DIR/CA/int-ca} + +ORG_NAME="OpenStack" +ORG_UNIT_NAME="DevStack" + +# Stud configuration +STUD_PROTO="--tls" +STUD_CIPHERS='TLSv1+HIGH:!DES:!aNULL:!eNULL:@STRENGTH' + + +# CA Functions +# ============ + +# There may be more than one, get specific +OPENSSL=${OPENSSL:-/usr/bin/openssl} + +# Do primary CA configuration +function configure_CA() { + # build common config file + + # Verify ``TLS_IP`` is good + if [[ -n "$HOST_IP" && "$HOST_IP" != "$TLS_IP" ]]; then + # auto-discover has changed the IP + TLS_IP=$HOST_IP + fi +} + +# Creates a new CA directory structure +# create_CA_base ca-dir +function create_CA_base() { + local ca_dir=$1 + + if [[ -d $ca_dir ]]; then + # Bail out it exists + return 0 + fi + + for i in certs crl newcerts private; do + mkdir -p $ca_dir/$i + done + chmod 710 $ca_dir/private + echo "01" >$ca_dir/serial + cp /dev/null $ca_dir/index.txt +} + + +# Create a new CA configuration file +# create_CA_config ca-dir common-name +function create_CA_config() { + local ca_dir=$1 + local common_name=$2 + + echo " +[ ca ] +default_ca = CA_default + +[ CA_default ] +dir = $ca_dir +policy = policy_match +database = \$dir/index.txt +serial = \$dir/serial +certs = \$dir/certs +crl_dir = \$dir/crl +new_certs_dir = \$dir/newcerts +certificate = \$dir/cacert.pem +private_key = \$dir/private/cacert.key +RANDFILE = \$dir/private/.rand +default_md = default + +[ req ] +default_bits = 1024 +default_md = sha1 + +prompt = no +distinguished_name = ca_distinguished_name + +x509_extensions = ca_extensions + +[ ca_distinguished_name ] +organizationName = $ORG_NAME +organizationalUnitName = $ORG_UNIT_NAME Certificate Authority +commonName = $common_name + +[ policy_match ] +countryName = optional +stateOrProvinceName = optional 
+organizationName = match +organizationalUnitName = optional +commonName = supplied + +[ ca_extensions ] +basicConstraints = critical,CA:true +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always, issuer +keyUsage = cRLSign, keyCertSign + +" >$ca_dir/ca.conf +} + +# Create a new signing configuration file +# create_signing_config ca-dir +function create_signing_config() { + local ca_dir=$1 + + echo " +[ ca ] +default_ca = CA_default + +[ CA_default ] +dir = $ca_dir +policy = policy_match +database = \$dir/index.txt +serial = \$dir/serial +certs = \$dir/certs +crl_dir = \$dir/crl +new_certs_dir = \$dir/newcerts +certificate = \$dir/cacert.pem +private_key = \$dir/private/cacert.key +RANDFILE = \$dir/private/.rand +default_md = default + +[ req ] +default_bits = 1024 +default_md = sha1 + +prompt = no +distinguished_name = req_distinguished_name + +x509_extensions = req_extensions + +[ req_distinguished_name ] +organizationName = $ORG_NAME +organizationalUnitName = $ORG_UNIT_NAME Server Farm + +[ policy_match ] +countryName = optional +stateOrProvinceName = optional +organizationName = match +organizationalUnitName = optional +commonName = supplied + +[ req_extensions ] +basicConstraints = CA:false +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always, issuer +keyUsage = digitalSignature, keyEncipherment, keyAgreement +extendedKeyUsage = serverAuth, clientAuth +subjectAltName = \$ENV::SUBJECT_ALT_NAME + +" >$ca_dir/signing.conf +} + +# Create root and intermediate CAs and an initial server cert +# init_CA +function init_CA { + # Ensure CAs are built + make_root_CA $ROOT_CA_DIR + make_int_CA $INT_CA_DIR $ROOT_CA_DIR + + # Create the CA bundle + cat $ROOT_CA_DIR/cacert.pem $INT_CA_DIR/cacert.pem >>$INT_CA_DIR/ca-chain.pem + + if [[ ! -r $DEVSTACK_CERT ]]; then + if [[ -n "$TLS_IP" ]]; then + # Lie to let incomplete match routines work + TLS_IP="DNS:$TLS_IP" + fi + make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "$TLS_IP" + + # Create a cert bundle + cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT + fi +} + + +# make_cert creates and signs a new certificate with the given commonName and CA +# make_cert ca-dir cert-name "common-name" ["alt-name" ...] 
+function make_cert() { + local ca_dir=$1 + local cert_name=$2 + local common_name=$3 + local alt_names=$4 + + # Generate a signing request + $OPENSSL req \ + -sha1 \ + -newkey rsa \ + -nodes \ + -keyout $ca_dir/private/$cert_name.key \ + -out $ca_dir/$cert_name.csr \ + -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}" + + if [[ -z "$alt_names" ]]; then + alt_names="DNS:${common_name}" + else + alt_names="DNS:${common_name},${alt_names}" + fi + + # Sign the request valid for 1 year + SUBJECT_ALT_NAME="$alt_names" \ + $OPENSSL ca -config $ca_dir/signing.conf \ + -extensions req_extensions \ + -days 365 \ + -notext \ + -in $ca_dir/$cert_name.csr \ + -out $ca_dir/$cert_name.crt \ + -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}" \ + -batch +} + + +# Make an intermediate CA to sign everything else +# make_int_CA ca-dir signing-ca-dir +function make_int_CA() { + local ca_dir=$1 + local signing_ca_dir=$2 + + # Create the root CA + create_CA_base $ca_dir + create_CA_config $ca_dir 'Intermediate CA' + create_signing_config $ca_dir + + # Create a signing certificate request + $OPENSSL req -config $ca_dir/ca.conf \ + -sha1 \ + -newkey rsa \ + -nodes \ + -keyout $ca_dir/private/cacert.key \ + -out $ca_dir/cacert.csr \ + -outform PEM + + # Sign the intermediate request valid for 1 year + $OPENSSL ca -config $signing_ca_dir/ca.conf \ + -extensions ca_extensions \ + -days 365 \ + -notext \ + -in $ca_dir/cacert.csr \ + -out $ca_dir/cacert.pem \ + -batch +} + +# Make a root CA to sign other CAs +# make_root_CA ca-dir +function make_root_CA() { + local ca_dir=$1 + + # Create the root CA + create_CA_base $ca_dir + create_CA_config $ca_dir 'Root CA' + + # Create a self-signed certificate valid for 5 years + $OPENSSL req -config $ca_dir/ca.conf \ + -x509 \ + -nodes \ + -newkey rsa \ + -days 21360 \ + -keyout $ca_dir/private/cacert.key \ + -out $ca_dir/cacert.pem \ + -outform PEM +} + + +# Proxy Functions +# =============== + +# Starts the TLS proxy for the given IP/ports +# start_tls_proxy front-host front-port back-host back-port +function start_tls_proxy() { + local f_host=$1 + local f_port=$2 + local b_host=$3 + local b_port=$4 + + stud $STUD_PROTO -f $f_host,$f_port -b $b_host,$b_port $DEVSTACK_CERT 2>/dev/null +} diff --git a/openrc b/openrc index 08ef98be..2553b4aa 100644 --- a/openrc +++ b/openrc @@ -26,6 +26,9 @@ source $RC_DIR/functions # Load local configuration source $RC_DIR/stackrc +# Get some necessary configuration +source $RC_DIR/lib/tls + # The introduction of Keystone to the OpenStack ecosystem has standardized the # term **tenant** as the entity that owns resources. In some places references # still exist to the original Nova term **project** for this use. Also, @@ -49,6 +52,7 @@ export OS_NO_CACHE=${OS_NO_CACHE:-1} # which is convenient for some localrc configurations. HOST_IP=${HOST_IP:-127.0.0.1} SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} +SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} # Some exercises call glance directly. On a single-node installation, Glance # should be listening on HOST_IP. If its running elsewhere, it can be set here @@ -61,7 +65,10 @@ GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} # # *NOTE*: Using the 2.0 *identity api* does not mean that compute api is 2.0. We # will use the 1.1 *compute api* -export OS_AUTH_URL=http://$SERVICE_HOST:5000/v2.0 +export OS_AUTH_URL=$SERVICE_PROTOCOL://$SERVICE_HOST:5000/v2.0 + +# Set the pointer to our CA certificate chain. Harmless if TLS is not used. 
+export OS_CACERT=$INT_CA_DIR/ca-chain.pem # Currently novaclient needs you to specify the *compute api* version. This # needs to match the config of your catalog returned by Keystone. diff --git a/stack.sh b/stack.sh index b8e59bc0..69c983cc 100755 --- a/stack.sh +++ b/stack.sh @@ -288,6 +288,7 @@ fi # Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints. SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} +SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} # Configure services to use syslog instead of writing to individual log files SYSLOG=`trueorfalse False $SYSLOG` @@ -305,6 +306,7 @@ SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} # ================== # Get project function libraries +source $TOP_DIR/lib/tls source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance @@ -847,6 +849,12 @@ if [[ $TRACK_DEPENDS = True ]] ; then exit 0 fi +if is_service_enabled tls-proxy; then + configure_CA + init_CA + # Add name to /etc/hosts + # don't be naive and add to existing line! +fi # Syslog # ------ @@ -923,12 +931,17 @@ screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" if is_service_enabled key; then echo_summary "Starting Keystone" - configure_keystone init_keystone start_keystone # Set up a temporary admin URI for Keystone - SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 + SERVICE_ENDPOINT=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 + + if is_service_enabled tls-proxy; then + export OS_CACERT=$INT_CA_DIR/ca-chain.pem + # Until the client support is fixed, just use the internal endpoint + SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0 + fi # Do the keystone-specific bits from keystone_data.sh export OS_SERVICE_TOKEN=$SERVICE_TOKEN diff --git a/stackrc b/stackrc index 8ac6ec59..41627808 100644 --- a/stackrc +++ b/stackrc @@ -6,6 +6,9 @@ RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) # Destination path for installation DEST=/opt/stack +# Destination for working data +DATA_DIR=${DEST}/data + # Select the default database DATABASE_TYPE=mysql diff --git a/unstack.sh b/unstack.sh index 34195c21..09e0de6b 100755 --- a/unstack.sh +++ b/unstack.sh @@ -62,6 +62,11 @@ if is_service_enabled horizon; then stop_horizon fi +# Kill TLS proxies +if is_service_enabled tls-proxy; then + killall stud +fi + SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* # Get the iSCSI volumes From a0dce264d93909af3052e1fa59210032bb9a994d Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 11 Dec 2012 16:52:37 -0600 Subject: [PATCH 838/967] Move Nova account creation out of keystone_data.sh Supports the coming HA/proxy configuration for Nova Change-Id: I2baf1f51486537a1489f1376d38f5710bd96c314 --- files/keystone_data.sh | 26 ++------------------------ lib/nova | 40 ++++++++++++++++++++++++++++++++++++++++ stack.sh | 1 + 3 files changed, 43 insertions(+), 24 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index c8e68dd6..32d4e1a0 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -5,7 +5,6 @@ # Tenant User Roles # ------------------------------------------------------------------ # service glance admin -# service nova admin, [ResellerAdmin (swift only)] # service quantum admin # if enabled # service swift admin # if enabled # service cinder admin # if enabled @@ -53,29 +52,8 @@ RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin) # Services # -------- -# Nova -if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then - NOVA_USER=$(get_id 
keystone user-create \ - --name=nova \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=nova@example.com) - keystone user-role-add \ - --tenant_id $SERVICE_TENANT \ - --user_id $NOVA_USER \ - --role_id $ADMIN_ROLE - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - NOVA_SERVICE=$(get_id keystone service-create \ - --name=nova \ - --type=compute \ - --description="Nova Compute Service") - keystone endpoint-create \ - --region RegionOne \ - --service_id $NOVA_SERVICE \ - --publicurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \ - --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" - fi +if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "swift" ]]; then + NOVA_USER=$(keystone user-list | awk "/ nova / { print \$2 }") # Nova needs ResellerAdmin role to download images when accessing # swift through the s3 api. keystone user-role-add \ diff --git a/lib/nova b/lib/nova index 3a4d34d8..095c65ef 100644 --- a/lib/nova +++ b/lib/nova @@ -277,6 +277,46 @@ EOF' fi } +# create_nova_accounts() - Set up common required nova accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service nova admin, [ResellerAdmin (swift only)] + +# Migrated from keystone_data.sh +create_nova_accounts() { + + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + # Nova + if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then + NOVA_USER=$(keystone user-create \ + --name=nova \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=nova@example.com \ + | grep " id " | get_field 2) + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $NOVA_USER \ + --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + NOVA_SERVICE=$(keystone service-create \ + --name=nova \ + --type=compute \ + --description="Nova Compute Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $NOVA_SERVICE \ + --publicurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \ + --adminurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \ + --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" + fi + fi +} + # create_nova_conf() - Create a new nova.conf file function create_nova_conf() { # Remove legacy ``nova.conf`` diff --git a/stack.sh b/stack.sh index b8e59bc0..99ed91f8 100755 --- a/stack.sh +++ b/stack.sh @@ -934,6 +934,7 @@ if is_service_enabled key; then export OS_SERVICE_TOKEN=$SERVICE_TOKEN export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT create_keystone_accounts + create_nova_accounts # ``keystone_data.sh`` creates services, admin and demo users, and roles. ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ From 5c1bedd1edcd04c749721d55710c629bc3d91d12 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 12 Dec 2012 12:03:19 +0000 Subject: [PATCH 839/967] Update aggregates test for aggregates bp Instead of implementing availability zones in the service table, availability zones will be implemented using general aggregate metadata. So when an aggregate is created it will already have metadata. 
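Concretely, the exercise must now accept the availability zone showing up as aggregate metadata immediately after creation, e.g. (an illustrative session; the aggregate name, zone and id below are example values, not ones used by the exercise):

    nova aggregate-create test_aggregate test_az
    # The Metadata column is no longer an empty {}; it already contains
    # {u'availability_zone': u'test_az'}
    nova aggregate-details 1 | grep availability_zone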
Part of blueprint aggregate-based-availability-zones Change-Id: I0fd22399b99a14087fef63fc91d0baef746efbed --- exercises/aggregates.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index adc3393b..deb1a038 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -99,8 +99,8 @@ META_DATA_1_KEY=asdf META_DATA_2_KEY=foo META_DATA_3_KEY=bar -#ensure no metadata is set -nova aggregate-details $AGGREGATE_ID | grep {} +#ensure no additional metadata is set +nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}" nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_1_KEY}=123 nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY @@ -117,7 +117,7 @@ nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die "ERROR metadata was not cleared" nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY -nova aggregate-details $AGGREGATE_ID | grep {} +nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}" # Test aggregate-add/remove-host From f2a18c065e2447083e874eeae59bf6988491ae3f Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Tue, 4 Dec 2012 18:34:25 +0100 Subject: [PATCH 840/967] Two small fixes for openSUSE support - Use right package files for openSUSE in tools/info.sh - Use a2enmod to enable the wsgi apache module Change-Id: I51e3019be32dc0938674c9c8d285a55f5b023707 --- lib/horizon | 7 +++---- tools/info.sh | 2 ++ 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/horizon b/lib/horizon index 68337ab8..5d479d5d 100644 --- a/lib/horizon +++ b/lib/horizon @@ -79,7 +79,7 @@ function init_horizon() { # Be a good citizen and use the distro tools here sudo touch /etc/$APACHE_NAME/$APACHE_CONF sudo a2ensite horizon - # WSGI doesn't enable by default, enable it + # WSGI isn't enabled by default, enable it sudo a2enmod wsgi elif is_fedora; then APACHE_NAME=httpd @@ -88,9 +88,8 @@ function init_horizon() { elif is_suse; then APACHE_NAME=apache2 APACHE_CONF=vhosts.d/horizon.conf - # Append wsgi to the list of modules to load - grep -q "^APACHE_MODULES=.*wsgi" /etc/sysconfig/apache2 || - sudo sed '/^APACHE_MODULES=/s/^\(.*\)"$/\1 wsgi"/' -i /etc/sysconfig/apache2 + # WSGI isn't enabled by default, enable it + sudo a2enmod wsgi else exit_distro_not_supported "apache configuration" fi diff --git a/tools/info.sh b/tools/info.sh index f01dbea0..ef1f3380 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -92,6 +92,8 @@ if is_ubuntu; then PKG_DIR=$FILES/apts elif is_fedora; then PKG_DIR=$FILES/rpms +elif is_suse; then + PKG_DIR=$FILES/rpms-suse else exit_distro_not_supported "list of packages" fi From b1b04d066d56162013fe08fd893e51060365653e Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 6 Dec 2012 11:59:29 +0100 Subject: [PATCH 841/967] Fix postgresql setup on openSUSE The initdb call is part of starting the service for the first time, so we need to do that. Also, restart postgresql after sed'ing its configuration files: if it was already running for some reason, it needs to be restarted, not started. 
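The distinction matters because "start" is usually a no-op when the daemon is already running, so the sed edits made just beforehand would never be re-read. A condensed sketch of the sequence in lib/databases/postgresql (restart_service is the existing devstack helper):

    # Listen on all addresses and adjust pg_hba.conf for password auth ...
    sudo sed -i "/listen_addresses/s/.*/listen_addresses = '*'/" $PG_CONF
    # A plain "start" would silently keep the old settings if postgresql is
    # already up; "restart" always picks up the edited files.
    restart_service postgresql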
Change-Id: Ib7d3ff5217d06a7764a62a36084090514a1825ea --- lib/databases/postgresql | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 20ade857..e1463c5a 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -20,14 +20,21 @@ function recreate_database_postgresql { function configure_database_postgresql { echo_summary "Configuring and starting PostgreSQL" - if is_fedora || is_suse; then + if is_fedora; then PG_HBA=/var/lib/pgsql/data/pg_hba.conf PG_CONF=/var/lib/pgsql/data/postgresql.conf sudo [ -e $PG_HBA ] || sudo postgresql-setup initdb - else + elif is_ubuntu; then PG_DIR=`find /etc/postgresql -name pg_hba.conf|xargs dirname` PG_HBA=$PG_DIR/pg_hba.conf PG_CONF=$PG_DIR/postgresql.conf + elif is_suse; then + PG_HBA=/var/lib/pgsql/data/pg_hba.conf + PG_CONF=/var/lib/pgsql/data/postgresql.conf + # initdb is called when postgresql is first started + sudo [ -e $PG_HBA ] || start_service postgresql + else + exit_distro_not_supported "postgresql configuration" fi # Listen on all addresses sudo sed -i "/listen_addresses/s/.*/listen_addresses = '*'/" $PG_CONF @@ -35,7 +42,7 @@ function configure_database_postgresql { sudo sed -i "/^host/s/all\s\+127.0.0.1\/32\s\+ident/$DATABASE_USER\t0.0.0.0\/0\tpassword/" $PG_HBA # Do password auth for all IPv6 clients sudo sed -i "/^host/s/all\s\+::1\/128\s\+ident/$DATABASE_USER\t::0\/0\tpassword/" $PG_HBA - start_service postgresql + restart_service postgresql # If creating the role fails, chances are it already existed. Try to alter it. sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" || \ From f1c094cbcd7917593a2f92b82a5d29931a5698a7 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Wed, 5 Dec 2012 17:59:04 +0100 Subject: [PATCH 842/967] Fix polkit configuration to allow usage of libvirt on openSUSE There is a buggy limitation with pkla files on openSUSE, that blocks using 'unix-group:libvirtd' from working. A pkla with such a matching identity will be overruled by the pkla generated by polkit-default-privs containing 'unix-group:*' (which will match the other groups the user belongs to, likely after matching libvirtd). To work around this, explicitly allow the user instead. Also, move the creation of the libvirtd group a bit later, to clarify the code. Change-Id: Ia3e4ae982accfc247a744eaa6d6aa4935e4f404c --- lib/nova | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/lib/nova b/lib/nova index 86db5611..9530df46 100644 --- a/lib/nova +++ b/lib/nova @@ -231,10 +231,13 @@ EOF if is_ubuntu; then LIBVIRT_DAEMON=libvirt-bin else - # http://wiki.libvirt.org/page/SSHPolicyKitSetup - if ! getent group libvirtd >/dev/null; then - sudo groupadd libvirtd - fi + LIBVIRT_DAEMON=libvirtd + fi + + # For distributions using polkit to authorize access to libvirt, + # configure polkit accordingly. + # Based on http://wiki.libvirt.org/page/SSHPolicyKitSetup + if is_fedora; then sudo bash -c 'cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla [libvirt Management Access] Identity=unix-group:libvirtd @@ -243,11 +246,24 @@ ResultAny=yes ResultInactive=yes ResultActive=yes EOF' - LIBVIRT_DAEMON=libvirtd + elif is_suse; then + # Work around the fact that polkit-default-privs overrules pklas + # with 'unix-group:$group'. 
+ sudo bash -c "cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla +[libvirt Management Access] +Identity=unix-user:$USER +Action=org.libvirt.unix.manage +ResultAny=yes +ResultInactive=yes +ResultActive=yes +EOF" fi # The user that nova runs as needs to be member of **libvirtd** group otherwise # nova-compute will be unable to use libvirt. + if ! getent group libvirtd >/dev/null; then + sudo groupadd libvirtd + fi add_user_to_group `whoami` libvirtd # libvirt detects various settings on startup, as we potentially changed From 1bd2a1b18601fa4f896160c348f2f5988d8b0106 Mon Sep 17 00:00:00 2001 From: Clint Byrum Date: Wed, 12 Dec 2012 12:57:16 -0800 Subject: [PATCH 843/967] Fixes lib/heat to use DATABASE_PASSWORD instead of MYSQL_PASSWORD The latter is not used anywhere else, but the former is used in all of the other projects sharing the mysql database for devstack. Change-Id: I2a0cd7e5b908eb144468caf410a6c41e0e5e3fd0 --- lib/heat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index feaadec2..43115cb8 100644 --- a/lib/heat +++ b/lib/heat @@ -175,7 +175,7 @@ function init_heat() { # (re)create heat database recreate_database heat utf8 - $HEAT_DIR/bin/heat-db-setup $os_PACKAGE -r $MYSQL_PASSWORD + $HEAT_DIR/bin/heat-db-setup $os_PACKAGE -r $DATABASE_PASSWORD $HEAT_DIR/tools/nova_create_flavors.sh } From 33cb43034e13ceb9b55d26ac95e28eeaf47a9cec Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 10 Dec 2012 16:47:36 -0600 Subject: [PATCH 844/967] Write selected env variables to .stackenv There are some environment variables that are derived in stack.sh and cubersome to re-create later, so save them at the end of stack.sh for use by other supporting scripts, such as openrc. Change-Id: I1bbf717b970f8ceac0ff7da74aeaf19474997e07 --- openrc | 5 +++++ stack.sh | 15 ++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/openrc b/openrc index 2553b4aa..3ef44fd1 100644 --- a/openrc +++ b/openrc @@ -26,6 +26,11 @@ source $RC_DIR/functions # Load local configuration source $RC_DIR/stackrc +# Load the last env variables if available +if [[ -r $TOP_DIR/.stackenv ]]; then + source $TOP_DIR/.stackenv +fi + # Get some necessary configuration source $RC_DIR/lib/tls diff --git a/stack.sh b/stack.sh index f2fd68cc..5002f8b9 100755 --- a/stack.sh +++ b/stack.sh @@ -90,6 +90,11 @@ DEST=${DEST:-/opt/stack} # Sanity Check # ============ +# Clean up last environment var cache +if [[ -r $TOP_DIR/.stackenv ]]; then + rm $TOP_DIR/.stackenv +fi + # Import database configuration source $TOP_DIR/lib/database @@ -537,9 +542,9 @@ function echo_nolog() { # Set ``LOGFILE`` to turn on logging # Append '.xxxxxxxx' to the given name to maintain history # where 'xxxxxxxx' is a representation of the date the file was created +TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"} if [[ -n "$LOGFILE" || -n "$SCREEN_LOGDIR" ]]; then LOGDAYS=${LOGDAYS:-7} - TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"} CURRENT_LOG_TIME=$(date "+$TIMESTAMP_FORMAT") fi @@ -1705,6 +1710,14 @@ if is_service_enabled tempest; then echo '**************************************************' fi +# Save some values we generated for later use +CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT") +echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv +for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \ + SERVICE_HOST SERVICE_PROTOCOL TLS_IP; do + echo $i=${!i} >>$TOP_DIR/.stackenv +done + # Run local script # ================ From 90e10888547dd1b256e851c6e5fd2488ddda8b5b Mon Sep 17 
00:00:00 2001 From: Vincent Untz Date: Thu, 13 Dec 2012 08:47:06 +0100 Subject: [PATCH 845/967] Always chown $KEYSTONE_CONF_DIR If the directory exists but is owned by another user, then this will cause failures. Note that we already do this for other components (glance, for instance). Change-Id: Ic7d2a2dd179f721636afc9ea9c3fe6bb314c9b33 --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index 2d21c2c3..2185f6fb 100644 --- a/lib/keystone +++ b/lib/keystone @@ -81,8 +81,8 @@ function configure_keystone() { if [[ ! -d $KEYSTONE_CONF_DIR ]]; then sudo mkdir -p $KEYSTONE_CONF_DIR - sudo chown `whoami` $KEYSTONE_CONF_DIR fi + sudo chown `whoami` $KEYSTONE_CONF_DIR if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF From 7e86dbe16695808f8206b26d73c1dcfddd173d13 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 13 Dec 2012 08:50:37 +0100 Subject: [PATCH 846/967] Do not use sudo when sed'ing $KEYSTONE_CATALOG We already edited this file earlier without sudo. Change-Id: I366053edd1a2ad729cfd983ea7491c6252cad905 --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index 2d21c2c3..3add0359 100644 --- a/lib/keystone +++ b/lib/keystone @@ -133,7 +133,7 @@ function configure_keystone() { echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG fi - sudo sed -e " + sed -e " s,%SERVICE_HOST%,$SERVICE_HOST,g; s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g; " -i $KEYSTONE_CATALOG From 90dd96d4785bf12d66199c3fc8e2ea6a83090602 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 13 Dec 2012 08:59:57 +0100 Subject: [PATCH 847/967] Use the right service name for tgt/tgtd in stop_cinder Change-Id: I58cf8cdf88a3edebed729f4460e8ce222db3664d --- lib/cinder | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index 16cbaf36..586cfcbb 100644 --- a/lib/cinder +++ b/lib/cinder @@ -265,7 +265,11 @@ function stop_cinder() { done if is_service_enabled c-vol; then - stop_service tgt + if is_ubuntu; then + stop_service tgt + else + stop_service tgtd + fi fi } From b0d8a8288be6ad23114563c4bf62338c79766501 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Thu, 13 Dec 2012 16:08:48 +0000 Subject: [PATCH 848/967] make volume size parametric in boot_from_volume Fixes bug #1090007 Change-Id: Ifa13b0b7b62be75805db2730cb7154406f0c1b94 --- exerciserc | 4 ++++ exercises/boot_from_volume.sh | 2 +- exercises/volumes.sh | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/exerciserc b/exerciserc index 82c74b7f..c26ec2ce 100644 --- a/exerciserc +++ b/exerciserc @@ -26,3 +26,7 @@ export VOLUME_TIMEOUT=${VOLUME_TIMEOUT:-30} # Max time to wait for a euca-delete command to propogate export VOLUME_DELETE_TIMEOUT=${SNAPSHOT_DELETE_TIMEOUT:-60} + +# The size of the volume we want to boot from; some storage back-ends +# do not allow a disk resize, so it's important that this can be tuned +export DEFAULT_VOLUME_SIZE=${DEFAULT_VOLUME_SIZE:-1} diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 5ebdecc7..5ada2370 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -117,7 +117,7 @@ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! 
nova floating-ip-list | grep -q $ fi # Create the bootable volume -cinder create --display_name=$VOL_NAME --image-id $IMAGE 1 +cinder create --display_name=$VOL_NAME --image-id $IMAGE $DEFAULT_VOLUME_SIZE # Wait for volume to activate if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 42f9cb4e..48a976ed 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -142,7 +142,7 @@ if [[ -n "`cinder list | grep $VOL_NAME | head -1 | get_field 2`" ]]; then fi # Create a new volume -cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" 1 +cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE if [[ $? != 0 ]]; then echo "Failure creating volume $VOL_NAME" exit 1 From 671c16e63aad003e12151bc94ee2a82365141507 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 13 Dec 2012 16:22:38 -0600 Subject: [PATCH 849/967] Move cinder account creation out of keystone_data.sh Supports the coming HA/proxy configuration for Cinder Change-Id: If2e08e45430dce895ed6bb1070612517a38ca4bc --- files/keystone_data.sh | 25 ++--------------------- lib/cinder | 45 ++++++++++++++++++++++++++++++++++++++++-- stack.sh | 1 + 3 files changed, 46 insertions(+), 25 deletions(-) diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 17e8c59e..71a8e5ef 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -7,7 +7,6 @@ # service glance admin # service quantum admin # if enabled # service swift admin # if enabled -# service cinder admin # if enabled # service heat admin # if enabled # service ceilometer admin # if enabled # Tempest Only: @@ -38,6 +37,7 @@ function get_id () { # Lookups SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") +MEMBER_ROLE=$(keystone role-list | awk "/ Member / { print \$2 }") # Roles @@ -49,6 +49,7 @@ ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") # role is also configurable in swift-proxy.conf RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin) + # Services # -------- @@ -243,25 +244,3 @@ if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then --user_id $ALT_DEMO_USER \ --role_id $MEMBER_ROLE fi - -if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - CINDER_USER=$(get_id keystone user-create --name=cinder \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=cinder@example.com) - keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user_id $CINDER_USER \ - --role_id $ADMIN_ROLE - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - CINDER_SERVICE=$(get_id keystone service-create \ - --name=cinder \ - --type=volume \ - --description="Cinder Service") - keystone endpoint-create \ - --region RegionOne \ - --service_id $CINDER_SERVICE \ - --publicurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ - --internalurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" - fi -fi diff --git a/lib/cinder b/lib/cinder index 16cbaf36..17005af4 100644 --- a/lib/cinder +++ b/lib/cinder @@ -31,9 +31,11 @@ CINDER_DRIVER=${CINDER_DRIVER:-default} CINDER_DIR=$DEST/cinder CINDERCLIENT_DIR=$DEST/python-cinderclient CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder} +CINDER_AUTH_CACHE_DIR=${CINDER_AUTH_CACHE_DIR:-/var/cache/cinder} + CINDER_CONF_DIR=/etc/cinder 
CINDER_CONF=$CINDER_CONF_DIR/cinder.conf -CINDER_AUTH_CACHE_DIR=${CINDER_AUTH_CACHE_DIR:-/var/cache/cinder} +CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini # Support entry points installation of console scripts if [[ -d $CINDER_DIR/bin ]]; then @@ -97,7 +99,6 @@ function configure_cinder() { sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap - CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI iniset $CINDER_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST iniset $CINDER_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT @@ -159,6 +160,46 @@ function configure_cinder() { fi } +# create_cinder_accounts() - Set up common required cinder accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service cinder admin # if enabled + +# Migrated from keystone_data.sh +create_cinder_accounts() { + + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + # Cinder + if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then + CINDER_USER=$(keystone user-create \ + --name=cinder \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=cinder@example.com \ + | grep " id " | get_field 2) + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $CINDER_USER \ + --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + CINDER_SERVICE=$(keystone service-create \ + --name=cinder \ + --type=volume \ + --description="Cinder Volume Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $CINDER_SERVICE \ + --publicurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ + --adminurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ + --internalurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" + fi + fi +} + # init_cinder() - Initialize database and volume group function init_cinder() { # Force nova volumes off diff --git a/stack.sh b/stack.sh index f2fd68cc..94403fcc 100755 --- a/stack.sh +++ b/stack.sh @@ -956,6 +956,7 @@ if is_service_enabled key; then export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT create_keystone_accounts create_nova_accounts + create_cinder_accounts # ``keystone_data.sh`` creates services, admin and demo users, and roles. ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ From a5c774ea62ef00b36ffc30b314c6ca08895c75e3 Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Mon, 10 Dec 2012 10:40:01 +0000 Subject: [PATCH 850/967] Add tempest config support for quantum. 
* Supports fix for 1043980 Change-Id: I047989dacc263b30992a90181fb07a5ac47787d4 --- lib/tempest | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/lib/tempest b/lib/tempest index 7fa15df0..18599219 100644 --- a/lib/tempest +++ b/lib/tempest @@ -63,6 +63,8 @@ function configure_tempest() { local flavors local flavors_ref local flavor_lines + local public_network_id + local tenant_networks_reachable #TODO(afazekas): # sudo python setup.py deploy @@ -153,6 +155,17 @@ function configure_tempest() { flavor_ref_alt=${flavors[1]} fi + if [ "$Q_USE_NAMESPACE" != "False" ]; then + tenant_networks_reachable=false + else + tenant_networks_reachable=true + fi + + if is_service_enabled q-l3; then + public_network_id=$(quantum net-list | grep $PUBLIC_NETWORK_NAME | \ + awk '{print $2}') + fi + # Timeouts iniset $TEMPEST_CONF compute build_timeout $BUILD_TIMEOUT iniset $TEMPEST_CONF volume build_timeout $BUILD_TIMEOUT @@ -205,8 +218,14 @@ function configure_tempest() { # compute admin iniset $TEMPEST_CONF "compute-admin" password "$password" + # network admin + iniset $TEMPEST_CONF "network-admin" password "$password" + # network iniset $TEMPEST_CONF network api_version 2.0 + iniset $TEMPEST_CONF network password "$password" + iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable" + iniset $TEMPEST_CONF network public_network_id "$public_network_id" #boto iniset $TEMPEST_CONF boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" From eb4ae630bc706736d9ad0e7fce201ac0a29a037e Mon Sep 17 00:00:00 2001 From: "James E. Blair" Date: Sun, 16 Dec 2012 19:00:26 -0800 Subject: [PATCH 851/967] Stud is only in >=precise. Change-Id: Ieb302e80af69a783736f2ebbdc9077e2cafe6a35 --- files/apts/tls-proxy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/apts/tls-proxy b/files/apts/tls-proxy index dce9c07d..0a440159 100644 --- a/files/apts/tls-proxy +++ b/files/apts/tls-proxy @@ -1 +1 @@ -stud +stud # only available in dist:precise,quantal From 251d3b5fbcf445c41e127c6afd6350af47b3e011 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 16 Dec 2012 15:05:44 +0100 Subject: [PATCH 852/967] Remove unnecessary returns * remove duplicated xtrace * remove some unnecessary return Change-Id: If9e0a979e0bd5a334e82d42572ac0b149de341d7 --- functions | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/functions b/functions index 1b7d1308..1d0a6445 100644 --- a/functions +++ b/functions @@ -73,7 +73,6 @@ function die_if_not_set() { set +o xtrace local evar=$1; shift if ! is_set $evar || [ $exitcode != 0 ]; then - set +o xtrace echo $@ exit -1 fi @@ -650,10 +649,8 @@ function is_package_installed() { if [[ "$os_PACKAGE" = "deb" ]]; then dpkg -l "$@" > /dev/null - return $? elif [[ "$os_PACKAGE" = "rpm" ]]; then rpm --quiet -q "$@" - return $? else exit_distro_not_supported "finding if a package is installed" fi @@ -664,10 +661,7 @@ function is_package_installed() { # is_set env-var function is_set() { local var=\$"$1" - if eval "[ -z \"$var\" ]"; then - return 1 - fi - return 0 + eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depends on this } @@ -973,11 +967,9 @@ function use_database { if [[ -z "$DATABASE_BACKENDS" ]]; then # The backends haven't initialized yet, just save the selection for now DATABASE_TYPE=$1 - return + else + use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1 fi - use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1 && return 0 - ret=$? 
- return $ret } # Toggle enable/disable_service for services that must run exclusive of each other @@ -1133,7 +1125,6 @@ function qpid_is_supported() { # Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is # not in openSUSE either right now. ( ! ([[ "$DISTRO" = "oneiric" ]] || is_suse) ) - return $? } # Restore xtrace From 23431f352618372b6204c3a591653784d77effa1 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 12 Dec 2012 15:57:33 -0800 Subject: [PATCH 853/967] Clear out the keystone pki data for each run This prevents old invalid tokens from working after a rerun of stack.sh and potentially providing users and tenants that don't exist. Fixes bug 1089700 Change-Id: Icfc22978e41e459d51b50bc7ad2e6d98d766e402 --- lib/cinder | 1 + lib/glance | 2 ++ lib/keystone | 2 ++ lib/nova | 1 + lib/quantum | 1 + 5 files changed, 7 insertions(+) diff --git a/lib/cinder b/lib/cinder index 16cbaf36..0ab3fd4e 100644 --- a/lib/cinder +++ b/lib/cinder @@ -212,6 +212,7 @@ function init_cinder() { # Create cache dir sudo mkdir -p $CINDER_AUTH_CACHE_DIR sudo chown `whoami` $CINDER_AUTH_CACHE_DIR + rm -f $CINDER_AUTH_CACHE_DIR/* } # install_cinder() - Collect source and prepare diff --git a/lib/glance b/lib/glance index 4f631b2c..8ba04b3a 100644 --- a/lib/glance +++ b/lib/glance @@ -162,8 +162,10 @@ function init_glance() { # Create cache dir sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/api + rm -f $GLANCE_AUTH_CACHE_DIR/api/* sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/registry sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/registry + rm -f $GLANCE_AUTH_CACHE_DIR/registry/* } # install_glanceclient() - Collect source and prepare diff --git a/lib/keystone b/lib/keystone index 2d21c2c3..acef8ce3 100644 --- a/lib/keystone +++ b/lib/keystone @@ -259,11 +259,13 @@ function init_keystone() { if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then # Set up certificates + rm -rf $KEYSTONE_CONF_DIR/ssl $KEYSTONE_DIR/bin/keystone-manage pki_setup # Create cache dir sudo mkdir -p $KEYSTONE_AUTH_CACHE_DIR sudo chown `whoami` $KEYSTONE_AUTH_CACHE_DIR + rm -f $KEYSTONE_AUTH_CACHE_DIR/* fi } diff --git a/lib/nova b/lib/nova index 840965ee..e07e61c2 100644 --- a/lib/nova +++ b/lib/nova @@ -435,6 +435,7 @@ function init_nova() { # Create cache dir sudo mkdir -p $NOVA_AUTH_CACHE_DIR sudo chown `whoami` $NOVA_AUTH_CACHE_DIR + rm -f $NOVA_AUTH_CACHE_DIR/* } # install_novaclient() - Collect source and prepare diff --git a/lib/quantum b/lib/quantum index 288a3279..480aaa17 100644 --- a/lib/quantum +++ b/lib/quantum @@ -127,6 +127,7 @@ function quantum_setup_keystone() { # Create cache dir sudo mkdir -p $QUANTUM_AUTH_CACHE_DIR sudo chown `whoami` $QUANTUM_AUTH_CACHE_DIR + rm -f $QUANTUM_AUTH_CACHE_DIR/* } function quantum_setup_ovs_bridge() { From 9efcf6042c9eddd84f8abd70ca2cdf9d20258264 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Wed, 19 Dec 2012 10:23:06 +0000 Subject: [PATCH 854/967] xenapi: Enhance devstack progress monitoring Fixes bug 1091299 XenServer - devstack install monitors a log file by tailing it, to see, if the devstack installation is finished. In some cases this script does not detect, that the startup script is finished, and just waiting, causing build failures with timeouts. With this change, the install_os_domU script monitors, if the run.sh script is still running, thus guaranteed to exit as soon as run.sh is done. 
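The essence of the new wait loop is shown below as a condensed sketch (not a drop-in replacement; ssh_no_check and DOMU_IP are defined earlier in install_os_domU.sh):

    # Poll the domU for a live run.sh instead of tailing its log file.
    while [ `ssh_no_check -q stack@$DOMU_IP pgrep -c run.sh` -ge 1 ]; do
        sleep 10
    done
    # run.sh has exited: dump its log and fail unless stack.sh reported success.
    ssh_no_check -q stack@$DOMU_IP 'cat run.sh.log' | grep -q 'stack.sh completed in'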
Change-Id: I24a7a46e93ce26be024096828c7954bc694af2b2 --- tools/xen/install_os_domU.sh | 37 ++++++++++++------------------------ 1 file changed, 12 insertions(+), 25 deletions(-) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index c78c6f2e..e270e59b 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -376,35 +376,22 @@ if [ "$WAIT_TILL_LAUNCH" = "1" ] && [ -e ~/.ssh/id_rsa.pub ] && [ "$COPYENV" = sleep 10 done - # output the run.sh.log - ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no stack@$DOMU_IP 'tail -f run.sh.log' & - TAIL_PID=$! - - function kill_tail() { - kill -9 $TAIL_PID - exit 1 - } - # Let Ctrl-c kill tail and exit - trap kill_tail SIGINT - - # ensure we kill off the tail if we exit the script early - # for other reasons - add_on_exit "kill -9 $TAIL_PID || true" - - # wait silently until stack.sh has finished - set +o xtrace - while ! ssh_no_check -q stack@$DOMU_IP "tail run.sh.log | grep -q 'stack.sh completed in'"; do + set +x + echo -n "Waiting for startup script to finish" + while [ `ssh_no_check -q stack@$DOMU_IP pgrep -c run.sh` -ge 1 ] + do sleep 10 + echo -n "." done - set -o xtrace + echo "done!" + set -x - # kill the tail process now stack.sh has finished - kill -9 $TAIL_PID + # output the run.sh.log + ssh_no_check -q stack@$DOMU_IP 'cat run.sh.log' + + # Fail if the expected text is not found + ssh_no_check -q stack@$DOMU_IP 'cat run.sh.log' | grep -q 'stack.sh completed in' - # check for a failure - if ssh_no_check -q stack@$DOMU_IP "grep -q 'stack.sh failed' run.sh.log"; then - exit 1 - fi echo "################################################################################" echo "" echo "All Finished!" From c439b5dfc2da1b8582f2cafcae20bfe55207372b Mon Sep 17 00:00:00 2001 From: Bob Melander Date: Wed, 19 Dec 2012 14:49:34 +0100 Subject: [PATCH 855/967] Changed so that nova databases are only created once in a multi-host setup. Change-Id: Idb17bcb8b6d558f03c188d14509bb5e732079585 Fixes: bug #1091730 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index e07e61c2..d5cefc92 100644 --- a/lib/nova +++ b/lib/nova @@ -421,7 +421,7 @@ function init_nova() { # All nova components talk to a central database. We will need to do this step # only once for an entire cluster. - if is_service_enabled $DATABASE_BACKENDS && is_service_enabled nova; then + if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then # (Re)create nova database # Explicitly use latin1: to avoid lp#829209, nova expects the database to # use latin1 by default, and then upgrades the database to utf8 (see the From ac25b0359ae1beb348b7a47ca3e9984773e88168 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 19 Dec 2012 11:15:46 -0500 Subject: [PATCH 856/967] turn debug on for euca-upload-bundle this spuriously fails in devstack gate, and need to turn this on to get to the bottom of the fails. 
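For reference, the upload step in exercises/bundle.sh then reads as follows (shown with the surrounding line the exercise already has); --debug should make euca2ools log the individual requests, giving the gate logs enough detail to diagnose the intermittent failures:

    euca-bundle-image -i /tmp/$IMAGE || die "Failure bundling image $IMAGE"
    euca-upload-bundle --debug -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die "Failure uploading bundle $IMAGE to $BUCKET"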
Change-Id: I7d5c1b4d3230efacdd8a3b89e5e40e98ac894a1d --- exercises/bundle.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/bundle.sh b/exercises/bundle.sh index daff5f9c..12f27323 100755 --- a/exercises/bundle.sh +++ b/exercises/bundle.sh @@ -51,7 +51,7 @@ IMAGE=bundle.img truncate -s 5M /tmp/$IMAGE euca-bundle-image -i /tmp/$IMAGE || die "Failure bundling image $IMAGE" -euca-upload-bundle -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die "Failure uploading bundle $IMAGE to $BUCKET" +euca-upload-bundle --debug -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die "Failure uploading bundle $IMAGE to $BUCKET" AMI=`euca-register $BUCKET/$IMAGE.manifest.xml | cut -f2` die_if_not_set AMI "Failure registering $BUCKET/$IMAGE" From 7be0b047dc668a0095d0c7e51948edf83b66d5b8 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 12 Dec 2012 12:50:38 -0600 Subject: [PATCH 857/967] Create tools/install_prereqs.sh * Factor system package prereq installs out to tools/install_prereqs.sh * Set minimum time between runs with PREREQ_RERUN_HOURS default = 2 hours * Create re_export_proxy_variables Change-Id: I4a182b1da685f403d6abdd8540d2114796c01682 --- functions | 21 +++++++++++ stack.sh | 20 +---------- tools/install_prereqs.sh | 78 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 100 insertions(+), 19 deletions(-) create mode 100755 tools/install_prereqs.sh diff --git a/functions b/functions index 3ee43d3d..7de5a44f 100644 --- a/functions +++ b/functions @@ -710,6 +710,27 @@ function restart_service() { } +# HTTP and HTTPS proxy servers are supported via the usual environment variables [1] +# ``http_proxy``, ``https_proxy`` and ``no_proxy``. They can be set in +# ``localrc`` or on the command line if necessary:: +# +# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html +# +# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh + +function re_export_proxy_variables() { + if [[ -n "$http_proxy" ]]; then + export http_proxy=$http_proxy + fi + if [[ -n "$https_proxy" ]]; then + export https_proxy=$https_proxy + fi + if [[ -n "$no_proxy" ]]; then + export no_proxy=$no_proxy + fi +} + + # Helper to launch a service in a named screen # screen_it service "command-line" function screen_it { diff --git a/stack.sh b/stack.sh index da0faed0..c8b8db44 100755 --- a/stack.sh +++ b/stack.sh @@ -644,25 +644,7 @@ set -o xtrace # Install package requirements echo_summary "Installing package prerequisites" -if is_ubuntu; then - install_package $(get_packages $FILES/apts) -elif is_fedora; then - install_package $(get_packages $FILES/rpms) -elif is_suse; then - install_package $(get_packages $FILES/rpms-suse) -else - exit_distro_not_supported "list of packages" -fi - -if [[ $SYSLOG != "False" ]]; then - if is_ubuntu || is_fedora; then - install_package rsyslog-relp - elif is_suse; then - install_package rsyslog-module-relp - else - exit_distro_not_supported "rsyslog-relp installation" - fi -fi +$TOP_DIR/tools/install_prereqs.sh if is_service_enabled rabbit; then # Install rabbitmq-server diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh new file mode 100755 index 00000000..0bf217b3 --- /dev/null +++ b/tools/install_prereqs.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env bash + +# **install_prereqs.sh** + +# Install system package prerequisites +# +# install_prereqs.sh [-f] +# +# -f Force an install run now + + +if [[ -n "$1" && "$1" = "-f" ]]; then + FORCE=1 +fi + +# Keep track of 
the devstack directory +TOP_DIR=$(cd $(dirname "$0")/.. && pwd) + +# Import common functions +source $TOP_DIR/functions + +# Determine what system we are running on. This provides ``os_VENDOR``, +# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` +# and ``DISTRO`` +GetDistro + +# Needed to get ``ENABLED_SERVICES`` +source $TOP_DIR/stackrc + +# Prereq dirs are here +FILES=$TOP_DIR/files + +# Minimum wait time +PREREQ_RERUN_MARKER=${PREREQ_RERUN_MARKER:-$TOP_DIR/.prereqs} +PREREQ_RERUN_HOURS=${PREREQ_RERUN_HOURS:-2} +PREREQ_RERUN_SECONDS=$((60*60*$PREREQ_RERUN_HOURS)) + +NOW=$(date "+%s") +LAST_RUN=$(head -1 $PREREQ_RERUN_MARKER 2>/dev/null || echo "0") +DELTA=$(($NOW - $LAST_RUN)) +if [[ $DELTA -lt $PREREQ_RERUN_SECONDS && -z "$FORCE" ]]; then + echo "Re-run time has not expired ($(($PREREQ_RERUN_SECONDS - $DELTA)) seconds remaining); exiting..." + exit 0 +fi + +# Make sure the proxy config is visible to sub-processes +re_export_proxy_variables + +# Install Packages +# ================ + +# Install package requirements +if is_ubuntu; then + install_package $(get_packages $FILES/apts) +elif is_fedora; then + install_package $(get_packages $FILES/rpms) +elif is_suse; then + install_package $(get_packages $FILES/rpms-suse) +else + exit_distro_not_supported "list of packages" +fi + +if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then + if is_ubuntu || is_fedora; then + install_package rsyslog-relp + elif is_suse; then + install_package rsyslog-module-relp + else + exit_distro_not_supported "rsyslog-relp installation" + fi +fi + + +# Mark end of run +# --------------- + +date "+%s" >$PREREQ_RERUN_MARKER +date >>$PREREQ_RERUN_MARKER From 560346b506616a505718dd18c5053b4bf4360a5b Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 13 Dec 2012 17:05:24 -0600 Subject: [PATCH 858/967] Set up Cinder for TLS * Start c-api proxy if 'tls-proxy' is enabled * Configure Cinder service catalog for TLS Change-Id: Ic692a0a16ffa51bfd4bfb67f827cd941ac0e72a4 --- lib/cinder | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/lib/cinder b/lib/cinder index 5477e26f..d69790c9 100644 --- a/lib/cinder +++ b/lib/cinder @@ -37,6 +37,12 @@ CINDER_CONF_DIR=/etc/cinder CINDER_CONF=$CINDER_CONF_DIR/cinder.conf CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini +# Public facing bits +CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST} +CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776} +CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776} +CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} + # Support entry points installation of console scripts if [[ -d $CINDER_DIR/bin ]]; then CINDER_BIN_DIR=$CINDER_DIR/bin @@ -122,6 +128,11 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.openstack.volume.contrib.standard_extensions iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH + if is_service_enabled tls-proxy; then + # Set the service port for a proxy to take the original + iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT + fi + if [ "$SYSLOG" != "False" ]; then iniset $CINDER_CONF DEFAULT use_syslog True fi @@ -193,9 +204,9 @@ create_cinder_accounts() { keystone endpoint-create \ --region RegionOne \ --service_id $CINDER_SERVICE \ - --publicurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ - --internalurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" + --publicurl 
"$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ + --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ + --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" fi fi } @@ -297,6 +308,11 @@ function start_cinder() { screen_it c-api "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" screen_it c-vol "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" screen_it c-sch "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF" + + # Start proxies if enabled + if is_service_enabled c-api && is_service_enabled tls-proxy; then + start_tls_proxy '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT & + fi } # stop_cinder() - Stop running processes From a3b0255313996ec13153d83e898e59b98bbbe973 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 19 Dec 2012 16:27:12 -0600 Subject: [PATCH 859/967] Fix script matching in exercise.sh This prevents false matches in exercise skip test Change-Id: I5656a20bcf11b2ccaf55e280655d0600124adedc --- exercise.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercise.sh b/exercise.sh index a0349ce4..5b3c56e2 100755 --- a/exercise.sh +++ b/exercise.sh @@ -28,7 +28,7 @@ skips="" # Loop over each possible script (by basename) for script in $basenames; do - if [[ "$SKIP_EXERCISES" =~ $script ]] ; then + if [[ ,$SKIP_EXERCISES, =~ ,$script, ]] ; then skips="$skips $script" else echo "=====================================================================" From 588eb4129d34ea58fd40438eb1c6edd1a9f9a2d0 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 20 Dec 2012 10:57:16 +0100 Subject: [PATCH 860/967] Fix iniset and his friends * In python the white spaces are part of the section name * Handle options with empty value * Support paths with white spaces Change-Id: I69a584608853cfdb8b7dce1e24d929216ef2fc41 --- functions | 28 +++++++++++++++++++--------- tests/functions.sh | 43 ++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 61 insertions(+), 10 deletions(-) diff --git a/functions b/functions index 1b7d1308..3bf06552 100644 --- a/functions +++ b/functions @@ -460,7 +460,7 @@ function inicomment() { local file=$1 local section=$2 local option=$3 - sed -i -e "/^\[ *$section *\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" $file + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file" } # Uncomment an option in an INI file @@ -469,7 +469,7 @@ function iniuncomment() { local file=$1 local section=$2 local option=$3 - sed -i -e "/^\[ *$section *\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" $file + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file" } @@ -480,10 +480,20 @@ function iniget() { local section=$2 local option=$3 local line - line=$(sed -ne "/^\[ *$section *\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" $file) + line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") echo ${line#*=} } +# Determinate is the given option present in the INI file +# ini_has_option config-file section option +function ini_has_option() { + local file=$1 + local section=$2 + local option=$3 + local line + line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + [ -n "$line" ] +} # Set an option in an INI file # iniset config-file section option value @@ -492,18 +502,18 @@ function iniset() { local section=$2 local option=$3 local 
value=$4 - if ! grep -q "^\[ *$section *\]" $file; then + if ! grep -q "^\[$section\]" "$file"; then # Add section at the end - echo -e "\n[$section]" >>$file + echo -e "\n[$section]" >>"$file" fi - if [[ -z "$(iniget $file $section $option)" ]]; then + if ! ini_has_option "$file" "$section" "$option"; then # Add it - sed -i -e "/^\[ *$section *\]/ a\\ + sed -i -e "/^\[$section\]/ a\\ $option = $value -" $file +" "$file" else # Replace it - sed -i -e "/^\[ *$section *\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" $file + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" "$file" fi } diff --git a/tests/functions.sh b/tests/functions.sh index be48729f..4fe64436 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -57,6 +57,9 @@ handlers=ee,ff [ ccc ] spaces = yes + +[ddd] +empty = EOF # Test with spaces @@ -79,13 +82,22 @@ fi # Test with spaces in section header -VAL=$(iniget test.ini ccc spaces) +VAL=$(iniget test.ini " ccc " spaces) if [[ "$VAL" == "yes" ]]; then echo "OK: $VAL" else echo "iniget failed: $VAL" fi +iniset test.ini "b b" opt_ion 42 + +VAL=$(iniget test.ini "b b" opt_ion) +if [[ "$VAL" == "42" ]]; then + echo "OK: $VAL" +else + echo "iniget failed: $VAL" +fi + # Test without spaces, end of file VAL=$(iniget test.ini bbb handlers) @@ -104,6 +116,29 @@ else echo "iniget failed: $VAL" fi +# test empty option +if ini_has_option test.ini ddd empty; then + echo "OK: ddd.empty present" +else + echo "ini_has_option failed: ddd.empty not found" +fi + +# test non-empty option +if ini_has_option test.ini bbb handlers; then + echo "OK: bbb.handlers present" +else + echo "ini_has_option failed: bbb.handlers not found" +fi + +# test changing empty option +iniset test.ini ddd empty "42" + +VAL=$(iniget test.ini ddd empty) +if [[ "$VAL" == "42" ]]; then + echo "OK: $VAL" +else + echo "iniget failed: $VAL" +fi # Test section not exist @@ -132,6 +167,12 @@ else echo "iniget failed: $VAL" fi +if ! ini_has_option test.ini aaa debug; then + echo "OK aaa.debug not present" +else + echo "ini_has_option failed: aaa.debug" +fi + iniset test.ini aaa debug "999" VAL=$(iniget test.ini aaa debug) From e7bca2f86446e773472603b18728ce38c33acde9 Mon Sep 17 00:00:00 2001 From: Vincent Untz Date: Thu, 20 Dec 2012 15:09:20 +0100 Subject: [PATCH 861/967] Make opensuse-12.2 a supported distribution This has been working well in my tests, and has been confirmed to work by others. I'll of course step up to fix things for openSUSE when needed. Change-Id: I1ecd345adf975b082aff3a473ab94291b39c8c93 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index cf638e83..43511f47 100755 --- a/stack.sh +++ b/stack.sh @@ -110,7 +110,7 @@ disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18) ]]; then +if [[ ! 
${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18|opensuse-12.2) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then echo "If you wish to run this script anyway run with FORCE=yes" From 22ef57317222b3e64eb5d2dcb3ae0588738062e3 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 16 Dec 2012 14:03:06 +0100 Subject: [PATCH 862/967] Add generic account rc creater * Creates account rc files for all tenant user * Able to create new accounts * The rc files contains certificates for image bundle * euca related steps can be simpler in the future Change-Id: I917bffb64e09a5d85c84cde45777c49eaca65e64 --- .gitignore | 1 + stack.sh | 11 ++ tools/create_userrc.sh | 254 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 266 insertions(+) create mode 100755 tools/create_userrc.sh diff --git a/.gitignore b/.gitignore index 17cb38c8..5e770c80 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,4 @@ files/*.gz files/images stack-screenrc *.pem +accrc diff --git a/stack.sh b/stack.sh index cf638e83..26376b63 100755 --- a/stack.sh +++ b/stack.sh @@ -1672,6 +1672,17 @@ if is_service_enabled heat; then start_heat fi +# Create account rc files +# ======================= + +# Creates source able script files for easier user switching. +# This step also creates certificates for tenants and users, +# which is helpful in image bundle steps. + +if is_service_enabled nova && is_service_enabled key; then + $TOP_DIR/tools/create_userrc.sh -PA --target-dir $TOP_DIR/accrc +fi + # Install Images # ============== diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh new file mode 100755 index 00000000..e39c1570 --- /dev/null +++ b/tools/create_userrc.sh @@ -0,0 +1,254 @@ +#!/usr/bin/env bash + +#Warning: This script just for development purposes + +ACCOUNT_DIR=./accrc + +display_help() +{ +cat < + +This script creates certificates and sourcable rc files per tenant/user. + +Target account directory hierarchy: +target_dir-| + |-cacert.pem + |-tenant1-name| + | |- user1 + | |- user1-cert.pem + | |- user1-pk.pem + | |- user2 + | .. + |-tenant2-name.. + .. + +Optional Arguments +-P include password to the rc files; with -A it assume all users password is the same +-A try with all user +-u create files just for the specified user +-C create user and tenant, the specifid tenant will be the user's tenant +-r when combined with -C and the (-u) user exists it will be the user's tenant role in the (-C)tenant (default: Member) +-p password for the user +--os-username +--os-password +--os-tenant-name +--os-tenant-id +--os-auth-url +--target-dir +--skip-tenant +--debug + +Example: +$0 -AP +$0 -P -C mytenant -u myuser -p mypass +EOF +} + +if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,help,debug -- "$@") +then + #parse error + display_help + exit 1 +fi +eval set -- $options +ADDPASS="" + +# The services users usually in the service tenant. +# rc files for service users, is out of scope. +# Supporting different tanent for services is out of scope. 
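+# The enclosing commas allow exact-match filtering with grep later in this script;
+# e.g. passing "--skip-tenant mytenant" (any hypothetical tenant name) appends
+# ",mytenant," here so no rc files are generated for that tenant.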
+SKIP_TENANT=",service," # tenant names are between commas(,) +MODE="" +ROLE=Member +USER_NAME="" +USER_PASS="" +while [ $# -gt 0 ] +do + case "$1" in + -h|--help) display_help; exit 0 ;; + --os-username) export OS_USERNAME=$2; shift ;; + --os-password) export OS_PASSWORD=$2; shift ;; + --os-tenant-name) export OS_TENANT_NAME=$2; shift ;; + --os-tenant-id) export OS_TENANT_ID=$2; shift ;; + --skip-tenant) SKIP_TENANT="$SKIP_TENANT$2,"; shift ;; + --os-auth-url) export OS_AUTH_URL=$2; shift ;; + --target-dir) ACCOUNT_DIR=$2; shift ;; + --debug) set -o xtrace ;; + -u) MODE=${MODE:-one}; USER_NAME=$2; shift ;; + -p) USER_PASS=$2; shift ;; + -A) MODE=all; ;; + -P) ADDPASS="yes" ;; + -C) MODE=create; TENANT=$2; shift ;; + -r) ROLE=$2; shift ;; + (--) shift; break ;; + (-*) echo "$0: error - unrecognized option $1" >&2; display_help; exit 1 ;; + (*) echo "$0: error - unexpected argument $1" >&2; display_help; exit 1 ;; + esac + shift +done + +if [ -z "$OS_PASSWORD" ]; then + if [ -z "$ADMIN_PASSWORD" ];then + echo "The admin password is required option!" >&2 + exit 2 + else + OS_PASSWORD=$ADMIN_PASSWORD + fi +fi + +if [ -z "$OS_TENANT_NAME" -a -z "$OS_TENANT_ID" ]; then + export OS_TENANT_NAME=admin +fi + +if [ -z "$OS_USERNAME" ]; then + export OS_USERNAME=admin +fi + +if [ -z "$OS_AUTH_URL" ]; then + export OS_AUTH_URL=http://localhost:5000/v2.0/ +fi + +USER_PASS=${USER_PASS:-$OS_PASSWORD} +USER_NAME=${USER_NAME:-$OS_USERNAME} + +if [ -z "$MODE" ]; then + echo "You must specify at least -A or -u parameter!" >&2 + echo + display_help + exit 3 +fi + +export -n SERVICE_TOKEN SERVICE_ENDPOINT OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT + +EC2_URL=http://localhost:8773/service/Cloud +S3_URL=http://localhost:3333 + +ec2=`keystone endpoint-get --service ec2 | awk '/\|[[:space:]]*ec2.publicURL/ {print $4}'` +[ -n "$ec2" ] && EC2_URL=$ec2 + +s3=`keystone endpoint-get --service s3 | awk '/\|[[:space:]]*s3.publicURL/ {print $4}'` +[ -n "$s3" ] && S3_URL=$s3 + + +mkdir -p "$ACCOUNT_DIR" +ACCOUNT_DIR=`readlink -f "$ACCOUNT_DIR"` +EUCALYPTUS_CERT=$ACCOUNT_DIR/cacert.pem +mv "$EUCALYPTUS_CERT" "$EUCALYPTUS_CERT.old" &>/dev/null +if ! 
nova x509-get-root-cert "$EUCALYPTUS_CERT"; then + echo "Failed to update the root certificate: $EUCALYPTUS_CERT" >&2 + mv "$EUCALYPTUS_CERT.old" "$EUCALYPTUS_CERT" &>/dev/null +fi + + +function add_entry(){ + local user_id=$1 + local user_name=$2 + local tenant_id=$3 + local tenant_name=$4 + local user_passwd=$5 + + # The admin user can see all user's secret AWS keys, it does not looks good + local line=`keystone ec2-credentials-list --user_id $user_id | grep -E "^\\|[[:space:]]*($tenant_name|$tenant_id)[[:space:]]*\\|" | head -n 1` + if [ -z "$line" ]; then + keystone ec2-credentials-create --user-id $user_id --tenant-id $tenant_id 1>&2 + line=`keystone ec2-credentials-list --user_id $user_id | grep -E "^\\|[[:space:]]*($tenant_name|$tenant_id)[[:space:]]*\\|" | head -n 1` + fi + local ec2_access_key ec2_secret_key + read ec2_access_key ec2_secret_key <<< `echo $line | awk '{print $4 " " $6 }'` + mkdir -p "$ACCOUNT_DIR/$tenant_name" + local rcfile="$ACCOUNT_DIR/$tenant_name/$user_name" + # The certs subject part are the tenant ID "dash" user ID, but the CN should be the first part of the DN + # Generally the subject DN parts should be in reverse order like the Issuer + # The Serial does not seams correctly marked either + local ec2_cert="$rcfile-cert.pem" + local ec2_private_key="$rcfile-pk.pem" + # Try to preserve the original file on fail (best effort) + mv "$ec2_private_key" "$ec2_private_key.old" &>/dev/null + mv "$ec2_cert" "$ec2_cert.old" &>/dev/null + # It will not create certs when the password is incorrect + if ! nova --os-password "$user_passwd" --os-username "$user_name" --os-tenant-name "$tenant_name" x509-create-cert "$ec2_private_key" "$ec2_cert"; then + mv "$ec2_private_key.old" "$ec2_private_key" &>/dev/null + mv "$ec2_cert.old" "$ec2_cert" &>/dev/null + fi + cat >"$rcfile" <>"$rcfile" + fi +} + +#admin users expected +function create_or_get_tenant(){ + local tenant_name=$1 + local tenant_id=`keystone tenant-list | awk '/\|[[:space:]]*'"$tenant_name"'[[:space:]]*\|.*\|/ {print $2}'` + if [ -n "$tenant_id" ]; then + echo $tenant_id + else + keystone tenant-create --name "$tenant_name" | awk '/\|[[:space:]]*id[[:space:]]*\|.*\|/ {print $4}' + fi +} + +function create_or_get_role(){ + local role_name=$1 + local role_id=`keystone role-list| awk '/\|[[:space:]]*'"$role_name"'[[:space:]]*\|/ {print $2}'` + if [ -n "$role_id" ]; then + echo $role_id + else + keystone tenant-create --name "$role_name" |awk '/\|[[:space:]]*id[[:space:]]*\|.*\|/ {print $4}' + fi +} + +# Provides empty string when the user does not exists +function get_user_id(){ + local user_name=$1 + keystone user-list | awk '/^\|[^|]*\|[[:space:]]*'"$user_name"'[[:space:]]*\|.*\|/ {print $2}' +} + +if [ $MODE != "create" ]; then +# looks like I can't ask for all tenant related to a specified user + for tenant_id_at_name in `keystone tenant-list | awk 'BEGIN {IGNORECASE = 1} /true[[:space:]]*\|$/ {print $2 "@" $4}'`; do + read tenant_id tenant_name <<< `echo "$tenant_id_at_name" | sed 's/@/ /'` + if echo $SKIP_TENANT| grep -q ",$tenant_name,"; then + continue; + fi + for user_id_at_name in `keystone user-list --tenant-id $tenant_id | awk 'BEGIN {IGNORECASE = 1} /true[[:space:]]*\|[^|]*\|$/ {print $2 "@" $4}'`; do + read user_id user_name <<< `echo "$user_id_at_name" | sed 's/@/ /'` + if [ $MODE = one -a "$user_name" != "$USER_NAME" ]; then + continue; + fi + add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS" + done + done +else + tenant_name=$TENANT + tenant_id=`create_or_get_tenant 
"$TENANT"` + user_name=$USER_NAME + user_id=`get_user_id $user_name` + if [ -z "$user_id" ]; then + #new user + user_id=`keystone user-create --name "$user_name" --tenant-id "$tenant_id" --pass "$USER_PASS" --email "$user_name@example.com" | awk '/\|[[:space:]]*id[[:space:]]*\|.*\|/ {print $4}'` + #The password is in the cmd line. It is not a good thing + add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS" + else + #new role + role_id=`create_or_get_role "$ROLE"` + keystone user-role-add --user-id "$user_id" --tenant-id "$tenant_id" --role-id "$role_id" + add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS" + fi +fi From 3a3a2bac674041f5bb92bc1ef59c7fc55a9946bd Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 11 Dec 2012 15:26:24 -0600 Subject: [PATCH 863/967] Set up Nova for TLS * Start n-api proxy if 'tls-proxy' is enabled * Configure nova service catalog for TLS Change-Id: If031eb315f76c5c441a25fe3582b626bbee73c6e --- functions | 8 ++++++++ lib/nova | 41 ++++++++++++++++++++++++++++++++++++++--- stack.sh | 7 +------ 3 files changed, 47 insertions(+), 9 deletions(-) diff --git a/functions b/functions index 1b7d1308..9565e10d 100644 --- a/functions +++ b/functions @@ -996,6 +996,14 @@ function use_exclusive_service { return 0 } +# Wait for an HTTP server to start answering requests +# wait_for_service timeout url +function wait_for_service() { + local timeout=$1 + local url=$2 + timeout $timeout sh -c "while ! http_proxy= https_proxy= curl -s $url >/dev/null; do sleep 1; done" +} + # Wrapper for ``yum`` to set proxy environment variables # Uses globals ``OFFLINE``, ``*_proxy` # yum_install package [package ...] diff --git a/lib/nova b/lib/nova index 840965ee..04a869e7 100644 --- a/lib/nova +++ b/lib/nova @@ -39,6 +39,12 @@ NOVA_CONF_DIR=/etc/nova NOVA_CONF=$NOVA_CONF_DIR/nova.conf NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} +# Public facing bits +NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST} +NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774} +NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774} +NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} + # Support entry points installation of console scripts if [[ -d $NOVA_DIR/bin ]]; then NOVA_BIN_DIR=$NOVA_DIR/bin @@ -170,6 +176,10 @@ function configure_nova() { s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g; s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g; " -i $NOVA_API_PASTE_INI + iniset $NOVA_API_PASTE_INI filter:authtoken auth_host $SERVICE_HOST + if is_service_enabled tls-proxy; then + iniset $NOVA_API_PASTE_INI filter:authtoken auth_protocol $SERVICE_PROTOCOL + fi fi iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR @@ -324,9 +334,9 @@ create_nova_accounts() { keystone endpoint-create \ --region RegionOne \ --service_id $NOVA_SERVICE \ - --publicurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \ - --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" + --publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ + --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ + --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" fi fi } @@ -361,6 +371,10 @@ function create_nova_conf() { if is_service_enabled n-api; then add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS" + if is_service_enabled tls-proxy; then + # 
Set the service port for a proxy to take the original + add_nova_opt "osapi_compute_listen_port=$NOVA_SERVICE_PORT_INT" + fi fi if is_service_enabled cinder; then add_nova_opt "volume_api_class=nova.volume.cinder.API" @@ -472,6 +486,27 @@ function install_nova() { git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH } +# start_nova_api() - Start the API process ahead of other things +function start_nova_api() { + # Get right service port for testing + local service_port=$NOVA_SERVICE_PORT + if is_service_enabled tls-proxy; then + service_port=$NOVA_SERVICE_PORT_INT + fi + + screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api" + echo "Waiting for nova-api to start..." + if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then + echo "nova-api did not start" + exit 1 + fi + + # Start proxies if enabled + if is_service_enabled tls-proxy; then + start_tls_proxy '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT & + fi +} + # start_nova() - Start running processes, including screen function start_nova() { # The group **libvirtd** is added to the current user in this script. diff --git a/stack.sh b/stack.sh index f2fd68cc..a3772177 100755 --- a/stack.sh +++ b/stack.sh @@ -1568,12 +1568,7 @@ screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver" # Launch the nova-api and wait for it to answer before continuing if is_service_enabled n-api; then echo_summary "Starting Nova API" - screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api" - echo "Waiting for nova-api to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then - echo "nova-api did not start" - exit 1 - fi + start_nova_api fi if is_service_enabled q-svc; then From 252f2f533ba8cb6607ddbbcdd1c4aff01dbfb5c3 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 20 Dec 2012 16:41:57 -0500 Subject: [PATCH 864/967] clean up cinder on an unstack cinder currently has issues that leave volumes around after tempest tests. Make sure that cinder gets cleaned up to a zero state on an unstack.sh so that we can reset the environment. Change-Id: I448340899bf0fae7d4d16fa26da17feafcef888f --- lib/cinder | 36 ++++++++++++++++++++++++++++++++++-- unstack.sh | 31 +------------------------------ 2 files changed, 35 insertions(+), 32 deletions(-) diff --git a/lib/cinder b/lib/cinder index 2b2f8f1b..dadc8f14 100644 --- a/lib/cinder +++ b/lib/cinder @@ -51,8 +51,40 @@ VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} # cleanup_cinder() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_cinder() { - # This function intentionally left blank - : + # ensure the volume group is cleared up because fails might + # leave dead volumes in the group + TARGETS=$(sudo tgtadm --op show --mode target) + if [ $? -ne 0 ]; then + # If tgt driver isn't running this won't work obviously + # So check the response and restart if need be + echo "tgtd seems to be in a bad state, restarting..." 
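+        # The tgt service name differs by distro: "tgt" on Ubuntu, "tgtd" elsewhere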
+ if is_ubuntu; then + restart_service tgt + else + restart_service tgtd + fi + TARGETS=$(sudo tgtadm --op show --mode target) + fi + + if [[ -n "$TARGETS" ]]; then + iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's///') ) + for i in "${iqn_list[@]}"; do + echo removing iSCSI target: $i + sudo tgt-admin --delete $i + done + fi + + if is_service_enabled cinder; then + sudo rm -rf $CINDER_STATE_PATH/volumes/* + fi + + if is_ubuntu; then + stop_service tgt + else + stop_service tgtd + fi + + sudo vgremove -f $VOLUME_GROUP } # configure_cinder() - Set config files, create data dirs, etc diff --git a/unstack.sh b/unstack.sh index 09e0de6b..949745e5 100755 --- a/unstack.sh +++ b/unstack.sh @@ -71,36 +71,7 @@ SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* # Get the iSCSI volumes if is_service_enabled cinder; then - TARGETS=$(sudo tgtadm --op show --mode target) - if [ $? -ne 0 ]; then - # If tgt driver isn't running this won't work obviously - # So check the response and restart if need be - echo "tgtd seems to be in a bad state, restarting..." - if is_ubuntu; then - restart_service tgt - else - restart_service tgtd - fi - TARGETS=$(sudo tgtadm --op show --mode target) - fi - - if [[ -n "$TARGETS" ]]; then - iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's///') ) - for i in "${iqn_list[@]}"; do - echo removing iSCSI target: $i - sudo tgt-admin --delete $i - done - fi - - if is_service_enabled cinder; then - sudo rm -rf $CINDER_STATE_PATH/volumes/* - fi - - if is_ubuntu; then - stop_service tgt - else - stop_service tgtd - fi + cleanup_cinder fi if [[ -n "$UNSTACK_ALL" ]]; then From c24e23b43e42aeec636d58a5a66787541b5b4488 Mon Sep 17 00:00:00 2001 From: "Cody A.W. Somerville" Date: Fri, 21 Dec 2012 02:10:45 -0500 Subject: [PATCH 865/967] Fix selection of image(s) tested by tempest. The variable DEFAULT_IMAGE_NAME is set to 'cirros-0.3.0-x86_64-uec' by default. This will cause configure_tempest to 'exit 1' and abort stack.sh if an image with that name is not uploaded to glance. According to the relevant code comment, this behaviour is incorrect. Updated code to match behaviour described in comment: If image with name matching DEFAULT_IMAGE_NAME exists, use it for both primary and secondary test image otherwise select first image and, if available, second image listed by glance. Will still 'exit 1' if no images are available at all (though it probably shouldn't). Change-Id: I92773d4afd52cf533d16772ae2a087e23e206f8c Fixes: bug #1092713 --- lib/tempest | 50 +++++++++++++++++++++++++++----------------------- 1 file changed, 27 insertions(+), 23 deletions(-) diff --git a/lib/tempest b/lib/tempest index 18599219..c28af860 100644 --- a/lib/tempest +++ b/lib/tempest @@ -85,30 +85,34 @@ function configure_tempest() { # first image returned and set ``image_uuid_alt`` to the second, # if there is more than one returned... # ... 
Also ensure we only take active images, so we don't get snapshots in process - image_lines=`glance image-list` - IFS=$'\n\r' - images="" - for line in $image_lines; do - if [ -z $DEFAULT_IMAGE_NAME ]; then - images="$images `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | cut -d' ' -f2`" - else - images="$images `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | grep "$DEFAULT_IMAGE_NAME" | cut -d' ' -f2`" + declare -a images + + while read -r IMAGE_NAME IMAGE_UUID; do + if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then + image_uuid="$IMAGE_UUID" + image_uuid_alt="$IMAGE_UUID" fi - done - # Create array of image UUIDs... - IFS=" " - images=($images) - num_images=${#images[*]} - echo "Found $num_images images" - if [[ $num_images -eq 0 ]]; then - echo "Found no valid images to use!" - exit 1 - fi - image_uuid=${images[0]} - image_uuid_alt=$image_uuid - if [[ $num_images -gt 1 ]]; then - image_uuid_alt=${images[1]} - fi + images+=($IMAGE_UUID) + done < <(glance image-list --status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }') + + case "${#images[*]}" in + 0) + echo "Found no valid images to use!" + exit 1 + ;; + 1) + if [ -z "$image_uuid" ]; then + image_uuid=${images[0]} + image_uuid_alt=${images[0]} + fi + ;; + *) + if [ -z "$image_uuid" ]; then + image_uuid=${images[0]} + image_uuid_alt=${images[1]} + fi + ;; + esac # Create tempest.conf from tempest.conf.sample # copy every time, because the image UUIDS are going to change From 8e36cbe8c8f06576f634452cdb16c9876840572e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 21 Dec 2012 15:39:28 +0100 Subject: [PATCH 866/967] Use new RPC notifier Stop using the deprecated rabbit_notifier. Change-Id: I84574c555031b23fb5f256d248af1cdafc8979ce Signed-off-by: Julien Danjou --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index d5cefc92..46b00696 100644 --- a/lib/nova +++ b/lib/nova @@ -395,7 +395,7 @@ function create_nova_conf() { if is_service_enabled ceilometer; then add_nova_opt "instance_usage_audit=True" add_nova_opt "instance_usage_audit_period=hour" - add_nova_opt "notification_driver=nova.openstack.common.notifier.rabbit_notifier" + add_nova_opt "notification_driver=nova.openstack.common.notifier.rpc_notifier" add_nova_opt "notification_driver=ceilometer.compute.nova_notifier" fi From 66afb47cb9b470bfa40f11f23ca4f80483cb7aad Mon Sep 17 00:00:00 2001 From: Akihiro MOTOKI Date: Fri, 21 Dec 2012 15:34:13 +0900 Subject: [PATCH 867/967] Refactor quantum installation * Move quantum installation to lib/quantum * Refactor quantum configuration * Move Quantum service account creation from keystone_data.sh to lib/quantum * Define generic functions to install third party programs * Minor cleanups related to Quantum * Kill dnsmasq which watches an interface 'ns-XXXXXX' in unstack.sh * Set default_floating_pool in nova.conf to make default flaoting pool work when PUBLIC_NETWORK_NAME is other than 'nova' * Make tempest work even when PRIVATE_NETWORK_NAME is other than 'private' Change-Id: I4a6e7fcebfb11556968f53ab6a0e862ce16bb139 --- AUTHORS | 1 + files/keystone_data.sh | 25 -- lib/nova | 11 + lib/quantum | 742 ++++++++++++++++++++++++++++++++++++++--- lib/ryu | 63 ++++ lib/tempest | 4 +- stack.sh | 517 ++-------------------------- unstack.sh | 11 +- 8 files changed, 803 insertions(+), 571 deletions(-) create mode 100644 lib/ryu diff --git a/AUTHORS b/AUTHORS index cd0acac1..ba68e329 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,6 
+1,7 @@ Aaron Lee Aaron Rosen Adam Gandelman +Akihiro MOTOKI Andrew Laski Andy Smith Anthony Young diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 71a8e5ef..4c76c9b5 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -5,7 +5,6 @@ # Tenant User Roles # ------------------------------------------------------------------ # service glance admin -# service quantum admin # if enabled # service swift admin # if enabled # service heat admin # if enabled # service ceilometer admin # if enabled @@ -148,30 +147,6 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then fi fi -if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - QUANTUM_USER=$(get_id keystone user-create \ - --name=quantum \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=quantum@example.com) - keystone user-role-add \ - --tenant_id $SERVICE_TENANT \ - --user_id $QUANTUM_USER \ - --role_id $ADMIN_ROLE - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - QUANTUM_SERVICE=$(get_id keystone service-create \ - --name=quantum \ - --type=network \ - --description="Quantum Service") - keystone endpoint-create \ - --region RegionOne \ - --service_id $QUANTUM_SERVICE \ - --publicurl "http://$SERVICE_HOST:9696/" \ - --adminurl "http://$SERVICE_HOST:9696/" \ - --internalurl "http://$SERVICE_HOST:9696/" - fi -fi - if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then CEILOMETER_USER=$(get_id keystone user-create --name=ceilometer \ --pass="$SERVICE_PASSWORD" \ diff --git a/lib/nova b/lib/nova index d5cefc92..26c5d3c6 100644 --- a/lib/nova +++ b/lib/nova @@ -348,6 +348,7 @@ function create_nova_conf() { add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF" add_nova_opt "force_dhcp_release=True" add_nova_opt "fixed_range=$FIXED_RANGE" + add_nova_opt "default_floating_pool=$PUBLIC_NETWORK_NAME" add_nova_opt "s3_host=$SERVICE_HOST" add_nova_opt "s3_port=$S3_SERVICE_PORT" add_nova_opt "osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions" @@ -413,6 +414,16 @@ function create_nova_conf() { done } +function create_nova_conf_nova_network() { + add_nova_opt "network_manager=nova.network.manager.$NET_MAN" + add_nova_opt "public_interface=$PUBLIC_INTERFACE" + add_nova_opt "vlan_interface=$VLAN_INTERFACE" + add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE" + if [ -n "$FLAT_INTERFACE" ]; then + add_nova_opt "flat_interface=$FLAT_INTERFACE" + fi +} + # init_nova() - Initialize databases, etc. 
function init_nova() { # Nova Database diff --git a/lib/quantum b/lib/quantum index 480aaa17..ea0e311c 100644 --- a/lib/quantum +++ b/lib/quantum @@ -5,6 +5,36 @@ # ``functions`` file # ``DEST`` must be defined +# ``stack.sh`` calls the entry points in this order: +# +# install_quantum +# install_quantumclient +# install_quantum_agent_packages +# install_quantum_third_party +# setup_quantum +# setup_quantumclient +# configure_quantum +# init_quantum +# configure_quantum_third_party +# init_quantum_third_party +# start_quantum_third_party +# create_nova_conf_quantum +# start_quantum_service_and_check +# create_quantum_initial_network +# setup_quantum_debug +# start_quantum_agents +# +# ``unstack.sh`` calls the entry points in this order: +# +# stop_quantum + +# Functions in lib/quantum are classified into the following categories: +# +# - entry points (called from stack.sh or unstack.sh) +# - internal functions +# - quantum exercises +# - 3rd party programs + # Quantum Networking # ------------------ @@ -31,8 +61,8 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace -# Defaults -# -------- +# Quantum Network Configuration +# ----------------------------- # Set up default directories QUANTUM_DIR=$DEST/quantum @@ -49,7 +79,6 @@ Q_PLUGIN=${Q_PLUGIN:-openvswitch} Q_PORT=${Q_PORT:-9696} # Default Quantum Host Q_HOST=${Q_HOST:-$HOST_IP} -# Which Quantum API nova should use # Default admin username Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum} # Default auth strategy @@ -59,6 +88,8 @@ Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} # Meta data IP Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP} +# Allow Overlapping IP among subnets +Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-False} # Use quantum-debug command Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} @@ -70,14 +101,587 @@ if is_service_enabled quantum; then QUANTUM_ROOTWRAP=$(get_rootwrap_location quantum) Q_RR_COMMAND="sudo $QUANTUM_ROOTWRAP $Q_RR_CONF_FILE" fi -fi + # Provider Network Configurations + # -------------------------------- + + # The following variables control the Quantum openvswitch and + # linuxbridge plugins' allocation of tenant networks and + # availability of provider networks. If these are not configured + # in localrc, tenant networks will be local to the host (with no + # remote connectivity), and no physical resources will be + # available for the allocation of provider networks. + + # To use GRE tunnels for tenant networks, set to True in + # localrc. GRE tunnels are only supported by the openvswitch + # plugin, and currently only on Ubuntu. + ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False} + + # If using GRE tunnels for tenant networks, specify the range of + # tunnel IDs from which tenant networks are allocated. Can be + # overriden in localrc in necesssary. + TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGE:-1:1000} + + # To use VLANs for tenant networks, set to True in localrc. VLANs + # are supported by the openvswitch and linuxbridge plugins, each + # requiring additional configuration described below. + ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} + + # If using VLANs for tenant networks, set in localrc to specify + # the range of VLAN VIDs from which tenant networks are + # allocated. An external network switch must be configured to + # trunk these VLANs between hosts for multi-host connectivity. 
+ # + # Example: ``TENANT_VLAN_RANGE=1000:1999`` + TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} + + # If using VLANs for tenant networks, or if using flat or VLAN + # provider networks, set in localrc to the name of the physical + # network, and also configure OVS_PHYSICAL_BRIDGE for the + # openvswitch agent or LB_PHYSICAL_INTERFACE for the linuxbridge + # agent, as described below. + # + # Example: ``PHYSICAL_NETWORK=default`` + PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} + + # With the openvswitch plugin, if using VLANs for tenant networks, + # or if using flat or VLAN provider networks, set in localrc to + # the name of the OVS bridge to use for the physical network. The + # bridge will be created if it does not already exist, but a + # physical interface must be manually added to the bridge as a + # port for external connectivity. + # + # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` + OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} + + # With the linuxbridge plugin, if using VLANs for tenant networks, + # or if using flat or VLAN provider networks, set in localrc to + # the name of the network interface to use for the physical + # network. + # + # Example: ``LB_PHYSICAL_INTERFACE=eth1`` + LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} + + # With the openvswitch plugin, set to True in localrc to enable + # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. + # + # Example: ``OVS_ENABLE_TUNNELING=True`` + OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} +fi # Entry Points # ------------ -# configure_quantum_rootwrap() - configure Quantum's rootwrap -function configure_quantum_rootwrap() { +# configure_quantum() +# Set common config for all quantum server and agents. +function configure_quantum() { + _configure_quantum_common + _configure_quantum_rpc + + if is_service_enabled q-svc; then + _configure_quantum_service + fi + if is_service_enabled q-agt; then + _configure_quantum_plugin_agent + fi + if is_service_enabled q-dhcp; then + _configure_quantum_dhcp_agent + fi + if is_service_enabled q-l3; then + _configure_quantum_l3_agent + fi + if is_service_enabled q-meta; then + _configure_quantum_metadata_agent + fi + + _configure_quantum_debug_command + + _cleanup_quantum +} + +function create_nova_conf_quantum() { + add_nova_opt "network_api_class=nova.network.quantumv2.api.API" + add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME" + add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD" + add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" + add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY" + add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME" + add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT" + + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"} + elif [[ "$Q_PLUGIN" = "ryu" ]]; then + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver"} + add_nova_opt "libvirt_ovs_integration_bridge=$OVS_BRIDGE" + add_nova_opt "linuxnet_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" + add_nova_opt "libvirt_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" + fi + add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER" + add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER" + if is_service_enabled q-meta; then + add_nova_opt 
"service_quantum_metadata_proxy=True" + fi +} + +# create_quantum_accounts() - Set up common required quantum accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service quantum admin # if enabled + +# Migrated from keystone_data.sh +function create_quantum_accounts() { + + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then + QUANTUM_USER=$(keystone user-create \ + --name=quantum \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=quantum@example.com \ + | grep " id " | get_field 2) + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $QUANTUM_USER \ + --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + QUANTUM_SERVICE=$(keystone service-create \ + --name=quantum \ + --type=network \ + --description="Quantum Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $QUANTUM_SERVICE \ + --publicurl "http://$SERVICE_HOST:9696/" \ + --adminurl "http://$SERVICE_HOST:9696/" \ + --internalurl "http://$SERVICE_HOST:9696/" + fi + fi +} + +function create_quantum_initial_network() { + TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1) + + # Create a small network + # Since quantum command is executed in admin context at this point, + # ``--tenant_id`` needs to be specified. + NET_ID=$(quantum net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + + if is_service_enabled q-l3; then + # Create a router, and add the private subnet as one of its interfaces + ROUTER_ID=$(quantum router-create --tenant_id $TENANT_ID router1 | grep ' id ' | get_field 2) + quantum router-interface-add $ROUTER_ID $SUBNET_ID + # Create an external network, and a subnet. Configure the external network as router gw + EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2) + EXT_GW_IP=$(quantum subnet-create --ip_version 4 $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) + quantum router-gateway-set $ROUTER_ID $EXT_NET_ID + + if is_quantum_ovs_base_plugin "$Q_PLUGIN" && [[ "$Q_USE_NAMESPACE" = "True" ]]; then + CIDR_LEN=${FLOATING_RANGE#*/} + sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE + sudo ip link set $PUBLIC_BRIDGE up + ROUTER_GW_IP=`quantum port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' '{ print $8; }'` + sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP + fi + if [[ "$Q_USE_NAMESPACE" == "False" ]]; then + # Explicitly set router id in l3 agent configuration + iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID + fi + fi +} + +# init_quantum() - Initialize databases, etc. 
+function init_quantum() { + : +} + +# install_quantum() - Collect source and prepare +function install_quantum() { + git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH +} + +# install_quantumclient() - Collect source and prepare +function install_quantumclient() { + git_clone $QUANTUMCLIENT_REPO $QUANTUMCLIENT_DIR $QUANTUMCLIENT_BRANCH +} + +# install_quantum_agent_packages() - Collect source and prepare +function install_quantum_agent_packages() { + if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then + # Install deps + # FIXME add to ``files/apts/quantum``, but don't install if not needed! + if is_ubuntu; then + kernel_version=`cat /proc/version | cut -d " " -f3` + install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version + else + ### FIXME(dtroyer): Find RPMs for OpenVSwitch + echo "OpenVSwitch packages need to be located" + # Fedora does not started OVS by default + restart_service openvswitch + fi + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + install_package bridge-utils + fi +} + +function is_quantum_ovs_base_plugin() { + local plugin=$1 + if [[ ",openvswitch,ryu," =~ ,${plugin}, ]]; then + return 0 + fi + return 1 +} + +function setup_quantum() { + setup_develop $QUANTUM_DIR +} + +function setup_quantumclient() { + setup_develop $QUANTUMCLIENT_DIR +} + +# Start running processes, including screen +function start_quantum_service_and_check() { + # Start the Quantum service + screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" + echo "Waiting for Quantum to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9696; do sleep 1; done"; then + echo "Quantum did not start" + exit 1 + fi +} + +# Start running processes, including screen +function start_quantum_agents() { + # Start up the quantum agents if enabled + screen_it q-agt "python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" + screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE" + screen_it q-meta "python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE" + screen_it q-l3 "python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE" +} + +# stop_quantum() - Stop running processes (non-screen) +function stop_quantum() { + if is_service_enabled q-dhcp; then + pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') + [ ! -z "$pid" ] && sudo kill -9 $pid + fi +} + +# _cleanup_quantum() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function _cleanup_quantum() { + : +} + +# _configure_quantum_common() +# Set common config for all quantum server and agents. +# This MUST be called before other _configure_quantum_* functions. +function _configure_quantum_common() { + # Put config files in ``QUANTUM_CONF_DIR`` for everyone to find + if [[ ! 
-d $QUANTUM_CONF_DIR ]]; then + sudo mkdir -p $QUANTUM_CONF_DIR + fi + sudo chown `whoami` $QUANTUM_CONF_DIR + + cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF + + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch + Q_PLUGIN_CONF_FILENAME=ovs_quantum_plugin.ini + Q_DB_NAME="ovs_quantum" + Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2" + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge + Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini + Q_DB_NAME="quantum_linux_bridge" + Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2" + elif [[ "$Q_PLUGIN" = "ryu" ]]; then + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/ryu + Q_PLUGIN_CONF_FILENAME=ryu.ini + Q_DB_NAME="ovs_quantum" + Q_PLUGIN_CLASS="quantum.plugins.ryu.ryu_quantum_plugin.RyuQuantumPluginV2" + fi + + if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then + echo "Quantum plugin not set.. exiting" + exit 1 + fi + + # If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``QUANTUM_CONF_DIR`` + mkdir -p /$Q_PLUGIN_CONF_PATH + Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME + cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE + + database_connection_url dburl $Q_DB_NAME + iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection $dburl + unset dburl + + _quantum_setup_rootwrap +} + +function _configure_quantum_debug_command() { + if [[ "$Q_USE_DEBUG_COMMAND" != "True" ]]; then + return + fi + + cp $QUANTUM_DIR/etc/l3_agent.ini $QUANTUM_TEST_CONFIG_FILE + + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT verbose False + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT debug False + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND" + + _quantum_setup_keystone $QUANTUM_TEST_CONFIG_FILE DEFAULT set_auth_url + _quantum_setup_interface_driver $QUANTUM_TEST_CONFIG_FILE + + if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge '' + fi + + if [[ "$Q_PLUGIN" = "ryu" ]]; then + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT + fi +} + +function _configure_quantum_dhcp_agent() { + AGENT_DHCP_BINARY="$QUANTUM_DIR/bin/quantum-dhcp-agent" + Q_DHCP_CONF_FILE=$QUANTUM_CONF_DIR/dhcp_agent.ini + + cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE + + iniset $Q_DHCP_CONF_FILE DEFAULT verbose True + iniset $Q_DHCP_CONF_FILE DEFAULT debug True + iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $Q_DHCP_CONF_FILE DEFAULT state_path $DATA_DIR/quantum + iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + + _quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url + _quantum_setup_interface_driver $Q_DHCP_CONF_FILE + + if [[ "$Q_PLUGIN" = "ryu" ]]; then + iniset $Q_DHCP_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT + fi +} + +function _configure_quantum_l3_agent() { + AGENT_L3_BINARY="$QUANTUM_DIR/bin/quantum-l3-agent" + PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} + Q_L3_CONF_FILE=$QUANTUM_CONF_DIR/l3_agent.ini + + cp $QUANTUM_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE + + iniset $Q_L3_CONF_FILE DEFAULT verbose True + iniset $Q_L3_CONF_FILE DEFAULT debug True + iniset $Q_L3_CONF_FILE DEFAULT use_namespaces 
$Q_USE_NAMESPACE + iniset $Q_L3_CONF_FILE DEFAULT state_path $DATA_DIR/quantum + iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + + _quantum_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url + _quantum_setup_interface_driver $Q_L3_CONF_FILE + + if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then + iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE + _quantum_setup_external_bridge $PUBLIC_BRIDGE + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge '' + fi + + if [[ "$Q_PLUGIN" = "ryu" ]]; then + iniset $Q_L3_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT + fi +} + +function _configure_quantum_metadata_agent() { + AGENT_META_BINARY="$QUANTUM_DIR/bin/quantum-metadata-agent" + Q_META_CONF_FILE=$QUANTUM_CONF_DIR/metadata_agent.ini + + cp $QUANTUM_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE + + iniset $Q_META_CONF_FILE DEFAULT verbose True + iniset $Q_META_CONF_FILE DEFAULT debug True + iniset $Q_META_CONF_FILE DEFAULT state_path $DATA_DIR/quantum + iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP + iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + + _quantum_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url +} + +# _configure_quantum_plugin_agent() - Set config files for quantum plugin agent +# It is called when q-agt is enabled. +function _configure_quantum_plugin_agent() { + # Configure agent for plugin + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + _configure_quantum_plugin_agent_openvswitch + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + _configure_quantum_plugin_agent_linuxbridge + elif [[ "$Q_PLUGIN" = "ryu" ]]; then + _configure_quantum_plugin_agent_ryu + fi + + iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" +} + +function _configure_quantum_plugin_agent_linuxbridge() { + # Setup physical network interface mappings. Override + # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more + # complex physical network configurations. + if [[ "$LB_INTERFACE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then + LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE + fi + if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS + fi + AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" +} + +function _configure_quantum_plugin_agent_openvswitch() { + # Setup integration bridge + OVS_BRIDGE=${OVS_BRIDGE:-br-int} + _quantum_setup_ovs_bridge $OVS_BRIDGE + + # Setup agent for tunneling + if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then + # Verify tunnels are supported + # REVISIT - also check kernel module support for GRE and patch ports + OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'` + if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then + echo "You are running OVS version $OVS_VERSION." + echo "OVS 1.4+ is required for tunneling between multiple hosts." + exit 1 + fi + iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True + iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP + fi + + # Setup physical network bridge mappings. Override + # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more + # complex physical network configurations. 
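+    # e.g. PHYSICAL_NETWORK=default and OVS_PHYSICAL_BRIDGE=br-eth1 (the sample
+    # values documented above) produce bridge_mappings=default:br-eth1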
+ if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then + OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE + + # Configure bridge manually with physical interface as port for multi-node + sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE + fi + if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS + fi + AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent" +} + +function _configure_quantum_plugin_agent_ryu() { + # Set up integration bridge + OVS_BRIDGE=${OVS_BRIDGE:-br-int} + _quantum_setup_ovs_bridge $OVS_BRIDGE + if [ -n "$RYU_INTERNAL_INTERFACE" ]; then + sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $RYU_INTERNAL_INTERFACE + fi + AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py" +} + +# Quantum RPC support - must be updated prior to starting any of the services +function _configure_quantum_rpc() { + iniset $QUANTUM_CONF DEFAULT control_exchange quantum + if is_service_enabled qpid ; then + iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid + elif is_service_enabled zeromq; then + iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_zmq + elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then + iniset $QUANTUM_CONF DEFAULT rabbit_host $RABBIT_HOST + iniset $QUANTUM_CONF DEFAULT rabbit_password $RABBIT_PASSWORD + fi +} + +# _configure_quantum_service() - Set config files for quantum service +# It is called when q-svc is enabled. +function _configure_quantum_service() { + Q_API_PASTE_FILE=$QUANTUM_CONF_DIR/api-paste.ini + Q_POLICY_FILE=$QUANTUM_CONF_DIR/policy.json + + cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE + cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE + + if is_service_enabled $DATABASE_BACKENDS; then + recreate_database $Q_DB_NAME utf8 + else + echo "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin." + exit 1 + fi + + # Update either configuration file with plugin + iniset $QUANTUM_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS + + iniset $QUANTUM_CONF DEFAULT verbose True + iniset $QUANTUM_CONF DEFAULT debug True + iniset $QUANTUM_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP + + iniset $QUANTUM_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY + _quantum_setup_keystone $Q_API_PASTE_FILE filter:authtoken + + # Configure plugin + if [[ "$Q_PLUGIN" = "openvswitch" ]]; then + if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type gre + iniset /$Q_PLUGIN_CONF_FILE OVS tunnel_id_ranges $TENANT_TUNNEL_RANGES + elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type vlan + else + echo "WARNING - The openvswitch plugin is using local tenant networks, with no connectivity between hosts." + fi + + # Override ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` + # for more complex physical network configurations. 
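+        # e.g. PHYSICAL_NETWORK=default and TENANT_VLAN_RANGE=1000:1999 produce
+        # network_vlan_ranges=default:1000:1999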
+ if [[ "$OVS_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then + OVS_VLAN_RANGES=$PHYSICAL_NETWORK + if [[ "$TENANT_VLAN_RANGE" != "" ]]; then + OVS_VLAN_RANGES=$OVS_VLAN_RANGES:$TENANT_VLAN_RANGE + fi + fi + if [[ "$OVS_VLAN_RANGES" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges $OVS_VLAN_RANGES + fi + + # Enable tunnel networks if selected + if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True + fi + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE VLANS tenant_network_type vlan + else + echo "WARNING - The linuxbridge plugin is using local tenant networks, with no connectivity between hosts." + fi + + # Override ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` + # for more complex physical network configurations. + if [[ "$LB_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then + LB_VLAN_RANGES=$PHYSICAL_NETWORK + if [[ "$TENANT_VLAN_RANGE" != "" ]]; then + LB_VLAN_RANGES=$LB_VLAN_RANGES:$TENANT_VLAN_RANGE + fi + fi + if [[ "$LB_VLAN_RANGES" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES + fi + elif [[ "$Q_PLUGIN" = "ryu" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS openflow_controller $RYU_OFP_HOST:$RYU_OFP_PORT + iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT + fi +} + +# Utility Functions +#------------------ + +# _quantum_setup_rootwrap() - configure Quantum's rootwrap +function _quantum_setup_rootwrap() { if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then return fi @@ -109,7 +713,7 @@ function configure_quantum_rootwrap() { } # Configures keystone integration for quantum service and agents -function quantum_setup_keystone() { +function _quantum_setup_keystone() { local conf_file=$1 local section=$2 local use_auth_url=$3 @@ -130,39 +734,54 @@ function quantum_setup_keystone() { rm -f $QUANTUM_AUTH_CACHE_DIR/* } -function quantum_setup_ovs_bridge() { +function _quantum_setup_ovs_bridge() { local bridge=$1 - for PORT in `sudo ovs-vsctl --no-wait list-ports $bridge`; do - if [[ "$PORT" =~ tap* ]]; then echo `sudo ip link delete $PORT` > /dev/null; fi - sudo ovs-vsctl --no-wait del-port $bridge $PORT - done - sudo ovs-vsctl --no-wait -- --if-exists del-br $bridge - sudo ovs-vsctl --no-wait add-br $bridge + quantum-ovs-cleanup --ovs_integration_bridge $bridge + sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge } -function quantum_setup_external_bridge() { +function _quantum_setup_interface_driver() { + local conf_file=$1 + if [[ "$Q_PLUGIN" == "openvswitch" ]]; then + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver + elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver + elif [[ "$Q_PLUGIN" = "ryu" ]]; then + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver + fi +} + +function _quantum_setup_external_bridge() { local bridge=$1 - # Create it if it does not exist + quantum-ovs-cleanup --external_network_bridge $bridge sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge - # remove internal ports - for PORT in `sudo ovs-vsctl --no-wait list-ports $bridge`; do - TYPE=$(sudo ovs-vsctl get interface $PORT type) - if [[ "$TYPE" == "internal" ]]; then - echo `sudo ip link delete $PORT` > /dev/null - sudo 
ovs-vsctl --no-wait del-port $bridge $PORT - fi - done # ensure no IP is configured on the public bridge sudo ip addr flush dev $bridge } -function is_quantum_ovs_base_plugin() { - local plugin=$1 - if [[ ",openvswitch,ryu," =~ ,${plugin}, ]]; then - return 0 +# Functions for Quantum Exercises +#-------------------------------- + +function delete_probe() { + local from_net="$1" + net_id=`_get_net_id $from_net` + probe_id=`quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` + quantum-debug --os-tenant-name admin --os-username admin probe-delete $probe_id +} + +function setup_quantum_debug() { + if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then + public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME` + quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $public_net_id + private_net_id=`_get_net_id $PRIVATE_NETWORK_NAME` + quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $private_net_id fi - return 1 +} + +function teardown_quantum_debug() { + delete_probe $PUBLIC_NETWORK_NAME + delete_probe $PRIVATE_NETWORK_NAME } function _get_net_id() { @@ -176,13 +795,6 @@ function _get_probe_cmd_prefix() { echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" } -function delete_probe() { - local from_net="$1" - net_id=`_get_net_id $from_net` - probe_id=`quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` - quantum-debug --os-tenant-name admin --os-username admin probe-delete $probe_id -} - function _ping_check_quantum() { local from_net=$1 local ip=$2 @@ -220,17 +832,59 @@ function _ssh_check_quantum() { fi } -function setup_quantum() { - public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME` - quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $public_net_id - private_net_id=`_get_net_id $PRIVATE_NETWORK_NAME` - quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $private_net_id +# Quantum 3rd party programs +#--------------------------- +# A comma-separated list of 3rd party programs +QUANTUM_THIRD_PARTIES="ryu" +for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do + source lib/$third_party +done + +# configure_quantum_third_party() - Set config files, create data dirs, etc +function configure_quantum_third_party() { + for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do + if is_service_enabled $third_party; then + configure_${third_party} + fi + done } -function teardown_quantum() { - delete_probe $PUBLIC_NETWORK_NAME - delete_probe $PRIVATE_NETWORK_NAME +# init_quantum_third_party() - Initialize databases, etc. 
+function init_quantum_third_party() { + for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do + if is_service_enabled $third_party; then + init_${third_party} + fi + done +} + +# install_quantum_third_party() - Collect source and prepare +function install_quantum_third_party() { + for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do + if is_service_enabled $third_party; then + install_${third_party} + fi + done +} + +# start_quantum_third_party() - Start running processes, including screen +function start_quantum_third_party() { + for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do + if is_service_enabled $third_party; then + start_${third_party} + fi + done } +# stop_quantum_third_party - Stop running processes (non-screen) +function stop_quantum_third_party() { + for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do + if is_service_enabled $third_party; then + stop_${third_party} + fi + done +} + + # Restore xtrace $XTRACE diff --git a/lib/ryu b/lib/ryu new file mode 100644 index 00000000..ac3462bb --- /dev/null +++ b/lib/ryu @@ -0,0 +1,63 @@ +# Ryu OpenFlow Controller +# ----------------------- + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +RYU_DIR=$DEST/ryu +# Ryu API Host +RYU_API_HOST=${RYU_API_HOST:-127.0.0.1} +# Ryu API Port +RYU_API_PORT=${RYU_API_PORT:-8080} +# Ryu OFP Host +RYU_OFP_HOST=${RYU_OFP_HOST:-127.0.0.1} +# Ryu OFP Port +RYU_OFP_PORT=${RYU_OFP_PORT:-6633} +# Ryu Applications +RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest} + +function configure_ryu() { + setup_develop $RYU_DIR +} + +function init_ryu() { + RYU_CONF_DIR=/etc/ryu + if [[ ! -d $RYU_CONF_DIR ]]; then + sudo mkdir -p $RYU_CONF_DIR + fi + sudo chown `whoami` $RYU_CONF_DIR + RYU_CONF=$RYU_CONF_DIR/ryu.conf + sudo rm -rf $RYU_CONF + + cat < $RYU_CONF +--app_lists=$RYU_APPS +--wsapi_host=$RYU_API_HOST +--wsapi_port=$RYU_API_PORT +--ofp_listen_host=$RYU_OFP_HOST +--ofp_tcp_listen_port=$RYU_OFP_PORT +EOF +} + +function install_ryu() { + git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH +} + +function is_ryu_required() { + if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then + return 0 + fi + return 1 +} + +function start_ryu() { + screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --flagfile $RYU_CONF" +} + +function stop_ryu() { + : +} + +# Restore xtrace +$XTRACE diff --git a/lib/tempest b/lib/tempest index 18599219..337be75b 100644 --- a/lib/tempest +++ b/lib/tempest @@ -190,7 +190,7 @@ function configure_tempest() { #Skip until #1074039 is fixed iniset $TEMPEST_CONF compute run_ssh False iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-$OS_USERNAME} - iniset $TEMPEST_CONF compute network_for_ssh private + iniset $TEMPEST_CONF compute network_for_ssh $PRIVATE_NETWORK_NAME iniset $TEMPEST_CONF compute ip_version_for_ssh 4 iniset $TEMPEST_CONF compute ssh_timeout 4 iniset $TEMPEST_CONF compute image_ref $image_uuid @@ -199,7 +199,7 @@ function configure_tempest() { iniset $TEMPEST_CONF compute flavor_ref_alt $flavor_ref_alt iniset $TEMPEST_CONF compute source_dir $NOVA_SOURCE_DIR iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} - iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} + iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} # Inherited behavior, might be wrong iniset $TEMPEST_CONF compute bin_dir $NOVA_BIN_DIR # TODO(jaypipes): Create the key file 
here... right now, no whitebox diff --git a/stack.sh b/stack.sh index cf638e83..10a86206 100755 --- a/stack.sh +++ b/stack.sh @@ -329,18 +329,6 @@ OPENSTACKCLIENT_DIR=$DEST/python-openstackclient NOVNC_DIR=$DEST/noVNC SWIFT3_DIR=$DEST/swift3 -RYU_DIR=$DEST/ryu -# Ryu API Host -RYU_API_HOST=${RYU_API_HOST:-127.0.0.1} -# Ryu API Port -RYU_API_PORT=${RYU_API_PORT:-8080} -# Ryu OFP Host -RYU_OFP_HOST=${RYU_OFP_HOST:-127.0.0.1} -# Ryu OFP Port -RYU_OFP_PORT=${RYU_OFP_PORT:-6633} -# Ryu Applications -RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest} - # Should cinder perform secure deletion of volumes? # Defaults to true, can be set to False to avoid this bug when testing: # https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755 @@ -703,21 +691,7 @@ if is_service_enabled $DATABASE_BACKENDS; then fi if is_service_enabled q-agt; then - if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then - # Install deps - # FIXME add to ``files/apts/quantum``, but don't install if not needed! - if is_ubuntu; then - kernel_version=`cat /proc/version | cut -d " " -f3` - install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version - else - ### FIXME(dtroyer): Find RPMs for OpenVSwitch - echo "OpenVSwitch packages need to be located" - # Fedora does not started OVS by default - restart_service openvswitch - fi - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - install_package bridge-utils - fi + install_quantum_agent_packages fi TRACK_DEPENDS=${TRACK_DEPENDS:-False} @@ -778,11 +752,9 @@ if is_service_enabled horizon; then install_horizon fi if is_service_enabled quantum; then - git_clone $QUANTUMCLIENT_REPO $QUANTUMCLIENT_DIR $QUANTUMCLIENT_BRANCH -fi -if is_service_enabled quantum; then - # quantum - git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH + install_quantum + install_quantumclient + install_quantum_third_party fi if is_service_enabled heat; then install_heat @@ -797,9 +769,6 @@ fi if is_service_enabled tempest; then install_tempest fi -if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then - git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH -fi # Initialization @@ -837,8 +806,8 @@ if is_service_enabled horizon; then configure_horizon fi if is_service_enabled quantum; then - setup_develop $QUANTUMCLIENT_DIR - setup_develop $QUANTUM_DIR + setup_quantumclient + setup_quantum fi if is_service_enabled heat; then configure_heat @@ -847,9 +816,6 @@ fi if is_service_enabled cinder; then configure_cinder fi -if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then - setup_develop $RYU_DIR -fi if [[ $TRACK_DEPENDS = True ]] ; then $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip @@ -962,6 +928,7 @@ if is_service_enabled key; then create_keystone_accounts create_nova_accounts create_cinder_accounts + create_quantum_accounts # ``keystone_data.sh`` creates services, admin and demo users, and roles. ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ @@ -1011,392 +978,22 @@ if is_service_enabled g-reg; then fi -# Ryu -# --- - -# Ryu is not a part of OpenStack project. Please ignore following block if -# you are not interested in Ryu. -# launch ryu manager -if is_service_enabled ryu; then - RYU_CONF_DIR=/etc/ryu - if [[ ! 
-d $RYU_CONF_DIR ]]; then - sudo mkdir -p $RYU_CONF_DIR - fi - sudo chown `whoami` $RYU_CONF_DIR - RYU_CONF=$RYU_CONF_DIR/ryu.conf - sudo rm -rf $RYU_CONF - - cat < $RYU_CONF ---app_lists=$RYU_APPS ---wsapi_host=$RYU_API_HOST ---wsapi_port=$RYU_API_PORT ---ofp_listen_host=$RYU_OFP_HOST ---ofp_tcp_listen_port=$RYU_OFP_PORT -EOF - screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --flagfile $RYU_CONF" -fi - - # Quantum # ------- -# Quantum Network Configuration if is_service_enabled quantum; then echo_summary "Configuring Quantum" - # The following variables control the Quantum openvswitch and - # linuxbridge plugins' allocation of tenant networks and - # availability of provider networks. If these are not configured - # in localrc, tenant networks will be local to the host (with no - # remote connectivity), and no physical resources will be - # available for the allocation of provider networks. - - # To use GRE tunnels for tenant networks, set to True in - # localrc. GRE tunnels are only supported by the openvswitch - # plugin, and currently only on Ubuntu. - ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False} - - # If using GRE tunnels for tenant networks, specify the range of - # tunnel IDs from which tenant networks are allocated. Can be - # overriden in localrc in necesssary. - TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGE:-1:1000} - - # To use VLANs for tenant networks, set to True in localrc. VLANs - # are supported by the openvswitch and linuxbridge plugins, each - # requiring additional configuration described below. - ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} - - # If using VLANs for tenant networks, set in localrc to specify - # the range of VLAN VIDs from which tenant networks are - # allocated. An external network switch must be configured to - # trunk these VLANs between hosts for multi-host connectivity. - # - # Example: ``TENANT_VLAN_RANGE=1000:1999`` - TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} - - # If using VLANs for tenant networks, or if using flat or VLAN - # provider networks, set in localrc to the name of the physical - # network, and also configure OVS_PHYSICAL_BRIDGE for the - # openvswitch agent or LB_PHYSICAL_INTERFACE for the linuxbridge - # agent, as described below. - # - # Example: ``PHYSICAL_NETWORK=default`` - PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} - - # With the openvswitch plugin, if using VLANs for tenant networks, - # or if using flat or VLAN provider networks, set in localrc to - # the name of the OVS bridge to use for the physical network. The - # bridge will be created if it does not already exist, but a - # physical interface must be manually added to the bridge as a - # port for external connectivity. - # - # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` - OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} - - # With the linuxbridge plugin, if using VLANs for tenant networks, - # or if using flat or VLAN provider networks, set in localrc to - # the name of the network interface to use for the physical - # network. - # - # Example: ``LB_PHYSICAL_INTERFACE=eth1`` - LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} - - # With the openvswitch plugin, set to True in localrc to enable - # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. - # - # Example: ``OVS_ENABLE_TUNNELING=True`` - OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} - - # Put config files in ``QUANTUM_CONF_DIR`` for everyone to find - if [[ ! 
-d $QUANTUM_CONF_DIR ]]; then - sudo mkdir -p $QUANTUM_CONF_DIR - fi - sudo chown `whoami` $QUANTUM_CONF_DIR - - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch - Q_PLUGIN_CONF_FILENAME=ovs_quantum_plugin.ini - Q_DB_NAME="ovs_quantum" - Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2" - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge - Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini - Q_DB_NAME="quantum_linux_bridge" - Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2" - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - Q_PLUGIN_CONF_PATH=etc/quantum/plugins/ryu - Q_PLUGIN_CONF_FILENAME=ryu.ini - Q_DB_NAME="ovs_quantum" - Q_PLUGIN_CLASS="quantum.plugins.ryu.ryu_quantum_plugin.RyuQuantumPluginV2" - fi - - if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then - echo "Quantum plugin not set.. exiting" - exit 1 - fi - - # If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``QUANTUM_CONF_DIR`` - mkdir -p /$Q_PLUGIN_CONF_PATH - Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME - cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE - - database_connection_url dburl $Q_DB_NAME - iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection $dburl - unset dburl - - cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF - configure_quantum_rootwrap -fi - -# Quantum service (for controller node) -if is_service_enabled q-svc; then - Q_API_PASTE_FILE=$QUANTUM_CONF_DIR/api-paste.ini - Q_POLICY_FILE=$QUANTUM_CONF_DIR/policy.json - - cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE - cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE - - if is_service_enabled $DATABASE_BACKENDS; then - recreate_database $Q_DB_NAME utf8 - else - echo "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin." - exit 1 - fi - - # Update either configuration file with plugin - iniset $QUANTUM_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS - - iniset $QUANTUM_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY - quantum_setup_keystone $Q_API_PASTE_FILE filter:authtoken - - # Configure plugin - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type gre - iniset /$Q_PLUGIN_CONF_FILE OVS tunnel_id_ranges $TENANT_TUNNEL_RANGES - elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type vlan - else - echo "WARNING - The openvswitch plugin is using local tenant networks, with no connectivity between hosts." - fi - - # Override ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` - # for more complex physical network configurations. - if [[ "$OVS_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then - OVS_VLAN_RANGES=$PHYSICAL_NETWORK - if [[ "$TENANT_VLAN_RANGE" != "" ]]; then - OVS_VLAN_RANGES=$OVS_VLAN_RANGES:$TENANT_VLAN_RANGE - fi - fi - if [[ "$OVS_VLAN_RANGES" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges $OVS_VLAN_RANGES - fi - - # Enable tunnel networks if selected - if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True - fi - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE VLANS tenant_network_type vlan - else - echo "WARNING - The linuxbridge plugin is using local tenant networks, with no connectivity between hosts." 
- fi - - # Override ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` - # for more complex physical network configurations. - if [[ "$LB_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then - LB_VLAN_RANGES=$PHYSICAL_NETWORK - if [[ "$TENANT_VLAN_RANGE" != "" ]]; then - LB_VLAN_RANGES=$LB_VLAN_RANGES:$TENANT_VLAN_RANGE - fi - fi - if [[ "$LB_VLAN_RANGES" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES - fi - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS openflow_controller $RYU_OFP_HOST:$RYU_OFP_PORT - iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT - fi -fi - -# Quantum agent (for compute nodes) -if is_service_enabled q-agt; then - # Configure agent for plugin - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - # Setup integration bridge - OVS_BRIDGE=${OVS_BRIDGE:-br-int} - quantum_setup_ovs_bridge $OVS_BRIDGE - - # Setup agent for tunneling - if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then - # Verify tunnels are supported - # REVISIT - also check kernel module support for GRE and patch ports - OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'` - if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then - echo "You are running OVS version $OVS_VERSION." - echo "OVS 1.4+ is required for tunneling between multiple hosts." - exit 1 - fi - iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True - iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP - fi - - # Setup physical network bridge mappings. Override - # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more - # complex physical network configurations. - if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then - OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE - - # Configure bridge manually with physical interface as port for multi-node - sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE - fi - if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS - fi - AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent" - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - # Setup physical network interface mappings. Override - # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more - # complex physical network configurations. 
- if [[ "$LB_INTERFACE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then - LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE - fi - if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS - fi - AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - # Set up integration bridge - OVS_BRIDGE=${OVS_BRIDGE:-br-int} - quantum_setup_ovs_bridge $OVS_BRIDGE - if [ -n "$RYU_INTERNAL_INTERFACE" ]; then - sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $RYU_INTERNAL_INTERFACE - fi - AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py" - fi - # Update config w/rootwrap - iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" + configure_quantum + init_quantum fi -# Quantum DHCP -if is_service_enabled q-dhcp; then - AGENT_DHCP_BINARY="$QUANTUM_DIR/bin/quantum-dhcp-agent" - - Q_DHCP_CONF_FILE=$QUANTUM_CONF_DIR/dhcp_agent.ini - - cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE - - # Set verbose - iniset $Q_DHCP_CONF_FILE DEFAULT verbose True - # Set debug - iniset $Q_DHCP_CONF_FILE DEFAULT debug True - iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset $Q_DHCP_CONF_FILE DEFAULT state_path $DATA_DIR/quantum - - quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url - - # Update config w/rootwrap - iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver - iniset $Q_DHCP_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT - fi -fi - -# Quantum L3 -if is_service_enabled q-l3; then - AGENT_L3_BINARY="$QUANTUM_DIR/bin/quantum-l3-agent" - PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} - Q_L3_CONF_FILE=$QUANTUM_CONF_DIR/l3_agent.ini - - cp $QUANTUM_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE - - # Set verbose - iniset $Q_L3_CONF_FILE DEFAULT verbose True - # Set debug - iniset $Q_L3_CONF_FILE DEFAULT debug True - - iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - - iniset $Q_L3_CONF_FILE DEFAULT state_path $DATA_DIR/quantum - - iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - - quantum_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url - if [[ "$Q_PLUGIN" == "openvswitch" ]]; then - iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver - iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE - # Set up external bridge - quantum_setup_external_bridge $PUBLIC_BRIDGE - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver - iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge '' - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver - iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE - iniset $Q_L3_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT - # Set up external bridge - quantum_setup_external_bridge $PUBLIC_BRIDGE - fi -fi - 
-#Quantum Metadata -if is_service_enabled q-meta; then - AGENT_META_BINARY="$QUANTUM_DIR/bin/quantum-metadata-agent" - Q_META_CONF_FILE=$QUANTUM_CONF_DIR/metadata_agent.ini - - cp $QUANTUM_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE - - # Set verbose - iniset $Q_META_CONF_FILE DEFAULT verbose True - # Set debug - iniset $Q_META_CONF_FILE DEFAULT debug True - - iniset $Q_META_CONF_FILE DEFAULT state_path $DATA_DIR/quantum - - iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP - - iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - - quantum_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url -fi - -# Quantum RPC support - must be updated prior to starting any of the services +# Some Quantum plugins require network controllers which are not +# a part of the OpenStack project. Configure and start them. if is_service_enabled quantum; then - iniset $QUANTUM_CONF DEFAULT control_exchange quantum - if is_service_enabled qpid ; then - iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid - elif is_service_enabled zeromq; then - iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_zmq - elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $QUANTUM_CONF DEFAULT rabbit_host $RABBIT_HOST - iniset $QUANTUM_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - fi - if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then - cp $QUANTUM_DIR/etc/l3_agent.ini $QUANTUM_TEST_CONFIG_FILE - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT verbose False - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT debug False - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND" - quantum_setup_keystone $QUANTUM_TEST_CONFIG_FILE DEFAULT set_auth_url - if [[ "$Q_PLUGIN" == "openvswitch" ]]; then - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge '' - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE - iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT - fi - fi + configure_quantum_third_party + init_quantum_third_party + start_quantum_third_party fi @@ -1445,37 +1042,9 @@ if is_service_enabled nova; then # Additional Nova configuration that is dependent on other services if is_service_enabled quantum; then - add_nova_opt "network_api_class=nova.network.quantumv2.api.API" - add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME" - add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD" - add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" - add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY" - add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME" - add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT" - - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - 
NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"} - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver"} - add_nova_opt "libvirt_ovs_integration_bridge=$OVS_BRIDGE" - add_nova_opt "linuxnet_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" - add_nova_opt "libvirt_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" - fi - add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER" - add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER" - if is_service_enabled q-meta; then - add_nova_opt "service_quantum_metadata_proxy=True" - fi + create_nova_conf_quantum elif is_service_enabled n-net; then - add_nova_opt "network_manager=nova.network.manager.$NET_MAN" - add_nova_opt "public_interface=$PUBLIC_INTERFACE" - add_nova_opt "vlan_interface=$VLAN_INTERFACE" - add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE" - if [ -n "$FLAT_INTERFACE" ]; then - add_nova_opt "flat_interface=$FLAT_INTERFACE" - fi + create_nova_conf_nova_network fi # All nova-compute workers need to know the vnc configuration options # These settings don't hurt anything if n-xvnc and n-novnc are disabled @@ -1584,64 +1153,24 @@ fi if is_service_enabled q-svc; then echo_summary "Starting Quantum" - # Start the Quantum service - screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" - echo "Waiting for Quantum to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9696; do sleep 1; done"; then - echo "Quantum did not start" - exit 1 - fi - # Configure Quantum elements - # Configure internal network & subnet - - TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1) - - # Create a small network - # Since quantum command is executed in admin context at this point, - # ``--tenant_id`` needs to be specified. - NET_ID=$(quantum net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) - SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) - if is_service_enabled q-l3; then - # Create a router, and add the private subnet as one of its interfaces - ROUTER_ID=$(quantum router-create --tenant_id $TENANT_ID router1 | grep ' id ' | get_field 2) - quantum router-interface-add $ROUTER_ID $SUBNET_ID - # Create an external network, and a subnet. 
Configure the external network as router gw - EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2) - EXT_GW_IP=$(quantum subnet-create --ip_version 4 $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) - quantum router-gateway-set $ROUTER_ID $EXT_NET_ID - if is_quantum_ovs_base_plugin "$Q_PLUGIN" && [[ "$Q_USE_NAMESPACE" = "True" ]]; then - CIDR_LEN=${FLOATING_RANGE#*/} - sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE - sudo ip link set $PUBLIC_BRIDGE up - ROUTER_GW_IP=`quantum port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' '{ print $8; }'` - sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP - fi - if [[ "$Q_USE_NAMESPACE" == "False" ]]; then - # Explicitly set router id in l3 agent configuration - iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID - fi - fi - if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then - setup_quantum - fi + start_quantum_service_and_check + create_quantum_initial_network + setup_quantum_debug elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then # Create a small network $NOVA_BIN_DIR/nova-manage network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS # Create some floating ips - $NOVA_BIN_DIR/nova-manage floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK + $NOVA_BIN_DIR/nova-manage floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK_NAME # Create a second pool $NOVA_BIN_DIR/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL fi -# Start up the quantum agents if enabled -screen_it q-agt "python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" -screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE" -screen_it q-meta "python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE" -screen_it q-l3 "python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE" - +if is_service_enabled quantum; then + start_quantum_agents +fi if is_service_enabled nova; then echo_summary "Starting Nova" start_nova diff --git a/unstack.sh b/unstack.sh index 09e0de6b..975a0793 100755 --- a/unstack.sh +++ b/unstack.sh @@ -28,6 +28,7 @@ DATA_DIR=${DATA_DIR:-${DEST}/data} source $TOP_DIR/lib/cinder source $TOP_DIR/lib/horizon source $TOP_DIR/lib/swift +source $TOP_DIR/lib/quantum # Determine what system we are running on. This provides ``os_VENDOR``, # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` @@ -39,8 +40,7 @@ fi if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then source $TOP_DIR/openrc - source $TOP_DIR/lib/quantum - teardown_quantum + teardown_quantum_debug fi # Shut down devstack's screen to get the bulk of OpenStack services in one shot @@ -119,8 +119,7 @@ if [[ -n "$UNSTACK_ALL" ]]; then fi fi -# Quantum dhcp agent runs dnsmasq -if is_service_enabled q-dhcp; then - pid=$(ps aux | awk '/[d]nsmasq.+interface=tap/ { print $2 }') - [ ! -z "$pid" ] && sudo kill -9 $pid +if is_service_enabled quantum; then + stop_quantum + stop_quantum_third_party fi From 796342c06e8ca3dcfd2e8f1ba4e0300a703b8de1 Mon Sep 17 00:00:00 2001 From: Michael Still Date: Fri, 28 Dec 2012 11:08:20 +1100 Subject: [PATCH 868/967] Handle the new behaviour for invalid instances. The behaviour of this case changed with bug/836978. 
Requesting the status of an invalid instance will now return an error message including the instance id, so we need to filter that out. Resolves the devstack elements of bug 836978. Change-Id: I385eb6f04cff90e1ddc0b79d835fbbdf92e4e9ff --- exercises/euca.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index 982653ef..76df254b 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -165,8 +165,11 @@ fi euca-terminate-instances $INSTANCE || \ die "Failure terminating instance $INSTANCE" -# Assure it has terminated within a reasonable time -if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q $INSTANCE; do sleep 1; done"; then +# Assure it has terminated within a reasonable time. The behaviour of this +# case changed with bug/836978. Requesting the status of an invalid instance +# will now return an error message including the instance id, so we need to +# filter that out. +if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE |grep -v \"InstanceNotFound\" | grep -q $INSTANCE; do sleep 1; done"; then echo "server didn't terminate within $TERMINATE_TIMEOUT seconds" exit 1 fi From f35cf91a1d4f13cfa77f9411a1eef38953abebbc Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Mon, 12 Nov 2012 17:58:38 -0800 Subject: [PATCH 869/967] adding support for baremetal hypervisor New files for baremetal driver: - lib/baremetal - files/apts/baremetal Adds two dependencies: - google shell-in-a-box - diskimage-builder Enable by setting both: VIRT_DRIVER=baremetal ENABLED_SERVICES="$ENABLED_SERVICES,baremetal" Change-Id: Ibf6fe1671a759a449c9eb0df47751d1b31ade591 --- files/apts/baremetal | 9 + lib/baremetal | 403 +++++++++++++++++++++++++++++++++++++++++++ lib/nova | 20 +++ stack.sh | 97 +++++++++-- stackrc | 4 + 5 files changed, 522 insertions(+), 11 deletions(-) create mode 100644 files/apts/baremetal create mode 100644 lib/baremetal diff --git a/files/apts/baremetal b/files/apts/baremetal new file mode 100644 index 00000000..54e76e00 --- /dev/null +++ b/files/apts/baremetal @@ -0,0 +1,9 @@ +busybox +dnsmasq +gcc +ipmitool +make +open-iscsi +qemu-kvm +syslinux +tgt diff --git a/lib/baremetal b/lib/baremetal new file mode 100644 index 00000000..f82633a4 --- /dev/null +++ b/lib/baremetal @@ -0,0 +1,403 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +# This file provides devstack with the environment and utilities to +# control nova-compute's baremetal driver. +# It sets reasonable defaults to run within a single host, +# using virtual machines in place of physical hardware. +# However, by changing just a few options, devstack+baremetal can in fact +# control physical hardware resources on the same network, if you know +# the MAC address(es) and IPMI credentials. 
+# +# At a minimum, to enable the baremetal driver, you must set these in loclarc: +# VIRT_DRIVER=baremetal +# ENABLED_SERVICES="$ENABLED_SERVICES,baremetal" +# +# +# We utilize diskimage-builder to create a ramdisk, and then +# baremetal driver uses that to push a disk image onto the node(s). +# +# Below we define various defaults which control the behavior of the +# baremetal compute service, and inform it of the hardware it will contorl. +# +# Below that, various functions are defined, which are called by devstack +# in the following order: +# +# before nova-cpu starts: +# - prepare_baremetal_toolchain +# - configure_baremetal_nova_dirs +# +# after nova and glance have started: +# - build_and_upload_baremetal_deploy_k_and_r $token +# - create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID +# - upload_baremetal_image $url $token +# - add_baremetal_node + + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Sub-driver settings +# ------------------- + +# sub-driver to use for kernel deployment +# - nova.virt.baremetal.pxe.PXE +# - nova.virt.baremetal.tilera.TILERA +BM_DRIVER=${BM_DRIVER:-nova.virt.baremetal.pxe.PXE} + +# sub-driver to use for remote power management +# - nova.virt.baremetal.fake.FakePowerManager, for manual power control +# - nova.virt.baremetal.ipmi.Ipmi, for remote IPMI +# - nova.virt.baremetal.tilera_pdu.Pdu, for TilePro hardware +BM_POWER_MANAGER=${BM_POWER_MANAGER:-nova.virt.baremetal.fake.FakePowerManager} + + +# These should be customized to your environment and hardware +# ----------------------------------------------------------- + +# BM_DNSMASQ_* options must be changed to suit your network environment +BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-$PUBLIC_INTERFACE} +BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0} +BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-192.0.2.32,192.0.2.48} + +# BM_FIRST_MAC *must* be set to the MAC address of the node you will boot. +# This is passed to dnsmasq along with the kernel/ramdisk to +# deploy via PXE. +BM_FIRST_MAC=${BM_FIRST_MAC:-} + +# BM_SECOND_MAC is only important if the host has >1 NIC. +BM_SECOND_MAC=${BM_SECOND_MAC:-} + +# Hostname for the baremetal nova-compute node, if not run on this host +BM_HOSTNAME=${BM_HOSTNAME:-$(hostname -f)} + +# BM_PM_* options are only necessary if BM_POWER_MANAGER=...IPMI +BM_PM_ADDR=${BM_PM_ADDR:-0.0.0.0} +BM_PM_USER=${BM_PM_USER:-user} +BM_PM_PASS=${BM_PM_PASS:-pass} + +# BM_FLAVOR_* options are arbitrary and not necessarily related to physical +# hardware capacity. These can be changed if you are testing +# BaremetalHostManager with multiple nodes and different flavors. +BM_CPU_ARCH=${BM_CPU_ARCH:-x86_64} +BM_FLAVOR_CPU=${BM_FLAVOR_CPU:-1} +BM_FLAVOR_RAM=${BM_FLAVOR_RAM:-1024} +BM_FLAVOR_ROOT_DISK=${BM_FLAVOR_ROOT_DISK:-10} +BM_FLAVOR_EPHEMERAL_DISK=${BM_FLAVOR_EPHEMERAL_DISK:-0} +BM_FLAVOR_SWAP=${BM_FLAVOR_SWAP:-1} +BM_FLAVOR_NAME=${BM_FLAVOR_NAME:-bm.small} +BM_FLAVOR_ID=${BM_FLAVOR_ID:-11} +BM_FLAVOR_ARCH=${BM_FLAVOR_ARCH:-$BM_CPU_ARCH} + + +# Below this, we set some path and filenames. +# Defaults are probably sufficient. 
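(To make the settings above concrete: a minimal ``localrc`` sketch for driving a single physical node over IPMI, using only variables defined in this file plus the two enable flags from the commit message, could look like the following; the MAC, IPMI address and credentials are placeholders that must match your hardware:

    VIRT_DRIVER=baremetal
    ENABLED_SERVICES="$ENABLED_SERVICES,baremetal"
    BM_POWER_MANAGER=nova.virt.baremetal.ipmi.Ipmi
    BM_FIRST_MAC=00:11:22:33:44:55
    BM_PM_ADDR=10.0.0.10
    BM_PM_USER=admin
    BM_PM_PASS=secret
    BM_DNSMASQ_IFACE=eth1
    BM_DNSMASQ_RANGE=192.0.2.32,192.0.2.48

Leaving ``BM_POWER_MANAGER`` at its default FakePowerManager instead keeps power control manual, which is sufficient for a single-host trial without IPMI-capable hardware.)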
+ +BM_IMAGE_BUILD_DIR=${BM_IMAGE_BUILD_DIR:-$DEST/diskimage-builder} + +BM_HOST_CURRENT_KERNEL=$(uname -r) +BM_DEPLOY_RAMDISK=${BM_DEPLOY_RAMDISK:-bm-deploy-$BM_HOST_CURRENT_KERNEL-initrd} +BM_DEPLOY_KERNEL=${BM_DEPLOY_KERNEL:-bm-deploy-$BM_HOST_CURRENT_KERNEL-vmlinuz} + +# If you need to add any extra flavors to the deploy ramdisk image +# eg, specific network drivers, specify them here +BM_DEPLOY_FLAVOR=${BM_DEPLOY_FLAVOR:-} + +# set URL and version for google shell-in-a-box +BM_SHELL_IN_A_BOX=${BM_SHELL_IN_A_BOX:-http://shellinabox.googlecode.com/files/shellinabox-2.14.tar.gz} + + +# Functions +# --------- + +# Check if baremetal is properly enabled +# Returns false if VIRT_DRIVER is not baremetal, or if ENABLED_SERVICES +# does not contain "baremetal" +function is_baremetal() { + if [[ "$ENABLED_SERVICES" =~ 'baremetal' && "$VIRT_DRIVER" = 'baremetal' ]]; then + return 0 + fi + return 1 +} + +# Install diskimage-builder and shell-in-a-box +# so that we can build the deployment kernel & ramdisk +function prepare_baremetal_toolchain() { + git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH + + local shellinabox_basename=$(basename $BM_SHELL_IN_A_BOX) + if [[ ! -e $DEST/$shellinabox_basename ]]; then + cd $DEST + wget $BM_SHELL_IN_A_BOX + fi + if [[ ! -d $DEST/${shellinabox_basename%%.tar.gz} ]]; then + cd $DEST + tar xzf $shellinabox_basename + fi + if [[ ! $(which shellinaboxd) ]]; then + cd $DEST/${shellinabox_basename%%.tar.gz} + ./configure + make + sudo make install + fi +} + +# prepare various directories needed by baremetal hypervisor +function configure_baremetal_nova_dirs() { + # ensure /tftpboot is prepared + sudo mkdir -p /tftpboot + sudo mkdir -p /tftpboot/pxelinux.cfg + sudo cp /usr/lib/syslinux/pxelinux.0 /tftpboot/ + sudo chown -R `whoami`:libvirtd /tftpboot + + # ensure $NOVA_STATE_PATH/baremetal is prepared + sudo mkdir -p $NOVA_STATE_PATH/baremetal + sudo mkdir -p $NOVA_STATE_PATH/baremetal/console + sudo mkdir -p $NOVA_STATE_PATH/baremetal/dnsmasq + sudo touch $NOVA_STATE_PATH/baremetal/dnsmasq/dnsmasq-dhcp.host + sudo chown -R `whoami` $NOVA_STATE_PATH/baremetal + + # ensure dnsmasq is installed but not running + # because baremetal driver will reconfigure and restart this as needed + if [ ! is_package_installed dnsmasq ]; then + install_package dnsmasq + fi + stop_service dnsmasq +} + +# build deploy kernel+ramdisk, then upload them to glance +# this function sets BM_DEPLOY_KERNEL_ID and BM_DEPLOY_RAMDISK_ID +function upload_baremetal_deploy() { + token=$1 + + if [ ! -e $TOP_DIR/files/$BM_DEPLOY_KERNEL -a -e /boot/vmlinuz-$BM_HOST_CURRENT_KERNEL ]; then + sudo cp /boot/vmlinuz-$BM_HOST_CURRENT_KERNEL $TOP_DIR/files/$BM_DEPLOY_KERNEL + sudo chmod a+r $TOP_DIR/files/$BM_DEPLOY_KERNEL + fi + if [ ! 
-e $TOP_DIR/files/$BM_DEPLOY_RAMDISK ]; then + $BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR deploy \ + -o $TOP_DIR/files/$BM_DEPLOY_RAMDISK -k $BM_HOST_CURRENT_KERNEL + fi + + # load them into glance + BM_DEPLOY_KERNEL_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $BM_DEPLOY_KERNEL \ + --public --disk-format=aki \ + < $TOP_DIR/files/$BM_DEPLOY_KERNEL | grep ' id ' | get_field 2) + BM_DEPLOY_RAMDISK_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $BM_DEPLOY_RAMDISK \ + --public --disk-format=ari \ + < $TOP_DIR/files/$BM_DEPLOY_RAMDISK | grep ' id ' | get_field 2) +} + +# create a basic baremetal flavor, associated with deploy kernel & ramdisk +# +# Usage: create_baremetal_flavor +function create_baremetal_flavor() { + aki=$1 + ari=$2 + nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \ + $BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU + nova-manage instance_type set_key \ + --name=$BM_FLAVOR_NAME --key cpu_arch --value $BM_FLAVOR_ARCH + nova-manage instance_type set_key \ + --name=$BM_FLAVOR_NAME --key deploy_kernel_id --value $aki + nova-manage instance_type set_key \ + --name=$BM_FLAVOR_NAME --key deploy_ramdisk_id --value $ari +} + +# pull run-time kernel/ramdisk out of disk image and load into glance +# note that $file is currently expected to be in qcow2 format +# Sets KERNEL_ID and RAMDISK_ID +# +# Usage: extract_and_upload_k_and_r_from_image $token $file +function extract_and_upload_k_and_r_from_image() { + token=$1 + file=$2 + image_name=$(basename "$file" ".qcow2") + + # this call returns the file names as "$kernel,$ramdisk" + out=$($BM_IMAGE_BUILD_DIR/bin/disk-image-get-kernel \ + -x -d $TOP_DIR/files -o bm-deploy -i $file) + if [ $? -ne 0 ]; then + die "Failed to get kernel and ramdisk from $file" + fi + XTRACE=$(set +o | grep xtrace) + set +o xtrace + out=$(echo "$out" | tail -1) + $XTRACE + OUT_KERNEL=${out%%,*} + OUT_RAMDISK=${out##*,} + + # load them into glance + KERNEL_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $image_name-kernel \ + --public --disk-format=aki \ + < $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2) + RAMDISK_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $image_name-initrd \ + --public --disk-format=ari \ + < $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2) +} + + +# Re-implementation of devstack's "upload_image" function +# +# Takes the same parameters, but has some peculiarities which made it +# easier to create a separate method, rather than complicate the logic +# of the existing function. +function upload_baremetal_image() { + local image_url=$1 + local token=$2 + + # Create a directory for the downloaded image tarballs. + mkdir -p $FILES/images + + # Downloads the image (uec ami+aki style), then extracts it. + IMAGE_FNAME=`basename "$image_url"` + if [[ ! -f $FILES/$IMAGE_FNAME || \ + "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then + wget -c $image_url -O $FILES/$IMAGE_FNAME + if [[ $? 
-ne 0 ]]; then + echo "Not found: $image_url" + return + fi + fi + + local KERNEL="" + local RAMDISK="" + local DISK_FORMAT="" + local CONTAINER_FORMAT="" + case "$IMAGE_FNAME" in + *.tar.gz|*.tgz) + # Extract ami and aki files + [ "${IMAGE_FNAME%.tar.gz}" != "$IMAGE_FNAME" ] && + IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" || + IMAGE_NAME="${IMAGE_FNAME%.tgz}" + xdir="$FILES/images/$IMAGE_NAME" + rm -Rf "$xdir"; + mkdir "$xdir" + tar -zxf $FILES/$IMAGE_FNAME -C "$xdir" + KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do + [ -f "$f" ] && echo "$f" && break; done; true) + RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do + [ -f "$f" ] && echo "$f" && break; done; true) + IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do + [ -f "$f" ] && echo "$f" && break; done; true) + if [[ -z "$IMAGE_NAME" ]]; then + IMAGE_NAME=$(basename "$IMAGE" ".img") + fi + DISK_FORMAT=ami + CONTAINER_FORMAT=ami + ;; + *.qcow2) + IMAGE="$FILES/${IMAGE_FNAME}" + IMAGE_NAME=$(basename "$IMAGE" ".qcow2") + DISK_FORMAT=qcow2 + CONTAINER_FORMAT=bare + ;; + *) echo "Do not know what to do with $IMAGE_FNAME"; false;; + esac + + if [ "$CONTAINER_FORMAT" = "bare" ]; then + extract_and_upload_k_and_r_from_image $token $IMAGE + elif [ "$CONTAINER_FORMAT" = "ami" ]; then + KERNEL_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name "$IMAGE_NAME-kernel" --public \ + --container-format aki \ + --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2) + RAMDISK_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name "$IMAGE_NAME-ramdisk" --public \ + --container-format ari \ + --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) + else + # TODO(deva): add support for other image types + return + fi + + glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name "${IMAGE_NAME%.img}" --public \ + --container-format $CONTAINER_FORMAT \ + --disk-format $DISK_FORMAT \ + ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \ + ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" + + # override DEFAULT_IMAGE_NAME so that tempest can find the image + # that we just uploaded in glance + DEFAULT_IMAGE_NAME="${IMAGE_NAME%.img}" +} + +function clear_baremetal_of_all_nodes() { + list=$(nova-baremetal-manage node list | tail -n +2 | awk '{print $1}' ) + for node in $list + do + nova-baremetal-manage node delete $node + done + list=$(nova-baremetal-manage interface list | tail -n +2 | awk '{print $1}' ) + for iface in $list + do + nova-baremetal-manage interface delete $iface + done +} + +# inform nova-baremetal about nodes, MACs, etc +# Defaults to using BM_FIRST_MAC and BM_SECOND_MAC if parameters not specified +# +# Usage: add_baremetal_node +function add_baremetal_node() { + mac_1=${1:-$BM_FIRST_MAC} + mac_2=${2:-$BM_SECOND_MAC} + + id=$(nova-baremetal-manage node create \ + --host=$BM_HOSTNAME --prov_mac=$mac_1 \ + --cpus=$BM_FLAVOR_CPU --memory_mb=$BM_FLAVOR_RAM \ + --local_gb=$BM_FLAVOR_ROOT_DISK --terminal_port=0 \ + --pm_address=$BM_PM_ADDR --pm_user=$BM_PM_USER --pm_password=$BM_PM_PASS \ + ) + [ $? -eq 0 ] || [ "$id" ] || die "Error adding baremetal node" + id2=$(nova-baremetal-manage interface create \ + --node_id=$id --mac_address=$mac_2 --datapath_id=0 --port_no=0 \ + ) + [ $? 
-eq 0 ] || [ "$id2" ] || die "Error adding interface to barmetal node $id" +} + + +# Restore xtrace +$XTRACE diff --git a/lib/nova b/lib/nova index 26c5d3c6..80741533 100644 --- a/lib/nova +++ b/lib/nova @@ -214,6 +214,11 @@ function configure_nova() { fi fi + # Prepare directories and packages for baremetal driver + if is_baremetal; then + configure_baremetal_nova_dirs + fi + if is_service_enabled quantum && is_quantum_ovs_base_plugin "$Q_PLUGIN" && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces cat < Date: Wed, 12 Dec 2012 16:52:55 -0800 Subject: [PATCH 870/967] update baremetal option names update power_manager and instance_type_extra_specs config opts to match the new values in nova, introduced by https://review.openstack.org/#/c/17994/ Change-Id: Ic624362df17c217406e142ef4c2e65a4c0c2765d --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 1ee669d6..53b892f5 100755 --- a/stack.sh +++ b/stack.sh @@ -1124,8 +1124,8 @@ if is_service_enabled nova; then add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER" add_nova_opt "baremetal_driver=$BM_DRIVER" add_nova_opt "baremetal_tftp_root=/tftpboot" - add_nova_opt "instance_type_extra_specs=cpu_arch:$BM_CPU_ARCH" - add_nova_opt "power_manager=$BM_POWER_MANAGER" + add_nova_opt "baremetal_instance_type_extra_specs=cpu_arch:$BM_CPU_ARCH" + add_nova_opt "baremetal_power_manager=$BM_POWER_MANAGER" add_nova_opt "scheduler_host_manager=nova.scheduler.baremetal_host_manager.BaremetalHostManager" add_nova_opt "scheduler_default_filters=AllHostsFilter" From 7611c894b598c876912ab967642f0e8c8ad9171b Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Fri, 23 Nov 2012 10:54:54 -0800 Subject: [PATCH 871/967] Add fake env support to baremetal Use bm_poseur to create VM and network bridge so that, in the absence of physical hardware, baremetal driver still has something to manipulate. Change-Id: Id80ede13a35e4380f358b47f08d41ff98ea9d70f --- lib/baremetal | 46 +++++++++++++++++++++++++++++++++++++++++----- stack.sh | 3 +++ stackrc | 7 +++++++ unstack.sh | 6 ++++++ 4 files changed, 57 insertions(+), 5 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index f82633a4..62605fb8 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -71,10 +71,24 @@ BM_POWER_MANAGER=${BM_POWER_MANAGER:-nova.virt.baremetal.fake.FakePowerManager} # These should be customized to your environment and hardware # ----------------------------------------------------------- -# BM_DNSMASQ_* options must be changed to suit your network environment -BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-$PUBLIC_INTERFACE} -BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0} -BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-192.0.2.32,192.0.2.48} +# whether to create a fake environment, eg. 
for devstack-gate +BM_USE_FAKE_ENV=`trueorfalse False $BM_USE_FAKE_ENV` + +# Extra options to pass to bm_poseur +# change the bridge name or IP: --bridge br99 --bridge-ip 192.0.2.1 +# change the virtualization type: --engine qemu +BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-} + +# BM_DNSMASQ_IFACE should match FLAT_NETWORK_BRIDGE +if [ "$BM_USE_FAKE_ENV" ]; then + BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-br99} + BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-192.0.2.32,192.0.2.48} +else + BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0} + # if testing on a physical network, + # BM_DNSMASQ_RANGE must be changed to suit your network + BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-} +fi # BM_FIRST_MAC *must* be set to the MAC address of the node you will boot. # This is passed to dnsmasq along with the kernel/ramdisk to @@ -108,8 +122,8 @@ BM_FLAVOR_ARCH=${BM_FLAVOR_ARCH:-$BM_CPU_ARCH} # Below this, we set some path and filenames. # Defaults are probably sufficient. - BM_IMAGE_BUILD_DIR=${BM_IMAGE_BUILD_DIR:-$DEST/diskimage-builder} +BM_POSEUR_DIR=${BM_POSEUR_DIR:-$DEST/bm_poseur} BM_HOST_CURRENT_KERNEL=$(uname -r) BM_DEPLOY_RAMDISK=${BM_DEPLOY_RAMDISK:-bm-deploy-$BM_HOST_CURRENT_KERNEL-initrd} @@ -140,6 +154,7 @@ function is_baremetal() { # so that we can build the deployment kernel & ramdisk function prepare_baremetal_toolchain() { git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH + git_clone $BM_POSEUR_REPO $BM_POSEUR_DIR $BM_POSEUR_BRANCH local shellinabox_basename=$(basename $BM_SHELL_IN_A_BOX) if [[ ! -e $DEST/$shellinabox_basename ]]; then @@ -158,6 +173,27 @@ function prepare_baremetal_toolchain() { fi } +# set up virtualized environment for devstack-gate testing +function create_fake_baremetal_env() { + local bm_poseur="$BM_POSEUR_DIR/bm_poseur" + # TODO(deva): add support for >1 VM + sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-bridge + sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-vm + BM_FIRST_MAC=$(sudo $bm_poseur get-macs) + + # NOTE: there is currently a limitation in baremetal driver + # that requires second MAC even if it is not used. + # Passing a fake value allows this to work. + # TODO(deva): remove this after driver issue is fixed. + BM_SECOND_MAC='12:34:56:78:90:12' +} + +function cleanup_fake_baremetal_env() { + local bm_poseur="$BM_POSEUR_DIR/bm_poseur" + sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-vm + sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-bridge +} + # prepare various directories needed by baremetal hypervisor function configure_baremetal_nova_dirs() { # ensure /tftpboot is prepared diff --git a/stack.sh b/stack.sh index 53b892f5..87c193a7 100755 --- a/stack.sh +++ b/stack.sh @@ -1145,6 +1145,9 @@ if is_service_enabled nova && is_baremetal; then echo_summary "Preparing for nova baremetal" prepare_baremetal_toolchain configure_baremetal_nova_dirs + if [[ "$BM_USE_FAKE_ENV" = "True" ]]; then + create_fake_baremetal_env + fi fi # Launch Services diff --git a/stackrc b/stackrc index 49ccaa82..0e84db80 100644 --- a/stackrc +++ b/stackrc @@ -115,6 +115,13 @@ RYU_BRANCH=master BM_IMAGE_BUILD_REPO=https://github.com/stackforge/diskimage-builder.git BM_IMAGE_BUILD_BRANCH=master +# bm_poseur +# Used to simulate a hardware environment for baremetal +# Only used if BM_USE_FAKE_ENV is set +BM_POSEUR_REPO=https://github.com/tripleo/bm_poseur.git +BM_POSEUR_BRANCH=master + + # Nova hypervisor configuration. 
We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can # also install an **LXC** or **OpenVZ** based system. diff --git a/unstack.sh b/unstack.sh index 2a0a40a8..fd70916d 100755 --- a/unstack.sh +++ b/unstack.sh @@ -25,6 +25,7 @@ source $TOP_DIR/stackrc DATA_DIR=${DATA_DIR:-${DEST}/data} # Get project function libraries +source $TOP_DIR/lib/baremetal source $TOP_DIR/lib/cinder source $TOP_DIR/lib/horizon source $TOP_DIR/lib/swift @@ -67,6 +68,11 @@ if is_service_enabled tls-proxy; then killall stud fi +# baremetal might have created a fake environment +if is_service_enabled baremetal && [[ "$BM_USE_FAKE_ENV" = "True" ]]; then + cleanup_fake_baremetal_env +fi + SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* # Get the iSCSI volumes From 64ab774313a5791cfbd8798c68a93068a11229ac Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 28 Dec 2012 15:38:28 -0600 Subject: [PATCH 872/967] Set up swift's auth cache dir Swift backing glance doesn't work due to auth_token failing without a signing_dir configured and set up. Create the dir and configure in proxy-server.conf Bug 1092783 Change-Id: If9ac46592bb7fc09e6cfd0a802a4fa61304fc369 --- lib/swift | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/swift b/lib/swift index 140e5e9b..713b38c7 100644 --- a/lib/swift +++ b/lib/swift @@ -29,6 +29,7 @@ set +o xtrace SWIFT_DIR=$DEST/swift SWIFTCLIENT_DIR=$DEST/python-swiftclient +SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift} # TODO: add logging to different location. @@ -212,6 +213,7 @@ function configure_swift() { iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken signing_dir $SWIFT_AUTH_CACHE_DIR iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles @@ -325,6 +327,10 @@ function init_swift() { swift-ring-builder account.builder rebalance } && popd >/dev/null + # Create cache dir + sudo mkdir -p $SWIFT_AUTH_CACHE_DIR + sudo chown `whoami` $SWIFT_AUTH_CACHE_DIR + rm -f $SWIFT_AUTH_CACHE_DIR/* } function install_swift() { From 6d04fd7ba59450c4d9c6c7317eef05c7812056b1 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 21 Dec 2012 11:03:37 -0600 Subject: [PATCH 873/967] Holiday docs and comment formatting cleanup Change-Id: Ia4ca88c67d3b94e306a79a669805a2fa1b0dc069 --- HACKING.rst | 20 +++++++++- exercises/quantum-adv-test.sh | 69 +++++++++++------------------------ lib/ceilometer | 25 +++++++------ lib/cinder | 2 +- lib/databases/mysql | 4 +- lib/databases/postgresql | 4 +- lib/glance | 5 +-- lib/heat | 23 +++++++----- lib/keystone | 3 -- lib/nova | 2 +- lib/swift | 7 +--- lib/tempest | 18 ++++----- 12 files changed, 81 insertions(+), 101 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index e8f90c78..c4641fa0 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -15,6 +15,16 @@ https://github.com/openstack-dev/devstack.git. Besides the master branch that tracks the OpenStack trunk branches a separate branch is maintained for all OpenStack releases starting with Diablo (stable/diablo). +Contributing code to DevStack follows the usual OpenStack process as described +in `How To Contribute`__ in the OpenStack wiki. 
`DevStack's LaunchPad project`__ +contains the usual links for blueprints, bugs, tec. + +__ contribute_ +.. _contribute: http://wiki.openstack.org/HowToContribute. + +__ lp_ +.. _lp: https://launchpad.net/~devstack + The primary script in DevStack is ``stack.sh``, which performs the bulk of the work for DevStack's use cases. There is a subscript ``functions`` that contains generally useful shell functions and is used by a number of the scripts in @@ -53,8 +63,8 @@ configuration of the user environment:: source $TOP_DIR/openrc ``stack.sh`` is a rather large monolithic script that flows through from beginning -to end. The process of breaking it down into project-level sub-scripts has begun -with the introduction of ``lib/cinder`` and ``lib/ceilometer``. +to end. The process of breaking it down into project-level sub-scripts is nearly +complete and should make ``stack.sh`` easier to read and manage. These library sub-scripts have a number of fixed entry points, some of which may just be stubs. These entry points will be called by ``stack.sh`` in the @@ -71,6 +81,12 @@ There is a sub-script template in ``lib/templates`` to be used in creating new service sub-scripts. The comments in ``<>`` are meta comments describing how to use the template and should be removed. +In order to show the dependencies and conditions under which project functions +are executed the top-level conditional testing for things like ``is_service_enabled`` +should be done in ``stack.sh``. There may be nested conditionals that need +to be in the sub-script, such as testing for keystone being enabled in +``configure_swift()``. + Documentation ------------- diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh index 493e2239..bc33fe82 100755 --- a/exercises/quantum-adv-test.sh +++ b/exercises/quantum-adv-test.sh @@ -1,10 +1,9 @@ #!/usr/bin/env bash # -# **quantum.sh** +# **quantum-adv-test.sh** -# We will use this test to perform integration testing of nova and -# other components with Quantum. +# Perform integration testing of Nova and other components with Quantum. echo "*********************************************************************" echo "Begin DevStack Exercise: $0" @@ -14,6 +13,7 @@ echo "*********************************************************************" # only the first error that occured. set -o errtrace + trap failed ERR failed() { local r=$? @@ -30,17 +30,8 @@ failed() { # an error. It is also useful for following allowing as the install occurs. set -o xtrace -#------------------------------------------------------------------------------ -# Quantum config check -#------------------------------------------------------------------------------ -# Warn if quantum is not enabled -if [[ ! 
"$ENABLED_SERVICES" =~ "q-svc" ]]; then - echo "WARNING: Running quantum test without enabling quantum" -fi - -#------------------------------------------------------------------------------ # Environment -#------------------------------------------------------------------------------ +# ----------- # Keep track of the current directory EXERCISE_DIR=$(cd $(dirname "$0") && pwd) @@ -62,9 +53,8 @@ source $TOP_DIR/lib/quantum # Import exercise configuration source $TOP_DIR/exerciserc -#------------------------------------------------------------------------------ -# Test settings for quantum -#------------------------------------------------------------------------------ +# Quantum Settings +# ---------------- TENANTS="DEMO1" # TODO (nati)_Test public network @@ -106,24 +96,17 @@ PUBLIC_ROUTER1_NET="admin-net1" DEMO1_ROUTER1_NET="demo1-net1" DEMO2_ROUTER1_NET="demo2-net1" -#------------------------------------------------------------------------------ -# Keystone settings. -#------------------------------------------------------------------------------ KEYSTONE="keystone" -#------------------------------------------------------------------------------ -# Get a token for clients that don't support service catalog -#------------------------------------------------------------------------------ - -# manually create a token by querying keystone (sending JSON data). Keystone +# Manually create a token by querying keystone (sending JSON data). Keystone # returns a token and catalog of endpoints. We use python to parse the token # and save it. TOKEN=`keystone token-get | grep ' id ' | awk '{print $4}'` -#------------------------------------------------------------------------------ -# Various functions. -#------------------------------------------------------------------------------ +# Various functions +# ----------------- + function foreach_tenant { COMMAND=$1 for TENANT in ${TENANTS//,/ };do @@ -192,10 +175,9 @@ function get_flavor_id { function confirm_server_active { local VM_UUID=$1 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - echo "server '$VM_UUID' did not become active!" - false -fi - + echo "server '$VM_UUID' did not become active!" + false + fi } function add_tenant { @@ -214,23 +196,15 @@ function add_tenant { function remove_tenant { local TENANT=$1 local TENANT_ID=$(get_tenant_id $TENANT) - $KEYSTONE tenant-delete $TENANT_ID } function remove_user { local USER=$1 local USER_ID=$(get_user_id $USER) - $KEYSTONE user-delete $USER_ID } - - -#------------------------------------------------------------------------------ -# "Create" functions -#------------------------------------------------------------------------------ - function create_tenants { source $TOP_DIR/openrc admin admin add_tenant demo1 demo1 demo1 @@ -383,9 +357,9 @@ function all { delete_all } -#------------------------------------------------------------------------------ -# Test functions. -#------------------------------------------------------------------------------ +# Test functions +# -------------- + function test_functions { IMAGE=$(get_image_id) echo $IMAGE @@ -400,9 +374,9 @@ function test_functions { echo $NETWORK_ID } -#------------------------------------------------------------------------------ -# Usage and main. 
-#------------------------------------------------------------------------------ +# Usage and main +# -------------- + usage() { echo "$0: [-h]" echo " -h, --help Display help message" @@ -473,10 +447,9 @@ main() { fi } +# Kick off script +# --------------- -#------------------------------------------------------------------------------- -# Kick off script. -#------------------------------------------------------------------------------- echo $* main $* diff --git a/lib/ceilometer b/lib/ceilometer index aa1b3960..76ab254d 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -1,9 +1,9 @@ # lib/ceilometer -# Install and start Ceilometer service +# Install and start **Ceilometer** service + # To enable, add the following to localrc # ENABLED_SERVICES+=ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api - # Dependencies: # - functions # - OS_AUTH_URL for auth in api @@ -12,12 +12,12 @@ # stack.sh # --------- -# install_XXX -# configure_XXX -# init_XXX -# start_XXX -# stop_XXX -# cleanup_XXX +# install_ceilometer +# configure_ceilometer +# init_ceilometer +# start_ceilometer +# stop_ceilometer +# cleanup_ceilometer # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -27,17 +27,18 @@ set +o xtrace # Defaults # -------- -# set up default directories +# Set up default directories CEILOMETER_DIR=$DEST/ceilometer +CEILOMETER_CONF_DIR=/etc/ceilometer +CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf +CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api + # Support potential entry-points console scripts if [ -d $CEILOMETER_DIR/bin ] ; then CEILOMETER_BIN_DIR=$CEILOMETER_DIR/bin else CEILOMETER_BIN_DIR=/usr/local/bin fi -CEILOMETER_CONF_DIR=/etc/ceilometer -CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf -CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api # cleanup_ceilometer() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up diff --git a/lib/cinder b/lib/cinder index dadc8f14..701effd3 100644 --- a/lib/cinder +++ b/lib/cinder @@ -1,5 +1,5 @@ # lib/cinder -# Install and start Cinder volume service +# Install and start **Cinder** volume service # Dependencies: # - functions diff --git a/lib/databases/mysql b/lib/databases/mysql index 68e9adc5..1c0f5ebf 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -1,5 +1,5 @@ -# lib/mysql -# Functions to control the configuration and operation of the MySQL database backend +# lib/databases/mysql +# Functions to control the configuration and operation of the **MySQL** database backend # Dependencies: # DATABASE_{HOST,USER,PASSWORD} must be defined diff --git a/lib/databases/postgresql b/lib/databases/postgresql index e1463c5a..04db714a 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -1,5 +1,5 @@ -# lib/postgresql -# Functions to control the configuration and operation of the PostgreSQL database backend +# lib/databases/postgresql +# Functions to control the configuration and operation of the **PostgreSQL** database backend # Dependencies: # DATABASE_{HOST,USER,PASSWORD} must be defined diff --git a/lib/glance b/lib/glance index 8ba04b3a..dff247a5 100644 --- a/lib/glance +++ b/lib/glance @@ -1,5 +1,5 @@ # lib/glance -# Functions to control the configuration and operation of the Glance service +# Functions to control the configuration and operation of the **Glance** service # Dependencies: # ``functions`` file @@ -25,8 +25,6 @@ set +o xtrace # Defaults # -------- -# - # Set up default directories GLANCE_DIR=$DEST/glance 
GLANCECLIENT_DIR=$DEST/python-glanceclient @@ -141,7 +139,6 @@ function configure_glance() { iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON - } # init_glance() - Initialize databases, etc. diff --git a/lib/heat b/lib/heat index 43115cb8..a6f72862 100644 --- a/lib/heat +++ b/lib/heat @@ -1,5 +1,6 @@ # lib/heat -# Install and start Heat service +# Install and start **Heat** service + # To enable, add the following to localrc # ENABLED_SERVICES+=,heat,h-api-cfn,h-api-cw,h-eng @@ -8,12 +9,14 @@ # stack.sh # --------- -# install_XXX -# configure_XXX -# init_XXX -# start_XXX -# stop_XXX -# cleanup_XXX +# install_heatclient +# install_heat +# configure_heatclient +# configure_heat +# init_heat +# start_heat +# stop_heat +# cleanup_heat # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -57,7 +60,7 @@ function configure_heat() { HEAT_API_HOST=${HEAT_API_HOST:-$SERVICE_HOST} HEAT_API_PORT=${HEAT_API_PORT:-8004} - # cloudformation api + # Cloudformation API HEAT_API_CFN_CONF=$HEAT_CONF_DIR/heat-api-cfn.conf cp $HEAT_DIR/etc/heat/heat-api-cfn.conf $HEAT_API_CFN_CONF iniset $HEAT_API_CFN_CONF DEFAULT debug True @@ -86,7 +89,7 @@ function configure_heat() { iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens - # openstack api + # OpenStack API HEAT_API_CONF=$HEAT_CONF_DIR/heat-api.conf cp $HEAT_DIR/etc/heat/heat-api.conf $HEAT_API_CONF iniset $HEAT_API_CONF DEFAULT debug True @@ -139,7 +142,7 @@ function configure_heat() { iniset $HEAT_ENGINE_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid fi - # cloudwatch api + # Cloudwatch API HEAT_API_CW_CONF=$HEAT_CONF_DIR/heat-api-cloudwatch.conf cp $HEAT_DIR/etc/heat/heat-api-cloudwatch.conf $HEAT_API_CW_CONF iniset $HEAT_API_CW_CONF DEFAULT debug True diff --git a/lib/keystone b/lib/keystone index 4dddedb1..34f33723 100644 --- a/lib/keystone +++ b/lib/keystone @@ -8,7 +8,6 @@ # ``SERVICE_TOKEN`` # ``S3_SERVICE_PORT`` (template backend only) - # ``stack.sh`` calls the entry points in this order: # # install_keystone @@ -27,8 +26,6 @@ set +o xtrace # Defaults # -------- -# - # Set up default directories KEYSTONE_DIR=$DEST/keystone KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} diff --git a/lib/nova b/lib/nova index 5224d4de..594195ea 100644 --- a/lib/nova +++ b/lib/nova @@ -1,5 +1,5 @@ # lib/nova -# Functions to control the configuration and operation of the XXXX service +# Functions to control the configuration and operation of the **Nova** service # Dependencies: # ``functions`` file diff --git a/lib/swift b/lib/swift index 713b38c7..89342644 100644 --- a/lib/swift +++ b/lib/swift @@ -1,5 +1,5 @@ # lib/swift -# Functions to control the configuration and operation of the swift service +# Functions to control the configuration and operation of the **Swift** service # Dependencies: # ``functions`` file @@ -23,10 +23,7 @@ set +o xtrace # Defaults # -------- -# - # Set up default directories - SWIFT_DIR=$DEST/swift SWIFTCLIENT_DIR=$DEST/python-swiftclient SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift} @@ -71,6 +68,7 @@ OBJECT_PORT_BASE=6010 CONTAINER_PORT_BASE=6011 ACCOUNT_PORT_BASE=6012 + # Entry Points # ------------ @@ -293,7 +291,6 @@ EOF sudo chown -R $USER:adm ${swift_log_dir} sed 
"s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ tee /etc/rsyslog.d/10-swift.conf - } # configure_swiftclient() - Set config files, create data dirs, etc diff --git a/lib/tempest b/lib/tempest index 337be75b..190d77f1 100644 --- a/lib/tempest +++ b/lib/tempest @@ -1,4 +1,5 @@ # lib/tempest +# Install and configure Tempest # Dependencies: # ``functions`` file @@ -23,33 +24,29 @@ # # install_tempest # configure_tempest -# init_tempest -## start_tempest -## stop_tempest -## cleanup_tempest # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace + # Defaults # -------- -# - # Set up default directories -NOVA_SOURCE_DIR=$DEST/nova TEMPEST_DIR=$DEST/tempest TEMPEST_CONF_DIR=$TEMPEST_DIR/etc TEMPEST_CONF=$TEMPEST_CONF_DIR/tempest.conf +NOVA_SOURCE_DIR=$DEST/nova + BUILD_INTERVAL=3 BUILD_TIMEOUT=400 + # Entry Points # ------------ - # configure_tempest() - Set config files, create data dirs, etc function configure_tempest() { local image_lines @@ -66,7 +63,7 @@ function configure_tempest() { local public_network_id local tenant_networks_reachable - #TODO(afazekas): + # TODO(afazekas): # sudo python setup.py deploy # This function exits on an error so that errors don't compound and you see @@ -74,7 +71,7 @@ function configure_tempest() { errexit=$(set +o | grep errexit) set -o errexit - #Save IFS + # Save IFS ifs=$IFS # Glance should already contain images to be used in tempest @@ -240,7 +237,6 @@ function configure_tempest() { $errexit } - # install_tempest() - Collect source and prepare function install_tempest() { git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH From 75eaaf43c7c0798c4d92726e448d4c0e9372ccf2 Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Fri, 28 Dec 2012 15:40:21 -0800 Subject: [PATCH 874/967] Minor fixes for lib/baremetal A recent patch moved the flavor management out of nova-manage and into python-novaclient. This corrects the behaviour of lib/baremetal so that it calls "nova flavor-key" instead of "nova-manage". This also fixes a logical error in testing whether dnsmasq is installed. Change-Id: I3fa821c22ae45a49e283d091b6e5ed51c9757e88 --- lib/baremetal | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index 62605fb8..112fd6d9 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -211,9 +211,7 @@ function configure_baremetal_nova_dirs() { # ensure dnsmasq is installed but not running # because baremetal driver will reconfigure and restart this as needed - if [ ! 
is_package_installed dnsmasq ]; then - install_package dnsmasq - fi + is_package_installed dnsmasq || install_package dnsmasq stop_service dnsmasq } @@ -256,12 +254,10 @@ function create_baremetal_flavor() { ari=$2 nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \ $BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU - nova-manage instance_type set_key \ - --name=$BM_FLAVOR_NAME --key cpu_arch --value $BM_FLAVOR_ARCH - nova-manage instance_type set_key \ - --name=$BM_FLAVOR_NAME --key deploy_kernel_id --value $aki - nova-manage instance_type set_key \ - --name=$BM_FLAVOR_NAME --key deploy_ramdisk_id --value $ari + nova flavor-key $BM_FLAVOR_NAME set \ + cpu_arch=$BM_FLAVOR_ARCH \ + deploy_kernel_id=$aki \ + deploy_ramdisk_id=$ari } # pull run-time kernel/ramdisk out of disk image and load into glance From eac9370a12c1491643457e576613c8368b8037c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A1draig=20Brady?= Date: Wed, 2 Jan 2013 16:02:54 +0000 Subject: [PATCH 875/967] Adjust CINDER_SECURE_DELETE flag to cinder changes Cater for the pending more general cinder support for configurable volume wiping method at: https://review.openstack.org/#/c/12521 This change is done here first so as to not trigger a CI lockup when the referenced patch above lands. When that's in place, we can remove the older secure_delete config adjustment in a subsequent patch. Change-Id: I73fe2e0d1cf2815ab6025121584951cb5ff56fa3 --- lib/cinder | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/cinder b/lib/cinder index 701effd3..e3df98a4 100644 --- a/lib/cinder +++ b/lib/cinder @@ -169,6 +169,7 @@ function configure_cinder() { if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then iniset $CINDER_CONF DEFAULT secure_delete False + iniset $CINDER_CONF DEFAULT volume_clear none fi if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then From b73e247ad45eee9ef45c32eff9bfa0daa3b1d733 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 2 Jan 2013 13:59:47 -0500 Subject: [PATCH 876/967] Revert "Create tools/install_prereqs.sh" This reverts commit 7be0b04 This work breaks the ability to do multi database installs, revert until there is a working solution here, as this is going to make fixing postgresql in tempest impossible. Change-Id: I39a2b78542fe60233806d1005186ce1b31d4be17 --- functions | 21 ----------- stack.sh | 20 ++++++++++- tools/install_prereqs.sh | 78 ---------------------------------------- 3 files changed, 19 insertions(+), 100 deletions(-) delete mode 100755 tools/install_prereqs.sh diff --git a/functions b/functions index 23aee935..9565e10d 100644 --- a/functions +++ b/functions @@ -710,27 +710,6 @@ function restart_service() { } -# HTTP and HTTPS proxy servers are supported via the usual environment variables [1] -# ``http_proxy``, ``https_proxy`` and ``no_proxy``. 
They can be set in -# ``localrc`` or on the command line if necessary:: -# -# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html -# -# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh - -function re_export_proxy_variables() { - if [[ -n "$http_proxy" ]]; then - export http_proxy=$http_proxy - fi - if [[ -n "$https_proxy" ]]; then - export https_proxy=$https_proxy - fi - if [[ -n "$no_proxy" ]]; then - export no_proxy=$no_proxy - fi -} - - # Helper to launch a service in a named screen # screen_it service "command-line" function screen_it { diff --git a/stack.sh b/stack.sh index 7306b588..9f734b9e 100755 --- a/stack.sh +++ b/stack.sh @@ -648,7 +648,25 @@ set -o xtrace # Install package requirements echo_summary "Installing package prerequisites" -$TOP_DIR/tools/install_prereqs.sh +if is_ubuntu; then + install_package $(get_packages $FILES/apts) +elif is_fedora; then + install_package $(get_packages $FILES/rpms) +elif is_suse; then + install_package $(get_packages $FILES/rpms-suse) +else + exit_distro_not_supported "list of packages" +fi + +if [[ $SYSLOG != "False" ]]; then + if is_ubuntu || is_fedora; then + install_package rsyslog-relp + elif is_suse; then + install_package rsyslog-module-relp + else + exit_distro_not_supported "rsyslog-relp installation" + fi +fi if is_service_enabled rabbit; then # Install rabbitmq-server diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh deleted file mode 100755 index 0bf217b3..00000000 --- a/tools/install_prereqs.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env bash - -# **install_prereqs.sh** - -# Install system package prerequisites -# -# install_prereqs.sh [-f] -# -# -f Force an install run now - - -if [[ -n "$1" && "$1" = "-f" ]]; then - FORCE=1 -fi - -# Keep track of the devstack directory -TOP_DIR=$(cd $(dirname "$0")/.. && pwd) - -# Import common functions -source $TOP_DIR/functions - -# Determine what system we are running on. This provides ``os_VENDOR``, -# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` -# and ``DISTRO`` -GetDistro - -# Needed to get ``ENABLED_SERVICES`` -source $TOP_DIR/stackrc - -# Prereq dirs are here -FILES=$TOP_DIR/files - -# Minimum wait time -PREREQ_RERUN_MARKER=${PREREQ_RERUN_MARKER:-$TOP_DIR/.prereqs} -PREREQ_RERUN_HOURS=${PREREQ_RERUN_HOURS:-2} -PREREQ_RERUN_SECONDS=$((60*60*$PREREQ_RERUN_HOURS)) - -NOW=$(date "+%s") -LAST_RUN=$(head -1 $PREREQ_RERUN_MARKER 2>/dev/null || echo "0") -DELTA=$(($NOW - $LAST_RUN)) -if [[ $DELTA -lt $PREREQ_RERUN_SECONDS && -z "$FORCE" ]]; then - echo "Re-run time has not expired ($(($PREREQ_RERUN_SECONDS - $DELTA)) seconds remaining); exiting..." 
- exit 0 -fi - -# Make sure the proxy config is visible to sub-processes -re_export_proxy_variables - -# Install Packages -# ================ - -# Install package requirements -if is_ubuntu; then - install_package $(get_packages $FILES/apts) -elif is_fedora; then - install_package $(get_packages $FILES/rpms) -elif is_suse; then - install_package $(get_packages $FILES/rpms-suse) -else - exit_distro_not_supported "list of packages" -fi - -if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then - if is_ubuntu || is_fedora; then - install_package rsyslog-relp - elif is_suse; then - install_package rsyslog-module-relp - else - exit_distro_not_supported "rsyslog-relp installation" - fi -fi - - -# Mark end of run -# --------------- - -date "+%s" >$PREREQ_RERUN_MARKER -date >>$PREREQ_RERUN_MARKER From c99853ca7187d20a8ba6b59c6e44f089c2d7d74f Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 3 Jan 2013 17:39:16 -0800 Subject: [PATCH 877/967] Enable millisecond logging for nova and cinder Change-Id: Ic28867ae9a436e81c7f2fcf79f40a1ecc251072c --- lib/cinder | 6 +++--- lib/nova | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/cinder b/lib/cinder index 701effd3..385a5a2a 100644 --- a/lib/cinder +++ b/lib/cinder @@ -173,10 +173,10 @@ function configure_cinder() { if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then # Add color to logging output - iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s %(color)s%(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s%(color)s] %(instance)s%(color)s%(message)s" - iniset $CINDER_CONF DEFAULT logging_default_format_string "%(asctime)s %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $CINDER_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" iniset $CINDER_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - iniset $CINDER_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s TRACE %(name)s %(instance)s" + iniset $CINDER_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)d TRACE %(name)s %(instance)s" fi if [ "$CINDER_DRIVER" == "XenAPINFS" ]; then diff --git a/lib/nova b/lib/nova index 594195ea..4fef5527 100644 --- a/lib/nova +++ b/lib/nova @@ -408,13 +408,13 @@ function create_nova_conf() { fi if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then # Add color to logging output - add_nova_opt "logging_context_format_string=%(asctime)s %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" - add_nova_opt "logging_default_format_string=%(asctime)s %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + add_nova_opt "logging_context_format_string=%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" + add_nova_opt "logging_default_format_string=%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" add_nova_opt "logging_debug_format_suffix=from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - add_nova_opt "logging_exception_prefix=%(color)s%(asctime)s 
TRACE %(name)s %(instance)s"
+        add_nova_opt "logging_exception_prefix=%(color)s%(asctime)s.%(msecs)d TRACE %(name)s %(instance)s"
     else
         # Show user_name and project_name instead of user_id and project_id
-        add_nova_opt "logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s"
+        add_nova_opt "logging_context_format_string=%(asctime)s.%(msecs) %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s"
     fi
     if is_service_enabled ceilometer; then
         add_nova_opt "instance_usage_audit=True"

From 35138ed6732fd2d4d06b9ffa92a391626acddfb1 Mon Sep 17 00:00:00 2001
From: Nikhil Manchanda
Date: Thu, 3 Jan 2013 17:49:58 -0800
Subject: [PATCH 878/967] Use 'which pip' rather than /usr/bin/pip

Bug 1095472: Bugfix to use 'which pip' rather than assume
pip always lives in /usr/bin

Change-Id: I0cc8a5e35306372653c3c27da9504c64e39d56dd
---
 functions | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/functions b/functions
index 23aee935..bb03c558 100644
--- a/functions
+++ b/functions
@@ -1146,9 +1146,9 @@ function get_rootwrap_location() {
 
 # get_pip_command
 function get_pip_command() {
     if is_fedora; then
-        echo "/usr/bin/pip-python"
+        which pip-python
     else
-        echo "/usr/bin/pip"
+        which pip
     fi
 }

From b38d61bc3becb7e82429a130729e1721b1827f0d Mon Sep 17 00:00:00 2001
From: Matthew Treinish
Date: Fri, 4 Jan 2013 13:38:23 -0500
Subject: [PATCH 879/967] Add libjs-jquery-tablesorter to package list.

Coverage html reports have a symlink to jquery.tablesorter.min.js
which is provided by libjs-jquery-tablesorter. If the package is
not installed coverage html reports will have a broken symlink to
that file.

Change-Id: I7496a22f36d4f2fda2f030962b10d5afbc8f448f
---
 files/apts/nova | 1 +
 1 file changed, 1 insertion(+)

diff --git a/files/apts/nova b/files/apts/nova
index c16a7087..b7d1e928 100644
--- a/files/apts/nova
+++ b/files/apts/nova
@@ -16,6 +16,7 @@ sqlite3
 sudo
 kvm
 libvirt-bin # NOPRIME
+libjs-jquery-tablesorter # Needed for coverage html reports
 vlan
 curl
 rabbitmq-server # NOPRIME

From 756c842a7743a84a084b4cc211998e3fdd171592 Mon Sep 17 00:00:00 2001
From: Adam Gandelman
Date: Fri, 4 Jan 2013 13:37:22 -0800
Subject: [PATCH 880/967] Properly wait until volumes are gone in volumes.sh

A logic error in the volume exercise's wait for volume deletion causes
the test to do the opposite, and continue on even though the volume is
still in the 'deleting' state. If using a volume backend that can
quickly delete volumes (ceph), and the volume is gone before entering
the wait, the loop will spin, time out and fail the test.

Change-Id: I8e3d2aaa04e6a165e0dee32bedac97d35e13d5eb
---
 exercises/volumes.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 48a976ed..5c5e0e44 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -192,7 +192,7 @@ echo "Completed volume-detach in $((end_time - start_time)) seconds"
 
 # Delete volume
 start_time=`date +%s`
 cinder delete $VOL_ID || die "Failure deleting volume $VOL_NAME"
-if !
timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then echo "Volume $VOL_NAME not deleted" exit 1 fi From 06fac37d064f93f06948534517cffdaa8fdf504e Mon Sep 17 00:00:00 2001 From: Nachi Ueno Date: Wed, 26 Dec 2012 14:09:43 -0800 Subject: [PATCH 881/967] Turn off tenant_isolation in tempest for quantum Current tempest didn't create networks for each tenant, so let tempest use demo tenant for tesing if we enable quantum. Change-Id: I5f139b5f1bdf5c176130b8db9e896e2cb48b4420 --- lib/tempest | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/tempest b/lib/tempest index 18599219..a9a05eeb 100644 --- a/lib/tempest +++ b/lib/tempest @@ -187,6 +187,12 @@ function configure_tempest() { iniset $TEMPEST_CONF compute resize_available False iniset $TEMPEST_CONF compute change_password_available False iniset $TEMPEST_CONF compute compute_log_level ERROR + # Note(nati) current tempest don't create network for each tenant + # so reuse same tenant for now + if is_service_enabled quantum; then + TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-False} + fi + iniset $TEMPEST_CONF compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} #Skip until #1074039 is fixed iniset $TEMPEST_CONF compute run_ssh False iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-$OS_USERNAME} From 22853c1974ca1ce50b946290bc7bf9b2dd34b64d Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 7 Jan 2013 15:18:12 -0600 Subject: [PATCH 882/967] Clean up cinder volume group rather than remove it Removing the cinder volume group breaks devstack installations that share that volume group with other logical volumes. It also was leaking loopback devices. Change-Id: Ice4470e06e08ce49a0e1f82af70abcc015c91c20 --- lib/cinder | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/lib/cinder b/lib/cinder index 385a5a2a..8949cfcc 100644 --- a/lib/cinder +++ b/lib/cinder @@ -48,6 +48,20 @@ fi VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} +# _clean_volume_group removes all cinder volumes from the specified volume group +# _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX +function _clean_volume_group() { + local vg=$1 + local vg_prefix=$2 + # Clean out existing volumes + for lv in `sudo lvs --noheadings -o lv_name $vg`; do + # vg_prefix prefixes the LVs we want + if [[ "${lv#$vg_prefix}" != "$lv" ]]; then + sudo lvremove -f $vg/$lv + fi + done +} + # cleanup_cinder() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_cinder() { @@ -84,7 +98,8 @@ function cleanup_cinder() { stop_service tgtd fi - sudo vgremove -f $VOLUME_GROUP + # Campsite rule: leave behind a volume group at least as clean as we found it + _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX } # configure_cinder() - Set config files, create data dirs, etc @@ -272,13 +287,8 @@ function init_cinder() { # Remove iscsi targets sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true - # Clean out existing volumes - for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do - # VOLUME_NAME_PREFIX prefixes the LVs we want - if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then - sudo lvremove -f $VOLUME_GROUP/$lv - fi - done + # Start with a clean volume group + _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX fi fi From 
5b813bc489eff682025d530557e2beda50db2eac Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Tue, 8 Jan 2013 16:51:05 +0100 Subject: [PATCH 883/967] Fix role creation in tools/create_userrc.sh * use role-create instead of tenant-create * add some missing quote Change-Id: I3e263bfbfe63a35c5a95248f05d78bd6a1c2e593 --- tools/create_userrc.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index e39c1570..55cb8fac 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -173,10 +173,10 @@ function add_entry(){ fi cat >"$rcfile" < Date: Tue, 8 Jan 2013 11:54:43 -0800 Subject: [PATCH 884/967] Use apt git package instead of git-core. The git-core package is deprecated in favor of the git package. Use the git package instead. Change-Id: Ib136e34c7a0d4f87b02e32996420b3f507ca0bf4 --- files/apts/general | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/apts/general b/files/apts/general index 12a92e0c..0264066a 100644 --- a/files/apts/general +++ b/files/apts/general @@ -6,7 +6,7 @@ screen unzip wget psmisc -git-core +git lsof # useful when debugging openssh-server vim-nox From e583d9b8f9bc8f3367df96027a83996ac1303b43 Mon Sep 17 00:00:00 2001 From: "Yunhong, Jiang" Date: Wed, 9 Jan 2013 09:33:07 +0800 Subject: [PATCH 885/967] Add ceilometer client in devstack Ceilometer client CLI is helpful to develop ceilometer related code. Add it to devstack also involve more developer to use it. Change-Id: I4147e50c00cb520ec15d63a0c34524ba8cb6654f Signed-off-by: Yunhong, Jiang --- lib/ceilometer | 11 +++++++++++ stack.sh | 2 ++ stackrc | 4 ++++ 3 files changed, 17 insertions(+) diff --git a/lib/ceilometer b/lib/ceilometer index 76ab254d..c31fcb92 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -29,6 +29,7 @@ set +o xtrace # Set up default directories CEILOMETER_DIR=$DEST/ceilometer +CEILOMETERCLIENT_DIR=$DEST/python-ceilometerclient CEILOMETER_CONF_DIR=/etc/ceilometer CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api @@ -46,6 +47,11 @@ function cleanup_ceilometer() { mongo ceilometer --eval "db.dropDatabase();" } +# configure_ceilometerclient() - Set config files, create data dirs, etc +function configure_ceilometerclient() { + setup_develop $CEILOMETERCLIENT_DIR +} + # configure_ceilometer() - Set config files, create data dirs, etc function configure_ceilometer() { setup_develop $CEILOMETER_DIR @@ -87,6 +93,11 @@ function install_ceilometer() { git_clone $CEILOMETER_REPO $CEILOMETER_DIR $CEILOMETER_BRANCH } +# install_ceilometerclient() - Collect source and prepare +function install_ceilometerclient() { + git_clone $CEILOMETERCLIENT_REPO $CEILOMETERCLIENT_DIR $CEILOMETERCLIENT_BRANCH +} + # start_ceilometer() - Start running processes, including screen function start_ceilometer() { screen_it ceilometer-acompute "cd $CEILOMETER_DIR && sg libvirtd \"$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" diff --git a/stack.sh b/stack.sh index 9f734b9e..247b860d 100755 --- a/stack.sh +++ b/stack.sh @@ -773,6 +773,7 @@ if is_service_enabled cinder; then install_cinder fi if is_service_enabled ceilometer; then + install_ceilometerclient install_ceilometer fi if is_service_enabled tempest; then @@ -1218,6 +1219,7 @@ fi if is_service_enabled ceilometer; then echo_summary "Configuring Ceilometer" configure_ceilometer + configure_ceilometerclient echo_summary "Starting Ceilometer" start_ceilometer fi diff --git a/stackrc b/stackrc index 
0e84db80..4e03a2f4 100644 --- a/stackrc +++ b/stackrc @@ -33,6 +33,10 @@ GIT_BASE=https://github.com CEILOMETER_REPO=${GIT_BASE}/openstack/ceilometer.git CEILOMETER_BRANCH=master +# ceilometer client library +CEILOMETERCLIENT_REPO=${GIT_BASE}/openstack/python-ceilometerclient +CEILOMETERCLIENT_BRANCH=master + # volume service CINDER_REPO=${GIT_BASE}/openstack/cinder CINDER_BRANCH=master From 4ce35c46cc80d07bb9f5ea4f1d4c6961c5f50d3e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 9 Jan 2013 08:13:39 -0500 Subject: [PATCH 886/967] fix msec format string in the else case, which was triggered for devstack gate the msec format string was incorrect, thus largely scrambling the usefulness of logs. Fix this to make devstack readable. Change-Id: I59d0e73932daf27dc1d718dfcf217fe2edf4d491 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 21157dc4..a43c0918 100644 --- a/lib/nova +++ b/lib/nova @@ -414,7 +414,7 @@ function create_nova_conf() { add_nova_opt "logging_exception_prefix=%(color)s%(asctime)s.%(msecs)d TRACE %(name)s %(instance)s" else # Show user_name and project_name instead of user_id and project_id - add_nova_opt "logging_context_format_string=%(asctime)s.%(msecs) %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" + add_nova_opt "logging_context_format_string=%(asctime)s.%(msecs)d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" fi if is_service_enabled ceilometer; then add_nova_opt "instance_usage_audit=True" From 8e5d2f0c7a29a8002c3be1c94f1abca65ddaea08 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 20 Dec 2012 13:11:43 +0000 Subject: [PATCH 887/967] Set recon_cache_path to ${SWIFT_DATA_DIR}/cache. - Fixes bug 1092538. Change-Id: Id9eb9446b32a800b1c7e0ef72882747424c65b6e --- lib/swift | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/swift b/lib/swift index 89342644..c433387d 100644 --- a/lib/swift +++ b/lib/swift @@ -99,7 +99,7 @@ function configure_swift() { # changing the permissions so we can run it as our user. USER_GROUP=$(id -g) - sudo mkdir -p ${SWIFT_DATA_DIR}/drives + sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache} sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR} # Create a loopback disk and format it to XFS. @@ -273,16 +273,22 @@ EOF swift_node_config=${SWIFT_CONFIG_DIR}/object-server/${node_number}.conf cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config} generate_swift_config ${swift_node_config} ${node_number} $[OBJECT_PORT_BASE + 10 * (node_number - 1)] + iniset ${swift_node_config} filter:recon recon_cache_path ${SWIFT_DATA_DIR}/cache + # Using a sed and not iniset/iniuncomment because we want to a global + # modification and make sure it works for new sections. 
+ sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config} swift_node_config=${SWIFT_CONFIG_DIR}/container-server/${node_number}.conf cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config} generate_swift_config ${swift_node_config} ${node_number} $[CONTAINER_PORT_BASE + 10 * (node_number - 1)] iniuncomment ${swift_node_config} app:container-server allow_versions iniset ${swift_node_config} app:container-server allow_versions "true" + sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config} swift_node_config=${SWIFT_CONFIG_DIR}/account-server/${node_number}.conf cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config} generate_swift_config ${swift_node_config} ${node_number} $[ACCOUNT_PORT_BASE + 10 * (node_number - 1)] + sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config} done swift_log_dir=${SWIFT_DATA_DIR}/logs From 9bc47db29c3767cb4aac492e1fd6f1c74e85ca5c Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Wed, 12 Dec 2012 16:52:55 -0800 Subject: [PATCH 888/967] convert add_nova_opt to iniset Convert all calls to add_nova_opt to use iniset $NOVA_CONF DEFAULT Convert baremetal options to use iniset $NOVA_CONF baremetal Change-Id: I03ce2149e1f3abc2feb40c156c50de7dabaf47a2 --- lib/nova | 88 ++++++++++++++++++++++++++--------------------------- lib/quantum | 26 ++++++++-------- stack.sh | 64 +++++++++++++++++++------------------- 3 files changed, 89 insertions(+), 89 deletions(-) diff --git a/lib/nova b/lib/nova index a43c0918..781cc097 100644 --- a/lib/nova +++ b/lib/nova @@ -354,73 +354,73 @@ function create_nova_conf() { # (Re)create ``nova.conf`` rm -f $NOVA_CONF add_nova_opt "[DEFAULT]" - add_nova_opt "verbose=True" - add_nova_opt "auth_strategy=keystone" - add_nova_opt "allow_resize_to_same_host=True" - add_nova_opt "api_paste_config=$NOVA_API_PASTE_INI" - add_nova_opt "rootwrap_config=$NOVA_CONF_DIR/rootwrap.conf" - add_nova_opt "compute_scheduler_driver=$SCHEDULER" - add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF" - add_nova_opt "force_dhcp_release=True" - add_nova_opt "fixed_range=$FIXED_RANGE" - add_nova_opt "default_floating_pool=$PUBLIC_NETWORK_NAME" - add_nova_opt "s3_host=$SERVICE_HOST" - add_nova_opt "s3_port=$S3_SERVICE_PORT" - add_nova_opt "osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions" - add_nova_opt "my_ip=$HOST_IP" + iniset $NOVA_CONF DEFAULT verbose "True" + iniset $NOVA_CONF DEFAULT auth_strategy "keystone" + iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True" + iniset $NOVA_CONF DEFAULT api_paste_config "$NOVA_API_PASTE_INI" + iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf" + iniset $NOVA_CONF DEFAULT compute_scheduler_driver "$SCHEDULER" + iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF" + iniset $NOVA_CONF DEFAULT force_dhcp_release "True" + iniset $NOVA_CONF DEFAULT fixed_range "$FIXED_RANGE" + iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME" + iniset $NOVA_CONF DEFAULT s3_host "$SERVICE_HOST" + iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT" + iniset $NOVA_CONF DEFAULT osapi_compute_extension "nova.api.openstack.compute.contrib.standard_extensions" + iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP" local dburl database_connection_url dburl nova - add_nova_opt "sql_connection=$dburl" + iniset $NOVA_CONF DEFAULT sql_connection "$dburl" if is_baremetal; then database_connection_url dburl nova_bm - 
add_nova_opt "baremetal_sql_connection=$dburl" + iniset $NOVA_CONF baremetal sql_connection $dburl fi - add_nova_opt "libvirt_type=$LIBVIRT_TYPE" - add_nova_opt "libvirt_cpu_mode=none" - add_nova_opt "instance_name_template=${INSTANCE_NAME_PREFIX}%08x" + iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" + iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" + iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" if is_service_enabled n-api; then - add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS" + iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS" if is_service_enabled tls-proxy; then # Set the service port for a proxy to take the original - add_nova_opt "osapi_compute_listen_port=$NOVA_SERVICE_PORT_INT" + iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT" fi fi if is_service_enabled cinder; then - add_nova_opt "volume_api_class=nova.volume.cinder.API" + iniset $NOVA_CONF DEFAULT volume_api_class "nova.volume.cinder.API" fi if [ -n "$NOVA_STATE_PATH" ]; then - add_nova_opt "state_path=$NOVA_STATE_PATH" - add_nova_opt "lock_path=$NOVA_STATE_PATH" + iniset $NOVA_CONF DEFAULT state_path "$NOVA_STATE_PATH" + iniset $NOVA_CONF DEFAULT lock_path "$NOVA_STATE_PATH" fi if [ -n "$NOVA_INSTANCES_PATH" ]; then - add_nova_opt "instances_path=$NOVA_INSTANCES_PATH" + iniset $NOVA_CONF DEFAULT instances_path "$NOVA_INSTANCES_PATH" fi if [ "$MULTI_HOST" != "False" ]; then - add_nova_opt "multi_host=True" - add_nova_opt "send_arp_for_ha=True" + iniset $NOVA_CONF DEFAULT multi_host "True" + iniset $NOVA_CONF DEFAULT send_arp_for_ha "True" fi if [ "$SYSLOG" != "False" ]; then - add_nova_opt "use_syslog=True" + iniset $NOVA_CONF DEFAULT use_syslog "True" fi if [ "$API_RATE_LIMIT" != "True" ]; then - add_nova_opt "api_rate_limit=False" + iniset $NOVA_CONF DEFAULT api_rate_limit "False" fi if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then # Add color to logging output - add_nova_opt "logging_context_format_string=%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" - add_nova_opt "logging_default_format_string=%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" - add_nova_opt "logging_debug_format_suffix=from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - add_nova_opt "logging_exception_prefix=%(color)s%(asctime)s.%(msecs)d TRACE %(name)s %(instance)s" + iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $NOVA_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $NOVA_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" + iniset $NOVA_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)d TRACE %(name)s %(instance)s" else # Show user_name and project_name instead of user_id and project_id - add_nova_opt "logging_context_format_string=%(asctime)s.%(msecs)d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" + iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" fi if is_service_enabled ceilometer; then - 
add_nova_opt "instance_usage_audit=True" - add_nova_opt "instance_usage_audit_period=hour" - add_nova_opt "notification_driver=nova.openstack.common.notifier.rpc_notifier" - add_nova_opt "notification_driver=ceilometer.compute.nova_notifier" + iniset $NOVA_CONF DEFAULT instance_usage_audit "True" + iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour" + iniset $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier" + iniset $NOVA_CONF DEFAULT notification_driver "ceilometer.compute.nova_notifier" fi @@ -433,17 +433,17 @@ function create_nova_conf() { # For Example: ``EXTRA_OPTS=(foo=true bar=2)`` for I in "${EXTRA_OPTS[@]}"; do # Attempt to convert flags to options - add_nova_opt ${I//--} + iniset $NOVA_CONF DEFAULT ${I//=/ } done } function create_nova_conf_nova_network() { - add_nova_opt "network_manager=nova.network.manager.$NET_MAN" - add_nova_opt "public_interface=$PUBLIC_INTERFACE" - add_nova_opt "vlan_interface=$VLAN_INTERFACE" - add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE" + iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NET_MAN" + iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE" + iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE" + iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE" if [ -n "$FLAT_INTERFACE" ]; then - add_nova_opt "flat_interface=$FLAT_INTERFACE" + iniset $NOVA_CONF DEFAULT flat_interface "$FLAT_INTERFACE" fi } diff --git a/lib/quantum b/lib/quantum index ea0e311c..f74eead6 100644 --- a/lib/quantum +++ b/lib/quantum @@ -200,13 +200,13 @@ function configure_quantum() { } function create_nova_conf_quantum() { - add_nova_opt "network_api_class=nova.network.quantumv2.api.API" - add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME" - add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD" - add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" - add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY" - add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME" - add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT" + iniset $NOVA_CONF DEFAULT network_api_class "nova.network.quantumv2.api.API" + iniset $NOVA_CONF DEFAULT quantum_admin_username "$Q_ADMIN_USERNAME" + iniset $NOVA_CONF DEFAULT quantum_admin_password "$SERVICE_PASSWORD" + iniset $NOVA_CONF DEFAULT quantum_admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" + iniset $NOVA_CONF DEFAULT quantum_auth_strategy "$Q_AUTH_STRATEGY" + iniset $NOVA_CONF DEFAULT quantum_admin_tenant_name "$SERVICE_TENANT_NAME" + iniset $NOVA_CONF DEFAULT quantum_url "http://$Q_HOST:$Q_PORT" if [[ "$Q_PLUGIN" = "openvswitch" ]]; then NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} @@ -214,14 +214,14 @@ function create_nova_conf_quantum() { NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"} elif [[ "$Q_PLUGIN" = "ryu" ]]; then NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver"} - add_nova_opt "libvirt_ovs_integration_bridge=$OVS_BRIDGE" - add_nova_opt "linuxnet_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" - add_nova_opt "libvirt_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" + iniset $NOVA_CONF DEFAULT libvirt_ovs_integration_bridge "$OVS_BRIDGE" + iniset $NOVA_CONF DEFAULT linuxnet_ovs_ryu_api_host "$RYU_API_HOST:$RYU_API_PORT" + iniset $NOVA_CONF DEFAULT libvirt_ovs_ryu_api_host "$RYU_API_HOST:$RYU_API_PORT" 
fi - add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER" - add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER" + iniset $NOVA_CONF DEFAULT libvirt_vif_driver "$NOVA_VIF_DRIVER" + iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER" if is_service_enabled q-meta; then - add_nova_opt "service_quantum_metadata_proxy=True" + iniset $NOVA_CONF DEFAULT service_quantum_metadata_proxy "True" fi } diff --git a/stack.sh b/stack.sh index 9f734b9e..5a02e076 100755 --- a/stack.sh +++ b/stack.sh @@ -1059,9 +1059,9 @@ if is_service_enabled nova; then # These settings don't hurt anything if n-xvnc and n-novnc are disabled if is_service_enabled n-cpu; then NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} - add_nova_opt "novncproxy_base_url=$NOVNCPROXY_URL" + iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL" XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"} - add_nova_opt "xvpvncproxy_base_url=$XVPVNCPROXY_URL" + iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL" fi if [ "$VIRT_DRIVER" = 'xenserver' ]; then VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1} @@ -1071,18 +1071,18 @@ if is_service_enabled nova; then # Address on which instance vncservers will listen on compute hosts. # For multi-host, this should be the management ip of the compute host. VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} - add_nova_opt "vncserver_listen=$VNCSERVER_LISTEN" - add_nova_opt "vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS" - add_nova_opt "ec2_dmz_host=$EC2_DMZ_HOST" + iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" + iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" if is_service_enabled zeromq; then - add_nova_opt "rpc_backend=nova.openstack.common.rpc.impl_zmq" + iniset $NOVA_CONF DEFAULT rpc_backend "nova.openstack.common.rpc.impl_zmq" elif is_service_enabled qpid; then - add_nova_opt "rpc_backend=nova.rpc.impl_qpid" + iniset $NOVA_CONF DEFAULT rpc_backend "nova.rpc.impl_qpid" elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - add_nova_opt "rabbit_host=$RABBIT_HOST" - add_nova_opt "rabbit_password=$RABBIT_PASSWORD" + iniset $NOVA_CONF DEFAULT rabbit_host "$RABBIT_HOST" + iniset $NOVA_CONF DEFAULT rabbit_password "$RABBIT_PASSWORD" fi - add_nova_opt "glance_api_servers=$GLANCE_HOSTPORT" + iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" # XenServer @@ -1091,16 +1091,16 @@ if is_service_enabled nova; then if [ "$VIRT_DRIVER" = 'xenserver' ]; then echo_summary "Using XenServer virtualization driver" read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." 
- add_nova_opt "compute_driver=xenapi.XenAPIDriver" + iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver" XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"} XENAPI_USER=${XENAPI_USER:-"root"} - add_nova_opt "xenapi_connection_url=$XENAPI_CONNECTION_URL" - add_nova_opt "xenapi_connection_username=$XENAPI_USER" - add_nova_opt "xenapi_connection_password=$XENAPI_PASSWORD" - add_nova_opt "flat_injected=False" + iniset $NOVA_CONF DEFAULT xenapi_connection_url "$XENAPI_CONNECTION_URL" + iniset $NOVA_CONF DEFAULT xenapi_connection_username "$XENAPI_USER" + iniset $NOVA_CONF DEFAULT xenapi_connection_password "$XENAPI_PASSWORD" + iniset $NOVA_CONF DEFAULT flat_injected "False" # Need to avoid crash due to new firewall support XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} - add_nova_opt "firewall_driver=$XEN_FIREWALL_DRIVER" + iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER" # OpenVZ # ------ @@ -1109,34 +1109,34 @@ if is_service_enabled nova; then echo_summary "Using OpenVZ virtualization driver" # TODO(deva): OpenVZ driver does not yet work if compute_driver is set here. # Replace connection_type when this is fixed. - # add_nova_opt "compute_driver=openvz.connection.OpenVzConnection" - add_nova_opt "connection_type=openvz" + # iniset $NOVA_CONF DEFAULT compute_driver "openvz.connection.OpenVzConnection" + iniset $NOVA_CONF DEFAULT connection_type "openvz" LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} - add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER" + iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" # Bare Metal # ---------- elif [ "$VIRT_DRIVER" = 'baremetal' ]; then echo_summary "Using BareMetal driver" - add_nova_opt "compute_driver=nova.virt.baremetal.driver.BareMetalDriver" LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"} - add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER" - add_nova_opt "baremetal_driver=$BM_DRIVER" - add_nova_opt "baremetal_tftp_root=/tftpboot" - add_nova_opt "baremetal_instance_type_extra_specs=cpu_arch:$BM_CPU_ARCH" - add_nova_opt "baremetal_power_manager=$BM_POWER_MANAGER" - add_nova_opt "scheduler_host_manager=nova.scheduler.baremetal_host_manager.BaremetalHostManager" - add_nova_opt "scheduler_default_filters=AllHostsFilter" + iniset $NOVA_CONF DEFAULT compute_driver nova.virt.baremetal.driver.BareMetalDriver + iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER + iniset $NOVA_CONF DEFAULT scheduler_host_manager nova.scheduler.baremetal_host_manager.BaremetalHostManager + iniset $NOVA_CONF DEFAULT scheduler_default_filters AllHostsFilter + iniset $NOVA_CONF baremetal driver $BM_DRIVER + iniset $NOVA_CONF baremetal instance_type_extra_specs cpu_arch:$BM_CPU_ARCH + iniset $NOVA_CONF baremetal power_manager $BM_POWER_MANAGER + iniset $NOVA_CONF baremetal tftp_root /tftpboot # Default # ------- else echo_summary "Using libvirt virtualization driver" - add_nova_opt "compute_driver=libvirt.LibvirtDriver" + iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver" LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} - add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER" + iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" fi fi @@ -1174,9 +1174,9 @@ if is_service_enabled key && is_service_enabled swift3 && 
is_service_enabled nov CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID) ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') - add_nova_opt "s3_access_key=$ACCESS_KEY" - add_nova_opt "s3_secret_key=$SECRET_KEY" - add_nova_opt "s3_affix_tenant=True" + iniset $NOVA_CONF DEFAULT s3_access_key "$ACCESS_KEY" + iniset $NOVA_CONF DEFAULT s3_secret_key "$SECRET_KEY" + iniset $NOVA_CONF DEFAULT s3_affix_tenant "True" fi screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver" From 1edba3318b5d0b52d00905edf0f1632c258225d4 Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Wed, 9 Jan 2013 15:29:03 -0800 Subject: [PATCH 889/967] Add EXTRA_BAREMETAL_OPTS to stack.sh Allow the passing of extra options to the [baremetal] option group by specifying EXTRA_BAREMETAL_OPTS, using the same format as the existing flag EXTRA_OPTS. Change-Id: I209675786c6a33a68d83a371292a1e1749ecb14c --- stack.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/stack.sh b/stack.sh index 5a02e076..ebb0bad4 100755 --- a/stack.sh +++ b/stack.sh @@ -1129,6 +1129,12 @@ if is_service_enabled nova; then iniset $NOVA_CONF baremetal power_manager $BM_POWER_MANAGER iniset $NOVA_CONF baremetal tftp_root /tftpboot + # Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``. + for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do + # Attempt to convert flags to options + iniset $NOVA_CONF baremetal ${I//=/ } + done + # Default # ------- From 8c54849d79b8d78679898bd4fcc47b340ecc9bdb Mon Sep 17 00:00:00 2001 From: Lianhao Lu Date: Wed, 9 Jan 2013 10:41:54 +0800 Subject: [PATCH 890/967] Create signing_dir for ceilometer. Create and initialize the signing_dir for ceilometer keystone authentication. This ensures the ceilometer to use its own PKI cache directory for authentication and avoids the authentication error due to the invalid cached certifications. Change-Id: I6fbc364695ae9be800245d14fd8945d531679550 --- lib/ceilometer | 10 ++++++++++ stack.sh | 1 + 2 files changed, 11 insertions(+) diff --git a/lib/ceilometer b/lib/ceilometer index 76ab254d..50b353f9 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -32,6 +32,7 @@ CEILOMETER_DIR=$DEST/ceilometer CEILOMETER_CONF_DIR=/etc/ceilometer CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api +CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer} # Support potential entry-points console scripts if [ -d $CEILOMETER_DIR/bin ] ; then @@ -78,10 +79,19 @@ function configure_ceilometer() { iniset $CEILOMETER_CONF keystone_authtoken admin_user ceilometer iniset $CEILOMETER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $CEILOMETER_CONF keystone_authtoken signing_dir $CEILOMETER_AUTH_CACHE_DIR cleanup_ceilometer } +# init_ceilometer() - Initialize etc. 
+function init_ceilometer() { + # Create cache dir + sudo mkdir -p $CEILOMETER_AUTH_CACHE_DIR + sudo chown `whoami` $CEILOMETER_AUTH_CACHE_DIR + rm -f $CEILOMETER_AUTH_CACHE_DIR/* +} + # install_ceilometer() - Collect source and prepare function install_ceilometer() { git_clone $CEILOMETER_REPO $CEILOMETER_DIR $CEILOMETER_BRANCH diff --git a/stack.sh b/stack.sh index 9f734b9e..8b279c19 100755 --- a/stack.sh +++ b/stack.sh @@ -1219,6 +1219,7 @@ if is_service_enabled ceilometer; then echo_summary "Configuring Ceilometer" configure_ceilometer echo_summary "Starting Ceilometer" + init_ceilometer start_ceilometer fi From 768295e9f1b0ee74635f8b3002cf7e1971bbdddf Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 9 Jan 2013 13:42:03 -0600 Subject: [PATCH 891/967] Add mechanism to automatically load additional projects This adds an extras.d directory to contain startup scripts that stack.sh runs automatically at the end. Similar to local.sh except the scripts are sourced into the stack.sh process rather than executed as a child process. This gives them complete access to the stack.sh environment. Convert Tempest to use this format as an example. Change-Id: Ibc95e6aaecf4211da948319eb452293ae4357780 --- extras.d/80-tempest.sh | 20 ++++++++++++++++++++ extras.d/README | 14 ++++++++++++++ stack.sh | 24 ++++++++++-------------- unstack.sh | 9 +++++++++ 4 files changed, 53 insertions(+), 14 deletions(-) create mode 100644 extras.d/80-tempest.sh create mode 100644 extras.d/README diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh new file mode 100644 index 00000000..506ccef7 --- /dev/null +++ b/extras.d/80-tempest.sh @@ -0,0 +1,20 @@ +# tempest.sh - DevStack extras script + +source $TOP_DIR/lib/tempest + +if [[ "$1" == "stack" ]]; then + # Configure Tempest last to ensure that the runtime configuration of + # the various OpenStack services can be queried. + if is_service_enabled tempest; then + echo_summary "Configuring Tempest" + install_tempest + configure_tempest + fi +fi + +if [[ "$1" == "unstack" ]]; then + # no-op + : +fi + + diff --git a/extras.d/README b/extras.d/README new file mode 100644 index 00000000..ffc6793a --- /dev/null +++ b/extras.d/README @@ -0,0 +1,14 @@ +The extras.d directory contains project initialization scripts to be +sourced by stack.sh at the end of its run. This is expected to be +used by external projects that want to be configured, started and +stopped with DevStack. + +Order is controlled by prefixing the script names with the a two digit +sequence number. Script names must end with '.sh'. This provides a +convenient way to disable scripts by simoy renaming them. + +DevStack reserves the sequence numbers 00 through 09 and 90 through 99 +for its own use. + +The scripts are called with an argument of 'stack' by stack.sh and +with an argument of 'unstack' by unstack.sh. 
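(Illustrative aside, not part of this change: a third-party project hooking into extras.d would typically ship a script like the hypothetical 70-myproject.sh sketched here. The "myproject" service name and its install_/configure_/start_/stop_ functions are assumptions about what such a project would provide in its own lib/myproject; only is_service_enabled, echo_summary and the stack/unstack calling convention come from DevStack.)

    # extras.d/70-myproject.sh - hypothetical third-party hook (sketch only)

    source $TOP_DIR/lib/myproject

    if [[ "$1" == "stack" ]]; then
        if is_service_enabled myproject; then
            echo_summary "Configuring myproject"
            install_myproject
            configure_myproject
            start_myproject
        fi
    fi

    if [[ "$1" == "unstack" ]]; then
        if is_service_enabled myproject; then
            stop_myproject
        fi
    fi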
diff --git a/stack.sh b/stack.sh index 9f734b9e..53300998 100755 --- a/stack.sh +++ b/stack.sh @@ -321,7 +321,6 @@ source $TOP_DIR/lib/swift source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat source $TOP_DIR/lib/quantum -source $TOP_DIR/lib/tempest source $TOP_DIR/lib/baremetal # Set the destination directories for OpenStack projects @@ -775,9 +774,6 @@ fi if is_service_enabled ceilometer; then install_ceilometer fi -if is_service_enabled tempest; then - install_tempest -fi # Initialization @@ -1314,16 +1310,6 @@ if is_service_enabled nova && is_baremetal; then screen_it baremetal "nova-baremetal-deploy-helper" fi -# Configure Tempest last to ensure that the runtime configuration of -# the various OpenStack services can be queried. -if is_service_enabled tempest; then - echo_summary "Configuring Tempest" - configure_tempest - echo '**************************************************' - echo_summary "Finished Configuring Tempest" - echo '**************************************************' -fi - # Save some values we generated for later use CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT") echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv @@ -1333,6 +1319,16 @@ for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \ done +# Run extras +# ========== + +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i stack + done +fi + + # Run local script # ================ diff --git a/unstack.sh b/unstack.sh index fd70916d..1d4bfd56 100755 --- a/unstack.sh +++ b/unstack.sh @@ -39,6 +39,15 @@ if [[ "$1" == "--all" ]]; then UNSTACK_ALL=${UNSTACK_ALL:-1} fi +# Run extras +# ========== + +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i unstack + done +fi + if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then source $TOP_DIR/openrc teardown_quantum_debug From ca8021712325dd4d4ac7185a287cb81cb10fd23d Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 9 Jan 2013 19:08:02 -0600 Subject: [PATCH 892/967] Add tools/make_cert.sh This allows use of either the DevStack CA or creating another CA independent of stack.sh. Change-Id: I055679b5fd06e830c8e6d7d7331c52dd8782d0b6 --- lib/tls | 6 ++++- stack.sh | 1 + tools/make_cert.sh | 55 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 61 insertions(+), 1 deletion(-) create mode 100755 tools/make_cert.sh diff --git a/lib/tls b/lib/tls index 1e2a8993..202edeff 100644 --- a/lib/tls +++ b/lib/tls @@ -189,7 +189,7 @@ subjectAltName = \$ENV::SUBJECT_ALT_NAME " >$ca_dir/signing.conf } -# Create root and intermediate CAs and an initial server cert +# Create root and intermediate CAs # init_CA function init_CA { # Ensure CAs are built @@ -198,7 +198,11 @@ function init_CA { # Create the CA bundle cat $ROOT_CA_DIR/cacert.pem $INT_CA_DIR/cacert.pem >>$INT_CA_DIR/ca-chain.pem +} +# Create an initial server cert +# init_cert +function init_cert { if [[ ! -r $DEVSTACK_CERT ]]; then if [[ -n "$TLS_IP" ]]; then # Lie to let incomplete match routines work diff --git a/stack.sh b/stack.sh index 9f734b9e..d43e81c0 100755 --- a/stack.sh +++ b/stack.sh @@ -838,6 +838,7 @@ fi if is_service_enabled tls-proxy; then configure_CA init_CA + init_cert # Add name to /etc/hosts # don't be naive and add to existing line! 
fi diff --git a/tools/make_cert.sh b/tools/make_cert.sh new file mode 100755 index 00000000..cb93e57c --- /dev/null +++ b/tools/make_cert.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# **make_cert.sh** + +# Create a CA hierarchy (if necessary) and server certificate +# +# This mimics the CA structure that DevStack sets up when ``tls_proxy`` is enabled +# but in the curent directory unless ``DATA_DIR`` is set + +ENABLE_TLS=True +DATA_DIR=${DATA_DIR:-`pwd`/ca-data} + +ROOT_CA_DIR=$DATA_DIR/root +INT_CA_DIR=$DATA_DIR/int + +# Import common functions +source $TOP_DIR/functions + +# Import TLS functions +source lib/tls + +function usage { + echo "$0 - Create CA and/or certs" + echo "" + echo "Usage: $0 commonName [orgUnit]" + exit 1 +} + +CN=$1 +if [ -z "$CN" ]]; then + usage +fi +ORG_UNIT_NAME=${2:-$ORG_UNIT_NAME} + +# Useful on OS/X +if [[ `uname -s` == 'Darwin' && -d /usr/local/Cellar/openssl ]]; then + # set up for brew-installed modern OpenSSL + OPENSSL_CONF=/usr/local/etc/openssl/openssl.cnf + OPENSSL=/usr/local/Cellar/openssl/*/bin/openssl +fi + +DEVSTACK_CERT_NAME=$CN +DEVSTACK_HOSTNAME=$CN +DEVSTACK_CERT=$DATA_DIR/$DEVSTACK_CERT_NAME.pem + +# Make sure the CA is set up +configure_CA +init_CA + +# Create the server cert +make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME + +# Create a cert bundle +cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT + From ceaa38b3299d56adc1e65e7128bb67cb7364acd1 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 12 Dec 2012 17:09:57 -0600 Subject: [PATCH 893/967] Fix tempest flavors and DEFAULT_INSTANCE_TYPE The flavor selection was broken if DEFAULT_INSTANCE_TYPE is defined but not yet created, for example when it is created in local.sh. This also has the side effect of setting flavor_ref_alt to the first flavor where it was unset in the previous code. Change-Id: I1fa48b3f90af45144c92298b6b07a4f7ee3b499f --- lib/tempest | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/lib/tempest b/lib/tempest index b408b113..fa637c12 100644 --- a/lib/tempest +++ b/lib/tempest @@ -134,12 +134,14 @@ function configure_tempest() { flavor_lines=`nova flavor-list` IFS=$'\r\n' flavors="" - for line in $flavor_lines; do - if [ -z $DEFAULT_INSTANCE_TYPE ]; then - flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" - else - flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | grep "$DEFAULT_INSTANCE_TYPE" | cut -d' ' -f2`" + if [[ -n "$DEFAULT_INSTANCE_TYPE" ]]; then + for line in $flavor_lines; do + f=$(echo $line | awk "/ $DEFAULT_INSTANCE_TYPE / { print \$2 }") + flavors="$flavors $f" + done fi + for line in $flavor_lines; do + flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" done IFS=" " From d66c965b0c00209905747754cd580fc2f887af0a Mon Sep 17 00:00:00 2001 From: Kevin Lyda Date: Wed, 9 Jan 2013 13:39:57 +0000 Subject: [PATCH 894/967] Correct comment about SWIFT_LOOPBACK_DISK_SIZE. The comment regarding SWIFT_LOOPBACK_DISK_SIZE in lib/swift used the incorrect unit (bytes instead of kilobytes). Change-Id: I86117e36141c0a028a6fa6878a4d540f624e759d --- lib/swift | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/swift b/lib/swift index c433387d..b418eda8 100644 --- a/lib/swift +++ b/lib/swift @@ -39,7 +39,8 @@ SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift} SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift} # DevStack will create a loop-back disk formatted as XFS to store the -# swift data. 
Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in bytes. +# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in +# kilobytes. # Default is 1 gigabyte. SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} From 9a28c86b4c43eb8e311c60dc7400b9989a6745bb Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 10 Jan 2013 15:42:49 +0100 Subject: [PATCH 895/967] Add .stackenv to gitignore. Change-Id: I39e91aaf5e9ff29b025fd5a1aa74bad01c5e5bdd --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 5e770c80..f9e26445 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ files/images stack-screenrc *.pem accrc +.stackenv From df1cf94cf0f6191842dd48b5e9a640510c33b3c0 Mon Sep 17 00:00:00 2001 From: Clint Byrum Date: Thu, 10 Jan 2013 11:12:45 -0800 Subject: [PATCH 896/967] Fixing python-heatclient's git repo path. heatclient was moved to the official openstack repo. Thanks Simon Pasquier for the patch. Change-Id: I561bf1ea11f1c74b5e75ab93170bd367ba36f90f Fixes: bug #1096922 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 4e03a2f4..89d4f090 100644 --- a/stackrc +++ b/stackrc @@ -108,7 +108,7 @@ HEAT_REPO=${GIT_BASE}/openstack/heat.git HEAT_BRANCH=master # python heat client library -HEATCLIENT_REPO=${GIT_BASE}/heat-api/python-heatclient.git +HEATCLIENT_REPO=${GIT_BASE}/openstack/python-heatclient.git HEATCLIENT_BRANCH=master # ryu service From 2b7ce5a8f37232c8cc28f86c6d659a0ec3f3f00b Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 10 Jan 2013 13:22:45 -0600 Subject: [PATCH 897/967] Add stackrc comments to HACKING Change-Id: I46ff885184a2b5b71caca905c27f28d8b1304011 --- HACKING.rst | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/HACKING.rst b/HACKING.rst index c4641fa0..6ad8c7e6 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -88,6 +88,30 @@ to be in the sub-script, such as testing for keystone being enabled in ``configure_swift()``. +stackrc +------- + +``stackrc`` is the global configuration file for DevStack. It is responsible for +calling ``localrc`` if it exists so configuration can be overridden by the user. + +The criteria for what belongs in ``stackrc`` can be vaguely summarized as +follows: + +* All project respositories and branches (for historical reasons) +* Global configuration that may be referenced in ``localrc``, i.e. ``DEST``, ``DATA_DIR`` +* Global service configuration like ``ENABLED_SERVICES`` +* Variables used by multiple services that do not have a clear owner, i.e. + ``VOLUME_BACKING_FILE_SIZE`` (nova-volumes and cinder) or ``PUBLIC_NETWORK_NAME`` + (nova-network and quantum) +* Variables that can not be cleanly declared in a project file due to + dependency ordering, i.e. the order of sourcing the project files can + not be changed for other reasons but the earlier file needs to dereference a + variable set in the later file. This should be rare. + +Also, variable declarations in ``stackrc`` do NOT allow overriding (the form +``FOO=${FOO:-baz}``); if they did then they can already be changed in ``localrc`` +and can stay in the project file. + Documentation ------------- From dff95122f79c83e7e3b108b12e6b8a48aa62c01d Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 10 Jan 2013 20:51:28 -0600 Subject: [PATCH 898/967] Fix EXTRA_OPTS handling In the conversion away from add_nova_opt the EXTRA_OPTS handling inadvertently replaced all '=' chars in the value rather than just the first. 
Additional '=' is legal for an option value. FWIW here is the setting that tripped it: EXTRA_OPTS=default_log_levels=sqlalchemy=WARN,boto=WARN,eventlet.wsgi.server=WARN Change-Id: I2deb139171250eb0ef5028bb924569cec31e1a4e --- lib/nova | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/nova b/lib/nova index 781cc097..a987008b 100644 --- a/lib/nova +++ b/lib/nova @@ -432,8 +432,8 @@ function create_nova_conf() { # Define extra nova conf flags by defining the array ``EXTRA_OPTS``. # For Example: ``EXTRA_OPTS=(foo=true bar=2)`` for I in "${EXTRA_OPTS[@]}"; do - # Attempt to convert flags to options - iniset $NOVA_CONF DEFAULT ${I//=/ } + # Replace the first '=' with ' ' for iniset syntax + iniset $NOVA_CONF DEFAULT ${I/=/ } done } From 91b8d13edad4d21bfd5b67219347f934728ee462 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 6 Jan 2013 22:40:09 +0100 Subject: [PATCH 899/967] Fix "sudo: sorry, you must have a tty to run sudo" On many systems the requiretty sudoers option is turned on by default. With "requiretty" option the sudo ensures the user have real tty access. Just several "su" variant has an option for skipping the new session creation step. Only one session can posses a tty, so after a "su -c" the sudo will not work. We will use sudo instead of su, when we create the stack account. This change adds new variable the STACK_USER for service username. Change-Id: I1b3fbd903686884e74a5a22d82c0c0890e1be03c --- lib/baremetal | 4 ++-- lib/ceilometer | 3 ++- lib/cinder | 6 ++--- lib/glance | 8 +++---- lib/heat | 2 +- lib/keystone | 5 ++-- lib/nova | 12 +++++----- lib/quantum | 4 ++-- lib/ryu | 2 +- lib/swift | 3 ++- stack.sh | 35 +++++++++++++++------------- stackrc | 3 +++ tools/build_ramdisk.sh | 12 +++++----- tools/build_uec.sh | 8 +++---- tools/copy_dev_environment_to_uec.sh | 11 +++++---- tools/xen/build_xva.sh | 4 ++-- tools/xen/prepare_guest.sh | 9 +++---- 17 files changed, 72 insertions(+), 59 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index 112fd6d9..3cc24291 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -200,14 +200,14 @@ function configure_baremetal_nova_dirs() { sudo mkdir -p /tftpboot sudo mkdir -p /tftpboot/pxelinux.cfg sudo cp /usr/lib/syslinux/pxelinux.0 /tftpboot/ - sudo chown -R `whoami`:libvirtd /tftpboot + sudo chown -R $STACK_USER:libvirtd /tftpboot # ensure $NOVA_STATE_PATH/baremetal is prepared sudo mkdir -p $NOVA_STATE_PATH/baremetal sudo mkdir -p $NOVA_STATE_PATH/baremetal/console sudo mkdir -p $NOVA_STATE_PATH/baremetal/dnsmasq sudo touch $NOVA_STATE_PATH/baremetal/dnsmasq/dnsmasq-dhcp.host - sudo chown -R `whoami` $NOVA_STATE_PATH/baremetal + sudo chown -R $STACK_USER $NOVA_STATE_PATH/baremetal # ensure dnsmasq is installed but not running # because baremetal driver will reconfigure and restart this as needed diff --git a/lib/ceilometer b/lib/ceilometer index 749e785c..0fae3973 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -9,6 +9,7 @@ # - OS_AUTH_URL for auth in api # - DEST set to the destination directory # - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api +# - STACK_USER service user # stack.sh # --------- @@ -94,7 +95,7 @@ function configure_ceilometer() { function init_ceilometer() { # Create cache dir sudo mkdir -p $CEILOMETER_AUTH_CACHE_DIR - sudo chown `whoami` $CEILOMETER_AUTH_CACHE_DIR + sudo chown $STACK_USER $CEILOMETER_AUTH_CACHE_DIR rm -f $CEILOMETER_AUTH_CACHE_DIR/* } diff --git a/lib/cinder b/lib/cinder index 4aaea5d0..cbeb1d7a 100644 --- a/lib/cinder +++ b/lib/cinder @@ -3,7 +3,7 @@ # 
Dependencies: # - functions -# - DEST, DATA_DIR must be defined +# - DEST, DATA_DIR, STACK_USER must be defined # SERVICE_{TENANT_NAME|PASSWORD} must be defined # ``KEYSTONE_TOKEN_FORMAT`` must be defined @@ -110,7 +110,7 @@ function configure_cinder() { if [[ ! -d $CINDER_CONF_DIR ]]; then sudo mkdir -p $CINDER_CONF_DIR fi - sudo chown `whoami` $CINDER_CONF_DIR + sudo chown $STACK_USER $CINDER_CONF_DIR cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR @@ -295,7 +295,7 @@ function init_cinder() { # Create cache dir sudo mkdir -p $CINDER_AUTH_CACHE_DIR - sudo chown `whoami` $CINDER_AUTH_CACHE_DIR + sudo chown $STACK_USER $CINDER_AUTH_CACHE_DIR rm -f $CINDER_AUTH_CACHE_DIR/* } diff --git a/lib/glance b/lib/glance index dff247a5..1c56a675 100644 --- a/lib/glance +++ b/lib/glance @@ -3,7 +3,7 @@ # Dependencies: # ``functions`` file -# ``DEST``, ``DATA_DIR`` must be defined +# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined # ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # ``SERVICE_HOST`` # ``KEYSTONE_TOKEN_FORMAT`` must be defined @@ -75,7 +75,7 @@ function configure_glance() { if [[ ! -d $GLANCE_CONF_DIR ]]; then sudo mkdir -p $GLANCE_CONF_DIR fi - sudo chown `whoami` $GLANCE_CONF_DIR + sudo chown $STACK_USER $GLANCE_CONF_DIR # Copy over our glance configurations and update them cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF @@ -158,10 +158,10 @@ function init_glance() { # Create cache dir sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api - sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/api + sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/api rm -f $GLANCE_AUTH_CACHE_DIR/api/* sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/registry - sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/registry + sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/registry rm -f $GLANCE_AUTH_CACHE_DIR/registry/* } diff --git a/lib/heat b/lib/heat index a6f72862..89bd44f0 100644 --- a/lib/heat +++ b/lib/heat @@ -49,7 +49,7 @@ function configure_heat() { if [[ ! -d $HEAT_CONF_DIR ]]; then sudo mkdir -p $HEAT_CONF_DIR fi - sudo chown `whoami` $HEAT_CONF_DIR + sudo chown $STACK_USER $HEAT_CONF_DIR HEAT_API_CFN_HOST=${HEAT_API_CFN_HOST:-$SERVICE_HOST} HEAT_API_CFN_PORT=${HEAT_API_CFN_PORT:-8000} diff --git a/lib/keystone b/lib/keystone index 34f33723..7a70cc41 100644 --- a/lib/keystone +++ b/lib/keystone @@ -7,6 +7,7 @@ # ``SERVICE_HOST``, ``SERVICE_PROTOCOL`` # ``SERVICE_TOKEN`` # ``S3_SERVICE_PORT`` (template backend only) +# ``STACK_USER`` # ``stack.sh`` calls the entry points in this order: # @@ -79,7 +80,7 @@ function configure_keystone() { if [[ ! -d $KEYSTONE_CONF_DIR ]]; then sudo mkdir -p $KEYSTONE_CONF_DIR fi - sudo chown `whoami` $KEYSTONE_CONF_DIR + sudo chown $STACK_USER $KEYSTONE_CONF_DIR if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF @@ -261,7 +262,7 @@ function init_keystone() { # Create cache dir sudo mkdir -p $KEYSTONE_AUTH_CACHE_DIR - sudo chown `whoami` $KEYSTONE_AUTH_CACHE_DIR + sudo chown $STACK_USER $KEYSTONE_AUTH_CACHE_DIR rm -f $KEYSTONE_AUTH_CACHE_DIR/* fi } diff --git a/lib/nova b/lib/nova index 781cc097..9803acbf 100644 --- a/lib/nova +++ b/lib/nova @@ -3,7 +3,7 @@ # Dependencies: # ``functions`` file -# ``DEST``, ``DATA_DIR`` must be defined +# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined # ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # ``LIBVIRT_TYPE`` must be defined # ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined @@ -149,7 +149,7 @@ function configure_nova() { if [[ ! 
-d $NOVA_CONF_DIR ]]; then sudo mkdir -p $NOVA_CONF_DIR fi - sudo chown `whoami` $NOVA_CONF_DIR + sudo chown $STACK_USER $NOVA_CONF_DIR cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR @@ -277,7 +277,7 @@ EOF" if ! getent group libvirtd >/dev/null; then sudo groupadd libvirtd fi - add_user_to_group `whoami` libvirtd + add_user_to_group $STACK_USER libvirtd # libvirt detects various settings on startup, as we potentially changed # the system configuration (modules, filesystems), we need to restart @@ -297,7 +297,7 @@ EOF" if [ -L /dev/disk/by-label/nova-instances ]; then if ! mount -n | grep -q $NOVA_INSTANCES_PATH; then sudo mount -L nova-instances $NOVA_INSTANCES_PATH - sudo chown -R `whoami` $NOVA_INSTANCES_PATH + sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH fi fi @@ -474,13 +474,13 @@ function init_nova() { # Create cache dir sudo mkdir -p $NOVA_AUTH_CACHE_DIR - sudo chown `whoami` $NOVA_AUTH_CACHE_DIR + sudo chown $STACK_USER $NOVA_AUTH_CACHE_DIR rm -f $NOVA_AUTH_CACHE_DIR/* # Create the keys folder sudo mkdir -p ${NOVA_STATE_PATH}/keys # make sure we own NOVA_STATE_PATH and all subdirs - sudo chown -R `whoami` ${NOVA_STATE_PATH} + sudo chown -R $STACK_USER ${NOVA_STATE_PATH} } # install_novaclient() - Collect source and prepare diff --git a/lib/quantum b/lib/quantum index f74eead6..f081d9b6 100644 --- a/lib/quantum +++ b/lib/quantum @@ -388,7 +388,7 @@ function _configure_quantum_common() { if [[ ! -d $QUANTUM_CONF_DIR ]]; then sudo mkdir -p $QUANTUM_CONF_DIR fi - sudo chown `whoami` $QUANTUM_CONF_DIR + sudo chown $STACK_USER $QUANTUM_CONF_DIR cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF @@ -730,7 +730,7 @@ function _quantum_setup_keystone() { iniset $conf_file $section signing_dir $QUANTUM_AUTH_CACHE_DIR # Create cache dir sudo mkdir -p $QUANTUM_AUTH_CACHE_DIR - sudo chown `whoami` $QUANTUM_AUTH_CACHE_DIR + sudo chown $STACK_USER $QUANTUM_AUTH_CACHE_DIR rm -f $QUANTUM_AUTH_CACHE_DIR/* } diff --git a/lib/ryu b/lib/ryu index ac3462bb..1292313e 100644 --- a/lib/ryu +++ b/lib/ryu @@ -27,7 +27,7 @@ function init_ryu() { if [[ ! -d $RYU_CONF_DIR ]]; then sudo mkdir -p $RYU_CONF_DIR fi - sudo chown `whoami` $RYU_CONF_DIR + sudo chown $STACK_USER $RYU_CONF_DIR RYU_CONF=$RYU_CONF_DIR/ryu.conf sudo rm -rf $RYU_CONF diff --git a/lib/swift b/lib/swift index b418eda8..46c6eb20 100644 --- a/lib/swift +++ b/lib/swift @@ -4,6 +4,7 @@ # Dependencies: # ``functions`` file # ``DEST``, ``SCREEN_NAME``, `SWIFT_HASH` must be defined +# ``STACK_USER`` must be defined # ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined # ``lib/keystone`` file # ``stack.sh`` calls the entry points in this order: @@ -333,7 +334,7 @@ function init_swift() { # Create cache dir sudo mkdir -p $SWIFT_AUTH_CACHE_DIR - sudo chown `whoami` $SWIFT_AUTH_CACHE_DIR + sudo chown $STACK_USER $SWIFT_AUTH_CACHE_DIR rm -f $SWIFT_AUTH_CACHE_DIR/* } diff --git a/stack.sh b/stack.sh index da623531..9b084bee 100755 --- a/stack.sh +++ b/stack.sh @@ -177,40 +177,43 @@ VERBOSE=$(trueorfalse True $VERBOSE) # sudo privileges and runs as that user. if [[ $EUID -eq 0 ]]; then + STACK_USER=$DEFAULT_STACK_USER ROOTSLEEP=${ROOTSLEEP:-10} echo "You are running this script as root." - echo "In $ROOTSLEEP seconds, we will create a user 'stack' and run as that user" + echo "In $ROOTSLEEP seconds, we will create a user '$STACK_USER' and run as that user" sleep $ROOTSLEEP # Give the non-root user the ability to run as **root** via ``sudo`` is_package_installed sudo || install_package sudo - if ! 
getent group stack >/dev/null; then - echo "Creating a group called stack" - groupadd stack + if ! getent group $STACK_USER >/dev/null; then + echo "Creating a group called $STACK_USER" + groupadd $STACK_USER fi - if ! getent passwd stack >/dev/null; then - echo "Creating a user called stack" - useradd -g stack -s /bin/bash -d $DEST -m stack + if ! getent passwd $STACK_USER >/dev/null; then + echo "Creating a user called $STACK_USER" + useradd -g $STACK_USER -s /bin/bash -d $DEST -m $STACK_USER fi echo "Giving stack user passwordless sudo privileges" # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || echo "#includedir /etc/sudoers.d" >> /etc/sudoers - ( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \ + ( umask 226 && echo "$STACK_USER ALL=(ALL) NOPASSWD:ALL" \ > /etc/sudoers.d/50_stack_sh ) - echo "Copying files to stack user" + echo "Copying files to $STACK_USER user" STACK_DIR="$DEST/${TOP_DIR##*/}" cp -r -f -T "$TOP_DIR" "$STACK_DIR" - chown -R stack "$STACK_DIR" + chown -R $STACK_USER "$STACK_DIR" + cd "$STACK_DIR" if [[ "$SHELL_AFTER_RUN" != "no" ]]; then - exec su -c "set -e; cd $STACK_DIR; bash stack.sh; bash" stack + exec sudo -u $STACK_USER bash -l -c "set -e; bash stack.sh; bash" else - exec su -c "set -e; cd $STACK_DIR; bash stack.sh" stack + exec sudo -u $STACK_USER bash -l -c "set -e; source stack.sh" fi exit 1 else + STACK_USER=`whoami` # We're not **root**, make sure ``sudo`` is available is_package_installed sudo || die "Sudo is required. Re-run stack.sh as root ONE TIME ONLY to set up sudo." @@ -220,10 +223,10 @@ else # Set up devstack sudoers TEMPFILE=`mktemp` - echo "`whoami` ALL=(root) NOPASSWD:ALL" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE # Some binaries might be under /sbin or /usr/sbin, so make sure sudo will # see them by forcing PATH - echo "Defaults:`whoami` secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE + echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh @@ -235,7 +238,7 @@ fi # Create the destination directory and ensure it is writable by the user sudo mkdir -p $DEST if [ ! -w $DEST ]; then - sudo chown `whoami` $DEST + sudo chown $STACK_USER $DEST fi # Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without @@ -251,7 +254,7 @@ ERROR_ON_CLONE=`trueorfalse False $ERROR_ON_CLONE` # Destination path for service data DATA_DIR=${DATA_DIR:-${DEST}/data} sudo mkdir -p $DATA_DIR -sudo chown `whoami` $DATA_DIR +sudo chown $STACK_USER $DATA_DIR # Common Configuration diff --git a/stackrc b/stackrc index 4e03a2f4..96f0ee58 100644 --- a/stackrc +++ b/stackrc @@ -12,6 +12,9 @@ DATA_DIR=${DEST}/data # Select the default database DATABASE_TYPE=mysql +# Default stack user +DEFAULT_STACK_USER=stack + # Specify which services to launch. These generally correspond to # screen tabs. To change the default list, use the ``enable_service`` and # ``disable_service`` functions in ``localrc``. diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh index 5ff05b08..cfcca51f 100755 --- a/tools/build_ramdisk.sh +++ b/tools/build_ramdisk.sh @@ -125,17 +125,17 @@ if [ ! -r $DEV_FILE ]; then # Create a stack user that is a member of the libvirtd group so that stack # is able to interact with libvirt. 
chroot $MNTDIR groupadd libvirtd - chroot $MNTDIR useradd stack -s /bin/bash -d $DEST -G libvirtd + chroot $MNTDIR useradd $DEFAULT_STACK_USER -s /bin/bash -d $DEST -G libvirtd mkdir -p $MNTDIR/$DEST - chroot $MNTDIR chown stack $DEST + chroot $MNTDIR chown $DEFAULT_STACK_USER $DEST # A simple password - pass - echo stack:pass | chroot $MNTDIR chpasswd + echo $DEFAULT_STACK_USER:pass | chroot $MNTDIR chpasswd echo root:$ROOT_PASSWORD | chroot $MNTDIR chpasswd # And has sudo ability (in the future this should be limited to only what # stack requires) - echo "stack ALL=(ALL) NOPASSWD: ALL" >> $MNTDIR/etc/sudoers + echo "$DEFAULT_STACK_USER ALL=(ALL) NOPASSWD: ALL" >> $MNTDIR/etc/sudoers umount $MNTDIR rmdir $MNTDIR @@ -187,7 +187,7 @@ git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH # Use this version of devstack rm -rf $MNTDIR/$DEST/devstack cp -pr $CWD $MNTDIR/$DEST/devstack -chroot $MNTDIR chown -R stack $DEST/devstack +chroot $MNTDIR chown -R $DEFAULT_STACK_USER $DEST/devstack # Configure host network for DHCP mkdir -p $MNTDIR/etc/network @@ -225,7 +225,7 @@ EOF # Make the run.sh executable chmod 755 $RUN_SH -chroot $MNTDIR chown stack $DEST/run.sh +chroot $MNTDIR chown $DEFAULT_STACK_USER $DEST/run.sh umount $MNTDIR rmdir $MNTDIR diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 58c54258..5748b390 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -207,11 +207,11 @@ ROOTSLEEP=0 `cat $TOP_DIR/localrc` LOCAL_EOF fi -useradd -U -G sudo -s /bin/bash -d /opt/stack -m stack -echo stack:pass | chpasswd +useradd -U -G sudo -s /bin/bash -d /opt/stack -m $DEFAULT_STACK_USER +echo $DEFAULT_STACK_USER:pass | chpasswd mkdir -p /opt/stack/.ssh echo "$PUB_KEY" > /opt/stack/.ssh/authorized_keys -chown -R stack /opt/stack +chown -R $DEFAULT_STACK_USER /opt/stack chmod 700 /opt/stack/.ssh chmod 600 /opt/stack/.ssh/authorized_keys @@ -224,7 +224,7 @@ fi # Run stack.sh cat >> $vm_dir/uec/user-data< $STAGING_DIR/etc/sudoers.d/50_stack_sh ) # Copy over your ssh keys and env if desired @@ -64,7 +67,7 @@ rm -rf $STAGING_DIR/$DEST/devstack cp_it . 
$STAGING_DIR/$DEST/devstack # Give stack ownership over $DEST so it may do the work needed -chroot $STAGING_DIR chown -R stack $DEST +chroot $STAGING_DIR chown -R $DEFAULT_STACK_USER $DEST # Unmount umount $STAGING_DIR diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index c359c558..f3f166fe 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -65,8 +65,8 @@ cd $TOP_DIR cat <$STAGING_DIR/etc/rc.local # network restart required for getting the right gateway /etc/init.d/networking restart -chown -R stack /opt/stack -su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" stack +chown -R $DEFAULT_STACK_USER /opt/stack +su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" $DEFAULT_STACK_USER exit 0 EOF diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh index 4aa4554f..fe524454 100755 --- a/tools/xen/prepare_guest.sh +++ b/tools/xen/prepare_guest.sh @@ -19,6 +19,7 @@ GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} STAGING_DIR=${STAGING_DIR:-stage} DO_TGZ=${DO_TGZ:-1} XS_TOOLS_PATH=${XS_TOOLS_PATH:-"/root/xs-tools.deb"} +STACK_USER=${STACK_USER:-stack} # Install basics chroot $STAGING_DIR apt-get update @@ -46,12 +47,12 @@ rm -f $STAGING_DIR/etc/localtime # Add stack user chroot $STAGING_DIR groupadd libvirtd -chroot $STAGING_DIR useradd stack -s /bin/bash -d /opt/stack -G libvirtd -echo stack:$GUEST_PASSWORD | chroot $STAGING_DIR chpasswd -echo "stack ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers +chroot $STAGING_DIR useradd $STACK_USER -s /bin/bash -d /opt/stack -G libvirtd +echo $STACK_USER:$GUEST_PASSWORD | chroot $STAGING_DIR chpasswd +echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers # Give ownership of /opt/stack to stack user -chroot $STAGING_DIR chown -R stack /opt/stack +chroot $STAGING_DIR chown -R $STACK_USER /opt/stack # Make our ip address hostnames look nice at the command prompt echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $STAGING_DIR/opt/stack/.bashrc From 15bda3e4630618135b26bd5a41f48e8c2fb0112b Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 11 Jan 2013 15:07:53 -0600 Subject: [PATCH 900/967] Handle existing security group rules in volume exercise Change-Id: I0aa3bc0c6179f92a12c1e9bbace61597778ffa1f --- exercises/volumes.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 5c5e0e44..45b8645b 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -86,8 +86,12 @@ if ! nova secgroup-list | grep -q $SECGROUP; then fi # Configure Security Group Rules -nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 -nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 +if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then + nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 +fi +if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then + nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 +fi # determinine instance type # ------------------------- From 4b26d3191ee82e96fedaffa68362919deb8a3813 Mon Sep 17 00:00:00 2001 From: Sumit Naiksatam Date: Fri, 4 Jan 2013 10:32:54 -0800 Subject: [PATCH 901/967] Quantum FloodLight/BigSwitch Plugin Support The patch introduces devstack support for the Quantum FloodLight/BigSwitch RESTProxy Plugin. 
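(Illustrative aside, not part of this change: the plugin is selected the same way as the other Quantum plugins, via Q_PLUGIN in localrc, with the usual q-svc/q-agt and related services enabled in ENABLED_SERVICES. A rough sketch with a placeholder controller address; BS_FL_CONTROLLERS_PORT accepts a comma-separated list of controller:port pairs and BS_FL_OF_PORT defaults to 6633:

    Q_PLUGIN=bigswitch_floodlight
    BS_FL_CONTROLLERS_PORT=192.168.10.10:80,192.168.10.11:80
    BS_FL_OF_PORT=6633

See lib/bigswitch_floodlight and the lib/quantum changes below for the variables and their defaults.)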
Change-Id: I8c032fd16723ed6055821de0860fae508df371b7 Implements: blueprint quantum-floodlight-bigswitch-plugin-support --- AUTHORS | 1 + lib/bigswitch_floodlight | 50 ++++++++++++++++++++++++++++++++++++++++ lib/quantum | 18 +++++++++++++-- 3 files changed, 67 insertions(+), 2 deletions(-) create mode 100644 lib/bigswitch_floodlight diff --git a/AUTHORS b/AUTHORS index ba68e329..7ec1f663 100644 --- a/AUTHORS +++ b/AUTHORS @@ -35,6 +35,7 @@ Matt Joyce Osamu Habuka Russell Bryant Scott Moser +Sumit Naiksatam Thierry Carrez Todd Willey Tres Henry diff --git a/lib/bigswitch_floodlight b/lib/bigswitch_floodlight new file mode 100644 index 00000000..77aeb61d --- /dev/null +++ b/lib/bigswitch_floodlight @@ -0,0 +1,50 @@ +# Big Switch/FloodLight OpenFlow Controller +# ------------------------------------------ + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} +BS_FL_OF_PORT=${BS_FL_OF_PORT:-6633} +OVS_BRIDGE=${OVS_BRIDGE:-br-int} + +function configure_bigswitch_floodlight() { + : +} + +function init_bigswitch_floodlight() { + install_quantum_agent_packages + + echo -n "Installing OVS managed by the openflow controllers:" + echo ${BS_FL_CONTROLLERS_PORT} + + # Create local OVS bridge and configure it + sudo ovs-vsctl --no-wait -- --if-exists del-br ${OVS_BRIDGE} + sudo ovs-vsctl --no-wait add-br ${OVS_BRIDGE} + sudo ovs-vsctl --no-wait br-set-external-id ${OVS_BRIDGE} bridge-id ${OVS_BRIDGE} + + ctrls= + for ctrl in `echo ${BS_FL_CONTROLLERS_PORT} | tr ',' ' '` + do + ctrl=${ctrl%:*} + ctrls="${ctrls} tcp:${ctrl}:${BS_FL_OF_PORT}" + done + echo "Adding Network conttrollers: " ${ctrls} + sudo ovs-vsctl --no-wait set-controller ${OVS_BRIDGE} ${ctrls} +} + +function install_bigswitch_floodlight() { + : +} + +function start_bigswitch_floodlight() { + : +} + +function stop_bigswitch_floodlight() { + : +} + +# Restore xtrace +$XTRACE diff --git a/lib/quantum b/lib/quantum index f74eead6..b8b115af 100644 --- a/lib/quantum +++ b/lib/quantum @@ -217,6 +217,8 @@ function create_nova_conf_quantum() { iniset $NOVA_CONF DEFAULT libvirt_ovs_integration_bridge "$OVS_BRIDGE" iniset $NOVA_CONF DEFAULT linuxnet_ovs_ryu_api_host "$RYU_API_HOST:$RYU_API_PORT" iniset $NOVA_CONF DEFAULT libvirt_ovs_ryu_api_host "$RYU_API_HOST:$RYU_API_PORT" + elif [[ "$Q_PLUGIN" = "bigswitch_floodlight" ]]; then + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} fi iniset $NOVA_CONF DEFAULT libvirt_vif_driver "$NOVA_VIF_DRIVER" iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER" @@ -332,7 +334,7 @@ function install_quantum_agent_packages() { function is_quantum_ovs_base_plugin() { local plugin=$1 - if [[ ",openvswitch,ryu," =~ ,${plugin}, ]]; then + if [[ ",openvswitch,ryu,bigswitch_floodlight," =~ ,${plugin}, ]]; then return 0 fi return 1 @@ -407,6 +409,13 @@ function _configure_quantum_common() { Q_PLUGIN_CONF_FILENAME=ryu.ini Q_DB_NAME="ovs_quantum" Q_PLUGIN_CLASS="quantum.plugins.ryu.ryu_quantum_plugin.RyuQuantumPluginV2" + elif [[ "$Q_PLUGIN" = "bigswitch_floodlight" ]]; then + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/bigswitch + Q_PLUGIN_CONF_FILENAME=restproxy.ini + Q_DB_NAME="restproxy_quantum" + Q_PLUGIN_CLASS="quantum.plugins.bigswitch.plugin.QuantumRestProxyV2" + BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} + BS_FL_CONTROLLER_TIMEOUT=${BS_FL_CONTROLLER_TIMEOUT:-10} fi if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; 
then @@ -674,6 +683,9 @@ function _configure_quantum_service() { elif [[ "$Q_PLUGIN" = "ryu" ]]; then iniset /$Q_PLUGIN_CONF_FILE OVS openflow_controller $RYU_OFP_HOST:$RYU_OFP_PORT iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT + elif [[ "$Q_PLUGIN" = "bigswitch_floodlight" ]]; then + iniset /$Q_PLUGIN_CONF_FILE RESTPROXY servers $BS_FL_CONTROLLERS_PORT + iniset /$Q_PLUGIN_CONF_FILE RESTPROXY servertimeout $BS_FL_CONTROLLER_TIMEOUT fi } @@ -749,6 +761,8 @@ function _quantum_setup_interface_driver() { iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver elif [[ "$Q_PLUGIN" = "ryu" ]]; then iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver + elif [[ "$Q_PLUGIN" = "bigswitch_floodlight" ]]; then + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver fi } @@ -835,7 +849,7 @@ function _ssh_check_quantum() { # Quantum 3rd party programs #--------------------------- # A comma-separated list of 3rd party programs -QUANTUM_THIRD_PARTIES="ryu" +QUANTUM_THIRD_PARTIES="ryu,bigswitch_floodlight" for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do source lib/$third_party done From 43eb0b3159d8ad1eb14e0430124cc72cb50ae3c2 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Sat, 12 Jan 2013 20:10:34 +0000 Subject: [PATCH 902/967] Make sure to cleanup swift on unstack/relaunch. - Fixes bug 1049553. Change-Id: I9fef93d25512c014dfb882adf0e169487bf877d8 --- lib/swift | 15 ++++++++------- unstack.sh | 1 + 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/lib/swift b/lib/swift index b418eda8..aff45967 100644 --- a/lib/swift +++ b/lib/swift @@ -107,16 +107,17 @@ function configure_swift() { if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 + sudo rm -f ${SWIFT_DATA_DIR}/drives/images/swift.img fi - else - mkdir -p ${SWIFT_DATA_DIR}/drives/images - sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img - sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img - - dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \ - bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} fi + mkdir -p ${SWIFT_DATA_DIR}/drives/images + sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img + sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img + + dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \ + bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} + # Make a fresh XFS filesystem mkfs.xfs -f -i size=1024 ${SWIFT_DATA_DIR}/drives/images/swift.img diff --git a/unstack.sh b/unstack.sh index 1d4bfd56..a086d5c6 100755 --- a/unstack.sh +++ b/unstack.sh @@ -65,6 +65,7 @@ fi # Swift runs daemons if is_service_enabled swift; then stop_swift + cleanup_swift fi # Apache has the WSGI processes From 7bf1dd351fc859346ac8cdd3574b86f294e97def Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sat, 12 Jan 2013 17:31:26 +0100 Subject: [PATCH 903/967] Tempest should create his own flavors * Decrease memory usage caused by tempest significantly Change-Id: I0ea59d9bb1fbeb93f04353bc6b4e148637edf945 --- lib/tempest | 54 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 31 insertions(+), 23 deletions(-) diff --git a/lib/tempest b/lib/tempest index fa637c12..906ca6ab 100644 --- a/lib/tempest +++ b/lib/tempest @@ -129,33 +129,41 @@ function configure_tempest() { ALT_USERNAME=${ALT_USERNAME:-alt_demo} 
ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo} - # Check Nova for existing flavors and, if set, look for the - # ``DEFAULT_INSTANCE_TYPE`` and use that. Otherwise, just use the first flavor. - flavor_lines=`nova flavor-list` - IFS=$'\r\n' - flavors="" - if [[ -n "$DEFAULT_INSTANCE_TYPE" ]]; then + # If the ``DEFAULT_INSTANCE_TYPE`` not declared, use the new behavior + # Tempest creates instane types for himself + if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then + nova flavor-create m1.pico 42 32 0 1 + flavor_ref=42 + nova flavor-create m1.nano 84 64 0 1 + flavor_ref_alt=84 + else + # Check Nova for existing flavors and, if set, look for the + # ``DEFAULT_INSTANCE_TYPE`` and use that. + flavor_lines=`nova flavor-list` + IFS=$'\r\n' + flavors="" for line in $flavor_lines; do f=$(echo $line | awk "/ $DEFAULT_INSTANCE_TYPE / { print \$2 }") flavors="$flavors $f" done - fi - for line in $flavor_lines; do - flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" - done - - IFS=" " - flavors=($flavors) - num_flavors=${#flavors[*]} - echo "Found $num_flavors flavors" - if [[ $num_flavors -eq 0 ]]; then - echo "Found no valid flavors to use!" - exit 1 - fi - flavor_ref=${flavors[0]} - flavor_ref_alt=$flavor_ref - if [[ $num_flavors -gt 1 ]]; then - flavor_ref_alt=${flavors[1]} + + for line in $flavor_lines; do + flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" + done + + IFS=" " + flavors=($flavors) + num_flavors=${#flavors[*]} + echo "Found $num_flavors flavors" + if [[ $num_flavors -eq 0 ]]; then + echo "Found no valid flavors to use!" + exit 1 + fi + flavor_ref=${flavors[0]} + flavor_ref_alt=$flavor_ref + if [[ $num_flavors -gt 1 ]]; then + flavor_ref_alt=${flavors[1]} + fi fi if [ "$Q_USE_NAMESPACE" != "False" ]; then From af988fd1d20aff684cde07d8683ecf5e0d539dfe Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sun, 13 Jan 2013 14:20:47 +0100 Subject: [PATCH 904/967] Support RHEL with lsb_release Consider all distributor as "Red Hat" which id matches to the Red.*Hat regexp. Example Distributor ID: "RedHatEnterpriseServer" Change-Id: I29cc2e83cccaafa3e1e056e506fda5c9771764a1 --- functions | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/functions b/functions index 80e17969..55a81c54 100644 --- a/functions +++ b/functions @@ -224,6 +224,7 @@ GetOSVersion() { os_VENDOR=$(lsb_release -i -s) os_RELEASE=$(lsb_release -r -s) os_UPDATE="" + os_PACKAGE="rpm" if [[ "Debian,Ubuntu" =~ $os_VENDOR ]]; then os_PACKAGE="deb" elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then @@ -231,9 +232,8 @@ GetOSVersion() { if [[ $? -eq 0 ]]; then os_VENDOR="openSUSE" fi - os_PACKAGE="rpm" - else - os_PACKAGE="rpm" + elif [[ $os_VENDOR =~ Red.*Hat ]]; then + os_VENDOR="Red Hat" fi os_CODENAME=$(lsb_release -c -s) elif [[ -r /etc/redhat-release ]]; then From b0f1c38bdcb02068e2e3d0daf2d65695d9d58478 Mon Sep 17 00:00:00 2001 From: Akihiro MOTOKI Date: Sun, 13 Jan 2013 17:58:12 +0900 Subject: [PATCH 905/967] Refactor rpc backend configuration logic This commit also changes the following: - Fixes Nova QPID module path - Fixes a bug Cinder ZeroMQ RPC points to nova module - Adds ZeroMQ setting for Heat RPC qpid_is_supported is moved from functions to lib/rpc_backend. This work is based on the work by Isaku Yamahata in https://review.openstack.org/#/c/19074/. 
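(Illustrative aside, not part of this change: after this refactor a service's configure step reduces to a single call, for example

    iniset_rpc_backend nova $NOVA_CONF DEFAULT

which, when the "rabbit" service is enabled, expands to

    iniset $NOVA_CONF DEFAULT rpc_backend nova.openstack.common.rpc.impl_kombu
    iniset $NOVA_CONF DEFAULT rabbit_host $RABBIT_HOST
    iniset $NOVA_CONF DEFAULT rabbit_password $RABBIT_PASSWORD

and, for qpid or zeromq, writes the matching impl_qpid or impl_zmq rpc_backend instead; see lib/rpc_backend below.)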
Change-Id: I45e21b1fb85e539213f5243764132a37906d7455 --- functions | 12 ----- lib/cinder | 9 +--- lib/heat | 32 ++----------- lib/quantum | 15 +----- lib/rpc_backend | 123 ++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 78 ++++-------------------------- 6 files changed, 137 insertions(+), 132 deletions(-) create mode 100644 lib/rpc_backend diff --git a/functions b/functions index 80e17969..47950420 100644 --- a/functions +++ b/functions @@ -1133,18 +1133,6 @@ function get_pip_command() { fi } -# Check if qpid can be used on the current distro. -# qpid_is_supported -function qpid_is_supported() { - if [[ -z "$DISTRO" ]]; then - GetDistro - fi - - # Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is - # not in openSUSE either right now. - ( ! ([[ "$DISTRO" = "oneiric" ]] || is_suse) ) -} - # Restore xtrace $XTRACE diff --git a/lib/cinder b/lib/cinder index d9f8d63f..8b1ccd71 100644 --- a/lib/cinder +++ b/lib/cinder @@ -184,14 +184,7 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT use_syslog True fi - if is_service_enabled qpid ; then - iniset $CINDER_CONF DEFAULT rpc_backend cinder.openstack.common.rpc.impl_qpid - elif is_service_enabled zeromq; then - iniset $CINDER_CONF DEFAULT rpc_backend nova.openstack.common.rpc.impl_zmq - elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $CINDER_CONF DEFAULT rabbit_host $RABBIT_HOST - iniset $CINDER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - fi + iniset_rpc_backend cinder $CINDER_CONF DEFAULT if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then iniset $CINDER_CONF DEFAULT secure_delete False diff --git a/lib/heat b/lib/heat index 89bd44f0..5b8b360a 100644 --- a/lib/heat +++ b/lib/heat @@ -69,13 +69,7 @@ function configure_heat() { iniset $HEAT_API_CFN_CONF DEFAULT bind_host $HEAT_API_CFN_HOST iniset $HEAT_API_CFN_CONF DEFAULT bind_port $HEAT_API_CFN_PORT - if is_service_enabled rabbit; then - iniset $HEAT_API_CFN_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu - iniset $HEAT_API_CFN_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $HEAT_API_CFN_CONF DEFAULT rabbit_host $RABBIT_HOST - elif is_service_enabled qpid; then - iniset $HEAT_API_CFN_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid - fi + iniset_rpc_backend heat $HEAT_API_CFN_CONF DEFAULT HEAT_API_CFN_PASTE_INI=$HEAT_CONF_DIR/heat-api-cfn-paste.ini cp $HEAT_DIR/etc/heat/heat-api-cfn-paste.ini $HEAT_API_CFN_PASTE_INI @@ -98,13 +92,7 @@ function configure_heat() { iniset $HEAT_API_CONF DEFAULT bind_host $HEAT_API_HOST iniset $HEAT_API_CONF DEFAULT bind_port $HEAT_API_PORT - if is_service_enabled rabbit; then - iniset $HEAT_API_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu - iniset $HEAT_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $HEAT_API_CONF DEFAULT rabbit_host $RABBIT_HOST - elif is_service_enabled qpid; then - iniset $HEAT_API_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid - fi + iniset_rpc_backend heat $HEAT_API_CONF DEFAULT HEAT_API_PASTE_INI=$HEAT_CONF_DIR/heat-api-paste.ini cp $HEAT_DIR/etc/heat/heat-api-paste.ini $HEAT_API_PASTE_INI @@ -134,13 +122,7 @@ function configure_heat() { iniset $HEAT_ENGINE_CONF DEFAULT sql_connection $dburl iniset $HEAT_ENGINE_CONF DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/random` - if is_service_enabled rabbit; then - iniset $HEAT_ENGINE_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu - iniset $HEAT_ENGINE_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $HEAT_ENGINE_CONF 
DEFAULT rabbit_host $RABBIT_HOST - elif is_service_enabled qpid; then - iniset $HEAT_ENGINE_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid - fi + iniset_rpc_backend heat $HEAT_ENGINE_CONF DEFAULT # Cloudwatch API HEAT_API_CW_CONF=$HEAT_CONF_DIR/heat-api-cloudwatch.conf @@ -151,13 +133,7 @@ function configure_heat() { iniset $HEAT_API_CW_CONF DEFAULT bind_host $HEAT_API_CW_HOST iniset $HEAT_API_CW_CONF DEFAULT bind_port $HEAT_API_CW_PORT - if is_service_enabled rabbit; then - iniset $HEAT_API_CW_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu - iniset $HEAT_API_CW_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $HEAT_API_CW_CONF DEFAULT rabbit_host $RABBIT_HOST - elif is_service_enabled qpid; then - iniset $HEAT_API_CW_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid - fi + iniset_rpc_backend heat $HEAT_API_CW_CONF DEFAULT HEAT_API_CW_PASTE_INI=$HEAT_CONF_DIR/heat-api-cloudwatch-paste.ini cp $HEAT_DIR/etc/heat/heat-api-cloudwatch-paste.ini $HEAT_API_CW_PASTE_INI diff --git a/lib/quantum b/lib/quantum index 343e5a9b..19df4990 100644 --- a/lib/quantum +++ b/lib/quantum @@ -176,7 +176,7 @@ fi # Set common config for all quantum server and agents. function configure_quantum() { _configure_quantum_common - _configure_quantum_rpc + iniset_rpc_backend quantum $QUANTUM_CONF DEFAULT if is_service_enabled q-svc; then _configure_quantum_service @@ -596,19 +596,6 @@ function _configure_quantum_plugin_agent_ryu() { AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py" } -# Quantum RPC support - must be updated prior to starting any of the services -function _configure_quantum_rpc() { - iniset $QUANTUM_CONF DEFAULT control_exchange quantum - if is_service_enabled qpid ; then - iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid - elif is_service_enabled zeromq; then - iniset $QUANTUM_CONF DEFAULT rpc_backend quantum.openstack.common.rpc.impl_zmq - elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $QUANTUM_CONF DEFAULT rabbit_host $RABBIT_HOST - iniset $QUANTUM_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - fi -} - # _configure_quantum_service() - Set config files for quantum service # It is called when q-svc is enabled. function _configure_quantum_service() { diff --git a/lib/rpc_backend b/lib/rpc_backend new file mode 100644 index 00000000..4d7f8d2f --- /dev/null +++ b/lib/rpc_backend @@ -0,0 +1,123 @@ +# lib/rpc_backend +# Interface for interactig with different rpc backend +# rpc backend settings + +# Dependencies: +# ``functions`` file +# ``RABBIT_{HOST|PASSWORD}`` must be defined when RabbitMQ is used + +# ``stack.sh`` calls the entry points in this order: +# +# check_rpc_backend +# install_rpc_backend +# restart_rpc_backend +# iniset_rpc_backend + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Entry Points +# ------------ + +# Make sure we only have one rpc backend enabled. +# Also check the specified rpc backend is available on your platform. +function check_rpc_backend() { + local rpc_backend_cnt=0 + for svc in qpid zeromq rabbit; do + is_service_enabled $svc && + ((rpc_backend_cnt++)) + done + if [ "$rpc_backend_cnt" -gt 1 ]; then + echo "ERROR: only one rpc backend may be enabled," + echo " set only one of 'rabbit', 'qpid', 'zeromq'" + echo " via ENABLED_SERVICES." + elif [ "$rpc_backend_cnt" == 0 ]; then + echo "ERROR: at least one rpc backend must be enabled," + echo " set one of 'rabbit', 'qpid', 'zeromq'" + echo " via ENABLED_SERVICES." 
+ fi + + if is_service_enabled qpid && ! qpid_is_supported; then + echo "Qpid support is not available for this version of your distribution." + exit 1 + fi +} + +# install rpc backend +function install_rpc_backend() { + if is_service_enabled rabbit; then + # Install rabbitmq-server + # the temp file is necessary due to LP: #878600 + tfile=$(mktemp) + install_package rabbitmq-server > "$tfile" 2>&1 + cat "$tfile" + rm -f "$tfile" + elif is_service_enabled qpid; then + if is_fedora; then + install_package qpid-cpp-server-daemon + elif is_ubuntu; then + install_package qpidd + else + exit_distro_not_supported "qpid installation" + fi + elif is_service_enabled zeromq; then + if is_fedora; then + install_package zeromq python-zmq + elif is_ubuntu; then + install_package libzmq1 python-zmq + elif is_suse; then + install_package libzmq1 python-pyzmq + else + exit_distro_not_supported "zeromq installation" + fi + fi +} + +# restart the rpc backend +function restart_rpc_backend() { + if is_service_enabled rabbit; then + # Start rabbitmq-server + echo_summary "Starting RabbitMQ" + if is_fedora || is_suse; then + # service is not started by default + restart_service rabbitmq-server + fi + # change the rabbit password since the default is "guest" + sudo rabbitmqctl change_password guest $RABBIT_PASSWORD + elif is_service_enabled qpid; then + echo_summary "Starting qpid" + restart_service qpidd + fi +} + +# iniset cofiguration +function iniset_rpc_backend() { + local package=$1 + local file=$2 + local section=$3 + if is_service_enabled zeromq; then + iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_zmq + elif is_service_enabled qpid; then + iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_qpid + elif is_service_enabled rabbit; then + iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_kombu + iniset $file $section rabbit_host $RABBIT_HOST + iniset $file $section rabbit_password $RABBIT_PASSWORD + fi +} + +# Check if qpid can be used on the current distro. +# qpid_is_supported +function qpid_is_supported() { + if [[ -z "$DISTRO" ]]; then + GetDistro + fi + + # Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is + # not in openSUSE either right now. + ( ! ([[ "$DISTRO" = "oneiric" ]] || is_suse) ) +} + +# Restore xtrace +$XTRACE diff --git a/stack.sh b/stack.sh index 5c071fcf..bf473ca7 100755 --- a/stack.sh +++ b/stack.sh @@ -95,8 +95,9 @@ if [[ -r $TOP_DIR/.stackenv ]]; then rm $TOP_DIR/.stackenv fi -# Import database configuration +# Import common services (database, message queue) configuration source $TOP_DIR/lib/database +source $TOP_DIR/lib/rpc_backend # Validate database selection # Since DATABASE_BACKENDS is now set, this also gets ENABLED_SERVICES @@ -118,10 +119,9 @@ if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18|opensuse-12.2) fi fi -if is_service_enabled qpid && ! qpid_is_supported; then - echo "Qpid support is not available for this version of your distribution." - exit 1 -fi +# Make sure we only have one rpc backend enabled, +# and the specified rpc backend is available on your platform. +check_rpc_backend # ``stack.sh`` keeps function libraries here # Make sure ``$TOP_DIR/lib`` directory is present @@ -147,23 +147,6 @@ if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].$SCREEN_NAME"; then exit 1 fi -# Make sure we only have one rpc backend enabled. 
-rpc_backend_cnt=0 -for svc in qpid zeromq rabbit; do - is_service_enabled $svc && - ((rpc_backend_cnt++)) -done -if [ "$rpc_backend_cnt" -gt 1 ]; then - echo "ERROR: only one rpc backend may be enabled," - echo " set only one of 'rabbit', 'qpid', 'zeromq'" - echo " via ENABLED_SERVICES." -elif [ "$rpc_backend_cnt" == 0 ]; then - echo "ERROR: at least one rpc backend must be enabled," - echo " set one of 'rabbit', 'qpid', 'zeromq'" - echo " via ENABLED_SERVICES." -fi -unset rpc_backend_cnt - # Set up logging level VERBOSE=$(trueorfalse True $VERBOSE) @@ -670,32 +653,7 @@ if [[ $SYSLOG != "False" ]]; then fi fi -if is_service_enabled rabbit; then - # Install rabbitmq-server - # the temp file is necessary due to LP: #878600 - tfile=$(mktemp) - install_package rabbitmq-server > "$tfile" 2>&1 - cat "$tfile" - rm -f "$tfile" -elif is_service_enabled qpid; then - if is_fedora; then - install_package qpid-cpp-server-daemon - elif is_ubuntu; then - install_package qpidd - else - exit_distro_not_supported "qpid installation" - fi -elif is_service_enabled zeromq; then - if is_fedora; then - install_package zeromq python-zmq - elif is_ubuntu; then - install_package libzmq1 python-zmq - elif is_suse; then - install_package libzmq1 python-pyzmq - else - exit_distro_not_supported "zeromq installation" - fi -fi +install_rpc_backend if is_service_enabled $DATABASE_BACKENDS; then install_database @@ -868,20 +826,7 @@ fi # Finalize queue installation # ---------------------------- - -if is_service_enabled rabbit; then - # Start rabbitmq-server - echo_summary "Starting RabbitMQ" - if is_fedora || is_suse; then - # service is not started by default - restart_service rabbitmq-server - fi - # change the rabbit password since the default is "guest" - sudo rabbitmqctl change_password guest $RABBIT_PASSWORD -elif is_service_enabled qpid; then - echo_summary "Starting qpid" - restart_service qpidd -fi +restart_rpc_backend # Configure database @@ -1075,14 +1020,7 @@ if is_service_enabled nova; then iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" - if is_service_enabled zeromq; then - iniset $NOVA_CONF DEFAULT rpc_backend "nova.openstack.common.rpc.impl_zmq" - elif is_service_enabled qpid; then - iniset $NOVA_CONF DEFAULT rpc_backend "nova.rpc.impl_qpid" - elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $NOVA_CONF DEFAULT rabbit_host "$RABBIT_HOST" - iniset $NOVA_CONF DEFAULT rabbit_password "$RABBIT_PASSWORD" - fi + iniset_rpc_backend nova $NOVA_CONF DEFAULT iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" From 3860a9252a8546326a7ac9f0c8b2c09afe655491 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 14 Jan 2013 13:14:34 +0100 Subject: [PATCH 906/967] Have ecua.sh to accept the correct error code ecua.sh will accept both the current and the correct error code Change-Id: I364e411986b9780fd5c5df29697753f04a9a4935 --- exercises/euca.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index 76df254b..46e40251 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -169,7 +169,7 @@ euca-terminate-instances $INSTANCE || \ # case changed with bug/836978. Requesting the status of an invalid instance # will now return an error message including the instance id, so we need to # filter that out. -if ! 
timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE |grep -v \"InstanceNotFound\" | grep -q $INSTANCE; do sleep 1; done"; then +if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -ve \"\\\(InstanceNotFound\\\|InvalidInstanceId\[.\]NotFound\\\)\" | grep -q $INSTANCE; do sleep 1; done"; then echo "server didn't terminate within $TERMINATE_TIMEOUT seconds" exit 1 fi From 7c73e8dee705b4670cd051fad53e20e4a3cbe623 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Mon, 7 Jan 2013 08:17:01 +0000 Subject: [PATCH 907/967] Enable MySQL slow query log. Change-Id: I3db33839bea28abaff01f1d7b7d6698c5dd2c083 --- lib/databases/mysql | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/lib/databases/mysql b/lib/databases/mysql index 1c0f5ebf..965df6ee 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -63,6 +63,21 @@ function configure_database_mysql { default-storage-engine = InnoDB" $MY_CONF fi + # Turn on slow query log + sudo sed -i '/log.slow.queries/d' $MY_CONF + sudo sed -i -e "/^\[mysqld\]/ a \ +log-slow-queries = /var/log/mysql/mysql-slow.log" $MY_CONF + + # Log any query taking longer than a second + sudo sed -i '/long.query.time/d' $MY_CONF + sudo sed -i -e "/^\[mysqld\]/ a \ +long-query-time = 1" $MY_CONF + + # Log all non-indexed queries + sudo sed -i '/log.queries.not.using.indexes/d' $MY_CONF + sudo sed -i -e "/^\[mysqld\]/ a \ +log-queries-not-using-indexes" $MY_CONF + restart_service $MYSQL } From 02c0bcc38c143e0a9f66b9c4080f3881f8b3ddfd Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 14 Jan 2013 19:10:17 +0100 Subject: [PATCH 908/967] Increase tempest memory 32 MB not enough for boot correctly Change-Id: I58ca4c7e8dd303450a9970136d6f60661ea70f67 --- lib/tempest | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index 906ca6ab..84afc099 100644 --- a/lib/tempest +++ b/lib/tempest @@ -132,9 +132,9 @@ function configure_tempest() { # If the ``DEFAULT_INSTANCE_TYPE`` not declared, use the new behavior # Tempest creates instane types for himself if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then - nova flavor-create m1.pico 42 32 0 1 + nova flavor-create m1.nano 42 64 0 1 flavor_ref=42 - nova flavor-create m1.nano 84 64 0 1 + nova flavor-create m1.micro 84 128 0 1 flavor_ref_alt=84 else # Check Nova for existing flavors and, if set, look for the From 532908f6021f5e031cae01aa2374cd62da0200a9 Mon Sep 17 00:00:00 2001 From: Steven Dake Date: Mon, 14 Jan 2013 11:35:17 -0700 Subject: [PATCH 909/967] Remove error from httpd/apache on unstack.sh Fedora/RHEL use httpd for the package name of httpd. This is handled in other parts of the horizon startup code, but not in shutdown. Change-Id: I2732dad652d83a9cbe055f5f077678b7111ca782 Fixes: bug #1099538 --- lib/horizon | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/horizon b/lib/horizon index 5d479d5d..9180370b 100644 --- a/lib/horizon +++ b/lib/horizon @@ -138,7 +138,15 @@ function start_horizon() { # stop_horizon() - Stop running processes (non-screen) function stop_horizon() { - stop_service apache2 + if is_ubuntu; then + stop_service apache2 + elif is_fedora; then + stop_service httpd + elif is_suse; then + stop_service apache2 + else + exit_distro_not_supported "apache configuration" + fi } # Restore xtrace From 31c94ab510a6896f3e87912443006ed43e61cf72 Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Wed, 19 Dec 2012 03:59:20 +0000 Subject: [PATCH 910/967] Improve quantum l3 and tempest config. 
* Previously, configuration for the q-l3 agent was creating a tenant-owned router. This change maintains that behaviour if namespaces are enabled, but creates a public (not tenant-owned) router if namespaces are disabled. Since the L3 agent can only manage a single router if namespaces are disabled, the change ensures that the single router can be shared by multiple tenants. * Add tempest configuration for the public router. Change-Id: I2878a7eb9797bfd71082a55d4773519dc5198abc --- lib/quantum | 11 ++++++++++- lib/tempest | 11 +++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/lib/quantum b/lib/quantum index 343e5a9b..9c06f457 100644 --- a/lib/quantum +++ b/lib/quantum @@ -92,6 +92,8 @@ Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP} Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-False} # Use quantum-debug command Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} +# The name of the default q-l3 router +Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} if is_service_enabled quantum; then Q_RR_CONF_FILE=$QUANTUM_CONF_DIR/rootwrap.conf @@ -277,7 +279,14 @@ function create_quantum_initial_network() { if is_service_enabled q-l3; then # Create a router, and add the private subnet as one of its interfaces - ROUTER_ID=$(quantum router-create --tenant_id $TENANT_ID router1 | grep ' id ' | get_field 2) + if [[ "$Q_USE_NAMESPACE" == "True" ]]; then + # If namespaces are enabled, create a tenant-owned router. + ROUTER_ID=$(quantum router-create --tenant_id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2) + else + # If namespaces are disabled, the L3 agent can only target + # a single router, which should not be tenant-owned. + ROUTER_ID=$(quantum router-create $Q_ROUTER_NAME | grep ' id ' | get_field 2) + fi quantum router-interface-add $ROUTER_ID $SUBNET_ID # Create an external network, and a subnet. Configure the external network as router gw EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2) diff --git a/lib/tempest b/lib/tempest index 84afc099..0835234c 100644 --- a/lib/tempest +++ b/lib/tempest @@ -11,6 +11,9 @@ # - ``S3_SERVICE_PORT`` # - ``SERVICE_HOST`` # - ``BASE_SQL_CONN`` ``lib/database`` declares +# - ``PUBLIC_NETWORK_NAME`` +# - ``Q_USE_NAMESPACE`` +# - ``Q_ROUTER_NAME`` # Optional Dependencies: # IDENTITY_USE_SSL, IDENTITY_HOST, IDENTITY_PORT, IDENTITY_PATH # ALT_* (similar vars exists in keystone_data.sh) @@ -61,6 +64,7 @@ function configure_tempest() { local flavors_ref local flavor_lines local public_network_id + local public_router_id local tenant_networks_reachable # TODO(afazekas): @@ -175,6 +179,12 @@ function configure_tempest() { if is_service_enabled q-l3; then public_network_id=$(quantum net-list | grep $PUBLIC_NETWORK_NAME | \ awk '{print $2}') + if [ "$Q_USE_NAMESPACE" == "False" ]; then + # If namespaces are disabled, devstack will create a single + # public router that tempest should be configured to use. 
+ public_router_id=$(quantum router-list | awk "/ $Q_ROUTER_NAME / \ + { print \$2 }") + fi fi # Timeouts @@ -243,6 +253,7 @@ function configure_tempest() { iniset $TEMPEST_CONF network password "$password" iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable" iniset $TEMPEST_CONF network public_network_id "$public_network_id" + iniset $TEMPEST_CONF network public_router_id "$public_router_id" #boto iniset $TEMPEST_CONF boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" From 2298ca4f705e28dcc4b2aa605b73470612f6bb61 Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Thu, 25 Oct 2012 23:46:42 +0000 Subject: [PATCH 911/967] Add limited support for Quantum+OVS on XS/XCP. * Add priliminary support for running the OVS L2 and DHCP agents in domU: * Configure Nova to use the correct vif driver and integration bridge. * Configure the ovs agent to target the dom0 integration bridge. * Install a xapi plugin supporting dom0 execution of ovs agent commands. * Config doc: http://wiki.openstack.org/QuantumDevstackOvsXcp * Supports blueprint xenapi-ovs Change-Id: If5ab07daab1dc3918004eb4bfb6fed6cab0a71fd --- lib/quantum | 46 ++++++++++++++++++++++++++++++++++-- tools/xen/install_os_domU.sh | 13 ++++++++++ 2 files changed, 57 insertions(+), 2 deletions(-) diff --git a/lib/quantum b/lib/quantum index 9c06f457..bfea2b55 100644 --- a/lib/quantum +++ b/lib/quantum @@ -212,6 +212,10 @@ function create_nova_conf_quantum() { if [[ "$Q_PLUGIN" = "openvswitch" ]]; then NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} + if [ "$VIRT_DRIVER" = 'xenserver' ]; then + add_nova_opt "xenapi_vif_driver=nova.virt.xenapi.vif.XenAPIOpenVswitchDriver" + add_nova_opt "xenapi_ovs_integration_bridge=$FLAT_NETWORK_BRIDGE" + fi elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"} elif [[ "$Q_PLUGIN" = "ryu" ]]; then @@ -536,6 +540,11 @@ function _configure_quantum_metadata_agent() { # _configure_quantum_plugin_agent() - Set config files for quantum plugin agent # It is called when q-agt is enabled. function _configure_quantum_plugin_agent() { + + # Specify the default root helper prior to agent configuration to + # ensure that an agent's configuration can override the default. + iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" + # Configure agent for plugin if [[ "$Q_PLUGIN" = "openvswitch" ]]; then _configure_quantum_plugin_agent_openvswitch @@ -544,8 +553,6 @@ function _configure_quantum_plugin_agent() { elif [[ "$Q_PLUGIN" = "ryu" ]]; then _configure_quantum_plugin_agent_ryu fi - - iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" } function _configure_quantum_plugin_agent_linuxbridge() { @@ -593,6 +600,41 @@ function _configure_quantum_plugin_agent_openvswitch() { iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS fi AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent" + + if [ "$VIRT_DRIVER" = 'xenserver' ]; then + # Nova will always be installed along with quantum for a domU + # devstack install, so it should be safe to rely on nova.conf + # for xenapi configuration. + Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-dom0 $NOVA_CONF" + # Under XS/XCP, the ovs agent needs to target the dom0 + # integration bridge. This is enabled by using a root wrapper + # that executes commands on dom0 via a XenAPI plugin. 
+ iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_DOM0_COMMAND" + + # FLAT_NETWORK_BRIDGE is the dom0 integration bridge. To + # ensure the bridge lacks direct connectivity, set + # VM_VLAN=-1;VM_DEV=invalid in localrc + iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $FLAT_NETWORK_BRIDGE + + # The ovs agent needs to ensure that the ports associated with + # a given network share the same local vlan tag. On + # single-node XS/XCP, this requires monitoring both the dom0 + # bridge, where VM's are attached, and the domU bridge, where + # dhcp servers are attached. + if is_service_enabled q-dhcp; then + iniset /$Q_PLUGIN_CONF_FILE OVS domu_integration_bridge $OVS_BRIDGE + # DomU will use the regular rootwrap + iniset /$Q_PLUGIN_CONF_FILE AGENT domu_root_helper "$Q_RR_COMMAND" + # Plug the vm interface into the domU integration bridge. + sudo ip addr flush dev $GUEST_INTERFACE_DEFAULT + sudo ip link set $OVS_BRIDGE up + # Assign the VM IP only if it has been set explicitly + if [[ "$VM_IP" != "" ]]; then + sudo ip addr add $VM_IP dev $OVS_BRIDGE + fi + sudo ovs-vsctl add-port $OVS_BRIDGE $GUEST_INTERFACE_DEFAULT + fi + fi } function _configure_quantum_plugin_agent_ryu() { diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index e270e59b..b4fbb699 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -68,6 +68,19 @@ if [ ! -d $XAPI_PLUGIN_DIR ]; then XAPI_PLUGIN_DIR=/usr/lib/xcp/plugins/ fi cp -pr ./nova/*/plugins/xenserver/xenapi/etc/xapi.d/plugins/* $XAPI_PLUGIN_DIR + +# Install the netwrap xapi plugin to support agent control of dom0 networking +if [[ "$ENABLED_SERVICES" =~ "q-agt" && "$Q_PLUGIN" = "openvswitch" ]]; then + if [ -f ./quantum ]; then + rm -rf ./quantum + fi + # get quantum + QUANTUM_ZIPBALL_URL=${QUANTUM_ZIPBALL_URL:-$(echo $QUANTUM_REPO | sed "s:\.git$::;s:$:/zipball/$QUANTUM_BRANCH:g")} + wget $QUANTUM_ZIPBALL_URL -O quantum-zipball --no-check-certificate + unzip -o quantum-zipball -d ./quantum + cp -pr ./quantum/*/quantum/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/* $XAPI_PLUGIN_DIR +fi + chmod a+x ${XAPI_PLUGIN_DIR}* mkdir -p /boot/guest From 1d29d8bcf734cdd6db54da3c1458bfdb636e453c Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 7 Jan 2013 15:51:32 +0100 Subject: [PATCH 912/967] Add basic uec image preparation to tempest Add uec image preparation to lib/tempest. cirros as image is hard coded at the moment. If the images does not exists or the system is not able to use uec images the image prepare step will be skipped and tempest will skip the related tests as well. Setting ssh username correctly. Setting instance type for the boto test. 
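For illustration, a minimal localrc sketch (not part of this patch; the image URL and driver values are only assumptions) that keeps the cirros 0.3.0 UEC files available so this preparation step is not skipped:

    # Pre-seed the cirros UEC tarball so init_tempest can bundle
    # aki/ari/ami images for the boto tests (URL is an example)
    IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz"
    # lxc and openvz cannot boot UEC images, so the preparation (and the
    # related tempest tests) would be skipped with those drivers
    VIRT_DRIVER=libvirt
    LIBVIRT_TYPE=kvm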
Change-Id: I0d36ac7834e1eb677007e2c92dfc375d134a6023 --- extras.d/80-tempest.sh | 1 + lib/tempest | 41 ++++++++++++++++++++++++++++++++++++++--- 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh index 506ccef7..f1599557 100644 --- a/extras.d/80-tempest.sh +++ b/extras.d/80-tempest.sh @@ -9,6 +9,7 @@ if [[ "$1" == "stack" ]]; then echo_summary "Configuring Tempest" install_tempest configure_tempest + init_tempest fi fi diff --git a/lib/tempest b/lib/tempest index 0835234c..c08a4306 100644 --- a/lib/tempest +++ b/lib/tempest @@ -14,10 +14,11 @@ # - ``PUBLIC_NETWORK_NAME`` # - ``Q_USE_NAMESPACE`` # - ``Q_ROUTER_NAME`` +# - ``VIRT_DRIVER`` +# - ``LIBVIRT_TYPE`` # Optional Dependencies: # IDENTITY_USE_SSL, IDENTITY_HOST, IDENTITY_PORT, IDENTITY_PATH # ALT_* (similar vars exists in keystone_data.sh) -# ``OS_USERNAME`` # ``IMAGE_PORT``, ``IMAGE_HOST`` # ``LIVE_MIGRATION_AVAILABLE`` # ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION`` @@ -27,6 +28,7 @@ # # install_tempest # configure_tempest +# init_tempest # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -47,6 +49,8 @@ BUILD_INTERVAL=3 BUILD_TIMEOUT=400 +BOTO_MATERIALS_PATH="$DEST/devstack/files/images/s3-materials/cirros-0.3.0" + # Entry Points # ------------ @@ -66,6 +70,7 @@ function configure_tempest() { local public_network_id local public_router_id local tenant_networks_reachable + local boto_instance_type="m1.tiny" # TODO(afazekas): # sudo python setup.py deploy @@ -138,11 +143,13 @@ function configure_tempest() { if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then nova flavor-create m1.nano 42 64 0 1 flavor_ref=42 + boto_instance_type=m1.nano nova flavor-create m1.micro 84 128 0 1 flavor_ref_alt=84 else # Check Nova for existing flavors and, if set, look for the # ``DEFAULT_INSTANCE_TYPE`` and use that. 
+ boto_instance_type=$DEFAULT_INSTANCE_TYPE flavor_lines=`nova flavor-list` IFS=$'\r\n' flavors="" @@ -216,10 +223,10 @@ function configure_tempest() { iniset $TEMPEST_CONF compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} #Skip until #1074039 is fixed iniset $TEMPEST_CONF compute run_ssh False - iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-$OS_USERNAME} + iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} iniset $TEMPEST_CONF compute network_for_ssh $PRIVATE_NETWORK_NAME iniset $TEMPEST_CONF compute ip_version_for_ssh 4 - iniset $TEMPEST_CONF compute ssh_timeout 4 + iniset $TEMPEST_CONF compute ssh_timeout $BUILD_TIMEOUT iniset $TEMPEST_CONF compute image_ref $image_uuid iniset $TEMPEST_CONF compute image_ref_alt $image_uuid_alt iniset $TEMPEST_CONF compute flavor_ref $flavor_ref @@ -258,6 +265,9 @@ function configure_tempest() { #boto iniset $TEMPEST_CONF boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud" iniset $TEMPEST_CONF boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}" + iniset $TEMPEST_CONF boto s3_materials_path "$BOTO_MATERIALS_PATH" + iniset $TEMPEST_CONF boto instance_type "$boto_instance_type" + iniset $TEMPEST_CONF boto http_socket_timeout 30 echo "Created tempest configuration file:" cat $TEMPEST_CONF @@ -277,5 +287,30 @@ function install_tempest() { pip_install -r $TEMPEST_DIR/tools/pip-requires } +# init_tempest() - Initialize ec2 images +function init_tempest() { + local base_image_name=cirros-0.3.0-x86_64 + # /opt/stack/devstack/files/images/cirros-0.3.0-x86_64-uec + local devstack_dir="$DEST/devstack" + local image_dir="$devstack_dir/files/images/${base_image_name}-uec" + local kernel="$image_dir/${base_image_name}-vmlinuz" + local ramdisk="$image_dir/${base_image_name}-initrd" + local disk_image="$image_dir/${base_image_name}-blank.img" + # if the cirros uec downloaded and the system is uec capable + if [ -f "$kernel" -a -f "$ramdisk" -a -f "$disk_image" -a "$VIRT_DRIVER" != "openvz" \ + -a \( "$LIBVIRT_TYPE" != "lxc" -o "$VIRT_DRIVER" != "libvirt" \) ]; then + echo "Prepare aki/ari/ami Images" + ( #new namespace + # tenant:demo ; user: demo + source $devstack_dir/accrc/demo/demo + euca-bundle-image -i "$kernel" --kernel true -d "$BOTO_MATERIALS_PATH" + euca-bundle-image -i "$ramdisk" --ramdisk true -d "$BOTO_MATERIALS_PATH" + euca-bundle-image -i "$disk_image" -d "$BOTO_MATERIALS_PATH" + ) 2>&1 Date: Wed, 16 Jan 2013 08:38:17 +0100 Subject: [PATCH 913/967] Case correct InvalidInstanceID.NotFound Change-Id: Iab067398205f51d640355ef91f0896afaecc4dea --- exercises/euca.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exercises/euca.sh b/exercises/euca.sh index 46e40251..7b35f6fe 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -169,7 +169,7 @@ euca-terminate-instances $INSTANCE || \ # case changed with bug/836978. Requesting the status of an invalid instance # will now return an error message including the instance id, so we need to # filter that out. -if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -ve \"\\\(InstanceNotFound\\\|InvalidInstanceId\[.\]NotFound\\\)\" | grep -q $INSTANCE; do sleep 1; done"; then +if ! 
timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -ve \"\\\(InstanceNotFound\\\|InvalidInstanceID\[.\]NotFound\\\)\" | grep -q $INSTANCE; do sleep 1; done"; then echo "server didn't terminate within $TERMINATE_TIMEOUT seconds" exit 1 fi From af22a477d1326c345cc1c59049bf3b16e3510acd Mon Sep 17 00:00:00 2001 From: MORITA Kazutaka Date: Thu, 17 Jan 2013 16:16:25 +0900 Subject: [PATCH 914/967] lib/cinder: add sheepdog support This enables us to use Sheepdog as a Cinder backend storage by setting the CINDER_DRIVER environment variable. Change-Id: I70cfb7f89ac3260d277fa160c457d220255de065 --- lib/cinder | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/cinder b/lib/cinder index 8b1ccd71..a730cd62 100644 --- a/lib/cinder +++ b/lib/cinder @@ -209,6 +209,8 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" ) + elif [ "$CINDER_DRIVER" == "sheepdog" ]; then + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver" fi } From 3edddd108a4b5c785c7916b40aa153e055d54d8a Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Thu, 3 Jan 2013 12:06:47 +0000 Subject: [PATCH 915/967] Run nova-novncproxy binary in the nova GIT trree The nova-novncproxy binary was pulled into the nova GIT tree several months back, so devstack should run that version, rather than the legacy version from the noVNC GIT tree. Change-Id: I289989d5c0831c75117f059dd8f2decb1f9d3a90 Signed-off-by: Daniel P. Berrange --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 8135bf15..dd684321 100644 --- a/lib/nova +++ b/lib/nova @@ -549,7 +549,7 @@ function start_nova() { screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network" screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler" - screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF --web ." + screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $NOVA_CONF --web $NOVNC_DIR" screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF" screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth" } From d10e12f1335492550d558d56c510fdf8ce55a9dc Mon Sep 17 00:00:00 2001 From: "Daniel P. Berrange" Date: Thu, 3 Jan 2013 11:51:42 +0000 Subject: [PATCH 916/967] Add support for setting up nova-spicehtml5proxy Add a new service 'n-spice' which is off by default, but can be enabled to turn on SPICE support in the Nova libvirt driver. Also if neither n-novnc or n-xvnc are enabled, then disable VNC support. This allows running in a SPICE only environment. The spice-html5 repo will be checked out to support the Horizon client integration Change-Id: If74fad33a7b491450afd823758d35b06ebe72cb9 Signed-off-by: Daniel P. 
Berrange --- files/rpms/n-spice | 1 + lib/nova | 3 ++- stack.sh | 36 +++++++++++++++++++++++++++++++----- stackrc | 4 ++++ 4 files changed, 38 insertions(+), 6 deletions(-) create mode 100644 files/rpms/n-spice diff --git a/files/rpms/n-spice b/files/rpms/n-spice new file mode 100644 index 00000000..24ce15ab --- /dev/null +++ b/files/rpms/n-spice @@ -0,0 +1 @@ +numpy diff --git a/lib/nova b/lib/nova index dd684321..ccf93d9e 100644 --- a/lib/nova +++ b/lib/nova @@ -551,13 +551,14 @@ function start_nova() { screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler" screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $NOVA_CONF --web $NOVNC_DIR" screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF" + screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $NOVA_CONF --web $SPICE_DIR" screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth" } # stop_nova() - Stop running processes (non-screen) function stop_nova() { # Kill the nova screen windows - for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-cond; do + for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-cond n-spice; do screen -S $SCREEN_NAME -p $serv -X kill done } diff --git a/stack.sh b/stack.sh index bf473ca7..005d88e7 100755 --- a/stack.sh +++ b/stack.sh @@ -313,6 +313,7 @@ source $TOP_DIR/lib/baremetal HORIZON_DIR=$DEST/horizon OPENSTACKCLIENT_DIR=$DEST/python-openstackclient NOVNC_DIR=$DEST/noVNC +SPICE_DIR=$DEST/spice-html5 SWIFT3_DIR=$DEST/swift3 # Should cinder perform secure deletion of volumes? @@ -716,6 +717,10 @@ if is_service_enabled n-novnc; then # a websockets/html5 or flash powered VNC console for vm instances git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH fi +if is_service_enabled n-spice; then + # a websockets/html5 or flash powered SPICE console for vm instances + git_clone $SPICE_REPO $SPICE_DIR $SPICE_BRANCH +fi if is_service_enabled horizon; then # dashboard install_horizon @@ -1008,17 +1013,38 @@ if is_service_enabled nova; then iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL" XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"} iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL" + SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"} + iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL" fi if [ "$VIRT_DRIVER" = 'xenserver' ]; then VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1} else VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} fi - # Address on which instance vncservers will listen on compute hosts. - # For multi-host, this should be the management ip of the compute host. - VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} - iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" - iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + + if is_service_enabled n-novnc || is_service_enabled n-xvnc ; then + # Address on which instance vncservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. 
+ VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} + iniset $NOVA_CONF DEFAULT vnc_enabled true + iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" + iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + else + iniset $NOVA_CONF DEFAULT vnc_enabled false + fi + + if is_service_enabled n-spice; then + # Address on which instance spiceservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. + SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1} + SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1} + iniset $NOVA_CONF spice enabled true + iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN" + iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" + else + iniset $NOVA_CONF spice enabled false + fi + iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" iniset_rpc_backend nova $NOVA_CONF DEFAULT iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" diff --git a/stackrc b/stackrc index 8d194405..cfc4d1fb 100644 --- a/stackrc +++ b/stackrc @@ -78,6 +78,10 @@ KEYSTONE_BRANCH=master NOVNC_REPO=https://github.com/kanaka/noVNC.git NOVNC_BRANCH=master +# a websockets/html5 or flash powered SPICE console for vm instances +SPICE_REPO=http://anongit.freedesktop.org/git/spice/spice-html5.git +SPICE_BRANCH=master + # django powered web control panel for openstack HORIZON_REPO=${GIT_BASE}/openstack/horizon.git HORIZON_BRANCH=master From 029598ea74ab2adf08801e384b919cc2cd13398c Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 17 Jan 2013 11:17:16 -0600 Subject: [PATCH 917/967] Fix secgroups exercise on postgres This should fix the failing (but non-voting) postgres gate tests. Why does postgresql change the value '0.0.0.0/00' to '0.0.0.0/0'? Clearly the correct value for the network CIDR bits is with only one zero but even an incorrect value shouldn't be changing. SQLalchemy is given this for the column: Column('cidr', String(length=255)), Change-Id: Ib19dad23789654664e90518087e5a462fa8b8034 --- exercises/sec_groups.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh index f6810e3e..fbd9c8e1 100755 --- a/exercises/sec_groups.sh +++ b/exercises/sec_groups.sh @@ -48,7 +48,7 @@ nova secgroup-create $SEC_GROUP_NAME 'a test security group' RULES_TO_ADD=( 22 3389 5900 ) for RULE in "${RULES_TO_ADD[@]}"; do - nova secgroup-add-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/00 + nova secgroup-add-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0 done # Check to make sure rules were added @@ -63,7 +63,7 @@ done # Delete rules and secgroup for RULE in "${RULES_TO_ADD[@]}"; do - nova secgroup-delete-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/00 + nova secgroup-delete-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0 done nova secgroup-delete $SEC_GROUP_NAME From 8750b3d533df1174fe7d11290f97ef6a5779758c Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 17 Jan 2013 23:49:50 -0500 Subject: [PATCH 918/967] Enable nova Verbose logging With oslo-incubator commit 751c35b1c8ff0730883a8ccdda9b77a49fff2405, (Change-Id: Ic9e3cb5979b2d7283552ad3a461870373f45a239) Verbose does not enable debug level logging. 
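As a rough sketch of the effect (not the patch itself), nova.conf now carries both settings explicitly, since verbose alone only raises the log level to INFO:

    [DEFAULT]
    # verbose no longer implies debug with the new oslo logging code
    verbose = True
    debug = True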
Change-Id: I1741ec0ca61e4c7234ee4e29cbd52ded73995451 --- lib/nova | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/nova b/lib/nova index 8135bf15..d323425d 100644 --- a/lib/nova +++ b/lib/nova @@ -355,6 +355,7 @@ function create_nova_conf() { rm -f $NOVA_CONF add_nova_opt "[DEFAULT]" iniset $NOVA_CONF DEFAULT verbose "True" + iniset $NOVA_CONF DEFAULT debug "True" iniset $NOVA_CONF DEFAULT auth_strategy "keystone" iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True" iniset $NOVA_CONF DEFAULT api_paste_config "$NOVA_API_PASTE_INI" From f2696c0d85de4070504699bce767a27f4dd5a297 Mon Sep 17 00:00:00 2001 From: "Walter A. Boring IV" Date: Thu, 17 Jan 2013 20:40:09 -0800 Subject: [PATCH 919/967] Add Nova fibre channel support required packages This patch is to support the nova patch that adds Fibre Channel support to nova. Fibre Channel requires sysfsutils, sg3-utils, multipath-tools Change-Id: I9e44ef9152f1916b245dba3be77076f0283fed44 --- files/apts/n-cpu | 2 ++ files/rpms-suse/n-cpu | 2 ++ files/rpms/n-cpu | 2 ++ 3 files changed, 6 insertions(+) diff --git a/files/apts/n-cpu b/files/apts/n-cpu index a40b6590..ad2d6d71 100644 --- a/files/apts/n-cpu +++ b/files/apts/n-cpu @@ -3,3 +3,5 @@ lvm2 open-iscsi open-iscsi-utils genisoimage +sysfsutils +sg3-utils diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu index 27d3254f..7040b843 100644 --- a/files/rpms-suse/n-cpu +++ b/files/rpms-suse/n-cpu @@ -2,3 +2,5 @@ genisoimage lvm2 open-iscsi +sysfsutils +sg3_utils diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index f7054e82..149672ac 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -2,3 +2,5 @@ iscsi-initiator-utils lvm2 genisoimage +sysfsutils +sg3_utils From 97d3d202ff6ad40d9201b43a37861f58a9503d14 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Sat, 19 Jan 2013 19:20:49 +0100 Subject: [PATCH 920/967] Support Tempest config file format transition * Identity server location just configure by an URI * Image service location resolved by service endpoint * Credentials will be defined only in the identity section * Whitebox gets it's own section * ssh username is per image option Change-Id: I3b0d51a323560451c5636363896cadb39e0ea2d6 --- lib/tempest | 65 ++++++++++++++++++++++++++++------------------------- 1 file changed, 34 insertions(+), 31 deletions(-) diff --git a/lib/tempest b/lib/tempest index c08a4306..9057854d 100644 --- a/lib/tempest +++ b/lib/tempest @@ -16,10 +16,9 @@ # - ``Q_ROUTER_NAME`` # - ``VIRT_DRIVER`` # - ``LIBVIRT_TYPE`` +# - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone # Optional Dependencies: -# IDENTITY_USE_SSL, IDENTITY_HOST, IDENTITY_PORT, IDENTITY_PATH # ALT_* (similar vars exists in keystone_data.sh) -# ``IMAGE_PORT``, ``IMAGE_HOST`` # ``LIVE_MIGRATION_AVAILABLE`` # ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION`` # ``DEFAULT_INSTANCE_TYPE`` @@ -124,13 +123,6 @@ function configure_tempest() { # copy every time, because the image UUIDS are going to change cp $TEMPEST_CONF.sample $TEMPEST_CONF - IDENTITY_USE_SSL=${IDENTITY_USE_SSL:-False} - IDENTITY_HOST=${IDENTITY_HOST:-127.0.0.1} - IDENTITY_PORT=${IDENTITY_PORT:-5000} - # TODO(jaypipes): This is dumb and needs to be removed - # from the Tempest configuration file entirely... 
- IDENTITY_PATH=${IDENTITY_PATH:-tokens} - password=${ADMIN_PASSWORD:-secrete} # See files/keystone_data.sh where alt_demo user @@ -203,15 +195,19 @@ function configure_tempest() { iniset $TEMPEST_CONF boto build_interval $BUILD_INTERVAL iniset $TEMPEST_CONF boto http_socket_timeout 5 - iniset $TEMPEST_CONF identity use_ssl $IDENTITY_USE_SSL - iniset $TEMPEST_CONF identity host $IDENTITY_HOST - iniset $TEMPEST_CONF identity port $IDENTITY_PORT - iniset $TEMPEST_CONF identity path $IDENTITY_PATH - - iniset $TEMPEST_CONF compute password "$password" - iniset $TEMPEST_CONF compute alt_username $ALT_USERNAME - iniset $TEMPEST_CONF compute alt_password "$password" - iniset $TEMPEST_CONF compute alt_tenant_name $ALT_TENANT_NAME + # Identity + iniset $TEMPEST_CONF identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/" + iniset $TEMPEST_CONF identity password "$password" + iniset $TEMPEST_CONF identity alt_username $ALT_USERNAME + iniset $TEMPEST_CONF identity alt_password "$password" + iniset $TEMPEST_CONF identity alt_tenant_name $ALT_TENANT_NAME + iniset $TEMPEST_CONF identity admin_password "$password" + + # Compute + iniset $TEMPEST_CONF compute password "$password" # DEPRECATED + iniset $TEMPEST_CONF compute alt_username $ALT_USERNAME # DEPRECATED + iniset $TEMPEST_CONF compute alt_password "$password" # DEPRECATED + iniset $TEMPEST_CONF compute alt_tenant_name $ALT_TENANT_NAME # DEPRECATED iniset $TEMPEST_CONF compute resize_available False iniset $TEMPEST_CONF compute change_password_available False iniset $TEMPEST_CONF compute compute_log_level ERROR @@ -223,41 +219,47 @@ function configure_tempest() { iniset $TEMPEST_CONF compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} #Skip until #1074039 is fixed iniset $TEMPEST_CONF compute run_ssh False - iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} + iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED iniset $TEMPEST_CONF compute network_for_ssh $PRIVATE_NETWORK_NAME iniset $TEMPEST_CONF compute ip_version_for_ssh 4 iniset $TEMPEST_CONF compute ssh_timeout $BUILD_TIMEOUT iniset $TEMPEST_CONF compute image_ref $image_uuid + iniset $TEMPEST_CONF compute image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} iniset $TEMPEST_CONF compute image_ref_alt $image_uuid_alt + iniset $TEMPEST_CONF compute image_alt_ssh_user ${DEFAULT_INSTANCE_USER:-cirros} iniset $TEMPEST_CONF compute flavor_ref $flavor_ref iniset $TEMPEST_CONF compute flavor_ref_alt $flavor_ref_alt - iniset $TEMPEST_CONF compute source_dir $NOVA_SOURCE_DIR iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} - # Inherited behavior, might be wrong - iniset $TEMPEST_CONF compute bin_dir $NOVA_BIN_DIR + iniset $TEMPEST_CONF compute source_dir $NOVA_SOURCE_DIR # DEPRECATED + iniset $TEMPEST_CONF compute bin_dir $NOVA_BIN_DIR # DEPRECATED + iniset $TEMPEST_CONF compute path_to_private_key $TEMPEST_DIR/id_rsa # DEPRECATED + iniset $TEMPEST_CONF compute db_uri $BASE_SQL_CONN/nova # DEPRECATED + + # Whitebox + iniset $TEMPEST_CONF whitebox source_dir $NOVA_SOURCE_DIR + iniset $TEMPEST_CONF whitebox bin_dir $NOVA_BIN_DIR # TODO(jaypipes): Create the key file here... right now, no whitebox # tests actually use a key. 
- iniset $TEMPEST_CONF compute path_to_private_key $TEMPEST_DIR/id_rsa - iniset $TEMPEST_CONF compute db_uri $BASE_SQL_CONN/nova + iniset $TEMPEST_CONF whitebox path_to_private_key $TEMPEST_DIR/id_rsa + iniset $TEMPEST_CONF whitebox db_uri $BASE_SQL_CONN/nova + # image - iniset $TEMPEST_CONF image host ${IMAGE_HOST:-127.0.0.1} - iniset $TEMPEST_CONF image port ${IMAGE_PORT:-9292} - iniset $TEMPEST_CONF image password "$password" + iniset $TEMPEST_CONF image password "$password" # DEPRECATED # identity-admin - iniset $TEMPEST_CONF "identity-admin" password "$password" + iniset $TEMPEST_CONF "identity-admin" password "$password" # DEPRECATED # compute admin - iniset $TEMPEST_CONF "compute-admin" password "$password" + iniset $TEMPEST_CONF "compute-admin" password "$password" # DEPRECATED # network admin - iniset $TEMPEST_CONF "network-admin" password "$password" + iniset $TEMPEST_CONF "network-admin" password "$password" # DEPRECATED # network iniset $TEMPEST_CONF network api_version 2.0 - iniset $TEMPEST_CONF network password "$password" + iniset $TEMPEST_CONF network password "$password" # DEPRECATED iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable" iniset $TEMPEST_CONF network public_network_id "$public_network_id" iniset $TEMPEST_CONF network public_router_id "$public_router_id" @@ -268,6 +270,7 @@ function configure_tempest() { iniset $TEMPEST_CONF boto s3_materials_path "$BOTO_MATERIALS_PATH" iniset $TEMPEST_CONF boto instance_type "$boto_instance_type" iniset $TEMPEST_CONF boto http_socket_timeout 30 + iniset $TEMPEST_CONF boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} echo "Created tempest configuration file:" cat $TEMPEST_CONF From 0dd34df455637ee29176525974d6dab93f530e66 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 28 Dec 2012 13:15:31 +0900 Subject: [PATCH 921/967] lib/quantum: refactor quantum plugins and third party As quantum plugin support is coming like floodlight, nvp and nec, it's worth while to refactor quantum plugin logic so that each plugin can be modified/enhanced intervening with other quantum plugin. And new plugin support can be added easily (hopefully) without modifying core logic. Change-Id: Ic5ab5b993272fdd3b4e779823323777a845ee681 --- AUTHORS | 1 + lib/nova | 2 +- lib/quantum | 328 +++--------------- lib/quantum_plugins/README.md | 34 ++ lib/quantum_plugins/bigswitch_floodlight | 55 +++ lib/quantum_plugins/linuxbridge | 79 +++++ lib/quantum_plugins/openvswitch | 144 ++++++++ lib/quantum_plugins/ovs_base | 49 +++ lib/quantum_plugins/ryu | 63 ++++ lib/quantum_thirdparty/README.md | 36 ++ .../bigswitch_floodlight | 0 lib/{ => quantum_thirdparty}/ryu | 30 +- 12 files changed, 517 insertions(+), 304 deletions(-) create mode 100644 lib/quantum_plugins/README.md create mode 100644 lib/quantum_plugins/bigswitch_floodlight create mode 100644 lib/quantum_plugins/linuxbridge create mode 100644 lib/quantum_plugins/openvswitch create mode 100644 lib/quantum_plugins/ovs_base create mode 100644 lib/quantum_plugins/ryu create mode 100644 lib/quantum_thirdparty/README.md rename lib/{ => quantum_thirdparty}/bigswitch_floodlight (100%) rename lib/{ => quantum_thirdparty}/ryu (69%) diff --git a/AUTHORS b/AUTHORS index 7ec1f663..35c0a522 100644 --- a/AUTHORS +++ b/AUTHORS @@ -19,6 +19,7 @@ Gabriel Hurley Gary Kotton Hengqing Hu Hua ZHANG +Isaku Yamahata Jake Dahn James E. 
Blair Jason Cannavale diff --git a/lib/nova b/lib/nova index f0456d61..7165ae22 100644 --- a/lib/nova +++ b/lib/nova @@ -229,7 +229,7 @@ function configure_nova() { configure_baremetal_nova_dirs fi - if is_service_enabled quantum && is_quantum_ovs_base_plugin "$Q_PLUGIN" && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then + if is_service_enabled quantum && is_quantum_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces cat <`` + * The corresponding file name should be same to service name, ````. + +functions +--------- +``lib/quantum`` calls the following functions when the ```` is enabled + +functions to be implemented +* ``configure_``: + set config files, create data dirs, etc + e.g. + sudo python setup.py deploy + iniset $XXXX_CONF... + +* ``init_``: + initialize databases, etc + +* ``install_``: + collect source and prepare + e.g. + git clone xxx + +* ``start_``: + start running processes, including screen + e.g. + screen_it XXXX "cd $XXXXY_DIR && $XXXX_DIR/bin/XXXX-bin" + +* ``stop_``: + stop running processes (non-screen) diff --git a/lib/bigswitch_floodlight b/lib/quantum_thirdparty/bigswitch_floodlight similarity index 100% rename from lib/bigswitch_floodlight rename to lib/quantum_thirdparty/bigswitch_floodlight diff --git a/lib/ryu b/lib/quantum_thirdparty/ryu similarity index 69% rename from lib/ryu rename to lib/quantum_thirdparty/ryu index 1292313e..f11951a3 100644 --- a/lib/ryu +++ b/lib/quantum_thirdparty/ryu @@ -17,6 +17,21 @@ RYU_OFP_HOST=${RYU_OFP_HOST:-127.0.0.1} RYU_OFP_PORT=${RYU_OFP_PORT:-6633} # Ryu Applications RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest} +# Ryu configuration +RYU_CONF_CONTENTS=${RYU_CONF_CONTENTS:-" +--app_lists=$RYU_APPS +--wsapi_host=$RYU_API_HOST +--wsapi_port=$RYU_API_PORT +--ofp_listen_host=$RYU_OFP_HOST +--ofp_tcp_listen_port=$RYU_OFP_PORT +--quantum_url=http://$Q_HOST:$Q_PORT +--quantum_admin_username=$Q_ADMIN_USERNAME +--quantum_admin_password=$SERVICE_PASSWORD +--quantum_admin_tenant_name=$SERVICE_TENANT_NAME +--quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0 +--quantum_auth_strategy=$Q_AUTH_STRATEGY +--quantum_controller_addr=tcp:$RYU_OFP_HOST:$RYU_OFP_PORT +"} function configure_ryu() { setup_develop $RYU_DIR @@ -31,26 +46,13 @@ function init_ryu() { RYU_CONF=$RYU_CONF_DIR/ryu.conf sudo rm -rf $RYU_CONF - cat < $RYU_CONF ---app_lists=$RYU_APPS ---wsapi_host=$RYU_API_HOST ---wsapi_port=$RYU_API_PORT ---ofp_listen_host=$RYU_OFP_HOST ---ofp_tcp_listen_port=$RYU_OFP_PORT -EOF + echo "${RYU_CONF_CONTENTS}" > $RYU_CONF } function install_ryu() { git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH } -function is_ryu_required() { - if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then - return 0 - fi - return 1 -} - function start_ryu() { screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --flagfile $RYU_CONF" } From 3c52922f4f2a94cec2c94fdd56474a9224c13213 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 21 Jan 2013 06:50:33 +0100 Subject: [PATCH 922/967] Use the correct directory for image files in tempest Change-Id: Ic40065a04b8015333b4cb6844211e9a0afb9d4c1 --- lib/tempest | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/lib/tempest b/lib/tempest index 9057854d..4d91d900 100644 --- a/lib/tempest +++ b/lib/tempest @@ -5,7 +5,7 @@ # ``functions`` file # ``lib/nova`` service is runing # -# - ``DEST`` +# - ``DEST``, 
``FILES`` # - ``ADMIN_PASSWORD`` # - ``DEFAULT_IMAGE_NAME`` # - ``S3_SERVICE_PORT`` @@ -48,7 +48,7 @@ BUILD_INTERVAL=3 BUILD_TIMEOUT=400 -BOTO_MATERIALS_PATH="$DEST/devstack/files/images/s3-materials/cirros-0.3.0" +BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-0.3.0" # Entry Points # ------------ @@ -294,8 +294,7 @@ function install_tempest() { function init_tempest() { local base_image_name=cirros-0.3.0-x86_64 # /opt/stack/devstack/files/images/cirros-0.3.0-x86_64-uec - local devstack_dir="$DEST/devstack" - local image_dir="$devstack_dir/files/images/${base_image_name}-uec" + local image_dir="$FILES/images/${base_image_name}-uec" local kernel="$image_dir/${base_image_name}-vmlinuz" local ramdisk="$image_dir/${base_image_name}-initrd" local disk_image="$image_dir/${base_image_name}-blank.img" From a534e0bf1a1ec0abc1d1f673af1b70fbf8239350 Mon Sep 17 00:00:00 2001 From: Sunil Thaha Date: Mon, 21 Jan 2013 17:00:50 +1000 Subject: [PATCH 923/967] Fixes nova-compute failing to start on Fedora 18 Fixes bug #1086784 Adds a rule to the policy-kit allowing the stack user to manage libvirt Change-Id: I6e9c0106c932f5f5f5c5c18ff79ac81a050c4599 --- lib/nova | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/lib/nova b/lib/nova index f0456d61..28933669 100644 --- a/lib/nova +++ b/lib/nova @@ -247,11 +247,25 @@ EOF LIBVIRT_DAEMON=libvirtd fi - # For distributions using polkit to authorize access to libvirt, - # configure polkit accordingly. - # Based on http://wiki.libvirt.org/page/SSHPolicyKitSetup + + if is_fedora; then - sudo bash -c 'cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla + # Starting with fedora 18 enable stack-user to virsh -c qemu:///system + # by creating a policy-kit rule for stack-user + if [[ "$os_RELEASE" -ge "18" ]]; then + rules_dir=/etc/polkit-1/rules.d + sudo mkdir -p $rules_dir + sudo bash -c "cat < $rules_dir/50-libvirt-$STACK_USER.rules +polkit.addRule(function(action, subject) { + if (action.id == 'org.libvirt.unix.manage' && + subject.user == '"$STACK_USER"') { + return polkit.Result.YES; + } +}); +EOF" + unset rules_dir + else + sudo bash -c 'cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla [libvirt Management Access] Identity=unix-group:libvirtd Action=org.libvirt.unix.manage @@ -259,6 +273,7 @@ ResultAny=yes ResultInactive=yes ResultActive=yes EOF' + fi elif is_suse; then # Work around the fact that polkit-default-privs overrules pklas # with 'unix-group:$group'. From 4a30b849ec69344e82b13070d839a1ffb4504e7c Mon Sep 17 00:00:00 2001 From: jiajun xu Date: Tue, 22 Jan 2013 11:49:03 +0800 Subject: [PATCH 924/967] Add check for RABBIT_HOST and RABBIT_PASSWORD in iniset_rpc_backend In multi-node environment, RABBIT_HOST and RABBIT_PASSWORD are used to indicate the server running rabbitmq service. We should check the variables in iniset_rpc_backend. 
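For illustration, a hedged localrc sketch for a compute-only node in such a multi-node setup (values are examples, not part of the patch); rabbit is not in ENABLED_SERVICES locally, but the two variables must still reach iniset_rpc_backend:

    # localrc on a secondary compute node
    ENABLED_SERVICES=n-cpu,n-net,n-api
    RABBIT_HOST=192.168.1.10      # controller running rabbitmq-server (example IP)
    RABBIT_PASSWORD=secrete       # must match the controller's password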
Change-Id: Iaea8cc87315be91429a8747254310d6474930eec --- lib/rpc_backend | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index 4d7f8d2f..f35f9dbd 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -100,7 +100,7 @@ function iniset_rpc_backend() { iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_zmq elif is_service_enabled qpid; then iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_qpid - elif is_service_enabled rabbit; then + elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_kombu iniset $file $section rabbit_host $RABBIT_HOST iniset $file $section rabbit_password $RABBIT_PASSWORD From ad8b27626ecef3509a1ffca8dac7392d32e1b2d6 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Thu, 10 Jan 2013 15:40:01 +0100 Subject: [PATCH 925/967] have the run and logs files in $SWIFT_DATA_DIR - Set all the run lock and logs files to go to $SWIFT_DATA_DIR. Change-Id: I42b72572e9700457475398043057d37d0dbc65ac --- files/swift/rsyncd.conf | 28 ++++++++++++++-------------- lib/swift | 16 ++++++++-------- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/files/swift/rsyncd.conf b/files/swift/rsyncd.conf index 4e0dcbf9..c670531b 100644 --- a/files/swift/rsyncd.conf +++ b/files/swift/rsyncd.conf @@ -1,79 +1,79 @@ uid = %USER% gid = %GROUP% -log file = /var/log/rsyncd.log -pid file = /var/run/rsyncd.pid +log file = %SWIFT_DATA_DIR%/logs/rsyncd.log +pid file = %SWIFT_DATA_DIR%/run/rsyncd.pid address = 127.0.0.1 [account6012] max connections = 25 path = %SWIFT_DATA_DIR%/1/node/ read only = false -lock file = /var/lock/account6012.lock +lock file = %SWIFT_DATA_DIR%/run/account6012.lock [account6022] max connections = 25 path = %SWIFT_DATA_DIR%/2/node/ read only = false -lock file = /var/lock/account6022.lock +lock file = %SWIFT_DATA_DIR%/run/account6022.lock [account6032] max connections = 25 path = %SWIFT_DATA_DIR%/3/node/ read only = false -lock file = /var/lock/account6032.lock +lock file = %SWIFT_DATA_DIR%/run/account6032.lock [account6042] max connections = 25 path = %SWIFT_DATA_DIR%/4/node/ read only = false -lock file = /var/lock/account6042.lock +lock file = %SWIFT_DATA_DIR%/run/account6042.lock [container6011] max connections = 25 path = %SWIFT_DATA_DIR%/1/node/ read only = false -lock file = /var/lock/container6011.lock +lock file = %SWIFT_DATA_DIR%/run/container6011.lock [container6021] max connections = 25 path = %SWIFT_DATA_DIR%/2/node/ read only = false -lock file = /var/lock/container6021.lock +lock file = %SWIFT_DATA_DIR%/run/container6021.lock [container6031] max connections = 25 path = %SWIFT_DATA_DIR%/3/node/ read only = false -lock file = /var/lock/container6031.lock +lock file = %SWIFT_DATA_DIR%/run/container6031.lock [container6041] max connections = 25 path = %SWIFT_DATA_DIR%/4/node/ read only = false -lock file = /var/lock/container6041.lock +lock file = %SWIFT_DATA_DIR%/run/container6041.lock [object6010] max connections = 25 path = %SWIFT_DATA_DIR%/1/node/ read only = false -lock file = /var/lock/object6010.lock +lock file = %SWIFT_DATA_DIR%/run/object6010.lock [object6020] max connections = 25 path = %SWIFT_DATA_DIR%/2/node/ read only = false -lock file = /var/lock/object6020.lock +lock file = %SWIFT_DATA_DIR%/run/object6020.lock [object6030] max connections = 25 path = %SWIFT_DATA_DIR%/3/node/ read only = false -lock file = /var/lock/object6030.lock +lock file 
= %SWIFT_DATA_DIR%/run/object6030.lock [object6040] max connections = 25 path = %SWIFT_DATA_DIR%/4/node/ read only = false -lock file = /var/lock/object6040.lock +lock file = %SWIFT_DATA_DIR%/run/object6040.lock diff --git a/lib/swift b/lib/swift index a4faf031..5ba7e56f 100644 --- a/lib/swift +++ b/lib/swift @@ -95,13 +95,13 @@ function configure_swift() { setup_develop $SWIFT_DIR # Make sure to kill all swift processes first - swift-init all stop || true + swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true # First do a bit of setup by creating the directories and # changing the permissions so we can run it as our user. USER_GROUP=$(id -g) - sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache} + sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache,run,logs} sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR} # Create a loopback disk and format it to XFS. @@ -143,8 +143,8 @@ function configure_swift() { sudo chown -R $USER: ${node} done - sudo mkdir -p ${SWIFT_CONFIG_DIR}/{object,container,account}-server /var/run/swift - sudo chown -R $USER: ${SWIFT_CONFIG_DIR} /var/run/swift + sudo mkdir -p ${SWIFT_CONFIG_DIR}/{object,container,account}-server + sudo chown -R $USER: ${SWIFT_CONFIG_DIR} if [[ "$SWIFT_CONFIG_DIR" != "/etc/swift" ]]; then # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed. @@ -311,7 +311,7 @@ function configure_swiftclient() { function init_swift() { local node_number # Make sure to kill all swift processes first - swift-init all stop || true + swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true # This is where we create three different rings for swift with # different object servers binding on different ports. @@ -363,15 +363,15 @@ function start_swift() { # proxy service so we can run it in foreground in screen. # ``swift-init ... {stop|restart}`` exits with '1' if no servers are running, # ignore it just in case - swift-init all restart || true - swift-init proxy stop || true + swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true + swift-init --run-dir=${SWIFT_DATA_DIR}/run proxy stop || true screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v" } # stop_swift() - Stop running processes (non-screen) function stop_swift() { # screen normally killed by unstack.sh - swift-init all stop || true + swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true } # Restore xtrace From 767cd631796b5404e6331cee72977a1fcec68024 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Fri, 18 Jan 2013 17:15:44 -0500 Subject: [PATCH 926/967] Set MySQL slow log to record every query Since devstack is for development, lets record all SQL queries to enable debugging, and further development. 
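Once every query is logged, the file configured above can be inspected directly; a small sketch (commands assume the stock mysqldumpslow tool shipped with the MySQL server packages):

    # follow statements as they are issued
    sudo tail -f /var/log/mysql/mysql-slow.log
    # or summarize the ten most frequently recorded statements
    sudo mysqldumpslow -s c -t 10 /var/log/mysql/mysql-slow.log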
Change-Id: Idb4078a0d3a84151ad4c506f8861637d84ae47ad --- lib/databases/mysql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 965df6ee..95242536 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -68,10 +68,10 @@ default-storage-engine = InnoDB" $MY_CONF sudo sed -i -e "/^\[mysqld\]/ a \ log-slow-queries = /var/log/mysql/mysql-slow.log" $MY_CONF - # Log any query taking longer than a second + # Log all queries (any query taking longer than 0 seconds) sudo sed -i '/long.query.time/d' $MY_CONF sudo sed -i -e "/^\[mysqld\]/ a \ -long-query-time = 1" $MY_CONF +long-query-time = 0" $MY_CONF # Log all non-indexed queries sudo sed -i '/log.queries.not.using.indexes/d' $MY_CONF From 74c67fd8616f778061c27d4c929c9364b59e2b92 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 22 Jan 2013 18:10:16 -0500 Subject: [PATCH 927/967] add numpy to package list, saves lots of time websockify was added to the pip requires, and it has a dependency of numpy. Because we didn't specify it in the package list, it was built from source every time, adding 3 minutes to an average run. Stop testing whether numpy compiles 100 times a day in CI. Change-Id: Ic9d9b8135a917deb846911c6b266aec87d05781a --- files/apts/nova | 1 + files/rpms/nova | 1 + 2 files changed, 2 insertions(+) diff --git a/files/apts/nova b/files/apts/nova index b7d1e928..39b4060e 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -31,6 +31,7 @@ python-libvirt python-libxml2 python-routes python-netaddr +python-numpy # used by websockify for spice console python-pastedeploy python-eventlet python-cheetah diff --git a/files/rpms/nova b/files/rpms/nova index 88ad8c31..568ee7f5 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -10,6 +10,7 @@ kvm libvirt-bin # NOPRIME libvirt-python libxml2-python +numpy # needed by websockify for spice console m2crypto mysql-server # NOPRIME parted From b0b98b709650c1ffb940e66d26baf29d38515692 Mon Sep 17 00:00:00 2001 From: Chris Yeoh Date: Wed, 23 Jan 2013 21:14:49 +1030 Subject: [PATCH 928/967] Renames old $devstack_dir to $TOP_DIR This patch renames $devstack_dir which is no longer defined to $TOP_DIR. Fixes problem where initialisation of ec2 images for tempest testing was failing. Change-Id: Ie0cd43209e58c903b1fe6cc528a4971896e6fab1 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 4d91d900..c163a0d4 100644 --- a/lib/tempest +++ b/lib/tempest @@ -304,7 +304,7 @@ function init_tempest() { echo "Prepare aki/ari/ami Images" ( #new namespace # tenant:demo ; user: demo - source $devstack_dir/accrc/demo/demo + source $TOP_DIR/accrc/demo/demo euca-bundle-image -i "$kernel" --kernel true -d "$BOTO_MATERIALS_PATH" euca-bundle-image -i "$ramdisk" --ramdisk true -d "$BOTO_MATERIALS_PATH" euca-bundle-image -i "$disk_image" -d "$BOTO_MATERIALS_PATH" From 74759aa17a3b9e687aebf30c11b9bcb477aa48ef Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 24 Jan 2013 14:19:55 -0600 Subject: [PATCH 929/967] Remove DEFAULT_STACK_USER, just use STACK_USER STACK_USER needs to be set for more than just stack.sh, there was no real distinction for using DEFAULT_STACK_USER instead of just setting STACK_USER directly in stackrc and allowing it to be overridden in localrc. 
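For illustration (not part of the patch), overriding the computed default then becomes an ordinary localrc setting; the value shown is only an example:

    # localrc: pin the user that owns /opt/stack and runs the services,
    # instead of letting stackrc derive it from EUID / whoami
    STACK_USER=stack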
Change-Id: I9e8d70db29bb421f1ce3dbf40a5ad299cc7ea785 --- stack.sh | 4 +--- stackrc | 8 ++++++-- tools/build_ramdisk.sh | 12 ++++++------ tools/build_uec.sh | 8 ++++---- tools/copy_dev_environment_to_uec.sh | 8 ++++---- tools/xen/build_xva.sh | 4 ++-- 6 files changed, 23 insertions(+), 21 deletions(-) diff --git a/stack.sh b/stack.sh index 005d88e7..e50cc493 100755 --- a/stack.sh +++ b/stack.sh @@ -160,7 +160,6 @@ VERBOSE=$(trueorfalse True $VERBOSE) # sudo privileges and runs as that user. if [[ $EUID -eq 0 ]]; then - STACK_USER=$DEFAULT_STACK_USER ROOTSLEEP=${ROOTSLEEP:-10} echo "You are running this script as root." echo "In $ROOTSLEEP seconds, we will create a user '$STACK_USER' and run as that user" @@ -196,7 +195,6 @@ if [[ $EUID -eq 0 ]]; then fi exit 1 else - STACK_USER=`whoami` # We're not **root**, make sure ``sudo`` is available is_package_installed sudo || die "Sudo is required. Re-run stack.sh as root ONE TIME ONLY to set up sudo." @@ -1291,7 +1289,7 @@ fi CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT") echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \ - SERVICE_HOST SERVICE_PROTOCOL TLS_IP; do + SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP; do echo $i=${!i} >>$TOP_DIR/.stackenv done diff --git a/stackrc b/stackrc index cfc4d1fb..789fc82d 100644 --- a/stackrc +++ b/stackrc @@ -12,8 +12,12 @@ DATA_DIR=${DEST}/data # Select the default database DATABASE_TYPE=mysql -# Default stack user -DEFAULT_STACK_USER=stack +# Determine stack user +if [[ $EUID -eq 0 ]]; then + STACK_USER=stack +else + STACK_USER=$(whoami) +fi # Specify which services to launch. These generally correspond to # screen tabs. To change the default list, use the ``enable_service`` and diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh index cfcca51f..2c455685 100755 --- a/tools/build_ramdisk.sh +++ b/tools/build_ramdisk.sh @@ -125,17 +125,17 @@ if [ ! -r $DEV_FILE ]; then # Create a stack user that is a member of the libvirtd group so that stack # is able to interact with libvirt. 
chroot $MNTDIR groupadd libvirtd - chroot $MNTDIR useradd $DEFAULT_STACK_USER -s /bin/bash -d $DEST -G libvirtd + chroot $MNTDIR useradd $STACK_USER -s /bin/bash -d $DEST -G libvirtd mkdir -p $MNTDIR/$DEST - chroot $MNTDIR chown $DEFAULT_STACK_USER $DEST + chroot $MNTDIR chown $STACK_USER $DEST # A simple password - pass - echo $DEFAULT_STACK_USER:pass | chroot $MNTDIR chpasswd + echo $STACK_USER:pass | chroot $MNTDIR chpasswd echo root:$ROOT_PASSWORD | chroot $MNTDIR chpasswd # And has sudo ability (in the future this should be limited to only what # stack requires) - echo "$DEFAULT_STACK_USER ALL=(ALL) NOPASSWD: ALL" >> $MNTDIR/etc/sudoers + echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> $MNTDIR/etc/sudoers umount $MNTDIR rmdir $MNTDIR @@ -187,7 +187,7 @@ git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH # Use this version of devstack rm -rf $MNTDIR/$DEST/devstack cp -pr $CWD $MNTDIR/$DEST/devstack -chroot $MNTDIR chown -R $DEFAULT_STACK_USER $DEST/devstack +chroot $MNTDIR chown -R $STACK_USER $DEST/devstack # Configure host network for DHCP mkdir -p $MNTDIR/etc/network @@ -225,7 +225,7 @@ EOF # Make the run.sh executable chmod 755 $RUN_SH -chroot $MNTDIR chown $DEFAULT_STACK_USER $DEST/run.sh +chroot $MNTDIR chown $STACK_USER $DEST/run.sh umount $MNTDIR rmdir $MNTDIR diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 5748b390..6c4a26c2 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -207,11 +207,11 @@ ROOTSLEEP=0 `cat $TOP_DIR/localrc` LOCAL_EOF fi -useradd -U -G sudo -s /bin/bash -d /opt/stack -m $DEFAULT_STACK_USER -echo $DEFAULT_STACK_USER:pass | chpasswd +useradd -U -G sudo -s /bin/bash -d /opt/stack -m $STACK_USER +echo $STACK_USER:pass | chpasswd mkdir -p /opt/stack/.ssh echo "$PUB_KEY" > /opt/stack/.ssh/authorized_keys -chown -R $DEFAULT_STACK_USER /opt/stack +chown -R $STACK_USER /opt/stack chmod 700 /opt/stack/.ssh chmod 600 /opt/stack/.ssh/authorized_keys @@ -224,7 +224,7 @@ fi # Run stack.sh cat >> $vm_dir/uec/user-data< $STAGING_DIR/etc/sudoers.d/50_stack_sh ) # Copy over your ssh keys and env if desired @@ -67,7 +67,7 @@ rm -rf $STAGING_DIR/$DEST/devstack cp_it . $STAGING_DIR/$DEST/devstack # Give stack ownership over $DEST so it may do the work needed -chroot $STAGING_DIR chown -R $DEFAULT_STACK_USER $DEST +chroot $STAGING_DIR chown -R $STACK_USER $DEST # Unmount umount $STAGING_DIR diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index f3f166fe..0e874cfe 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -65,8 +65,8 @@ cd $TOP_DIR cat <$STAGING_DIR/etc/rc.local # network restart required for getting the right gateway /etc/init.d/networking restart -chown -R $DEFAULT_STACK_USER /opt/stack -su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" $DEFAULT_STACK_USER +chown -R $STACK_USER /opt/stack +su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" $STACK_USER exit 0 EOF From 9f22f07a154a2d94c0e0f6d419497e3f94fbe929 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 28 Jan 2013 09:53:38 -0500 Subject: [PATCH 930/967] Cinder: update osapi_volume_extension default. Updates the Cinder config file to use the new extension loader location. 
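Roughly, the resulting cinder.conf entry (shown only to illustrate what the iniset call below produces) is:

    [DEFAULT]
    # extensions now load from cinder.api.contrib rather than
    # the old cinder.api.openstack.volume.contrib location
    osapi_volume_extension = cinder.api.contrib.standard_extensions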
Change-Id: I515e16e00b54c69ae3c09e64841818eb4a9c8f73 --- lib/cinder | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index a730cd62..5f4f979b 100644 --- a/lib/cinder +++ b/lib/cinder @@ -172,7 +172,7 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT sql_connection $dburl iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI iniset $CINDER_CONF DEFAULT root_helper "sudo ${CINDER_ROOTWRAP}" - iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.openstack.volume.contrib.standard_extensions + iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.contrib.standard_extensions iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH if is_service_enabled tls-proxy; then From 98e18e99d18a1a294fb2a7f3dceb48bd81520c03 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Mon, 28 Jan 2013 14:26:56 +0000 Subject: [PATCH 931/967] Quantum root_helper update We are currently moving root_helper to the [AGENT] section. This patch is intended to enable the transition for that process. Change-Id: Iff8144f74a1a5f8b0fc9af44bccf0213f4bfad7e --- lib/quantum | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/quantum b/lib/quantum index 27b3509f..c5fc6e81 100644 --- a/lib/quantum +++ b/lib/quantum @@ -472,9 +472,8 @@ function _configure_quantum_metadata_agent() { # _configure_quantum_plugin_agent() - Set config files for quantum plugin agent # It is called when q-agt is enabled. function _configure_quantum_plugin_agent() { - # Specify the default root helper prior to agent configuration to - # ensure that an agent's configuration can override the default. + # ensure that an agent's configuration can override the default iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" # Configure agent for plugin @@ -544,6 +543,9 @@ function _quantum_setup_rootwrap() { chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/quantum-rootwrap + + # Update the root_helper + iniset $QUANTUM_CONF AGENT root_helper "$Q_RR_COMMAND" } # Configures keystone integration for quantum service and agents From 41bf4520231bb6454333d6acb1e011bfc9976bae Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 28 Jan 2013 14:04:39 -0600 Subject: [PATCH 932/967] Handle nested xtrace states The lib/database abstraction includes the appropriate database file that also contains the $XTRACE bits at entry and exit. The nested XTRACE handling overwrote the value from lib/database. So...make the nested files use their own XTRACE variables. 
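The save/restore idiom in question captures the current xtrace state as the literal command that would restore it (set -o xtrace or set +o xtrace) and replays that command on exit; if a sourced file reuses the caller's variable name, the outer saved state is clobbered. A minimal standalone illustration, with made-up file names rather than DevStack code:

    # outer.sh: save the caller's xtrace setting under a name unique to this file
    OUTER_XTRACE=$(set +o | grep xtrace)   # yields "set -o xtrace" or "set +o xtrace"
    set +o xtrace
    source inner.sh                        # safe only if inner.sh uses its own variable
    $OUTER_XTRACE                          # replay the saved command to restore tracing

    # inner.sh: same pattern, but under a distinct name so it cannot clobber OUTER_XTRACE
    INNER_XTRACE=$(set +o | grep xtrace)
    set +o xtrace
    # ... work with tracing disabled ...
    $INNER_XTRACE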
Change-Id: Ibdfc8d7d1e1457a9bc889b781ce176b417789ea1 --- lib/databases/mysql | 4 ++-- lib/databases/postgresql | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 95242536..94aedc64 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -5,7 +5,7 @@ # DATABASE_{HOST,USER,PASSWORD} must be defined # Save trace setting -XTRACE=$(set +o | grep xtrace) +MY_XTRACE=$(set +o | grep xtrace) set +o xtrace register_database mysql @@ -121,4 +121,4 @@ function database_connection_url_mysql { } # Restore xtrace -$XTRACE +$MY_XTRACE diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 04db714a..2c37f49b 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -5,7 +5,7 @@ # DATABASE_{HOST,USER,PASSWORD} must be defined # Save trace setting -XTRACE=$(set +o | grep xtrace) +PG_XTRACE=$(set +o | grep xtrace) set +o xtrace register_database postgresql @@ -76,4 +76,4 @@ function database_connection_url_postgresql { } # Restore xtrace -$XTRACE +$PG_XTRACE From 95fb0d440ceb4934c3116454cc2ff6349d39fca1 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Mon, 28 Jan 2013 16:24:14 -0500 Subject: [PATCH 933/967] Run setup_develop for tempest. This setups the development environment for tempest. Without running setup_develop for tempest import tempest.* breaks outside of the tempest directory. Change-Id: I6954733d68125dd116cc9bfa58b9aece674405c3 --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 4d91d900..5ad9b320 100644 --- a/lib/tempest +++ b/lib/tempest @@ -55,6 +55,7 @@ BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-0.3.0" # configure_tempest() - Set config files, create data dirs, etc function configure_tempest() { + setup_develop $TEMPEST_DIR local image_lines local images local num_images From d71d6e71b37d97e3fd4922608ae41f9ff53bc4d0 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Mon, 28 Jan 2013 19:15:57 -0500 Subject: [PATCH 934/967] Dns stops working on precise when network manager is enabled In Precise and Quantal, we nuke the dnsmasq launched by NetworkManager Fixes LP# 993666 Change-Id: I4b39010765e2cbbea1ca3fc3120bf329015b7a56 --- stack.sh | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 005d88e7..4c10c74f 100755 --- a/stack.sh +++ b/stack.sh @@ -967,7 +967,14 @@ fi if is_service_enabled n-net q-dhcp; then # Delete traces of nova networks from prior runs - sudo killall dnsmasq || true + # Do not kill any dnsmasq instance spawned by NetworkManager + netman_pid=$(pidof NetworkManager || true) + if [ -z "$netman_pid" ]; then + sudo killall dnsmasq || true + else + sudo ps h -o pid,ppid -C dnsmasq | grep -v $netman_pid | awk '{print $1}' | sudo xargs kill || true + fi + clean_iptables rm -rf ${NOVA_STATE_PATH}/networks sudo mkdir -p ${NOVA_STATE_PATH}/networks From 63c6c2b006a3b23ecbb324de93db51d7725ccd52 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Thu, 24 Jan 2013 13:13:51 +0000 Subject: [PATCH 935/967] Improved feedback for domU install on XS Change-Id: I5b9d07493eb334169fb2643047a014f56ee265fa --- tools/xen/install_os_domU.sh | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index b4fbb699..0e275705 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -236,6 +236,12 @@ SNAME_PREPARED="template_prepared" SNAME_FIRST_BOOT="before_first_boot" function wait_for_VM_to_halt() 
{ + set +x + echo "Waiting for the VM to halt. Progress in-VM can be checked with vncviewer:" + mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.') + domid=$(xe vm-list name-label="$GUEST_NAME" params=dom-id minimal=true) + port=$(xenstore-read /local/domain/$domid/console/vnc-port) + echo "vncviewer -via $mgmt_ip localhost:${port:2}" while true do state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted) @@ -243,10 +249,11 @@ function wait_for_VM_to_halt() { then break else - echo "Waiting for "$GUEST_NAME" to finish installation..." + echo -n "." sleep 20 fi done + set -x } templateuuid=$(xe template-list name-label="$TNAME") @@ -405,12 +412,14 @@ if [ "$WAIT_TILL_LAUNCH" = "1" ] && [ -e ~/.ssh/id_rsa.pub ] && [ "$COPYENV" = # Fail if the expected text is not found ssh_no_check -q stack@$DOMU_IP 'cat run.sh.log' | grep -q 'stack.sh completed in' + set +x echo "################################################################################" echo "" echo "All Finished!" echo "You can visit the OpenStack Dashboard" echo "at http://$DOMU_IP, and contact other services at the usual ports." else + set +x echo "################################################################################" echo "" echo "All Finished!" From 5bd96f967fc83d8fdbed707113b19117e4de05cf Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Tue, 29 Jan 2013 15:12:20 -0500 Subject: [PATCH 936/967] Support a flag for setting Keystone Token backend Fixes LP# 1073274 Change-Id: Ib4373a4a4d31e440e40f977f8f7ffa312f2d97be --- lib/keystone | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/keystone b/lib/keystone index 7a70cc41..57146708 100644 --- a/lib/keystone +++ b/lib/keystone @@ -39,6 +39,9 @@ KEYSTONECLIENT_DIR=$DEST/python-keystoneclient KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql} KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates +# Select the backend for Tokens +KEYSTONE_TOKEN_BACKEND=${KEYSTONE_TOKEN_BACKEND:-sql} + # Select Keystone's token format # Choose from 'UUID' and 'PKI' KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-PKI} @@ -108,6 +111,12 @@ function configure_keystone() { # Append the S3 bits iniset $KEYSTONE_CONF filter:s3_extension paste.filter_factory "keystone.contrib.s3:S3Extension.factory" + if [[ "$KEYSTONE_TOKEN_BACKEND" = "sql" ]]; then + iniset $KEYSTONE_CONF token driver keystone.token.backends.sql.Token + else + iniset $KEYSTONE_CONF token driver keystone.token.backends.kvs.Token + fi + if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then # Configure ``keystone.conf`` to use sql iniset $KEYSTONE_CONF catalog driver keystone.catalog.backends.sql.Catalog From a263ef283e422fc25e7ecff2e9c272eab1a336a6 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Mon, 28 Jan 2013 21:56:02 -0500 Subject: [PATCH 937/967] Pick just the first route when looking for the host interface The current code will fail miserably if multiple default routes are present (perfectly normal if they have different metrics). 
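For example, against canned output containing two default routes (the sample lines below are made up for illustration), the unpatched pipeline prints both interface names, while appending head -1 keeps only the top entry:

    printf 'default via 10.0.0.1 dev eth0  metric 100\ndefault via 192.168.1.1 dev wlan0  metric 600\n' |
        sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1
    # prints: eth0   (without head -1 it would print eth0 and then wlan0)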
Fixing the code to pick the first/top entry Fixes LP# 1084470 Change-Id: Ieda16b575685071ff831c92e6b2a29737d6f849b --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 005d88e7..27096670 100755 --- a/stack.sh +++ b/stack.sh @@ -253,7 +253,7 @@ FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} # Find the interface used for the default route -HOST_IP_IFACE=${HOST_IP_IFACE:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }')} +HOST_IP_IFACE=${HOST_IP_IFACE:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)} # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable if [ -z "$HOST_IP" -o "$HOST_IP" == "dhcp" ]; then HOST_IP="" From d9ca2b2fd657031f4d8ff84c0d137d2b9cabb8fb Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Wed, 30 Jan 2013 13:52:43 +0000 Subject: [PATCH 938/967] Ensure that debug agent has root_helper in [AGENT] section This is a temporary fix until the Quantum patch lands. Then devstack will be updated to remove all of the root_helper settings in the DEFAULT section. The Quantum patch in Question is: https://review.openstack.org/#/c/20603/ Change-Id: Ie17ae76e8b525481e1002bd941812390cf2e2afc --- lib/quantum | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/quantum b/lib/quantum index c5fc6e81..d5733b3a 100644 --- a/lib/quantum +++ b/lib/quantum @@ -410,6 +410,9 @@ function _configure_quantum_debug_command() { iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT debug False iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND" + # Intermediate fix until Quantum patch lands and then line above will + # be cleaned. + iniset $QUANTUM_TEST_CONFIG_FILE AGENT root_helper "$Q_RR_COMMAND" _quantum_setup_keystone $QUANTUM_TEST_CONFIG_FILE DEFAULT set_auth_url _quantum_setup_interface_driver $QUANTUM_TEST_CONFIG_FILE From 4196d5565e48608c64fccdd9e17fcc01dd8d06fe Mon Sep 17 00:00:00 2001 From: Jakub Ruzicka Date: Wed, 30 Jan 2013 15:35:54 +0100 Subject: [PATCH 939/967] Introduce get_python_exec_prefix function. get_python_exec_prefix returns the path to the direcotry where python executables are installed, that is /usr/bin on Fedora and /usr/local/bin everywhere else. It is used to properly locate OpenStack executables. Fixes: bug #1068386 Change-Id: I228498ebe2762568d00757d065e37377ee2c8fb3 --- functions | 16 +++++++++++----- lib/ceilometer | 2 +- lib/cinder | 2 +- lib/glance | 2 +- lib/nova | 2 +- 5 files changed, 15 insertions(+), 9 deletions(-) diff --git a/functions b/functions index 79483785..68aec5d2 100644 --- a/functions +++ b/functions @@ -1110,17 +1110,23 @@ function add_user_to_group() { } +# Get the path to the direcotry where python executables are installed. +# get_python_exec_prefix +function get_python_exec_prefix() { + if is_fedora; then + echo "/usr/bin" + else + echo "/usr/local/bin" + fi +} + # Get the location of the $module-rootwrap executables, where module is cinder # or nova. # get_rootwrap_location module function get_rootwrap_location() { local module=$1 - if is_fedora; then - echo "/usr/bin/$module-rootwrap" - else - echo "/usr/local/bin/$module-rootwrap" - fi + echo "$(get_python_exec_prefix)/$module-rootwrap" } # Get the path to the pip command. 
diff --git a/lib/ceilometer b/lib/ceilometer index 0fae3973..41a5f53d 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -40,7 +40,7 @@ CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer} if [ -d $CEILOMETER_DIR/bin ] ; then CEILOMETER_BIN_DIR=$CEILOMETER_DIR/bin else - CEILOMETER_BIN_DIR=/usr/local/bin + CEILOMETER_BIN_DIR=$(get_python_exec_prefix) fi # cleanup_ceilometer() - Remove residual data files, anything left over from previous diff --git a/lib/cinder b/lib/cinder index 5f4f979b..28b3caa5 100644 --- a/lib/cinder +++ b/lib/cinder @@ -47,7 +47,7 @@ CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} if [[ -d $CINDER_DIR/bin ]]; then CINDER_BIN_DIR=$CINDER_DIR/bin else - CINDER_BIN_DIR=/usr/local/bin + CINDER_BIN_DIR=$(get_python_exec_prefix) fi # Name of the lvm volume group to use/create for iscsi volumes diff --git a/lib/glance b/lib/glance index 1c56a675..5d48129d 100644 --- a/lib/glance +++ b/lib/glance @@ -44,7 +44,7 @@ GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json if [[ -d $GLANCE_DIR/bin ]]; then GLANCE_BIN_DIR=$GLANCE_DIR/bin else - GLANCE_BIN_DIR=/usr/local/bin + GLANCE_BIN_DIR=$(get_python_exec_prefix) fi # Glance connection info. Note the port must be specified. diff --git a/lib/nova b/lib/nova index 7e5bb996..41162781 100644 --- a/lib/nova +++ b/lib/nova @@ -49,7 +49,7 @@ NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} if [[ -d $NOVA_DIR/bin ]]; then NOVA_BIN_DIR=$NOVA_DIR/bin else - NOVA_BIN_DIR=/usr/local/bin + NOVA_BIN_DIR=$(get_python_exec_prefix) fi # Set the paths of certain binaries From 07db713549fab67e7288ebe8e2190f9629b2e9df Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 30 Jan 2013 13:07:25 -0800 Subject: [PATCH 940/967] Improve millisecond logging Always display 3 digits for milliseconds (appends 0s) Based on I3f1461839258be0723e2d3616ec225a830d13029 Change-Id: I48b3f3781b4d34ed6a5fb9e4e78cee919afda6c1 --- lib/cinder | 6 +++--- lib/nova | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/cinder b/lib/cinder index 5f4f979b..014fefaf 100644 --- a/lib/cinder +++ b/lib/cinder @@ -193,10 +193,10 @@ function configure_cinder() { if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then # Add color to logging output - iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s%(color)s] %(instance)s%(color)s%(message)s" - iniset $CINDER_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $CINDER_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" iniset $CINDER_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - iniset $CINDER_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)d TRACE %(name)s %(instance)s" + iniset $CINDER_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" fi if [ "$CINDER_DRIVER" == "XenAPINFS" ]; then diff --git a/lib/nova b/lib/nova index 7e5bb996..6cf5a5d8 100644 --- a/lib/nova +++ b/lib/nova @@ -424,13 +424,13 @@ function 
create_nova_conf() { fi if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then # Add color to logging output - iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" - iniset $NOVA_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $NOVA_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" iniset $NOVA_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - iniset $NOVA_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)d TRACE %(name)s %(instance)s" + iniset $NOVA_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" else # Show user_name and project_name instead of user_id and project_id - iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" + iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" fi if is_service_enabled ceilometer; then iniset $NOVA_CONF DEFAULT instance_usage_audit "True" From a814f22ce49a3674fd6f266f52bf7de990521adc Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Thu, 31 Jan 2013 15:21:43 +0000 Subject: [PATCH 941/967] Minor fix to readme - corrected indentation for some commands Change-Id: I0a16c59d258be4ce8bb8cdebfb3d1cbc30ce9d54 --- tools/xen/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/xen/README.md b/tools/xen/README.md index f20ad04b..1cd45cff 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -18,7 +18,7 @@ https://www.citrix.com/English/ss/downloads/details.asp?downloadId=2311504&produ For details on installation, see: http://wiki.openstack.org/XenServer/Install Here are some sample Xenserver network settings for when you are just -getting started (I use settings like this with a lappy + cheap wifi router): +getting started (Settings like this have been used with a laptop + cheap wifi router): * XenServer Host IP: 192.168.1.10 * XenServer Netmask: 255.255.255.0 @@ -29,9 +29,9 @@ Step 2: Download devstack -------------------------- On your XenServer host, run the following commands as root: -wget --no-check-certificate https://github.com/openstack-dev/devstack/zipball/master -unzip -o master -d ./devstack -cd devstack/*/ + wget --no-check-certificate https://github.com/openstack-dev/devstack/zipball/master + unzip -o master -d ./devstack + cd devstack/*/ Step 3: Configure your localrc inside the devstack directory ------------------------------------------------------------ From f127e2f316f1161bacdf4cccdbc3e56b2b8a54a8 Mon Sep 17 00:00:00 2001 From: Brad Topol Date: Tue, 22 Jan 2013 10:17:50 -0600 Subject: [PATCH 942/967] Add optional 
silent install and config of ldap to devstack Edited initial ldap entries and olcdb template file as recommended by Brant. Change-Id: I1404cc5c754f878e32a2d10254840d092211e6e6 --- files/apts/ldap | 3 ++ files/ldap/manager.ldif.in | 10 ++++++ files/ldap/openstack.ldif | 21 +++++++++++ files/rpms/ldap | 3 ++ lib/keystone | 15 ++++++++ lib/ldap | 74 ++++++++++++++++++++++++++++++++++++++ stack.sh | 15 ++++++++ 7 files changed, 141 insertions(+) create mode 100644 files/apts/ldap create mode 100644 files/ldap/manager.ldif.in create mode 100644 files/ldap/openstack.ldif create mode 100644 files/rpms/ldap create mode 100644 lib/ldap diff --git a/files/apts/ldap b/files/apts/ldap new file mode 100644 index 00000000..81a00f27 --- /dev/null +++ b/files/apts/ldap @@ -0,0 +1,3 @@ +ldap-utils +slapd # NOPRIME +python-ldap diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in new file mode 100644 index 00000000..e522150f --- /dev/null +++ b/files/ldap/manager.ldif.in @@ -0,0 +1,10 @@ +dn: olcDatabase={${LDAP_OLCDB_NUMBER}}hdb,cn=config +changetype: modify +replace: olcSuffix +olcSuffix: dc=openstack,dc=org +- +replace: olcRootDN +olcRootDN: dc=Manager,dc=openstack,dc=org +- +${LDAP_ROOTPW_COMMAND}: olcRootPW +olcRootPW: ${SLAPPASS} diff --git a/files/ldap/openstack.ldif b/files/ldap/openstack.ldif new file mode 100644 index 00000000..287fda45 --- /dev/null +++ b/files/ldap/openstack.ldif @@ -0,0 +1,21 @@ +dn: dc=openstack,dc=org +dc: openstack +objectClass: dcObject +objectClass: organizationalUnit +ou: openstack + +dn: ou=Groups,dc=openstack,dc=org +objectClass: organizationalUnit +ou: Groups + +dn: ou=Users,dc=openstack,dc=org +objectClass: organizationalUnit +ou: Users + +dn: ou=Roles,dc=openstack,dc=org +objectClass: organizationalUnit +ou: Roles + +dn: ou=Projects,dc=openstack,dc=org +objectClass: organizationalUnit +ou: Projects diff --git a/files/rpms/ldap b/files/rpms/ldap new file mode 100644 index 00000000..2f7ab5de --- /dev/null +++ b/files/rpms/ldap @@ -0,0 +1,3 @@ +openldap-servers +openldap-clients +python-ldap diff --git a/lib/keystone b/lib/keystone index 57146708..866c62e1 100644 --- a/lib/keystone +++ b/lib/keystone @@ -94,6 +94,17 @@ function configure_keystone() { local dburl database_connection_url dburl keystone + if is_service_enabled ldap; then + #Set all needed ldap values + iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD + iniset $KEYSTONE_CONF ldap user "dc=Manager,dc=openstack,dc=org" + iniset $KEYSTONE_CONF ldap suffix "dc=openstack,dc=org" + fi + + if [[ "$KEYSTONE_IDENTITY_BACKEND" == "ldap" ]]; then + iniset $KEYSTONE_CONF identity driver "keystone.identity.backends.ldap.Identity" + fi + if is_service_enabled tls-proxy; then # Set the service ports for a proxy to take the originals iniset $KEYSTONE_CONF DEFAULT public_port $KEYSTONE_SERVICE_PORT_INT @@ -283,6 +294,10 @@ function install_keystoneclient() { # install_keystone() - Collect source and prepare function install_keystone() { + # only install ldap if the service has been enabled + if is_service_enabled ldap; then + install_ldap + fi git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH } diff --git a/lib/ldap b/lib/ldap new file mode 100644 index 00000000..5cb45347 --- /dev/null +++ b/lib/ldap @@ -0,0 +1,74 @@ +# lib/ldap +# Functions to control the installation and configuration of **ldap** + +# ``stack.sh`` calls the entry points in this order: +# + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# install_ldap +# install_ldap() - Collect source and prepare +function 
install_ldap() { + echo "Installing LDAP inside function" + echo "LDAP_PASSWORD is $LDAP_PASSWORD" + echo "os_VENDOR is $os_VENDOR" + printf "installing" + if is_ubuntu; then + echo "os vendor is Ubuntu" + LDAP_OLCDB_NUMBER=1 + LDAP_ROOTPW_COMMAND=replace + sudo DEBIAN_FRONTEND=noninteractive apt-get install slapd ldap-utils + #automatically starts LDAP on ubuntu so no need to call start_ldap + elif is_fedora; then + echo "os vendor is Fedora" + LDAP_OLCDB_NUMBER=2 + LDAP_ROOTPW_COMMAND=add + start_ldap + fi + + printf "generate password file" + SLAPPASS=`slappasswd -s $LDAP_PASSWORD` + + printf "secret is $SLAPPASS\n" + #create manager.ldif + TMP_MGR_DIFF_FILE=`mktemp -t manager_ldiff.$$.XXXXXXXXXX.ldif` + sed -e "s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER|" -e "s|\${SLAPPASS}|$SLAPPASS|" -e "s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND|" $FILES/ldap/manager.ldif.in >> $TMP_MGR_DIFF_FILE + + #update ldap olcdb + sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_MGR_DIFF_FILE + + # add our top level ldap nodes + if ldapsearch -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -b dc=openstack,dc=org | grep -q "Success" ; then + printf "LDAP already configured for OpenStack\n" + if [[ "$KEYSTONE_CLEAR_LDAP" == "yes" ]]; then + # clear LDAP state + clear_ldap_state + # reconfigure LDAP for OpenStack + ldapadd -c -x -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -w $LDAP_PASSWORD -f $FILES/ldap/openstack.ldif + fi + else + printf "Configuring LDAP for OpenStack\n" + ldapadd -c -x -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -w $LDAP_PASSWORD -f $FILES/ldap/openstack.ldif + fi +} + +# start_ldap() - Start LDAP +function start_ldap() { + sudo service slapd restart +} + + +# stop_ldap() - Stop LDAP +function stop_ldap() { + sudo service slapd stop +} + +# clear_ldap_state() - Clear LDAP State +function clear_ldap_state() { + ldapdelete -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -r "dc=openstack,dc=org" +} + +# Restore xtrace +$XTRACE diff --git a/stack.sh b/stack.sh index 46086482..0521ced3 100755 --- a/stack.sh +++ b/stack.sh @@ -306,6 +306,7 @@ source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat source $TOP_DIR/lib/quantum source $TOP_DIR/lib/baremetal +source $TOP_DIR/lib/ldap # Set the destination directories for OpenStack projects HORIZON_DIR=$DEST/horizon @@ -475,6 +476,20 @@ read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION." # Horizon currently truncates usernames and passwords at 20 characters read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)." +# Keystone can now optionally install OpenLDAP by adding ldap to the list +# of enabled services in the localrc file (e.g. ENABLED_SERVICES=key,ldap). +# If OpenLDAP has already been installed but you need to clear out +# the Keystone contents of LDAP set KEYSTONE_CLEAR_LDAP to yes +# (e.g. KEYSTONE_CLEAR_LDAP=yes ) in the localrc file. To enable the +# Keystone Identity Driver (keystone.identity.backends.ldap.Identity) +# set KEYSTONE_IDENTITY_BACKEND to ldap (e.g. KEYSTONE_IDENTITY_BACKEND=ldap) +# in the localrc file. 
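# [Editor's illustration, not part of this patch] Assuming only the options
# described above, a localrc fragment that exercises the LDAP identity
# backend could look like:
#     ENABLED_SERVICES=$ENABLED_SERVICES,ldap
#     KEYSTONE_IDENTITY_BACKEND=ldap
#     KEYSTONE_CLEAR_LDAP=yes
#     LDAP_PASSWORD=secrete    # presetting this should skip the prompt below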
+ + +# only request ldap password if the service is enabled +if is_service_enabled ldap; then + read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP" +fi # Set the tenant for service accounts in Keystone SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} From 88a3bc1d8db8c7f912d05bc2fd201b6b2c4f1e86 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Mon, 4 Feb 2013 09:16:14 -0500 Subject: [PATCH 943/967] Remove bad options calling quantum-ovs-cleanup --external_network_bridge and --ovs_integration_bridge are no longer a valid options Fixes LP# 1115213 Change-Id: I9af4514a0cc661f4b72b3f0e00407be163c48945 --- lib/quantum_plugins/ovs_base | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/quantum_plugins/ovs_base b/lib/quantum_plugins/ovs_base index d9f6fd0e..f34e8621 100644 --- a/lib/quantum_plugins/ovs_base +++ b/lib/quantum_plugins/ovs_base @@ -12,7 +12,7 @@ function is_quantum_ovs_base_plugin() { function _quantum_ovs_base_setup_bridge() { local bridge=$1 - quantum-ovs-cleanup --ovs_integration_bridge $bridge + quantum-ovs-cleanup sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge } @@ -39,7 +39,7 @@ function _quantum_ovs_base_configure_debug_command() { function _quantum_ovs_base_configure_l3_agent() { iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE - quantum-ovs-cleanup --external_network_bridge $PUBLIC_BRIDGE + quantum-ovs-cleanup sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE # ensure no IP is configured on the public bridge sudo ip addr flush dev $PUBLIC_BRIDGE From 35336282da621d5189bad97c5bddd1840721f632 Mon Sep 17 00:00:00 2001 From: Chris Krelle Date: Sun, 3 Feb 2013 15:48:43 -0800 Subject: [PATCH 944/967] Add dhcp server option to dnsmasq for baremetal This sets the dns option for baremetal deploy clients. this is useful for clients who may require external access during the deployment process. Change-Id: Ibe680d2acaad826e4868223ebfd1f112d7796662 Authored-by: Chris Krelle --- lib/baremetal | 3 +++ stack.sh | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index 3cc24291..7c31d1fd 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -90,6 +90,9 @@ else BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-} fi +# BM_DNSMASQ_DNS provide dns server to bootstrap clients +BM_DNSMASQ_DNS=${BM_DNSMASQ_DNS:-} + # BM_FIRST_MAC *must* be set to the MAC address of the node you will boot. # This is passed to dnsmasq along with the kernel/ramdisk to # deploy via PXE. 
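The stack.sh hunk that follows wires BM_DNSMASQ_DNS into the dnsmasq command line. Note that the expansion as written there, ${$BM_DNSMASQ_DNS:+...}, looks like a typo for ${BM_DNSMASQ_DNS:+...}; the intended :+ expansion behaves as below (the DNS value is illustrative only):

    BM_DNSMASQ_DNS=8.8.8.8
    echo ${BM_DNSMASQ_DNS:+--dhcp-option=option:dns-server,$BM_DNSMASQ_DNS}
    # prints: --dhcp-option=option:dns-server,8.8.8.8
    unset BM_DNSMASQ_DNS
    echo "x${BM_DNSMASQ_DNS:+--dhcp-option=option:dns-server,$BM_DNSMASQ_DNS}x"
    # prints: xx   (the whole option disappears when the variable is unset or empty)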
diff --git a/stack.sh b/stack.sh index 46086482..1550e445 100755 --- a/stack.sh +++ b/stack.sh @@ -1285,8 +1285,8 @@ if is_service_enabled nova && is_baremetal; then sudo pkill dnsmasq || true sudo dnsmasq --conf-file= --port=0 --enable-tftp --tftp-root=/tftpboot \ --dhcp-boot=pxelinux.0 --bind-interfaces --pid-file=/var/run/dnsmasq.pid \ - --interface=$BM_DNSMASQ_IFACE --dhcp-range=$BM_DNSMASQ_RANGE - + --interface=$BM_DNSMASQ_IFACE --dhcp-range=$BM_DNSMASQ_RANGE \ + ${$BM_DNSMASQ_DNS:+--dhcp-option=option:dns-server,$BM_DNSMASQ_DNS} # ensure callback daemon is running sudo pkill nova-baremetal-deploy-helper || true screen_it baremetal "nova-baremetal-deploy-helper" From 8407b2de2ad0e83690c9f1b193b50b984a40ddfb Mon Sep 17 00:00:00 2001 From: "Yunhong, Jiang" Date: Thu, 7 Feb 2013 13:48:33 +0800 Subject: [PATCH 945/967] Copy the pipeline configuration file Update the pipeline configuration file for ceilometer Change-Id: I7a46f61391b76447d7973be5c43b7d0360c56da0 Signed-off-by: Yunhong, Jiang --- lib/ceilometer | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ceilometer b/lib/ceilometer index 41a5f53d..bc37d92b 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -73,6 +73,7 @@ function configure_ceilometer() { # Install the policy file for the API server cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR + cp $CEILOMETER_DIR/etc/ceilometer/pipeline.yaml $CEILOMETER_CONF_DIR iniset $CEILOMETER_CONF DEFAULT policy_file $CEILOMETER_CONF_DIR/policy.json # the compute and central agents need these credentials in order to From 8d55be31a95043236d52ee891bacae5ea9f5ed37 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 7 Feb 2013 17:16:35 -0600 Subject: [PATCH 946/967] Handle nested xtrace states (Quantum edition) The lib/quantum_* directories include plugin files that also contains the $XTRACE bits at entry and exit. The nested XTRACE handling overwrote the value from lib/quantum. So...make the nested files use their own XTRACE variables. 
Change-Id: Ib5e643371666b21402eef2ec58bfb1dfb7e1ccc4 --- lib/quantum_plugins/bigswitch_floodlight | 4 ++-- lib/quantum_plugins/linuxbridge | 4 ++-- lib/quantum_plugins/openvswitch | 4 ++-- lib/quantum_plugins/ovs_base | 4 ++-- lib/quantum_plugins/ryu | 4 ++-- lib/quantum_thirdparty/bigswitch_floodlight | 4 ++-- lib/quantum_thirdparty/ryu | 4 ++-- 7 files changed, 14 insertions(+), 14 deletions(-) diff --git a/lib/quantum_plugins/bigswitch_floodlight b/lib/quantum_plugins/bigswitch_floodlight index 35276a55..2c928bec 100644 --- a/lib/quantum_plugins/bigswitch_floodlight +++ b/lib/quantum_plugins/bigswitch_floodlight @@ -2,7 +2,7 @@ # ------------------------------------ # Save trace setting -XTRACE=$(set +o | grep xtrace) +MY_XTRACE=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/quantum_plugins/ovs_base @@ -52,4 +52,4 @@ function quantum_plugin_setup_interface_driver() { } # Restore xtrace -$XTRACE +$MY_XTRACE diff --git a/lib/quantum_plugins/linuxbridge b/lib/quantum_plugins/linuxbridge index e8ba68c3..6d5d4e08 100644 --- a/lib/quantum_plugins/linuxbridge +++ b/lib/quantum_plugins/linuxbridge @@ -2,7 +2,7 @@ # --------------------------- # Save trace setting -XTRACE=$(set +o | grep xtrace) +MY_XTRACE=$(set +o | grep xtrace) set +o xtrace function is_quantum_ovs_base_plugin() { @@ -76,4 +76,4 @@ function quantum_plugin_setup_interface_driver() { } # Restore xtrace -$XTRACE +$MY_XTRACE diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch index 5415e869..12bc2442 100644 --- a/lib/quantum_plugins/openvswitch +++ b/lib/quantum_plugins/openvswitch @@ -2,7 +2,7 @@ # --------------------------- # Save trace setting -XTRACE=$(set +o | grep xtrace) +MY_XTRACE=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/quantum_plugins/ovs_base @@ -141,4 +141,4 @@ function quantum_plugin_setup_interface_driver() { } # Restore xtrace -$XTRACE +$MY_XTRACE diff --git a/lib/quantum_plugins/ovs_base b/lib/quantum_plugins/ovs_base index d9f6fd0e..4c334731 100644 --- a/lib/quantum_plugins/ovs_base +++ b/lib/quantum_plugins/ovs_base @@ -2,7 +2,7 @@ # ------------------------------------- # Save trace setting -XTRACE=$(set +o | grep xtrace) +MY_XTRACE=$(set +o | grep xtrace) set +o xtrace function is_quantum_ovs_base_plugin() { @@ -46,4 +46,4 @@ function _quantum_ovs_base_configure_l3_agent() { } # Restore xtrace -$XTRACE +$MY_XTRACE diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu index 86105bc8..f44f4ae3 100644 --- a/lib/quantum_plugins/ryu +++ b/lib/quantum_plugins/ryu @@ -2,7 +2,7 @@ # ------------------ # Save trace setting -XTRACE=$(set +o | grep xtrace) +MY_XTRACE=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/quantum_plugins/ovs_base @@ -60,4 +60,4 @@ function quantum_plugin_setup_interface_driver() { } # Restore xtrace -$XTRACE +$MY_XTRACE diff --git a/lib/quantum_thirdparty/bigswitch_floodlight b/lib/quantum_thirdparty/bigswitch_floodlight index 77aeb61d..60e39248 100644 --- a/lib/quantum_thirdparty/bigswitch_floodlight +++ b/lib/quantum_thirdparty/bigswitch_floodlight @@ -2,7 +2,7 @@ # ------------------------------------------ # Save trace setting -XTRACE=$(set +o | grep xtrace) +MY_XTRACE=$(set +o | grep xtrace) set +o xtrace BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} @@ -47,4 +47,4 @@ function stop_bigswitch_floodlight() { } # Restore xtrace -$XTRACE +$MY_XTRACE diff --git a/lib/quantum_thirdparty/ryu b/lib/quantum_thirdparty/ryu index f11951a3..de8e0861 100644 --- a/lib/quantum_thirdparty/ryu +++ 
b/lib/quantum_thirdparty/ryu @@ -2,7 +2,7 @@ # ----------------------- # Save trace setting -XTRACE=$(set +o | grep xtrace) +MY_XTRACE=$(set +o | grep xtrace) set +o xtrace @@ -62,4 +62,4 @@ function stop_ryu() { } # Restore xtrace -$XTRACE +$MY_XTRACE From 429b39d8f854318f2d30d592b71526791f3fac9f Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Wed, 9 Jan 2013 18:35:55 +0900 Subject: [PATCH 947/967] Quantum/plugins/ryu: minor update for Quantum Ryu plugin - updated package list for Ryu ryu plugin/agent needs python-netifaces - quantum-ryu-agent also needs ryu module Change-Id: I5b49efceb65e8139a49a8e82f55ea6aa7d1eebac Signed-off-by: Isaku Yamahata --- files/apts/ryu | 1 + files/rpms/ryu | 1 + lib/quantum_plugins/ryu | 3 +++ lib/quantum_thirdparty/ryu | 10 +++++++++- 4 files changed, 14 insertions(+), 1 deletion(-) diff --git a/files/apts/ryu b/files/apts/ryu index 1e8f2d2b..4a4fc523 100644 --- a/files/apts/ryu +++ b/files/apts/ryu @@ -1,4 +1,5 @@ python-setuptools python-gevent python-gflags +python-netifaces python-sphinx diff --git a/files/rpms/ryu b/files/rpms/ryu index 1e8f2d2b..4a4fc523 100644 --- a/files/rpms/ryu +++ b/files/rpms/ryu @@ -1,4 +1,5 @@ python-setuptools python-gevent python-gflags +python-netifaces python-sphinx diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu index 86105bc8..3f960ea8 100644 --- a/lib/quantum_plugins/ryu +++ b/lib/quantum_plugins/ryu @@ -15,6 +15,9 @@ function quantum_plugin_create_nova_conf() { function quantum_plugin_install_agent_packages() { _quantum_ovs_base_install_agent_packages + + # quantum_ryu_agent requires ryu module + install_ryu } function quantum_plugin_configure_common() { diff --git a/lib/quantum_thirdparty/ryu b/lib/quantum_thirdparty/ryu index f11951a3..5717d821 100644 --- a/lib/quantum_thirdparty/ryu +++ b/lib/quantum_thirdparty/ryu @@ -49,8 +49,16 @@ function init_ryu() { echo "${RYU_CONF_CONTENTS}" > $RYU_CONF } +# install_ryu can be called multiple times as quantum_pluing/ryu may call +# this function for quantum-ryu-agent +# Make this function idempotent and avoid cloning same repo many times +# with RECLONE=yes +_RYU_INSTALLED=${_RYU_INSTALLED:-False} function install_ryu() { - git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH + if [[ "$_RYU_INSTALLED" == "False" ]]; then + git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH + _RYU_INSTALLED=True + fi } function start_ryu() { From ec06efc607328bce9dc535be79e9539d5edec536 Mon Sep 17 00:00:00 2001 From: Mate Lakat Date: Fri, 1 Feb 2013 15:16:51 +0000 Subject: [PATCH 948/967] Disable non-dhcp resolv.conf Fixes: bug #1119268 Change-Id: Icf7d420a31eb7a0cb46b2e59a4328f6b640deb57 --- tools/xen/build_xva.sh | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh index 0e874cfe..b0fd003d 100755 --- a/tools/xen/build_xva.sh +++ b/tools/xen/build_xva.sh @@ -44,12 +44,9 @@ if [ ! 
-d $STAGING_DIR/etc ]; then exit 1 fi -# Configure dns (use same dns as dom0) -# but only when not precise -if [ "$UBUNTU_INST_RELEASE" != "precise" ]; then - cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf -elif [ "$MGT_IP" != "dhcp" ] && [ "$PUB_IP" != "dhcp" ]; then - echo "Configuration without DHCP not supported on Precise" +# Only support DHCP for now - don't support how different versions of Ubuntu handle resolv.conf +if [ "$MGT_IP" != "dhcp" ] && [ "$PUB_IP" != "dhcp" ]; then + echo "Configuration without DHCP not supported" exit 1 fi From 48352ee7c05cf79734abf74a2e7ac47425babb3a Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 12 Dec 2012 12:50:38 -0600 Subject: [PATCH 949/967] Create tools/install_prereqs.sh * Factor system package prereq installs out to tools/install_prereqs.sh * Set minimum time between runs with PREREQ_RERUN_HOURS default = 2 hours * Create export_proxy_variables * Force an update with install_prereqs.sh -f or by setting FORCE_PREREQ=true Fixed an issue with exit/return in tools/install_prereqs.sh Change-Id: I9a62090ad2f900b9b150cacb9cb02b326cb46972 --- functions | 21 ++++++++++ stack.sh | 49 ++++-------------------- tools/install_prereqs.sh | 82 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 111 insertions(+), 41 deletions(-) create mode 100755 tools/install_prereqs.sh diff --git a/functions b/functions index 68aec5d2..3f26b7fd 100644 --- a/functions +++ b/functions @@ -80,6 +80,27 @@ function die_if_not_set() { } +# HTTP and HTTPS proxy servers are supported via the usual environment variables [1] +# ``http_proxy``, ``https_proxy`` and ``no_proxy``. They can be set in +# ``localrc`` or on the command line if necessary:: +# +# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html +# +# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh + +function export_proxy_variables() { + if [[ -n "$http_proxy" ]]; then + export http_proxy=$http_proxy + fi + if [[ -n "$https_proxy" ]]; then + export https_proxy=$https_proxy + fi + if [[ -n "$no_proxy" ]]; then + export no_proxy=$no_proxy + fi +} + + # Grab a numbered field from python prettytable output # Fields are numbered starting with 1 # Reverse syntax is supported: -1 is the last field, -2 is second to last, etc. diff --git a/stack.sh b/stack.sh index 46086482..7a8bd802 100755 --- a/stack.sh +++ b/stack.sh @@ -30,9 +30,8 @@ source $TOP_DIR/functions GetDistro - -# Settings -# ======== +# Global Settings +# =============== # ``stack.sh`` is customizable through setting environment variables. If you # want to override a setting you can set and export it:: @@ -62,33 +61,18 @@ fi source $TOP_DIR/stackrc -# Proxy Settings +# Local Settings # -------------- -# HTTP and HTTPS proxy servers are supported via the usual environment variables [1] -# ``http_proxy``, ``https_proxy`` and ``no_proxy``. 
They can be set in -# ``localrc`` if necessary or on the command line:: -# -# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html -# -# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh - -if [[ -n "$http_proxy" ]]; then - export http_proxy=$http_proxy -fi -if [[ -n "$https_proxy" ]]; then - export https_proxy=$https_proxy -fi -if [[ -n "$no_proxy" ]]; then - export no_proxy=$no_proxy -fi +# Make sure the proxy config is visible to sub-processes +export_proxy_variables # Destination path for installation ``DEST`` DEST=${DEST:-/opt/stack} # Sanity Check -# ============ +# ------------ # Clean up last environment var cache if [[ -r $TOP_DIR/.stackenv ]]; then @@ -631,26 +615,9 @@ set -o xtrace # OpenStack uses a fair number of other projects. # Install package requirements +# Source it so the entire environment is available echo_summary "Installing package prerequisites" -if is_ubuntu; then - install_package $(get_packages $FILES/apts) -elif is_fedora; then - install_package $(get_packages $FILES/rpms) -elif is_suse; then - install_package $(get_packages $FILES/rpms-suse) -else - exit_distro_not_supported "list of packages" -fi - -if [[ $SYSLOG != "False" ]]; then - if is_ubuntu || is_fedora; then - install_package rsyslog-relp - elif is_suse; then - install_package rsyslog-module-relp - else - exit_distro_not_supported "rsyslog-relp installation" - fi -fi +source $TOP_DIR/tools/install_prereqs.sh install_rpc_backend diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh new file mode 100755 index 00000000..4d151db2 --- /dev/null +++ b/tools/install_prereqs.sh @@ -0,0 +1,82 @@ +#!/usr/bin/env bash + +# **install_prereqs.sh** + +# Install system package prerequisites +# +# install_prereqs.sh [-f] +# +# -f Force an install run now + +if [[ -n "$1" && "$1" = "-f" ]]; then + FORCE_PREREQ=1 +fi + +# If TOP_DIR is set we're being sourced rather than running stand-alone +# or in a sub-shell +if [[ -z "$TOP_DIR" ]]; then + # Keep track of the devstack directory + TOP_DIR=$(cd $(dirname "$0")/.. && pwd) + + # Import common functions + source $TOP_DIR/functions + + # Determine what system we are running on. This provides ``os_VENDOR``, + # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` + # and ``DISTRO`` + GetDistro + + # Needed to get ``ENABLED_SERVICES`` + source $TOP_DIR/stackrc + + # Prereq dirs are here + FILES=$TOP_DIR/files +fi + +# Minimum wait time +PREREQ_RERUN_MARKER=${PREREQ_RERUN_MARKER:-$TOP_DIR/.prereqs} +PREREQ_RERUN_HOURS=${PREREQ_RERUN_HOURS:-2} +PREREQ_RERUN_SECONDS=$((60*60*$PREREQ_RERUN_HOURS)) + +NOW=$(date "+%s") +LAST_RUN=$(head -1 $PREREQ_RERUN_MARKER 2>/dev/null || echo "0") +DELTA=$(($NOW - $LAST_RUN)) +if [[ $DELTA -lt $PREREQ_RERUN_SECONDS && -z "$FORCE_PREREQ" ]]; then + echo "Re-run time has not expired ($(($PREREQ_RERUN_SECONDS - $DELTA)) seconds remaining); exiting..." 
+ return 0 +fi + +# Make sure the proxy config is visible to sub-processes +export_proxy_variables + + +# Install Packages +# ================ + +# Install package requirements +if is_ubuntu; then + install_package $(get_packages $FILES/apts) +elif is_fedora; then + install_package $(get_packages $FILES/rpms) +elif is_suse; then + install_package $(get_packages $FILES/rpms-suse) +else + exit_distro_not_supported "list of packages" +fi + +if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then + if is_ubuntu || is_fedora; then + install_package rsyslog-relp + elif is_suse; then + install_package rsyslog-module-relp + else + exit_distro_not_supported "rsyslog-relp installation" + fi +fi + + +# Mark end of run +# --------------- + +date "+%s" >$PREREQ_RERUN_MARKER +date >>$PREREQ_RERUN_MARKER From 5a3f90bea7296a9b9ec5b99452c03280a056a232 Mon Sep 17 00:00:00 2001 From: Daniel Salinas Date: Fri, 8 Feb 2013 17:17:53 -0600 Subject: [PATCH 950/967] Fixes openvz driver support for nova when deployed with devstack. Change-Id: I70599333a09267cbe2cd8afd075658f3f7d8bc9d Fixes: bug #1119859 --- stack.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 8a814a01..11fafc12 100755 --- a/stack.sh +++ b/stack.sh @@ -1092,9 +1092,7 @@ if is_service_enabled nova; then elif [ "$VIRT_DRIVER" = 'openvz' ]; then echo_summary "Using OpenVZ virtualization driver" - # TODO(deva): OpenVZ driver does not yet work if compute_driver is set here. - # Replace connection_type when this is fixed. - # iniset $NOVA_CONF DEFAULT compute_driver "openvz.connection.OpenVzConnection" + iniset $NOVA_CONF DEFAULT compute_driver "openvz.driver.OpenVzDriver" iniset $NOVA_CONF DEFAULT connection_type "openvz" LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" From a52095b18c112ac301b336c36c2affd6471ee61d Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Sat, 9 Feb 2013 07:24:33 -0500 Subject: [PATCH 951/967] allow resize tests to run resize tests were turned off explicitly, which they shouldn't be turn these back on in a default config as we've set nova to allow resize_to_same_host. Change-Id: Iacedf11e56aff3a541f1b67b208e8ed3a30b2c44 --- lib/tempest | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index cb172a80..49d0da7a 100644 --- a/lib/tempest +++ b/lib/tempest @@ -209,7 +209,6 @@ function configure_tempest() { iniset $TEMPEST_CONF compute alt_username $ALT_USERNAME # DEPRECATED iniset $TEMPEST_CONF compute alt_password "$password" # DEPRECATED iniset $TEMPEST_CONF compute alt_tenant_name $ALT_TENANT_NAME # DEPRECATED - iniset $TEMPEST_CONF compute resize_available False iniset $TEMPEST_CONF compute change_password_available False iniset $TEMPEST_CONF compute compute_log_level ERROR # Note(nati) current tempest don't create network for each tenant From 37a8d157a11abe55736707fdec2fc8a273027a2f Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Tue, 15 Jan 2013 17:27:34 -0800 Subject: [PATCH 952/967] Add quantum support for baremetal virtual env. Add quantum networking support when nova-baremetal is active. This creates a ctlplane network and br-ctlplane bridge, and moves IPs from PHYSICAL_INTERFACE to OVS_PHYSICAL_BRIDGE. 
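The hunk that follows captures the new network and subnet IDs with the usual "| grep ' id ' | get_field 2" pipeline. As a rough sketch only, and not the helper actually defined in DevStack's functions file, a get_field-style filter simply pulls one column out of the CLI's prettytable output:

    # Sketch of a get_field-style helper; rows look like "| id | 3f8c1a2b-... |",
    # so split on '|' and trim the requested column.
    function get_field_sketch() {
        local field=$1
        awk -F'|' -v col=$((field + 1)) '{ gsub(/^[ \t]+|[ \t]+$/, "", $col); print $col }'
    }
    # echo "| id | 3f8c1a2b-aaaa-bbbb-cccc-000000000000      |" | get_field_sketch 2
    #   -> 3f8c1a2b-aaaa-bbbb-cccc-000000000000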
Change-Id: If2026c01b93de0ccc7c3f9112de07b3a9c01ac20 --- lib/quantum | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/lib/quantum b/lib/quantum index d5733b3a..5b045580 100644 --- a/lib/quantum +++ b/lib/quantum @@ -270,8 +270,19 @@ function create_quantum_initial_network() { # Create a small network # Since quantum command is executed in admin context at this point, # ``--tenant_id`` needs to be specified. - NET_ID=$(quantum net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) - SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + if is_baremetal; then + sudo ovs-vsctl add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE + for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $PUBLIC_INTERFACE + sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE + done + NET_ID=$(quantum net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2) + SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + sudo ifconfig $OVS_PHYSICAL_BRIDGE up + else + NET_ID=$(quantum net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + fi if is_service_enabled q-l3; then # Create a router, and add the private subnet as one of its interfaces From 712feb663c314da65cd215f3dae3045bf2c8b057 Mon Sep 17 00:00:00 2001 From: Akihiro MOTOKI Date: Mon, 11 Feb 2013 23:45:19 +0900 Subject: [PATCH 953/967] Move auth_token configurations to quantum.conf keystone auth_token middleware now allows quantum to have auth_token configuration in quantum.conf. auth_token middleware supports auth_token configuration both in api-paste.ini and quantum.conf, so we can apply this change at any timing. Change-Id: Ie5dd63e6c6938d2c8118e0f6090ef057c21a772a --- lib/quantum | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/lib/quantum b/lib/quantum index d5733b3a..da4d6f60 100644 --- a/lib/quantum +++ b/lib/quantum @@ -507,7 +507,11 @@ function _configure_quantum_service() { iniset $QUANTUM_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP iniset $QUANTUM_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY - _quantum_setup_keystone $Q_API_PASTE_FILE filter:authtoken + _quantum_setup_keystone $QUANTUM_CONF keystone_authtoken + # Comment out keystone authtoken configuration in api-paste.ini + # It is required to avoid any breakage in Quantum where the sample + # api-paste.ini has authtoken configurations. 
+ _quantum_commentout_keystone_authtoken $Q_API_PASTE_FILE filter:authtoken # Configure plugin quantum_plugin_configure_service @@ -573,6 +577,21 @@ function _quantum_setup_keystone() { rm -f $QUANTUM_AUTH_CACHE_DIR/* } +function _quantum_commentout_keystone_authtoken() { + local conf_file=$1 + local section=$2 + + inicomment $conf_file $section auth_host + inicomment $conf_file $section auth_port + inicomment $conf_file $section auth_protocol + inicomment $conf_file $section auth_url + + inicomment $conf_file $section admin_tenant_name + inicomment $conf_file $section admin_user + inicomment $conf_file $section admin_password + inicomment $conf_file $section signing_dir +} + function _quantum_setup_interface_driver() { quantum_plugin_setup_interface_driver $1 } From b205cc8ff17885790a38a55bb5ee9facfac769cf Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 11 Feb 2013 17:34:39 -0600 Subject: [PATCH 954/967] Clean up configure_nova() a bit Change-Id: I2228221051a5a4413a34ca359856d90794fce69a --- lib/nova | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/lib/nova b/lib/nova index 1681af77..9ecf4ebf 100644 --- a/lib/nova +++ b/lib/nova @@ -166,20 +166,13 @@ function configure_nova() { # Get the sample configuration file in place cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR - # Rewrite the authtoken configuration for our Keystone service. - # This is a bit defensive to allow the sample file some variance. - sed -e " - /^admin_token/i admin_tenant_name = $SERVICE_TENANT_NAME - /admin_tenant_name/s/^.*$/admin_tenant_name = $SERVICE_TENANT_NAME/; - /admin_user/s/^.*$/admin_user = nova/; - /admin_password/s/^.*$/admin_password = $SERVICE_PASSWORD/; - s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g; - s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g; - " -i $NOVA_API_PASTE_INI iniset $NOVA_API_PASTE_INI filter:authtoken auth_host $SERVICE_HOST if is_service_enabled tls-proxy; then iniset $NOVA_API_PASTE_INI filter:authtoken auth_protocol $SERVICE_PROTOCOL fi + iniset $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $NOVA_API_PASTE_INI filter:authtoken admin_user nova + iniset $NOVA_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD fi iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR From 58e21349644f42d4aff078e4da26ecd98d76ba19 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Mon, 11 Feb 2013 16:48:12 -0800 Subject: [PATCH 955/967] Add option to make screen starting more robust. We have seen a number of failures in ci where a host is overloaded and the 1.5 second sleep before stuffing data into screen is not long enough. This means the service doesn't start and tests fail. This change adds a config option to allow us to turn off the developer friendly option to stuff text into the screen. When SCREEN_DEV is set to False it will use a simple exec in screen instead of stuff. This should be far more reliable because we don't have to wait for bash to start. 
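In screen terms the two modes look roughly like the following; the session name, window name and service command are placeholders rather than values taken from the patch:

    # Dev mode: open a window and paste the command into the interactive bash
    # running inside it; the sleep gives bash time to be ready for keystrokes.
    NL=$(echo -ne '\015')
    screen -S stack -X screen -t n-api
    sleep 1.5
    screen -S stack -p n-api -X stuff "nova-api$NL"

    # Robust mode: have screen exec the command directly, so there is no
    # interactive shell to wait for (and none left behind to restart from).
    screen -S stack -X screen -t n-api
    screen -S stack -p n-api -X exec /bin/bash -c "nova-api"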
Change-Id: I7f1b5dbf5329b23507cb767d54a2795be0d73e01 --- functions | 27 ++++++++++++++++----------- stackrc | 7 +++++++ 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/functions b/functions index 3f26b7fd..79c82a45 100644 --- a/functions +++ b/functions @@ -738,26 +738,31 @@ function restart_service() { # Helper to launch a service in a named screen # screen_it service "command-line" function screen_it { - NL=`echo -ne '\015'` SCREEN_NAME=${SCREEN_NAME:-stack} SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + SCREEN_DEV=`trueorfalse True $SCREEN_DEV` if is_service_enabled $1; then # Append the service to the screen rc file screen_rc "$1" "$2" screen -S $SCREEN_NAME -X screen -t $1 - # sleep to allow bash to be ready to be send the command - we are - # creating a new window in screen and then sends characters, so if - # bash isn't running by the time we send the command, nothing happens - sleep 1.5 - - if [[ -n ${SCREEN_LOGDIR} ]]; then - screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log - screen -S $SCREEN_NAME -p $1 -X log on - ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log + if [[ "$SCREEN_DEV" = "True" ]]; then + # sleep to allow bash to be ready to be send the command - we are + # creating a new window in screen and then sends characters, so if + # bash isn't running by the time we send the command, nothing happens + sleep 1.5 + + if [[ -n ${SCREEN_LOGDIR} ]]; then + screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log + screen -S $SCREEN_NAME -p $1 -X log on + ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log + fi + NL=`echo -ne '\015'` + screen -S $SCREEN_NAME -p $1 -X stuff "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" + else + screen -S $SCREEN_NAME -p $1 -X exec /bin/bash -c "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"" fi - screen -S $SCREEN_NAME -p $1 -X stuff "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" fi } diff --git a/stackrc b/stackrc index 789fc82d..91f4e2b5 100644 --- a/stackrc +++ b/stackrc @@ -29,6 +29,13 @@ ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-s # Set the default Nova APIs to enable NOVA_ENABLED_APIS=ec2,osapi_compute,metadata +# Whether to use 'dev mode' for screen windows. Dev mode works by +# stuffing text into the screen windows so that a developer can use +# ctrl-c, up-arrow, enter to restart the service. Starting services +# this way is slightly unreliable, and a bit slower, so this can +# be disabled for automated testing by setting this value to false. +SCREEN_DEV=True + # Repositories # ------------ From a1a61c8a5bc318a1de81fb9724045f189a0d8b85 Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Wed, 13 Feb 2013 19:20:03 +0000 Subject: [PATCH 956/967] Fix name of xen dom0 rootwrap for quantum ovs. * Supports blueprint xenapi-ovs Change-Id: I1d5ac0ce1f226aa3f6c0d7f7bd1eb968aef1eeba --- lib/quantum_plugins/openvswitch | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch index 12bc2442..181e7e71 100644 --- a/lib/quantum_plugins/openvswitch +++ b/lib/quantum_plugins/openvswitch @@ -75,7 +75,7 @@ function quantum_plugin_configure_plugin_agent() { # Nova will always be installed along with quantum for a domU # devstack install, so it should be safe to rely on nova.conf # for xenapi configuration. 
- Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-dom0 $NOVA_CONF" + Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-xen-dom0 $NOVA_CONF" # Under XS/XCP, the ovs agent needs to target the dom0 # integration bridge. This is enabled by using a root wrapper # that executes commands on dom0 via a XenAPI plugin. From 25c42f6eb4955d0032c911c991db8b72643ea7c4 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 14 Feb 2013 15:00:02 +0100 Subject: [PATCH 957/967] Remove old tempest configuration variables Change-Id: I4c15c876514e1a8071a557ce79f56266a83b24b8 --- lib/tempest | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/lib/tempest b/lib/tempest index 49d0da7a..e43f6d75 100644 --- a/lib/tempest +++ b/lib/tempest @@ -205,12 +205,7 @@ function configure_tempest() { iniset $TEMPEST_CONF identity admin_password "$password" # Compute - iniset $TEMPEST_CONF compute password "$password" # DEPRECATED - iniset $TEMPEST_CONF compute alt_username $ALT_USERNAME # DEPRECATED - iniset $TEMPEST_CONF compute alt_password "$password" # DEPRECATED - iniset $TEMPEST_CONF compute alt_tenant_name $ALT_TENANT_NAME # DEPRECATED iniset $TEMPEST_CONF compute change_password_available False - iniset $TEMPEST_CONF compute compute_log_level ERROR # Note(nati) current tempest don't create network for each tenant # so reuse same tenant for now if is_service_enabled quantum; then @@ -231,10 +226,6 @@ function configure_tempest() { iniset $TEMPEST_CONF compute flavor_ref_alt $flavor_ref_alt iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False} iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} - iniset $TEMPEST_CONF compute source_dir $NOVA_SOURCE_DIR # DEPRECATED - iniset $TEMPEST_CONF compute bin_dir $NOVA_BIN_DIR # DEPRECATED - iniset $TEMPEST_CONF compute path_to_private_key $TEMPEST_DIR/id_rsa # DEPRECATED - iniset $TEMPEST_CONF compute db_uri $BASE_SQL_CONN/nova # DEPRECATED # Whitebox iniset $TEMPEST_CONF whitebox source_dir $NOVA_SOURCE_DIR @@ -245,21 +236,11 @@ function configure_tempest() { iniset $TEMPEST_CONF whitebox db_uri $BASE_SQL_CONN/nova - # image - iniset $TEMPEST_CONF image password "$password" # DEPRECATED - - # identity-admin - iniset $TEMPEST_CONF "identity-admin" password "$password" # DEPRECATED - # compute admin iniset $TEMPEST_CONF "compute-admin" password "$password" # DEPRECATED - # network admin - iniset $TEMPEST_CONF "network-admin" password "$password" # DEPRECATED - # network iniset $TEMPEST_CONF network api_version 2.0 - iniset $TEMPEST_CONF network password "$password" # DEPRECATED iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable" iniset $TEMPEST_CONF network public_network_id "$public_network_id" iniset $TEMPEST_CONF network public_router_id "$public_router_id" From 24f796149a4cb7cf588d5481cf2786c4c9fe735d Mon Sep 17 00:00:00 2001 From: Arata Notsu Date: Wed, 13 Feb 2013 21:01:18 +0900 Subject: [PATCH 958/967] Use 'nova baremetal-*' instead of nova-baremetal-manage Change-Id: Iee4dd721387dce39c8e46ea0e1e428513498c5a0 --- lib/baremetal | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/lib/baremetal b/lib/baremetal index 7c31d1fd..26593867 100644 --- a/lib/baremetal +++ b/lib/baremetal @@ -400,15 +400,10 @@ function upload_baremetal_image() { } function clear_baremetal_of_all_nodes() { - list=$(nova-baremetal-manage node list | tail -n +2 | awk '{print $1}' ) + list=$(nova 
baremetal-node-list | awk -F '| ' 'NR>3 {print $2}' ) for node in $list do - nova-baremetal-manage node delete $node - done - list=$(nova-baremetal-manage interface list | tail -n +2 | awk '{print $1}' ) - for iface in $list - do - nova-baremetal-manage interface delete $iface + nova baremetal-node-delete $node done } @@ -420,16 +415,18 @@ function add_baremetal_node() { mac_1=${1:-$BM_FIRST_MAC} mac_2=${2:-$BM_SECOND_MAC} - id=$(nova-baremetal-manage node create \ - --host=$BM_HOSTNAME --prov_mac=$mac_1 \ - --cpus=$BM_FLAVOR_CPU --memory_mb=$BM_FLAVOR_RAM \ - --local_gb=$BM_FLAVOR_ROOT_DISK --terminal_port=0 \ - --pm_address=$BM_PM_ADDR --pm_user=$BM_PM_USER --pm_password=$BM_PM_PASS \ - ) + id=$(nova baremetal-node-create \ + --pm_address="$BM_PM_ADDR" \ + --pm_user="$BM_PM_USER" \ + --pm_password="$BM_PM_PASS" \ + "$BM_HOSTNAME" \ + "$BM_FLAVOR_CPU" \ + "$BM_FLAVOR_RAM" \ + "$BM_FLAVOR_ROOT_DISK" \ + "$mac_1" \ + | grep ' id ' | get_field 2 ) [ $? -eq 0 ] || [ "$id" ] || die "Error adding baremetal node" - id2=$(nova-baremetal-manage interface create \ - --node_id=$id --mac_address=$mac_2 --datapath_id=0 --port_no=0 \ - ) + id2=$(nova baremetal-add-interface "$id" "$mac_2" ) [ $? -eq 0 ] || [ "$id2" ] || die "Error adding interface to barmetal node $id" } From da85cdadd9a2331a28d4913b878e18c9c5abaccf Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Fri, 15 Feb 2013 11:07:14 -0600 Subject: [PATCH 959/967] Exercise cleanup * Make common steps consistent * Make comments consistent aggregates.sh boot_from_volume.sh client-args.sh client-env.sh euca.sh floating_ips.sh sec_groups.sh swift.sh volumes.sh Change-Id: Ib93dcdfdead93c259e3cd184fbc5ccc0a4a87c9a --- exercises/aggregates.sh | 19 ++-- exercises/boot_from_volume.sh | 174 ++++++++++++++++++++-------------- exercises/client-args.sh | 34 ++++++- exercises/client-env.sh | 28 ++++++ exercises/euca.sh | 5 +- exercises/floating_ips.sh | 130 +++++++++++++------------ exercises/sec_groups.sh | 8 +- exercises/swift.sh | 8 +- exercises/volumes.sh | 114 +++++++++++----------- 9 files changed, 302 insertions(+), 218 deletions(-) diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index deb1a038..ae3198f9 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -39,9 +39,8 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc -# run test as the admin user -_OLD_USERNAME=$OS_USERNAME -OS_USERNAME=admin +# Test as the admin user +. 
openrc admin admin # Create an aggregate @@ -54,7 +53,7 @@ AGGREGATE_A_ZONE=nova exit_if_aggregate_present() { aggregate_name=$1 - if [ `nova aggregate-list | grep -c " $aggregate_name "` == 0 ]; then + if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then echo "SUCCESS $aggregate_name not present" else echo "ERROR found aggregate: $aggregate_name" @@ -64,8 +63,8 @@ exit_if_aggregate_present() { exit_if_aggregate_present $AGGREGATE_NAME -AGGREGATE_ID=`nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1` -AGGREGATE2_ID=`nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1` +AGGREGATE_ID=$(nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1) +AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1) # check aggregate created nova aggregate-list | grep -q " $AGGREGATE_NAME " || die "Aggregate $AGGREGATE_NAME not created" @@ -125,7 +124,7 @@ nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGAT if [ "$VIRT_DRIVER" == "xenserver" ]; then echo "TODO(johngarbutt) add tests for add/remove host from pool aggregate" fi -FIRST_HOST=`nova host-list | grep compute | get_field 1 | head -1` +FIRST_HOST=$(nova host-list | grep compute | get_field 1 | head -1) # Make sure can add two aggregates to same host nova aggregate-add-host $AGGREGATE_ID $FIRST_HOST nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST @@ -142,12 +141,6 @@ nova aggregate-delete $AGGREGATE_ID nova aggregate-delete $AGGREGATE2_ID exit_if_aggregate_present $AGGREGATE_NAME - -# Test complete -# ============= -OS_USERNAME=$_OLD_USERNAME -echo "AGGREGATE TEST PASSED" - set +o xtrace echo "**************************************************" echo "End DevStack Exercise: $0" diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index 5ada2370..679091bb 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -44,52 +44,80 @@ source $TOP_DIR/exerciserc # the exercise is skipped is_service_enabled cinder || exit 55 +# Instance type to create +DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} + # Boot this image, use first AMI image if unset DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} -# Instance type -DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} +# Security group name +SECGROUP=${SECGROUP:-boot_secgroup} -# Default floating IP pool name -DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova} +# Instance and volume names +VM_NAME=${VM_NAME:-ex-bfv-inst} +VOL_NAME=${VOL_NAME:-ex-vol-bfv} -# Default user -DEFAULT_INSTANCE_USER=${DEFAULT_INSTANCE_USER:-cirros} -# Security group name -SECGROUP=${SECGROUP:-boot_secgroup} +# Launching a server +# ================== + +# List servers for tenant: +nova list +# Images +# ------ -# Launching servers -# ================= +# List the images available +glance image-list # Grab the id of the image to launch -IMAGE=`glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1` -die_if_not_set IMAGE "Failure getting image" +IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) +die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" + +# Security Groups +# --------------- + +# List security groups +nova secgroup-list + +# Create a secgroup +if ! nova secgroup-list | grep -q $SECGROUP; then + nova secgroup-create $SECGROUP "$SECGROUP description" + if ! 
timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then + echo "Security group not created" + exit 1 + fi +fi -# Instance and volume names -VOL_INSTANCE_NAME=${VOL_INSTANCE_NAME:-test_vol_instance} -VOL_NAME=${VOL_NAME:-test_volume} +# Configure Security Group Rules +if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then + nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 +fi +if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then + nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 +fi -# Clean-up from previous runs -nova delete $VOL_INSTANCE_NAME || true +# List secgroup rules +nova secgroup-list-rules $SECGROUP -if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VOL_INSTANCE_NAME; do sleep 1; done"; then - echo "server didn't terminate!" - exit 1 -fi +# Set up instance +# --------------- -# Configure Security Groups -nova secgroup-delete $SECGROUP || true -nova secgroup-create $SECGROUP "$SECGROUP description" -nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 -nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 +# List flavors +nova flavor-list -# Determinine instance type -INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | cut -d"|" -f2` +# Select a flavor +INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2` + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) +fi + +# Clean-up from previous runs +nova delete $VM_NAME || true +if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then + echo "server didn't terminate!" + exit 1 fi # Setup Keypair @@ -99,78 +127,80 @@ nova keypair-delete $KEY_NAME || true nova keypair-add $KEY_NAME > $KEY_FILE chmod 600 $KEY_FILE -# Delete the old volume -cinder delete $VOL_NAME || true +# Set up volume +# ------------- -# Free every floating ips - setting FREE_ALL_FLOATING_IPS=True in localrc will make life easier for testers -if [ "$FREE_ALL_FLOATING_IPS" = "True" ]; then - nova floating-ip-list | grep nova | cut -d "|" -f2 | tr -d " " | xargs -n1 nova floating-ip-delete || true -fi - -# Allocate floating ip -FLOATING_IP=`nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1` - -# Make sure the ip gets allocated -if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then - echo "Floating IP not allocated" +# Delete any old volume +cinder delete $VOL_NAME || true +if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then + echo "Volume $VOL_NAME not deleted" exit 1 fi # Create the bootable volume -cinder create --display_name=$VOL_NAME --image-id $IMAGE $DEFAULT_VOLUME_SIZE - -# Wait for volume to activate +start_time=$(date +%s) +cinder create --image-id $IMAGE --display_name=$VOL_NAME --display_description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ + die "Failure creating volume $VOL_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then echo "Volume $VOL_NAME not created" exit 1 fi +end_time=$(date +%s) +echo "Completed cinder create in $((end_time - start_time)) seconds" + +# Get volume ID +VOL_ID=$(cinder list | grep $VOL_NAME | get_field 1) +die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME" -VOLUME_ID=`cinder list | grep $VOL_NAME | get_field 1` +# Boot instance +# ------------- -# Boot instance from volume! This is done with the --block_device_mapping param. -# The format of mapping is: +# Boot using the --block_device_mapping param. The format of mapping is: # =::: # Leaving the middle two fields blank appears to do-the-right-thing -VOL_VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block_device_mapping vda=$VOLUME_ID:::0 --security_groups=$SECGROUP --key_name $KEY_NAME $VOL_INSTANCE_NAME | grep ' id ' | get_field 2` -die_if_not_set VOL_VM_UUID "Failure launching $VOL_INSTANCE_NAME" +VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security_groups=$SECGROUP --key_name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2) +die_if_not_set VM_UUID "Failure launching $VM_NAME" # Check that the status is active within ACTIVE_TIMEOUT seconds -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VOL_VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then echo "server didn't become active!" exit 1 fi -# Add floating ip to our server -nova add-floating-ip $VOL_VM_UUID $FLOATING_IP - -# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds -ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT +# Get the instance IP +IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) +die_if_not_set IP "Failure retrieving IP address" -# Make sure our volume-backed instance launched -ssh_check "$PUBLIC_NETWORK_NAME" $KEY_FILE $FLOATING_IP $DEFAULT_INSTANCE_USER $ACTIVE_TIMEOUT +# Private IPs can be pinged in single node deployments +ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT -# Remove floating ip from volume-backed instance -nova remove-floating-ip $VOL_VM_UUID $FLOATING_IP +# Clean up +# -------- # Delete volume backed instance -nova delete $VOL_INSTANCE_NAME || \ - die "Failure deleting instance volume $VOL_INSTANCE_NAME" +nova delete $VM_UUID || die "Failure deleting instance $VM_NAME" +if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then + echo "Server $VM_NAME not deleted" + exit 1 +fi -# Wait till our volume is no longer in-use +# Wait for volume to be released if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then - echo "Volume $VOL_NAME not created" + echo "Volume $VOL_NAME not released" exit 1 fi -# Delete the volume -cinder delete $VOL_NAME || \ - die "Failure deleting volume $VOLUME_NAME" - -# De-allocate the floating ip -nova floating-ip-delete $FLOATING_IP || \ - die "Failure deleting floating IP $FLOATING_IP" +# Delete volume +start_time=$(date +%s) +cinder delete $VOL_ID || die "Failure deleting volume $VOLUME_NAME" +if ! 
timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then + echo "Volume $VOL_NAME not deleted" + exit 1 +fi +end_time=$(date +%s) +echo "Completed cinder delete in $((end_time - start_time)) seconds" -# Delete a secgroup +# Delete secgroup nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" set +o xtrace diff --git a/exercises/client-args.sh b/exercises/client-args.sh index b3e2ad8d..894da742 100755 --- a/exercises/client-args.sh +++ b/exercises/client-args.sh @@ -8,6 +8,14 @@ echo "*********************************************************************" echo "Begin DevStack Exercise: $0" echo "*********************************************************************" +# This script exits on an error so that errors don't compound and you see +# only the first error that occured. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. +set -o xtrace + # Settings # ======== @@ -63,7 +71,7 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then STATUS_KEYSTONE="Skipped" else echo -e "\nTest Keystone" - if keystone $TENANT_ARG $ARGS catalog --service identity; then + if keystone $TENANT_ARG_DASH $ARGS_DASH catalog --service identity; then STATUS_KEYSTONE="Succeeded" else STATUS_KEYSTONE="Failed" @@ -82,7 +90,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then else # Test OSAPI echo -e "\nTest Nova" - if nova $TENANT_ARG $ARGS flavor-list; then + if nova $TENANT_ARG_DASH $ARGS_DASH flavor-list; then STATUS_NOVA="Succeeded" else STATUS_NOVA="Failed" @@ -91,6 +99,23 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then fi fi +# Cinder client +# ------------- + +if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then + if [[ "$SKIP_EXERCISES" =~ "c-api" ]] ; then + STATUS_CINDER="Skipped" + else + echo -e "\nTest Cinder" + if cinder $TENANT_ARG_DASH $ARGS_DASH list; then + STATUS_CINDER="Succeeded" + else + STATUS_CINDER="Failed" + RETURN=1 + fi + fi +fi + # Glance client # ------------- @@ -116,7 +141,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then STATUS_SWIFT="Skipped" else echo -e "\nTest Swift" - if swift $TENANT_ARG $ARGS stat; then + if swift $TENANT_ARG_DASH $ARGS_DASH stat; then STATUS_SWIFT="Succeeded" else STATUS_SWIFT="Failed" @@ -125,6 +150,8 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then fi fi +set +o xtrace + # Results # ------- @@ -137,6 +164,7 @@ function report() { echo -e "\n" report "Keystone" $STATUS_KEYSTONE report "Nova" $STATUS_NOVA +report "Cinder" $STATUS_CINDER report "Glance" $STATUS_GLANCE report "Swift" $STATUS_SWIFT diff --git a/exercises/client-env.sh b/exercises/client-env.sh index 68c0e5ad..c84e84e5 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -8,6 +8,14 @@ echo "*********************************************************************" echo "Begin DevStack Exercise: $0" echo "*********************************************************************" +# This script exits on an error so that errors don't compound and you see +# only the first error that occured. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. 
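# A minimal sketch of why both options are set (illustrative, not part of the
# exercise): a failing command is echoed first by xtrace and then stops the run
# via errexit, so the last "+ ..." line printed is the command that failed.
#   set -o errexit; set -o xtrace
#   false                  # printed as "+ false"; the script exits here with status 1
#   echo "never reached"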
+set -o xtrace + # Settings # ======== @@ -99,6 +107,23 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then fi fi +# Cinder client +# ------------- + +if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then + if [[ "$SKIP_EXERCISES" =~ "c-api" ]] ; then + STATUS_CINDER="Skipped" + else + echo -e "\nTest Cinder" + if cinder list; then + STATUS_CINDER="Succeeded" + else + STATUS_CINDER="Failed" + RETURN=1 + fi + fi +fi + # Glance client # ------------- @@ -133,6 +158,8 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then fi fi +set +o xtrace + # Results # ------- @@ -146,6 +173,7 @@ echo -e "\n" report "Keystone" $STATUS_KEYSTONE report "Nova" $STATUS_NOVA report "EC2" $STATUS_EC2 +report "Cinder" $STATUS_CINDER report "Glance" $STATUS_GLANCE report "Swift" $STATUS_SWIFT diff --git a/exercises/euca.sh b/exercises/euca.sh index 7b35f6fe..8b15da8d 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -44,7 +44,7 @@ source $TOP_DIR/exerciserc # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} -# Boot this image, use first AMI-format image if unset +# Boot this image, use first AMI image if unset DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} # Security group name @@ -56,6 +56,7 @@ SECGROUP=${SECGROUP:-euca_secgroup} # Find a machine image to boot IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1` +die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" # Add a secgroup if ! euca-describe-groups | grep -q $SECGROUP; then @@ -174,7 +175,7 @@ if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | exit 1 fi -# Delete group +# Delete secgroup euca-delete-group $SECGROUP || die "Failure deleting security group $SECGROUP" set +o xtrace diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 8b18e6f4..34ab69d9 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -2,8 +2,7 @@ # **floating_ips.sh** - using the cloud can be fun -# we will use the ``nova`` cli tool provided by the ``python-novaclient`` -# package to work out the instance connectivity +# Test instance connectivity with the ``nova`` command from ``python-novaclient`` echo "*********************************************************************" echo "Begin DevStack Exercise: $0" @@ -42,7 +41,7 @@ source $TOP_DIR/exerciserc # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} -# Boot this image, use first AMi image if unset +# Boot this image, use first AMI image if unset DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} # Security group name @@ -54,6 +53,9 @@ DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova} # Additional floating IP pool and range TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} +# Instance name +VM_NAME="ex-float" + # Launching a server # ================== @@ -64,19 +66,17 @@ nova list # Images # ------ -# Nova has a **deprecated** way of listing images. -nova image-list - -# But we recommend using glance directly +# List the images available glance image-list # Grab the id of the image to launch IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) +die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" # Security Groups # --------------- -# List of secgroups: +# List security groups nova secgroup-list # Create a secgroup @@ -88,81 +88,79 @@ if ! nova secgroup-list | grep -q $SECGROUP; then fi fi -# Determinine instance type -# ------------------------- +# Configure Security Group Rules +if ! 
nova secgroup-list-rules $SECGROUP | grep -q icmp; then + nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 +fi +if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then + nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 +fi -# List of instance types: +# List secgroup rules +nova secgroup-list-rules $SECGROUP + +# Set up instance +# --------------- + +# List flavors nova flavor-list -INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1` +# Select a flavor +INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | get_field 1` + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) fi -NAME="ex-float" - -VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | get_field 2` -die_if_not_set VM_UUID "Failure launching $NAME" - - -# Testing -# ======= +# Clean-up from previous runs +nova delete $VM_NAME || true +if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then + echo "server didn't terminate!" + exit 1 +fi -# First check if it spins up (becomes active and responds to ping on -# internal ip). If you run this script from a nova node, you should -# bypass security groups and have direct access to the server. +# Boot instance +# ------------- -# Waiting for boot -# ---------------- +VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) +die_if_not_set VM_UUID "Failure launching $VM_NAME" -# check that the status is active within ACTIVE_TIMEOUT seconds +# Check that the status is active within ACTIVE_TIMEOUT seconds if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then echo "server didn't become active!" exit 1 fi -# get the IP of the server -IP=`nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2` +# Get the instance IP +IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) die_if_not_set IP "Failure retrieving IP address" +# Private IPs can be pinged in single node deployments ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT -# Security Groups & Floating IPs -# ------------------------------ - -if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then - # allow icmp traffic (ping) - nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list-rules $SECGROUP | grep -q icmp; do sleep 1; done"; then - echo "Security group rule not created" - exit 1 - fi -fi - -# List rules for a secgroup -nova secgroup-list-rules $SECGROUP +# Floating IPs +# ------------ -# allocate a floating ip from default pool -FLOATING_IP=`nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1` -die_if_not_set FLOATING_IP "Failure creating floating IP" +# Allocate a floating IP from the default pool +FLOATING_IP=$(nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1) +die_if_not_set FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL" -# list floating addresses +# List floating addresses if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! 
nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then echo "Floating IP not allocated" exit 1 fi -# add floating ip to our server +# Add floating IP to our server nova add-floating-ip $VM_UUID $FLOATING_IP || \ - die "Failure adding floating IP $FLOATING_IP to $NAME" + die "Failure adding floating IP $FLOATING_IP to $VM_NAME" -# test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds +# Test we can ping our floating IP within ASSOCIATE_TIMEOUT seconds ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT if ! is_service_enabled quantum; then # Allocate an IP from second floating pool - TEST_FLOATING_IP=`nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1` + TEST_FLOATING_IP=$(nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1) die_if_not_set TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL" # list floating addresses @@ -172,34 +170,40 @@ if ! is_service_enabled quantum; then fi fi -# dis-allow icmp traffic (ping) -nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || die "Failure deleting security group rule from $SECGROUP" +# Dis-allow icmp traffic (ping) +nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \ + die "Failure deleting security group rule from $SECGROUP" # FIXME (anthony): make xs support security groups if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then - # test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds + # Test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT Fail fi +# Clean up +# -------- + if ! is_service_enabled quantum; then # Delete second floating IP - nova floating-ip-delete $TEST_FLOATING_IP || die "Failure deleting floating IP $TEST_FLOATING_IP" + nova floating-ip-delete $TEST_FLOATING_IP || \ + die "Failure deleting floating IP $TEST_FLOATING_IP" fi -# de-allocate the floating ip -nova floating-ip-delete $FLOATING_IP || die "Failure deleting floating IP $FLOATING_IP" - -# Shutdown the server -nova delete $VM_UUID || die "Failure deleting instance $NAME" +# Delete the floating ip +nova floating-ip-delete $FLOATING_IP || \ + die "Failure deleting floating IP $FLOATING_IP" +# Delete instance +nova delete $VM_UUID || die "Failure deleting instance $VM_NAME" # Wait for termination if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then - echo "Server $NAME not deleted" + echo "Server $VM_NAME not deleted" exit 1 fi -# Delete a secgroup -nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" +# Delete secgroup +nova secgroup-delete $SECGROUP || \ + die "Failure deleting security group $SECGROUP" set +o xtrace echo "*********************************************************************" diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh index fbd9c8e1..a33c9c63 100755 --- a/exercises/sec_groups.sh +++ b/exercises/sec_groups.sh @@ -2,7 +2,7 @@ # **sec_groups.sh** -# Test security groups via the command line tools that ship with it. 
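# The exercise below reduces to this lifecycle (group name and rule are
# illustrative placeholders):
#   nova secgroup-create ex-secgroup-demo 'a test security group'
#   nova secgroup-add-rule ex-secgroup-demo tcp 80 80 0.0.0.0/0
#   nova secgroup-delete-rule ex-secgroup-demo tcp 80 80 0.0.0.0/0
#   nova secgroup-delete ex-secgroup-demo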
+# Test security groups via the command line echo "*********************************************************************" echo "Begin DevStack Exercise: $0" @@ -41,7 +41,7 @@ source $TOP_DIR/exerciserc nova secgroup-list # Create random name for new sec group and create secgroup of said name -SEC_GROUP_NAME="sec-group-$(openssl rand -hex 4)" +SEC_GROUP_NAME="ex-secgroup-$(openssl rand -hex 4)" nova secgroup-create $SEC_GROUP_NAME 'a test security group' # Add some rules to the secgroup @@ -65,8 +65,10 @@ done for RULE in "${RULES_TO_ADD[@]}"; do nova secgroup-delete-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0 done -nova secgroup-delete $SEC_GROUP_NAME +# Delete secgroup +nova secgroup-delete $SEC_GROUP_NAME || \ + die "Failure deleting security group $SEC_GROUP_NAME" set +o xtrace echo "*********************************************************************" diff --git a/exercises/swift.sh b/exercises/swift.sh index 4cd487bc..a75f955a 100755 --- a/exercises/swift.sh +++ b/exercises/swift.sh @@ -2,7 +2,7 @@ # **swift.sh** -# Test swift via the command line tools that ship with it. +# Test swift via the ``swift`` command line from ``python-swiftclient` echo "*********************************************************************" echo "Begin DevStack Exercise: $0" @@ -33,13 +33,13 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc -# Container name -CONTAINER=ex-swift - # If swift is not enabled we exit with exitcode 55 which mean # exercise is skipped. is_service_enabled swift || exit 55 +# Container name +CONTAINER=ex-swift + # Testing Swift # ============= diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 45b8645b..45cb0c8e 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -2,7 +2,7 @@ # **volumes.sh** -# Test cinder volumes with the cinder command from python-cinderclient +# Test cinder volumes with the ``cinder`` command from ``python-cinderclient`` echo "*********************************************************************" echo "Begin DevStack Exercise: $0" @@ -45,12 +45,16 @@ is_service_enabled cinder || exit 55 # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} -# Boot this image, use first AMi image if unset +# Boot this image, use first AMI image if unset DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} # Security group name SECGROUP=${SECGROUP:-vol_secgroup} +# Instance and volume names +VM_NAME=${VM_NAME:-ex-vol-inst} +VOL_NAME="ex-vol-$(openssl rand -hex 4)" + # Launching a server # ================== @@ -61,19 +65,17 @@ nova list # Images # ------ -# Nova has a **deprecated** way of listing images. -nova image-list - -# But we recommend using glance directly +# List the images available glance image-list # Grab the id of the image to launch IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) +die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" # Security Groups # --------------- -# List of secgroups: +# List security groups nova secgroup-list # Create a secgroup @@ -93,126 +95,122 @@ if ! 
nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 fi -# determinine instance type -# ------------------------- +# List secgroup rules +nova secgroup-list-rules $SECGROUP + +# Set up instance +# --------------- -# List of instance types: +# List flavors nova flavor-list -INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1` +# Select a flavor +INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | get_field 1` + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) fi -NAME="ex-vol" - -VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | get_field 2` -die_if_not_set VM_UUID "Failure launching $NAME" - - -# Testing -# ======= +# Clean-up from previous runs +nova delete $VM_NAME || true +if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then + echo "server didn't terminate!" + exit 1 +fi -# First check if it spins up (becomes active and responds to ping on -# internal ip). If you run this script from a nova node, you should -# bypass security groups and have direct access to the server. +# Boot instance +# ------------- -# Waiting for boot -# ---------------- +VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) +die_if_not_set VM_UUID "Failure launching $VM_NAME" -# check that the status is active within ACTIVE_TIMEOUT seconds +# Check that the status is active within ACTIVE_TIMEOUT seconds if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then echo "server didn't become active!" exit 1 fi -# get the IP of the server -IP=`nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2` +# Get the instance IP +IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) die_if_not_set IP "Failure retrieving IP address" -# for single node deployments, we can ping private ips +# Private IPs can be pinged in single node deployments ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT # Volumes # ------- -VOL_NAME="myvol-$(openssl rand -hex 4)" - # Verify it doesn't exist -if [[ -n "`cinder list | grep $VOL_NAME | head -1 | get_field 2`" ]]; then +if [[ -n $(cinder list | grep $VOL_NAME | head -1 | get_field 2) ]]; then echo "Volume $VOL_NAME already exists" exit 1 fi # Create a new volume -cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE -if [[ $? != 0 ]]; then - echo "Failure creating volume $VOL_NAME" - exit 1 -fi - -start_time=`date +%s` +start_time=$(date +%s) +cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ + die "Failure creating volume $VOL_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! 
cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then echo "Volume $VOL_NAME not created" exit 1 fi -end_time=`date +%s` +end_time=$(date +%s) echo "Completed cinder create in $((end_time - start_time)) seconds" # Get volume ID -VOL_ID=`cinder list | grep $VOL_NAME | head -1 | get_field 1` +VOL_ID=$(cinder list | grep $VOL_NAME | head -1 | get_field 1) die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME" # Attach to server DEVICE=/dev/vdb -start_time=`date +%s` +start_time=$(date +%s) nova volume-attach $VM_UUID $VOL_ID $DEVICE || \ - die "Failure attaching volume $VOL_NAME to $NAME" + die "Failure attaching volume $VOL_NAME to $VM_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then - echo "Volume $VOL_NAME not attached to $NAME" + echo "Volume $VOL_NAME not attached to $VM_NAME" exit 1 fi -end_time=`date +%s` +end_time=$(date +%s) echo "Completed volume-attach in $((end_time - start_time)) seconds" -VOL_ATTACH=`cinder list | grep $VOL_NAME | head -1 | get_field -1` +VOL_ATTACH=$(cinder list | grep $VOL_NAME | head -1 | get_field -1) die_if_not_set VOL_ATTACH "Failure retrieving $VOL_NAME status" if [[ "$VOL_ATTACH" != $VM_UUID ]]; then echo "Volume not attached to correct instance" exit 1 fi +# Clean up +# -------- + # Detach volume -start_time=`date +%s` -nova volume-detach $VM_UUID $VOL_ID || die "Failure detaching volume $VOL_NAME from $NAME" +start_time=$(date +%s) +nova volume-detach $VM_UUID $VOL_ID || die "Failure detaching volume $VOL_NAME from $VM_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then - echo "Volume $VOL_NAME not detached from $NAME" + echo "Volume $VOL_NAME not detached from $VM_NAME" exit 1 fi -end_time=`date +%s` +end_time=$(date +%s) echo "Completed volume-detach in $((end_time - start_time)) seconds" # Delete volume -start_time=`date +%s` +start_time=$(date +%s) cinder delete $VOL_ID || die "Failure deleting volume $VOL_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then echo "Volume $VOL_NAME not deleted" exit 1 fi -end_time=`date +%s` +end_time=$(date +%s) echo "Completed cinder delete in $((end_time - start_time)) seconds" -# Shutdown the server -nova delete $VM_UUID || die "Failure deleting instance $NAME" - -# Wait for termination +# Delete instance +nova delete $VM_UUID || die "Failure deleting instance $VM_NAME" if ! 
timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then - echo "Server $NAME not deleted" + echo "Server $VM_NAME not deleted" exit 1 fi -# Delete a secgroup +# Delete secgroup nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" set +o xtrace From f03bafeb84ed87e5e5fd219e063ee1eb067f1c49 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 12 Feb 2013 10:58:28 -0600 Subject: [PATCH 960/967] Refactor init functions to simplify Grenade upgrades * Glance: create_glance_cache_dir() * Cinder: create_cinder_cache_dir() and create_cinder_volume_group() * Nova: create_nova_cache_dir() and create_nova_keys_dir() * Random tidy-up changes Change-Id: I20d995d4c2e5facfb912ee03a6cda6c56f20bbe9 --- lib/cinder | 59 ++++++++++++++++++++++++++++++++++-------------------- lib/glance | 22 ++++++++++++-------- lib/nova | 36 ++++++++++++++++++--------------- 3 files changed, 71 insertions(+), 46 deletions(-) diff --git a/lib/cinder b/lib/cinder index fd5f8cf1..4d1ab420 100644 --- a/lib/cinder +++ b/lib/cinder @@ -254,37 +254,55 @@ create_cinder_accounts() { fi } +# create_cinder_cache_dir() - Part of the init_cinder() process +function create_cinder_cache_dir() { + # Create cache dir + sudo mkdir -p $CINDER_AUTH_CACHE_DIR + sudo chown $STACK_USER $CINDER_AUTH_CACHE_DIR + rm -f $CINDER_AUTH_CACHE_DIR/* +} + +create_cinder_volume_group() { + # Configure a default volume group called '`stack-volumes`' for the volume + # service if it does not yet exist. If you don't wish to use a file backed + # volume group, create your own volume group called ``stack-volumes`` before + # invoking ``stack.sh``. + # + # By default, the backing file is 5G in size, and is stored in ``/opt/stack/data``. + + if ! sudo vgs $VOLUME_GROUP; then + VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file} + + # Only create if the file doesn't already exists + [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE + + DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` + + # Only create if the loopback device doesn't contain $VOLUME_GROUP + if ! sudo vgs $VOLUME_GROUP; then + sudo vgcreate $VOLUME_GROUP $DEV + fi + fi + + mkdir -p $CINDER_STATE_PATH/volumes +} + # init_cinder() - Initialize database and volume group function init_cinder() { # Force nova volumes off NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/osapi_volume,//") if is_service_enabled $DATABASE_BACKENDS; then - # (re)create cinder database + # (Re)create cinder database recreate_database cinder utf8 - # (re)create cinder database + # Migrate cinder database $CINDER_BIN_DIR/cinder-manage db sync fi if is_service_enabled c-vol; then - # Configure a default volume group called '`stack-volumes`' for the volume - # service if it does not yet exist. If you don't wish to use a file backed - # volume group, create your own volume group called ``stack-volumes`` before - # invoking ``stack.sh``. - # - # By default, the backing file is 5G in size, and is stored in ``/opt/stack/data``. - if ! sudo vgs $VOLUME_GROUP; then - VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file} - # Only create if the file doesn't already exists - [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE - DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` - # Only create if the loopback device doesn't contain $VOLUME_GROUP - if ! 
sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi - fi - - mkdir -p $CINDER_STATE_PATH/volumes + create_cinder_volume_group if sudo vgs $VOLUME_GROUP; then if is_fedora || is_suse; then @@ -299,10 +317,7 @@ function init_cinder() { fi fi - # Create cache dir - sudo mkdir -p $CINDER_AUTH_CACHE_DIR - sudo chown $STACK_USER $CINDER_AUTH_CACHE_DIR - rm -f $CINDER_AUTH_CACHE_DIR/* + create_cinder_cache_dir } # install_cinder() - Collect source and prepare diff --git a/lib/glance b/lib/glance index 5d48129d..80d3902a 100644 --- a/lib/glance +++ b/lib/glance @@ -141,6 +141,17 @@ function configure_glance() { cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON } +# create_glance_cache_dir() - Part of the init_glance() process +function create_glance_cache_dir() { + # Create cache dir + sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api + sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/api + rm -f $GLANCE_AUTH_CACHE_DIR/api/* + sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/registry + sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/registry + rm -f $GLANCE_AUTH_CACHE_DIR/registry/* +} + # init_glance() - Initialize databases, etc. function init_glance() { # Delete existing images @@ -151,18 +162,13 @@ function init_glance() { rm -rf $GLANCE_CACHE_DIR mkdir -p $GLANCE_CACHE_DIR - # (re)create glance database + # (Re)create glance database recreate_database glance utf8 + # Migrate glance database $GLANCE_BIN_DIR/glance-manage db_sync - # Create cache dir - sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api - sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/api - rm -f $GLANCE_AUTH_CACHE_DIR/api/* - sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/registry - sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/registry - rm -f $GLANCE_AUTH_CACHE_DIR/registry/* + create_glance_cache_dir } # install_glanceclient() - Collect source and prepare diff --git a/lib/nova b/lib/nova index 1681af77..e3597196 100644 --- a/lib/nova +++ b/lib/nova @@ -453,6 +453,14 @@ function create_nova_conf() { done } +# create_nova_cache_dir() - Part of the init_nova() process +function create_nova_cache_dir() { + # Create cache dir + sudo mkdir -p $NOVA_AUTH_CACHE_DIR + sudo chown $STACK_USER $NOVA_AUTH_CACHE_DIR + rm -f $NOVA_AUTH_CACHE_DIR/* +} + function create_nova_conf_nova_network() { iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NET_MAN" iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE" @@ -463,14 +471,17 @@ function create_nova_conf_nova_network() { fi } +# create_nova_keys_dir() - Part of the init_nova() process +function create_nova_keys_dir() { + # Create keys dir + sudo mkdir -p ${NOVA_STATE_PATH}/keys + sudo chown -R $STACK_USER ${NOVA_STATE_PATH} +} + # init_nova() - Initialize databases, etc. function init_nova() { - # Nova Database - # ------------- - - # All nova components talk to a central database. We will need to do this step - # only once for an entire cluster. - + # All nova components talk to a central database. + # Only do this step once on the API node for an entire cluster. 
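    # Splitting the helpers out means an upgrade tool can call them piecemeal; a
    # hypothetical Grenade-style caller (target path variable assumed) would do
    # roughly:
    #   source $TARGET_DEVSTACK_DIR/lib/nova
    #   create_nova_cache_dir
    #   create_nova_keys_dir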
if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then # (Re)create nova database # Explicitly use latin1: to avoid lp#829209, nova expects the database to @@ -478,7 +489,7 @@ function init_nova() { # 082_essex.py in nova) recreate_database nova latin1 - # (Re)create nova database + # Migrate nova database $NOVA_BIN_DIR/nova-manage db sync # (Re)create nova baremetal database @@ -488,15 +499,8 @@ function init_nova() { fi fi - # Create cache dir - sudo mkdir -p $NOVA_AUTH_CACHE_DIR - sudo chown $STACK_USER $NOVA_AUTH_CACHE_DIR - rm -f $NOVA_AUTH_CACHE_DIR/* - - # Create the keys folder - sudo mkdir -p ${NOVA_STATE_PATH}/keys - # make sure we own NOVA_STATE_PATH and all subdirs - sudo chown -R $STACK_USER ${NOVA_STATE_PATH} + create_nova_cache_dir + create_nova_keys_dir } # install_novaclient() - Collect source and prepare From f29bb32d5c72cbc3b4fed49936982fbbc00690a4 Mon Sep 17 00:00:00 2001 From: Shiv Haris Date: Wed, 23 Jan 2013 03:00:16 +0000 Subject: [PATCH 961/967] Devstack changes for Brocade Quantum Plugin blueprint brocade-quantum-plugin Change-Id: I238ee0a89742ac904ead0f4700f027e841f04fe1 --- lib/quantum_plugins/brocade | 49 +++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 lib/quantum_plugins/brocade diff --git a/lib/quantum_plugins/brocade b/lib/quantum_plugins/brocade new file mode 100644 index 00000000..c372c19f --- /dev/null +++ b/lib/quantum_plugins/brocade @@ -0,0 +1,49 @@ +# Brocade Quantum Plugin +# ---------------------- + +# Save trace setting +BRCD_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +function is_quantum_ovs_base_plugin() { + return 1 +} + +function quantum_plugin_create_nova_conf() { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"} +} + +function quantum_plugin_install_agent_packages() { + install_package bridge-utils +} + +function quantum_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/brocade + Q_PLUGIN_CONF_FILENAME=brocade.ini + Q_DB_NAME="brcd_quantum" + Q_PLUGIN_CLASS="quantum.plugins.brocade.QuantumPlugin.BrocadePluginV2" +} + +function quantum_plugin_configure_debug_command() { + : +} + +function quantum_plugin_configure_dhcp_agent() { + : +} + +function quantum_plugin_configure_l3_agent() { + : +} + +function quantum_plugin_configure_plugin_agent() { + AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" +} + +function quantum_plugin_setup_interface_driver() { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver +} + +# Restore xtrace +$BRCD_XTRACE From 25ebbcd1a7bc95e69ad32b19245ce0990a29eaa7 Mon Sep 17 00:00:00 2001 From: Jeremy Stanley Date: Sun, 17 Feb 2013 15:45:55 +0000 Subject: [PATCH 962/967] Bring back screen logs for noninteractive runs. * functions(screen_it): Prior to 58e2134 screen logs were generated even when run without $SCREEN_DEV. We want to be able to capture these with devstack-gate for later inspection, so this patch reintroduces that capability. 
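For reference, a minimal localrc sketch for this noninteractive case (log path
assumed; both variables are the stock devstack names): window stuffing off, screen
logs still captured for later inspection:

    SCREEN_DEV=False
    SCREEN_LOGDIR=$DEST/logs/screen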
Change-Id: Ife127b47d7700878e02285281854595bc6585972 --- functions | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/functions b/functions index 79c82a45..ae63436a 100644 --- a/functions +++ b/functions @@ -747,17 +747,19 @@ function screen_it { screen_rc "$1" "$2" screen -S $SCREEN_NAME -X screen -t $1 + + if [[ -n ${SCREEN_LOGDIR} ]]; then + screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log + screen -S $SCREEN_NAME -p $1 -X log on + ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log + fi + if [[ "$SCREEN_DEV" = "True" ]]; then # sleep to allow bash to be ready to be send the command - we are # creating a new window in screen and then sends characters, so if # bash isn't running by the time we send the command, nothing happens sleep 1.5 - if [[ -n ${SCREEN_LOGDIR} ]]; then - screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log - screen -S $SCREEN_NAME -p $1 -X log on - ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log - fi NL=`echo -ne '\015'` screen -S $SCREEN_NAME -p $1 -X stuff "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" else From 5096ba79c1ba8ebf83933054d69e21741984e54c Mon Sep 17 00:00:00 2001 From: Arata Notsu Date: Mon, 18 Feb 2013 18:49:01 +0900 Subject: [PATCH 963/967] Correct syntax error in stack.sh for baremtal dhcp-option Change-Id: I0e29bf2e429b65065fdcd9e38b16a7ab6c04d917 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 0f009fc6..c9ca43fc 100755 --- a/stack.sh +++ b/stack.sh @@ -1266,7 +1266,7 @@ if is_service_enabled nova && is_baremetal; then sudo dnsmasq --conf-file= --port=0 --enable-tftp --tftp-root=/tftpboot \ --dhcp-boot=pxelinux.0 --bind-interfaces --pid-file=/var/run/dnsmasq.pid \ --interface=$BM_DNSMASQ_IFACE --dhcp-range=$BM_DNSMASQ_RANGE \ - ${$BM_DNSMASQ_DNS:+--dhcp-option=option:dns-server,$BM_DNSMASQ_DNS} + ${BM_DNSMASQ_DNS:+--dhcp-option=option:dns-server,$BM_DNSMASQ_DNS} # ensure callback daemon is running sudo pkill nova-baremetal-deploy-helper || true screen_it baremetal "nova-baremetal-deploy-helper" From 8396d4f27c296eed4ced5e44d868e6d90257e73e Mon Sep 17 00:00:00 2001 From: Stephen Ma Date: Mon, 18 Feb 2013 05:32:59 -0800 Subject: [PATCH 964/967] Added option Q_FLOATING_ALLOCATION_POOL to define an allocation-pool for floating IPs. Change-Id: If31b34ebb8095aa260c19292cf63826522908db9 --- lib/quantum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/quantum b/lib/quantum index f3a3ec4d..61a5218e 100644 --- a/lib/quantum +++ b/lib/quantum @@ -297,7 +297,7 @@ function create_quantum_initial_network() { quantum router-interface-add $ROUTER_ID $SUBNET_ID # Create an external network, and a subnet. 
Configure the external network as router gw EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2) - EXT_GW_IP=$(quantum subnet-create --ip_version 4 $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) + EXT_GW_IP=$(quantum subnet-create --ip_version 4 ${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) quantum router-gateway-set $ROUTER_ID $EXT_NET_ID if is_quantum_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then From 86d8fbb59c774569d9b273302e22ae5a664cdaff Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Tue, 12 Feb 2013 21:58:33 -0800 Subject: [PATCH 965/967] Baremetal should start using scheduler filters. The baremetal driver should start using scheduler filters, particularly the RetryFilter and ComputeFilter, as some functionality in Nova depends on these. However, the ComputeCapabilitiesFilter currently does not work with baremetal in devstack due to an order-of-operations issue, so we explicitly list the other filters that we do want. Change-Id: Icc4b074c6f99e3e4ffbcf5eef0f9bb6f0f5c1996 --- stack.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 0f009fc6..ea5ddb28 100755 --- a/stack.sh +++ b/stack.sh @@ -1073,9 +1073,11 @@ if is_service_enabled nova; then iniset $NOVA_CONF DEFAULT compute_driver nova.virt.baremetal.driver.BareMetalDriver iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER iniset $NOVA_CONF DEFAULT scheduler_host_manager nova.scheduler.baremetal_host_manager.BaremetalHostManager - iniset $NOVA_CONF DEFAULT scheduler_default_filters AllHostsFilter - iniset $NOVA_CONF baremetal driver $BM_DRIVER + # NOTE(deva): ComputeCapabilitiesFilter does not currently work with Baremetal. See bug # 1129485 + # As a work around, we disable CCFilter by explicitly enabling all the other default filters. 
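# A localrc sketch for the new Q_FLOATING_ALLOCATION_POOL knob from the previous
# patch (addresses assumed); the value is passed straight through to
# "quantum subnet-create --allocation-pool":
#   FLOATING_RANGE=172.24.4.0/24
#   Q_FLOATING_ALLOCATION_POOL=start=172.24.4.10,end=172.24.4.126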
+ iniset $NOVA_CONF DEFAULT scheduler_default_filters ComputeFilter,RetryFilter,AvailabilityZoneFilter,ImagePropertiesFilter iniset $NOVA_CONF baremetal instance_type_extra_specs cpu_arch:$BM_CPU_ARCH + iniset $NOVA_CONF baremetal driver $BM_DRIVER iniset $NOVA_CONF baremetal power_manager $BM_POWER_MANAGER iniset $NOVA_CONF baremetal tftp_root /tftpboot From 2bccb8604e3ef9ccfd3e33471b049a2ca6dfeb9c Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Wed, 20 Feb 2013 16:31:34 +0000 Subject: [PATCH 966/967] Remove ssh tests diabling as #1074039 is fixed Removed the setting completely so that this is left under control of tempest.conf.sample Fixes LP# 1130750 Change-Id: I710f1c111e66834f4bc7020cad82c04bf495441c --- lib/tempest | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index e43f6d75..364323de 100644 --- a/lib/tempest +++ b/lib/tempest @@ -212,8 +212,6 @@ function configure_tempest() { TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-False} fi iniset $TEMPEST_CONF compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} - #Skip until #1074039 is fixed - iniset $TEMPEST_CONF compute run_ssh False iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED iniset $TEMPEST_CONF compute network_for_ssh $PRIVATE_NETWORK_NAME iniset $TEMPEST_CONF compute ip_version_for_ssh 4 From 0f2d954b82e44d7bbd646e200510beb1ca3e469e Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 20 Feb 2013 17:51:19 -0600 Subject: [PATCH 967/967] Fix create_userrc.sh private key hang tools/create_userrc.sh hangs in a couple of mv commands now that private keys are created with mode 400. mv is prompting to override the permissions, so let's just -f it all. Change-Id: I8fbb24da6582edcff741653ffdf8bf683b79851a --- tools/create_userrc.sh | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index 55cb8fac..619d63f7 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -1,6 +1,10 @@ #!/usr/bin/env bash -#Warning: This script just for development purposes +# **create_userrc.sh** + +# Pre-create rc files and credentials for the default users. + +# Warning: This script just for development purposes ACCOUNT_DIR=./accrc @@ -164,12 +168,12 @@ function add_entry(){ local ec2_cert="$rcfile-cert.pem" local ec2_private_key="$rcfile-pk.pem" # Try to preserve the original file on fail (best effort) - mv "$ec2_private_key" "$ec2_private_key.old" &>/dev/null - mv "$ec2_cert" "$ec2_cert.old" &>/dev/null + mv -f "$ec2_private_key" "$ec2_private_key.old" &>/dev/null + mv -f "$ec2_cert" "$ec2_cert.old" &>/dev/null # It will not create certs when the password is incorrect if ! nova --os-password "$user_passwd" --os-username "$user_name" --os-tenant-name "$tenant_name" x509-create-cert "$ec2_private_key" "$ec2_cert"; then - mv "$ec2_private_key.old" "$ec2_private_key" &>/dev/null - mv "$ec2_cert.old" "$ec2_cert" &>/dev/null + mv -f "$ec2_private_key.old" "$ec2_private_key" &>/dev/null + mv -f "$ec2_cert.old" "$ec2_cert" &>/dev/null fi cat >"$rcfile" <
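# The hang fixed by this patch can be reproduced in isolation (file names are
# illustrative): when the destination exists with mode 0400 and stdin is a
# terminal, plain mv stops to ask before overwriting, while mv -f does not.
#   touch pk.pem pk.pem.old && chmod 400 pk.pem.old
#   mv pk.pem pk.pem.old        # prompts "overriding mode 0400 ...?" and blocks
#   mv -f pk.pem pk.pem.old     # overwrites without asking, as the script now does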