diff --git a/.gitignore b/.gitignore
index e4820903..f9e26445 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,14 @@
 proto
 *~
+.*.sw[nop]
 *.log
+*.log.[1-9]
 src
 localrc
+local.sh
+files/*.gz
+files/images
+stack-screenrc
+*.pem
+accrc
+.stackenv
diff --git a/.gitreview b/.gitreview
new file mode 100644
index 00000000..570d31a9
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=review.openstack.org
+port=29418
+project=openstack-dev/devstack.git
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 00000000..35c0a522
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,47 @@
+Aaron Lee
+Aaron Rosen
+Adam Gandelman
+Akihiro MOTOKI
+Andrew Laski
+Andy Smith
+Anthony Young
+Armando Migliaccio
+Brad Hall
+Chmouel Boudjnah
+Dan Prince
+Dean Troyer
+Devin Carlen
+Doug hellmann
+Eddie Hebert
+Eoghan Glynn
+Eric Windisch
+Gabriel Hurley
+Gary Kotton
+Hengqing Hu
+Hua ZHANG
+Isaku Yamahata
+Jake Dahn
+James E. Blair
+Jason Cannavale
+Jay Pipes
+Jesse Andrews
+Joe Gordon
+Johannes Erdfelt
+John Postlethwait
+Josh Kearney
+Justin Shepherd
+Ken Pepple
+Kiall Mac Innes
+Matt Joyce
+Osamu Habuka
+Russell Bryant
+Scott Moser
+Sumit Naiksatam
+Thierry Carrez
+Todd Willey
+Tres Henry
+Vincent Untz
+Vishvananda Ishaya
+Yun Mao
+Yong Sheng Gong
+Zhongyue Luo
diff --git a/HACKING.rst b/HACKING.rst
new file mode 100644
index 00000000..6ad8c7e6
--- /dev/null
+++ b/HACKING.rst
@@ -0,0 +1,216 @@
+Contributing to DevStack
+========================
+
+
+General
+-------
+
+DevStack is written in POSIX shell script. This choice was made because
+it best illustrates the configuration steps that this implementation takes
+in setting up and interacting with OpenStack components. DevStack specifies
+Bash and is compatible with Bash 3.
+
+DevStack's official repository is located on GitHub at
+https://github.com/openstack-dev/devstack.git. Besides the master branch that
+tracks the OpenStack trunk branches, a separate branch is maintained for all
+OpenStack releases starting with Diablo (stable/diablo).
+
+Contributing code to DevStack follows the usual OpenStack process as described
+in `How To Contribute`__ in the OpenStack wiki. `DevStack's LaunchPad project`__
+contains the usual links for blueprints, bugs, etc.
+
+__ contribute_
+.. _contribute: http://wiki.openstack.org/HowToContribute
+
+__ lp_
+.. _lp: https://launchpad.net/~devstack
+
+The primary script in DevStack is ``stack.sh``, which performs the bulk of the
+work for DevStack's use cases. There is a subscript ``functions`` that contains
+generally useful shell functions and is used by a number of the scripts in
+DevStack.
+
+A number of additional scripts can be found in the ``tools`` directory that may
+be useful in setting up special-case uses of DevStack. These include: bare metal
+deployment, ramdisk deployment and Jenkins integration.
+
+
+Scripts
+-------
+
+DevStack scripts should generally begin by calling ``env(1)`` in the shebang line::
+
+    #!/usr/bin/env bash
+
+Sometimes the script needs to know the location of the DevStack install directory.
+``TOP_DIR`` should always point there, even if the script itself is located in
+a subdirectory::
+
+    # Keep track of the current devstack directory.
+    TOP_DIR=$(cd $(dirname "$0") && pwd)
+
+Many scripts will utilize shared functions from the ``functions`` file.
+There are also rc files (``stackrc`` and ``openrc``) that are often included
+to set the primary configuration of the user environment::
+
+    # Keep track of the current devstack directory.
+    TOP_DIR=$(cd $(dirname "$0") && pwd)
+
+    # Import common functions
+    source $TOP_DIR/functions
+
+    # Import configuration
+    source $TOP_DIR/openrc
+
+``stack.sh`` is a rather large monolithic script that flows through from beginning
+to end. The process of breaking it down into project-level sub-scripts is nearly
+complete and should make ``stack.sh`` easier to read and manage.
+
+These library sub-scripts have a number of fixed entry points, some of which may
+just be stubs. These entry points will be called by ``stack.sh`` in the
+following order::
+
+    install_XXXX
+    configure_XXXX
+    init_XXXX
+    start_XXXX
+    stop_XXXX
+    cleanup_XXXX
+
+There is a sub-script template in ``lib/templates`` to be used in creating new
+service sub-scripts. The comments in ``<>`` are meta comments describing
+how to use the template and should be removed.
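+
+As a rough sketch only (this is not the literal ``lib/templates`` content; it
+assumes the usual ``git_clone``, ``setup_develop`` and ``screen_it`` helpers
+from ``functions``, and the ``FOO_*`` variables would be defined in ``stackrc``),
+a minimal sub-script for a hypothetical service ``foo`` might look like::
+
+    # lib/foo -- hypothetical example service
+
+    # install_foo() - Collect source and prepare
+    function install_foo() {
+        git_clone $FOO_REPO $FOO_DIR $FOO_BRANCH
+        setup_develop $FOO_DIR
+    }
+
+    # configure_foo() - Set config files, create data dirs, etc
+    function configure_foo() {
+        cp $FOO_DIR/etc/foo.conf.sample $FOO_CONF
+    }
+
+    # init_foo() - Initialize databases, etc
+    function init_foo() {
+        :
+    }
+
+    # start_foo() - Start running processes, including screen
+    function start_foo() {
+        screen_it foo "cd $FOO_DIR && bin/foo-server --config-file=$FOO_CONF"
+    }
+
+    # stop_foo() - Stop running processes
+    function stop_foo() {
+        screen -S stack -p foo -X kill
+    }
+
+    # cleanup_foo() - Remove residual data files, anything left over from a
+    # previous run that would need to be cleaned up
+    function cleanup_foo() {
+        :
+    }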
+
+In order to show the dependencies and conditions under which project functions
+are executed, the top-level conditional testing for things like ``is_service_enabled``
+should be done in ``stack.sh``. There may be nested conditionals that need
+to be in the sub-script, such as testing for keystone being enabled in
+``configure_swift()``.
+
+
+stackrc
+-------
+
+``stackrc`` is the global configuration file for DevStack. It is responsible for
+calling ``localrc`` if it exists so configuration can be overridden by the user.
+
+The criteria for what belongs in ``stackrc`` can be vaguely summarized as
+follows:
+
+* All project repositories and branches (for historical reasons)
+* Global configuration that may be referenced in ``localrc``, i.e. ``DEST``, ``DATA_DIR``
+* Global service configuration like ``ENABLED_SERVICES``
+* Variables used by multiple services that do not have a clear owner, i.e.
+  ``VOLUME_BACKING_FILE_SIZE`` (nova-volumes and cinder) or ``PUBLIC_NETWORK_NAME``
+  (nova-network and quantum)
+* Variables that cannot be cleanly declared in a project file due to
+  dependency ordering, i.e. the order of sourcing the project files cannot
+  be changed for other reasons but the earlier file needs to dereference a
+  variable set in the later file. This should be rare.
+
+Also, variable declarations in ``stackrc`` do NOT allow overriding (the form
+``FOO=${FOO:-baz}``); if they did, they could simply be set in ``localrc``
+and could stay in the project file.
+
+Documentation
+-------------
+
+The official DevStack repo on GitHub does not include a gh-pages branch that
+GitHub uses to create static web sites. That branch is maintained in the
+`CloudBuilders DevStack repo`__ mirror that supports the
+http://devstack.org site. This is the primary DevStack
+documentation along with the DevStack scripts themselves.
+
+__ repo_
+.. _repo: https://github.com/cloudbuilders/devstack
+
+All of the scripts are processed with shocco_ to render them with the comments
+as text describing the script below. For this reason we tend to be a little
+verbose in the comments _ABOVE_ the code they pertain to. Shocco also supports
+Markdown formatting in the comments; use it sparingly. Specifically, ``stack.sh``
+uses Markdown headers to divide the script into logical sections.
+
+.. _shocco: http://rtomayko.github.com/shocco/
+
+
+Exercises
+---------
+
+The scripts in the exercises directory are meant to a) perform basic operational
+checks on certain aspects of OpenStack; and b) document the use of the
+OpenStack command-line clients.
+
+In addition to the guidelines above, exercise scripts MUST follow the structure
+outlined here. ``swift.sh`` is perhaps the clearest example of these guidelines.
+These scripts are executed serially by ``exercise.sh`` in testing situations.
+
+* Begin and end with a banner that stands out in a sea of script logs to aid
+  in debugging failures, particularly in automated testing situations. If the
+  end banner is not displayed, the script ended prematurely and can be assumed
+  to have failed.
+
+  ::
+
+    echo "**************************************************"
+    echo "Begin DevStack Exercise: $0"
+    echo "**************************************************"
+    ...
+    set +o xtrace
+    echo "**************************************************"
+    echo "End DevStack Exercise: $0"
+    echo "**************************************************"
+
+* The scripts will generally have the shell ``xtrace`` attribute set to display
+  the actual commands being executed, and the ``errexit`` attribute set to exit
+  the script on non-zero exit codes::
+
+    # This script exits on an error so that errors don't compound and you see
+    # only the first error that occurred.
+    set -o errexit
+
+    # Print the commands being run so that we can see the command that triggers
+    # an error. It is also useful for following along as the install occurs.
+    set -o xtrace
+
+* Settings and configuration are stored in ``exerciserc``, which must be
+  sourced after ``openrc`` or ``stackrc``::
+
+    # Import exercise configuration
+    source $TOP_DIR/exerciserc
+
+* There are a couple of helper functions in the common ``functions`` sub-script
+  that check for non-zero exit codes and unset environment variables, print a
+  message and exit the script. These should be called after most client
+  commands that are not otherwise checked to short-circuit long timeouts
+  (instance boot failure, for example)::
+
+    swift post $CONTAINER
+    die_if_error "Failure creating container $CONTAINER"
+
+    FLOATING_IP=`euca-allocate-address | cut -f2`
+    die_if_not_set FLOATING_IP "Failure allocating floating IP"
+
+* If you want an exercise to be skipped when, for example, a service it
+  requires is not enabled, exit the exercise with the special exit code 55 and
+  it will be detected as skipped.
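+  For example, ``exercises/boot_from_volume.sh`` (below) skips itself when
+  cinder is not enabled::
+
+    # If cinder is not enabled we exit with exitcode 55 so that
+    # the exercise is skipped
+    is_service_enabled cinder || exit 55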
+
+* The exercise scripts should only use the various OpenStack client binaries to
+  interact with OpenStack. This specifically excludes any ``*-manage`` tools
+  as those assume direct access to configuration and databases, as well as direct
+  database access from the exercise itself.
+
+* If specific configuration needs to be present for the exercise to complete,
+  it should be staged in ``stack.sh``, or called from ``stack.sh`` (see
+  ``files/keystone_data.sh`` for an example of this).
+
+* The ``OS_*`` environment variables should be the only ones used for all
+  authentication to OpenStack clients as documented in the CLIAuth_ wiki page.
+
+.. _CLIAuth: http://wiki.openstack.org/CLIAuth
+
+* The exercise MUST clean up after itself if successful. If it is not successful,
+  it is assumed that state will be left behind; this allows a chance for developers
+  to look around and attempt to debug the problem. The exercise SHOULD clean up
+  or gracefully handle possible artifacts left over from previous runs if executed
+  again. It is acceptable to require a reboot or even a re-install of DevStack
+  to restore a clean test environment.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..68c771a0
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,176 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
diff --git a/README.md b/README.md
index daf398b1..93107588 100644
--- a/README.md
+++ b/README.md
@@ -1,43 +1,102 @@
-Tool to quickly deploy openstack dev environments.
+DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud.
 
 # Goals
 
-* To quickly build dev openstack environments in clean natty environments
-* To describe working configurations of openstack (which code branches work together? what do config files look like for those branches?)
-* To make it easier for developers to dive into openstack so that they can productively contribute without having to understand every part of the system at once
+* To quickly build dev OpenStack environments in a clean Ubuntu or Fedora environment
+* To describe working configurations of OpenStack (which code branches work together? what do config files look like for those branches?)
+* To make it easier for developers to dive into OpenStack so that they can productively contribute without having to understand every part of the system at once
 * To make it easy to prototype cross-project features
+* To sanity-check OpenStack builds (used in gating commits to the primary repos)
 
 Read more at http://devstack.org (built from the gh-pages branch)
 
-Be sure to carefully read these scripts before you run them as they install software and may alter your networking configuration.
+IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you execute before you run them, as they install software and may alter your networking configuration. We strongly recommend that you run `stack.sh` in a clean and disposable VM when you are first getting started.
 
-# To start a dev cloud on your local machine (installing on a dedicated vm is safer!):
+# DevStack on XenServer
 
+If you would like to use XenServer as the hypervisor, please refer to the instructions in `./tools/xen/README.md`.
+
+# Versions
+
+The DevStack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[release] in the DevStack repo. For example, you can do the following to create a diablo OpenStack cloud:
+
+    git checkout stable/diablo
     ./stack.sh
 
-If working correctly, you should be able to access openstack endpoints, like:
+You can also pick specific OpenStack project releases by setting the appropriate `*_BRANCH` variables in `localrc` (look in `stackrc` for the default set). Usually just before a release there will be milestone-proposed branches that need to be tested:
+
+    GLANCE_REPO=https://github.com/openstack/glance.git
+    GLANCE_BRANCH=milestone-proposed
+
+# Start A Dev Cloud
+
+Installing in a dedicated disposable VM is safer than installing on your dev machine! To start a dev cloud:
+
+    ./stack.sh
+
+When the script finishes executing, you should be able to access OpenStack endpoints, like so:
 
 * Horizon: http://myhost/
 * Keystone: http://myhost:5000/v2.0/
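+
+A quick sanity check that Keystone is answering is to fetch its version document (substitute your own host for `myhost`):
+
+    curl http://myhost:5000/v2.0/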
-# To start a dev cloud in an lxc container:
+We also provide an environment file that you can use to interact with your cloud via CLI:
 
-    ./build_lxc.sh
+    # source openrc file to load your environment with osapi and ec2 creds
+    . openrc
+    # list instances
+    nova list
 
-You will need to configure a bridge and network on your host machine (by default br0) before starting build_lxc.sh. A sample host-only network configuration can be found in lxc_network_hostonlyplusnat.sh.
+If the EC2 API is your cup-o-tea, you can create credentials and use euca2ools:
+
+    # source eucarc to generate EC2 credentials and set up the environment
+    . eucarc
+    # list instances using ec2 api
+    euca-describe-instances
 
 # Customizing
 
-You can tweak environment variables by creating file name 'localrc' should you need to override defaults. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host.
+You can override environment variables used in `stack.sh` by creating a file named `localrc`. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host.
+
+# Database Backend
+
+Multiple database backends are available. The available databases are defined in the lib/databases directory.
+To choose a database backend, add a line to your `localrc` like:
+
+    use_database postgresql
+
+By default, the mysql database backend is used.
+
+# RPC Backend
+
+Multiple RPC backends are available. Currently, this
+includes RabbitMQ (default), Qpid, and ZeroMQ. Your backend of
+choice may be selected via the `localrc`.
+
+Note that selecting more than one RPC backend will result in a failure.
+
+Example (ZeroMQ):
+
+    ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-qpid,zeromq"
+
+Example (Qpid):
+
+    ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-zeromq,qpid"
+
+# Swift
+
+Swift is not installed by default; you can easily enable it by adding this to your `localrc`:
+
+    enable_service swift
+
+If you want a minimal Swift install with only Swift and Keystone, put this in your `localrc` instead:
+
+    disable_all_services
+    enable_service key mysql swift
 
-# Todo
+If you use Swift with Keystone, Swift will authenticate against it; make sure you use the Keystone URL to authenticate against.
 
-* Add python-novaclient cli support
-* syslog
-* Add volume support
-* Add quantum support
+If you enable `swift3` in `ENABLED_SERVICES`, DevStack will install the swift3 middleware emulation. Swift will be configured to act as an S3 endpoint for Keystone, effectively replacing `nova-objectstore`.
 
-# Future
+Only the Swift proxy server is launched in the screen session; all other services are started in the background and managed by the `swift-init` tool.
 
-* idea: move from screen to tmux?
-* idea: create a live-cd / vmware preview image using this?
+By default Swift will configure 3 replicas (and one spare), which could be I/O intensive on a small VM; if you only want to do some quick testing of the API, you can have a single replica by customizing the variable `SWIFT_REPLICAS` in your `localrc`.
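+
+As an illustration only (the values here are placeholders, not a complete reference), a simple `localrc` might look like:
+
+    # sample localrc -- set your own passwords and adjust to taste
+    ADMIN_PASSWORD=secrete
+    MYSQL_PASSWORD=$ADMIN_PASSWORD
+    RABBIT_PASSWORD=$ADMIN_PASSWORD
+    SERVICE_PASSWORD=$ADMIN_PASSWORD
+    SERVICE_TOKEN=some-random-token
+    # enable Swift as described above
+    enable_service swift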
diff --git a/eucarc b/eucarc
new file mode 100644
index 00000000..2b0f7dd1
--- /dev/null
+++ b/eucarc
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+#
+# source eucarc [username] [tenantname]
+#
+# Create EC2 credentials for the current user as defined by OS_TENANT_NAME:OS_USERNAME
+# Optionally set the tenant/username via openrc
+
+if [[ -n "$1" ]]; then
+    USERNAME=$1
+fi
+if [[ -n "$2" ]]; then
+    TENANT=$2
+fi
+
+# Find the other rc files
+RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd)
+
+# Get user configuration
+source $RC_DIR/openrc
+
+# Set the ec2 url so euca2ools works
+export EC2_URL=$(keystone catalog --service ec2 | awk '/ publicURL / { print $4 }')
+
+# Create EC2 credentials for the current user
+CREDS=$(keystone ec2-credentials-create)
+export EC2_ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }')
+export EC2_SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }')
+
+# Euca2ools Certificate stuff for uploading bundles
+# See exercises/bundle.sh to see how to get certs using nova cli
+NOVA_KEY_DIR=${NOVA_KEY_DIR:-$RC_DIR}
+export S3_URL=$(keystone catalog --service s3 | awk '/ publicURL / { print $4 }')
+export EC2_USER_ID=42 # nova does not use user id, but bundling requires it
+export EC2_PRIVATE_KEY=${NOVA_KEY_DIR}/pk.pem
+export EC2_CERT=${NOVA_KEY_DIR}/cert.pem
+export NOVA_CERT=${NOVA_KEY_DIR}/cacert.pem
+export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set
+alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user ${EC2_USER_ID} --ec2cert ${NOVA_CERT}"
+alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}"
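+
+# Example usage (a sketch; assumes stack.sh has completed and euca2ools
+# is installed -- see exercises/euca.sh for the full workflow):
+#
+#   . eucarc                    # or: . eucarc demo demo
+#   euca-describe-images        # EC2_URL and the keys above now take effect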
diff --git a/exercise.sh b/exercise.sh
index 7703f401..5b3c56e2 100755
--- a/exercise.sh
+++ b/exercise.sh
@@ -1,5 +1,16 @@
 #!/usr/bin/env bash
 
+# **exercise.sh**
+
+# Keep track of the current devstack directory.
+TOP_DIR=$(cd $(dirname "$0") && pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Load local configuration
+source $TOP_DIR/stackrc
+
 # Run everything in the exercises/ directory that isn't explicitly disabled
 
 # comma separated list of script basenames to skip
@@ -17,14 +28,17 @@ skips=""
 # Loop over each possible script (by basename)
 for script in $basenames; do
-    if [[ "$SKIP_EXERCISES" =~ $script ]] ; then
+    if [[ ,$SKIP_EXERCISES, =~ ,$script, ]] ; then
         skips="$skips $script"
     else
-        echo =========================
+        echo "====================================================================="
         echo Running $script
-        echo =========================
+        echo "====================================================================="
         $EXERCISE_DIR/$script.sh
-        if [[ $? -ne 0 ]] ; then
+        exitcode=$?
+        if [[ $exitcode == 55 ]]; then
+            skips="$skips $script"
+        elif [[ $exitcode -ne 0 ]] ; then
             failures="$failures $script"
         else
            passes="$passes $script"
@@ -33,8 +47,7 @@ for script in $basenames; do
 done
 
 # output status of exercise run
-echo =========================
-echo =========================
+echo "====================================================================="
 for script in $skips; do
     echo SKIP $script
 done
@@ -44,3 +57,8 @@ done
 for script in $failures; do
     echo FAILED $script
 done
+echo "====================================================================="
+
+if [ -n "$failures" ] ; then
+    exit 1
+fi
diff --git a/exerciserc b/exerciserc
new file mode 100644
index 00000000..c26ec2ce
--- /dev/null
+++ b/exerciserc
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+#
+# source exerciserc
+#
+# Configure the DevStack exercise scripts
+# For best results, source this _after_ stackrc/localrc as it will set
+# values only if they are not already set.
+
+# Max time to wait while vm goes from build to active state
+export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30}
+
+# Max time to wait for proper IP association and dis-association.
+export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15}
+
+# Max time till the vm is bootable
+export BOOT_TIMEOUT=${BOOT_TIMEOUT:-30}
+
+# Max time from run instance command until it is running
+export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))}
+
+# Max time to wait for a vm to terminate
+export TERMINATE_TIMEOUT=${TERMINATE_TIMEOUT:-30}
+
+# Max time to wait for a euca-volume command to propagate
+export VOLUME_TIMEOUT=${VOLUME_TIMEOUT:-30}
+
+# Max time to wait for a euca-delete command to propagate
+export VOLUME_DELETE_TIMEOUT=${VOLUME_DELETE_TIMEOUT:-60}
+
+# The size of the volume we want to boot from; some storage back-ends
+# do not allow a disk resize, so it's important that this can be tuned
+export DEFAULT_VOLUME_SIZE=${DEFAULT_VOLUME_SIZE:-1}
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
new file mode 100755
index 00000000..ae3198f9
--- /dev/null
+++ b/exercises/aggregates.sh
@@ -0,0 +1,147 @@
+#!/usr/bin/env bash
+
+# **aggregates.sh**
+
+# This script demonstrates how to use host aggregates:
+#  * Create an aggregate
+#  * Update aggregate details
+#  * Test aggregate metadata
+#  * Test aggregate delete
+#  * Test general aggregates (https://blueprints.launchpad.net/nova/+spec/general-host-aggregates)
+#  * Test add/remove hosts (with one host)
+
+echo "**************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "**************************************************"
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
+
+# Test as the admin user
+. openrc admin admin
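+# (openrc accepts optional username/tenant arguments; aggregate calls are
+# admin-only under Nova's default policy, hence admin/admin)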
+
+
+# Create an aggregate
+# ===================
+
+AGGREGATE_NAME=test_aggregate_$RANDOM
+AGGREGATE2_NAME=test_aggregate_$RANDOM
+AGGREGATE_A_ZONE=nova
+
+exit_if_aggregate_present() {
+    aggregate_name=$1
+
+    if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then
+        echo "SUCCESS $aggregate_name not present"
+    else
+        echo "ERROR found aggregate: $aggregate_name"
+        exit 1
+    fi
+}
+
+exit_if_aggregate_present $AGGREGATE_NAME
+
+AGGREGATE_ID=$(nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1)
+AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1)
+
+# check aggregate created
+nova aggregate-list | grep -q " $AGGREGATE_NAME " || die "Aggregate $AGGREGATE_NAME not created"
+
+
+# Ensure creating a duplicate fails
+# =================================
+
+if nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE; then
+    echo "ERROR could create duplicate aggregate"
+    exit 1
+fi
+
+
+# Test aggregate-update (and aggregate-details)
+# =============================================
+AGGREGATE_NEW_NAME=test_aggregate_$RANDOM
+
+nova aggregate-update $AGGREGATE_ID $AGGREGATE_NEW_NAME
+nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NEW_NAME
+nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE
+
+nova aggregate-update $AGGREGATE_ID $AGGREGATE_NAME $AGGREGATE_A_ZONE
+nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NAME
+nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE
+
+
+# Test aggregate-set-metadata
+# ===========================
+META_DATA_1_KEY=asdf
+META_DATA_2_KEY=foo
+META_DATA_3_KEY=bar
+
+# Ensure no additional metadata is set
+nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}"
+
+nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_1_KEY}=123
+nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
+nova aggregate-details $AGGREGATE_ID | grep 123
+
+nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_2_KEY}=456
+nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
+nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY
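+
+# (a key passed without '=value' is cleared, so the next call unsets
+# $META_DATA_2_KEY while setting $META_DATA_3_KEY)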
"**************************************************" +echo "End DevStack Exercise: $0" +echo "**************************************************" diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh new file mode 100755 index 00000000..679091bb --- /dev/null +++ b/exercises/boot_from_volume.sh @@ -0,0 +1,209 @@ +#!/usr/bin/env bash + +# **boot_from_volume.sh** + +# This script demonstrates how to boot from a volume. It does the following: +# * Create a bootable volume +# * Boot a volume-backed instance + +echo "*********************************************************************" +echo "Begin DevStack Exercise: $0" +echo "*********************************************************************" + +# This script exits on an error so that errors don't compound and you see +# only the first error that occured. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. +set -o xtrace + + +# Settings +# ======== + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc + +# Import quantum functions if needed +if is_service_enabled quantum; then + source $TOP_DIR/lib/quantum +fi + +# Import exercise configuration +source $TOP_DIR/exerciserc + +# If cinder is not enabled we exit with exitcode 55 so that +# the exercise is skipped +is_service_enabled cinder || exit 55 + +# Instance type to create +DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} + +# Boot this image, use first AMI image if unset +DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} + +# Security group name +SECGROUP=${SECGROUP:-boot_secgroup} + +# Instance and volume names +VM_NAME=${VM_NAME:-ex-bfv-inst} +VOL_NAME=${VOL_NAME:-ex-vol-bfv} + + +# Launching a server +# ================== + +# List servers for tenant: +nova list + +# Images +# ------ + +# List the images available +glance image-list + +# Grab the id of the image to launch +IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) +die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" + +# Security Groups +# --------------- + +# List security groups +nova secgroup-list + +# Create a secgroup +if ! nova secgroup-list | grep -q $SECGROUP; then + nova secgroup-create $SECGROUP "$SECGROUP description" + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then + echo "Security group not created" + exit 1 + fi +fi + +# Configure Security Group Rules +if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then + nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 +fi +if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then + nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 +fi + +# List secgroup rules +nova secgroup-list-rules $SECGROUP + +# Set up instance +# --------------- + +# List flavors +nova flavor-list + +# Select a flavor +INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) +if [[ -z "$INSTANCE_TYPE" ]]; then + # grab the first flavor in the list to launch if default doesn't exist + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) +fi + +# Clean-up from previous runs +nova delete $VM_NAME || true +if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then + echo "server didn't terminate!" 
+nova delete $VM_NAME || true
+if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
+    echo "server didn't terminate!"
+    exit 1
+fi
+
+# Setup Keypair
+KEY_NAME=test_key
+KEY_FILE=key.pem
+nova keypair-delete $KEY_NAME || true
+nova keypair-add $KEY_NAME > $KEY_FILE
+chmod 600 $KEY_FILE
+
+# Set up volume
+# -------------
+
+# Delete any old volume
+cinder delete $VOL_NAME || true
+if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
+    echo "Volume $VOL_NAME not deleted"
+    exit 1
+fi
+
+# Create the bootable volume
+start_time=$(date +%s)
+cinder create --image-id $IMAGE --display_name=$VOL_NAME --display_description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
+    die "Failure creating volume $VOL_NAME"
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
+    echo "Volume $VOL_NAME not created"
+    exit 1
+fi
+end_time=$(date +%s)
+echo "Completed cinder create in $((end_time - start_time)) seconds"
+
+# Get volume ID
+VOL_ID=$(cinder list | grep $VOL_NAME | get_field 1)
+die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME"
+
+# Boot instance
+# -------------
+
+# Boot using the --block_device_mapping param. The format of mapping is:
+# <dev_name>=<id>:<type>:<size(GB)>:<delete_on_terminate>
+# Leaving the middle two fields blank appears to do-the-right-thing
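+# For example, to boot from volume $VOL_ID with delete-on-terminate disabled
+# (the blank middle fields take their defaults):
+#   --block-device-mapping vda=$VOL_ID:::0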
+VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security_groups=$SECGROUP --key_name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2)
+die_if_not_set VM_UUID "Failure launching $VM_NAME"
+
+# Check that the status is active within ACTIVE_TIMEOUT seconds
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
+    echo "server didn't become active!"
+    exit 1
+fi
+
+# Get the instance IP
+IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
+die_if_not_set IP "Failure retrieving IP address"
+
+# Private IPs can be pinged in single node deployments
+ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
+
+# Clean up
+# --------
+
+# Delete volume backed instance
+nova delete $VM_UUID || die "Failure deleting instance $VM_NAME"
+if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
+    echo "Server $VM_NAME not deleted"
+    exit 1
+fi
+
+# Wait for volume to be released
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
+    echo "Volume $VOL_NAME not released"
+    exit 1
+fi
+
+# Delete volume
+start_time=$(date +%s)
+cinder delete $VOL_ID || die "Failure deleting volume $VOL_NAME"
+if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
+    echo "Volume $VOL_NAME not deleted"
+    exit 1
+fi
+end_time=$(date +%s)
+echo "Completed cinder delete in $((end_time - start_time)) seconds"
+
+# Delete secgroup
+nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP"
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
diff --git a/exercises/bundle.sh b/exercises/bundle.sh
new file mode 100755
index 00000000..12f27323
--- /dev/null
+++ b/exercises/bundle.sh
@@ -0,0 +1,71 @@
+#!/usr/bin/env bash
+
+# **bundle.sh**
+
+# we will use the ``euca2ools`` cli tool that wraps the python boto
+# library to test ec2 bundle upload compatibility
+
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import EC2 configuration
+source $TOP_DIR/eucarc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
+
+# Remove old certificates
+rm -f $TOP_DIR/cacert.pem
+rm -f $TOP_DIR/cert.pem
+rm -f $TOP_DIR/pk.pem
+
+# Get Certificates
+nova x509-get-root-cert $TOP_DIR/cacert.pem
+nova x509-create-cert $TOP_DIR/pk.pem $TOP_DIR/cert.pem
+
+# Max time to wait for image to be registered
+REGISTER_TIMEOUT=${REGISTER_TIMEOUT:-15}
+
+BUCKET=testbucket
+IMAGE=bundle.img
+truncate -s 5M /tmp/$IMAGE
+euca-bundle-image -i /tmp/$IMAGE || die "Failure bundling image $IMAGE"
+
+euca-upload-bundle --debug -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die "Failure uploading bundle $IMAGE to $BUCKET"
+
+AMI=`euca-register $BUCKET/$IMAGE.manifest.xml | cut -f2`
+die_if_not_set AMI "Failure registering $BUCKET/$IMAGE"
+
+# Wait for the image to become available
+if ! timeout $REGISTER_TIMEOUT sh -c "while ! euca-describe-images | grep $AMI | grep -q available; do sleep 1; done"; then
+    echo "Image $AMI not available within $REGISTER_TIMEOUT seconds"
+    exit 1
+fi
+
+# Clean up
+euca-deregister $AMI || die "Failure deregistering $AMI"
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
new file mode 100755
index 00000000..894da742
--- /dev/null
+++ b/exercises/client-args.sh
@@ -0,0 +1,177 @@
+#!/usr/bin/env bash
+
+# **client-args.sh**
+
+# Test OpenStack client authentication arguments handling
+
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
+
+# Unset all of the known NOVA_* vars
+unset NOVA_API_KEY
+unset NOVA_ENDPOINT_NAME
+unset NOVA_PASSWORD
+unset NOVA_PROJECT_ID
+unset NOVA_REGION_NAME
+unset NOVA_URL
+unset NOVA_USERNAME
+unset NOVA_VERSION
+
+# Save the known variables for later
+export x_TENANT_NAME=$OS_TENANT_NAME
+export x_USERNAME=$OS_USERNAME
+export x_PASSWORD=$OS_PASSWORD
+export x_AUTH_URL=$OS_AUTH_URL
+
+# Unset the usual variables to force argument processing
+unset OS_TENANT_NAME
+unset OS_USERNAME
+unset OS_PASSWORD
+unset OS_AUTH_URL
+
+# Common authentication args
+TENANT_ARG="--os_tenant_name=$x_TENANT_NAME"
+TENANT_ARG_DASH="--os-tenant-name=$x_TENANT_NAME"
+ARGS="--os_username=$x_USERNAME --os_password=$x_PASSWORD --os_auth_url=$x_AUTH_URL"
+ARGS_DASH="--os-username=$x_USERNAME --os-password=$x_PASSWORD --os-auth-url=$x_AUTH_URL"
+
+# Set global return
+RETURN=0
+
+# Keystone client
+# ---------------
+if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
+    if [[ "$SKIP_EXERCISES" =~ "key" ]] ; then
+        STATUS_KEYSTONE="Skipped"
+    else
+        echo -e "\nTest Keystone"
+        if keystone $TENANT_ARG_DASH $ARGS_DASH catalog --service identity; then
+            STATUS_KEYSTONE="Succeeded"
+        else
+            STATUS_KEYSTONE="Failed"
+            RETURN=1
+        fi
+    fi
+fi
+
+# Nova client
+# -----------
+
+if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
+    if [[ "$SKIP_EXERCISES" =~ "n-api" ]] ; then
+        STATUS_NOVA="Skipped"
+        STATUS_EC2="Skipped"
+    else
+        # Test OSAPI
+        echo -e "\nTest Nova"
+        if nova $TENANT_ARG_DASH $ARGS_DASH flavor-list; then
+            STATUS_NOVA="Succeeded"
+        else
+            STATUS_NOVA="Failed"
+            RETURN=1
+        fi
+    fi
+fi
+
+# Cinder client
+# -------------
+
+if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
+    if [[ "$SKIP_EXERCISES" =~ "c-api" ]] ; then
+        STATUS_CINDER="Skipped"
+    else
+        echo -e "\nTest Cinder"
+        if cinder $TENANT_ARG_DASH $ARGS_DASH list; then
+            STATUS_CINDER="Succeeded"
+        else
+            STATUS_CINDER="Failed"
+            RETURN=1
+        fi
+    fi
+fi
+
+# Glance client
+# -------------
+
+if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
+    if [[ "$SKIP_EXERCISES" =~ "g-api" ]] ; then
+        STATUS_GLANCE="Skipped"
+    else
+        echo -e "\nTest Glance"
+        if glance $TENANT_ARG_DASH $ARGS_DASH image-list; then
+            STATUS_GLANCE="Succeeded"
+        else
+            STATUS_GLANCE="Failed"
+            RETURN=1
+        fi
+    fi
+fi
+
+# Swift client
+# ------------
+
+if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+    if [[ "$SKIP_EXERCISES" =~ "swift" ]] ; then
+        STATUS_SWIFT="Skipped"
+    else
+        echo -e "\nTest Swift"
+        if swift $TENANT_ARG_DASH $ARGS_DASH stat; then
+            STATUS_SWIFT="Succeeded"
+        else
+            STATUS_SWIFT="Failed"
+            RETURN=1
+        fi
+    fi
+fi
+
+set +o xtrace
+
+# Results
+# -------
+
+function report() {
+    if [[ -n "$2" ]]; then
+        echo "$1: $2"
+    fi
+}
+
+echo -e "\n"
+report "Keystone" $STATUS_KEYSTONE
+report "Nova" $STATUS_NOVA
+report "Cinder" $STATUS_CINDER
+report "Glance" $STATUS_GLANCE
+report "Swift" $STATUS_SWIFT
+
+if (( $RETURN == 0 )); then
+    echo "*********************************************************************"
+    echo "SUCCESS: End DevStack Exercise: $0"
+    echo 
"*********************************************************************" +fi + +exit $RETURN diff --git a/exercises/client-env.sh b/exercises/client-env.sh new file mode 100755 index 00000000..c84e84e5 --- /dev/null +++ b/exercises/client-env.sh @@ -0,0 +1,186 @@ +#!/usr/bin/env bash + +# **client-env.sh** + +# Test OpenStack client enviroment variable handling + +echo "*********************************************************************" +echo "Begin DevStack Exercise: $0" +echo "*********************************************************************" + +# This script exits on an error so that errors don't compound and you see +# only the first error that occured. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. +set -o xtrace + + +# Settings +# ======== + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc + +# Import exercise configuration +source $TOP_DIR/exerciserc + +# Unset all of the known NOVA_* vars +unset NOVA_API_KEY +unset NOVA_ENDPOINT_NAME +unset NOVA_PASSWORD +unset NOVA_PROJECT_ID +unset NOVA_REGION_NAME +unset NOVA_URL +unset NOVA_USERNAME +unset NOVA_VERSION + +for i in OS_TENANT_NAME OS_USERNAME OS_PASSWORD OS_AUTH_URL; do + is_set $i + if [[ $? -ne 0 ]]; then + echo "$i expected to be set" + ABORT=1 + fi +done +if [[ -n "$ABORT" ]]; then + exit 1 +fi + +# Set global return +RETURN=0 + +# Keystone client +# --------------- +if [[ "$ENABLED_SERVICES" =~ "key" ]]; then + if [[ "$SKIP_EXERCISES" =~ "key" ]] ; then + STATUS_KEYSTONE="Skipped" + else + echo -e "\nTest Keystone" + if keystone catalog --service identity; then + STATUS_KEYSTONE="Succeeded" + else + STATUS_KEYSTONE="Failed" + RETURN=1 + fi + fi +fi + +# Nova client +# ----------- + +if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then + if [[ "$SKIP_EXERCISES" =~ "n-api" ]] ; then + STATUS_NOVA="Skipped" + STATUS_EC2="Skipped" + else + # Test OSAPI + echo -e "\nTest Nova" + if nova flavor-list; then + STATUS_NOVA="Succeeded" + else + STATUS_NOVA="Failed" + RETURN=1 + fi + + # Test EC2 API + echo -e "\nTest EC2" + # Get EC2 creds + source $TOP_DIR/eucarc + + if euca-describe-images; then + STATUS_EC2="Succeeded" + else + STATUS_EC2="Failed" + RETURN=1 + fi + + # Clean up side effects + unset NOVA_VERSION + fi +fi + +# Cinder client +# ------------- + +if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then + if [[ "$SKIP_EXERCISES" =~ "c-api" ]] ; then + STATUS_CINDER="Skipped" + else + echo -e "\nTest Cinder" + if cinder list; then + STATUS_CINDER="Succeeded" + else + STATUS_CINDER="Failed" + RETURN=1 + fi + fi +fi + +# Glance client +# ------------- + +if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then + if [[ "$SKIP_EXERCISES" =~ "g-api" ]] ; then + STATUS_GLANCE="Skipped" + else + echo -e "\nTest Glance" + if glance image-list; then + STATUS_GLANCE="Succeeded" + else + STATUS_GLANCE="Failed" + RETURN=1 + fi + fi +fi + +# Swift client +# ------------ + +if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then + if [[ "$SKIP_EXERCISES" =~ "swift" ]] ; then + STATUS_SWIFT="Skipped" + else + echo -e "\nTest Swift" + if swift stat; then + STATUS_SWIFT="Succeeded" + else + STATUS_SWIFT="Failed" + RETURN=1 + fi + fi +fi + +set +o xtrace + +# Results +# ------- + +function report() { + if [[ -n "$2" ]]; then + echo "$1: $2" + fi +} + +echo -e "\n" +report 
"Keystone" $STATUS_KEYSTONE +report "Nova" $STATUS_NOVA +report "EC2" $STATUS_EC2 +report "Cinder" $STATUS_CINDER +report "Glance" $STATUS_GLANCE +report "Swift" $STATUS_SWIFT + +if (( $RETURN == 0 )); then + echo "*********************************************************************" + echo "SUCCESS: End DevStack Exercise: $0" + echo "*********************************************************************" +fi + +exit $RETURN diff --git a/exercises/euca.sh b/exercises/euca.sh index 9605ace2..8b15da8d 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -1,8 +1,13 @@ #!/usr/bin/env bash -# we will use the ``euca2ools`` cli tool that wraps the python boto +# **euca.sh** + +# we will use the ``euca2ools`` cli tool that wraps the python boto # library to test ec2 compatibility -# + +echo "*********************************************************************" +echo "Begin DevStack Exercise: $0" +echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see # only the first error that occured. @@ -16,21 +21,164 @@ set -o xtrace # Settings # ======== -# Use openrc + stackrc + localrc for settings -pushd $(cd $(dirname "$0")/.. && pwd) -source ./openrc -popd +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) +VOLUME_SIZE=1 +ATTACH_DEVICE=/dev/vdc + +# Import common functions +source $TOP_DIR/functions + +# Import EC2 configuration +source $TOP_DIR/eucarc + +# Import quantum functions if needed +if is_service_enabled quantum; then + source $TOP_DIR/lib/quantum +fi + +# Import exercise configuration +source $TOP_DIR/exerciserc + +# Instance type to create +DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} + +# Boot this image, use first AMI image if unset +DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} + +# Security group name +SECGROUP=${SECGROUP:-euca_secgroup} -# find a machine image to boot -IMAGE=`euca-describe-images | grep machine | cut -f2` -# launch it -INSTANCE=`euca-run-instances $IMAGE | grep INSTANCE | cut -f2` +# Launching a server +# ================== -# assure it has booted within a reasonable time -if ! timeout $RUNNING_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then +# Find a machine image to boot +IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1` +die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" + +# Add a secgroup +if ! euca-describe-groups | grep -q $SECGROUP; then + euca-add-group -d "$SECGROUP description" $SECGROUP + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-groups | grep -q $SECGROUP; do sleep 1; done"; then + echo "Security group not created" + exit 1 + fi +fi + +# Launch it +INSTANCE=`euca-run-instances -g $SECGROUP -t $DEFAULT_INSTANCE_TYPE $IMAGE | grep INSTANCE | cut -f2` +die_if_not_set INSTANCE "Failure launching instance" + +# Assure it has booted within a reasonable time +if ! timeout $RUNNING_TIMEOUT sh -c "while ! 
euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then
     echo "server didn't become active within $RUNNING_TIMEOUT seconds"
     exit 1
 fi
 
-euca-terminate-instances $INSTANCE
+# Volumes
+# -------
+if [[ "$ENABLED_SERVICES" =~ "c-vol" ]]; then
+    VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2`
+    die_if_not_set VOLUME_ZONE "Failure to find zone for volume"
+
+    VOLUME=`euca-create-volume -s $VOLUME_SIZE -z $VOLUME_ZONE | cut -f2`
+    die_if_not_set VOLUME "Failure to create volume"
+
+    # Test that volume has been created
+    VOLUME=`euca-describe-volumes | cut -f2`
+    die_if_not_set VOLUME "Failure to get volume"
+
+    # Test volume has become available
+    if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then
+        echo "volume didn't become available within $RUNNING_TIMEOUT seconds"
+        exit 1
+    fi
+
+    # Attach volume to an instance
+    euca-attach-volume -i $INSTANCE -d $ATTACH_DEVICE $VOLUME || \
+        die "Failure attaching volume $VOLUME to $INSTANCE"
+    if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q in-use; do sleep 1; done"; then
+        echo "Could not attach $VOLUME to $INSTANCE"
+        exit 1
+    fi
+
+    # Detach volume from an instance
+    euca-detach-volume $VOLUME || \
+        die "Failure detaching volume $VOLUME from $INSTANCE"
+    if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then
+        echo "Could not detach $VOLUME from $INSTANCE"
+        exit 1
+    fi
+
+    # Remove volume
+    euca-delete-volume $VOLUME || \
+        die "Failure to delete volume"
+    if ! timeout $ACTIVE_TIMEOUT sh -c "while euca-describe-volumes | grep $VOLUME; do sleep 1; done"; then
+        echo "Could not delete $VOLUME"
+        exit 1
+    fi
+else
+    echo "Volume Tests Skipped"
+fi
+
+# Allocate floating address
+FLOATING_IP=`euca-allocate-address | cut -f2`
+die_if_not_set FLOATING_IP "Failure allocating floating IP"
+
+# Associate floating address
+euca-associate-address -i $INSTANCE $FLOATING_IP || \
+    die "Failure associating address $FLOATING_IP to $INSTANCE"
+
+# Authorize pinging
+euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
+    die "Failure authorizing rule in $SECGROUP"
+
+# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
+ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
+
+# Revoke pinging
+euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
+    die "Failure revoking rule in $SECGROUP"
+
+# Disassociate floating address
+euca-disassociate-address $FLOATING_IP || \
+    die "Failure disassociating address $FLOATING_IP"
+
+# Wait just a tick for everything above to complete so release doesn't fail
+if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep $INSTANCE | grep -q $FLOATING_IP; do sleep 1; done"; then
+    echo "Floating ip $FLOATING_IP not disassociated within $ASSOCIATE_TIMEOUT seconds"
+    exit 1
+fi
+
+# Release floating address
+euca-release-address $FLOATING_IP || \
+    die "Failure releasing address $FLOATING_IP"
+
+# Wait just a tick for everything above to complete so terminate doesn't fail
+if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep -q $FLOATING_IP; do sleep 1; done"; then
+    echo "Floating ip $FLOATING_IP not released within $ASSOCIATE_TIMEOUT seconds"
+    exit 1
+fi
+
+# Terminate instance
+euca-terminate-instances $INSTANCE || \
+    die "Failure terminating instance $INSTANCE"
+
+# Assure it has terminated within a reasonable time. The behaviour of this
+# case changed with bug/836978. 
Requesting the status of an invalid instance +# will now return an error message including the instance id, so we need to +# filter that out. +if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -ve \"\\\(InstanceNotFound\\\|InvalidInstanceID\[.\]NotFound\\\)\" | grep -q $INSTANCE; do sleep 1; done"; then + echo "server didn't terminate within $TERMINATE_TIMEOUT seconds" + exit 1 +fi + +# Delete secgroup +euca-delete-group $SECGROUP || die "Failure deleting security group $SECGROUP" + +set +o xtrace +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 75046d1a..34ab69d9 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -1,11 +1,12 @@ #!/usr/bin/env bash -# **exercise.sh** - using the cloud can be fun +# **floating_ips.sh** - using the cloud can be fun -# we will use the ``nova`` cli tool provided by the ``python-novaclient`` -# package -# +# Test instance connectivity with the ``nova`` command from ``python-novaclient`` +echo "*********************************************************************" +echo "Begin DevStack Exercise: $0" +echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see # only the first error that occured. @@ -19,19 +20,42 @@ set -o xtrace # Settings # ======== -# Use openrc + stackrc + localrc for settings -pushd $(cd $(dirname "$0")/.. && pwd) -source ./openrc -popd +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) -# Get a token for clients that don't support service catalog -# ========================================================== +# Import common functions +source $TOP_DIR/functions -# manually create a token by querying keystone (sending JSON data). Keystone -# returns a token and catalog of endpoints. We use python to parse the token -# and save it. +# Import configuration +source $TOP_DIR/openrc + +# Import quantum functions if needed +if is_service_enabled quantum; then + source $TOP_DIR/lib/quantum +fi + +# Import exercise configuration +source $TOP_DIR/exerciserc + +# Instance type to create +DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} + +# Boot this image, use first AMI image if unset +DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} + +# Security group name +SECGROUP=${SECGROUP:-test_secgroup} + +# Default floating IP pool name +DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova} + +# Additional floating IP pool and range +TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} + +# Instance name +VM_NAME="ex-float" -TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_API_KEY\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"` # Launching a server # ================== @@ -42,149 +66,146 @@ nova list # Images # ------ -# Nova has a **deprecated** way of listing images. 
-nova image-list +# List the images available +glance image-list -# But we recommend using glance directly -glance -A $TOKEN index - -# Let's grab the id of the first AMI image to launch -IMAGE=`glance -A $TOKEN index | egrep ami | cut -d" " -f1` +# Grab the id of the image to launch +IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) +die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" # Security Groups # --------------- -SECGROUP=test_secgroup -# List of secgroups: +# List security groups nova secgroup-list # Create a secgroup -nova secgroup-create $SECGROUP "test_secgroup description" - -# determine flavor -# ---------------- - -# List of flavors: -nova flavor-list - -# and grab the first flavor in the list to launch -FLAVOR=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2` +if ! nova secgroup-list | grep -q $SECGROUP; then + nova secgroup-create $SECGROUP "$SECGROUP description" + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then + echo "Security group not created" + exit 1 + fi +fi -NAME="myserver" +# Configure Security Group Rules +if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then + nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 +fi +if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then + nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 +fi -nova boot --flavor $FLAVOR --image $IMAGE $NAME --security_groups=$SECGROUP +# List secgroup rules +nova secgroup-list-rules $SECGROUP -# Testing -# ======= +# Set up instance +# --------------- -# First check if it spins up (becomes active and responds to ping on -# internal ip). If you run this script from a nova node, you should -# bypass security groups and have direct access to the server. +# List flavors +nova flavor-list -# Waiting for boot -# ---------------- +# Select a flavor +INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) +if [[ -z "$INSTANCE_TYPE" ]]; then + # grab the first flavor in the list to launch if default doesn't exist + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) +fi -# Max time to wait while vm goes from build to active state -ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10} +# Clean-up from previous runs +nova delete $VM_NAME || true +if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then + echo "server didn't terminate!" + exit 1 +fi -# Max time till the vm is bootable -BOOT_TIMEOUT=${BOOT_TIMEOUT:-15} +# Boot instance +# ------------- -# Max time to wait for proper association and dis-association. -ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10} +VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) +die_if_not_set VM_UUID "Failure launching $VM_NAME" -# check that the status is active within ACTIVE_TIMEOUT seconds -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $NAME | grep status | grep -q ACTIVE; do sleep 1; done"; then +# Check that the status is active within ACTIVE_TIMEOUT seconds +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then echo "server didn't become active!" 
exit 1 fi -# get the IP of the server -IP=`nova show $NAME | grep "private network" | cut -d"|" -f3` - -# for single node deployments, we can ping private ips -MULTI_HOST=${MULTI_HOST:-0} -if [ "$MULTI_HOST" = "0" ]; then - # sometimes the first ping fails (10 seconds isn't enough time for the VM's - # network to respond?), so let's ping for a default of 15 seconds with a - # timeout of a second for each ping. - if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $IP; do sleep 1; done"; then - echo "Couldn't ping server" - exit 1 - fi -else - # On a multi-host system, without vm net access, do a sleep to wait for the boot - sleep $BOOT_TIMEOUT -fi +# Get the instance IP +IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) +die_if_not_set IP "Failure retrieving IP address" -# Security Groups & Floating IPs -# ------------------------------ +# Private IPs can be pinged in single node deployments +ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT -# allow icmp traffic (ping) -nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 +# Floating IPs +# ------------ -# List rules for a secgroup -nova secgroup-list-rules $SECGROUP +# Allocate a floating IP from the default pool +FLOATING_IP=$(nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1) +die_if_not_set FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL" -# allocate a floating ip -nova floating-ip-create - -# store floating address -FLOATING_IP=`nova floating-ip-list | grep None | head -1 | cut -d '|' -f2 | sed 's/ //g'` - -# add floating ip to our server -nova add-floating-ip $NAME $FLOATING_IP - -# test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds -if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then - echo "Couldn't ping server with floating ip" +# List floating addresses +if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then + echo "Floating IP not allocated" exit 1 fi -# pause the VM and verify we can't ping it anymore -nova pause $NAME +# Add floating IP to our server +nova add-floating-ip $VM_UUID $FLOATING_IP || \ + die "Failure adding floating IP $FLOATING_IP to $VM_NAME" -sleep 2 +# Test we can ping our floating IP within ASSOCIATE_TIMEOUT seconds +ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT -if ( ping -c1 -w1 $IP); then - echo "Pause failure - ping shouldn't work" - exit 1 -fi +if ! is_service_enabled quantum; then + # Allocate an IP from second floating pool + TEST_FLOATING_IP=$(nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1) + die_if_not_set TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL" -if ( ping -c1 -w1 $FLOATING_IP); then - echo "Pause failure - ping floating ips shouldn't work" - exit 1 + # list floating addresses + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! 
nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then
+        echo "Floating IP not allocated"
+        exit 1
+    fi
 fi

-# unpause the VM and verify we can ping it again
-nova unpause $NAME
-
-sleep 2
-
-ping -c1 -w1 $IP
-
-# dis-allow icmp traffic (ping)
-nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0
+# Dis-allow icmp traffic (ping)
+nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \
+    die "Failure deleting security group rule from $SECGROUP"

 # FIXME (anthony): make xs support security groups
-if [ "$VIRT_DRIVER" != "xenserver" ]; then
-    # test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
-    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
-        print "Security group failure - ping should not be allowed!"
-        echo "Couldn't ping server with floating ip"
-        exit 1
-    fi
+if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
+    # Test that we aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
+    ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT Fail
 fi

-# de-allocate the floating ip
-nova floating-ip-delete $FLOATING_IP
+# Clean up
+# --------
+
+if ! is_service_enabled quantum; then
+    # Delete second floating IP
+    nova floating-ip-delete $TEST_FLOATING_IP || \
+        die "Failure deleting floating IP $TEST_FLOATING_IP"
+fi

-# shutdown the server
-nova delete $NAME
+# Delete the floating ip
+nova floating-ip-delete $FLOATING_IP || \
+    die "Failure deleting floating IP $FLOATING_IP"

-# Delete a secgroup
-nova secgroup-delete $SECGROUP
+# Delete instance
+nova delete $VM_UUID || die "Failure deleting instance $VM_NAME"
+# Wait for termination
+if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
+    echo "Server $VM_NAME not deleted"
+    exit 1
+fi

-# FIXME: validate shutdown within 5 seconds
-# (nova show $NAME returns 1 or status != ACTIVE)?
+# Delete secgroup
+nova secgroup-delete $SECGROUP || \
+    die "Failure deleting security group $SECGROUP"
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
diff --git a/exercises/horizon.sh b/exercises/horizon.sh
new file mode 100755
index 00000000..c5dae3ab
--- /dev/null
+++ b/exercises/horizon.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+
+# **horizon.sh**
+
+# Sanity check that horizon started if enabled
+
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
+
+is_service_enabled horizon || exit 55
+
+# can we get the front page
+curl http://$SERVICE_HOST 2>/dev/null | grep -q '<h3.*>Log In</h3>' || die "Horizon front page not functioning!"
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
+
diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh
new file mode 100755
index 00000000..bc33fe82
--- /dev/null
+++ b/exercises/quantum-adv-test.sh
@@ -0,0 +1,459 @@
+#!/usr/bin/env bash
+#
+
+# **quantum-adv-test.sh**
+
+# Perform integration testing of Nova and other components with Quantum.
+
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+
+set -o errtrace
+
+trap failed ERR
+failed() {
+    local r=$?
+    set +o errtrace
+    set +o xtrace
+    echo "Failed to execute"
+    echo "Starting cleanup..."
+    delete_all
+    echo "Finished cleanup"
+    exit $r
+}
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace

+# Environment
+# -----------
+
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc
+
+# If quantum is not enabled we exit with exitcode 55 which means
+# the exercise is skipped.
+is_service_enabled quantum && is_service_enabled q-agt && is_service_enabled q-dhcp || exit 55
+
+# Import quantum functions
+source $TOP_DIR/lib/quantum
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
+
+# Quantum Settings
+# ----------------
+
+TENANTS="DEMO1"
+# TODO (nati) Test public network
+#TENANTS="DEMO1,DEMO2"
+
+PUBLIC_NAME="admin"
+DEMO1_NAME="demo1"
+DEMO2_NAME="demo2"
+
+PUBLIC_NUM_NET=1
+DEMO1_NUM_NET=1
+DEMO2_NUM_NET=2
+
+PUBLIC_NET1_CIDR="200.0.0.0/24"
+DEMO1_NET1_CIDR="10.10.0.0/24"
+DEMO2_NET1_CIDR="10.20.0.0/24"
+DEMO2_NET2_CIDR="10.20.1.0/24"
+
+PUBLIC_NET1_GATEWAY="200.0.0.1"
+DEMO1_NET1_GATEWAY="10.10.0.1"
+DEMO2_NET1_GATEWAY="10.20.0.1"
+DEMO2_NET2_GATEWAY="10.20.1.1"
+
+PUBLIC_NUM_VM=1
+DEMO1_NUM_VM=1
+DEMO2_NUM_VM=2
+
+PUBLIC_VM1_NET='admin-net1'
+DEMO1_VM1_NET='demo1-net1'
+# Multinic settings. This fails without a NIC setting in the OS image
+DEMO2_VM1_NET='demo2-net1'
+DEMO2_VM2_NET='demo2-net2'
+
+PUBLIC_NUM_ROUTER=1
+DEMO1_NUM_ROUTER=1
+DEMO2_NUM_ROUTER=1
+
+PUBLIC_ROUTER1_NET="admin-net1"
+DEMO1_ROUTER1_NET="demo1-net1"
+DEMO2_ROUTER1_NET="demo2-net1"
+
+KEYSTONE="keystone"
+
+# Get an auth token from keystone and save it.
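+# (``keystone token-get`` prints a table of token properties; ``grep ' id '``
+# selects the id row and ``awk`` pulls its value from the fourth column.)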
+ +TOKEN=`keystone token-get | grep ' id ' | awk '{print $4}'` + +# Various functions +# ----------------- + +function foreach_tenant { + COMMAND=$1 + for TENANT in ${TENANTS//,/ };do + eval ${COMMAND//%TENANT%/$TENANT} + done +} + +function foreach_tenant_resource { + COMMAND=$1 + RESOURCE=$2 + for TENANT in ${TENANTS//,/ };do + eval 'NUM=$'"${TENANT}_NUM_$RESOURCE" + for i in `seq $NUM`;do + local COMMAND_LOCAL=${COMMAND//%TENANT%/$TENANT} + COMMAND_LOCAL=${COMMAND_LOCAL//%NUM%/$i} + eval $COMMAND_LOCAL + done + done +} + +function foreach_tenant_vm { + COMMAND=$1 + foreach_tenant_resource "$COMMAND" 'VM' +} + +function foreach_tenant_net { + COMMAND=$1 + foreach_tenant_resource "$COMMAND" 'NET' +} + +function get_image_id { + local IMAGE_ID=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) + echo "$IMAGE_ID" +} + +function get_tenant_id { + local TENANT_NAME=$1 + local TENANT_ID=`keystone tenant-list | grep " $TENANT_NAME " | head -n 1 | get_field 1` + echo "$TENANT_ID" +} + +function get_user_id { + local USER_NAME=$1 + local USER_ID=`keystone user-list | grep $USER_NAME | awk '{print $2}'` + echo "$USER_ID" +} + +function get_role_id { + local ROLE_NAME=$1 + local ROLE_ID=`keystone role-list | grep $ROLE_NAME | awk '{print $2}'` + echo "$ROLE_ID" +} + +function get_network_id { + local NETWORK_NAME="$1" + local NETWORK_ID=`quantum net-list -F id -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'` + echo $NETWORK_ID +} + +function get_flavor_id { + local INSTANCE_TYPE=$1 + local FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'` + echo "$FLAVOR_ID" +} + +function confirm_server_active { + local VM_UUID=$1 + if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then + echo "server '$VM_UUID' did not become active!" 
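+        # Returning non-zero here trips the ERR trap installed at the top of
+        # the script, so the failed() handler runs delete_all before exiting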
+ false + fi +} + +function add_tenant { + local TENANT=$1 + local USER=$2 + + $KEYSTONE tenant-create --name=$TENANT + $KEYSTONE user-create --name=$USER --pass=${ADMIN_PASSWORD} + + local USER_ID=$(get_user_id $USER) + local TENANT_ID=$(get_tenant_id $TENANT) + + $KEYSTONE user-role-add --user-id $USER_ID --role-id $(get_role_id Member) --tenant-id $TENANT_ID +} + +function remove_tenant { + local TENANT=$1 + local TENANT_ID=$(get_tenant_id $TENANT) + $KEYSTONE tenant-delete $TENANT_ID +} + +function remove_user { + local USER=$1 + local USER_ID=$(get_user_id $USER) + $KEYSTONE user-delete $USER_ID +} + +function create_tenants { + source $TOP_DIR/openrc admin admin + add_tenant demo1 demo1 demo1 + add_tenant demo2 demo2 demo2 + source $TOP_DIR/openrc demo demo +} + +function delete_tenants_and_users { + source $TOP_DIR/openrc admin admin + remove_user demo1 + remove_tenant demo1 + remove_user demo2 + remove_tenant demo2 + echo "removed all tenants" + source $TOP_DIR/openrc demo demo +} + +function create_network { + local TENANT=$1 + local GATEWAY=$2 + local CIDR=$3 + local NUM=$4 + local EXTRA=$5 + local NET_NAME="${TENANT}-net$NUM" + local ROUTER_NAME="${TENANT}-router${NUM}" + source $TOP_DIR/openrc admin admin + local TENANT_ID=$(get_tenant_id $TENANT) + source $TOP_DIR/openrc $TENANT $TENANT + local NET_ID=$(quantum net-create --tenant_id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) + quantum subnet-create --ip_version 4 --tenant_id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR + quantum-debug probe-create $NET_ID + source $TOP_DIR/openrc demo demo +} + +function create_networks { + foreach_tenant_net 'create_network ${%TENANT%_NAME} ${%TENANT%_NET%NUM%_GATEWAY} ${%TENANT%_NET%NUM%_CIDR} %NUM% ${%TENANT%_NET%NUM%_EXTRA}' + #TODO(nati) test security group function + # allow ICMP for both tenant's security groups + #source $TOP_DIR/openrc demo1 demo1 + #$NOVA secgroup-add-rule default icmp -1 -1 0.0.0.0/0 + #source $TOP_DIR/openrc demo2 demo2 + #$NOVA secgroup-add-rule default icmp -1 -1 0.0.0.0/0 +} + +function create_vm { + local TENANT=$1 + local NUM=$2 + local NET_NAMES=$3 + source $TOP_DIR/openrc $TENANT $TENANT + local NIC="" + for NET_NAME in ${NET_NAMES//,/ };do + NIC="$NIC --nic net-id="`get_network_id $NET_NAME` + done + #TODO (nati) Add multi-nic test + #TODO (nati) Add public-net test + local VM_UUID=`nova boot --flavor $(get_flavor_id m1.tiny) \ + --image $(get_image_id) \ + $NIC \ + $TENANT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` + die_if_not_set VM_UUID "Failure launching $TENANT-server$NUM" VM_UUID + confirm_server_active $VM_UUID +} + +function create_vms { + foreach_tenant_vm 'create_vm ${%TENANT%_NAME} %NUM% ${%TENANT%_VM%NUM%_NET}' +} + +function ping_ip { + # Test agent connection. 
Assumes namespaces are disabled, and
+    # that DHCP is in use, but not L3
+    local VM_NAME=$1
+    local NET_NAME=$2
+    IP=`nova show $VM_NAME | grep 'network' | awk '{print $5}'`
+    ping_check $NET_NAME $IP $BOOT_TIMEOUT
+}
+
+function check_vm {
+    local TENANT=$1
+    local NUM=$2
+    local VM_NAME="$TENANT-server$NUM"
+    local NET_NAME=$3
+    source $TOP_DIR/openrc $TENANT $TENANT
+    ping_ip $VM_NAME $NET_NAME
+    # TODO (nati) test ssh connection
+    # TODO (nati) test interconnection between vms
+    # TODO (nati) test dhcp host routes
+    # TODO (nati) test multi-nic
+}
+
+function check_vms {
+    foreach_tenant_vm 'check_vm ${%TENANT%_NAME} %NUM% ${%TENANT%_VM%NUM%_NET}'
+}
+
+function shutdown_vm {
+    local TENANT=$1
+    local NUM=$2
+    source $TOP_DIR/openrc $TENANT $TENANT
+    VM_NAME=${TENANT}-server$NUM
+    nova delete $VM_NAME
+}
+
+function shutdown_vms {
+    foreach_tenant_vm 'shutdown_vm ${%TENANT%_NAME} %NUM%'
+    if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q ACTIVE; do sleep 1; done"; then
+        echo "Some VMs failed to shut down"
+        false
+    fi
+}
+
+function delete_network {
+    local TENANT=$1
+    local NUM=$2
+    local NET_NAME="${TENANT}-net$NUM"
+    source $TOP_DIR/openrc admin admin
+    local TENANT_ID=$(get_tenant_id $TENANT)
+    #TODO(nati) comment out until l3-agent merged
+    #for res in port subnet net router;do
+    for net_id in `quantum net-list -c id -c name | grep $NET_NAME | awk '{print $2}'`;do
+        delete_probe $net_id
+        quantum subnet-list | grep $net_id | awk '{print $2}' | xargs -I% quantum subnet-delete %
+        quantum net-delete $net_id
+    done
+    source $TOP_DIR/openrc demo demo
+}
+
+function delete_networks {
+    foreach_tenant_net 'delete_network ${%TENANT%_NAME} %NUM%'
+    #TODO(nati) add security group check after it is implemented
+    # source $TOP_DIR/openrc demo1 demo1
+    # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0
+    # source $TOP_DIR/openrc demo2 demo2
+    # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0
+}
+
+function create_all {
+    create_tenants
+    create_networks
+    create_vms
+}
+
+function delete_all {
+    shutdown_vms
+    delete_networks
+    delete_tenants_and_users
+}
+
+function all {
+    create_all
+    check_vms
+    delete_all
+}
+
+# Test functions
+# --------------
+
+function test_functions {
+    IMAGE=$(get_image_id)
+    echo $IMAGE
+
+    TENANT_ID=$(get_tenant_id demo)
+    echo $TENANT_ID
+
+    FLAVOR_ID=$(get_flavor_id m1.tiny)
+    echo $FLAVOR_ID
+
+    NETWORK_ID=$(get_network_id admin)
+    echo $NETWORK_ID
+}
+
+# Usage and main
+# --------------
+
+usage() {
+    echo "$0: [-h]"
+    echo "  -h, --help              Display help message"
+    echo "  -t, --tenant            Create tenants"
+    echo "  -n, --net               Create networks"
+    echo "  -v, --vm                Create vms"
+    echo "  -c, --check             Check connection"
+    echo "  -x, --delete-tenants    Delete tenants"
+    echo "  -y, --delete-nets       Delete networks"
+    echo "  -z, --delete-vms        Delete vms"
+    echo "  -T, --test              Test functions"
+}
+
+main() {
+
+    echo Description
+    echo
+    echo Copyright 2012, Cisco Systems
+    echo Copyright 2012, Nicira Networks, Inc.
+    echo Copyright 2012, NTT MCL, Inc.
+    echo
+    echo Please direct any questions to dedutta@cisco.com, dan@nicira.com, nachi@nttmcl.com
+    echo
+
+
+    if [ $# -eq 0 ] ; then
+        # if no args are provided, run all tests
+        all
+    else
+
+        while [ "$1" != "" ]; do
+            case $1 in
+                -h | --help )   usage
+                                exit
+                                ;;
+                -n | --net )    create_networks
+                                exit
+                                ;;
+                -v | --vm )     create_vms
+                                exit
+                                ;;
+                -t | --tenant ) create_tenants
+                                exit
+                                ;;
+                -c | --check )  check_vms
+                                exit
+                                ;;
+                -T | --test )   test_functions
+                                exit
+                                ;;
+                -x | --delete-tenants ) delete_tenants_and_users
+                                exit
+                                ;;
+                -y | --delete-nets ) delete_networks
+                                exit
+                                ;;
+                -z | --delete-vms ) shutdown_vms
+                                exit
+                                ;;
+                -a | --all )    all
+                                exit
+                                ;;
+                * )             usage
+                                exit 1
+            esac
+            shift
+        done
+    fi
+}
+
+# Kick off script
+# ---------------
+
+echo $*
+main $*
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh
new file mode 100755
index 00000000..a33c9c63
--- /dev/null
+++ b/exercises/sec_groups.sh
@@ -0,0 +1,76 @@
+#!/usr/bin/env bash
+
+# **sec_groups.sh**
+
+# Test security groups via the command line
+
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
+
+
+# Testing Security Groups
+# =======================
+
+# List security groups
+nova secgroup-list
+
+# Create random name for new sec group and create secgroup of said name
+SEC_GROUP_NAME="ex-secgroup-$(openssl rand -hex 4)"
+nova secgroup-create $SEC_GROUP_NAME 'a test security group'
+
+# Add some rules to the secgroup
+RULES_TO_ADD=( 22 3389 5900 )
+
+for RULE in "${RULES_TO_ADD[@]}"; do
+    nova secgroup-add-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0
+done
+
+# Check to make sure rules were added
+SEC_GROUP_RULES=( $(nova secgroup-list-rules $SEC_GROUP_NAME | grep -v \- | grep -v 'Source Group' | cut -d '|' -f3 | tr -d ' ') )
+for i in "${RULES_TO_ADD[@]}"; do
+    skip=
+    for j in "${SEC_GROUP_RULES[@]}"; do
+        [[ $i == $j ]] && { skip=1; break; }
+    done
+    [[ -n $skip ]] || exit 1
+done
+
+# Delete rules and secgroup
+for RULE in "${RULES_TO_ADD[@]}"; do
+    nova secgroup-delete-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0
+done
+
+# Delete secgroup
+nova secgroup-delete $SEC_GROUP_NAME || \
+    die "Failure deleting security group $SEC_GROUP_NAME"
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
diff --git a/exercises/swift.sh b/exercises/swift.sh
index f7be0994..a75f955a 100755
--- a/exercises/swift.sh
+++ b/exercises/swift.sh
@@ -1,6 +1,12 @@
 #!/usr/bin/env bash
 
-# Test swift via the command line tools that ship with it.
+# **swift.sh**
+
+# Test swift via the ``swift`` command line from ``python-swiftclient``
+
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"

 # This script exits on an error so that errors don't compound and you see
 # only the first error that occured.
@@ -14,27 +20,47 @@ set -o xtrace

 # Settings
 # ========

-# Use openrc + stackrc + localrc for settings
-pushd $(cd $(dirname "$0")/.. && pwd)
-source ./openrc
-popd
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
+
+# If swift is not enabled we exit with exitcode 55 which means
+# the exercise is skipped.
+is_service_enabled swift || exit 55
+
+# Container name
+CONTAINER=ex-swift

 # Testing Swift
 # =============

 # Check if we have to swift via keystone
-swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD stat
+swift stat || die "Failure getting status"

 # We start by creating a test container
-swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD post testcontainer
+swift post $CONTAINER || die "Failure creating container $CONTAINER"

 # add some files into it.
-swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue
+swift upload $CONTAINER /etc/issue || die "Failure uploading file to container $CONTAINER"

 # list them
-swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD list testcontainer
+swift list $CONTAINER || die "Failure listing contents of container $CONTAINER"

 # And we may want to delete them now that we have tested that
 # everything works.
-swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete testcontainer
+swift delete $CONTAINER || die "Failure deleting container $CONTAINER"
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
new file mode 100755
index 00000000..45cb0c8e
--- /dev/null
+++ b/exercises/volumes.sh
@@ -0,0 +1,219 @@
+#!/usr/bin/env bash
+
+# **volumes.sh**
+
+# Test cinder volumes with the ``cinder`` command from ``python-cinderclient``
+
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace + + +# Settings +# ======== + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc + +# Import quantum functions if needed +if is_service_enabled quantum; then + source $TOP_DIR/lib/quantum +fi + +# Import exercise configuration +source $TOP_DIR/exerciserc + +# If cinder is not enabled we exit with exitcode 55 which mean +# exercise is skipped. +is_service_enabled cinder || exit 55 + +# Instance type to create +DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} + +# Boot this image, use first AMI image if unset +DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} + +# Security group name +SECGROUP=${SECGROUP:-vol_secgroup} + +# Instance and volume names +VM_NAME=${VM_NAME:-ex-vol-inst} +VOL_NAME="ex-vol-$(openssl rand -hex 4)" + + +# Launching a server +# ================== + +# List servers for tenant: +nova list + +# Images +# ------ + +# List the images available +glance image-list + +# Grab the id of the image to launch +IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) +die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" + +# Security Groups +# --------------- + +# List security groups +nova secgroup-list + +# Create a secgroup +if ! nova secgroup-list | grep -q $SECGROUP; then + nova secgroup-create $SECGROUP "$SECGROUP description" + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then + echo "Security group not created" + exit 1 + fi +fi + +# Configure Security Group Rules +if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then + nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 +fi +if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then + nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 +fi + +# List secgroup rules +nova secgroup-list-rules $SECGROUP + +# Set up instance +# --------------- + +# List flavors +nova flavor-list + +# Select a flavor +INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) +if [[ -z "$INSTANCE_TYPE" ]]; then + # grab the first flavor in the list to launch if default doesn't exist + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) +fi + +# Clean-up from previous runs +nova delete $VM_NAME || true +if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then + echo "server didn't terminate!" + exit 1 +fi + +# Boot instance +# ------------- + +VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) +die_if_not_set VM_UUID "Failure launching $VM_NAME" + +# Check that the status is active within ACTIVE_TIMEOUT seconds +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then + echo "server didn't become active!" 
+ exit 1 +fi + +# Get the instance IP +IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) +die_if_not_set IP "Failure retrieving IP address" + +# Private IPs can be pinged in single node deployments +ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT + +# Volumes +# ------- + +# Verify it doesn't exist +if [[ -n $(cinder list | grep $VOL_NAME | head -1 | get_field 2) ]]; then + echo "Volume $VOL_NAME already exists" + exit 1 +fi + +# Create a new volume +start_time=$(date +%s) +cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ + die "Failure creating volume $VOL_NAME" +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then + echo "Volume $VOL_NAME not created" + exit 1 +fi +end_time=$(date +%s) +echo "Completed cinder create in $((end_time - start_time)) seconds" + +# Get volume ID +VOL_ID=$(cinder list | grep $VOL_NAME | head -1 | get_field 1) +die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME" + +# Attach to server +DEVICE=/dev/vdb +start_time=$(date +%s) +nova volume-attach $VM_UUID $VOL_ID $DEVICE || \ + die "Failure attaching volume $VOL_NAME to $VM_NAME" +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then + echo "Volume $VOL_NAME not attached to $VM_NAME" + exit 1 +fi +end_time=$(date +%s) +echo "Completed volume-attach in $((end_time - start_time)) seconds" + +VOL_ATTACH=$(cinder list | grep $VOL_NAME | head -1 | get_field -1) +die_if_not_set VOL_ATTACH "Failure retrieving $VOL_NAME status" +if [[ "$VOL_ATTACH" != $VM_UUID ]]; then + echo "Volume not attached to correct instance" + exit 1 +fi + +# Clean up +# -------- + +# Detach volume +start_time=$(date +%s) +nova volume-detach $VM_UUID $VOL_ID || die "Failure detaching volume $VOL_NAME from $VM_NAME" +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then + echo "Volume $VOL_NAME not detached from $VM_NAME" + exit 1 +fi +end_time=$(date +%s) +echo "Completed volume-detach in $((end_time - start_time)) seconds" + +# Delete volume +start_time=$(date +%s) +cinder delete $VOL_ID || die "Failure deleting volume $VOL_NAME" +if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then + echo "Volume $VOL_NAME not deleted" + exit 1 +fi +end_time=$(date +%s) +echo "Completed cinder delete in $((end_time - start_time)) seconds" + +# Delete instance +nova delete $VM_UUID || die "Failure deleting instance $VM_NAME" +if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then + echo "Server $VM_NAME not deleted" + exit 1 +fi + +# Delete secgroup +nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" + +set +o xtrace +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh new file mode 100644 index 00000000..f1599557 --- /dev/null +++ b/extras.d/80-tempest.sh @@ -0,0 +1,21 @@ +# tempest.sh - DevStack extras script + +source $TOP_DIR/lib/tempest + +if [[ "$1" == "stack" ]]; then + # Configure Tempest last to ensure that the runtime configuration of + # the various OpenStack services can be queried. 
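+    # extras.d hooks like this one are sourced by stack.sh with $1 set to the
+    # phase name ('stack' here; unstack.sh calls them with 'unstack'), so each
+    # hook simply dispatches on that argument.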
+    if is_service_enabled tempest; then
+        echo_summary "Configuring Tempest"
+        install_tempest
+        configure_tempest
+        init_tempest
+    fi
+fi
+
+if [[ "$1" == "unstack" ]]; then
+    # no-op
+    :
+fi
+
+
diff --git a/extras.d/README b/extras.d/README
new file mode 100644
index 00000000..ffc6793a
--- /dev/null
+++ b/extras.d/README
@@ -0,0 +1,14 @@
+The extras.d directory contains project initialization scripts to be
+sourced by stack.sh at the end of its run. This is expected to be
+used by external projects that want to be configured, started and
+stopped with DevStack.
+
+Order is controlled by prefixing the script names with a two-digit
+sequence number. Script names must end with '.sh'. This provides a
+convenient way to disable scripts by simply renaming them.
+
+DevStack reserves the sequence numbers 00 through 09 and 90 through 99
+for its own use.
+
+The scripts are called with an argument of 'stack' by stack.sh and
+with an argument of 'unstack' by unstack.sh.
diff --git a/files/000-default.template b/files/000-default.template
deleted file mode 100644
index fa8a86a3..00000000
--- a/files/000-default.template
+++ /dev/null
@@ -1,27 +0,0 @@
-<VirtualHost *:80>
-    WSGIScriptAlias / %HORIZON_DIR%/openstack-dashboard/dashboard/wsgi/django.wsgi
-    WSGIDaemonProcess horizon user=%USER% group=%USER% processes=3 threads=10
-    SetEnv APACHE_RUN_USER %USER%
-    SetEnv APACHE_RUN_GROUP %USER%
-    WSGIProcessGroup horizon
-
-    DocumentRoot %HORIZON_DIR%/.blackhole/
-    Alias /media %HORIZON_DIR%/openstack-dashboard/media
-
-    <Directory />
-        Options FollowSymLinks
-        AllowOverride None
-    </Directory>
-
-    <Directory %HORIZON_DIR%/>
-        Options Indexes FollowSymLinks MultiViews
-        AllowOverride None
-        Order allow,deny
-        allow from all
-    </Directory>
-
-    ErrorLog /var/log/apache2/error.log
-    LogLevel warn
-    CustomLog /var/log/apache2/access.log combined
-</VirtualHost>
diff --git a/files/apache-horizon.template b/files/apache-horizon.template
new file mode 100644
index 00000000..fb98471b
--- /dev/null
+++ b/files/apache-horizon.template
@@ -0,0 +1,30 @@
+<VirtualHost *:80>
+    WSGIScriptAlias / %HORIZON_DIR%/openstack_dashboard/wsgi/django.wsgi
+    WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 home=%HORIZON_DIR%
+    WSGIApplicationGroup %{GLOBAL}
+
+    SetEnv APACHE_RUN_USER %USER%
+    SetEnv APACHE_RUN_GROUP %GROUP%
+    WSGIProcessGroup horizon
+
+    DocumentRoot %HORIZON_DIR%/.blackhole/
+    Alias /media %HORIZON_DIR%/openstack_dashboard/static
+
+    <Directory />
+        Options FollowSymLinks
+        AllowOverride None
+    </Directory>
+
+    <Directory %HORIZON_DIR%/>
+        Options Indexes FollowSymLinks MultiViews
+        AllowOverride None
+        Order allow,deny
+        allow from all
+    </Directory>
+
+    ErrorLog /var/log/%APACHE_NAME%/horizon_error.log
+    LogLevel warn
+    CustomLog /var/log/%APACHE_NAME%/horizon_access.log combined
+</VirtualHost>
+
+WSGISocketPrefix /var/run/%APACHE_NAME%
diff --git a/files/apts/baremetal b/files/apts/baremetal
new file mode 100644
index 00000000..54e76e00
--- /dev/null
+++ b/files/apts/baremetal
@@ -0,0 +1,9 @@
+busybox
+dnsmasq
+gcc
+ipmitool
+make
+open-iscsi
+qemu-kvm
+syslinux
+tgt
diff --git a/files/apts/ceilometer-collector b/files/apts/ceilometer-collector
new file mode 100644
index 00000000..c67ade3c
--- /dev/null
+++ b/files/apts/ceilometer-collector
@@ -0,0 +1,2 @@
+python-pymongo
+mongodb-server
diff --git a/files/apts/cinder b/files/apts/cinder
new file mode 100644
index 00000000..5db06eac
--- /dev/null
+++ b/files/apts/cinder
@@ -0,0 +1,2 @@
+tgt
+lvm2
diff --git a/files/apts/general b/files/apts/general
index 31fa7527..0264066a 100644
--- a/files/apts/general
+++ b/files/apts/general
@@ -1,3 +1,4 @@
+bridge-utils
 pep8
 pylint
 python-pip
@@ -5,7 +6,7 @@ screen
 unzip
 wget
 psmisc
-git-core +git lsof # useful when debugging openssh-server vim-nox @@ -17,3 +18,6 @@ wget curl tcpdump euca2ools # only for testing client +tar +python-cmd2 # dist:precise +python-netaddr diff --git a/files/apts/glance b/files/apts/glance index 1e87d589..a05e9f2e 100644 --- a/files/apts/glance +++ b/files/apts/glance @@ -1,8 +1,12 @@ +gcc +libxml2-dev +python-dev python-eventlet python-routes python-greenlet -python-argparse +python-argparse # dist:oneiric python-sqlalchemy python-wsgiref python-pastedeploy python-xattr +python-iso8601 diff --git a/files/apts/horizon b/files/apts/horizon index 22b3b307..2c2faf1a 100644 --- a/files/apts/horizon +++ b/files/apts/horizon @@ -1,5 +1,25 @@ -apache2 -libapache2-mod-wsgi +apache2 # NOPRIME +libapache2-mod-wsgi # NOPRIME +python-beautifulsoup python-dateutil +python-paste +python-pastedeploy python-anyjson python-routes +python-xattr +python-sqlalchemy +python-webob +python-kombu +pylint +pep8 +python-eventlet +python-nose +python-sphinx +python-mox +python-kombu +python-coverage +python-cherrypy3 # why? +python-migrate +nodejs +nodejs-legacy # dist:quantal +python-netaddr diff --git a/files/apts/keystone b/files/apts/keystone index 6e6d3d53..ce536bfc 100644 --- a/files/apts/keystone +++ b/files/apts/keystone @@ -7,9 +7,10 @@ python-paste sqlite3 python-pysqlite2 python-sqlalchemy +python-mysqldb python-webob python-greenlet python-routes libldap2-dev libsasl2-dev - +python-bcrypt diff --git a/files/apts/ldap b/files/apts/ldap new file mode 100644 index 00000000..81a00f27 --- /dev/null +++ b/files/apts/ldap @@ -0,0 +1,3 @@ +ldap-utils +slapd # NOPRIME +python-ldap diff --git a/files/apts/n-api b/files/apts/n-api new file mode 100644 index 00000000..ad943ffd --- /dev/null +++ b/files/apts/n-api @@ -0,0 +1,2 @@ +gcc # temporary because this pulls in glance to get the client without running the glance prereqs +python-dateutil diff --git a/files/apts/n-cpu b/files/apts/n-cpu new file mode 100644 index 00000000..ad2d6d71 --- /dev/null +++ b/files/apts/n-cpu @@ -0,0 +1,7 @@ +# Stuff for diablo volumes +lvm2 +open-iscsi +open-iscsi-utils +genisoimage +sysfsutils +sg3-utils diff --git a/files/apts/novnc b/files/apts/n-novnc similarity index 100% rename from files/apts/novnc rename to files/apts/n-novnc diff --git a/files/apts/n-vol b/files/apts/n-vol new file mode 100644 index 00000000..5db06eac --- /dev/null +++ b/files/apts/n-vol @@ -0,0 +1,2 @@ +tgt +lvm2 diff --git a/files/apts/nova b/files/apts/nova index 77622a81..39b4060e 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -1,10 +1,13 @@ dnsmasq-base -dnsmasq-utils # for dhcp_release +dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise,quantal kpartx parted -arping # used for send_arp_for_ha option in nova-network +arping # only available in dist:natty +iputils-arping # only available in dist:oneiric mysql-server # NOPRIME python-mysqldb +python-xattr # needed for glance which is needed for nova --- this shouldn't be here +python-lxml # needed for glance which is needed for nova --- this shouldn't be here kvm gawk iptables @@ -13,9 +16,11 @@ sqlite3 sudo kvm libvirt-bin # NOPRIME +libjs-jquery-tablesorter # Needed for coverage html reports vlan curl rabbitmq-server # NOPRIME +qpidd # dist:precise NOPRIME socat # used by ajaxterm python-mox python-paste @@ -26,6 +31,7 @@ python-libvirt python-libxml2 python-routes python-netaddr +python-numpy # used by websockify for spice console python-pastedeploy python-eventlet python-cheetah @@ -36,8 +42,7 @@ python-suds python-lockfile 
python-m2crypto python-boto - -# Stuff for diablo volumes -iscsitarget -iscsitarget-dkms -lvm2 +python-kombu +python-feedparser +python-iso8601 +python-qpid # dist:precise diff --git a/files/apts/postgresql b/files/apts/postgresql new file mode 100644 index 00000000..bf19d397 --- /dev/null +++ b/files/apts/postgresql @@ -0,0 +1 @@ +python-psycopg2 diff --git a/files/apts/quantum b/files/apts/quantum new file mode 100644 index 00000000..64fc1bfb --- /dev/null +++ b/files/apts/quantum @@ -0,0 +1,26 @@ +ebtables +iptables +iputils-ping +iputils-arping +mysql-server #NOPRIME +sudo +python-boto +python-iso8601 +python-paste +python-routes +python-suds +python-netaddr +python-pastedeploy +python-greenlet +python-kombu +python-eventlet +python-sqlalchemy +python-mysqldb +python-pyudev +python-qpid # dist:precise +dnsmasq-base +dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise,quantal +rabbitmq-server # NOPRIME +qpid # NOPRIME +sqlite3 +vlan diff --git a/files/apts/ryu b/files/apts/ryu new file mode 100644 index 00000000..4a4fc523 --- /dev/null +++ b/files/apts/ryu @@ -0,0 +1,5 @@ +python-setuptools +python-gevent +python-gflags +python-netifaces +python-sphinx diff --git a/files/apts/swift b/files/apts/swift index f2983778..c52c68b7 100644 --- a/files/apts/swift +++ b/files/apts/swift @@ -1,6 +1,6 @@ curl gcc -memcached # NOPRIME +memcached python-configobj python-coverage python-dev diff --git a/files/apts/tls-proxy b/files/apts/tls-proxy new file mode 100644 index 00000000..0a440159 --- /dev/null +++ b/files/apts/tls-proxy @@ -0,0 +1 @@ +stud # only available in dist:precise,quantal diff --git a/files/default_catalog.templates b/files/default_catalog.templates new file mode 100644 index 00000000..990cc0e9 --- /dev/null +++ b/files/default_catalog.templates @@ -0,0 +1,41 @@ +# config for TemplatedCatalog, using camelCase because I don't want to do +# translations for legacy compat +catalog.RegionOne.identity.publicURL = http://%SERVICE_HOST%:$(public_port)s/v2.0 +catalog.RegionOne.identity.adminURL = http://%SERVICE_HOST%:$(admin_port)s/v2.0 +catalog.RegionOne.identity.internalURL = http://%SERVICE_HOST%:$(public_port)s/v2.0 +catalog.RegionOne.identity.name = Identity Service + + +catalog.RegionOne.compute.publicURL = http://%SERVICE_HOST%:8774/v2/$(tenant_id)s +catalog.RegionOne.compute.adminURL = http://%SERVICE_HOST%:8774/v2/$(tenant_id)s +catalog.RegionOne.compute.internalURL = http://%SERVICE_HOST%:8774/v2/$(tenant_id)s +catalog.RegionOne.compute.name = Compute Service + + +catalog.RegionOne.volume.publicURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s +catalog.RegionOne.volume.adminURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s +catalog.RegionOne.volume.internalURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s +catalog.RegionOne.volume.name = Volume Service + + +catalog.RegionOne.ec2.publicURL = http://%SERVICE_HOST%:8773/services/Cloud +catalog.RegionOne.ec2.adminURL = http://%SERVICE_HOST%:8773/services/Admin +catalog.RegionOne.ec2.internalURL = http://%SERVICE_HOST%:8773/services/Cloud +catalog.RegionOne.ec2.name = EC2 Service + + +catalog.RegionOne.s3.publicURL = http://%SERVICE_HOST%:%S3_SERVICE_PORT% +catalog.RegionOne.s3.adminURL = http://%SERVICE_HOST%:%S3_SERVICE_PORT% +catalog.RegionOne.s3.internalURL = http://%SERVICE_HOST%:%S3_SERVICE_PORT% +catalog.RegionOne.s3.name = S3 Service + + +catalog.RegionOne.image.publicURL = http://%SERVICE_HOST%:9292 +catalog.RegionOne.image.adminURL = http://%SERVICE_HOST%:9292 +catalog.RegionOne.image.internalURL = 
http://%SERVICE_HOST%:9292 +catalog.RegionOne.image.name = Image Service + +catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8000/v1 +catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8000/v1 +catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8000/v1 +catalog.RegionOne.orchestration.name = Heat Service diff --git a/files/glance-api.conf b/files/glance-api.conf deleted file mode 100644 index bb758afb..00000000 --- a/files/glance-api.conf +++ /dev/null @@ -1,178 +0,0 @@ -[DEFAULT] -# Show more verbose log output (sets INFO log level output) -verbose = True - -# Show debugging output in logs (sets DEBUG log level output) -debug = True - -# Which backend store should Glance use by default is not specified -# in a request to add a new image to Glance? Default: 'file' -# Available choices are 'file', 'swift', and 's3' -default_store = file - -# Address to bind the API server -bind_host = 0.0.0.0 - -# Port the bind the API server to -bind_port = 9292 - -# Address to find the registry server -registry_host = 0.0.0.0 - -# Port the registry server is listening on -registry_port = 9191 - -# Log to this file. Make sure you do not set the same log -# file for both the API and registry servers! -#log_file = %DEST%/glance/api.log - -# Send logs to syslog (/dev/log) instead of to file specified by `log_file` -use_syslog = %SYSLOG% - -# ============ Notification System Options ===================== - -# Notifications can be sent when images are create, updated or deleted. -# There are three methods of sending notifications, logging (via the -# log_file directive), rabbit (via a rabbitmq queue) or noop (no -# notifications sent, the default) -notifier_strategy = noop - -# Configuration options if sending notifications via rabbitmq (these are -# the defaults) -rabbit_host = localhost -rabbit_port = 5672 -rabbit_use_ssl = false -rabbit_userid = guest -rabbit_password = guest -rabbit_virtual_host = / -rabbit_notification_topic = glance_notifications - -# ============ Filesystem Store Options ======================== - -# Directory that the Filesystem backend store -# writes image data to -filesystem_store_datadir = %DEST%/glance/images/ - -# ============ Swift Store Options ============================= - -# Address where the Swift authentication service lives -swift_store_auth_address = 127.0.0.1:8080/v1.0/ - -# User to authenticate against the Swift authentication service -swift_store_user = jdoe - -# Auth key for the user authenticating against the -# Swift authentication service -swift_store_key = a86850deb2742ec3cb41518e26aa2d89 - -# Container within the account that the account should use -# for storing images in Swift -swift_store_container = glance - -# Do we create the container if it does not exist? -swift_store_create_container_on_put = False - -# What size, in MB, should Glance start chunking image files -# and do a large object manifest in Swift? By default, this is -# the maximum object size in Swift, which is 5GB -swift_store_large_object_size = 5120 - -# When doing a large object manifest, what size, in MB, should -# Glance write chunks to Swift? This amount of data is written -# to a temporary disk buffer during the process of chunking -# the image file, and the default is 200MB -swift_store_large_object_chunk_size = 200 - -# Whether to use ServiceNET to communicate with the Swift storage servers. -# (If you aren't RACKSPACE, leave this False!) 
-# -# To use ServiceNET for authentication, prefix hostname of -# `swift_store_auth_address` with 'snet-'. -# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/ -swift_enable_snet = False - -# ============ S3 Store Options ============================= - -# Address where the S3 authentication service lives -s3_store_host = 127.0.0.1:8080/v1.0/ - -# User to authenticate against the S3 authentication service -s3_store_access_key = <20-char AWS access key> - -# Auth key for the user authenticating against the -# S3 authentication service -s3_store_secret_key = <40-char AWS secret key> - -# Container within the account that the account should use -# for storing images in S3. Note that S3 has a flat namespace, -# so you need a unique bucket name for your glance images. An -# easy way to do this is append your AWS access key to "glance". -# S3 buckets in AWS *must* be lowercased, so remember to lowercase -# your AWS access key if you use it in your bucket name below! -s3_store_bucket = glance - -# Do we create the bucket if it does not exist? -s3_store_create_bucket_on_put = False - -# ============ Image Cache Options ======================== - -image_cache_enabled = False - -# Directory that the Image Cache writes data to -# Make sure this is also set in glance-pruner.conf -image_cache_datadir = /var/lib/glance/image-cache/ - -# Number of seconds after which we should consider an incomplete image to be -# stalled and eligible for reaping -image_cache_stall_timeout = 86400 - -# ============ Delayed Delete Options ============================= - -# Turn on/off delayed delete -delayed_delete = False - -# Delayed delete time in seconds -scrub_time = 43200 - -# Directory that the scrubber will use to remind itself of what to delete -# Make sure this is also set in glance-scrubber.conf -scrubber_datadir = /var/lib/glance/scrubber - -[pipeline:glance-api] -#pipeline = versionnegotiation context apiv1app -# NOTE: use the following pipeline for keystone -pipeline = versionnegotiation authtoken context apiv1app - -# To enable Image Cache Management API replace pipeline with below: -# pipeline = versionnegotiation context imagecache apiv1app -# NOTE: use the following pipeline for keystone auth (with caching) -# pipeline = versionnegotiation authtoken context imagecache apiv1app - -[pipeline:versions] -pipeline = versionsapp - -[app:versionsapp] -paste.app_factory = glance.api.versions:app_factory - -[app:apiv1app] -paste.app_factory = glance.api.v1:app_factory - -[filter:versionnegotiation] -paste.filter_factory = glance.api.middleware.version_negotiation:filter_factory - -[filter:imagecache] -paste.filter_factory = glance.api.middleware.image_cache:filter_factory - -[filter:context] -paste.filter_factory = glance.common.context:filter_factory - -[filter:authtoken] -paste.filter_factory = keystone.middleware.auth_token:filter_factory -service_protocol = http -service_host = 127.0.0.1 -service_port = 5000 -auth_host = 127.0.0.1 -auth_port = 35357 -auth_protocol = http -auth_uri = http://127.0.0.1:5000/ -admin_token = %SERVICE_TOKEN% diff --git a/files/glance-registry.conf b/files/glance-registry.conf deleted file mode 100644 index 1e041860..00000000 --- a/files/glance-registry.conf +++ /dev/null @@ -1,70 +0,0 @@ -[DEFAULT] -# Show more verbose log output (sets INFO log level output) -verbose = True - -# Show debugging output in logs (sets DEBUG log level output) 
-debug = True - -# Address to bind the registry server -bind_host = 0.0.0.0 - -# Port the bind the registry server to -bind_port = 9191 - -# Log to this file. Make sure you do not set the same log -# file for both the API and registry servers! -#log_file = %DEST%/glance/registry.log - -# Where to store images -filesystem_store_datadir = %DEST%/glance/images - -# Send logs to syslog (/dev/log) instead of to file specified by `log_file` -use_syslog = %SYSLOG% - -# SQLAlchemy connection string for the reference implementation -# registry server. Any valid SQLAlchemy connection string is fine. -# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine -sql_connection = %SQL_CONN% - -# Period in seconds after which SQLAlchemy should reestablish its connection -# to the database. -# -# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop -# idle connections. This can result in 'MySQL Gone Away' exceptions. If you -# notice this, you can lower this value to ensure that SQLAlchemy reconnects -# before MySQL can drop the connection. -sql_idle_timeout = 3600 - -# Limit the api to return `param_limit_max` items in a call to a container. If -# a larger `limit` query param is provided, it will be reduced to this value. -api_limit_max = 1000 - -# If a `limit` query param is not provided in an api request, it will -# default to `limit_param_default` -limit_param_default = 25 - -[pipeline:glance-registry] -#pipeline = context registryapp -# NOTE: use the following pipeline for keystone -pipeline = authtoken keystone_shim context registryapp - -[app:registryapp] -paste.app_factory = glance.registry.server:app_factory - -[filter:context] -context_class = glance.registry.context.RequestContext -paste.filter_factory = glance.common.context:filter_factory - -[filter:authtoken] -paste.filter_factory = keystone.middleware.auth_token:filter_factory -service_protocol = http -service_host = 127.0.0.1 -service_port = 5000 -auth_host = 127.0.0.1 -auth_port = 35357 -auth_protocol = http -auth_uri = http://127.0.0.1:5000/ -admin_token = %SERVICE_TOKEN% - -[filter:keystone_shim] -paste.filter_factory = keystone.middleware.glance_auth_token:filter_factory diff --git a/files/horizon_settings.py b/files/horizon_settings.py index 3a17db2c..ce92e2c9 100644 --- a/files/horizon_settings.py +++ b/files/horizon_settings.py @@ -1,10 +1,28 @@ import os +from django.utils.translation import ugettext_lazy as _ + DEBUG = True TEMPLATE_DEBUG = DEBUG PROD = False USE_SSL = False +# Set SSL proxy settings: +# For Django 1.4+ pass this header from the proxy after terminating the SSL, +# and don't forget to strip it from the client's request. +# For more information see: +# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header +# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https') + +# Specify a regular expression to validate user passwords. +# HORIZON_CONFIG = { +# "password_validator": { +# "regex": '.*', +# "help_text": _("Your password does not meet the requirements.") +# }, +# 'help_url': "http://docs.openstack.org" +# } + LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) # FIXME: We need to change this to mysql, instead of sqlite. 
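The FIXME above refers to the Django ``DATABASES`` setting shown in the next
hunk. A minimal sketch of the MySQL-backed variant it asks for might look like
the following; the database name, user, and password are hypothetical
placeholders, not values DevStack defines anywhere:

    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',  # MySQL instead of sqlite3
            'NAME': 'horizon',       # hypothetical schema name
            'USER': 'horizon',       # hypothetical account
            'PASSWORD': 'secret',    # hypothetical password
            'HOST': '127.0.0.1',
            'PORT': '3306',
        },
    }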
@@ -12,23 +30,27 @@ 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(LOCAL_PATH, 'dashboard_openstack.sqlite3'), + 'TEST_NAME': os.path.join(LOCAL_PATH, 'test.sqlite3'), }, } -CACHE_BACKEND = 'dummy://' - -# Add apps to horizon installation. -INSTALLED_APPS = ( - 'dashboard', - 'django.contrib.contenttypes', - 'django.contrib.sessions', - 'django.contrib.messages', - 'django.contrib.staticfiles', - 'django_openstack', - 'django_openstack.templatetags', - 'mailer', -) +# Set custom secret key: +# You can either set it to a specific value or you can let horizon generate a +# default secret key that is unique on this machine, i.e. regardless of the +# amount of Python WSGI workers (if used behind Apache+mod_wsgi). However, there +# may be situations where you would want to set this explicitly, e.g. when +# multiple dashboard instances are distributed on different machines (usually +# behind a load-balancer). Either make sure that all of a session's requests +# get routed to the same dashboard instance, or set the same SECRET_KEY +# for all of them. +from horizon.utils import secret_key +SECRET_KEY = secret_key.generate_or_read_from_file(os.path.join(LOCAL_PATH, '.secret_key_store')) +# We recommend you use memcached for development; otherwise after every reload +# of the django development server, you will have to log in again. To use +# memcached set CACHE_BACKEND to something like 'memcached://127.0.0.1:11211/' +CACHE_BACKEND = 'dummy://' +SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db' # Send email to the console by default EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' @@ -44,32 +66,62 @@ # EMAIL_HOST_USER = 'djangomail' # EMAIL_HOST_PASSWORD = 'top-secret!' -# FIXME: This needs to be changed to allow for multi-node setup. -OPENSTACK_KEYSTONE_URL = "http://localhost:5000/v2.0/" -OPENSTACK_KEYSTONE_ADMIN_URL = "http://localhost:35357/v2.0" +# For multiple regions uncomment this configuration, and add (endpoint, title). +# AVAILABLE_REGIONS = [ +# ('http://cluster1.example.com:5000/v2.0', 'cluster1'), +# ('http://cluster2.example.com:5000/v2.0', 'cluster2'), +# ] + +OPENSTACK_HOST = "127.0.0.1" +OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST OPENSTACK_KEYSTONE_DEFAULT_ROLE = "Member" -# NOTE(tres): Available services should come from the service -# catalog in Keystone. -SWIFT_ENABLED = False +# Disable SSL certificate checks (useful for self-signed certificates): +# OPENSTACK_SSL_NO_VERIFY = True + +HORIZON_CONFIG = { + 'dashboards': ('project', 'admin', 'settings',), + 'default_dashboard': 'project', +} + +# The OPENSTACK_KEYSTONE_BACKEND setting can be used to identify the +# capabilities of the auth backend for Keystone. +# If Keystone has been configured to use LDAP as the auth backend then set +# can_edit_user to False and name to 'ldap'. +# +# TODO(tres): Remove these once Keystone has an API to identify auth backend. +OPENSTACK_KEYSTONE_BACKEND = { + 'name': 'native', + 'can_edit_user': True +} + +OPENSTACK_HYPERVISOR_FEATURES = { + 'can_set_mount_point': True +} + +# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints +# in the Keystone service catalog. Use this setting when Horizon is running +# external to the OpenStack environment. The default is 'internalURL'.
+#OPENSTACK_ENDPOINT_TYPE = "publicURL" -# Configure quantum connection details for networking -QUANTUM_ENABLED = False -QUANTUM_URL = '127.0.0.1' -QUANTUM_PORT = '9696' -QUANTUM_TENANT = '1234' -QUANTUM_CLIENT_VERSION='0.1' +# The number of objects (Swift containers/objects or images) to display +# on a single page before providing a paging element (a "more" link) +# to paginate results. +API_RESULT_LIMIT = 1000 +API_RESULT_PAGE_SIZE = 20 -# No monitoring links currently -EXTERNAL_MONITORING = [] +SWIFT_PAGINATE_LIMIT = 100 + +# The timezone of the server. This should correspond with the timezone +# of your entire OpenStack installation, and hopefully be in UTC. +TIME_ZONE = "UTC" -# Uncomment the following segment to silence most logging -# django.db and boto DEBUG logging is extremely verbose. #LOGGING = { # 'version': 1, -# # set to True will disable all logging except that specified, unless -# # nothing is specified except that django.db.backends will still log, -# # even when set to True, so disable explicitly +# # When set to True this will disable all logging except +# # for loggers specified in this configuration dictionary. Note that +# # if nothing is specified here and disable_existing_loggers is True, +# # django.db.backends will still log unless it is disabled explicitly. # 'disable_existing_loggers': False, # 'handlers': { # 'null': { @@ -77,22 +129,41 @@ # 'class': 'django.utils.log.NullHandler', # }, # 'console': { -# 'level': 'DEBUG', +# # Set the level to "DEBUG" for verbose output logging. +# 'level': 'INFO', # 'class': 'logging.StreamHandler', # }, # }, # 'loggers': { -# # Comment or Uncomment these to turn on/off logging output +# # Logging from django.db.backends is VERY verbose, send to null +# # by default. # 'django.db.backends': { # 'handlers': ['null'], # 'propagate': False, # }, -# 'django_openstack': { -# 'handlers': ['null'], +# 'horizon': { +# 'handlers': ['console'], +# 'propagate': False, +# }, +# 'openstack_dashboard': { +# 'handlers': ['console'], +# 'propagate': False, +# }, +# 'novaclient': { +# 'handlers': ['console'], # 'propagate': False, # }, +# 'keystoneclient': { +# 'handlers': ['console'], +# 'propagate': False, +# }, +# 'glanceclient': { +# 'handlers': ['console'], +# 'propagate': False, +# }, +# 'nose.plugins.manager': { +# 'handlers': ['console'], +# 'propagate': False, +# } # } #} - -# How much ram on each compute host? -COMPUTE_HOST_RAM_GB = 16 diff --git a/files/keystone.conf b/files/keystone.conf deleted file mode 100644 index 687273b4..00000000 --- a/files/keystone.conf +++ /dev/null @@ -1,86 +0,0 @@ -[DEFAULT] -# Show more verbose log output (sets INFO log level output) -verbose = False - -# Show debugging output in logs (sets DEBUG log level output) -debug = False - -# Which backend store should Keystone use by default. -# Default: 'sqlite' -# Available choices are 'sqlite' [future will include LDAP, PAM, etc] -default_store = sqlite - -# Log to this file. Make sure you do not set the same log -# file for both the API and registry servers! 
-log_file = %DEST%/keystone/keystone.log - -# List of backends to be configured -backends = keystone.backends.sqlalchemy -#For LDAP support, add: ,keystone.backends.ldap - -# Dictionary Maps every service to a header.Missing services would get header -# X_(SERVICE_NAME) Key => Service Name, Value => Header Name -service-header-mappings = { - 'nova' : 'X-Server-Management-Url', - 'swift' : 'X-Storage-Url', - 'cdn' : 'X-CDN-Management-Url'} - -# Address to bind the API server -# TODO Properties defined within app not available via pipeline. -service_host = 0.0.0.0 - -# Port the bind the API server to -service_port = 5000 - -# Address to bind the Admin API server -admin_host = 0.0.0.0 - -# Port the bind the Admin API server to -admin_port = 35357 - -#Role that allows to perform admin operations. -keystone-admin-role = KeystoneAdmin - -#Role that allows to perform service admin operations. -keystone-service-admin-role = KeystoneServiceAdmin - -[keystone.backends.sqlalchemy] -# SQLAlchemy connection string for the reference implementation registry -# server. Any valid SQLAlchemy connection string is fine. -# See: http://bit.ly/ideIpI -#sql_connection = sqlite:///keystone.db -sql_connection = %SQL_CONN% -backend_entities = ['UserRoleAssociation', 'Endpoints', 'Role', 'Tenant', - 'User', 'Credentials', 'EndpointTemplates', 'Token', - 'Service'] - -# Period in seconds after which SQLAlchemy should reestablish its connection -# to the database. -sql_idle_timeout = 30 - -[pipeline:admin] -pipeline = - urlrewritefilter - admin_api - -[pipeline:keystone-legacy-auth] -pipeline = - urlrewritefilter - legacy_auth - RAX-KEY-extension - service_api - -[app:service_api] -paste.app_factory = keystone.server:service_app_factory - -[app:admin_api] -paste.app_factory = keystone.server:admin_app_factory - -[filter:urlrewritefilter] -paste.filter_factory = keystone.middleware.url:filter_factory - -[filter:legacy_auth] -paste.filter_factory = keystone.frontends.legacy_token_auth:filter_factory - -[filter:RAX-KEY-extension] -paste.filter_factory = keystone.contrib.extensions.service.raxkey.frontend:filter_factory diff --git a/files/keystone_data.sh b/files/keystone_data.sh index d926c52d..4c76c9b5 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -1,48 +1,221 @@ #!/bin/bash -BIN_DIR=${BIN_DIR:-.} -# Tenants -$BIN_DIR/keystone-manage $* tenant add admin -$BIN_DIR/keystone-manage $* tenant add demo -$BIN_DIR/keystone-manage $* tenant add invisible_to_admin +# +# Initial data for Keystone using python-keystoneclient +# +# Tenant User Roles +# ------------------------------------------------------------------ +# service glance admin +# service swift admin # if enabled +# service heat admin # if enabled +# service ceilometer admin # if enabled +# Tempest Only: +# alt_demo alt_demo Member +# +# Variables set before calling this script: +# SERVICE_TOKEN - aka admin_token in keystone.conf +# SERVICE_ENDPOINT - local Keystone admin endpoint +# SERVICE_TENANT_NAME - name of tenant containing service accounts +# SERVICE_HOST - host used for endpoint creation +# ENABLED_SERVICES - stack.sh's list of services to start +# DEVSTACK_DIR - Top-level DevStack directory +# KEYSTONE_CATALOG_BACKEND - used to determine service catalog creation + +# Defaults +# -------- + +ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete} +SERVICE_PASSWORD=${SERVICE_PASSWORD:-$ADMIN_PASSWORD} +export SERVICE_TOKEN=$SERVICE_TOKEN +export SERVICE_ENDPOINT=$SERVICE_ENDPOINT 
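The header comment above lists the environment this script expects from ``stack.sh``. For reference, a minimal manual invocation might look like the following sketch (all values are illustrative placeholders, not defaults)::

    export SERVICE_TOKEN=tokentoken
    export SERVICE_ENDPOINT=http://127.0.0.1:35357/v2.0
    export SERVICE_HOST=127.0.0.1
    export ENABLED_SERVICES=g-api,key
    export KEYSTONE_CATALOG_BACKEND=sql
    bash files/keystone_data.sh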
+SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} + +function get_id () { + echo `"$@" | awk '/ id / { print $4 }'` +} + +# Lookups +SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") +ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") +MEMBER_ROLE=$(keystone role-list | awk "/ Member / { print \$2 }") -# Users -$BIN_DIR/keystone-manage $* user add admin %ADMIN_PASSWORD% -$BIN_DIR/keystone-manage $* user add demo %ADMIN_PASSWORD% # Roles -$BIN_DIR/keystone-manage $* role add Admin -$BIN_DIR/keystone-manage $* role add Member -$BIN_DIR/keystone-manage $* role add KeystoneAdmin -$BIN_DIR/keystone-manage $* role add KeystoneServiceAdmin -$BIN_DIR/keystone-manage $* role add sysadmin -$BIN_DIR/keystone-manage $* role add netadmin -$BIN_DIR/keystone-manage $* role grant Admin admin admin -$BIN_DIR/keystone-manage $* role grant Member demo demo -$BIN_DIR/keystone-manage $* role grant sysadmin demo demo -$BIN_DIR/keystone-manage $* role grant netadmin demo demo -$BIN_DIR/keystone-manage $* role grant Member demo invisible_to_admin -$BIN_DIR/keystone-manage $* role grant Admin admin demo -$BIN_DIR/keystone-manage $* role grant Admin admin -$BIN_DIR/keystone-manage $* role grant KeystoneAdmin admin -$BIN_DIR/keystone-manage $* role grant KeystoneServiceAdmin admin +# ----- + +# The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it. +# The admin role in swift allows a user to act as an admin for their tenant, +# but ResellerAdmin is needed for a user to act as any tenant. The name of this +# role is also configurable in swift-proxy.conf +RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin) + # Services -$BIN_DIR/keystone-manage $* service add nova compute "Nova Compute Service" -$BIN_DIR/keystone-manage $* service add glance image "Glance Image Service" -$BIN_DIR/keystone-manage $* service add keystone identity "Keystone Identity Service" -$BIN_DIR/keystone-manage $* service add swift object-store "Swift Service" - -#endpointTemplates -$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne nova http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% 1 1 -$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne glance http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% 1 1 -$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%HOST_IP%:5000/v2.0 http://%HOST_IP%:35357/v2.0 http://%HOST_IP%:5000/v2.0 1 1 -$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%HOST_IP%:8080/v1/AUTH_%tenant_id% http://%HOST_IP%:8080/ http://%HOST_IP%:8080/v1/AUTH_%tenant_id% 1 1 - -# Tokens -$BIN_DIR/keystone-manage $* token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00 - -# EC2 related creds - note we are setting the secret key to ADMIN_PASSWORD -# but keystone doesn't parse them - it is just a blob from keystone's -# point of view -$BIN_DIR/keystone-manage $* credentials add admin EC2 'admin' '%ADMIN_PASSWORD%' admin || echo "no support for adding credentials" -$BIN_DIR/keystone-manage $* credentials add demo EC2 'demo' '%ADMIN_PASSWORD%' demo || echo "no support for adding credentials" +# -------- + +if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "swift" ]]; then + NOVA_USER=$(keystone user-list | awk "/ nova / { print \$2 }") + # Nova needs ResellerAdmin role to download images when accessing + # swift through the s3 api. 
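``get_id`` above wraps a keystone CLI call and extracts the value of the ``id`` row from the prettytable output (the awk pattern prints the fourth whitespace-separated field of a ``| id | <value> |`` row). A hedged usage sketch (tenant name illustrative)::

    # Capture the new tenant's UUID instead of the full table
    DEMO_TENANT=$(get_id keystone tenant-create --name=demo)
    echo $DEMO_TENANT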
+ keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $NOVA_USER \ + --role_id $RESELLER_ROLE +fi + +# Heat +if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then + HEAT_USER=$(get_id keystone user-create --name=heat \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=heat@example.com) + keystone user-role-add --tenant_id $SERVICE_TENANT \ + --user_id $HEAT_USER \ + --role_id $ADMIN_ROLE + # heat_stack_user role is for users created by Heat + keystone role-create --name heat_stack_user + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + HEAT_CFN_SERVICE=$(get_id keystone service-create \ + --name=heat-cfn \ + --type=cloudformation \ + --description="Heat CloudFormation Service") + keystone endpoint-create \ + --region RegionOne \ + --service_id $HEAT_CFN_SERVICE \ + --publicurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \ + --adminurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \ + --internalurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" + HEAT_SERVICE=$(get_id keystone service-create \ + --name=heat \ + --type=orchestration \ + --description="Heat Service") + keystone endpoint-create \ + --region RegionOne \ + --service_id $HEAT_SERVICE \ + --publicurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ + --adminurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ + --internalurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" + fi +fi + +# Glance +if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then + GLANCE_USER=$(get_id keystone user-create \ + --name=glance \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=glance@example.com) + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $GLANCE_USER \ + --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + GLANCE_SERVICE=$(get_id keystone service-create \ + --name=glance \ + --type=image \ + --description="Glance Image Service") + keystone endpoint-create \ + --region RegionOne \ + --service_id $GLANCE_SERVICE \ + --publicurl "http://$SERVICE_HOST:9292" \ + --adminurl "http://$SERVICE_HOST:9292" \ + --internalurl "http://$SERVICE_HOST:9292" + fi +fi + +# Swift +if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then + SWIFT_USER=$(get_id keystone user-create \ + --name=swift \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=swift@example.com) + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $SWIFT_USER \ + --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + SWIFT_SERVICE=$(get_id keystone service-create \ + --name=swift \ + --type="object-store" \ + --description="Swift Service") + keystone endpoint-create \ + --region RegionOne \ + --service_id $SWIFT_SERVICE \ + --publicurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \ + --adminurl "http://$SERVICE_HOST:8080" \ + --internalurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" + fi +fi + +if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then + CEILOMETER_USER=$(get_id keystone user-create --name=ceilometer \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=ceilometer@example.com) + keystone user-role-add --tenant_id $SERVICE_TENANT \ + --user_id $CEILOMETER_USER \ + --role_id $ADMIN_ROLE + # Ceilometer needs ResellerAdmin role to access swift account stats. 
+ keystone user-role-add --tenant_id $SERVICE_TENANT \ + --user_id $CEILOMETER_USER \ + --role_id $RESELLER_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + CEILOMETER_SERVICE=$(get_id keystone service-create \ + --name=ceilometer \ + --type=metering \ + --description="Ceilometer Service") + keystone endpoint-create \ + --region RegionOne \ + --service_id $CEILOMETER_SERVICE \ + --publicurl "http://$SERVICE_HOST:8777/" \ + --adminurl "http://$SERVICE_HOST:8777/" \ + --internalurl "http://$SERVICE_HOST:8777/" + fi +fi + +# EC2 +if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + EC2_SERVICE=$(get_id keystone service-create \ + --name=ec2 \ + --type=ec2 \ + --description="EC2 Compatibility Layer") + keystone endpoint-create \ + --region RegionOne \ + --service_id $EC2_SERVICE \ + --publicurl "http://$SERVICE_HOST:8773/services/Cloud" \ + --adminurl "http://$SERVICE_HOST:8773/services/Admin" \ + --internalurl "http://$SERVICE_HOST:8773/services/Cloud" + fi +fi + +# S3 +if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift" ]]; then + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + S3_SERVICE=$(get_id keystone service-create \ + --name=s3 \ + --type=s3 \ + --description="S3") + keystone endpoint-create \ + --region RegionOne \ + --service_id $S3_SERVICE \ + --publicurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ + --adminurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ + --internalurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" + fi +fi + +if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then + # Tempest has some tests that validate various authorization checks + # between two regular users in separate tenants + ALT_DEMO_TENANT=$(get_id keystone tenant-create \ + --name=alt_demo) + ALT_DEMO_USER=$(get_id keystone user-create \ + --name=alt_demo \ + --pass="$ADMIN_PASSWORD" \ + --email=alt_demo@example.com) + keystone user-role-add \ + --tenant_id $ALT_DEMO_TENANT \ + --user_id $ALT_DEMO_USER \ + --role_id $MEMBER_ROLE +fi diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in new file mode 100644 index 00000000..e522150f --- /dev/null +++ b/files/ldap/manager.ldif.in @@ -0,0 +1,10 @@ +dn: olcDatabase={${LDAP_OLCDB_NUMBER}}hdb,cn=config +changetype: modify +replace: olcSuffix +olcSuffix: dc=openstack,dc=org +- +replace: olcRootDN +olcRootDN: dc=Manager,dc=openstack,dc=org +- +${LDAP_ROOTPW_COMMAND}: olcRootPW +olcRootPW: ${SLAPPASS} diff --git a/files/ldap/openstack.ldif b/files/ldap/openstack.ldif new file mode 100644 index 00000000..287fda45 --- /dev/null +++ b/files/ldap/openstack.ldif @@ -0,0 +1,21 @@ +dn: dc=openstack,dc=org +dc: openstack +objectClass: dcObject +objectClass: organizationalUnit +ou: openstack + +dn: ou=Groups,dc=openstack,dc=org +objectClass: organizationalUnit +ou: Groups + +dn: ou=Users,dc=openstack,dc=org +objectClass: organizationalUnit +ou: Users + +dn: ou=Roles,dc=openstack,dc=org +objectClass: organizationalUnit +ou: Roles + +dn: ou=Projects,dc=openstack,dc=org +objectClass: organizationalUnit +ou: Projects diff --git a/files/nova-api-paste.ini b/files/nova-api-paste.ini deleted file mode 100644 index 2c642f8d..00000000 --- a/files/nova-api-paste.ini +++ /dev/null @@ -1,127 +0,0 @@ -####### -# EC2 # -####### - -[composite:ec2] -use = egg:Paste#urlmap -/: ec2versions -/services/Cloud: ec2cloud -/services/Admin: ec2admin -/latest: ec2metadata -/2007-01-19: ec2metadata -/2007-03-01: ec2metadata -/2007-08-29: ec2metadata -/2007-10-10: ec2metadata -/2007-12-15: ec2metadata -/2008-02-01: 
ec2metadata -/2008-09-01: ec2metadata -/2009-04-04: ec2metadata -/1.0: ec2metadata - -[pipeline:ec2cloud] -pipeline = logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor - -[pipeline:ec2admin] -pipeline = logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor - -[pipeline:ec2metadata] -pipeline = logrequest ec2md - -[pipeline:ec2versions] -pipeline = logrequest ec2ver - -[filter:logrequest] -paste.filter_factory = nova.api.ec2:RequestLogging.factory - -[filter:ec2lockout] -paste.filter_factory = nova.api.ec2:Lockout.factory - -[filter:totoken] -paste.filter_factory = keystone.middleware.ec2_token:EC2Token.factory - -[filter:ec2noauth] -paste.filter_factory = nova.api.ec2:NoAuth.factory - -[filter:authenticate] -paste.filter_factory = nova.api.ec2:Authenticate.factory - -[filter:cloudrequest] -controller = nova.api.ec2.cloud.CloudController -paste.filter_factory = nova.api.ec2:Requestify.factory - -[filter:adminrequest] -controller = nova.api.ec2.admin.AdminController -paste.filter_factory = nova.api.ec2:Requestify.factory - -[filter:authorizer] -paste.filter_factory = nova.api.ec2:Authorizer.factory - -[app:ec2executor] -paste.app_factory = nova.api.ec2:Executor.factory - -[app:ec2ver] -paste.app_factory = nova.api.ec2:Versions.factory - -[app:ec2md] -paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.factory - -############# -# Openstack # -############# - -[composite:osapi] -use = egg:Paste#urlmap -/: osversions -/v1.0: openstackapi10 -/v1.1: openstackapi11 - -[pipeline:openstackapi10] -pipeline = faultwrap authtoken keystonecontext ratelimit osapiapp10 - -[pipeline:openstackapi11] -pipeline = faultwrap authtoken keystonecontext ratelimit extensions osapiapp11 - -[filter:faultwrap] -paste.filter_factory = nova.api.openstack:FaultWrapper.factory - -[filter:auth] -paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory - -[filter:noauth] -paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory - -[filter:ratelimit] -paste.filter_factory = nova.api.openstack.limits:RateLimitingMiddleware.factory - -[filter:extensions] -paste.filter_factory = nova.api.openstack.extensions:ExtensionMiddleware.factory - -[app:osapiapp10] -paste.app_factory = nova.api.openstack:APIRouterV10.factory - -[app:osapiapp11] -paste.app_factory = nova.api.openstack:APIRouterV11.factory - -[pipeline:osversions] -pipeline = faultwrap osversionapp - -[app:osversionapp] -paste.app_factory = nova.api.openstack.versions:Versions.factory - -########## -# Shared # -########## - -[filter:keystonecontext] -paste.filter_factory = keystone.middleware.nova_keystone_context:NovaKeystoneContext.factory - -[filter:authtoken] -paste.filter_factory = keystone.middleware.auth_token:filter_factory -service_protocol = http -service_host = 127.0.0.1 -service_port = 5000 -auth_host = 127.0.0.1 -auth_port = 35357 -auth_protocol = http -auth_uri = http://127.0.0.1:5000/ -admin_token = %SERVICE_TOKEN% diff --git a/files/pips/horizon b/files/pips/horizon deleted file mode 100644 index bebc0bee..00000000 --- a/files/pips/horizon +++ /dev/null @@ -1,20 +0,0 @@ -nose==1.0.0 -Django==1.3 -django-nose==0.1.2 -django-mailer -django-registration==0.7 -kombu -python-cloudfiles -python-dateutil -webob -sqlalchemy -paste -PasteDeploy -sqlalchemy-migrate -eventlet -xattr -pep8 -pylint - --e git+https://github.com/jacobian/openstack.compute.git#egg=openstack - 
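The removed ``files/nova-api-paste.ini`` above routed the EC2 composite by URL prefix: ``/services/Cloud`` to the cloud controller pipeline, dated paths such as ``/2009-04-04`` to the metadata handler, and ``/`` to the versions app. Assuming nova's EC2 API on its conventional port 8773 (the same port the keystone catalog entries later in this patch use; host hypothetical), that routing can be exercised directly::

    curl http://127.0.0.1:8773/                   # ec2versions pipeline
    curl http://127.0.0.1:8773/latest/meta-data/  # ec2metadata pipeline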
diff --git a/files/pips/keystone b/files/pips/keystone deleted file mode 100644 index 09636e49..00000000 --- a/files/pips/keystone +++ /dev/null @@ -1 +0,0 @@ -PassLib diff --git a/files/rpms-suse/ceilometer-collector b/files/rpms-suse/ceilometer-collector new file mode 100644 index 00000000..c76454fd --- /dev/null +++ b/files/rpms-suse/ceilometer-collector @@ -0,0 +1,4 @@ +# Not available in openSUSE main repositories, but can be fetched from OBS +# (devel:languages:python and server:database projects) +mongodb +python-pymongo diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder new file mode 100644 index 00000000..e5b47274 --- /dev/null +++ b/files/rpms-suse/cinder @@ -0,0 +1,2 @@ +lvm2 +tgt diff --git a/files/rpms-suse/general b/files/rpms-suse/general new file mode 100644 index 00000000..8ed74ec0 --- /dev/null +++ b/files/rpms-suse/general @@ -0,0 +1,23 @@ +bridge-utils +curl +euca2ools +git-core +iputils +openssh +psmisc +python-cmd2 # dist:opensuse-12.3 +python-netaddr +python-pep8 +python-pip +python-pylint +python-unittest2 +python-virtualenv +screen +tar +tcpdump +unzip +vim-enhanced +wget + +findutils-locate # useful when debugging +lsof # useful when debugging diff --git a/files/rpms-suse/glance b/files/rpms-suse/glance new file mode 100644 index 00000000..dd68ac08 --- /dev/null +++ b/files/rpms-suse/glance @@ -0,0 +1,12 @@ +gcc +libxml2-devel +python-PasteDeploy +python-Routes +python-SQLAlchemy +python-argparse +python-devel +python-eventlet +python-greenlet +python-iso8601 +python-wsgiref +python-xattr diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon new file mode 100644 index 00000000..7e46ffe0 --- /dev/null +++ b/files/rpms-suse/horizon @@ -0,0 +1,23 @@ +apache2 # NOPRIME +apache2-mod_wsgi # NOPRIME +nodejs +python-CherryPy # why? 
(coming from apts) +python-Paste +python-PasteDeploy +python-Routes +python-Sphinx +python-SQLAlchemy +python-WebOb +python-anyjson +python-beautifulsoup +python-coverage +python-dateutil +python-eventlet +python-kombu +python-mox +python-netaddr +python-nose +python-pep8 +python-pylint +python-sqlalchemy-migrate +python-xattr diff --git a/files/rpms-suse/keystone b/files/rpms-suse/keystone new file mode 100644 index 00000000..b3c876ad --- /dev/null +++ b/files/rpms-suse/keystone @@ -0,0 +1,17 @@ +cyrus-sasl-devel +openldap2-devel +python-Paste +python-PasteDeploy +python-PasteScript +python-Routes +python-SQLAlchemy +python-WebOb +python-devel +python-distribute +python-setuptools # instead of python-distribute; dist:sle11sp2 +python-greenlet +python-lxml +python-mysql +python-py-bcrypt +python-pysqlite +sqlite3 diff --git a/files/rpms-suse/n-api b/files/rpms-suse/n-api new file mode 100644 index 00000000..ad943ffd --- /dev/null +++ b/files/rpms-suse/n-api @@ -0,0 +1,2 @@ +gcc # temporary because this pulls in glance to get the client without running the glance prereqs +python-dateutil diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu new file mode 100644 index 00000000..7040b843 --- /dev/null +++ b/files/rpms-suse/n-cpu @@ -0,0 +1,6 @@ +# Stuff for diablo volumes +genisoimage +lvm2 +open-iscsi +sysfsutils +sg3_utils diff --git a/files/rpms-suse/n-novnc b/files/rpms-suse/n-novnc new file mode 100644 index 00000000..c8722b9f --- /dev/null +++ b/files/rpms-suse/n-novnc @@ -0,0 +1 @@ +python-numpy diff --git a/files/rpms-suse/n-vol b/files/rpms-suse/n-vol new file mode 100644 index 00000000..e5b47274 --- /dev/null +++ b/files/rpms-suse/n-vol @@ -0,0 +1,2 @@ +lvm2 +tgt diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova new file mode 100644 index 00000000..03067162 --- /dev/null +++ b/files/rpms-suse/nova @@ -0,0 +1,50 @@ +curl +# Note: we need to package dhcp_release in dnsmasq! +dnsmasq +ebtables +gawk +iptables +iputils +kpartx +kvm +# qemu as fallback if kvm cannot be used +qemu +libvirt # NOPRIME +libvirt-python +libxml2-python +mysql-community-server # NOPRIME +parted +python-M2Crypto +python-m2crypto # dist:sle11sp2 +python-Paste +python-PasteDeploy +python-Routes +python-SQLAlchemy +python-Tempita +python-boto +python-carrot +python-cheetah +python-eventlet +python-feedparser +python-greenlet +python-iso8601 +python-kombu +python-lockfile +python-lxml # needed for glance which is needed for nova --- this shouldn't be here +python-mox +python-mysql +python-netaddr +python-paramiko +python-python-gflags +python-sqlalchemy-migrate +python-suds +python-xattr # needed for glance which is needed for nova --- this shouldn't be here +rabbitmq-server # NOPRIME +socat +sqlite3 +sudo +vlan + +# FIXME: qpid is not part of openSUSE, those names are tentative +python-qpid # NOPRIME +qpidd # NOPRIME diff --git a/files/rpms-suse/postgresql b/files/rpms-suse/postgresql new file mode 100644 index 00000000..bf19d397 --- /dev/null +++ b/files/rpms-suse/postgresql @@ -0,0 +1 @@ +python-psycopg2 diff --git a/files/rpms-suse/quantum b/files/rpms-suse/quantum new file mode 100644 index 00000000..068c15c2 --- /dev/null +++ b/files/rpms-suse/quantum @@ -0,0 +1,27 @@ +# Note: we need to package dhcp_release in dnsmasq! 
+dnsmasq +ebtables +iptables +iputils +mysql-community-server # NOPRIME +python-boto +python-eventlet +python-greenlet +python-iso8601 +python-kombu +python-mysql +python-netaddr +python-Paste +python-PasteDeploy +python-pyudev +python-Routes +python-SQLAlchemy +python-suds +rabbitmq-server # NOPRIME +sqlite3 +sudo +vlan + +# FIXME: qpid is not part of openSUSE, those names are tentative +python-qpid # NOPRIME +qpidd # NOPRIME diff --git a/files/rpms-suse/ryu b/files/rpms-suse/ryu new file mode 100644 index 00000000..763fd24c --- /dev/null +++ b/files/rpms-suse/ryu @@ -0,0 +1,5 @@ +python-distribute +python-setuptools # instead of python-distribute; dist:sle11sp2 +python-Sphinx +python-gevent +python-python-gflags diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift new file mode 100644 index 00000000..db379bbc --- /dev/null +++ b/files/rpms-suse/swift @@ -0,0 +1,19 @@ +curl +gcc +memcached +python-PasteDeploy +python-WebOb +python-configobj +python-coverage +python-devel +python-distribute +python-setuptools # instead of python-distribute; dist:sle11sp2 +python-eventlet +python-greenlet +python-netifaces +python-nose +python-simplejson +python-xattr +sqlite3 +xfsprogs +xinetd diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector new file mode 100644 index 00000000..c5c855c0 --- /dev/null +++ b/files/rpms/ceilometer-collector @@ -0,0 +1,2 @@ +mongodb-server +pymongo diff --git a/files/rpms/cinder b/files/rpms/cinder new file mode 100644 index 00000000..df861aad --- /dev/null +++ b/files/rpms/cinder @@ -0,0 +1,2 @@ +lvm2 +scsi-target-utils diff --git a/files/rpms/general b/files/rpms/general new file mode 100644 index 00000000..e4f143d1 --- /dev/null +++ b/files/rpms/general @@ -0,0 +1,17 @@ +bridge-utils +curl +euca2ools # only for testing client +git-core +openssh-server +psmisc +pylint +python-netaddr +python-pep8 +python-pip +python-unittest2 +python-virtualenv +screen +tar +tcpdump +unzip +wget diff --git a/files/rpms/glance b/files/rpms/glance new file mode 100644 index 00000000..eff6c2c0 --- /dev/null +++ b/files/rpms/glance @@ -0,0 +1,10 @@ +libxml2-devel +python-argparse +python-devel +python-eventlet +python-greenlet +python-paste-deploy +python-routes +python-sqlalchemy +python-wsgiref +pyxattr diff --git a/files/rpms/horizon b/files/rpms/horizon new file mode 100644 index 00000000..12f75ba5 --- /dev/null +++ b/files/rpms/horizon @@ -0,0 +1,27 @@ +Django +django-registration +gcc +httpd # NOPRIME +mod_wsgi # NOPRIME +pylint +python-anyjson +python-BeautifulSoup +python-boto +python-coverage +python-dateutil +python-eventlet +python-greenlet +python-httplib2 +python-kombu +python-migrate +python-mox +python-netaddr +python-nose +python-paste +python-paste-deploy +python-pep8 +python-routes +python-sphinx +python-sqlalchemy +python-webob +pyxattr diff --git a/files/rpms/keystone b/files/rpms/keystone new file mode 100644 index 00000000..59868c7f --- /dev/null +++ b/files/rpms/keystone @@ -0,0 +1,11 @@ +python-greenlet +python-lxml +python-paste +python-paste-deploy +python-paste-script +python-routes +python-setuptools +python-sqlalchemy +python-sqlite2 +python-webob +sqlite diff --git a/files/rpms/ldap b/files/rpms/ldap new file mode 100644 index 00000000..2f7ab5de --- /dev/null +++ b/files/rpms/ldap @@ -0,0 +1,3 @@ +openldap-servers +openldap-clients +python-ldap diff --git a/files/rpms/n-api b/files/rpms/n-api new file mode 100644 index 00000000..0f08daac --- /dev/null +++ b/files/rpms/n-api @@ -0,0 +1 @@ +python-dateutil diff --git 
a/files/rpms/n-cpu b/files/rpms/n-cpu new file mode 100644 index 00000000..149672ac --- /dev/null +++ b/files/rpms/n-cpu @@ -0,0 +1,6 @@ +# Stuff for diablo volumes +iscsi-initiator-utils +lvm2 +genisoimage +sysfsutils +sg3_utils diff --git a/files/rpms/n-novnc b/files/rpms/n-novnc new file mode 100644 index 00000000..24ce15ab --- /dev/null +++ b/files/rpms/n-novnc @@ -0,0 +1 @@ +numpy diff --git a/files/rpms/n-spice b/files/rpms/n-spice new file mode 100644 index 00000000..24ce15ab --- /dev/null +++ b/files/rpms/n-spice @@ -0,0 +1 @@ +numpy diff --git a/files/rpms/n-vol b/files/rpms/n-vol new file mode 100644 index 00000000..df861aad --- /dev/null +++ b/files/rpms/n-vol @@ -0,0 +1,2 @@ +lvm2 +scsi-target-utils diff --git a/files/rpms/nova b/files/rpms/nova new file mode 100644 index 00000000..568ee7f5 --- /dev/null +++ b/files/rpms/nova @@ -0,0 +1,42 @@ +MySQL-python +curl +dnsmasq-utils # for dhcp_release +ebtables +gawk +iptables +iputils +kpartx +kvm +libvirt-bin # NOPRIME +libvirt-python +libxml2-python +numpy # needed by websockify for spice console +m2crypto +mysql-server # NOPRIME +parted +python-boto +python-carrot +python-cheetah +python-eventlet +python-feedparser +python-gflags +python-greenlet +python-iso8601 +python-kombu +python-lockfile +python-migrate +python-mox +python-netaddr +python-paramiko +python-paste +python-paste-deploy +python-qpid +python-routes +python-sqlalchemy +python-suds +python-tempita +rabbitmq-server # NOPRIME +qpid-cpp-server-daemon # NOPRIME +sqlite +sudo +vconfig diff --git a/files/rpms/postgresql b/files/rpms/postgresql new file mode 100644 index 00000000..bf19d397 --- /dev/null +++ b/files/rpms/postgresql @@ -0,0 +1 @@ +python-psycopg2 diff --git a/files/rpms/quantum b/files/rpms/quantum new file mode 100644 index 00000000..05398fcf --- /dev/null +++ b/files/rpms/quantum @@ -0,0 +1,23 @@ +MySQL-python +dnsmasq-utils # for dhcp_release +ebtables +iptables +iputils +mysql-server # NOPRIME +python-boto +python-eventlet +python-greenlet +python-iso8601 +python-kombu +python-netaddr +python-paste +python-paste-deploy +python-qpid +python-routes +python-sqlalchemy +python-suds +rabbitmq-server # NOPRIME +qpid-cpp-server-daemon # NOPRIME +sqlite +sudo +vconfig diff --git a/files/rpms/ryu b/files/rpms/ryu new file mode 100644 index 00000000..4a4fc523 --- /dev/null +++ b/files/rpms/ryu @@ -0,0 +1,5 @@ +python-setuptools +python-gevent +python-gflags +python-netifaces +python-sphinx diff --git a/files/rpms/swift b/files/rpms/swift new file mode 100644 index 00000000..ce41ceb8 --- /dev/null +++ b/files/rpms/swift @@ -0,0 +1,18 @@ +curl +gcc +memcached +python-configobj +python-coverage +python-devel +python-eventlet +python-greenlet +python-netifaces +python-nose +python-paste-deploy +python-setuptools +python-simplejson +python-webob +pyxattr +sqlite +xfsprogs +xinetd diff --git a/files/screenrc b/files/screenrc deleted file mode 100644 index 1ca47da5..00000000 --- a/files/screenrc +++ /dev/null @@ -1,9 +0,0 @@ -hardstatus on -hardstatus alwayslastline -hardstatus string "%{.bW}%-w%{.rW}%n %t%{-}%+w %=%{..G}%H %{..Y}%d/%m %c" - -defscrollback 1024 - -vbell off -startup_message off - diff --git a/files/sudo/nova b/files/sudo/nova deleted file mode 100644 index 62685b31..00000000 --- a/files/sudo/nova +++ /dev/null @@ -1,47 +0,0 @@ -Cmnd_Alias NOVACMDS = /bin/chmod /var/lib/nova/tmp/*/root/.ssh, \ - /bin/chown /var/lib/nova/tmp/*/root/.ssh, \ - /bin/chown, \ - /bin/chmod, \ - /bin/dd, \ - /sbin/ifconfig, \ - /sbin/ip, \ - /sbin/route, \ - /sbin/iptables, \ - 
/sbin/iptables-save, \ - /sbin/iptables-restore, \ - /sbin/ip6tables-save, \ - /sbin/ip6tables-restore, \ - /sbin/kpartx, \ - /sbin/losetup, \ - /sbin/lvcreate, \ - /sbin/lvdisplay, \ - /sbin/lvremove, \ - /bin/mkdir, \ - /bin/mount, \ - /sbin/pvcreate, \ - /usr/bin/tee, \ - /sbin/tune2fs, \ - /bin/umount, \ - /sbin/vgcreate, \ - /usr/bin/virsh, \ - /usr/bin/qemu-nbd, \ - /usr/sbin/brctl, \ - /sbin/brctl, \ - /usr/sbin/radvd, \ - /usr/sbin/vblade-persist, \ - /sbin/pvcreate, \ - /sbin/aoe-discover, \ - /sbin/vgcreate, \ - /bin/aoe-stat, \ - /bin/kill, \ - /sbin/vconfig, \ - /usr/sbin/ietadm, \ - /sbin/vgs, \ - /sbin/iscsiadm, \ - /usr/bin/socat, \ - /sbin/parted, \ - /usr/sbin/dnsmasq, \ - /usr/sbin/arping - -%USER% ALL = (root) NOPASSWD: SETENV: NOVACMDS - diff --git a/files/swift/account-server.conf b/files/swift/account-server.conf deleted file mode 100644 index db0f097f..00000000 --- a/files/swift/account-server.conf +++ /dev/null @@ -1,20 +0,0 @@ -[DEFAULT] -devices = %NODE_PATH%/node -mount_check = false -bind_port = %BIND_PORT% -user = %USER% -log_facility = LOG_LOCAL%LOG_FACILITY% -swift_dir = %SWIFT_CONFIG_LOCATION% - -[pipeline:main] -pipeline = account-server - -[app:account-server] -use = egg:swift#account - -[account-replicator] -vm_test_mode = yes - -[account-auditor] - -[account-reaper] diff --git a/files/swift/container-server.conf b/files/swift/container-server.conf deleted file mode 100644 index bdc3e3a0..00000000 --- a/files/swift/container-server.conf +++ /dev/null @@ -1,22 +0,0 @@ -[DEFAULT] -devices = %NODE_PATH%/node -mount_check = false -bind_port = %BIND_PORT% -user = %USER% -log_facility = LOG_LOCAL%LOG_FACILITY% -swift_dir = %SWIFT_CONFIG_LOCATION% - -[pipeline:main] -pipeline = container-server - -[app:container-server] -use = egg:swift#container - -[container-replicator] -vm_test_mode = yes - -[container-updater] - -[container-auditor] - -[container-sync] diff --git a/files/swift/object-server.conf b/files/swift/object-server.conf deleted file mode 100644 index 06fbffea..00000000 --- a/files/swift/object-server.conf +++ /dev/null @@ -1,20 +0,0 @@ -[DEFAULT] -devices = %NODE_PATH%/node -mount_check = false -bind_port = %BIND_PORT% -user = %USER% -log_facility = LOG_LOCAL%LOG_FACILITY% -swift_dir = %SWIFT_CONFIG_LOCATION% - -[pipeline:main] -pipeline = object-server - -[app:object-server] -use = egg:swift#object - -[object-replicator] -vm_test_mode = yes - -[object-updater] - -[object-auditor] diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf deleted file mode 100644 index 2db6d322..00000000 --- a/files/swift/proxy-server.conf +++ /dev/null @@ -1,33 +0,0 @@ -[DEFAULT] -bind_port = 8080 -user = %USER% -log_facility = LOG_LOCAL1 -swift_dir = %SWIFT_CONFIG_LOCATION% - -[pipeline:main] -pipeline = healthcheck cache %AUTH_SERVER% proxy-server - -[app:proxy-server] -use = egg:swift#proxy -allow_account_management = true -account_autocreate = true - -[filter:keystone] -use = egg:swiftkeystone2#keystone2 -keystone_admin_token = %SERVICE_TOKEN% -keystone_url = http://localhost:35357/v2.0 -keystone_admin_group = Member - -[filter:tempauth] -use = egg:swift#tempauth -user_admin_admin = admin .admin .reseller_admin -user_test_tester = testing .admin -user_test2_tester2 = testing2 .admin -user_test_tester3 = testing3 -bind_ip = 0.0.0.0 - -[filter:healthcheck] -use = egg:swift#healthcheck - -[filter:cache] -use = egg:swift#memcache diff --git a/files/swift/rsyncd.conf b/files/swift/rsyncd.conf index 
66215c7f..c670531b 100644 --- a/files/swift/rsyncd.conf +++ b/files/swift/rsyncd.conf @@ -1,79 +1,79 @@ uid = %USER% gid = %GROUP% -log file = /var/log/rsyncd.log -pid file = /var/run/rsyncd.pid +log file = %SWIFT_DATA_DIR%/logs/rsyncd.log +pid file = %SWIFT_DATA_DIR%/run/rsyncd.pid address = 127.0.0.1 [account6012] max connections = 25 -path = %SWIFT_DATA_LOCATION%/1/node/ +path = %SWIFT_DATA_DIR%/1/node/ read only = false -lock file = /var/lock/account6012.lock +lock file = %SWIFT_DATA_DIR%/run/account6012.lock [account6022] max connections = 25 -path = %SWIFT_DATA_LOCATION%/2/node/ +path = %SWIFT_DATA_DIR%/2/node/ read only = false -lock file = /var/lock/account6022.lock +lock file = %SWIFT_DATA_DIR%/run/account6022.lock [account6032] max connections = 25 -path = %SWIFT_DATA_LOCATION%/3/node/ +path = %SWIFT_DATA_DIR%/3/node/ read only = false -lock file = /var/lock/account6032.lock +lock file = %SWIFT_DATA_DIR%/run/account6032.lock [account6042] max connections = 25 -path = %SWIFT_DATA_LOCATION%/4/node/ +path = %SWIFT_DATA_DIR%/4/node/ read only = false -lock file = /var/lock/account6042.lock +lock file = %SWIFT_DATA_DIR%/run/account6042.lock [container6011] max connections = 25 -path = %SWIFT_DATA_LOCATION%/1/node/ +path = %SWIFT_DATA_DIR%/1/node/ read only = false -lock file = /var/lock/container6011.lock +lock file = %SWIFT_DATA_DIR%/run/container6011.lock [container6021] max connections = 25 -path = %SWIFT_DATA_LOCATION%/2/node/ +path = %SWIFT_DATA_DIR%/2/node/ read only = false -lock file = /var/lock/container6021.lock +lock file = %SWIFT_DATA_DIR%/run/container6021.lock [container6031] max connections = 25 -path = %SWIFT_DATA_LOCATION%/3/node/ +path = %SWIFT_DATA_DIR%/3/node/ read only = false -lock file = /var/lock/container6031.lock +lock file = %SWIFT_DATA_DIR%/run/container6031.lock [container6041] max connections = 25 -path = %SWIFT_DATA_LOCATION%/4/node/ +path = %SWIFT_DATA_DIR%/4/node/ read only = false -lock file = /var/lock/container6041.lock +lock file = %SWIFT_DATA_DIR%/run/container6041.lock [object6010] max connections = 25 -path = %SWIFT_DATA_LOCATION%/1/node/ +path = %SWIFT_DATA_DIR%/1/node/ read only = false -lock file = /var/lock/object6010.lock +lock file = %SWIFT_DATA_DIR%/run/object6010.lock [object6020] max connections = 25 -path = %SWIFT_DATA_LOCATION%/2/node/ +path = %SWIFT_DATA_DIR%/2/node/ read only = false -lock file = /var/lock/object6020.lock +lock file = %SWIFT_DATA_DIR%/run/object6020.lock [object6030] max connections = 25 -path = %SWIFT_DATA_LOCATION%/3/node/ +path = %SWIFT_DATA_DIR%/3/node/ read only = false -lock file = /var/lock/object6030.lock +lock file = %SWIFT_DATA_DIR%/run/object6030.lock [object6040] max connections = 25 -path = %SWIFT_DATA_LOCATION%/4/node/ +path = %SWIFT_DATA_DIR%/4/node/ read only = false -lock file = /var/lock/object6040.lock +lock file = %SWIFT_DATA_DIR%/run/object6040.lock diff --git a/files/swift/rsyslog.conf b/files/swift/rsyslog.conf new file mode 100644 index 00000000..011c893b --- /dev/null +++ b/files/swift/rsyslog.conf @@ -0,0 +1,26 @@ +# Uncomment the following to have a log containing all logs together +#local1,local2,local3,local4,local5.* %SWIFT_LOGDIR%/all.log + +# Uncomment the following to have hourly proxy logs for stats processing +#$template HourlyProxyLog,"%SWIFT_LOGDIR%/hourly/%$YEAR%%$MONTH%%$DAY%%$HOUR%" +#local1.*;local1.!notice ?HourlyProxyLog + +local1.*;local1.!notice %SWIFT_LOGDIR%/proxy.log +local1.notice %SWIFT_LOGDIR%/proxy.error +local1.* ~ + +local2.*;local2.!notice 
%SWIFT_LOGDIR%/storage1.log +local2.notice %SWIFT_LOGDIR%/storage1.error +local2.* ~ + +local3.*;local3.!notice %SWIFT_LOGDIR%/storage2.log +local3.notice %SWIFT_LOGDIR%/storage2.error +local3.* ~ + +local4.*;local4.!notice %SWIFT_LOGDIR%/storage3.log +local4.notice %SWIFT_LOGDIR%/storage3.error +local4.* ~ + +local5.*;local5.!notice %SWIFT_LOGDIR%/storage4.log +local5.notice %SWIFT_LOGDIR%/storage4.error +local5.* ~ diff --git a/files/swift/swift-remakerings b/files/swift/swift-remakerings deleted file mode 100755 index c65353ce..00000000 --- a/files/swift/swift-remakerings +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -cd %SWIFT_CONFIG_LOCATION% - -rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz - -swift-ring-builder object.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1 -swift-ring-builder object.builder add z1-127.0.0.1:6010/sdb1 1 -swift-ring-builder object.builder add z2-127.0.0.1:6020/sdb2 1 -swift-ring-builder object.builder add z3-127.0.0.1:6030/sdb3 1 -swift-ring-builder object.builder add z4-127.0.0.1:6040/sdb4 1 -swift-ring-builder object.builder rebalance - -swift-ring-builder container.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1 -swift-ring-builder container.builder add z1-127.0.0.1:6011/sdb1 1 -swift-ring-builder container.builder add z2-127.0.0.1:6021/sdb2 1 -swift-ring-builder container.builder add z3-127.0.0.1:6031/sdb3 1 -swift-ring-builder container.builder add z4-127.0.0.1:6041/sdb4 1 -swift-ring-builder container.builder rebalance - -swift-ring-builder account.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1 -swift-ring-builder account.builder add z1-127.0.0.1:6012/sdb1 1 -swift-ring-builder account.builder add z2-127.0.0.1:6022/sdb2 1 -swift-ring-builder account.builder add z3-127.0.0.1:6032/sdb3 1 -swift-ring-builder account.builder add z4-127.0.0.1:6042/sdb4 1 -swift-ring-builder account.builder rebalance diff --git a/files/swift/swift-startmain b/files/swift/swift-startmain deleted file mode 100755 index 69efebd9..00000000 --- a/files/swift/swift-startmain +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -swift-init all restart diff --git a/files/swift/swift.conf b/files/swift/swift.conf deleted file mode 100644 index 98df4663..00000000 --- a/files/swift/swift.conf +++ /dev/null @@ -1,3 +0,0 @@ -[swift-hash] -# random unique string that can never change (DO NOT LOSE) -swift_hash_path_suffix = %SWIFT_HASH% diff --git a/functions b/functions new file mode 100644 index 00000000..ae63436a --- /dev/null +++ b/functions @@ -0,0 +1,1176 @@ +# functions - Common functions used by DevStack components +# +# The following variables are assumed to be defined by certain functions: +# ``ENABLED_SERVICES`` +# ``ERROR_ON_CLONE`` +# ``FILES`` +# ``GLANCE_HOSTPORT`` +# ``OFFLINE`` +# ``PIP_DOWNLOAD_CACHE`` +# ``PIP_USE_MIRRORS`` +# ``RECLONE`` +# ``TRACK_DEPENDS`` +# ``http_proxy``, ``https_proxy``, ``no_proxy`` + + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Exit 0 if the address is in the network, or 1 if the address is not in +# the network or the netaddr library is not installed. +# address_in_net ip-address ip-range +function address_in_net() { + python -c " +import netaddr +import sys +sys.exit(netaddr.IPAddress('$1') not in netaddr.IPNetwork('$2')) +" +} + + +# Wrapper for ``apt-get`` to set cache and proxy environment variables +# Uses globals ``OFFLINE``, ``*_proxy`` +# apt_get operation package [package ...]
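``address_in_net`` above signals membership purely through its exit status, so it composes naturally with shell conditionals. A minimal sketch (addresses arbitrary)::

    if address_in_net 192.168.1.5 192.168.1.0/24; then
        echo "address is inside the range"
    fi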
+function apt_get() { + [[ "$OFFLINE" = "True" || -z "$@" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + $sudo DEBIAN_FRONTEND=noninteractive \ + http_proxy=$http_proxy https_proxy=$https_proxy \ + no_proxy=$no_proxy \ + apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@" +} + + +# Gracefully cp only if source file/dir exists +# cp_it source destination +function cp_it { + if [ -e $1 ] || [ -d $1 ]; then + cp -pRL $1 $2 + fi +} + + +# Prints "message" and exits +# die "message" +function die() { + local exitcode=$? + set +o xtrace + echo $@ + exit $exitcode +} + + +# Prints "message" and exits if the named environment variable is not set or +# has length 0, or if the exit code of the previous command was non-zero +# NOTE: env-var is the variable name without a '$' +# die_if_not_set env-var "message" +function die_if_not_set() { + ( + local exitcode=$? + set +o xtrace + local evar=$1; shift + if ! is_set $evar || [ $exitcode != 0 ]; then + echo $@ + exit -1 + fi + ) +} + + +# HTTP and HTTPS proxy servers are supported via the usual environment variables [1] +# ``http_proxy``, ``https_proxy`` and ``no_proxy``. They can be set in +# ``localrc`` or on the command line if necessary:: +# +# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html +# +# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh + +function export_proxy_variables() { + if [[ -n "$http_proxy" ]]; then + export http_proxy=$http_proxy + fi + if [[ -n "$https_proxy" ]]; then + export https_proxy=$https_proxy + fi + if [[ -n "$no_proxy" ]]; then + export no_proxy=$no_proxy + fi +} + + +# Grab a numbered field from python prettytable output +# Fields are numbered starting with 1 +# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc. +# get_field field-number +function get_field() { + while read data; do + if [ "$1" -lt 0 ]; then + field="(\$(NF$1))" + else + field="\$$(($1 + 1))" + fi + echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}" + done +} + + +# get_packages() collects a list of package names of any type from the +# prerequisite files in ``files/{apts|rpms}``. The list is intended +# to be passed to a package installer such as apt or yum. +# +# Only packages required for the services in ``ENABLED_SERVICES`` will be +# included. Two bits of metadata are recognized in the prerequisite files: +# - ``# NOPRIME`` defers installation to be performed later in stack.sh +# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection +# of the package to the distros listed. The distro names are case insensitive. +# +# Uses globals ``ENABLED_SERVICES`` +# get_packages dir +function get_packages() { + local package_dir=$1 + local file_to_parse + local service + + if [[ -z "$package_dir" ]]; then + echo "No package directory supplied" + return 1 + fi + if [[ -z "$DISTRO" ]]; then + GetDistro + fi + for service in general ${ENABLED_SERVICES//,/ }; do + # Allow individual services to specify dependencies + if [[ -e ${package_dir}/${service} ]]; then + file_to_parse="${file_to_parse} $service" + fi + # NOTE(sdague) n-api needs glance for now because that's where + # glance client is + if [[ $service == n-api ]]; then + if [[ ! $file_to_parse =~ nova ]]; then + file_to_parse="${file_to_parse} nova" + fi + if [[ ! $file_to_parse =~ glance ]]; then + file_to_parse="${file_to_parse} glance" + fi + elif [[ $service == c-* ]]; then + if [[ !
$file_to_parse =~ cinder ]]; then + file_to_parse="${file_to_parse} cinder" + fi + elif [[ $service == ceilometer-* ]]; then + if [[ ! $file_to_parse =~ ceilometer ]]; then + file_to_parse="${file_to_parse} ceilometer" + fi + elif [[ $service == n-* ]]; then + if [[ ! $file_to_parse =~ nova ]]; then + file_to_parse="${file_to_parse} nova" + fi + elif [[ $service == g-* ]]; then + if [[ ! $file_to_parse =~ glance ]]; then + file_to_parse="${file_to_parse} glance" + fi + elif [[ $service == key* ]]; then + if [[ ! $file_to_parse =~ keystone ]]; then + file_to_parse="${file_to_parse} keystone" + fi + elif [[ $service == q-* ]]; then + if [[ ! $file_to_parse =~ quantum ]]; then + file_to_parse="${file_to_parse} quantum" + fi + fi + done + + for file in ${file_to_parse}; do + local fname=${package_dir}/${file} + local OIFS line package distros distro + [[ -e $fname ]] || continue + + OIFS=$IFS + IFS=$'\n' + for line in $(<${fname}); do + if [[ $line =~ "NOPRIME" ]]; then + continue + fi + + if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then + # We are using the bash regexp matching feature. + package=${BASH_REMATCH[1]} + distros=${BASH_REMATCH[2]} + # In bash ${VAR,,} will lowercase VAR + [[ ${distros,,} =~ ${DISTRO,,} ]] && echo $package + continue + fi + + echo ${line%#*} + done + IFS=$OIFS + done +} + + +# Determine OS Vendor, Release and Update + +# Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora +# Returns results in global variables: +# os_VENDOR - vendor name +# os_RELEASE - release +# os_UPDATE - update +# os_PACKAGE - package type +# os_CODENAME - vendor's codename for release +# GetOSVersion +GetOSVersion() { + # Figure out which vendor we are + if [[ -n "`which sw_vers 2>/dev/null`" ]]; then + # OS/X + os_VENDOR=`sw_vers -productName` + os_RELEASE=`sw_vers -productVersion` + os_UPDATE=${os_RELEASE##*.} + os_RELEASE=${os_RELEASE%.*} + os_PACKAGE="" + if [[ "$os_RELEASE" =~ "10.7" ]]; then + os_CODENAME="lion" + elif [[ "$os_RELEASE" =~ "10.6" ]]; then + os_CODENAME="snow leopard" + elif [[ "$os_RELEASE" =~ "10.5" ]]; then + os_CODENAME="leopard" + elif [[ "$os_RELEASE" =~ "10.4" ]]; then + os_CODENAME="tiger" + elif [[ "$os_RELEASE" =~ "10.3" ]]; then + os_CODENAME="panther" + else + os_CODENAME="" + fi + elif [[ -x $(which lsb_release 2>/dev/null) ]]; then + os_VENDOR=$(lsb_release -i -s) + os_RELEASE=$(lsb_release -r -s) + os_UPDATE="" + os_PACKAGE="rpm" + if [[ "Debian,Ubuntu" =~ $os_VENDOR ]]; then + os_PACKAGE="deb" + elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then + lsb_release -d -s | grep -q openSUSE + if [[ $?
-eq 0 ]]; then + os_VENDOR="openSUSE" + fi + elif [[ $os_VENDOR =~ Red.*Hat ]]; then + os_VENDOR="Red Hat" + fi + os_CODENAME=$(lsb_release -c -s) + elif [[ -r /etc/redhat-release ]]; then + # Red Hat Enterprise Linux Server release 5.5 (Tikanga) + # CentOS release 5.5 (Final) + # CentOS Linux release 6.0 (Final) + # Fedora release 16 (Verne) + os_CODENAME="" + for r in "Red Hat" CentOS Fedora; do + os_VENDOR=$r + if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then + ver=`sed -e 's/^.* \(.*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release` + os_CODENAME=${ver#*|} + os_RELEASE=${ver%|*} + os_UPDATE=${os_RELEASE##*.} + os_RELEASE=${os_RELEASE%.*} + break + fi + os_VENDOR="" + done + os_PACKAGE="rpm" + elif [[ -r /etc/SuSE-release ]]; then + for r in openSUSE "SUSE Linux"; do + if [[ "$r" = "SUSE Linux" ]]; then + os_VENDOR="SUSE LINUX" + else + os_VENDOR=$r + fi + + if [[ -n "`grep \"$r\" /etc/SuSE-release`" ]]; then + os_CODENAME=`grep "CODENAME = " /etc/SuSE-release | sed 's:.* = ::g'` + os_RELEASE=`grep "VERSION = " /etc/SuSE-release | sed 's:.* = ::g'` + os_UPDATE=`grep "PATCHLEVEL = " /etc/SuSE-release | sed 's:.* = ::g'` + break + fi + os_VENDOR="" + done + os_PACKAGE="rpm" + fi + export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME +} + +# git update using reference as a branch. +# git_update_branch ref +function git_update_branch() { + + GIT_BRANCH=$1 + + git checkout -f origin/$GIT_BRANCH + # a local branch might not exist + git branch -D $GIT_BRANCH || true + git checkout -b $GIT_BRANCH +} + + +# git update using reference as a tag. Be careful editing source at that repo +# as the working copy will be in a detached mode +# git_update_tag ref +function git_update_tag() { + + GIT_TAG=$1 + + git tag -d $GIT_TAG + # fetching given tag only + git fetch origin tag $GIT_TAG + git checkout -f $GIT_TAG +} + + +# git update using reference as a remote tracking branch. +# git_update_remote_branch ref +function git_update_remote_branch() { + + GIT_BRANCH=$1 + + git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH +} + + +# Translate the OS version values into common nomenclature +# Sets ``DISTRO`` from the ``os_*`` values +function GetDistro() { + GetOSVersion + if [[ "$os_VENDOR" =~ (Ubuntu) ]]; then + # 'Everyone' refers to Ubuntu releases by the code name adjective + DISTRO=$os_CODENAME + elif [[ "$os_VENDOR" =~ (Fedora) ]]; then + # For Fedora, just use 'f' and the release + DISTRO="f$os_RELEASE" + elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then + DISTRO="opensuse-$os_RELEASE" + elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then + # For SLE, also use the service pack + if [[ -z "$os_UPDATE" ]]; then + DISTRO="sle${os_RELEASE}" + else + DISTRO="sle${os_RELEASE}sp${os_UPDATE}" + fi + else + # Catch-all for now is Vendor + Release + Update + DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE" + fi + export DISTRO +} + + +# Determine if current distribution is an Ubuntu-based distribution. +# It will also detect non-Ubuntu but Debian-based distros; this is not an issue +# since Debian and Ubuntu should be compatible. +# is_ubuntu +function is_ubuntu { + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + + [ "$os_PACKAGE" = "deb" ] +} + + +# Determine if current distribution is a Fedora-based distribution +# (Fedora, RHEL, CentOS). +# is_fedora +function is_fedora { + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || [ "$os_VENDOR" = "CentOS" ] +}
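The usual pattern in DevStack scripts is to call ``GetDistro`` once and then branch on the ``is_*`` predicates; a minimal sketch (package name illustrative)::

    GetDistro
    echo "Detected distro: $DISTRO"
    if is_ubuntu; then
        apt_get install python-netaddr
    elif is_fedora; then
        yum_install python-netaddr
    fi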
+# Determine if current distribution is a SUSE-based distribution +# (openSUSE, SLE). +# is_suse +function is_suse { + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + [ "$os_VENDOR" = "openSUSE" ] || [ "$os_VENDOR" = "SUSE LINUX" ] +} + + +# Exit after outputting a message about the distribution not being supported. +# exit_distro_not_supported [optional-string-telling-what-is-missing] +function exit_distro_not_supported { + if [[ -z "$DISTRO" ]]; then + GetDistro + fi + + if [ $# -gt 0 ]; then + echo "Support for $DISTRO is incomplete: no support for $@" + else + echo "Support for $DISTRO is incomplete." + fi + + exit 1 +} + + +# git clone only if directory doesn't exist already. Since ``DEST`` might not +# be owned by the installation user, we create the directory and change the +# ownership to the proper user. +# Set global RECLONE=yes to simulate a clone when dest-dir exists +# Set global ERROR_ON_CLONE=True to abort execution with an error if the git repo +# does not exist (default is False, meaning the repo will be cloned). +# Uses global ``OFFLINE`` +# git_clone remote dest-dir branch +function git_clone { + [[ "$OFFLINE" = "True" ]] && return + + GIT_REMOTE=$1 + GIT_DEST=$2 + GIT_REF=$3 + + if echo $GIT_REF | egrep -q "^refs"; then + # If our branch name is a gerrit style refs/changes/... + if [[ ! -d $GIT_DEST ]]; then + [[ "$ERROR_ON_CLONE" = "True" ]] && exit 1 + git clone $GIT_REMOTE $GIT_DEST + fi + cd $GIT_DEST + git fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD + else + # do a full clone only if the directory doesn't exist + if [[ ! -d $GIT_DEST ]]; then + [[ "$ERROR_ON_CLONE" = "True" ]] && exit 1 + git clone $GIT_REMOTE $GIT_DEST + cd $GIT_DEST + # This checkout syntax works for both branches and tags + git checkout $GIT_REF + elif [[ "$RECLONE" == "yes" ]]; then + # if it does exist then simulate what clone does if asked to RECLONE + cd $GIT_DEST + # set the url to pull from and fetch + git remote set-url origin $GIT_REMOTE + git fetch origin + # remove the existing ignored files (like pyc) as they cause breakage: + # the py files have older timestamps than our pyc files, so python + # thinks the pyc files are current and uses them + find $GIT_DEST -name '*.pyc' -delete + + # handle GIT_REF according to its type (tag, branch) + if [[ -n "`git show-ref refs/tags/$GIT_REF`" ]]; then + git_update_tag $GIT_REF + elif [[ -n "`git show-ref refs/heads/$GIT_REF`" ]]; then + git_update_branch $GIT_REF + elif [[ -n "`git show-ref refs/remotes/origin/$GIT_REF`" ]]; then + git_update_remote_branch $GIT_REF + else + echo $GIT_REF is neither branch nor tag + exit 1 + fi + + fi + fi +} + + +# Comment an option in an INI file +# inicomment config-file section option +function inicomment() { + local file=$1 + local section=$2 + local option=$3 + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file" +} + +# Uncomment an option in an INI file +# iniuncomment config-file section option +function iniuncomment() { + local file=$1 + local section=$2 + local option=$3 + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file" +} + + +# Get an option from an INI file +# iniget config-file section option +function iniget() { + local file=$1 + local section=$2 + local option=$3 + local line + line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + echo ${line#*=} +} + +# Determine if the given option is present in the INI file +# ini_has_option config-file section option +function ini_has_option() { + local file=$1 + local section=$2 + local option=$3 + local line + line=$(sed -ne
"/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + [ -n "$line" ] +} + +# Set an option in an INI file +# iniset config-file section option value +function iniset() { + local file=$1 + local section=$2 + local option=$3 + local value=$4 + if ! grep -q "^\[$section\]" "$file"; then + # Add section at the end + echo -e "\n[$section]" >>"$file" + fi + if ! ini_has_option "$file" "$section" "$option"; then + # Add it + sed -i -e "/^\[$section\]/ a\\ +$option = $value +" "$file" + else + # Replace it + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" "$file" + fi +} + + +# is_service_enabled() checks if the service(s) specified as arguments are +# enabled by the user in ``ENABLED_SERVICES``. +# +# Multiple services specified as arguments are ``OR``'ed together; the test +# is a short-circuit boolean, i.e it returns on the first match. +# +# There are special cases for some 'catch-all' services:: +# **nova** returns true if any service enabled start with **n-** +# **cinder** returns true if any service enabled start with **c-** +# **ceilometer** returns true if any service enabled start with **ceilometer** +# **glance** returns true if any service enabled start with **g-** +# **quantum** returns true if any service enabled start with **q-** +# +# Uses global ``ENABLED_SERVICES`` +# is_service_enabled service [service ...] +function is_service_enabled() { + services=$@ + for service in ${services}; do + [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0 + [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0 + [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0 + [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0 + [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0 + [[ ${service} == "quantum" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0 + done + return 1 +} + + +# remove extra commas from the input string (i.e. ``ENABLED_SERVICES``) +# _cleanup_service_list service-list +function _cleanup_service_list () { + echo "$1" | sed -e ' + s/,,/,/g; + s/^,//; + s/,$// + ' +} + + +# enable_service() adds the services passed as argument to the +# ``ENABLED_SERVICES`` list, if they are not already present. +# +# For example: +# enable_service qpid +# +# This function does not know about the special cases +# for nova, glance, and quantum built into is_service_enabled(). +# Uses global ``ENABLED_SERVICES`` +# enable_service service [service ...] +function enable_service() { + local tmpsvcs="${ENABLED_SERVICES}" + for service in $@; do + if ! is_service_enabled $service; then + tmpsvcs+=",$service" + fi + done + ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") + disable_negated_services +} + + +# disable_service() removes the services passed as argument to the +# ``ENABLED_SERVICES`` list, if they are present. +# +# For example: +# disable_service rabbit +# +# This function does not know about the special cases +# for nova, glance, and quantum built into is_service_enabled(). +# Uses global ``ENABLED_SERVICES`` +# disable_service service [service ...] 
+function disable_service() {
+    local tmpsvcs=",${ENABLED_SERVICES},"
+    local service
+    for service in $@; do
+        if is_service_enabled $service; then
+            tmpsvcs=${tmpsvcs//,$service,/,}
+        fi
+    done
+    ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
+}
+
+
+# disable_all_services() removes all current services
+# from ``ENABLED_SERVICES`` to reset the configuration
+# before a minimal installation
+# Uses global ``ENABLED_SERVICES``
+# disable_all_services
+function disable_all_services() {
+    ENABLED_SERVICES=""
+}
+
+
+# Remove all services starting with '-'. For example, to install all default
+# services except rabbit (rabbit) set in ``localrc``:
+#   ENABLED_SERVICES+=",-rabbit"
+# Uses global ``ENABLED_SERVICES``
+# disable_negated_services
+function disable_negated_services() {
+    local tmpsvcs="${ENABLED_SERVICES}"
+    local service
+    for service in ${tmpsvcs//,/ }; do
+        if [[ ${service} == -* ]]; then
+            tmpsvcs=$(echo ${tmpsvcs}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g")
+        fi
+    done
+    ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
+}
+
+
+# Distro-agnostic package installer
+# install_package package [package ...]
+function install_package() {
+    if is_ubuntu; then
+        [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update
+        NO_UPDATE_REPOS=True
+
+        apt_get install "$@"
+    elif is_fedora; then
+        yum_install "$@"
+    elif is_suse; then
+        zypper_install "$@"
+    else
+        exit_distro_not_supported "installing packages"
+    fi
+}
+
+
+# Distro-agnostic function to tell if a package is installed
+# is_package_installed package [package ...]
+function is_package_installed() {
+    if [[ -z "$@" ]]; then
+        return 1
+    fi
+
+    if [[ -z "$os_PACKAGE" ]]; then
+        GetOSVersion
+    fi
+
+    if [[ "$os_PACKAGE" = "deb" ]]; then
+        dpkg -l "$@" > /dev/null
+    elif [[ "$os_PACKAGE" = "rpm" ]]; then
+        rpm --quiet -q "$@"
+    else
+        exit_distro_not_supported "finding if a package is installed"
+    fi
+}
+
+
+# Test if the named environment variable is set and not zero length
+# is_set env-var
+function is_set() {
+    local var=\$"$1"
+    eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depend on this
+}
+
+
+# Wrapper for ``pip install`` to set cache and proxy environment variables
+# Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``PIP_USE_MIRRORS``,
+# ``TRACK_DEPENDS``, ``*_proxy``
+# pip_install package [package ...]
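+#
+# For example (the package name is illustrative):
+#   pip_install python-novaclient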
+function pip_install {
+    [[ "$OFFLINE" = "True" || -z "$@" ]] && return
+    if [[ -z "$os_PACKAGE" ]]; then
+        GetOSVersion
+    fi
+    if [[ $TRACK_DEPENDS = True ]] ; then
+        source $DEST/.venv/bin/activate
+        CMD_PIP=$DEST/.venv/bin/pip
+        SUDO_PIP="env"
+    else
+        SUDO_PIP="sudo"
+        CMD_PIP=$(get_pip_command)
+    fi
+    if [[ "$PIP_USE_MIRRORS" != "False" ]]; then
+        PIP_MIRROR_OPT="--use-mirrors"
+    fi
+    $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \
+        HTTP_PROXY=$http_proxy \
+        HTTPS_PROXY=$https_proxy \
+        NO_PROXY=$no_proxy \
+        $CMD_PIP install $PIP_MIRROR_OPT $@
+}
+
+
+# Service wrapper to restart services
+# restart_service service-name
+function restart_service() {
+    if is_ubuntu; then
+        sudo /usr/sbin/service $1 restart
+    else
+        sudo /sbin/service $1 restart
+    fi
+}
+
+
+# Helper to launch a service in a named screen
+# screen_it service "command-line"
+function screen_it {
+    SCREEN_NAME=${SCREEN_NAME:-stack}
+    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
+    SCREEN_DEV=`trueorfalse True $SCREEN_DEV`
+
+    if is_service_enabled $1; then
+        # Append the service to the screen rc file
+        screen_rc "$1" "$2"
+
+        screen -S $SCREEN_NAME -X screen -t $1
+
+        if [[ -n ${SCREEN_LOGDIR} ]]; then
+            screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log
+            screen -S $SCREEN_NAME -p $1 -X log on
+            ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
+        fi
+
+        if [[ "$SCREEN_DEV" = "True" ]]; then
+            # sleep to allow bash to be ready to be sent the command - we are
+            # creating a new window in screen and then sending characters, so if
+            # bash isn't running by the time we send the command, nothing happens
+            sleep 1.5
+
+            NL=`echo -ne '\015'`
+            screen -S $SCREEN_NAME -p $1 -X stuff "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL"
+        else
+            screen -S $SCREEN_NAME -p $1 -X exec /bin/bash -c "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\""
+        fi
+    fi
+}
+
+
+# Screen rc file builder
+# screen_rc service "command-line"
+function screen_rc {
+    SCREEN_NAME=${SCREEN_NAME:-stack}
+    SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc
+    if [[ ! -e $SCREENRC ]]; then
+        # Name the screen session
+        echo "sessionname $SCREEN_NAME" > $SCREENRC
+        # Set a reasonable statusbar
+        echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC
+        echo "screen -t shell bash" >> $SCREENRC
+    fi
+    # If this service doesn't already exist in the screenrc file
+    if ! grep $1 $SCREENRC 2>&1 > /dev/null; then
+        NL=`echo -ne '\015'`
+        echo "screen -t $1 bash" >> $SCREENRC
+        echo "stuff \"$2$NL\"" >> $SCREENRC
+    fi
+}
+
+# Helper to remove the *.failure files under $SERVICE_DIR/$SCREEN_NAME
+# This is used by service_check once all the screen_it calls have finished
+# init_service_check
+function init_service_check() {
+    SCREEN_NAME=${SCREEN_NAME:-stack}
+    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
+
+    if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then
+        mkdir -p "$SERVICE_DIR/$SCREEN_NAME"
+    fi
+
+    rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure
+}
+
+# Helper to get the status of each running service
+# service_check
+function service_check() {
+    local service
+    local failures
+    SCREEN_NAME=${SCREEN_NAME:-stack}
+    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
+
+
+    if [[ ! 
-d "$SERVICE_DIR/$SCREEN_NAME" ]]; then + echo "No service status directory found" + return + fi + + # Check if there is any falure flag file under $SERVICE_DIR/$SCREEN_NAME + failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null` + + for service in $failures; do + service=`basename $service` + service=${service::-8} + echo "Error: Service $service is not running" + done + + if [ -n "$failures" ]; then + echo "More details about the above errors can be found with screen, with ./rejoin-stack.sh" + fi +} + +# ``pip install`` the dependencies of the package before ``setup.py develop`` +# so pip and not distutils processes the dependency chain +# Uses globals ``TRACK_DEPENDES``, ``*_proxy` +# setup_develop directory +function setup_develop() { + if [[ $TRACK_DEPENDS = True ]] ; then + SUDO_CMD="env" + else + SUDO_CMD="sudo" + fi + (cd $1; \ + python setup.py egg_info; \ + raw_links=$(awk '/^.+/ {print "-f " $1}' *.egg-info/dependency_links.txt); \ + depend_links=$(echo $raw_links | xargs); \ + require_file=$([ ! -r *-info/requires.txt ] || echo "-r *-info/requires.txt"); \ + pip_install $require_file $depend_links; \ + $SUDO_CMD \ + HTTP_PROXY=$http_proxy \ + HTTPS_PROXY=$https_proxy \ + NO_PROXY=$no_proxy \ + python setup.py develop \ + ) +} + + +# Service wrapper to start services +# start_service service-name +function start_service() { + if is_ubuntu; then + sudo /usr/sbin/service $1 start + else + sudo /sbin/service $1 start + fi +} + + +# Service wrapper to stop services +# stop_service service-name +function stop_service() { + if is_ubuntu; then + sudo /usr/sbin/service $1 stop + else + sudo /sbin/service $1 stop + fi +} + + +# Normalize config values to True or False +# Accepts as False: 0 no false False FALSE +# Accepts as True: 1 yes true True TRUE +# VAR=$(trueorfalse default-value test-value) +function trueorfalse() { + local default=$1 + local testval=$2 + + [[ -z "$testval" ]] && { echo "$default"; return; } + [[ "0 no false False FALSE" =~ "$testval" ]] && { echo "False"; return; } + [[ "1 yes true True TRUE" =~ "$testval" ]] && { echo "True"; return; } + echo "$default" +} + + +# Retrieve an image from a URL and upload into Glance +# Uses the following variables: +# ``FILES`` must be set to the cache dir +# ``GLANCE_HOSTPORT`` +# upload_image image-url glance-token +function upload_image() { + local image_url=$1 + local token=$2 + + # Create a directory for the downloaded image tarballs. + mkdir -p $FILES/images + + # Downloads the image (uec ami+aki style), then extracts it. + IMAGE_FNAME=`basename "$image_url"` + if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then + wget -c $image_url -O $FILES/$IMAGE_FNAME + if [[ $? 
-ne 0 ]]; then
+            echo "Not found: $image_url"
+            return
+        fi
+    fi
+
+    # OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading
+    if [[ "$image_url" =~ 'openvz' ]]; then
+        IMAGE="$FILES/${IMAGE_FNAME}"
+        IMAGE_NAME="${IMAGE_FNAME%.tar.gz}"
+        glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format ami --disk-format ami < "${IMAGE}"
+        return
+    fi
+
+    KERNEL=""
+    RAMDISK=""
+    DISK_FORMAT=""
+    CONTAINER_FORMAT=""
+    UNPACK=""
+    case "$IMAGE_FNAME" in
+        *.tar.gz|*.tgz)
+            # Extract ami and aki files
+            [ "${IMAGE_FNAME%.tar.gz}" != "$IMAGE_FNAME" ] &&
+                IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" ||
+                IMAGE_NAME="${IMAGE_FNAME%.tgz}"
+            xdir="$FILES/images/$IMAGE_NAME"
+            rm -Rf "$xdir";
+            mkdir "$xdir"
+            tar -zxf $FILES/$IMAGE_FNAME -C "$xdir"
+            KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do
+                [ -f "$f" ] && echo "$f" && break; done; true)
+            RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do
+                [ -f "$f" ] && echo "$f" && break; done; true)
+            IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do
+                [ -f "$f" ] && echo "$f" && break; done; true)
+            if [[ -z "$IMAGE_NAME" ]]; then
+                IMAGE_NAME=$(basename "$IMAGE" ".img")
+            fi
+            ;;
+        *.img)
+            IMAGE="$FILES/$IMAGE_FNAME";
+            IMAGE_NAME=$(basename "$IMAGE" ".img")
+            format=$(qemu-img info ${IMAGE} | awk '/^file format/ { print $3; exit }')
+            if [[ ",qcow2,raw,vdi,vmdk,vpc," =~ ",$format," ]]; then
+                DISK_FORMAT=$format
+            else
+                DISK_FORMAT=raw
+            fi
+            CONTAINER_FORMAT=bare
+            ;;
+        *.img.gz)
+            IMAGE="$FILES/${IMAGE_FNAME}"
+            IMAGE_NAME=$(basename "$IMAGE" ".img.gz")
+            DISK_FORMAT=raw
+            CONTAINER_FORMAT=bare
+            UNPACK=zcat
+            ;;
+        *.qcow2)
+            IMAGE="$FILES/${IMAGE_FNAME}"
+            IMAGE_NAME=$(basename "$IMAGE" ".qcow2")
+            DISK_FORMAT=qcow2
+            CONTAINER_FORMAT=bare
+            ;;
+        *) echo "Do not know what to do with $IMAGE_FNAME"; false;;
+    esac
+
+    if [ "$CONTAINER_FORMAT" = "bare" ]; then
+        if [ "$UNPACK" = "zcat" ]; then
+            glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}")
+        else
+            glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}"
+        fi
+    else
+        # Use the glance client to add the kernel and the root filesystem.
+        # We parse the results of the first upload to get the glance ID of the
+        # kernel for use when uploading the root filesystem.
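+        # (Editor's note: ``grep ' id ' | get_field 2`` below relies on the
+        # glance CLI's tabular output to pluck the image UUID out of the
+        # result table.)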
+        KERNEL_ID=""; RAMDISK_ID="";
+        if [ -n "$KERNEL" ]; then
+            KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --public --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
+        fi
+        if [ -n "$RAMDISK" ]; then
+            RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --public --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
+        fi
+        glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --public --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
+    fi
+}
+
+# Set the database backend to use
+# When called from stackrc/localrc DATABASE_BACKENDS has not been
+# initialized yet, just save the configuration selection and call back later
+# to validate it.
+# $1 The name of the database backend to use (mysql, postgresql, ...)
+function use_database {
+    if [[ -z "$DATABASE_BACKENDS" ]]; then
+        # The backends haven't initialized yet, just save the selection for now
+        DATABASE_TYPE=$1
+    else
+        use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1
+    fi
+}
+
+# Toggle enable/disable_service for a set of mutually exclusive services
+# (only one of the set may run at a time)
+# $1 The name of a variable containing a space-separated list of services
+# $2 The name of a variable in which to store the enabled service's name
+# $3 The name of the service to enable
+function use_exclusive_service {
+    local options=${!1}
+    local selection=$3
+    out=$2
+    [ -z $selection ] || [[ ! "$options" =~ "$selection" ]] && return 1
+    for opt in $options; do
+        [[ "$opt" = "$selection" ]] && enable_service $opt || disable_service $opt
+    done
+    eval "$out=$selection"
+    return 0
+}
+
+# Wait for an HTTP server to start answering requests
+# wait_for_service timeout url
+function wait_for_service() {
+    local timeout=$1
+    local url=$2
+    timeout $timeout sh -c "while ! http_proxy= https_proxy= curl -s $url >/dev/null; do sleep 1; done"
+}
+
+# Wrapper for ``yum`` to set proxy environment variables
+# Uses globals ``OFFLINE``, ``*_proxy``
+# yum_install package [package ...]
+function yum_install() {
+    [[ "$OFFLINE" = "True" ]] && return
+    local sudo="sudo"
+    [[ "$(id -u)" = "0" ]] && sudo="env"
+    $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
+        no_proxy=$no_proxy \
+        yum install -y "$@"
+}
+
+# ping check
+# Uses globals ``ENABLED_SERVICES``
+function ping_check() {
+    if is_service_enabled quantum; then
+        _ping_check_quantum "$1" $2 $3 $4
+        return
+    fi
+    _ping_check_novanet "$1" $2 $3 $4
+}
+
+# ping check for nova
+# Uses globals ``MULTI_HOST``, ``PRIVATE_NETWORK``
+function _ping_check_novanet() {
+    local from_net=$1
+    local ip=$2
+    local boot_timeout=$3
+    local expected=${4:-"True"}
+    local check_command=""
+    MULTI_HOST=`trueorfalse False $MULTI_HOST`
+    if [[ "$MULTI_HOST" = "True" && "$from_net" = "$PRIVATE_NETWORK_NAME" ]]; then
+        sleep $boot_timeout
+        return
+    fi
+    if [[ "$expected" = "True" ]]; then
+        check_command="while ! ping -c1 -w1 $ip; do sleep 1; done"
+    else
+        check_command="while ping -c1 -w1 $ip; do sleep 1; done"
+    fi
+    if ! 
timeout $boot_timeout sh -c "$check_command"; then
+        if [[ "$expected" = "True" ]]; then
+            echo "[Fail] Couldn't ping server"
+        else
+            echo "[Fail] Could ping server"
+        fi
+        exit 1
+    fi
+}
+
+# ssh check
+
+function ssh_check() {
+    if is_service_enabled quantum; then
+        _ssh_check_quantum "$1" $2 $3 $4 $5
+        return
+    fi
+    _ssh_check_novanet "$1" $2 $3 $4 $5
+}
+
+function _ssh_check_novanet() {
+    local NET_NAME=$1
+    local KEY_FILE=$2
+    local FLOATING_IP=$3
+    local DEFAULT_INSTANCE_USER=$4
+    local ACTIVE_TIMEOUT=$5
+    local probe_cmd=""
+    if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success ; do sleep 1; done"; then
+        echo "server didn't become ssh-able!"
+        exit 1
+    fi
+}
+
+
+# zypper wrapper to set arguments correctly
+# zypper_install package [package ...]
+function zypper_install() {
+    [[ "$OFFLINE" = "True" ]] && return
+    local sudo="sudo"
+    [[ "$(id -u)" = "0" ]] && sudo="env"
+    $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
+        zypper --non-interactive install --auto-agree-with-licenses "$@"
+}
+
+
+# Add a user to a group.
+# add_user_to_group user group
+function add_user_to_group() {
+    local user=$1
+    local group=$2
+
+    if [[ -z "$os_VENDOR" ]]; then
+        GetOSVersion
+    fi
+
+    # SLE11 and openSUSE 12.2 don't have the usual usermod
+    if ! is_suse || [[ "$os_VENDOR" = "openSUSE" && "$os_RELEASE" != "12.2" ]]; then
+        sudo usermod -a -G "$group" "$user"
+    else
+        sudo usermod -A "$group" "$user"
+    fi
+}
+
+
+# Get the path to the directory where python executables are installed.
+# get_python_exec_prefix
+function get_python_exec_prefix() {
+    if is_fedora; then
+        echo "/usr/bin"
+    else
+        echo "/usr/local/bin"
+    fi
+}
+
+# Get the location of the $module-rootwrap executables, where module is cinder
+# or nova.
+# get_rootwrap_location module
+function get_rootwrap_location() {
+    local module=$1
+
+    echo "$(get_python_exec_prefix)/$module-rootwrap"
+}
+
+# Get the path to the pip command.
+# get_pip_command
+function get_pip_command() {
+    if is_fedora; then
+        which pip-python
+    else
+        which pip
+    fi
+}
+
+# Restore xtrace
+$XTRACE
+
+
+# Local variables:
+# -*- mode: Shell-script -*-
+# End:
diff --git a/lib/baremetal b/lib/baremetal
new file mode 100644
index 00000000..26593867
--- /dev/null
+++ b/lib/baremetal
@@ -0,0 +1,435 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+# This file provides devstack with the environment and utilities to
+# control nova-compute's baremetal driver.
+# It sets reasonable defaults to run within a single host,
+# using virtual machines in place of physical hardware.
+# However, by changing just a few options, devstack+baremetal can in fact
+# control physical hardware resources on the same network, if you know
+# the MAC address(es) and IPMI credentials.
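+#
+# In addition to the minimum settings listed below, controlling physical
+# hardware usually means a ``localrc`` along these lines (every value here
+# is illustrative only):
+#
+#   BM_POWER_MANAGER=nova.virt.baremetal.ipmi.Ipmi
+#   BM_FIRST_MAC=00:11:22:33:44:55
+#   BM_PM_ADDR=192.0.2.10
+#   BM_PM_USER=admin
+#   BM_PM_PASS=secret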
+#
+# At a minimum, to enable the baremetal driver, you must set these in localrc:
+#    VIRT_DRIVER=baremetal
+#    ENABLED_SERVICES="$ENABLED_SERVICES,baremetal"
+#
+#
+# We utilize diskimage-builder to create a ramdisk, and then
+# baremetal driver uses that to push a disk image onto the node(s).
+#
+# Below we define various defaults which control the behavior of the
+# baremetal compute service, and inform it of the hardware it will control.
+#
+# Below that, various functions are defined, which are called by devstack
+# in the following order:
+#
+# before nova-cpu starts:
+# - prepare_baremetal_toolchain
+# - configure_baremetal_nova_dirs
+#
+# after nova and glance have started:
+# - build_and_upload_baremetal_deploy_k_and_r $token
+# - create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID
+# - upload_baremetal_image $url $token
+# - add_baremetal_node
+
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+# Sub-driver settings
+# -------------------
+
+# sub-driver to use for kernel deployment
+# - nova.virt.baremetal.pxe.PXE
+# - nova.virt.baremetal.tilera.TILERA
+BM_DRIVER=${BM_DRIVER:-nova.virt.baremetal.pxe.PXE}
+
+# sub-driver to use for remote power management
+# - nova.virt.baremetal.fake.FakePowerManager, for manual power control
+# - nova.virt.baremetal.ipmi.Ipmi, for remote IPMI
+# - nova.virt.baremetal.tilera_pdu.Pdu, for TilePro hardware
+BM_POWER_MANAGER=${BM_POWER_MANAGER:-nova.virt.baremetal.fake.FakePowerManager}
+
+
+# These should be customized to your environment and hardware
+# -----------------------------------------------------------
+
+# whether to create a fake environment, e.g. for devstack-gate
+BM_USE_FAKE_ENV=`trueorfalse False $BM_USE_FAKE_ENV`
+
+# Extra options to pass to bm_poseur
+# change the bridge name or IP: --bridge br99 --bridge-ip 192.0.2.1
+# change the virtualization type: --engine qemu
+BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-}
+
+# BM_DNSMASQ_IFACE should match FLAT_NETWORK_BRIDGE
+if [ "$BM_USE_FAKE_ENV" = "True" ]; then
+    BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-br99}
+    BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-192.0.2.32,192.0.2.48}
+else
+    BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0}
+    # if testing on a physical network,
+    # BM_DNSMASQ_RANGE must be changed to suit your network
+    BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-}
+fi
+
+# BM_DNSMASQ_DNS provides the DNS server that bootstrap clients should use
+BM_DNSMASQ_DNS=${BM_DNSMASQ_DNS:-}
+
+# BM_FIRST_MAC *must* be set to the MAC address of the node you will boot.
+# This is passed to dnsmasq along with the kernel/ramdisk to
+# deploy via PXE.
+BM_FIRST_MAC=${BM_FIRST_MAC:-}
+
+# BM_SECOND_MAC is only important if the host has >1 NIC.
+BM_SECOND_MAC=${BM_SECOND_MAC:-}
+
+# Hostname for the baremetal nova-compute node, if not run on this host
+BM_HOSTNAME=${BM_HOSTNAME:-$(hostname -f)}
+
+# BM_PM_* options are only necessary if BM_POWER_MANAGER=...IPMI
+BM_PM_ADDR=${BM_PM_ADDR:-0.0.0.0}
+BM_PM_USER=${BM_PM_USER:-user}
+BM_PM_PASS=${BM_PM_PASS:-pass}
+
+# BM_FLAVOR_* options are arbitrary and not necessarily related to physical
+# hardware capacity. These can be changed if you are testing
+# BaremetalHostManager with multiple nodes and different flavors.
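+#
+# For example, a heavier test flavor could be selected in localrc
+# (values illustrative):
+#     BM_FLAVOR_NAME=bm.large
+#     BM_FLAVOR_RAM=4096
+#     BM_FLAVOR_CPU=2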
+BM_CPU_ARCH=${BM_CPU_ARCH:-x86_64} +BM_FLAVOR_CPU=${BM_FLAVOR_CPU:-1} +BM_FLAVOR_RAM=${BM_FLAVOR_RAM:-1024} +BM_FLAVOR_ROOT_DISK=${BM_FLAVOR_ROOT_DISK:-10} +BM_FLAVOR_EPHEMERAL_DISK=${BM_FLAVOR_EPHEMERAL_DISK:-0} +BM_FLAVOR_SWAP=${BM_FLAVOR_SWAP:-1} +BM_FLAVOR_NAME=${BM_FLAVOR_NAME:-bm.small} +BM_FLAVOR_ID=${BM_FLAVOR_ID:-11} +BM_FLAVOR_ARCH=${BM_FLAVOR_ARCH:-$BM_CPU_ARCH} + + +# Below this, we set some path and filenames. +# Defaults are probably sufficient. +BM_IMAGE_BUILD_DIR=${BM_IMAGE_BUILD_DIR:-$DEST/diskimage-builder} +BM_POSEUR_DIR=${BM_POSEUR_DIR:-$DEST/bm_poseur} + +BM_HOST_CURRENT_KERNEL=$(uname -r) +BM_DEPLOY_RAMDISK=${BM_DEPLOY_RAMDISK:-bm-deploy-$BM_HOST_CURRENT_KERNEL-initrd} +BM_DEPLOY_KERNEL=${BM_DEPLOY_KERNEL:-bm-deploy-$BM_HOST_CURRENT_KERNEL-vmlinuz} + +# If you need to add any extra flavors to the deploy ramdisk image +# eg, specific network drivers, specify them here +BM_DEPLOY_FLAVOR=${BM_DEPLOY_FLAVOR:-} + +# set URL and version for google shell-in-a-box +BM_SHELL_IN_A_BOX=${BM_SHELL_IN_A_BOX:-http://shellinabox.googlecode.com/files/shellinabox-2.14.tar.gz} + + +# Functions +# --------- + +# Check if baremetal is properly enabled +# Returns false if VIRT_DRIVER is not baremetal, or if ENABLED_SERVICES +# does not contain "baremetal" +function is_baremetal() { + if [[ "$ENABLED_SERVICES" =~ 'baremetal' && "$VIRT_DRIVER" = 'baremetal' ]]; then + return 0 + fi + return 1 +} + +# Install diskimage-builder and shell-in-a-box +# so that we can build the deployment kernel & ramdisk +function prepare_baremetal_toolchain() { + git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH + git_clone $BM_POSEUR_REPO $BM_POSEUR_DIR $BM_POSEUR_BRANCH + + local shellinabox_basename=$(basename $BM_SHELL_IN_A_BOX) + if [[ ! -e $DEST/$shellinabox_basename ]]; then + cd $DEST + wget $BM_SHELL_IN_A_BOX + fi + if [[ ! -d $DEST/${shellinabox_basename%%.tar.gz} ]]; then + cd $DEST + tar xzf $shellinabox_basename + fi + if [[ ! $(which shellinaboxd) ]]; then + cd $DEST/${shellinabox_basename%%.tar.gz} + ./configure + make + sudo make install + fi +} + +# set up virtualized environment for devstack-gate testing +function create_fake_baremetal_env() { + local bm_poseur="$BM_POSEUR_DIR/bm_poseur" + # TODO(deva): add support for >1 VM + sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-bridge + sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-vm + BM_FIRST_MAC=$(sudo $bm_poseur get-macs) + + # NOTE: there is currently a limitation in baremetal driver + # that requires second MAC even if it is not used. + # Passing a fake value allows this to work. + # TODO(deva): remove this after driver issue is fixed. 
+    BM_SECOND_MAC='12:34:56:78:90:12'
+}
+
+function cleanup_fake_baremetal_env() {
+    local bm_poseur="$BM_POSEUR_DIR/bm_poseur"
+    sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-vm
+    sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-bridge
+}
+
+# prepare various directories needed by the baremetal hypervisor
+function configure_baremetal_nova_dirs() {
+    # ensure /tftpboot is prepared
+    sudo mkdir -p /tftpboot
+    sudo mkdir -p /tftpboot/pxelinux.cfg
+    sudo cp /usr/lib/syslinux/pxelinux.0 /tftpboot/
+    sudo chown -R $STACK_USER:libvirtd /tftpboot
+
+    # ensure $NOVA_STATE_PATH/baremetal is prepared
+    sudo mkdir -p $NOVA_STATE_PATH/baremetal
+    sudo mkdir -p $NOVA_STATE_PATH/baremetal/console
+    sudo mkdir -p $NOVA_STATE_PATH/baremetal/dnsmasq
+    sudo touch $NOVA_STATE_PATH/baremetal/dnsmasq/dnsmasq-dhcp.host
+    sudo chown -R $STACK_USER $NOVA_STATE_PATH/baremetal
+
+    # ensure dnsmasq is installed but not running
+    # because baremetal driver will reconfigure and restart this as needed
+    is_package_installed dnsmasq || install_package dnsmasq
+    stop_service dnsmasq
+}
+
+# build deploy kernel+ramdisk, then upload them to glance
+# this function sets BM_DEPLOY_KERNEL_ID and BM_DEPLOY_RAMDISK_ID
+function upload_baremetal_deploy() {
+    token=$1
+
+    if [ ! -e $TOP_DIR/files/$BM_DEPLOY_KERNEL -a -e /boot/vmlinuz-$BM_HOST_CURRENT_KERNEL ]; then
+        sudo cp /boot/vmlinuz-$BM_HOST_CURRENT_KERNEL $TOP_DIR/files/$BM_DEPLOY_KERNEL
+        sudo chmod a+r $TOP_DIR/files/$BM_DEPLOY_KERNEL
+    fi
+    if [ ! -e $TOP_DIR/files/$BM_DEPLOY_RAMDISK ]; then
+        $BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR deploy \
+            -o $TOP_DIR/files/$BM_DEPLOY_RAMDISK -k $BM_HOST_CURRENT_KERNEL
+    fi
+
+    # load them into glance
+    BM_DEPLOY_KERNEL_ID=$(glance \
+        --os-auth-token $token \
+        --os-image-url http://$GLANCE_HOSTPORT \
+        image-create \
+        --name $BM_DEPLOY_KERNEL \
+        --public --disk-format=aki \
+        < $TOP_DIR/files/$BM_DEPLOY_KERNEL | grep ' id ' | get_field 2)
+    BM_DEPLOY_RAMDISK_ID=$(glance \
+        --os-auth-token $token \
+        --os-image-url http://$GLANCE_HOSTPORT \
+        image-create \
+        --name $BM_DEPLOY_RAMDISK \
+        --public --disk-format=ari \
+        < $TOP_DIR/files/$BM_DEPLOY_RAMDISK | grep ' id ' | get_field 2)
+}
+
+# create a basic baremetal flavor, associated with deploy kernel & ramdisk
+#
+# Usage: create_baremetal_flavor <aki_id> <ari_id>
+function create_baremetal_flavor() {
+    aki=$1
+    ari=$2
+    nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \
+        $BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU
+    nova flavor-key $BM_FLAVOR_NAME set \
+        cpu_arch=$BM_FLAVOR_ARCH \
+        deploy_kernel_id=$aki \
+        deploy_ramdisk_id=$ari
+}
+
+# pull run-time kernel/ramdisk out of disk image and load into glance
+# note that $file is currently expected to be in qcow2 format
+# Sets KERNEL_ID and RAMDISK_ID
+#
+# Usage: extract_and_upload_k_and_r_from_image $token $file
+function extract_and_upload_k_and_r_from_image() {
+    token=$1
+    file=$2
+    image_name=$(basename "$file" ".qcow2")
+
+    # this call returns the file names as "$kernel,$ramdisk"
+    out=$($BM_IMAGE_BUILD_DIR/bin/disk-image-get-kernel \
+        -x -d $TOP_DIR/files -o bm-deploy -i $file)
+    if [ $? 
-ne 0 ]; then + die "Failed to get kernel and ramdisk from $file" + fi + XTRACE=$(set +o | grep xtrace) + set +o xtrace + out=$(echo "$out" | tail -1) + $XTRACE + OUT_KERNEL=${out%%,*} + OUT_RAMDISK=${out##*,} + + # load them into glance + KERNEL_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $image_name-kernel \ + --public --disk-format=aki \ + < $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2) + RAMDISK_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $image_name-initrd \ + --public --disk-format=ari \ + < $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2) +} + + +# Re-implementation of devstack's "upload_image" function +# +# Takes the same parameters, but has some peculiarities which made it +# easier to create a separate method, rather than complicate the logic +# of the existing function. +function upload_baremetal_image() { + local image_url=$1 + local token=$2 + + # Create a directory for the downloaded image tarballs. + mkdir -p $FILES/images + + # Downloads the image (uec ami+aki style), then extracts it. + IMAGE_FNAME=`basename "$image_url"` + if [[ ! -f $FILES/$IMAGE_FNAME || \ + "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then + wget -c $image_url -O $FILES/$IMAGE_FNAME + if [[ $? -ne 0 ]]; then + echo "Not found: $image_url" + return + fi + fi + + local KERNEL="" + local RAMDISK="" + local DISK_FORMAT="" + local CONTAINER_FORMAT="" + case "$IMAGE_FNAME" in + *.tar.gz|*.tgz) + # Extract ami and aki files + [ "${IMAGE_FNAME%.tar.gz}" != "$IMAGE_FNAME" ] && + IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" || + IMAGE_NAME="${IMAGE_FNAME%.tgz}" + xdir="$FILES/images/$IMAGE_NAME" + rm -Rf "$xdir"; + mkdir "$xdir" + tar -zxf $FILES/$IMAGE_FNAME -C "$xdir" + KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do + [ -f "$f" ] && echo "$f" && break; done; true) + RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do + [ -f "$f" ] && echo "$f" && break; done; true) + IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do + [ -f "$f" ] && echo "$f" && break; done; true) + if [[ -z "$IMAGE_NAME" ]]; then + IMAGE_NAME=$(basename "$IMAGE" ".img") + fi + DISK_FORMAT=ami + CONTAINER_FORMAT=ami + ;; + *.qcow2) + IMAGE="$FILES/${IMAGE_FNAME}" + IMAGE_NAME=$(basename "$IMAGE" ".qcow2") + DISK_FORMAT=qcow2 + CONTAINER_FORMAT=bare + ;; + *) echo "Do not know what to do with $IMAGE_FNAME"; false;; + esac + + if [ "$CONTAINER_FORMAT" = "bare" ]; then + extract_and_upload_k_and_r_from_image $token $IMAGE + elif [ "$CONTAINER_FORMAT" = "ami" ]; then + KERNEL_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name "$IMAGE_NAME-kernel" --public \ + --container-format aki \ + --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2) + RAMDISK_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name "$IMAGE_NAME-ramdisk" --public \ + --container-format ari \ + --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) + else + # TODO(deva): add support for other image types + return + fi + + glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name "${IMAGE_NAME%.img}" --public \ + --container-format $CONTAINER_FORMAT \ + --disk-format $DISK_FORMAT \ + ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \ + ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" + + # override DEFAULT_IMAGE_NAME so that tempest 
can find the image
+    # that we just uploaded in glance
+    DEFAULT_IMAGE_NAME="${IMAGE_NAME%.img}"
+}
+
+function clear_baremetal_of_all_nodes() {
+    list=$(nova baremetal-node-list | awk -F '| ' 'NR>3 {print $2}' )
+    for node in $list
+    do
+        nova baremetal-node-delete $node
+    done
+}
+
+# inform nova-baremetal about nodes, MACs, etc
+# Defaults to using BM_FIRST_MAC and BM_SECOND_MAC if parameters not specified
+#
+# Usage: add_baremetal_node [first-mac] [second-mac]
+function add_baremetal_node() {
+    mac_1=${1:-$BM_FIRST_MAC}
+    mac_2=${2:-$BM_SECOND_MAC}
+
+    id=$(nova baremetal-node-create \
+        --pm_address="$BM_PM_ADDR" \
+        --pm_user="$BM_PM_USER" \
+        --pm_password="$BM_PM_PASS" \
+        "$BM_HOSTNAME" \
+        "$BM_FLAVOR_CPU" \
+        "$BM_FLAVOR_RAM" \
+        "$BM_FLAVOR_ROOT_DISK" \
+        "$mac_1" \
+        | grep ' id ' | get_field 2 )
+    [ $? -eq 0 ] || [ "$id" ] || die "Error adding baremetal node"
+    id2=$(nova baremetal-add-interface "$id" "$mac_2" )
+    [ $? -eq 0 ] || [ "$id2" ] || die "Error adding interface to baremetal node $id"
+}
+
+
+# Restore xtrace
+$XTRACE
diff --git a/lib/ceilometer b/lib/ceilometer
new file mode 100644
index 00000000..bc37d92b
--- /dev/null
+++ b/lib/ceilometer
@@ -0,0 +1,130 @@
+# lib/ceilometer
+# Install and start **Ceilometer** service
+
+# To enable, add the following to localrc
+# ENABLED_SERVICES+=ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api
+
+# Dependencies:
+# - functions
+# - OS_AUTH_URL for auth in api
+# - DEST set to the destination directory
+# - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api
+# - STACK_USER service user
+
+# stack.sh
+# ---------
+# install_ceilometer
+# configure_ceilometer
+# init_ceilometer
+# start_ceilometer
+# stop_ceilometer
+# cleanup_ceilometer
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default directories
+CEILOMETER_DIR=$DEST/ceilometer
+CEILOMETERCLIENT_DIR=$DEST/python-ceilometerclient
+CEILOMETER_CONF_DIR=/etc/ceilometer
+CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf
+CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api
+CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer}
+
+# Support potential entry-points console scripts
+if [ -d $CEILOMETER_DIR/bin ] ; then
+    CEILOMETER_BIN_DIR=$CEILOMETER_DIR/bin
+else
+    CEILOMETER_BIN_DIR=$(get_python_exec_prefix)
+fi
+
+# cleanup_ceilometer() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_ceilometer() {
+    mongo ceilometer --eval "db.dropDatabase();"
+}
+
+# configure_ceilometerclient() - Set config files, create data dirs, etc
+function configure_ceilometerclient() {
+    setup_develop $CEILOMETERCLIENT_DIR
+}
+
+# configure_ceilometer() - Set config files, create data dirs, etc
+function configure_ceilometer() {
+    setup_develop $CEILOMETER_DIR
+
+    [ ! -d $CEILOMETER_CONF_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_CONF_DIR
+    sudo chown $USER $CEILOMETER_CONF_DIR
+
+    [ ! 
-d $CEILOMETER_API_LOG_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_API_LOG_DIR + sudo chown $USER $CEILOMETER_API_LOG_DIR + + iniset $CEILOMETER_CONF DEFAULT rpc_backend 'ceilometer.openstack.common.rpc.impl_kombu' + iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications,glance_notifications' + iniset $CEILOMETER_CONF DEFAULT verbose True + iniset $CEILOMETER_CONF DEFAULT rabbit_host $RABBIT_HOST + iniset $CEILOMETER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD + iniset $CEILOMETER_CONF DEFAULT sql_connection $BASE_SQL_CONN/nova?charset=utf8 + + # Install the policy file for the API server + cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR + cp $CEILOMETER_DIR/etc/ceilometer/pipeline.yaml $CEILOMETER_CONF_DIR + iniset $CEILOMETER_CONF DEFAULT policy_file $CEILOMETER_CONF_DIR/policy.json + + # the compute and central agents need these credentials in order to + # call out to the public nova and glance APIs + iniset $CEILOMETER_CONF DEFAULT os_username ceilometer + iniset $CEILOMETER_CONF DEFAULT os_password $SERVICE_PASSWORD + iniset $CEILOMETER_CONF DEFAULT os_tenant_name $SERVICE_TENANT_NAME + iniset $CEILOMETER_CONF DEFAULT os_auth_url $OS_AUTH_URL + + iniset $CEILOMETER_CONF keystone_authtoken auth_protocol http + iniset $CEILOMETER_CONF keystone_authtoken admin_user ceilometer + iniset $CEILOMETER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $CEILOMETER_CONF keystone_authtoken signing_dir $CEILOMETER_AUTH_CACHE_DIR + + cleanup_ceilometer +} + +# init_ceilometer() - Initialize etc. +function init_ceilometer() { + # Create cache dir + sudo mkdir -p $CEILOMETER_AUTH_CACHE_DIR + sudo chown $STACK_USER $CEILOMETER_AUTH_CACHE_DIR + rm -f $CEILOMETER_AUTH_CACHE_DIR/* +} + +# install_ceilometer() - Collect source and prepare +function install_ceilometer() { + git_clone $CEILOMETER_REPO $CEILOMETER_DIR $CEILOMETER_BRANCH +} + +# install_ceilometerclient() - Collect source and prepare +function install_ceilometerclient() { + git_clone $CEILOMETERCLIENT_REPO $CEILOMETERCLIENT_DIR $CEILOMETERCLIENT_BRANCH +} + +# start_ceilometer() - Start running processes, including screen +function start_ceilometer() { + screen_it ceilometer-acompute "cd $CEILOMETER_DIR && sg libvirtd \"$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" + screen_it ceilometer-acentral "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF" + screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF" + screen_it ceilometer-api "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" +} + +# stop_ceilometer() - Stop running processes +function stop_ceilometer() { + # Kill the ceilometer screen windows + for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api; do + screen -S $SCREEN_NAME -p $serv -X kill + done +} + +# Restore xtrace +$XTRACE diff --git a/lib/cinder b/lib/cinder new file mode 100644 index 00000000..4d1ab420 --- /dev/null +++ b/lib/cinder @@ -0,0 +1,388 @@ +# lib/cinder +# Install and start **Cinder** volume service + +# Dependencies: +# - functions +# - DEST, DATA_DIR, STACK_USER must be defined +# SERVICE_{TENANT_NAME|PASSWORD} must be defined +# ``KEYSTONE_TOKEN_FORMAT`` must be defined + +# stack.sh +# --------- +# install_cinder +# 
configure_cinder
+# init_cinder
+# start_cinder
+# stop_cinder
+# cleanup_cinder
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# set up default driver
+CINDER_DRIVER=${CINDER_DRIVER:-default}
+
+# set up default directories
+CINDER_DIR=$DEST/cinder
+CINDERCLIENT_DIR=$DEST/python-cinderclient
+CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder}
+CINDER_AUTH_CACHE_DIR=${CINDER_AUTH_CACHE_DIR:-/var/cache/cinder}
+
+CINDER_CONF_DIR=/etc/cinder
+CINDER_CONF=$CINDER_CONF_DIR/cinder.conf
+CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini
+
+# Public facing bits
+CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST}
+CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
+CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776}
+CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+
+# Support entry points installation of console scripts
+if [[ -d $CINDER_DIR/bin ]]; then
+    CINDER_BIN_DIR=$CINDER_DIR/bin
+else
+    CINDER_BIN_DIR=$(get_python_exec_prefix)
+fi
+
+# Name of the lvm volume group to use/create for iscsi volumes
+VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
+VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
+
+# _clean_volume_group removes all cinder volumes from the specified volume group
+# _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX
+function _clean_volume_group() {
+    local vg=$1
+    local vg_prefix=$2
+    # Clean out existing volumes
+    for lv in `sudo lvs --noheadings -o lv_name $vg`; do
+        # vg_prefix prefixes the LVs we want
+        if [[ "${lv#$vg_prefix}" != "$lv" ]]; then
+            sudo lvremove -f $vg/$lv
+        fi
+    done
+}
+
+# cleanup_cinder() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_cinder() {
+    # ensure the volume group is cleared up because failures might
+    # leave dead volumes in the group
+    TARGETS=$(sudo tgtadm --op show --mode target)
+    if [ $? -ne 0 ]; then
+        # If tgt driver isn't running this won't work obviously
+        # So check the response and restart if need be
+        echo "tgtd seems to be in a bad state, restarting..."
+        if is_ubuntu; then
+            restart_service tgt
+        else
+            restart_service tgtd
+        fi
+        TARGETS=$(sudo tgtadm --op show --mode target)
+    fi
+
+    if [[ -n "$TARGETS" ]]; then
+        # persisted lines look like "<target iqn...>"; strip the wrapper
+        # to leave the bare IQN
+        iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's/<target //' | sed 's/>//') )
+        for i in "${iqn_list[@]}"; do
+            echo removing iSCSI target: $i
+            sudo tgt-admin --delete $i
+        done
+    fi
+
+    if is_service_enabled cinder; then
+        sudo rm -rf $CINDER_STATE_PATH/volumes/*
+    fi
+
+    if is_ubuntu; then
+        stop_service tgt
+    else
+        stop_service tgtd
+    fi
+
+    # Campsite rule: leave behind a volume group at least as clean as we found it
+    _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX
+}
+
+# configure_cinder() - Set config files, create data dirs, etc
+function configure_cinder() {
+    setup_develop $CINDER_DIR
+    setup_develop $CINDERCLIENT_DIR
+
+    if [[ ! 
-d $CINDER_CONF_DIR ]]; then + sudo mkdir -p $CINDER_CONF_DIR + fi + sudo chown $STACK_USER $CINDER_CONF_DIR + + cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR + + # Set the paths of certain binaries + CINDER_ROOTWRAP=$(get_rootwrap_location cinder) + + # If Cinder ships the new rootwrap filters files, deploy them + # (owned by root) and add a parameter to $CINDER_ROOTWRAP + ROOTWRAP_CINDER_SUDOER_CMD="$CINDER_ROOTWRAP" + if [[ -d $CINDER_DIR/etc/cinder/rootwrap.d ]]; then + # Wipe any existing rootwrap.d files first + if [[ -d $CINDER_CONF_DIR/rootwrap.d ]]; then + sudo rm -rf $CINDER_CONF_DIR/rootwrap.d + fi + # Deploy filters to /etc/cinder/rootwrap.d + sudo mkdir -m 755 $CINDER_CONF_DIR/rootwrap.d + sudo cp $CINDER_DIR/etc/cinder/rootwrap.d/*.filters $CINDER_CONF_DIR/rootwrap.d + sudo chown -R root:root $CINDER_CONF_DIR/rootwrap.d + sudo chmod 644 $CINDER_CONF_DIR/rootwrap.d/* + # Set up rootwrap.conf, pointing to /etc/cinder/rootwrap.d + sudo cp $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR/ + sudo sed -e "s:^filters_path=.*$:filters_path=$CINDER_CONF_DIR/rootwrap.d:" -i $CINDER_CONF_DIR/rootwrap.conf + sudo chown root:root $CINDER_CONF_DIR/rootwrap.conf + sudo chmod 0644 $CINDER_CONF_DIR/rootwrap.conf + # Specify rootwrap.conf as first parameter to cinder-rootwrap + CINDER_ROOTWRAP="$CINDER_ROOTWRAP $CINDER_CONF_DIR/rootwrap.conf" + ROOTWRAP_CINDER_SUDOER_CMD="$CINDER_ROOTWRAP *" + fi + + TEMPFILE=`mktemp` + echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_CINDER_SUDOER_CMD" >$TEMPFILE + chmod 0440 $TEMPFILE + sudo chown root:root $TEMPFILE + sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap + + cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI + iniset $CINDER_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $CINDER_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $CINDER_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $CINDER_API_PASTE_INI filter:authtoken admin_user cinder + iniset $CINDER_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD + iniset $CINDER_API_PASTE_INI filter:authtoken signing_dir $CINDER_AUTH_CACHE_DIR + + cp $CINDER_DIR/etc/cinder/cinder.conf.sample $CINDER_CONF + iniset $CINDER_CONF DEFAULT auth_strategy keystone + iniset $CINDER_CONF DEFAULT verbose True + iniset $CINDER_CONF DEFAULT volume_group $VOLUME_GROUP + iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s + iniset $CINDER_CONF DEFAULT iscsi_helper tgtadm + local dburl + database_connection_url dburl cinder + iniset $CINDER_CONF DEFAULT sql_connection $dburl + iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI + iniset $CINDER_CONF DEFAULT root_helper "sudo ${CINDER_ROOTWRAP}" + iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.contrib.standard_extensions + iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH + + if is_service_enabled tls-proxy; then + # Set the service port for a proxy to take the original + iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT + fi + + if [ "$SYSLOG" != "False" ]; then + iniset $CINDER_CONF DEFAULT use_syslog True + fi + + iniset_rpc_backend cinder $CINDER_CONF DEFAULT + + if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then + iniset $CINDER_CONF DEFAULT secure_delete False + iniset $CINDER_CONF DEFAULT volume_clear none + fi + + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + # Add 
color to logging output + iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $CINDER_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $CINDER_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" + iniset $CINDER_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + fi + + if [ "$CINDER_DRIVER" == "XenAPINFS" ]; then + ( + set -u + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver" + iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL" + iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME" + iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD" + iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" + iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" + ) + elif [ "$CINDER_DRIVER" == "sheepdog" ]; then + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver" + fi +} + +# create_cinder_accounts() - Set up common required cinder accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service cinder admin # if enabled + +# Migrated from keystone_data.sh +create_cinder_accounts() { + + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + # Cinder + if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then + CINDER_USER=$(keystone user-create \ + --name=cinder \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=cinder@example.com \ + | grep " id " | get_field 2) + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $CINDER_USER \ + --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + CINDER_SERVICE=$(keystone service-create \ + --name=cinder \ + --type=volume \ + --description="Cinder Volume Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $CINDER_SERVICE \ + --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ + --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ + --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" + fi + fi +} + +# create_cinder_cache_dir() - Part of the init_cinder() process +function create_cinder_cache_dir() { + # Create cache dir + sudo mkdir -p $CINDER_AUTH_CACHE_DIR + sudo chown $STACK_USER $CINDER_AUTH_CACHE_DIR + rm -f $CINDER_AUTH_CACHE_DIR/* +} + +create_cinder_volume_group() { + # Configure a default volume group called '`stack-volumes`' for the volume + # service if it does not yet exist. If you don't wish to use a file backed + # volume group, create your own volume group called ``stack-volumes`` before + # invoking ``stack.sh``. + # + # By default, the backing file is 5G in size, and is stored in ``/opt/stack/data``. + + if ! 
sudo vgs $VOLUME_GROUP; then
+        VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file}
+
+        # Only create if the file doesn't already exist
+        [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
+
+        DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE`
+
+        # Only create if the loopback device doesn't contain $VOLUME_GROUP
+        if ! sudo vgs $VOLUME_GROUP; then
+            sudo vgcreate $VOLUME_GROUP $DEV
+        fi
+    fi
+
+    mkdir -p $CINDER_STATE_PATH/volumes
+}
+
+# init_cinder() - Initialize database and volume group
+function init_cinder() {
+    # Force nova volumes off
+    NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/osapi_volume,//")
+
+    if is_service_enabled $DATABASE_BACKENDS; then
+        # (Re)create cinder database
+        recreate_database cinder utf8
+
+        # Migrate cinder database
+        $CINDER_BIN_DIR/cinder-manage db sync
+    fi
+
+    if is_service_enabled c-vol; then
+
+        create_cinder_volume_group
+
+        if sudo vgs $VOLUME_GROUP; then
+            if is_fedora || is_suse; then
+                # service is not started by default
+                start_service tgtd
+            fi
+
+            # Remove iscsi targets
+            sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true
+            # Start with a clean volume group
+            _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX
+        fi
+    fi
+
+    create_cinder_cache_dir
+}
+
+# install_cinder() - Collect source and prepare
+function install_cinder() {
+    git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH
+    git_clone $CINDERCLIENT_REPO $CINDERCLIENT_DIR $CINDERCLIENT_BRANCH
+}
+
+# apply config.d approach (e.g. Oneiric does not have this)
+function _configure_tgt_for_config_d() {
+    if [[ ! -d /etc/tgt/conf.d/ ]]; then
+        sudo mkdir -p /etc/tgt/conf.d
+        echo "include /etc/tgt/conf.d/*.conf" | sudo tee -a /etc/tgt/targets.conf
+    fi
+}
+
+# start_cinder() - Start running processes, including screen
+function start_cinder() {
+    if is_service_enabled c-vol; then
+        _configure_tgt_for_config_d
+        if [[ ! 
-f /etc/tgt/conf.d/stack.conf ]]; then + echo "include $CINDER_STATE_PATH/volumes/*" | sudo tee /etc/tgt/conf.d/stack.conf + fi + if is_ubuntu; then + # tgt in oneiric doesn't restart properly if tgtd isn't running + # do it in two steps + sudo stop tgt || true + sudo start tgt + elif is_fedora; then + # bypass redirection to systemctl during restart + sudo /sbin/service --skip-redirect tgtd restart + elif is_suse; then + restart_service tgtd + else + # note for other distros: unstack.sh also uses the tgt/tgtd service + # name, and would need to be adjusted too + exit_distro_not_supported "restarting tgt" + fi + fi + + screen_it c-api "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" + screen_it c-vol "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" + screen_it c-sch "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF" + + # Start proxies if enabled + if is_service_enabled c-api && is_service_enabled tls-proxy; then + start_tls_proxy '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT & + fi +} + +# stop_cinder() - Stop running processes +function stop_cinder() { + # Kill the cinder screen windows + for serv in c-api c-sch c-vol; do + screen -S $SCREEN_NAME -p $serv -X kill + done + + if is_service_enabled c-vol; then + if is_ubuntu; then + stop_service tgt + else + stop_service tgtd + fi + fi +} + +# Restore xtrace +$XTRACE diff --git a/lib/database b/lib/database new file mode 100644 index 00000000..07e37aef --- /dev/null +++ b/lib/database @@ -0,0 +1,94 @@ +# lib/database +# Interface for interacting with different database backends + +# Dependencies: +# DATABASE_BACKENDS variable must contain a list of available database backends +# DATABASE_TYPE variable must be set + +# Each database must implement four functions: +# recreate_database_$DATABASE_TYPE +# install_database_$DATABASE_TYPE +# configure_database_$DATABASE_TYPE +# database_connection_url_$DATABASE_TYPE +# +# and call register_database $DATABASE_TYPE + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Register a database backend +# $1 The name of the database backend +function register_database { + [ -z "$DATABASE_BACKENDS" ] && DATABASE_BACKENDS=$1 || DATABASE_BACKENDS+=" $1" +} + +for f in $TOP_DIR/lib/databases/*; do source $f; done + +# Set the database type based on the configuration +function initialize_database_backends { + for backend in $DATABASE_BACKENDS; do + is_service_enabled $backend && DATABASE_TYPE=$backend + done + + [ -z "$DATABASE_TYPE" ] && return 1 + + # For backward-compatibility, read in the MYSQL_HOST/USER variables and use + # them as the default values for the DATABASE_HOST/USER variables. + MYSQL_HOST=${MYSQL_HOST:-localhost} + MYSQL_USER=${MYSQL_USER:-root} + + DATABASE_HOST=${DATABASE_HOST:-${MYSQL_HOST}} + DATABASE_USER=${DATABASE_USER:-${MYSQL_USER}} + + if [ -n "$MYSQL_PASSWORD" ]; then + DATABASE_PASSWORD=$MYSQL_PASSWORD + else + read_password DATABASE_PASSWORD "ENTER A PASSWORD TO USE FOR THE DATABASE." + fi + + # We configure Nova, Horizon, Glance and Keystone to use MySQL as their + # database server. While they share a single server, each has their own + # database and tables. + + # By default this script will install and configure MySQL. If you want to + # use an existing server, you can pass in the user/password/host parameters. + # You will need to send the same ``DATABASE_PASSWORD`` to every host if you are doing + # a multi-node DevStack installation. 
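+    #
+    # For illustration, with the MySQL defaults above the resulting base
+    # connection string looks like ``mysql://root:secret@localhost``
+    # (credentials hypothetical); each service's database name is appended
+    # to it later.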
+ + # NOTE: Don't specify ``/db`` in this string so we can use it for multiple services + BASE_SQL_CONN=${BASE_SQL_CONN:-${DATABASE_TYPE}://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOST} + + return 0 +} + +# Recreate a given database +# $1 The name of the database +# $2 The character set/encoding of the database +function recreate_database { + local db=$1 + local charset=$2 + recreate_database_$DATABASE_TYPE $db $charset +} + +# Install the database +function install_database { + install_database_$DATABASE_TYPE +} + +# Configure and start the database +function configure_database { + configure_database_$DATABASE_TYPE +} + +# Generate an SQLAlchemy connection URL and store it in a variable +# $1 The variable name in which to store the connection URL +# $2 The name of the database +function database_connection_url { + local var=$1 + local db=$2 + database_connection_url_$DATABASE_TYPE $var $db +} + +# Restore xtrace +$XTRACE diff --git a/lib/databases/mysql b/lib/databases/mysql new file mode 100644 index 00000000..94aedc64 --- /dev/null +++ b/lib/databases/mysql @@ -0,0 +1,124 @@ +# lib/databases/mysql +# Functions to control the configuration and operation of the **MySQL** database backend + +# Dependencies: +# DATABASE_{HOST,USER,PASSWORD} must be defined + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +register_database mysql + +function recreate_database_mysql { + local db=$1 + local charset=$2 + mysql -u$DATABASE_USER -p$DATABASE_PASSWORD -e "DROP DATABASE IF EXISTS $db;" + mysql -u$DATABASE_USER -p$DATABASE_PASSWORD -e "CREATE DATABASE $db CHARACTER SET $charset;" +} + +function configure_database_mysql { + echo_summary "Configuring and starting MySQL" + + if is_ubuntu; then + MY_CONF=/etc/mysql/my.cnf + MYSQL=mysql + elif is_fedora; then + MY_CONF=/etc/my.cnf + MYSQL=mysqld + elif is_suse; then + MY_CONF=/etc/my.cnf + MYSQL=mysql + else + exit_distro_not_supported "mysql configuration" + fi + + # Start mysql-server + if is_fedora || is_suse; then + # service is not started by default + start_service $MYSQL + fi + + # Set the root password - only works the first time. For Ubuntu, we already + # did that with debconf before installing the package. + if ! 
is_ubuntu; then
+        sudo mysqladmin -u root password $DATABASE_PASSWORD || true
+    fi
+
+    # Update the DB to give user '$DATABASE_USER'@'%' full control of all databases:
+    sudo mysql -uroot -p$DATABASE_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';"
+
+    # Now update ``my.cnf`` for some local needs and restart the mysql service
+
+    # Change 'bind-address' from localhost (127.0.0.1) to any (0.0.0.0)
+    sudo sed -i '/^bind-address/s/127.0.0.1/0.0.0.0/g' $MY_CONF
+
+    # Set default db type to InnoDB
+    if sudo grep -q "default-storage-engine" $MY_CONF; then
+        # Change it
+        sudo bash -c "source $TOP_DIR/functions; iniset $MY_CONF mysqld default-storage-engine InnoDB"
+    else
+        # Add it
+        sudo sed -i -e "/^\[mysqld\]/ a \
+default-storage-engine = InnoDB" $MY_CONF
+    fi
+
+    # Turn on slow query log
+    sudo sed -i '/log.slow.queries/d' $MY_CONF
+    sudo sed -i -e "/^\[mysqld\]/ a \
+log-slow-queries = /var/log/mysql/mysql-slow.log" $MY_CONF
+
+    # Log all queries (any query taking longer than 0 seconds)
+    sudo sed -i '/long.query.time/d' $MY_CONF
+    sudo sed -i -e "/^\[mysqld\]/ a \
+long-query-time = 0" $MY_CONF
+
+    # Log all non-indexed queries
+    sudo sed -i '/log.queries.not.using.indexes/d' $MY_CONF
+    sudo sed -i -e "/^\[mysqld\]/ a \
+log-queries-not-using-indexes" $MY_CONF
+
+    restart_service $MYSQL
+}
+
+function install_database_mysql {
+    if is_ubuntu; then
+        # Seed configuration with mysql password so that apt-get install doesn't
+        # prompt us for a password upon install.
+        cat <<EOF >$HOME/.my.cnf
+[client]
+user=$DATABASE_USER
+password=$DATABASE_PASSWORD
+host=$DATABASE_HOST
+EOF
+        chmod 0600 $HOME/.my.cnf
+    fi
+    # Install mysql-server
+    if is_ubuntu || is_fedora; then
+        install_package mysql-server
+    elif is_suse; then
+        install_package mysql-community-server
+    else
+        exit_distro_not_supported "mysql installation"
+    fi
+}
+
+function database_connection_url_mysql {
+    local output=$1
+    local db=$2
+    eval "$output=$BASE_SQL_CONN/$db?charset=utf8"
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
new file mode 100644
index 00000000..2c37f49b
--- /dev/null
+++ b/lib/databases/postgresql
@@ -0,0 +1,79 @@
+# lib/databases/postgresql
+# Functions to control the configuration and operation of the **PostgreSQL** database backend
+
+# Dependencies:
+# DATABASE_{HOST,USER,PASSWORD} must be defined
+
+# Save trace setting
+PG_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+register_database postgresql
+
+function recreate_database_postgresql {
+    local db=$1
+    local charset=$2
+    # Avoid unsightly error when calling dropdb when the database doesn't exist
+    psql -h$DATABASE_HOST -U$DATABASE_USER -dtemplate1 -c "DROP DATABASE IF EXISTS $db"
+    createdb -h $DATABASE_HOST -U$DATABASE_USER -l C -T template0 -E $charset $db
+}
+
+function configure_database_postgresql {
+    echo_summary "Configuring and starting PostgreSQL"
+    if is_fedora; then
+        PG_HBA=/var/lib/pgsql/data/pg_hba.conf
+        PG_CONF=/var/lib/pgsql/data/postgresql.conf
+        sudo [ -e $PG_HBA ] || sudo postgresql-setup initdb
+    elif is_ubuntu; then
+        PG_DIR=`find /etc/postgresql -name pg_hba.conf|xargs dirname`
+        PG_HBA=$PG_DIR/pg_hba.conf
+        PG_CONF=$PG_DIR/postgresql.conf
+    elif is_suse; then
+        PG_HBA=/var/lib/pgsql/data/pg_hba.conf
+        PG_CONF=/var/lib/pgsql/data/postgresql.conf
+        # initdb is called when postgresql is first started
+        sudo [ -e $PG_HBA ] || start_service postgresql
+    else
+        exit_distro_not_supported "postgresql configuration"
+    fi
+
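+    # The edits below open the server up to remote password logins. As a
+    # rough illustration (assuming ``DATABASE_USER=root``), the rewritten
+    # ``pg_hba.conf`` entries end up looking like:
+    #
+    #   host    all    root    0.0.0.0/0    password
+    #   host    all    root    ::0/0        password
+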
+    # Listen on all addresses
+    sudo sed -i "/listen_addresses/s/.*/listen_addresses = '*'/" $PG_CONF
+    # Do password auth from all IPv4 clients
+    sudo sed -i "/^host/s/all\s\+127.0.0.1\/32\s\+ident/$DATABASE_USER\t0.0.0.0\/0\tpassword/" $PG_HBA
+    # Do password auth for all IPv6 clients
+    sudo sed -i "/^host/s/all\s\+::1\/128\s\+ident/$DATABASE_USER\t::0\/0\tpassword/" $PG_HBA
+    restart_service postgresql
+
+    # If creating the role fails, chances are it already existed. Try to alter it.
+    sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'" || \
+    sudo -u root sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'"
+}
+
+function install_database_postgresql {
+    echo_summary "Installing postgresql"
+    PGPASS=$HOME/.pgpass
+    if [[ ! -e $PGPASS ]]; then
+        cat <<EOF > $PGPASS
+*:*:*:$DATABASE_USER:$DATABASE_PASSWORD
+EOF
+        chmod 0600 $PGPASS
+    else
+        sed -i "s/:root:\w\+/:root:$DATABASE_PASSWORD/" $PGPASS
+    fi
+    if is_ubuntu; then
+        install_package postgresql
+    elif is_fedora || is_suse; then
+        install_package postgresql-server
+    else
+        exit_distro_not_supported "postgresql installation"
+    fi
+}
+
+function database_connection_url_postgresql {
+    local output=$1
+    local db=$2
+    eval "$output=$BASE_SQL_CONN/$db?client_encoding=utf8"
+}
+
+# Restore xtrace
+$PG_XTRACE
diff --git a/lib/glance b/lib/glance
new file mode 100644
index 00000000..80d3902a
--- /dev/null
+++ b/lib/glance
@@ -0,0 +1,203 @@
+# lib/glance
+# Functions to control the configuration and operation of the **Glance** service
+
+# Dependencies:
+# ``functions`` file
+# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
+# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
+# ``SERVICE_HOST``
+# ``KEYSTONE_TOKEN_FORMAT`` must be defined
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_glance
+# configure_glance
+# init_glance
+# start_glance
+# stop_glance
+# cleanup_glance
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default directories
+GLANCE_DIR=$DEST/glance
+GLANCECLIENT_DIR=$DEST/python-glanceclient
+GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache}
+GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images}
+GLANCE_AUTH_CACHE_DIR=${GLANCE_AUTH_CACHE_DIR:-/var/cache/glance}
+
+GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance}
+GLANCE_REGISTRY_CONF=$GLANCE_CONF_DIR/glance-registry.conf
+GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf
+GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini
+GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini
+GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf
+GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json
+
+# Support entry points installation of console scripts
+if [[ -d $GLANCE_DIR/bin ]]; then
+    GLANCE_BIN_DIR=$GLANCE_DIR/bin
+else
+    GLANCE_BIN_DIR=$(get_python_exec_prefix)
+fi
+
+# Glance connection info. Note the port must be specified.
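+# For example, ``GLANCE_HOSTPORT=10.0.0.5:9292`` in ``localrc`` would point
+# clients at a glance-api running on another host (illustrative value).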
+GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292} + + +# Entry Points +# ------------ + +# cleanup_glance() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_glance() { + # kill instances (nova) + # delete image files (glance) + # This function intentionally left blank + : +} + +# configure_glanceclient() - Set config files, create data dirs, etc +function configure_glanceclient() { + setup_develop $GLANCECLIENT_DIR +} + +# configure_glance() - Set config files, create data dirs, etc +function configure_glance() { + setup_develop $GLANCE_DIR + + if [[ ! -d $GLANCE_CONF_DIR ]]; then + sudo mkdir -p $GLANCE_CONF_DIR + fi + sudo chown $STACK_USER $GLANCE_CONF_DIR + + # Copy over our glance configurations and update them + cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF + iniset $GLANCE_REGISTRY_CONF DEFAULT debug True + inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file + local dburl + database_connection_url dburl glance + iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $dburl + iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG + iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone + iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance + iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + iniset $GLANCE_REGISTRY_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/registry + + cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF + iniset $GLANCE_API_CONF DEFAULT debug True + inicomment $GLANCE_API_CONF DEFAULT log_file + iniset $GLANCE_API_CONF DEFAULT sql_connection $dburl + iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG + iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/ + iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ + iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement + iniset $GLANCE_API_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $GLANCE_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $GLANCE_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $GLANCE_API_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ + iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $GLANCE_API_CONF keystone_authtoken admin_user glance + iniset $GLANCE_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + if is_service_enabled qpid; then + iniset $GLANCE_API_CONF DEFAULT notifier_strategy qpid + elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then + iniset $GLANCE_API_CONF DEFAULT notifier_strategy rabbit + iniset $GLANCE_API_CONF DEFAULT rabbit_host $RABBIT_HOST + iniset $GLANCE_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD + fi + iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api + + cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI + + cp -p $GLANCE_DIR/etc/glance-api-paste.ini 
$GLANCE_API_PASTE_INI + + cp $GLANCE_DIR/etc/glance-cache.conf $GLANCE_CACHE_CONF + iniset $GLANCE_CACHE_CONF DEFAULT debug True + inicomment $GLANCE_CACHE_CONF DEFAULT log_file + iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG + iniset $GLANCE_CACHE_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/ + iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ + iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_url + iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 + iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_tenant_name + iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_TENANT_NAME + iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_user + iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance + iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_password + iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD + + cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON +} + +# create_glance_cache_dir() - Part of the init_glance() process +function create_glance_cache_dir() { + # Create cache dir + sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api + sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/api + rm -f $GLANCE_AUTH_CACHE_DIR/api/* + sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/registry + sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/registry + rm -f $GLANCE_AUTH_CACHE_DIR/registry/* +} + +# init_glance() - Initialize databases, etc. +function init_glance() { + # Delete existing images + rm -rf $GLANCE_IMAGE_DIR + mkdir -p $GLANCE_IMAGE_DIR + + # Delete existing cache + rm -rf $GLANCE_CACHE_DIR + mkdir -p $GLANCE_CACHE_DIR + + # (Re)create glance database + recreate_database glance utf8 + + # Migrate glance database + $GLANCE_BIN_DIR/glance-manage db_sync + + create_glance_cache_dir +} + +# install_glanceclient() - Collect source and prepare +function install_glanceclient() { + git_clone $GLANCECLIENT_REPO $GLANCECLIENT_DIR $GLANCECLIENT_BRANCH +} + +# install_glance() - Collect source and prepare +function install_glance() { + git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH +} + +# start_glance() - Start running processes, including screen +function start_glance() { + screen_it g-reg "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" + screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" + echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
http_proxy= wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then + echo "g-api did not start" + exit 1 + fi +} + +# stop_glance() - Stop running processes +function stop_glance() { + # Kill the Glance screen windows + screen -S $SCREEN_NAME -p g-api -X kill + screen -S $SCREEN_NAME -p g-reg -X kill +} + +# Restore xtrace +$XTRACE diff --git a/lib/heat b/lib/heat new file mode 100644 index 00000000..5b8b360a --- /dev/null +++ b/lib/heat @@ -0,0 +1,188 @@ +# lib/heat +# Install and start **Heat** service + +# To enable, add the following to localrc +# ENABLED_SERVICES+=,heat,h-api-cfn,h-api-cw,h-eng + +# Dependencies: +# - functions + +# stack.sh +# --------- +# install_heatclient +# install_heat +# configure_heatclient +# configure_heat +# init_heat +# start_heat +# stop_heat +# cleanup_heat + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- +HEAT_DIR=$DEST/heat +HEATCLIENT_DIR=$DEST/python-heatclient +# set up default directories + +# cleanup_heat() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_heat() { + # This function intentionally left blank + : +} + +# configure_heatclient() - Set config files, create data dirs, etc +function configure_heatclient() { + setup_develop $HEATCLIENT_DIR +} + +# configure_heat() - Set config files, create data dirs, etc +function configure_heat() { + setup_develop $HEAT_DIR + + HEAT_CONF_DIR=/etc/heat + if [[ ! -d $HEAT_CONF_DIR ]]; then + sudo mkdir -p $HEAT_CONF_DIR + fi + sudo chown $STACK_USER $HEAT_CONF_DIR + + HEAT_API_CFN_HOST=${HEAT_API_CFN_HOST:-$SERVICE_HOST} + HEAT_API_CFN_PORT=${HEAT_API_CFN_PORT:-8000} + HEAT_ENGINE_HOST=${HEAT_ENGINE_HOST:-$SERVICE_HOST} + HEAT_ENGINE_PORT=${HEAT_ENGINE_PORT:-8001} + HEAT_API_CW_HOST=${HEAT_API_CW_HOST:-$SERVICE_HOST} + HEAT_API_CW_PORT=${HEAT_API_CW_PORT:-8003} + HEAT_API_HOST=${HEAT_API_HOST:-$SERVICE_HOST} + HEAT_API_PORT=${HEAT_API_PORT:-8004} + + # Cloudformation API + HEAT_API_CFN_CONF=$HEAT_CONF_DIR/heat-api-cfn.conf + cp $HEAT_DIR/etc/heat/heat-api-cfn.conf $HEAT_API_CFN_CONF + iniset $HEAT_API_CFN_CONF DEFAULT debug True + inicomment $HEAT_API_CFN_CONF DEFAULT log_file + iniset $HEAT_API_CFN_CONF DEFAULT use_syslog $SYSLOG + iniset $HEAT_API_CFN_CONF DEFAULT bind_host $HEAT_API_CFN_HOST + iniset $HEAT_API_CFN_CONF DEFAULT bind_port $HEAT_API_CFN_PORT + + iniset_rpc_backend heat $HEAT_API_CFN_CONF DEFAULT + + HEAT_API_CFN_PASTE_INI=$HEAT_CONF_DIR/heat-api-cfn-paste.ini + cp $HEAT_DIR/etc/heat/heat-api-cfn-paste.ini $HEAT_API_CFN_PASTE_INI + iniset $HEAT_API_CFN_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $HEAT_API_CFN_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $HEAT_API_CFN_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $HEAT_API_CFN_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_API_CFN_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $HEAT_API_CFN_PASTE_INI filter:authtoken admin_user heat + iniset $HEAT_API_CFN_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD + iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens + + # OpenStack 
API + HEAT_API_CONF=$HEAT_CONF_DIR/heat-api.conf + cp $HEAT_DIR/etc/heat/heat-api.conf $HEAT_API_CONF + iniset $HEAT_API_CONF DEFAULT debug True + inicomment $HEAT_API_CONF DEFAULT log_file + iniset $HEAT_API_CONF DEFAULT use_syslog $SYSLOG + iniset $HEAT_API_CONF DEFAULT bind_host $HEAT_API_HOST + iniset $HEAT_API_CONF DEFAULT bind_port $HEAT_API_PORT + + iniset_rpc_backend heat $HEAT_API_CONF DEFAULT + + HEAT_API_PASTE_INI=$HEAT_CONF_DIR/heat-api-paste.ini + cp $HEAT_DIR/etc/heat/heat-api-paste.ini $HEAT_API_PASTE_INI + iniset $HEAT_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $HEAT_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $HEAT_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $HEAT_API_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $HEAT_API_PASTE_INI filter:authtoken admin_user heat + iniset $HEAT_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD + iniset $HEAT_API_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_API_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens + + # engine + HEAT_ENGINE_CONF=$HEAT_CONF_DIR/heat-engine.conf + cp $HEAT_DIR/etc/heat/heat-engine.conf $HEAT_ENGINE_CONF + iniset $HEAT_ENGINE_CONF DEFAULT debug True + inicomment $HEAT_ENGINE_CONF DEFAULT log_file + iniset $HEAT_ENGINE_CONF DEFAULT use_syslog $SYSLOG + iniset $HEAT_ENGINE_CONF DEFAULT bind_host $HEAT_ENGINE_HOST + iniset $HEAT_ENGINE_CONF DEFAULT bind_port $HEAT_ENGINE_PORT + iniset $HEAT_ENGINE_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT + iniset $HEAT_ENGINE_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition + iniset $HEAT_ENGINE_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT + local dburl + database_connection_url dburl heat + iniset $HEAT_ENGINE_CONF DEFAULT sql_connection $dburl + iniset $HEAT_ENGINE_CONF DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/random` + + iniset_rpc_backend heat $HEAT_ENGINE_CONF DEFAULT + + # Cloudwatch API + HEAT_API_CW_CONF=$HEAT_CONF_DIR/heat-api-cloudwatch.conf + cp $HEAT_DIR/etc/heat/heat-api-cloudwatch.conf $HEAT_API_CW_CONF + iniset $HEAT_API_CW_CONF DEFAULT debug True + inicomment $HEAT_API_CW_CONF DEFAULT log_file + iniset $HEAT_API_CW_CONF DEFAULT use_syslog $SYSLOG + iniset $HEAT_API_CW_CONF DEFAULT bind_host $HEAT_API_CW_HOST + iniset $HEAT_API_CW_CONF DEFAULT bind_port $HEAT_API_CW_PORT + + iniset_rpc_backend heat $HEAT_API_CW_CONF DEFAULT + + HEAT_API_CW_PASTE_INI=$HEAT_CONF_DIR/heat-api-cloudwatch-paste.ini + cp $HEAT_DIR/etc/heat/heat-api-cloudwatch-paste.ini $HEAT_API_CW_PASTE_INI + iniset $HEAT_API_CW_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST + iniset $HEAT_API_CW_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT + iniset $HEAT_API_CW_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL + iniset $HEAT_API_CW_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 + iniset $HEAT_API_CW_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $HEAT_API_CW_PASTE_INI filter:authtoken 
admin_user heat
+    iniset $HEAT_API_CW_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD
+    iniset $HEAT_API_CW_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
+    iniset $HEAT_API_CW_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens
+}
+
+# init_heat() - Initialize database
+function init_heat() {
+
+    # (Re)create heat database
+    recreate_database heat utf8
+
+    $HEAT_DIR/bin/heat-db-setup $os_PACKAGE -r $DATABASE_PASSWORD
+    $HEAT_DIR/tools/nova_create_flavors.sh
+}
+
+# install_heatclient() - Collect source and prepare
+function install_heatclient() {
+    git_clone $HEATCLIENT_REPO $HEATCLIENT_DIR $HEATCLIENT_BRANCH
+}
+
+# install_heat() - Collect source and prepare
+function install_heat() {
+    git_clone $HEAT_REPO $HEAT_DIR $HEAT_BRANCH
+}
+
+# start_heat() - Start running processes, including screen
+function start_heat() {
+    screen_it h-eng "cd $HEAT_DIR; bin/heat-engine --config-file=$HEAT_CONF_DIR/heat-engine.conf"
+    screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-file=$HEAT_CONF_DIR/heat-api.conf"
+    screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-file=$HEAT_CONF_DIR/heat-api-cfn.conf"
+    screen_it h-api-cw "cd $HEAT_DIR; bin/heat-api-cloudwatch --config-file=$HEAT_CONF_DIR/heat-api-cloudwatch.conf"
+}
+
+# stop_heat() - Stop running processes
+function stop_heat() {
+    # Kill the heat screen windows
+    for serv in h-eng h-api h-api-cfn h-api-cw; do
+        screen -S $SCREEN_NAME -p $serv -X kill
+    done
+}
+
+# Restore xtrace
+$XTRACE
diff --git a/lib/horizon b/lib/horizon
new file mode 100644
index 00000000..9180370b
--- /dev/null
+++ b/lib/horizon
@@ -0,0 +1,153 @@
+# lib/horizon
+# Functions to control the configuration and operation of the horizon service
+#
+
+# Dependencies:
+# ``functions`` file
+# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
+#
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_horizon
+# configure_horizon
+# init_horizon
+# start_horizon
+# stop_horizon
+# cleanup_horizon
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+#
+
+# Set up default directories
+HORIZON_DIR=$DEST/horizon
+
+# Allow overriding the default Apache user and group, default to
+# current user and their default group.
+APACHE_USER=${APACHE_USER:-$USER}
+APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)}
+
+
+# Entry Points
+# ------------
+
+# cleanup_horizon() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_horizon() {
+    # kill instances (nova)
+    # delete image files (glance)
+    # This function intentionally left blank
+    :
+}
+
+# configure_horizon() - Set config files, create data dirs, etc
+function configure_horizon() {
+    setup_develop $HORIZON_DIR
+}
+
+# init_horizon() - Initialize databases, etc.
+function init_horizon() {
+    # Remove stale session database.
+    rm -f $HORIZON_DIR/openstack_dashboard/local/dashboard_openstack.sqlite3
+
+    # ``local_settings.py`` is used to override horizon default settings.
+    local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
+    cp $FILES/horizon_settings.py $local_settings
+
+    # Initialize the horizon database (it stores sessions and notices shown to
+    # users). The user system is external (keystone).
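+    # By hand this is the stock Django bootstrap, roughly (illustrative,
+    # assuming the default $DEST of /opt/stack):
+    #
+    #   cd /opt/stack/horizon && python manage.py syncdb --noinput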
+ cd $HORIZON_DIR + python manage.py syncdb --noinput + cd $TOP_DIR + + # Create an empty directory that apache uses as docroot + sudo mkdir -p $HORIZON_DIR/.blackhole + + + if is_ubuntu; then + APACHE_NAME=apache2 + APACHE_CONF=sites-available/horizon + # Clean up the old config name + sudo rm -f /etc/apache2/sites-enabled/000-default + # Be a good citizen and use the distro tools here + sudo touch /etc/$APACHE_NAME/$APACHE_CONF + sudo a2ensite horizon + # WSGI isn't enabled by default, enable it + sudo a2enmod wsgi + elif is_fedora; then + APACHE_NAME=httpd + APACHE_CONF=conf.d/horizon.conf + sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf + elif is_suse; then + APACHE_NAME=apache2 + APACHE_CONF=vhosts.d/horizon.conf + # WSGI isn't enabled by default, enable it + sudo a2enmod wsgi + else + exit_distro_not_supported "apache configuration" + fi + + # Configure apache to run horizon + sudo sh -c "sed -e \" + s,%USER%,$APACHE_USER,g; + s,%GROUP%,$APACHE_GROUP,g; + s,%HORIZON_DIR%,$HORIZON_DIR,g; + s,%APACHE_NAME%,$APACHE_NAME,g; + s,%DEST%,$DEST,g; + \" $FILES/apache-horizon.template >/etc/$APACHE_NAME/$APACHE_CONF" + +} + +# install_horizon() - Collect source and prepare +function install_horizon() { + # Apache installation, because we mark it NOPRIME + if is_ubuntu; then + # Install apache2, which is NOPRIME'd + install_package apache2 libapache2-mod-wsgi + elif is_fedora; then + sudo rm -f /etc/httpd/conf.d/000-* + install_package httpd mod_wsgi + elif is_suse; then + install_package apache2 apache2-mod_wsgi + else + exit_distro_not_supported "apache installation" + fi + + # NOTE(sdague) quantal changed the name of the node binary + if is_ubuntu; then + if [[ ! -e "/usr/bin/node" ]]; then + install_package nodejs-legacy + fi + fi + + git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG +} + +# start_horizon() - Start running processes, including screen +function start_horizon() { + restart_service $APACHE_NAME + screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log" +} + +# stop_horizon() - Stop running processes (non-screen) +function stop_horizon() { + if is_ubuntu; then + stop_service apache2 + elif is_fedora; then + stop_service httpd + elif is_suse; then + stop_service apache2 + else + exit_distro_not_supported "apache configuration" + fi +} + +# Restore xtrace +$XTRACE diff --git a/lib/keystone b/lib/keystone new file mode 100644 index 00000000..866c62e1 --- /dev/null +++ b/lib/keystone @@ -0,0 +1,334 @@ +# lib/keystone +# Functions to control the configuration and operation of **Keystone** + +# Dependencies: +# ``functions`` file +# ``BASE_SQL_CONN`` +# ``SERVICE_HOST``, ``SERVICE_PROTOCOL`` +# ``SERVICE_TOKEN`` +# ``S3_SERVICE_PORT`` (template backend only) +# ``STACK_USER`` + +# ``stack.sh`` calls the entry points in this order: +# +# install_keystone +# configure_keystone +# init_keystone +# start_keystone +# create_keystone_accounts +# stop_keystone +# cleanup_keystone + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories +KEYSTONE_DIR=$DEST/keystone +KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} +KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf +KEYSTONE_AUTH_CACHE_DIR=${KEYSTONE_AUTH_CACHE_DIR:-/var/cache/keystone} + +KEYSTONECLIENT_DIR=$DEST/python-keystoneclient + +# Select the backend for Keystone's service catalog +KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql} 
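+# ``sql`` serves the catalog out of the keystone database; any other value
+# (e.g. ``KEYSTONE_CATALOG_BACKEND=template`` in ``localrc``) falls through
+# to the templated catalog file below (see configure_keystone()).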
+KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates + +# Select the backend for Tokens +KEYSTONE_TOKEN_BACKEND=${KEYSTONE_TOKEN_BACKEND:-sql} + +# Select Keystone's token format +# Choose from 'UUID' and 'PKI' +KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-PKI} + +# Set Keystone interface configuration +KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} +KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357} +KEYSTONE_AUTH_PORT_INT=${KEYSTONE_AUTH_PORT_INT:-35358} +KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL} + +# Public facing bits +KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST} +KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} +KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001} +KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} + + +# Entry Points +# ------------ + +# cleanup_keystone() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_keystone() { + # kill instances (nova) + # delete image files (glance) + # This function intentionally left blank + : +} + +# configure_keystoneclient() - Set config files, create data dirs, etc +function configure_keystoneclient() { + setup_develop $KEYSTONECLIENT_DIR +} + +# configure_keystone() - Set config files, create data dirs, etc +function configure_keystone() { + setup_develop $KEYSTONE_DIR + + if [[ ! -d $KEYSTONE_CONF_DIR ]]; then + sudo mkdir -p $KEYSTONE_CONF_DIR + fi + sudo chown $STACK_USER $KEYSTONE_CONF_DIR + + if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then + cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF + cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR + fi + + # Rewrite stock ``keystone.conf`` + local dburl + database_connection_url dburl keystone + + if is_service_enabled ldap; then + #Set all needed ldap values + iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD + iniset $KEYSTONE_CONF ldap user "dc=Manager,dc=openstack,dc=org" + iniset $KEYSTONE_CONF ldap suffix "dc=openstack,dc=org" + fi + + if [[ "$KEYSTONE_IDENTITY_BACKEND" == "ldap" ]]; then + iniset $KEYSTONE_CONF identity driver "keystone.identity.backends.ldap.Identity" + fi + + if is_service_enabled tls-proxy; then + # Set the service ports for a proxy to take the originals + iniset $KEYSTONE_CONF DEFAULT public_port $KEYSTONE_SERVICE_PORT_INT + iniset $KEYSTONE_CONF DEFAULT admin_port $KEYSTONE_AUTH_PORT_INT + fi + + iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN" + iniset $KEYSTONE_CONF signing token_format "$KEYSTONE_TOKEN_FORMAT" + iniset $KEYSTONE_CONF sql connection $dburl + iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2" + sed -e " + /^pipeline.*ec2_extension crud_/s|ec2_extension crud_extension|ec2_extension s3_extension crud_extension|; + " -i $KEYSTONE_CONF + + # Append the S3 bits + iniset $KEYSTONE_CONF filter:s3_extension paste.filter_factory "keystone.contrib.s3:S3Extension.factory" + + if [[ "$KEYSTONE_TOKEN_BACKEND" = "sql" ]]; then + iniset $KEYSTONE_CONF token driver keystone.token.backends.sql.Token + else + iniset $KEYSTONE_CONF token driver keystone.token.backends.kvs.Token + fi + + if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then + # Configure ``keystone.conf`` to use sql + iniset $KEYSTONE_CONF catalog driver keystone.catalog.backends.sql.Catalog + inicomment $KEYSTONE_CONF catalog template_file + else + cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG + + # Add swift endpoints to service catalog if swift is 
enabled + if is_service_enabled swift; then + echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG + fi + + # Add quantum endpoints to service catalog if quantum is enabled + if is_service_enabled quantum; then + echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG + fi + + sed -e " + s,%SERVICE_HOST%,$SERVICE_HOST,g; + s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g; + " -i $KEYSTONE_CATALOG + + # Configure ``keystone.conf`` to use templates + iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.TemplatedCatalog" + iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG" + fi + + # Set up logging + LOGGING_ROOT="devel" + if [ "$SYSLOG" != "False" ]; then + LOGGING_ROOT="$LOGGING_ROOT,production" + fi + KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_CONF_DIR/logging.conf" + cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_CONF_DIR/logging.conf + iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG" + iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production" + +} + +# create_keystone_accounts() - Sets up common required keystone accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service -- -- +# -- -- Member +# admin admin admin +# demo admin admin +# demo demo Member, anotherrole +# invisible_to_admin demo Member + +# Migrated from keystone_data.sh +create_keystone_accounts() { + + # admin + ADMIN_TENANT=$(keystone tenant-create \ + --name admin \ + | grep " id " | get_field 2) + ADMIN_USER=$(keystone user-create \ + --name admin \ + --pass "$ADMIN_PASSWORD" \ + --email admin@example.com \ + | grep " id " | get_field 2) + ADMIN_ROLE=$(keystone role-create \ + --name admin \ + | grep " id " | get_field 2) + keystone user-role-add \ + --user_id $ADMIN_USER \ + --role_id $ADMIN_ROLE \ + --tenant_id $ADMIN_TENANT + + # service + SERVICE_TENANT=$(keystone tenant-create \ + --name $SERVICE_TENANT_NAME \ + | grep " id " | get_field 2) + + # The Member role is used by Horizon and Swift so we need to keep it: + MEMBER_ROLE=$(keystone role-create --name=Member | grep " id " | get_field 2) + # ANOTHER_ROLE demonstrates that an arbitrary role may be created and used + # TODO(sleepsonthefloor): show how this can be used for rbac in the future! 
+ ANOTHER_ROLE=$(keystone role-create --name=anotherrole | grep " id " | get_field 2) + + # invisible tenant - admin can't see this one + INVIS_TENANT=$(keystone tenant-create --name=invisible_to_admin | grep " id " | get_field 2) + + # demo + DEMO_TENANT=$(keystone tenant-create \ + --name=demo \ + | grep " id " | get_field 2) + DEMO_USER=$(keystone user-create \ + --name demo \ + --pass "$ADMIN_PASSWORD" \ + --email demo@example.com \ + | grep " id " | get_field 2) + keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $DEMO_TENANT + keystone user-role-add --user_id $ADMIN_USER --role_id $ADMIN_ROLE --tenant_id $DEMO_TENANT + keystone user-role-add --user_id $DEMO_USER --role_id $ANOTHER_ROLE --tenant_id $DEMO_TENANT + keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $INVIS_TENANT + + # Keystone + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + KEYSTONE_SERVICE=$(keystone service-create \ + --name keystone \ + --type identity \ + --description "Keystone Identity Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $KEYSTONE_SERVICE \ + --publicurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0" \ + --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0" \ + --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0" + fi + + # TODO(dtroyer): This is part of a series of changes...remove these when + # complete if they are really unused +# KEYSTONEADMIN_ROLE=$(keystone role-create \ +# --name KeystoneAdmin \ +# | grep " id " | get_field 2) +# KEYSTONESERVICE_ROLE=$(keystone role-create \ +# --name KeystoneServiceAdmin \ +# | grep " id " | get_field 2) + + # TODO(termie): these two might be dubious +# keystone user-role-add \ +# --user_id $ADMIN_USER \ +# --role_id $KEYSTONEADMIN_ROLE \ +# --tenant_id $ADMIN_TENANT +# keystone user-role-add \ +# --user_id $ADMIN_USER \ +# --role_id $KEYSTONESERVICE_ROLE \ +# --tenant_id $ADMIN_TENANT +} + +# init_keystone() - Initialize databases, etc. +function init_keystone() { + # (Re)create keystone database + recreate_database keystone utf8 + + # Initialize keystone database + $KEYSTONE_DIR/bin/keystone-manage db_sync + + if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then + # Set up certificates + rm -rf $KEYSTONE_CONF_DIR/ssl + $KEYSTONE_DIR/bin/keystone-manage pki_setup + + # Create cache dir + sudo mkdir -p $KEYSTONE_AUTH_CACHE_DIR + sudo chown $STACK_USER $KEYSTONE_AUTH_CACHE_DIR + rm -f $KEYSTONE_AUTH_CACHE_DIR/* + fi +} + +# install_keystoneclient() - Collect source and prepare +function install_keystoneclient() { + git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH +} + +# install_keystone() - Collect source and prepare +function install_keystone() { + # only install ldap if the service has been enabled + if is_service_enabled ldap; then + install_ldap + fi + git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH +} + +# start_keystone() - Start running processes, including screen +function start_keystone() { + # Get right service port for testing + local service_port=$KEYSTONE_SERVICE_PORT + if is_service_enabled tls-proxy; then + service_port=$KEYSTONE_SERVICE_PORT_INT + fi + + # Start Keystone in a screen window + screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" + echo "Waiting for keystone to start..." + if ! 
timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s http://$SERVICE_HOST:$service_port/v2.0/ >/dev/null; do sleep 1; done"; then + echo "keystone did not start" + exit 1 + fi + + # Start proxies if enabled + if is_service_enabled tls-proxy; then + start_tls_proxy '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT & + start_tls_proxy '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT & + fi +} + +# stop_keystone() - Stop running processes +function stop_keystone() { + # Kill the Keystone screen window + screen -S $SCREEN_NAME -p key -X kill +} + +# Restore xtrace +$XTRACE diff --git a/lib/ldap b/lib/ldap new file mode 100644 index 00000000..5cb45347 --- /dev/null +++ b/lib/ldap @@ -0,0 +1,74 @@ +# lib/ldap +# Functions to control the installation and configuration of **ldap** + +# ``stack.sh`` calls the entry points in this order: +# + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# install_ldap +# install_ldap() - Collect source and prepare +function install_ldap() { + echo "Installing LDAP inside function" + echo "LDAP_PASSWORD is $LDAP_PASSWORD" + echo "os_VENDOR is $os_VENDOR" + printf "installing" + if is_ubuntu; then + echo "os vendor is Ubuntu" + LDAP_OLCDB_NUMBER=1 + LDAP_ROOTPW_COMMAND=replace + sudo DEBIAN_FRONTEND=noninteractive apt-get install slapd ldap-utils + #automatically starts LDAP on ubuntu so no need to call start_ldap + elif is_fedora; then + echo "os vendor is Fedora" + LDAP_OLCDB_NUMBER=2 + LDAP_ROOTPW_COMMAND=add + start_ldap + fi + + printf "generate password file" + SLAPPASS=`slappasswd -s $LDAP_PASSWORD` + + printf "secret is $SLAPPASS\n" + #create manager.ldif + TMP_MGR_DIFF_FILE=`mktemp -t manager_ldiff.$$.XXXXXXXXXX.ldif` + sed -e "s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER|" -e "s|\${SLAPPASS}|$SLAPPASS|" -e "s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND|" $FILES/ldap/manager.ldif.in >> $TMP_MGR_DIFF_FILE + + #update ldap olcdb + sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_MGR_DIFF_FILE + + # add our top level ldap nodes + if ldapsearch -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -b dc=openstack,dc=org | grep -q "Success" ; then + printf "LDAP already configured for OpenStack\n" + if [[ "$KEYSTONE_CLEAR_LDAP" == "yes" ]]; then + # clear LDAP state + clear_ldap_state + # reconfigure LDAP for OpenStack + ldapadd -c -x -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -w $LDAP_PASSWORD -f $FILES/ldap/openstack.ldif + fi + else + printf "Configuring LDAP for OpenStack\n" + ldapadd -c -x -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -w $LDAP_PASSWORD -f $FILES/ldap/openstack.ldif + fi +} + +# start_ldap() - Start LDAP +function start_ldap() { + sudo service slapd restart +} + + +# stop_ldap() - Stop LDAP +function stop_ldap() { + sudo service slapd stop +} + +# clear_ldap_state() - Clear LDAP State +function clear_ldap_state() { + ldapdelete -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -r "dc=openstack,dc=org" +} + +# Restore xtrace +$XTRACE diff --git a/lib/nova b/lib/nova new file mode 100644 index 00000000..849ec573 --- /dev/null +++ b/lib/nova @@ -0,0 +1,580 @@ +# lib/nova +# Functions to control the configuration and operation of the **Nova** service + +# Dependencies: +# ``functions`` file +# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# ``LIBVIRT_TYPE`` must be defined +# ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be 
defined +# ``KEYSTONE_TOKEN_FORMAT`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# install_nova +# configure_nova +# create_nova_conf +# init_nova +# start_nova +# stop_nova +# cleanup_nova + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories +NOVA_DIR=$DEST/nova +NOVACLIENT_DIR=$DEST/python-novaclient +NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova} +# INSTANCES_PATH is the previous name for this +NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}} +NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova} + +NOVA_CONF_DIR=/etc/nova +NOVA_CONF=$NOVA_CONF_DIR/nova.conf +NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} + +# Public facing bits +NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST} +NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774} +NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774} +NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} + +# Support entry points installation of console scripts +if [[ -d $NOVA_DIR/bin ]]; then + NOVA_BIN_DIR=$NOVA_DIR/bin +else + NOVA_BIN_DIR=$(get_python_exec_prefix) +fi + +# Set the paths of certain binaries +NOVA_ROOTWRAP=$(get_rootwrap_location nova) + +# Allow rate limiting to be turned off for testing, like for Tempest +# NOTE: Set API_RATE_LIMIT="False" to turn OFF rate limiting +API_RATE_LIMIT=${API_RATE_LIMIT:-"True"} + +# Nova supports pluggable schedulers. The default ``FilterScheduler`` +# should work in most cases. +SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler} + +QEMU_CONF=/etc/libvirt/qemu.conf + + +# Entry Points +# ------------ + +function add_nova_opt { + echo "$1" >>$NOVA_CONF +} + +# Helper to clean iptables rules +function clean_iptables() { + # Delete rules + sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables",$0}' | bash + # Delete nat rules + sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables -t nat",$0}' | bash + # Delete chains + sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables",$0}' | bash + # Delete nat chains + sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables -t nat",$0}' | bash +} + +# cleanup_nova() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_nova() { + if is_service_enabled n-cpu; then + # Clean iptables from previous runs + clean_iptables + + # Destroy old instances + instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"` + if [ ! "$instances" = "" ]; then + echo $instances | xargs -n1 sudo virsh destroy || true + echo $instances | xargs -n1 sudo virsh undefine || true + fi + + # Logout and delete iscsi sessions + sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | xargs sudo iscsiadm --mode node --logout || true + sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d " " -f2 | sudo iscsiadm --mode node --op delete || true + + # Clean out the instances directory. 
+        sudo rm -rf $NOVA_INSTANCES_PATH/*
+    fi
+}
+
+# configure_novaclient() - Set config files, create data dirs, etc
+function configure_novaclient() {
+    setup_develop $NOVACLIENT_DIR
+}
+
+# configure_nova_rootwrap() - Configure Nova's rootwrap
+function configure_nova_rootwrap() {
+    # Deploy new rootwrap filters files (owned by root).
+    # Wipe any existing rootwrap.d files first
+    if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then
+        sudo rm -rf $NOVA_CONF_DIR/rootwrap.d
+    fi
+    # Deploy filters to /etc/nova/rootwrap.d
+    sudo mkdir -m 755 $NOVA_CONF_DIR/rootwrap.d
+    sudo cp $NOVA_DIR/etc/nova/rootwrap.d/*.filters $NOVA_CONF_DIR/rootwrap.d
+    sudo chown -R root:root $NOVA_CONF_DIR/rootwrap.d
+    sudo chmod 644 $NOVA_CONF_DIR/rootwrap.d/*
+    # Set up rootwrap.conf, pointing to /etc/nova/rootwrap.d
+    sudo cp $NOVA_DIR/etc/nova/rootwrap.conf $NOVA_CONF_DIR/
+    sudo sed -e "s:^filters_path=.*$:filters_path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf
+    sudo chown root:root $NOVA_CONF_DIR/rootwrap.conf
+    sudo chmod 0644 $NOVA_CONF_DIR/rootwrap.conf
+    # Specify rootwrap.conf as first parameter to nova-rootwrap
+    ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf *"
+
+    # Set up the rootwrap sudoers for nova
+    TEMPFILE=`mktemp`
+    echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
+    chmod 0440 $TEMPFILE
+    sudo chown root:root $TEMPFILE
+    sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap
+}
+
+# configure_nova() - Set config files, create data dirs, etc
+function configure_nova() {
+    setup_develop $NOVA_DIR
+
+    # Put config files in ``/etc/nova`` for everyone to find
+    if [[ ! -d $NOVA_CONF_DIR ]]; then
+        sudo mkdir -p $NOVA_CONF_DIR
+    fi
+    sudo chown $STACK_USER $NOVA_CONF_DIR
+
+    cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR
+
+    configure_nova_rootwrap
+
+    if is_service_enabled n-api; then
+        # Use the sample http middleware configuration supplied in the
+        # Nova sources. This paste config adds the configuration required
+        # for Nova to validate Keystone tokens.
+
+        # Remove legacy paste config if present
+        rm -f $NOVA_DIR/bin/nova-api-paste.ini
+
+        # Get the sample configuration file in place
+        cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR
+
+        iniset $NOVA_API_PASTE_INI filter:authtoken auth_host $SERVICE_HOST
+        if is_service_enabled tls-proxy; then
+            iniset $NOVA_API_PASTE_INI filter:authtoken auth_protocol $SERVICE_PROTOCOL
+        fi
+        iniset $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
+        iniset $NOVA_API_PASTE_INI filter:authtoken admin_user nova
+        iniset $NOVA_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD
+    fi
+
+    iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR
+
+    if is_service_enabled n-cpu; then
+        # Force IP forwarding on, just in case
+        sudo sysctl -w net.ipv4.ip_forward=1
+
+        # Attempt to load modules: network block device - used to manage qcow images
+        sudo modprobe nbd || true
+
+        # Check for kvm (hardware-based virtualization). If unable to initialize
+        # kvm, we drop back to the slower emulation mode (qemu). Note: many systems
+        # come with hardware virtualization disabled in BIOS.
+        if [[ "$LIBVIRT_TYPE" == "kvm" ]]; then
+            sudo modprobe kvm || true
+            if [ ! -e /dev/kvm ]; then
+                echo "WARNING: Switching to QEMU"
+                LIBVIRT_TYPE=qemu
+                if which selinuxenabled 2>&1 > /dev/null && selinuxenabled; then
+                    # https://bugzilla.redhat.com/show_bug.cgi?id=753589
+                    sudo setsebool virt_use_execmem on
+                fi
+            fi
+        fi
+
+        # Install and configure **LXC** if specified. LXC is another approach to
+        # splitting a system into many smaller parts. LXC uses cgroups and chroot
+        # to simulate multiple systems.
+        if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
+            if is_ubuntu; then
+                if [[ ! "$DISTRO" > natty ]]; then
+                    cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0"
+                    sudo mkdir -p /cgroup
+                    if ! grep -q cgroup /etc/fstab; then
+                        echo "$cgline" | sudo tee -a /etc/fstab
+                    fi
+                    if ! mount -n | grep -q cgroup; then
+                        sudo mount /cgroup
+                    fi
+                fi
+            fi
+        fi
+
+        # Prepare directories and packages for baremetal driver
+        if is_baremetal; then
+            configure_baremetal_nova_dirs
+        fi
+
+        if is_service_enabled quantum && is_quantum_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then
+            # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces
+            cat <<EOF | sudo tee -a $QEMU_CONF
+cgroup_device_acl = [
+    "/dev/null", "/dev/full", "/dev/zero",
+    "/dev/random", "/dev/urandom",
+    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+    "/dev/rtc", "/dev/hpet","/dev/net/tun",
+]
+EOF
+        fi
+
+        if is_ubuntu; then
+            LIBVIRT_DAEMON=libvirt-bin
+        else
+            LIBVIRT_DAEMON=libvirtd
+        fi
+
+        if is_fedora; then
+            if [[ "$os_RELEASE" -ge "18" ]]; then
+                rules_dir=/etc/polkit-1/rules.d
+                sudo mkdir -p $rules_dir
+                sudo bash -c "cat <<EOF > $rules_dir/50-libvirt-$STACK_USER.rules
+polkit.addRule(function(action, subject) {
+    if (action.id == 'org.libvirt.unix.manage' &&
+        subject.user == '"$STACK_USER"') {
+        return polkit.Result.YES;
+    }
+});
+EOF"
+                unset rules_dir
+            else
+                sudo bash -c 'cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
+[libvirt Management Access]
+Identity=unix-group:libvirtd
+Action=org.libvirt.unix.manage
+ResultAny=yes
+ResultInactive=yes
+ResultActive=yes
+EOF'
+            fi
+        elif is_suse; then
+            # Work around the fact that polkit-default-privs overrules pklas
+            # with 'unix-group:$group'.
+            sudo bash -c "cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
+[libvirt Management Access]
+Identity=unix-user:$USER
+Action=org.libvirt.unix.manage
+ResultAny=yes
+ResultInactive=yes
+ResultActive=yes
+EOF"
+        fi
+
+        # The user that nova runs as needs to be member of **libvirtd** group otherwise
+        # nova-compute will be unable to use libvirt.
+        if ! getent group libvirtd >/dev/null; then
+            sudo groupadd libvirtd
+        fi
+        add_user_to_group $STACK_USER libvirtd
+
+        # libvirt detects various settings on startup, as we potentially changed
+        # the system configuration (modules, filesystems), we need to restart
+        # libvirt to detect those changes.
+        restart_service $LIBVIRT_DAEMON
+
+
+        # Instance Storage
+        # ----------------
+
+        # Nova stores each instance in its own directory.
+        mkdir -p $NOVA_INSTANCES_PATH
+
+        # You can specify a different disk to be mounted and used for backing the
+        # virtual machines. If there is a partition labeled nova-instances we
+        # mount it (ext filesystems can be labeled via e2label).
+        if [ -L /dev/disk/by-label/nova-instances ]; then
+            if !
mount -n | grep -q $NOVA_INSTANCES_PATH; then + sudo mount -L nova-instances $NOVA_INSTANCES_PATH + sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH + fi + fi + + # Clean up old instances + cleanup_nova + fi +} + +# create_nova_accounts() - Set up common required nova accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service nova admin, [ResellerAdmin (swift only)] + +# Migrated from keystone_data.sh +create_nova_accounts() { + + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + # Nova + if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then + NOVA_USER=$(keystone user-create \ + --name=nova \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=nova@example.com \ + | grep " id " | get_field 2) + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $NOVA_USER \ + --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + NOVA_SERVICE=$(keystone service-create \ + --name=nova \ + --type=compute \ + --description="Nova Compute Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $NOVA_SERVICE \ + --publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ + --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ + --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" + fi + fi +} + +# create_nova_conf() - Create a new nova.conf file +function create_nova_conf() { + # Remove legacy ``nova.conf`` + rm -f $NOVA_DIR/bin/nova.conf + + # (Re)create ``nova.conf`` + rm -f $NOVA_CONF + add_nova_opt "[DEFAULT]" + iniset $NOVA_CONF DEFAULT verbose "True" + iniset $NOVA_CONF DEFAULT debug "True" + iniset $NOVA_CONF DEFAULT auth_strategy "keystone" + iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True" + iniset $NOVA_CONF DEFAULT api_paste_config "$NOVA_API_PASTE_INI" + iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf" + iniset $NOVA_CONF DEFAULT compute_scheduler_driver "$SCHEDULER" + iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF" + iniset $NOVA_CONF DEFAULT force_dhcp_release "True" + iniset $NOVA_CONF DEFAULT fixed_range "$FIXED_RANGE" + iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME" + iniset $NOVA_CONF DEFAULT s3_host "$SERVICE_HOST" + iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT" + iniset $NOVA_CONF DEFAULT osapi_compute_extension "nova.api.openstack.compute.contrib.standard_extensions" + iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP" + local dburl + database_connection_url dburl nova + iniset $NOVA_CONF DEFAULT sql_connection "$dburl" + if is_baremetal; then + database_connection_url dburl nova_bm + iniset $NOVA_CONF baremetal sql_connection $dburl + fi + iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" + iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" + iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" + + if is_service_enabled n-api; then + iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS" + if is_service_enabled tls-proxy; then + # Set the service port for a proxy to take the original + iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT" + fi + fi + if is_service_enabled cinder; then + iniset $NOVA_CONF DEFAULT volume_api_class "nova.volume.cinder.API" + fi + if [ -n 
"$NOVA_STATE_PATH" ]; then + iniset $NOVA_CONF DEFAULT state_path "$NOVA_STATE_PATH" + iniset $NOVA_CONF DEFAULT lock_path "$NOVA_STATE_PATH" + fi + if [ -n "$NOVA_INSTANCES_PATH" ]; then + iniset $NOVA_CONF DEFAULT instances_path "$NOVA_INSTANCES_PATH" + fi + if [ "$MULTI_HOST" != "False" ]; then + iniset $NOVA_CONF DEFAULT multi_host "True" + iniset $NOVA_CONF DEFAULT send_arp_for_ha "True" + fi + if [ "$SYSLOG" != "False" ]; then + iniset $NOVA_CONF DEFAULT use_syslog "True" + fi + if [ "$API_RATE_LIMIT" != "True" ]; then + iniset $NOVA_CONF DEFAULT api_rate_limit "False" + fi + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + # Add color to logging output + iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $NOVA_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $NOVA_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" + iniset $NOVA_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + else + # Show user_name and project_name instead of user_id and project_id + iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" + fi + if is_service_enabled ceilometer; then + iniset $NOVA_CONF DEFAULT instance_usage_audit "True" + iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour" + iniset $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier" + iniset $NOVA_CONF DEFAULT notification_driver "ceilometer.compute.nova_notifier" + fi + + + # Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS`` + if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; then + EXTRA_OPTS=$EXTRA_FLAGS + fi + + # Define extra nova conf flags by defining the array ``EXTRA_OPTS``. + # For Example: ``EXTRA_OPTS=(foo=true bar=2)`` + for I in "${EXTRA_OPTS[@]}"; do + # Replace the first '=' with ' ' for iniset syntax + iniset $NOVA_CONF DEFAULT ${I/=/ } + done +} + +# create_nova_cache_dir() - Part of the init_nova() process +function create_nova_cache_dir() { + # Create cache dir + sudo mkdir -p $NOVA_AUTH_CACHE_DIR + sudo chown $STACK_USER $NOVA_AUTH_CACHE_DIR + rm -f $NOVA_AUTH_CACHE_DIR/* +} + +function create_nova_conf_nova_network() { + iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NET_MAN" + iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE" + iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE" + iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE" + if [ -n "$FLAT_INTERFACE" ]; then + iniset $NOVA_CONF DEFAULT flat_interface "$FLAT_INTERFACE" + fi +} + +# create_nova_keys_dir() - Part of the init_nova() process +function create_nova_keys_dir() { + # Create keys dir + sudo mkdir -p ${NOVA_STATE_PATH}/keys + sudo chown -R $STACK_USER ${NOVA_STATE_PATH} +} + +# init_nova() - Initialize databases, etc. +function init_nova() { + # All nova components talk to a central database. + # Only do this step once on the API node for an entire cluster. 
+ if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then + # (Re)create nova database + # Explicitly use latin1: to avoid lp#829209, nova expects the database to + # use latin1 by default, and then upgrades the database to utf8 (see the + # 082_essex.py in nova) + recreate_database nova latin1 + + # Migrate nova database + $NOVA_BIN_DIR/nova-manage db sync + + # (Re)create nova baremetal database + if is_baremetal; then + recreate_database nova_bm latin1 + $NOVA_BIN_DIR/nova-baremetal-manage db sync + fi + fi + + create_nova_cache_dir + create_nova_keys_dir +} + +# install_novaclient() - Collect source and prepare +function install_novaclient() { + git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH +} + +# install_nova() - Collect source and prepare +function install_nova() { + if is_service_enabled n-cpu; then + if is_ubuntu; then + install_package libvirt-bin + elif is_fedora || is_suse; then + install_package libvirt + else + exit_distro_not_supported "libvirt installation" + fi + + # Install and configure **LXC** if specified. LXC is another approach to + # splitting a system into many smaller parts. LXC uses cgroups and chroot + # to simulate multiple systems. + if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then + if is_ubuntu; then + if [[ "$DISTRO" > natty ]]; then + install_package cgroup-lite + fi + else + ### FIXME(dtroyer): figure this out + echo "RPM-based cgroup not implemented yet" + yum_install libcgroup-tools + fi + fi + fi + + git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH +} + +# start_nova_api() - Start the API process ahead of other things +function start_nova_api() { + # Get right service port for testing + local service_port=$NOVA_SERVICE_PORT + if is_service_enabled tls-proxy; then + service_port=$NOVA_SERVICE_PORT_INT + fi + + screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api" + echo "Waiting for nova-api to start..." + if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then + echo "nova-api did not start" + exit 1 + fi + + # Start proxies if enabled + if is_service_enabled tls-proxy; then + start_tls_proxy '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT & + fi +} + +# start_nova() - Start running processes, including screen +function start_nova() { + # The group **libvirtd** is added to the current user in this script. + # Use 'sg' to execute nova-compute as a member of the **libvirtd** group. 
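+    # For example, the n-cpu window below effectively runs
+    # ``sg libvirtd "$NOVA_BIN_DIR/nova-compute"`` so libvirt access works.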
+    # ``screen_it`` checks ``is_service_enabled``, so it is not needed here
+    screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor"
+    screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_BIN_DIR/nova-compute"
+    screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert"
+    screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network"
+    screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler"
+    screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $NOVA_CONF --web $NOVNC_DIR"
+    screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF"
+    screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $NOVA_CONF --web $SPICE_DIR"
+    screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth"
+}
+
+# stop_nova() - Stop running processes (non-screen)
+function stop_nova() {
+    # Kill the nova screen windows
+    for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-cond n-spice; do
+        screen -S $SCREEN_NAME -p $serv -X kill
+    done
+}
+
+# Restore xtrace
+$XTRACE
diff --git a/lib/quantum b/lib/quantum
new file mode 100644
index 00000000..61a5218e
--- /dev/null
+++ b/lib/quantum
@@ -0,0 +1,727 @@
+# lib/quantum
+# functions - functions specific to quantum
+
+# Dependencies:
+# ``functions`` file
+# ``DEST`` must be defined
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_quantum
+# install_quantumclient
+# install_quantum_agent_packages
+# install_quantum_third_party
+# setup_quantum
+# setup_quantumclient
+# configure_quantum
+# init_quantum
+# configure_quantum_third_party
+# init_quantum_third_party
+# start_quantum_third_party
+# create_nova_conf_quantum
+# start_quantum_service_and_check
+# create_quantum_initial_network
+# setup_quantum_debug
+# start_quantum_agents
+#
+# ``unstack.sh`` calls the entry points in this order:
+#
+# stop_quantum
+
+# Functions in lib/quantum are classified into the following categories:
+#
+# - entry points (called from stack.sh or unstack.sh)
+# - internal functions
+# - quantum exercises
+# - 3rd party programs
+
+
+# Quantum Networking
+# ------------------
+
+# Make sure that quantum is enabled in ``ENABLED_SERVICES``. If you want
+# to run Quantum on this host, make sure that q-svc is also in
+# ``ENABLED_SERVICES``.
+#
+# If you're planning to use the Quantum openvswitch plugin, set
+# ``Q_PLUGIN`` to "openvswitch" and make sure the q-agt service is enabled
+# in ``ENABLED_SERVICES``. If you're planning to use the Quantum
+# linuxbridge plugin, set ``Q_PLUGIN`` to "linuxbridge" and make sure the
+# q-agt service is enabled in ``ENABLED_SERVICES``.
+#
+# See "Quantum Network Configuration" below for additional variables
+# that must be set in localrc for connectivity across hosts with
+# Quantum.
+#
+# With Quantum networking the NET_MAN variable is ignored.
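+#
+# A minimal ``localrc`` sketch enabling Quantum with the openvswitch
+# plugin on a single host (illustrative values, not tested defaults):
+#
+#   ENABLED_SERVICES+=,quantum,q-svc,q-agt,q-dhcp,q-l3,q-meta
+#   Q_PLUGIN=openvswitch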
+
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Quantum Network Configuration
+# -----------------------------
+
+# Set up default directories
+QUANTUM_DIR=$DEST/quantum
+QUANTUMCLIENT_DIR=$DEST/python-quantumclient
+QUANTUM_AUTH_CACHE_DIR=${QUANTUM_AUTH_CACHE_DIR:-/var/cache/quantum}
+
+QUANTUM_CONF_DIR=/etc/quantum
+QUANTUM_CONF=$QUANTUM_CONF_DIR/quantum.conf
+export QUANTUM_TEST_CONFIG_FILE=${QUANTUM_TEST_CONFIG_FILE:-"$QUANTUM_CONF_DIR/debug.ini"}
+
+# Default Quantum Plugin
+Q_PLUGIN=${Q_PLUGIN:-openvswitch}
+# Default Quantum Port
+Q_PORT=${Q_PORT:-9696}
+# Default Quantum Host
+Q_HOST=${Q_HOST:-$HOST_IP}
+# Default admin username
+Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum}
+# Default auth strategy
+Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone}
+# Use namespace or not
+Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True}
+Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True}
+# Metadata IP
+Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP}
+# Allow overlapping IPs among subnets
+Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-False}
+# Use quantum-debug command
+Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False}
+# The name of the default q-l3 router
+Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1}
+
+if is_service_enabled quantum; then
+    Q_RR_CONF_FILE=$QUANTUM_CONF_DIR/rootwrap.conf
+    if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
+        Q_RR_COMMAND="sudo"
+    else
+        QUANTUM_ROOTWRAP=$(get_rootwrap_location quantum)
+        Q_RR_COMMAND="sudo $QUANTUM_ROOTWRAP $Q_RR_CONF_FILE"
+    fi
+
+    # Provider Network Configurations
+    # --------------------------------
+
+    # The following variables control the Quantum openvswitch and
+    # linuxbridge plugins' allocation of tenant networks and
+    # availability of provider networks. If these are not configured
+    # in localrc, tenant networks will be local to the host (with no
+    # remote connectivity), and no physical resources will be
+    # available for the allocation of provider networks.
+
+    # To use GRE tunnels for tenant networks, set to True in
+    # localrc. GRE tunnels are only supported by the openvswitch
+    # plugin, and currently only on Ubuntu.
+    ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False}
+
+    # If using GRE tunnels for tenant networks, specify the range of
+    # tunnel IDs from which tenant networks are allocated. Can be
+    # overridden in localrc if necessary.
+    TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000}
+
+    # To use VLANs for tenant networks, set to True in localrc. VLANs
+    # are supported by the openvswitch and linuxbridge plugins, each
+    # requiring additional configuration described below.
+    ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False}
+
+    # If using VLANs for tenant networks, set in localrc to specify
+    # the range of VLAN VIDs from which tenant networks are
+    # allocated. An external network switch must be configured to
+    # trunk these VLANs between hosts for multi-host connectivity.
+    #
+    # Example: ``TENANT_VLAN_RANGE=1000:1999``
+    TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-}
+
+    # If using VLANs for tenant networks, or if using flat or VLAN
+    # provider networks, set in localrc to the name of the physical
+    # network, and also configure OVS_PHYSICAL_BRIDGE for the
+    # openvswitch agent or LB_PHYSICAL_INTERFACE for the linuxbridge
+    # agent, as described below.
+ # + # Example: ``PHYSICAL_NETWORK=default`` + PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} + + # With the openvswitch plugin, if using VLANs for tenant networks, + # or if using flat or VLAN provider networks, set in localrc to + # the name of the OVS bridge to use for the physical network. The + # bridge will be created if it does not already exist, but a + # physical interface must be manually added to the bridge as a + # port for external connectivity. + # + # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` + OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} + + # With the linuxbridge plugin, if using VLANs for tenant networks, + # or if using flat or VLAN provider networks, set in localrc to + # the name of the network interface to use for the physical + # network. + # + # Example: ``LB_PHYSICAL_INTERFACE=eth1`` + LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} + + # With the openvswitch plugin, set to True in localrc to enable + # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. + # + # Example: ``OVS_ENABLE_TUNNELING=True`` + OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} +fi + +# Quantum plugin specific functions +# --------------------------------- +# Please refer to lib/quantum_plugins/README.md for details. +source $TOP_DIR/lib/quantum_plugins/$Q_PLUGIN + +# Entry Points +# ------------ + +# configure_quantum() +# Set common config for all quantum server and agents. +function configure_quantum() { + _configure_quantum_common + iniset_rpc_backend quantum $QUANTUM_CONF DEFAULT + + if is_service_enabled q-svc; then + _configure_quantum_service + fi + if is_service_enabled q-agt; then + _configure_quantum_plugin_agent + fi + if is_service_enabled q-dhcp; then + _configure_quantum_dhcp_agent + fi + if is_service_enabled q-l3; then + _configure_quantum_l3_agent + fi + if is_service_enabled q-meta; then + _configure_quantum_metadata_agent + fi + + _configure_quantum_debug_command + + _cleanup_quantum +} + +function create_nova_conf_quantum() { + iniset $NOVA_CONF DEFAULT network_api_class "nova.network.quantumv2.api.API" + iniset $NOVA_CONF DEFAULT quantum_admin_username "$Q_ADMIN_USERNAME" + iniset $NOVA_CONF DEFAULT quantum_admin_password "$SERVICE_PASSWORD" + iniset $NOVA_CONF DEFAULT quantum_admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" + iniset $NOVA_CONF DEFAULT quantum_auth_strategy "$Q_AUTH_STRATEGY" + iniset $NOVA_CONF DEFAULT quantum_admin_tenant_name "$SERVICE_TENANT_NAME" + iniset $NOVA_CONF DEFAULT quantum_url "http://$Q_HOST:$Q_PORT" + + # set NOVA_VIF_DRIVER and optionally set options in nova_conf + quantum_plugin_create_nova_conf + + iniset $NOVA_CONF DEFAULT libvirt_vif_driver "$NOVA_VIF_DRIVER" + iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER" + if is_service_enabled q-meta; then + iniset $NOVA_CONF DEFAULT service_quantum_metadata_proxy "True" + fi +} + +# create_quantum_accounts() - Set up common required quantum accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service quantum admin # if enabled + +# Migrated from keystone_data.sh +function create_quantum_accounts() { + + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then + QUANTUM_USER=$(keystone user-create \ + --name=quantum \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + 
--email=quantum@example.com \ + | grep " id " | get_field 2) + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $QUANTUM_USER \ + --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + QUANTUM_SERVICE=$(keystone service-create \ + --name=quantum \ + --type=network \ + --description="Quantum Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $QUANTUM_SERVICE \ + --publicurl "http://$SERVICE_HOST:9696/" \ + --adminurl "http://$SERVICE_HOST:9696/" \ + --internalurl "http://$SERVICE_HOST:9696/" + fi + fi +} + +function create_quantum_initial_network() { + TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1) + + # Create a small network + # Since quantum command is executed in admin context at this point, + # ``--tenant_id`` needs to be specified. + if is_baremetal; then + sudo ovs-vsctl add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE + for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $PUBLIC_INTERFACE + sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE + done + NET_ID=$(quantum net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2) + SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + sudo ifconfig $OVS_PHYSICAL_BRIDGE up + else + NET_ID=$(quantum net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + fi + + if is_service_enabled q-l3; then + # Create a router, and add the private subnet as one of its interfaces + if [[ "$Q_USE_NAMESPACE" == "True" ]]; then + # If namespaces are enabled, create a tenant-owned router. + ROUTER_ID=$(quantum router-create --tenant_id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2) + else + # If namespaces are disabled, the L3 agent can only target + # a single router, which should not be tenant-owned. + ROUTER_ID=$(quantum router-create $Q_ROUTER_NAME | grep ' id ' | get_field 2) + fi + quantum router-interface-add $ROUTER_ID $SUBNET_ID + # Create an external network, and a subnet. Configure the external network as router gw + EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2) + EXT_GW_IP=$(quantum subnet-create --ip_version 4 ${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) + quantum router-gateway-set $ROUTER_ID $EXT_NET_ID + + if is_quantum_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then + CIDR_LEN=${FLOATING_RANGE#*/} + sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE + sudo ip link set $PUBLIC_BRIDGE up + ROUTER_GW_IP=`quantum port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' '{ print $8; }'` + sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP + fi + if [[ "$Q_USE_NAMESPACE" == "False" ]]; then + # Explicitly set router id in l3 agent configuration + iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID + fi + fi +} + +# init_quantum() - Initialize databases, etc. 
+function init_quantum() { + : +} + +# install_quantum() - Collect source and prepare +function install_quantum() { + git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH +} + +# install_quantumclient() - Collect source and prepare +function install_quantumclient() { + git_clone $QUANTUMCLIENT_REPO $QUANTUMCLIENT_DIR $QUANTUMCLIENT_BRANCH +} + +# install_quantum_agent_packages() - Collect source and prepare +function install_quantum_agent_packages() { + # install packages that is specific to plugin agent + quantum_plugin_install_agent_packages +} + +function setup_quantum() { + setup_develop $QUANTUM_DIR +} + +function setup_quantumclient() { + setup_develop $QUANTUMCLIENT_DIR +} + +# Start running processes, including screen +function start_quantum_service_and_check() { + # Start the Quantum service + screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" + echo "Waiting for Quantum to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9696; do sleep 1; done"; then + echo "Quantum did not start" + exit 1 + fi +} + +# Start running processes, including screen +function start_quantum_agents() { + # Start up the quantum agents if enabled + screen_it q-agt "python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" + screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE" + screen_it q-meta "python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE" + screen_it q-l3 "python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE" +} + +# stop_quantum() - Stop running processes (non-screen) +function stop_quantum() { + if is_service_enabled q-dhcp; then + pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') + [ ! -z "$pid" ] && sudo kill -9 $pid + fi +} + +# _cleanup_quantum() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function _cleanup_quantum() { + : +} + +# _configure_quantum_common() +# Set common config for all quantum server and agents. +# This MUST be called before other _configure_quantum_* functions. +function _configure_quantum_common() { + # Put config files in ``QUANTUM_CONF_DIR`` for everyone to find + if [[ ! -d $QUANTUM_CONF_DIR ]]; then + sudo mkdir -p $QUANTUM_CONF_DIR + fi + sudo chown $STACK_USER $QUANTUM_CONF_DIR + + cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF + + # set plugin-specific variables + # Q_PLUGIN_CONF_PATH, Q_PLUGIN_CONF_FILENAME, Q_DB_NAME, Q_PLUGIN_CLASS + quantum_plugin_configure_common + + if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then + echo "Quantum plugin not set.. 
exiting" + exit 1 + fi + + # If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``QUANTUM_CONF_DIR`` + mkdir -p /$Q_PLUGIN_CONF_PATH + Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME + cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE + + database_connection_url dburl $Q_DB_NAME + iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection $dburl + unset dburl + + _quantum_setup_rootwrap +} + +function _configure_quantum_debug_command() { + if [[ "$Q_USE_DEBUG_COMMAND" != "True" ]]; then + return + fi + + cp $QUANTUM_DIR/etc/l3_agent.ini $QUANTUM_TEST_CONFIG_FILE + + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT verbose False + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT debug False + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND" + # Intermediate fix until Quantum patch lands and then line above will + # be cleaned. + iniset $QUANTUM_TEST_CONFIG_FILE AGENT root_helper "$Q_RR_COMMAND" + + _quantum_setup_keystone $QUANTUM_TEST_CONFIG_FILE DEFAULT set_auth_url + _quantum_setup_interface_driver $QUANTUM_TEST_CONFIG_FILE + + quantum_plugin_configure_debug_command +} + +function _configure_quantum_dhcp_agent() { + AGENT_DHCP_BINARY="$QUANTUM_DIR/bin/quantum-dhcp-agent" + Q_DHCP_CONF_FILE=$QUANTUM_CONF_DIR/dhcp_agent.ini + + cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE + + iniset $Q_DHCP_CONF_FILE DEFAULT verbose True + iniset $Q_DHCP_CONF_FILE DEFAULT debug True + iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $Q_DHCP_CONF_FILE DEFAULT state_path $DATA_DIR/quantum + iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + + _quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url + _quantum_setup_interface_driver $Q_DHCP_CONF_FILE + + quantum_plugin_configure_dhcp_agent +} + +function _configure_quantum_l3_agent() { + AGENT_L3_BINARY="$QUANTUM_DIR/bin/quantum-l3-agent" + PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} + Q_L3_CONF_FILE=$QUANTUM_CONF_DIR/l3_agent.ini + + cp $QUANTUM_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE + + iniset $Q_L3_CONF_FILE DEFAULT verbose True + iniset $Q_L3_CONF_FILE DEFAULT debug True + iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $Q_L3_CONF_FILE DEFAULT state_path $DATA_DIR/quantum + iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + + _quantum_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url + _quantum_setup_interface_driver $Q_L3_CONF_FILE + + quantum_plugin_configure_l3_agent +} + +function _configure_quantum_metadata_agent() { + AGENT_META_BINARY="$QUANTUM_DIR/bin/quantum-metadata-agent" + Q_META_CONF_FILE=$QUANTUM_CONF_DIR/metadata_agent.ini + + cp $QUANTUM_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE + + iniset $Q_META_CONF_FILE DEFAULT verbose True + iniset $Q_META_CONF_FILE DEFAULT debug True + iniset $Q_META_CONF_FILE DEFAULT state_path $DATA_DIR/quantum + iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP + iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + + _quantum_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url +} + +# _configure_quantum_plugin_agent() - Set config files for quantum plugin agent +# It is called when q-agt is enabled. 
+function _configure_quantum_plugin_agent() { + # Specify the default root helper prior to agent configuration to + # ensure that an agent's configuration can override the default + iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" + + # Configure agent for plugin + quantum_plugin_configure_plugin_agent +} + +# _configure_quantum_service() - Set config files for quantum service +# It is called when q-svc is enabled. +function _configure_quantum_service() { + Q_API_PASTE_FILE=$QUANTUM_CONF_DIR/api-paste.ini + Q_POLICY_FILE=$QUANTUM_CONF_DIR/policy.json + + cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE + cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE + + if is_service_enabled $DATABASE_BACKENDS; then + recreate_database $Q_DB_NAME utf8 + else + echo "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin." + exit 1 + fi + + # Update either configuration file with plugin + iniset $QUANTUM_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS + + iniset $QUANTUM_CONF DEFAULT verbose True + iniset $QUANTUM_CONF DEFAULT debug True + iniset $QUANTUM_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP + + iniset $QUANTUM_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY + _quantum_setup_keystone $QUANTUM_CONF keystone_authtoken + # Comment out keystone authtoken configuration in api-paste.ini + # It is required to avoid any breakage in Quantum where the sample + # api-paste.ini has authtoken configurations. + _quantum_commentout_keystone_authtoken $Q_API_PASTE_FILE filter:authtoken + + # Configure plugin + quantum_plugin_configure_service +} + +# Utility Functions +#------------------ + +# _quantum_setup_rootwrap() - configure Quantum's rootwrap +function _quantum_setup_rootwrap() { + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + return + fi + # Deploy new rootwrap filters files (owned by root). 
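+    # The net effect of this block is a sudoers entry of roughly this form
+    # (illustrative paths; the real values come from the variables used below):
+    #   stack ALL=(root) NOPASSWD: /usr/local/bin/quantum-rootwrap /etc/quantum/rootwrap.conf *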
+    # Wipe any existing rootwrap.d files first
+    Q_CONF_ROOTWRAP_D=$QUANTUM_CONF_DIR/rootwrap.d
+    if [[ -d $Q_CONF_ROOTWRAP_D ]]; then
+        sudo rm -rf $Q_CONF_ROOTWRAP_D
+    fi
+    # Deploy filters to $QUANTUM_CONF_DIR/rootwrap.d
+    mkdir -p -m 755 $Q_CONF_ROOTWRAP_D
+    cp -pr $QUANTUM_DIR/etc/quantum/rootwrap.d/* $Q_CONF_ROOTWRAP_D/
+    sudo chown -R root:root $Q_CONF_ROOTWRAP_D
+    sudo chmod 644 $Q_CONF_ROOTWRAP_D/*
+    # Set up rootwrap.conf, pointing to $QUANTUM_CONF_DIR/rootwrap.d
+    sudo cp -p $QUANTUM_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE
+    sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE
+    sudo chown root:root $Q_RR_CONF_FILE
+    sudo chmod 0644 $Q_RR_CONF_FILE
+    # Specify rootwrap.conf as first parameter to quantum-rootwrap
+    ROOTWRAP_SUDOER_CMD="$QUANTUM_ROOTWRAP $Q_RR_CONF_FILE *"
+
+    # Set up the rootwrap sudoers for quantum
+    TEMPFILE=`mktemp`
+    echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
+    chmod 0440 $TEMPFILE
+    sudo chown root:root $TEMPFILE
+    sudo mv $TEMPFILE /etc/sudoers.d/quantum-rootwrap
+
+    # Update the root_helper
+    iniset $QUANTUM_CONF AGENT root_helper "$Q_RR_COMMAND"
+}
+
+# Configures keystone integration for quantum service and agents
+function _quantum_setup_keystone() {
+    local conf_file=$1
+    local section=$2
+    local use_auth_url=$3
+    if [[ -n $use_auth_url ]]; then
+        iniset $conf_file $section auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0"
+    else
+        iniset $conf_file $section auth_host $KEYSTONE_SERVICE_HOST
+        iniset $conf_file $section auth_port $KEYSTONE_AUTH_PORT
+        iniset $conf_file $section auth_protocol $KEYSTONE_SERVICE_PROTOCOL
+    fi
+    iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME
+    iniset $conf_file $section admin_user $Q_ADMIN_USERNAME
+    iniset $conf_file $section admin_password $SERVICE_PASSWORD
+    iniset $conf_file $section signing_dir $QUANTUM_AUTH_CACHE_DIR
+    # Create cache dir
+    sudo mkdir -p $QUANTUM_AUTH_CACHE_DIR
+    sudo chown $STACK_USER $QUANTUM_AUTH_CACHE_DIR
+    rm -f $QUANTUM_AUTH_CACHE_DIR/*
+}
+
+function _quantum_commentout_keystone_authtoken() {
+    local conf_file=$1
+    local section=$2
+
+    inicomment $conf_file $section auth_host
+    inicomment $conf_file $section auth_port
+    inicomment $conf_file $section auth_protocol
+    inicomment $conf_file $section auth_url
+
+    inicomment $conf_file $section admin_tenant_name
+    inicomment $conf_file $section admin_user
+    inicomment $conf_file $section admin_password
+    inicomment $conf_file $section signing_dir
+}
+
+function _quantum_setup_interface_driver() {
+    quantum_plugin_setup_interface_driver $1
+}
+
+# Functions for Quantum Exercises
+#--------------------------------
+
+function delete_probe() {
+    local from_net="$1"
+    net_id=`_get_net_id $from_net`
+    probe_id=`quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'`
+    quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-delete $probe_id
+}
+
+function setup_quantum_debug() {
+    if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
+        public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME`
+        quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $public_net_id
+        private_net_id=`_get_net_id $PRIVATE_NETWORK_NAME`
+        quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $private_net_id
+    fi
+}
+
+function teardown_quantum_debug() {
+    delete_probe $PUBLIC_NETWORK_NAME
+    delete_probe $PRIVATE_NETWORK_NAME
+}
+
+function _get_net_id() {
+    quantum --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD net-list | grep $1 | awk '{print $2}'
+}
+
+function _get_probe_cmd_prefix() {
+    local from_net="$1"
+    net_id=`_get_net_id $from_net`
+    probe_id=`quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1`
+    echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id"
+}
+
+function _ping_check_quantum() {
+    local from_net=$1
+    local ip=$2
+    local timeout_sec=$3
+    local expected=${4:-"True"}
+    local check_command=""
+    probe_cmd=`_get_probe_cmd_prefix $from_net`
+    if [[ "$expected" = "True" ]]; then
+        check_command="while ! $probe_cmd ping -w 1 -c 1 $ip; do sleep 1; done"
+    else
+        check_command="while $probe_cmd ping -w 1 -c 1 $ip; do sleep 1; done"
+    fi
+    if ! timeout $timeout_sec sh -c "$check_command"; then
+        if [[ "$expected" = "True" ]]; then
+            echo "[Fail] Couldn't ping server"
+        else
+            echo "[Fail] Could ping server"
+        fi
+        exit 1
+    fi
+}
+
+# ssh check
+function _ssh_check_quantum() {
+    local from_net=$1
+    local key_file=$2
+    local ip=$3
+    local user=$4
+    local timeout_sec=$5
+    local probe_cmd=""
+    probe_cmd=`_get_probe_cmd_prefix $from_net`
+    if ! timeout $timeout_sec sh -c "while ! $probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success ; do sleep 1; done"; then
+        echo "server didn't become ssh-able!"
+        exit 1
+    fi
+}
+
+# Quantum 3rd party programs
+#---------------------------
+# Please refer to lib/quantum_thirdparty/README.md for details.
+QUANTUM_THIRD_PARTIES=""
+for f in $TOP_DIR/lib/quantum_thirdparty/*; do
+    third_party=$(basename $f)
+    if is_service_enabled $third_party; then
+        source $TOP_DIR/lib/quantum_thirdparty/$third_party
+        QUANTUM_THIRD_PARTIES="$QUANTUM_THIRD_PARTIES,$third_party"
+    fi
+done
+
+function _quantum_third_party_do() {
+    for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do
+        ${1}_${third_party}
+    done
+}
+
+# configure_quantum_third_party() - Set config files, create data dirs, etc
+function configure_quantum_third_party() {
+    _quantum_third_party_do configure
+}
+
+# init_quantum_third_party() - Initialize databases, etc.
+function init_quantum_third_party() {
+    _quantum_third_party_do init
}
+
+# install_quantum_third_party() - Collect source and prepare
+function install_quantum_third_party() {
+    _quantum_third_party_do install
+}
+
+# start_quantum_third_party() - Start running processes, including screen
+function start_quantum_third_party() {
+    _quantum_third_party_do start
+}
+
+# stop_quantum_third_party() - Stop running processes (non-screen)
+function stop_quantum_third_party() {
+    _quantum_third_party_do stop
+}
+
+
+# Restore xtrace
+$XTRACE
diff --git a/lib/quantum_plugins/README.md b/lib/quantum_plugins/README.md
new file mode 100644
index 00000000..a66d35a3
--- /dev/null
+++ b/lib/quantum_plugins/README.md
@@ -0,0 +1,34 @@
+Quantum plugin specific files
+=============================
+Quantum plugins require plugin specific behavior.
+The files under the directory, ``lib/quantum_plugins/``, will be used
+when their service is enabled.
+Each plugin has ``lib/quantum_plugins/$Q_PLUGIN`` and defines the following
+functions.
+
+* filename: ``$Q_PLUGIN``
+  * The corresponding file name MUST be the same as the plugin name
+    ``$Q_PLUGIN``. Plugin specific configuration variables should be in
+    this file. A minimal skeleton is sketched below.
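+
+A minimal plugin skeleton might look like this (hypothetical ``myplugin``,
+for illustration only; the full set of required functions is listed under
+``functions`` below):
+
+    # lib/quantum_plugins/myplugin
+    function quantum_plugin_configure_common() {
+        Q_PLUGIN_CONF_PATH=etc/quantum/plugins/myplugin
+        Q_PLUGIN_CONF_FILENAME=myplugin.ini
+        Q_DB_NAME="myplugin_quantum"
+        Q_PLUGIN_CLASS="quantum.plugins.myplugin.MyPluginV2"
+    }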
+ +functions +--------- +``lib/quantum`` calls the following functions when the ``$Q_PLUGIN`` is enabled + +* ``quantum_plugin_create_nova_conf`` : + set ``NOVA_VIF_DRIVER`` and optionally set options in nova_conf + e.g. + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} +* ``quantum_plugin_install_agent_packages`` : + install packages that is specific to plugin agent + e.g. + install_package bridge-utils +* ``quantum_plugin_configure_common`` : + set plugin-specific variables, ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``, + ``Q_DB_NAME``, ``Q_PLUGIN_CLASS`` +* ``quantum_plugin_configure_debug_command`` +* ``quantum_plugin_configure_dhcp_agent`` +* ``quantum_plugin_configure_l3_agent`` +* ``quantum_plugin_configure_plugin_agent`` +* ``quantum_plugin_configure_service`` +* ``quantum_plugin_setup_interface_driver`` diff --git a/lib/quantum_plugins/bigswitch_floodlight b/lib/quantum_plugins/bigswitch_floodlight new file mode 100644 index 00000000..2c928bec --- /dev/null +++ b/lib/quantum_plugins/bigswitch_floodlight @@ -0,0 +1,55 @@ +# Quantum Big Switch/FloodLight plugin +# ------------------------------------ + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/quantum_plugins/ovs_base +source $TOP_DIR/lib/quantum_thirdparty/bigswitch_floodlight # for third party service specific configuration values + +function quantum_plugin_create_nova_conf() { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} +} + +function quantum_plugin_install_agent_packages() { + _quantum_ovs_base_install_agent_packages +} + +function quantum_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/bigswitch + Q_PLUGIN_CONF_FILENAME=restproxy.ini + Q_DB_NAME="restproxy_quantum" + Q_PLUGIN_CLASS="quantum.plugins.bigswitch.plugin.QuantumRestProxyV2" + BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} + BS_FL_CONTROLLER_TIMEOUT=${BS_FL_CONTROLLER_TIMEOUT:-10} +} + +function quantum_plugin_configure_debug_command() { + _quantum_ovs_base_configure_debug_command +} + +function quantum_plugin_configure_dhcp_agent() { + : +} + +function quantum_plugin_configure_l3_agent() { + _quantum_ovs_base_configure_l3_agent +} + +function quantum_plugin_configure_plugin_agent() { + : +} + +function quantum_plugin_configure_service() { + iniset /$Q_PLUGIN_CONF_FILE RESTPROXY servers $BS_FL_CONTROLLERS_PORT + iniset /$Q_PLUGIN_CONF_FILE RESTPROXY servertimeout $BS_FL_CONTROLLER_TIMEOUT +} + +function quantum_plugin_setup_interface_driver() { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver +} + +# Restore xtrace +$MY_XTRACE diff --git a/lib/quantum_plugins/brocade b/lib/quantum_plugins/brocade new file mode 100644 index 00000000..c372c19f --- /dev/null +++ b/lib/quantum_plugins/brocade @@ -0,0 +1,49 @@ +# Brocade Quantum Plugin +# ---------------------- + +# Save trace setting +BRCD_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +function is_quantum_ovs_base_plugin() { + return 1 +} + +function quantum_plugin_create_nova_conf() { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"} +} + +function quantum_plugin_install_agent_packages() { + install_package bridge-utils +} + +function quantum_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/brocade + Q_PLUGIN_CONF_FILENAME=brocade.ini + Q_DB_NAME="brcd_quantum" + 
Q_PLUGIN_CLASS="quantum.plugins.brocade.QuantumPlugin.BrocadePluginV2" +} + +function quantum_plugin_configure_debug_command() { + : +} + +function quantum_plugin_configure_dhcp_agent() { + : +} + +function quantum_plugin_configure_l3_agent() { + : +} + +function quantum_plugin_configure_plugin_agent() { + AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" +} + +function quantum_plugin_setup_interface_driver() { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver +} + +# Restore xtrace +$BRCD_XTRACE diff --git a/lib/quantum_plugins/linuxbridge b/lib/quantum_plugins/linuxbridge new file mode 100644 index 00000000..6d5d4e08 --- /dev/null +++ b/lib/quantum_plugins/linuxbridge @@ -0,0 +1,79 @@ +# Quantum Linux Bridge plugin +# --------------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +function is_quantum_ovs_base_plugin() { + # linuxbridge doesn't use OVS + return 1 +} + +function quantum_plugin_create_nova_conf() { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"} +} + +function quantum_plugin_install_agent_packages() { + install_package bridge-utils +} + +function quantum_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge + Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini + Q_DB_NAME="quantum_linux_bridge" + Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2" +} + +function quantum_plugin_configure_debug_command() { + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge +} + +function quantum_plugin_configure_dhcp_agent() { + : +} + +function quantum_plugin_configure_l3_agent() { + iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge +} + +function quantum_plugin_configure_plugin_agent() { + # Setup physical network interface mappings. Override + # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more + # complex physical network configurations. + if [[ "$LB_INTERFACE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then + LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE + fi + if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS + fi + AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" +} + +function quantum_plugin_configure_service() { + if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE VLANS tenant_network_type vlan + else + echo "WARNING - The linuxbridge plugin is using local tenant networks, with no connectivity between hosts." + fi + + # Override ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` + # for more complex physical network configurations. 
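+    # For example, with these (illustrative) ``localrc`` values:
+    #   PHYSICAL_NETWORK=physnet1
+    #   TENANT_VLAN_RANGE=1000:1999
+    # the block below yields ``network_vlan_ranges physnet1:1000:1999``.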
+ if [[ "$LB_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then + LB_VLAN_RANGES=$PHYSICAL_NETWORK + if [[ "$TENANT_VLAN_RANGE" != "" ]]; then + LB_VLAN_RANGES=$LB_VLAN_RANGES:$TENANT_VLAN_RANGE + fi + fi + if [[ "$LB_VLAN_RANGES" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES + fi +} + +function quantum_plugin_setup_interface_driver() { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver +} + +# Restore xtrace +$MY_XTRACE diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch new file mode 100644 index 00000000..181e7e71 --- /dev/null +++ b/lib/quantum_plugins/openvswitch @@ -0,0 +1,144 @@ +# Quantum Open vSwtich plugin +# --------------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/quantum_plugins/ovs_base + +function quantum_plugin_create_nova_conf() { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} + if [ "$VIRT_DRIVER" = 'xenserver' ]; then + iniset $NOVA_CONF DEFAULT xenapi_vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver + iniset $NOVA_CONF DEFAULT xenapi_ovs_integration_bridge $FLAT_NETWORK_BRIDGE + fi +} + +function quantum_plugin_install_agent_packages() { + _quantum_ovs_base_install_agent_packages +} + +function quantum_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch + Q_PLUGIN_CONF_FILENAME=ovs_quantum_plugin.ini + Q_DB_NAME="ovs_quantum" + Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2" +} + +function quantum_plugin_configure_debug_command() { + _quantum_ovs_base_configure_debug_command +} + +function quantum_plugin_configure_dhcp_agent() { + : +} + +function quantum_plugin_configure_l3_agent() { + _quantum_ovs_base_configure_l3_agent +} + +function quantum_plugin_configure_plugin_agent() { + # Setup integration bridge + OVS_BRIDGE=${OVS_BRIDGE:-br-int} + _quantum_ovs_base_setup_bridge $OVS_BRIDGE + + # Setup agent for tunneling + if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then + # Verify tunnels are supported + # REVISIT - also check kernel module support for GRE and patch ports + OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'` + if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then + echo "You are running OVS version $OVS_VERSION." + echo "OVS 1.4+ is required for tunneling between multiple hosts." + exit 1 + fi + iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True + iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP + fi + + # Setup physical network bridge mappings. Override + # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more + # complex physical network configurations. + if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then + OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE + + # Configure bridge manually with physical interface as port for multi-node + sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE + fi + if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS + fi + AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent" + + if [ "$VIRT_DRIVER" = 'xenserver' ]; then + # Nova will always be installed along with quantum for a domU + # devstack install, so it should be safe to rely on nova.conf + # for xenapi configuration. 
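+        # (An assumption worth noting: the dom0 rootwrap wrapper is expected
+        # to read its XenAPI connection settings, e.g. ``xenapi_connection_url``,
+        # out of ``$NOVA_CONF`` rather than from a quantum config file.)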
+ Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-xen-dom0 $NOVA_CONF" + # Under XS/XCP, the ovs agent needs to target the dom0 + # integration bridge. This is enabled by using a root wrapper + # that executes commands on dom0 via a XenAPI plugin. + iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_DOM0_COMMAND" + + # FLAT_NETWORK_BRIDGE is the dom0 integration bridge. To + # ensure the bridge lacks direct connectivity, set + # VM_VLAN=-1;VM_DEV=invalid in localrc + iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $FLAT_NETWORK_BRIDGE + + # The ovs agent needs to ensure that the ports associated with + # a given network share the same local vlan tag. On + # single-node XS/XCP, this requires monitoring both the dom0 + # bridge, where VM's are attached, and the domU bridge, where + # dhcp servers are attached. + if is_service_enabled q-dhcp; then + iniset /$Q_PLUGIN_CONF_FILE OVS domu_integration_bridge $OVS_BRIDGE + # DomU will use the regular rootwrap + iniset /$Q_PLUGIN_CONF_FILE AGENT domu_root_helper "$Q_RR_COMMAND" + # Plug the vm interface into the domU integration bridge. + sudo ip addr flush dev $GUEST_INTERFACE_DEFAULT + sudo ip link set $OVS_BRIDGE up + # Assign the VM IP only if it has been set explicitly + if [[ "$VM_IP" != "" ]]; then + sudo ip addr add $VM_IP dev $OVS_BRIDGE + fi + sudo ovs-vsctl add-port $OVS_BRIDGE $GUEST_INTERFACE_DEFAULT + fi + fi +} + +function quantum_plugin_configure_service() { + if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type gre + iniset /$Q_PLUGIN_CONF_FILE OVS tunnel_id_ranges $TENANT_TUNNEL_RANGES + elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type vlan + else + echo "WARNING - The openvswitch plugin is using local tenant networks, with no connectivity between hosts." + fi + + # Override ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` + # for more complex physical network configurations. + if [[ "$OVS_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then + OVS_VLAN_RANGES=$PHYSICAL_NETWORK + if [[ "$TENANT_VLAN_RANGE" != "" ]]; then + OVS_VLAN_RANGES=$OVS_VLAN_RANGES:$TENANT_VLAN_RANGE + fi + fi + if [[ "$OVS_VLAN_RANGES" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges $OVS_VLAN_RANGES + fi + + # Enable tunnel networks if selected + if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True + fi +} + +function quantum_plugin_setup_interface_driver() { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver +} + +# Restore xtrace +$MY_XTRACE diff --git a/lib/quantum_plugins/ovs_base b/lib/quantum_plugins/ovs_base new file mode 100644 index 00000000..8563674f --- /dev/null +++ b/lib/quantum_plugins/ovs_base @@ -0,0 +1,49 @@ +# common functions for ovs based plugin +# ------------------------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +function is_quantum_ovs_base_plugin() { + # Yes, we use OVS. + return 0 +} + +function _quantum_ovs_base_setup_bridge() { + local bridge=$1 + quantum-ovs-cleanup + sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge + sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge +} + +function _quantum_ovs_base_install_agent_packages() { + local kernel_version + # Install deps + # FIXME add to ``files/apts/quantum``, but don't install if not needed! 
+ if is_ubuntu; then + kernel_version=`cat /proc/version | cut -d " " -f3` + install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version + else + ### FIXME(dtroyer): Find RPMs for OpenVSwitch + echo "OpenVSwitch packages need to be located" + # Fedora does not started OVS by default + restart_service openvswitch + fi +} + +function _quantum_ovs_base_configure_debug_command() { + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE +} + +function _quantum_ovs_base_configure_l3_agent() { + iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE + + quantum-ovs-cleanup + sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE + # ensure no IP is configured on the public bridge + sudo ip addr flush dev $PUBLIC_BRIDGE +} + +# Restore xtrace +$MY_XTRACE diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu new file mode 100644 index 00000000..2dfd4f70 --- /dev/null +++ b/lib/quantum_plugins/ryu @@ -0,0 +1,66 @@ +# Quantum Ryu plugin +# ------------------ + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/quantum_plugins/ovs_base +source $TOP_DIR/lib/quantum_thirdparty/ryu # for configuration value + +function quantum_plugin_create_nova_conf() { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} + iniset $NOVA_CONF DEFAULT libvirt_ovs_integration_bridge "$OVS_BRIDGE" +} + +function quantum_plugin_install_agent_packages() { + _quantum_ovs_base_install_agent_packages + + # quantum_ryu_agent requires ryu module + install_ryu +} + +function quantum_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/ryu + Q_PLUGIN_CONF_FILENAME=ryu.ini + Q_DB_NAME="ovs_quantum" + Q_PLUGIN_CLASS="quantum.plugins.ryu.ryu_quantum_plugin.RyuQuantumPluginV2" +} + +function quantum_plugin_configure_debug_command() { + _quantum_ovs_base_configure_debug_command + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT +} + +function quantum_plugin_configure_dhcp_agent() { + iniset $Q_DHCP_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT +} + +function quantum_plugin_configure_l3_agent() { + iniset $Q_L3_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT + _quantum_ovs_base_configure_l3_agent +} + +function quantum_plugin_configure_plugin_agent() { + # Set up integration bridge + OVS_BRIDGE=${OVS_BRIDGE:-br-int} + _quantum_ovs_base_setup_bridge $OVS_BRIDGE + if [ -n "$RYU_INTERNAL_INTERFACE" ]; then + sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $RYU_INTERNAL_INTERFACE + fi + iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $OVS_BRIDGE + AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py" +} + +function quantum_plugin_configure_service() { + iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT +} + +function quantum_plugin_setup_interface_driver() { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver + iniset $conf_file DEFAULT ovs_use_veth True +} + +# Restore xtrace +$MY_XTRACE diff --git a/lib/quantum_thirdparty/README.md b/lib/quantum_thirdparty/README.md new file mode 100644 index 00000000..3b5837d7 --- /dev/null +++ b/lib/quantum_thirdparty/README.md @@ -0,0 +1,36 @@ +Quantum third party specific files +================================== +Some Quantum plugins require third party programs to function. 
+The files under the directory, ``lib/quantum_thirdparty/``, will be used
+when their services are enabled.
+Third party program specific configuration variables should be in this file.
+
+* filename: ``<third_party>``
+  * The corresponding file name should be the same as the service name,
+    ``<third_party>``.
+
+functions
+---------
+``lib/quantum`` calls the following functions when the ``<third_party>``
+service is enabled
+
+functions to be implemented
+* ``configure_<third_party>``:
+  set config files, create data dirs, etc
+  e.g.
+    sudo python setup.py deploy
+    iniset $XXXX_CONF...
+
+* ``init_<third_party>``:
+  initialize databases, etc
+
+* ``install_<third_party>``:
+  collect source and prepare
+  e.g.
+    git clone xxx
+
+* ``start_<third_party>``:
+  start running processes, including screen
+  e.g.
+    screen_it XXXX "cd $XXXX_DIR && $XXXX_DIR/bin/XXXX-bin"
+
+* ``stop_<third_party>``:
+  stop running processes (non-screen)
diff --git a/lib/quantum_thirdparty/bigswitch_floodlight b/lib/quantum_thirdparty/bigswitch_floodlight
new file mode 100644
index 00000000..60e39248
--- /dev/null
+++ b/lib/quantum_thirdparty/bigswitch_floodlight
@@ -0,0 +1,50 @@
+# Big Switch/FloodLight OpenFlow Controller
+# ------------------------------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80}
+BS_FL_OF_PORT=${BS_FL_OF_PORT:-6633}
+OVS_BRIDGE=${OVS_BRIDGE:-br-int}
+
+function configure_bigswitch_floodlight() {
+    :
+}
+
+function init_bigswitch_floodlight() {
+    install_quantum_agent_packages
+
+    echo -n "Installing OVS managed by the openflow controllers:"
+    echo ${BS_FL_CONTROLLERS_PORT}
+
+    # Create local OVS bridge and configure it
+    sudo ovs-vsctl --no-wait -- --if-exists del-br ${OVS_BRIDGE}
+    sudo ovs-vsctl --no-wait add-br ${OVS_BRIDGE}
+    sudo ovs-vsctl --no-wait br-set-external-id ${OVS_BRIDGE} bridge-id ${OVS_BRIDGE}
+
+    ctrls=
+    for ctrl in `echo ${BS_FL_CONTROLLERS_PORT} | tr ',' ' '`
+    do
+        ctrl=${ctrl%:*}
+        ctrls="${ctrls} tcp:${ctrl}:${BS_FL_OF_PORT}"
+    done
+    echo "Adding network controllers: " ${ctrls}
+    sudo ovs-vsctl --no-wait set-controller ${OVS_BRIDGE} ${ctrls}
+}
+
+function install_bigswitch_floodlight() {
+    :
+}
+
+function start_bigswitch_floodlight() {
+    :
+}
+
+function stop_bigswitch_floodlight() {
+    :
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/quantum_thirdparty/ryu b/lib/quantum_thirdparty/ryu
new file mode 100644
index 00000000..7a01923c
--- /dev/null
+++ b/lib/quantum_thirdparty/ryu
@@ -0,0 +1,73 @@
+# Ryu OpenFlow Controller
+# -----------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+RYU_DIR=$DEST/ryu
+# Ryu API Host
+RYU_API_HOST=${RYU_API_HOST:-127.0.0.1}
+# Ryu API Port
+RYU_API_PORT=${RYU_API_PORT:-8080}
+# Ryu OFP Host
+RYU_OFP_HOST=${RYU_OFP_HOST:-127.0.0.1}
+# Ryu OFP Port
+RYU_OFP_PORT=${RYU_OFP_PORT:-6633}
+# Ryu Applications
+RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest}
+# Ryu configuration
+RYU_CONF_CONTENTS=${RYU_CONF_CONTENTS:-"
+--app_lists=$RYU_APPS
+--wsapi_host=$RYU_API_HOST
+--wsapi_port=$RYU_API_PORT
+--ofp_listen_host=$RYU_OFP_HOST
+--ofp_tcp_listen_port=$RYU_OFP_PORT
+--quantum_url=http://$Q_HOST:$Q_PORT
+--quantum_admin_username=$Q_ADMIN_USERNAME
+--quantum_admin_password=$SERVICE_PASSWORD
+--quantum_admin_tenant_name=$SERVICE_TENANT_NAME
+--quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0
+--quantum_auth_strategy=$Q_AUTH_STRATEGY
+--quantum_controller_addr=tcp:$RYU_OFP_HOST:$RYU_OFP_PORT
+"}
+
+function configure_ryu() {
+    setup_develop $RYU_DIR
+}
+
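+# ``RYU_CONF_CONTENTS`` above is plain ryu-manager flagfile syntax; it is
+# written verbatim to ``$RYU_CONF_DIR/ryu.conf`` by ``init_ryu()`` and handed
+# to ``ryu-manager --flagfile`` at startup. To point the agents at a remote
+# controller, override the address in ``localrc`` (illustrative value):
+#   RYU_OFP_HOST=192.0.2.10
+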
+function init_ryu() {
+    RYU_CONF_DIR=/etc/ryu
+    if [[ ! -d $RYU_CONF_DIR ]]; then
+        sudo mkdir -p $RYU_CONF_DIR
+    fi
+    sudo chown $STACK_USER $RYU_CONF_DIR
+    RYU_CONF=$RYU_CONF_DIR/ryu.conf
+    sudo rm -rf $RYU_CONF
+
+    echo "${RYU_CONF_CONTENTS}" > $RYU_CONF
+}
+
+# install_ryu can be called multiple times as quantum_plugins/ryu may call
+# this function for quantum-ryu-agent
+# Make this function idempotent and avoid cloning the same repo many times
+# when RECLONE=yes
+_RYU_INSTALLED=${_RYU_INSTALLED:-False}
+function install_ryu() {
+    if [[ "$_RYU_INSTALLED" == "False" ]]; then
+        git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH
+        _RYU_INSTALLED=True
+    fi
+}
+
+function start_ryu() {
+    screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --flagfile $RYU_CONF"
+}
+
+function stop_ryu() {
+    :
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/rpc_backend b/lib/rpc_backend
new file mode 100644
index 00000000..f35f9dbd
--- /dev/null
+++ b/lib/rpc_backend
@@ -0,0 +1,123 @@
+# lib/rpc_backend
+# Interface for interacting with different rpc backends
+# rpc backend settings
+
+# Dependencies:
+# ``functions`` file
+# ``RABBIT_{HOST|PASSWORD}`` must be defined when RabbitMQ is used
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# check_rpc_backend
+# install_rpc_backend
+# restart_rpc_backend
+# iniset_rpc_backend
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+# Entry Points
+# ------------
+
+# Make sure we only have one rpc backend enabled.
+# Also check the specified rpc backend is available on your platform.
+function check_rpc_backend() {
+    local rpc_backend_cnt=0
+    for svc in qpid zeromq rabbit; do
+        is_service_enabled $svc &&
+            ((rpc_backend_cnt++))
+    done
+    if [ "$rpc_backend_cnt" -gt 1 ]; then
+        echo "ERROR: only one rpc backend may be enabled,"
+        echo "       set only one of 'rabbit', 'qpid', 'zeromq'"
+        echo "       via ENABLED_SERVICES."
+    elif [ "$rpc_backend_cnt" == 0 ]; then
+        echo "ERROR: at least one rpc backend must be enabled,"
+        echo "       set one of 'rabbit', 'qpid', 'zeromq'"
+        echo "       via ENABLED_SERVICES."
+    fi
+
+    if is_service_enabled qpid && ! qpid_is_supported; then
+        echo "Qpid support is not available for this version of your distribution."
+        exit 1
+    fi
+}
+
+# install rpc backend
+function install_rpc_backend() {
+    if is_service_enabled rabbit; then
+        # Install rabbitmq-server
+        # the temp file is necessary due to LP: #878600
+        tfile=$(mktemp)
+        install_package rabbitmq-server > "$tfile" 2>&1
+        cat "$tfile"
+        rm -f "$tfile"
+    elif is_service_enabled qpid; then
+        if is_fedora; then
+            install_package qpid-cpp-server-daemon
+        elif is_ubuntu; then
+            install_package qpidd
+        else
+            exit_distro_not_supported "qpid installation"
+        fi
+    elif is_service_enabled zeromq; then
+        if is_fedora; then
+            install_package zeromq python-zmq
+        elif is_ubuntu; then
+            install_package libzmq1 python-zmq
+        elif is_suse; then
+            install_package libzmq1 python-pyzmq
+        else
+            exit_distro_not_supported "zeromq installation"
+        fi
+    fi
+}
+
+# restart the rpc backend
+function restart_rpc_backend() {
+    if is_service_enabled rabbit; then
+        # Start rabbitmq-server
+        echo_summary "Starting RabbitMQ"
+        if is_fedora || is_suse; then
+            # service is not started by default
+            restart_service rabbitmq-server
+        fi
+        # change the rabbit password since the default is "guest"
+        sudo rabbitmqctl change_password guest $RABBIT_PASSWORD
+    elif is_service_enabled qpid; then
+        echo_summary "Starting qpid"
+        restart_service qpidd
+    fi
+}
+
+# iniset configuration
+function iniset_rpc_backend() {
+    local package=$1
+    local file=$2
+    local section=$3
+    if is_service_enabled zeromq; then
+        iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_zmq
+    elif is_service_enabled qpid; then
+        iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_qpid
+    elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
+        iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_kombu
+        iniset $file $section rabbit_host $RABBIT_HOST
+        iniset $file $section rabbit_password $RABBIT_PASSWORD
+    fi
+}
+
+# Check if qpid can be used on the current distro.
+# qpid_is_supported
+function qpid_is_supported() {
+    if [[ -z "$DISTRO" ]]; then
+        GetDistro
+    fi
+
+    # Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is
+    # not in openSUSE either right now.
+    ( ! ([[ "$DISTRO" = "oneiric" ]] || is_suse) )
+}
+
+# Restore xtrace
+$XTRACE
diff --git a/lib/swift b/lib/swift
new file mode 100644
index 00000000..5ba7e56f
--- /dev/null
+++ b/lib/swift
@@ -0,0 +1,378 @@
+# lib/swift
+# Functions to control the configuration and operation of the **Swift** service
+
+# Dependencies:
+# ``functions`` file
+# ``DEST``, ``SCREEN_NAME``, ``SWIFT_HASH`` must be defined
+# ``STACK_USER`` must be defined
+# ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined
+# ``lib/keystone`` file
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_swift
+# configure_swift
+# init_swift
+# start_swift
+# stop_swift
+# cleanup_swift
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default directories
+SWIFT_DIR=$DEST/swift
+SWIFTCLIENT_DIR=$DEST/python-swiftclient
+SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift}
+
+# TODO: add logging to different location.
+
+# Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects.
+# Default is the common DevStack data directory.
+SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift}
+
+# Set ``SWIFT_CONFIG_DIR`` to the location of the configuration files.
+# Default is ``/etc/swift``.
+SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift} + +# DevStack will create a loop-back disk formatted as XFS to store the +# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in +# kilobytes. +# Default is 1 gigabyte. +SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} + +# The ring uses a configurable number of bits from a path’s MD5 hash as +# a partition index that designates a device. The number of bits kept +# from the hash is known as the partition power, and 2 to the partition +# power indicates the partition count. Partitioning the full MD5 hash +# ring allows other parts of the cluster to work in batches of items at +# once which ends up either more efficient or at least less complex than +# working with each item separately or the entire cluster all at once. +# By default we define 9 for the partition count (which mean 512). +SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} + +# Set ``SWIFT_REPLICAS`` to configure how many replicas are to be +# configured for your Swift cluster. By default the three replicas would need a +# bit of IO and Memory on a VM you may want to lower that to 1 if you want to do +# only some quick testing. +SWIFT_REPLICAS=${SWIFT_REPLICAS:-3} +SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS}) + +# Set ``OBJECT_PORT_BASE``, ``CONTAINER_PORT_BASE``, ``ACCOUNT_PORT_BASE`` +# Port bases used in port number calclution for the service "nodes" +# The specified port number will be used, the additinal ports calculated by +# base_port + node_num * 10 +OBJECT_PORT_BASE=6010 +CONTAINER_PORT_BASE=6011 +ACCOUNT_PORT_BASE=6012 + + +# Entry Points +# ------------ + +# cleanup_swift() - Remove residual data files +function cleanup_swift() { + rm -f ${SWIFT_CONFIG_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz} + if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then + sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 + fi + if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then + rm ${SWIFT_DATA_DIR}/drives/images/swift.img + fi +} + +# configure_swift() - Set config files, create data dirs and loop image +function configure_swift() { + local swift_auth_server + local node_number + local swift_node_config + local swift_log_dir + + setup_develop $SWIFT_DIR + + # Make sure to kill all swift processes first + swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true + + # First do a bit of setup by creating the directories and + # changing the permissions so we can run it as our user. + + USER_GROUP=$(id -g) + sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache,run,logs} + sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR} + + # Create a loopback disk and format it to XFS. + if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then + if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then + sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 + sudo rm -f ${SWIFT_DATA_DIR}/drives/images/swift.img + fi + fi + + mkdir -p ${SWIFT_DATA_DIR}/drives/images + sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img + sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img + + dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \ + bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} + + # Make a fresh XFS filesystem + mkfs.xfs -f -i size=1024 ${SWIFT_DATA_DIR}/drives/images/swift.img + + # Mount the disk with mount options to make it as efficient as possible + mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1 + if ! 
+        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \
+            ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1
+    fi
+
+    # Create a link to the above mount and
+    # create all of the directories needed to emulate a few different servers
+    for node_number in ${SWIFT_REPLICAS_SEQ}; do
+        sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$node_number ${SWIFT_DATA_DIR}/$node_number;
+        drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number}
+        node=${SWIFT_DATA_DIR}/${node_number}/node
+        node_device=${node}/sdb1
+        [[ -d $node ]] && continue
+        [[ -d $drive ]] && continue
+        sudo install -o ${USER} -g $USER_GROUP -d $drive
+        sudo install -o ${USER} -g $USER_GROUP -d $node_device
+        sudo chown -R $USER: ${node}
+    done
+
+    sudo mkdir -p ${SWIFT_CONFIG_DIR}/{object,container,account}-server
+    sudo chown -R $USER: ${SWIFT_CONFIG_DIR}
+
+    if [[ "$SWIFT_CONFIG_DIR" != "/etc/swift" ]]; then
+        # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed.
+        # Create a symlink if the config dir is moved
+        sudo ln -sf ${SWIFT_CONFIG_DIR} /etc/swift
+    fi
+
+    # Swift uses rsync to synchronize data between all the different
+    # partitions (which makes more sense in a multi-node setup); here we
+    # configure it with our own rsyncd.conf.
+    sed -e "
+        s/%GROUP%/${USER_GROUP}/;
+        s/%USER%/$USER/;
+        s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,;
+    " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf
+    # rsyncd.conf is only prepared for 4 nodes
+    if is_ubuntu; then
+        sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync
+    else
+        sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync
+    fi
+
+    if is_service_enabled swift3; then
+        swift_auth_server="s3token "
+    fi
+
+    # By default Swift is installed with the tempauth middleware, which has
+    # built-in default usernames and passwords. If Keystone is enabled, the
+    # keystoneauth middleware is configured instead.
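+    # As a sketch of the outcome: with both keystone and swift3 enabled,
+    # ``swift_auth_server`` ends up as "s3token authtoken keystoneauth" and
+    # is spliced into the proxy pipeline below.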
+    if is_service_enabled key; then
+        swift_auth_server+="authtoken keystoneauth"
+    else
+        swift_auth_server=tempauth
+    fi
+
+    SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONFIG_DIR}/proxy-server.conf
+    cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER}
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${USER}
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONFIG_DIR}
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level DEBUG
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080}
+
+    # Only enable Swift3 if we have it enabled in ENABLED_SERVICES
+    is_service_enabled swift3 && swift3=swift3 || swift3=""
+
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} pipeline:main pipeline "catch_errors healthcheck cache ratelimit ${swift3} ${swift_auth_server} proxy-logging proxy-server"
+
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true
+
+    # Configure Keystone
+    sed -i '/^# \[filter:authtoken\]/,/^# \[filter:keystoneauth\]$/ s/^#[ \t]*//' ${SWIFT_CONFIG_PROXY_SERVER}
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_port $KEYSTONE_AUTH_PORT
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken signing_dir $SWIFT_AUTH_CACHE_DIR
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin"
+
+    if is_service_enabled swift3; then
+        cat <<EOF >>${SWIFT_CONFIG_PROXY_SERVER}
+# NOTE(chmou): s3token middleware is not updated yet to use only
+# username and password.
+[filter:s3token]
+paste.filter_factory = keystone.middleware.s3_token:filter_factory
+auth_port = ${KEYSTONE_AUTH_PORT}
+auth_host = ${KEYSTONE_AUTH_HOST}
+auth_protocol = ${KEYSTONE_AUTH_PROTOCOL}
+auth_token = ${SERVICE_TOKEN}
+admin_token = ${SERVICE_TOKEN}
+
+[filter:swift3]
+use = egg:swift3#swift3
+EOF
+    fi
+
+    cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONFIG_DIR}/swift.conf
+    iniset ${SWIFT_CONFIG_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH}
+
+    # This function generates an object/container/account server configuration
+    # emulating the requested number of nodes on different ports
+    function generate_swift_config() {
+        local swift_node_config=$1
+        local node_id=$2
+        local bind_port=$3
+        local server_type=$4
+
+        log_facility=$[ node_id - 1 ]
+        node_path=${SWIFT_DATA_DIR}/${node_number}
+
+        iniuncomment ${swift_node_config} DEFAULT user
+        iniset ${swift_node_config} DEFAULT user ${USER}
+
+        iniuncomment ${swift_node_config} DEFAULT bind_port
+        iniset ${swift_node_config} DEFAULT bind_port ${bind_port}
+
+        iniuncomment ${swift_node_config} DEFAULT swift_dir
+        iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONFIG_DIR}
+
+        iniuncomment ${swift_node_config} DEFAULT devices
+        iniset ${swift_node_config} DEFAULT devices ${node_path}
+
+        iniuncomment ${swift_node_config} DEFAULT log_facility
+        iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility}
+
+        iniuncomment ${swift_node_config} DEFAULT mount_check
+        iniset ${swift_node_config} DEFAULT mount_check false
+
+        iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode
+        iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes
+    }
+
+    for node_number in ${SWIFT_REPLICAS_SEQ}; do
+        swift_node_config=${SWIFT_CONFIG_DIR}/object-server/${node_number}.conf
+        cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config}
+        generate_swift_config ${swift_node_config} ${node_number} $[OBJECT_PORT_BASE + 10 * (node_number - 1)] object
+        iniset ${swift_node_config} filter:recon recon_cache_path ${SWIFT_DATA_DIR}/cache
+        # Using a sed and not iniset/iniuncomment because we want a global
+        # modification and to make sure it works for new sections.
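+        # For instance, a commented sample line such as
+        #     # recon_cache_path = /var/cache/swift
+        # is rewritten to
+        #     recon_cache_path = ${SWIFT_DATA_DIR}/cache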
+        sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}
+
+        swift_node_config=${SWIFT_CONFIG_DIR}/container-server/${node_number}.conf
+        cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config}
+        generate_swift_config ${swift_node_config} ${node_number} $[CONTAINER_PORT_BASE + 10 * (node_number - 1)] container
+        iniuncomment ${swift_node_config} app:container-server allow_versions
+        iniset ${swift_node_config} app:container-server allow_versions "true"
+        sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}
+
+        swift_node_config=${SWIFT_CONFIG_DIR}/account-server/${node_number}.conf
+        cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config}
+        generate_swift_config ${swift_node_config} ${node_number} $[ACCOUNT_PORT_BASE + 10 * (node_number - 1)] account
+        sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}
+    done
+
+    swift_log_dir=${SWIFT_DATA_DIR}/logs
+    rm -rf ${swift_log_dir}
+    mkdir -p ${swift_log_dir}/hourly
+    sudo chown -R $USER:adm ${swift_log_dir}
+    sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \
+        tee /etc/rsyslog.d/10-swift.conf
+}
+
+# configure_swiftclient() - Set config files, create data dirs, etc
+function configure_swiftclient() {
+    setup_develop $SWIFTCLIENT_DIR
+}
+
+# init_swift() - Initialize rings
+function init_swift() {
+    local node_number
+    # Make sure to kill all swift processes first
+    swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
+
+    # This is where we create three different rings for swift with
+    # different object servers binding on different ports.
+    pushd ${SWIFT_CONFIG_DIR} >/dev/null && {
+
+        rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
+
+        swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
+        swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
+        swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
+
+        for node_number in ${SWIFT_REPLICAS_SEQ}; do
+            swift-ring-builder object.builder add z${node_number}-127.0.0.1:$[OBJECT_PORT_BASE + 10 * (node_number - 1)]/sdb1 1
+            swift-ring-builder container.builder add z${node_number}-127.0.0.1:$[CONTAINER_PORT_BASE + 10 * (node_number - 1)]/sdb1 1
+            swift-ring-builder account.builder add z${node_number}-127.0.0.1:$[ACCOUNT_PORT_BASE + 10 * (node_number - 1)]/sdb1 1
+        done
+        swift-ring-builder object.builder rebalance
+        swift-ring-builder container.builder rebalance
+        swift-ring-builder account.builder rebalance
+    } && popd >/dev/null
+
+    # Create cache dir
+    sudo mkdir -p $SWIFT_AUTH_CACHE_DIR
+    sudo chown $STACK_USER $SWIFT_AUTH_CACHE_DIR
+    rm -f $SWIFT_AUTH_CACHE_DIR/*
+}
+
+function install_swift() {
+    git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
+}
+
+function install_swiftclient() {
+    git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH
+}
+
+
+# start_swift() - Start running processes, including screen
+function start_swift() {
+    # (re)start rsyslog
+    restart_service rsyslog
+    # Start rsync
+    if is_ubuntu; then
+        sudo /etc/init.d/rsync restart || :
+    else
+        sudo systemctl start xinetd.service
+    fi
+
+    # First spawn all the swift services then kill the
+    # proxy service so we can run it in foreground in screen.
+    # ``swift-init ... {stop|restart}`` exits with '1' if no servers are running,
+    # ignore it just in case
+    swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true
+    swift-init --run-dir=${SWIFT_DATA_DIR}/run proxy stop || true
+    screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v"
+}
+
+# stop_swift() - Stop running processes (non-screen)
+function stop_swift() {
+    # screen normally killed by unstack.sh
+    swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
+}
+
+# Restore xtrace
+$XTRACE
diff --git a/lib/tempest b/lib/tempest
new file mode 100644
index 00000000..364323de
--- /dev/null
+++ b/lib/tempest
@@ -0,0 +1,297 @@
+# lib/tempest
+# Install and configure Tempest
+
+# Dependencies:
+# ``functions`` file
+# ``lib/nova`` service is running
+#
+# - ``DEST``, ``FILES``
+# - ``ADMIN_PASSWORD``
+# - ``DEFAULT_IMAGE_NAME``
+# - ``S3_SERVICE_PORT``
+# - ``SERVICE_HOST``
+# - ``BASE_SQL_CONN`` (declared in ``lib/database``)
+# - ``PUBLIC_NETWORK_NAME``
+# - ``Q_USE_NAMESPACE``
+# - ``Q_ROUTER_NAME``
+# - ``VIRT_DRIVER``
+# - ``LIBVIRT_TYPE``
+# - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone
+# Optional Dependencies:
+# ALT_* (similar vars exist in keystone_data.sh)
+# ``LIVE_MIGRATION_AVAILABLE``
+# ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION``
+# ``DEFAULT_INSTANCE_TYPE``
+# ``DEFAULT_INSTANCE_USER``
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_tempest
+# configure_tempest
+# init_tempest
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default directories
+TEMPEST_DIR=$DEST/tempest
+TEMPEST_CONF_DIR=$TEMPEST_DIR/etc
+TEMPEST_CONF=$TEMPEST_CONF_DIR/tempest.conf
+
+NOVA_SOURCE_DIR=$DEST/nova
+
+BUILD_INTERVAL=3
+BUILD_TIMEOUT=400
+
+
+BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-0.3.0"
+
+# Entry Points
+# ------------
+
+# configure_tempest() - Set config files, create data dirs, etc
+function configure_tempest() {
+    setup_develop $TEMPEST_DIR
+    local image_lines
+    local images
+    local num_images
+    local image_uuid
+    local image_uuid_alt
+    local errexit
+    local password
+    local line
+    local flavors
+    local flavor_ref
+    local flavor_lines
+    local public_network_id
+    local public_router_id
+    local tenant_networks_reachable
+    local boto_instance_type="m1.tiny"
+
+    # TODO(afazekas):
+    # sudo python setup.py deploy
+
+    # This function exits on an error so that errors don't compound and you see
+    # only the first error that occurred.
+    errexit=$(set +o | grep errexit)
+    set -o errexit
+
+    # Save IFS
+    ifs=$IFS
+
+    # Glance should already contain images to be used in tempest
+    # testing. Here we simply look for images stored in Glance
+    # and set the appropriate variables for use in the tempest config.
+    # We ignore ramdisk and kernel images and look for the default image
+    # ``DEFAULT_IMAGE_NAME``. If not found, we set the ``image_uuid`` to the
+    # first image returned and set ``image_uuid_alt`` to the second,
+    # if there is more than one returned...
+    # ... Also ensure we only take active images, so we don't get snapshots in process
+    declare -a images
+
+    while read -r IMAGE_NAME IMAGE_UUID; do
+        if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then
+            image_uuid="$IMAGE_UUID"
+            image_uuid_alt="$IMAGE_UUID"
+        fi
+        images+=($IMAGE_UUID)
+    done < <(glance image-list --status=active | awk -F'|' '!/^(\+--)|ID|aki|ari/ { print $3,$2 }')
+
+    case "${#images[*]}" in
+        0)
+            echo "Found no valid images to use!"
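+            # Tempest cannot run without at least one usable image, so there
+            # is nothing sensible to write into tempest.conf; bail out.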
+            exit 1
+            ;;
+        1)
+            if [ -z "$image_uuid" ]; then
+                image_uuid=${images[0]}
+                image_uuid_alt=${images[0]}
+            fi
+            ;;
+        *)
+            if [ -z "$image_uuid" ]; then
+                image_uuid=${images[0]}
+                image_uuid_alt=${images[1]}
+            fi
+            ;;
+    esac
+
+    # Create tempest.conf from tempest.conf.sample
+    # copy every time, because the image UUIDs are going to change
+    cp $TEMPEST_CONF.sample $TEMPEST_CONF
+
+    password=${ADMIN_PASSWORD:-secrete}
+
+    # See files/keystone_data.sh where alt_demo user
+    # and tenant are set up...
+    ALT_USERNAME=${ALT_USERNAME:-alt_demo}
+    ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo}
+
+    # If ``DEFAULT_INSTANCE_TYPE`` is not declared, use the new behavior:
+    # Tempest creates instance types for itself
+    if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then
+        nova flavor-create m1.nano 42 64 0 1
+        flavor_ref=42
+        boto_instance_type=m1.nano
+        nova flavor-create m1.micro 84 128 0 1
+        flavor_ref_alt=84
+    else
+        # Check Nova for existing flavors and, if set, look for the
+        # ``DEFAULT_INSTANCE_TYPE`` and use that.
+        boto_instance_type=$DEFAULT_INSTANCE_TYPE
+        flavor_lines=`nova flavor-list`
+        IFS=$'\r\n'
+        flavors=""
+        for line in $flavor_lines; do
+            f=$(echo $line | awk "/ $DEFAULT_INSTANCE_TYPE / { print \$2 }")
+            flavors="$flavors $f"
+        done
+
+        for line in $flavor_lines; do
+            flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`"
+        done
+
+        IFS=" "
+        flavors=($flavors)
+        num_flavors=${#flavors[*]}
+        echo "Found $num_flavors flavors"
+        if [[ $num_flavors -eq 0 ]]; then
+            echo "Found no valid flavors to use!"
+            exit 1
+        fi
+        flavor_ref=${flavors[0]}
+        flavor_ref_alt=$flavor_ref
+        if [[ $num_flavors -gt 1 ]]; then
+            flavor_ref_alt=${flavors[1]}
+        fi
+    fi
+
+    if [ "$Q_USE_NAMESPACE" != "False" ]; then
+        tenant_networks_reachable=false
+    else
+        tenant_networks_reachable=true
+    fi
+
+    if is_service_enabled q-l3; then
+        public_network_id=$(quantum net-list | grep $PUBLIC_NETWORK_NAME | \
+            awk '{print $2}')
+        if [ "$Q_USE_NAMESPACE" == "False" ]; then
+            # If namespaces are disabled, devstack will create a single
+            # public router that tempest should be configured to use.
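+            # (For example, if ``Q_ROUTER_NAME`` were ``router1``, an
+            # illustrative value, this extracts the ID column of that row.)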
+            public_router_id=$(quantum router-list | awk "/ $Q_ROUTER_NAME / \
+                { print \$2 }")
+        fi
+    fi
+
+    # Timeouts
+    iniset $TEMPEST_CONF compute build_timeout $BUILD_TIMEOUT
+    iniset $TEMPEST_CONF volume build_timeout $BUILD_TIMEOUT
+    iniset $TEMPEST_CONF boto build_timeout $BUILD_TIMEOUT
+    iniset $TEMPEST_CONF compute build_interval $BUILD_INTERVAL
+    iniset $TEMPEST_CONF volume build_interval $BUILD_INTERVAL
+    iniset $TEMPEST_CONF boto build_interval $BUILD_INTERVAL
+    iniset $TEMPEST_CONF boto http_socket_timeout 5
+
+    # Identity
+    iniset $TEMPEST_CONF identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/"
+    iniset $TEMPEST_CONF identity password "$password"
+    iniset $TEMPEST_CONF identity alt_username $ALT_USERNAME
+    iniset $TEMPEST_CONF identity alt_password "$password"
+    iniset $TEMPEST_CONF identity alt_tenant_name $ALT_TENANT_NAME
+    iniset $TEMPEST_CONF identity admin_password "$password"
+
+    # Compute
+    iniset $TEMPEST_CONF compute change_password_available False
+    # Note(nati): the current tempest doesn't create a network for each tenant,
+    # so reuse the same tenant for now
+    if is_service_enabled quantum; then
+        TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-False}
+    fi
+    iniset $TEMPEST_CONF compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True}
+    iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED
+    iniset $TEMPEST_CONF compute network_for_ssh $PRIVATE_NETWORK_NAME
+    iniset $TEMPEST_CONF compute ip_version_for_ssh 4
+    iniset $TEMPEST_CONF compute ssh_timeout $BUILD_TIMEOUT
+    iniset $TEMPEST_CONF compute image_ref $image_uuid
+    iniset $TEMPEST_CONF compute image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
+    iniset $TEMPEST_CONF compute image_ref_alt $image_uuid_alt
+    iniset $TEMPEST_CONF compute image_alt_ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
+    iniset $TEMPEST_CONF compute flavor_ref $flavor_ref
+    iniset $TEMPEST_CONF compute flavor_ref_alt $flavor_ref_alt
+    iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False}
+    iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False}
+
+    # Whitebox
+    iniset $TEMPEST_CONF whitebox source_dir $NOVA_SOURCE_DIR
+    iniset $TEMPEST_CONF whitebox bin_dir $NOVA_BIN_DIR
+    # TODO(jaypipes): Create the key file here... right now, no whitebox
+    # tests actually use a key.
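+    # A throwaway key could be generated at this point along these lines
+    # (a sketch only; nothing consumes it yet):
+    #     ssh-keygen -t rsa -N "" -f $TEMPEST_DIR/id_rsa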
+    iniset $TEMPEST_CONF whitebox path_to_private_key $TEMPEST_DIR/id_rsa
+    iniset $TEMPEST_CONF whitebox db_uri $BASE_SQL_CONN/nova
+
+
+    # compute admin
+    iniset $TEMPEST_CONF "compute-admin" password "$password" # DEPRECATED
+
+    # network
+    iniset $TEMPEST_CONF network api_version 2.0
+    iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable"
+    iniset $TEMPEST_CONF network public_network_id "$public_network_id"
+    iniset $TEMPEST_CONF network public_router_id "$public_router_id"
+
+    # boto
+    iniset $TEMPEST_CONF boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud"
+    iniset $TEMPEST_CONF boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}"
+    iniset $TEMPEST_CONF boto s3_materials_path "$BOTO_MATERIALS_PATH"
+    iniset $TEMPEST_CONF boto instance_type "$boto_instance_type"
+    iniset $TEMPEST_CONF boto http_socket_timeout 30
+    iniset $TEMPEST_CONF boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
+
+    echo "Created tempest configuration file:"
+    cat $TEMPEST_CONF
+
+    # Restore IFS
+    IFS=$ifs
+    # Restore errexit
+    $errexit
+}
+
+# install_tempest() - Collect source and prepare
+function install_tempest() {
+    git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
+
+    # Tempest doesn't satisfy its dependencies on its own, so
+    # install them here instead.
+    pip_install -r $TEMPEST_DIR/tools/pip-requires
+}
+
+# init_tempest() - Initialize ec2 images
+function init_tempest() {
+    local base_image_name=cirros-0.3.0-x86_64
+    # /opt/stack/devstack/files/images/cirros-0.3.0-x86_64-uec
+    local image_dir="$FILES/images/${base_image_name}-uec"
+    local kernel="$image_dir/${base_image_name}-vmlinuz"
+    local ramdisk="$image_dir/${base_image_name}-initrd"
+    local disk_image="$image_dir/${base_image_name}-blank.img"
+    # If the cirros UEC image was downloaded and the system is UEC capable
+    if [ -f "$kernel" -a -f "$ramdisk" -a -f "$disk_image" -a "$VIRT_DRIVER" != "openvz" \
+        -a \( "$LIBVIRT_TYPE" != "lxc" -o "$VIRT_DRIVER" != "libvirt" \) ]; then
+        echo "Prepare aki/ari/ami Images"
+        ( # new namespace
+            # tenant:demo ; user: demo
+            source $TOP_DIR/accrc/demo/demo
+            euca-bundle-image -i "$kernel" --kernel true -d "$BOTO_MATERIALS_PATH"
+            euca-bundle-image -i "$ramdisk" --ramdisk true -d "$BOTO_MATERIALS_PATH"
+            euca-bundle-image -i "$disk_image" -d "$BOTO_MATERIALS_PATH"
+        ) 2>&1
+    fi
+}
+
+# Restore xtrace
+$XTRACE
diff --git a/lib/templates b/lib/templates
new file mode 100644
--- /dev/null
+++ b/lib/templates
+# Dependencies:
+# ``functions`` file
+# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
+#
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_XXXX
+# configure_XXXX
+# init_XXXX
+# start_XXXX
+# stop_XXXX
+# cleanup_XXXX
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+#
+
+# Set up default directories
+XXXX_DIR=$DEST/XXXX
+XXXX_CONF_DIR=/etc/XXXX
+
+
+# Entry Points
+# ------------
+
+# cleanup_XXXX() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_XXXX() {
+    # kill instances (nova)
+    # delete image files (glance)
+    # This function intentionally left blank
+    :
+}
+
+# configure_XXXX() - Set config files, create data dirs, etc
+function configure_XXXX() {
+    # sudo python setup.py deploy
+    # iniset $XXXX_CONF ...
+    # This function intentionally left blank
+    :
+}
+
+# init_XXXX() - Initialize databases, etc.
+function init_XXXX() {
+    # clean up from previous (possibly aborted) runs
+    # create required data files
+    :
+}
+
+# install_XXXX() - Collect source and prepare
+function install_XXXX() {
+    # git clone xxx
+    :
+}
+
+# start_XXXX() - Start running processes, including screen
+function start_XXXX() {
+    # screen_it XXXX "cd $XXXX_DIR && $XXXX_DIR/bin/XXXX-bin"
+    :
+}
+
+# stop_XXXX() - Stop running processes (non-screen)
+function stop_XXXX() {
+    # FIXME(dtroyer): stop only our screen window?
+    :
+}
+
+# Restore xtrace
+$XTRACE
diff --git a/lib/tls b/lib/tls
new file mode 100644
index 00000000..202edeff
--- /dev/null
+++ b/lib/tls
@@ -0,0 +1,318 @@
+# lib/tls
+# Functions to control the configuration and operation of the TLS proxy service
+
+# Dependencies:
+# !! source _before_ any services that use ``SERVICE_HOST``
+# ``functions`` file
+# ``DEST``, ``DATA_DIR`` must be defined
+# ``HOST_IP``, ``SERVICE_HOST``
+# ``KEYSTONE_TOKEN_FORMAT`` must be defined
+
+# Entry points:
+# configure_CA
+# init_CA
+
+# configure_proxy
+# start_tls_proxy
+
+# make_root_CA
+# make_int_CA
+# make_cert $INT_CA_DIR int-server "abc"
+# start_tls_proxy HOST_IP 5000 localhost 5000
+
+
+if is_service_enabled tls-proxy; then
+    # TODO(dtroyer): revisit this below after the search for HOST_IP has been done
+    TLS_IP=${TLS_IP:-$SERVICE_IP}
+
+    # Set the default ``SERVICE_PROTOCOL`` for TLS
+    SERVICE_PROTOCOL=https
+fi
+
+# Make up a hostname for cert purposes
+# will be added to /etc/hosts?
+DEVSTACK_HOSTNAME=secure.devstack.org
+DEVSTACK_CERT_NAME=devstack-cert
+DEVSTACK_CERT=$DATA_DIR/$DEVSTACK_CERT_NAME.pem
+
+# CA configuration
+ROOT_CA_DIR=${ROOT_CA_DIR:-$DATA_DIR/CA/root-ca}
+INT_CA_DIR=${INT_CA_DIR:-$DATA_DIR/CA/int-ca}
+
+ORG_NAME="OpenStack"
+ORG_UNIT_NAME="DevStack"
+
+# Stud configuration
+STUD_PROTO="--tls"
+STUD_CIPHERS='TLSv1+HIGH:!DES:!aNULL:!eNULL:@STRENGTH'
+
+
+# CA Functions
+# ============
+
+# There may be more than one, get specific
+OPENSSL=${OPENSSL:-/usr/bin/openssl}
+
+# Do primary CA configuration
+function configure_CA() {
+    # build common config file
+
+    # Verify ``TLS_IP`` is good
+    if [[ -n "$HOST_IP" && "$HOST_IP" != "$TLS_IP" ]]; then
+        # auto-discover has changed the IP
+        TLS_IP=$HOST_IP
+    fi
+}
+
+# Creates a new CA directory structure
+# create_CA_base ca-dir
+function create_CA_base() {
+    local ca_dir=$1
+
+    if [[ -d $ca_dir ]]; then
+        # Bail out, it already exists
+        return 0
+    fi
+
+    for i in certs crl newcerts private; do
+        mkdir -p $ca_dir/$i
+    done
+    chmod 710 $ca_dir/private
+    echo "01" >$ca_dir/serial
+    cp /dev/null $ca_dir/index.txt
+}
+
+
+# Create a new CA configuration file
+# create_CA_config ca-dir common-name
+function create_CA_config() {
+    local ca_dir=$1
+    local common_name=$2
+
+    echo "
+[ ca ]
+default_ca = CA_default
+
+[ CA_default ]
+dir = $ca_dir
+policy = policy_match
+database = \$dir/index.txt
+serial = \$dir/serial
+certs = \$dir/certs
+crl_dir = \$dir/crl
+new_certs_dir = \$dir/newcerts
+certificate = \$dir/cacert.pem
+private_key = \$dir/private/cacert.key
+RANDFILE = \$dir/private/.rand
+default_md = default
+
+[ req ]
+default_bits = 1024
+default_md = sha1
+
+prompt = no
+distinguished_name = ca_distinguished_name
+
+x509_extensions = ca_extensions
+
+[ ca_distinguished_name ]
+organizationName = $ORG_NAME
+organizationalUnitName = $ORG_UNIT_NAME Certificate Authority
+commonName = $common_name
+
+[ policy_match ]
+countryName = optional
+stateOrProvinceName = optional
+organizationName = match
+organizationalUnitName = optional
+commonName = supplied
+
+[ ca_extensions ]
+basicConstraints = critical,CA:true
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid:always, issuer
+keyUsage = cRLSign, keyCertSign
+
+" >$ca_dir/ca.conf
+}
+
+# Create a new signing configuration file
+# create_signing_config ca-dir
+function create_signing_config() {
+    local ca_dir=$1
+
+    echo "
+[ ca ]
+default_ca = CA_default
+
+[ CA_default ]
+dir = $ca_dir
+policy = policy_match
+database = \$dir/index.txt
+serial = \$dir/serial
+certs = \$dir/certs
+crl_dir = \$dir/crl
+new_certs_dir = \$dir/newcerts
+certificate = \$dir/cacert.pem
+private_key = \$dir/private/cacert.key
+RANDFILE = \$dir/private/.rand
+default_md = default
+
+[ req ]
+default_bits = 1024
+default_md = sha1
+
+prompt = no
+distinguished_name = req_distinguished_name
+
+x509_extensions = req_extensions
+
+[ req_distinguished_name ]
+organizationName = $ORG_NAME
+organizationalUnitName = $ORG_UNIT_NAME Server Farm
+
+[ policy_match ]
+countryName = optional
+stateOrProvinceName = optional
+organizationName = match
+organizationalUnitName = optional
+commonName = supplied
+
+[ req_extensions ]
+basicConstraints = CA:false
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid:always, issuer
+keyUsage = digitalSignature, keyEncipherment, keyAgreement
+extendedKeyUsage = serverAuth, clientAuth
+subjectAltName = \$ENV::SUBJECT_ALT_NAME
+
+" >$ca_dir/signing.conf
+}
+
+# Create root and intermediate CAs
+# init_CA
+function init_CA {
+    # Ensure CAs are built
+    make_root_CA $ROOT_CA_DIR
+    make_int_CA $INT_CA_DIR $ROOT_CA_DIR
+
+    # Create the CA bundle
+    cat $ROOT_CA_DIR/cacert.pem $INT_CA_DIR/cacert.pem >>$INT_CA_DIR/ca-chain.pem
+}
+
+# Create an initial server cert
+# init_cert
+function init_cert {
+    if [[ ! -r $DEVSTACK_CERT ]]; then
+        if [[ -n "$TLS_IP" ]]; then
+            # Lie to let incomplete match routines work
+            TLS_IP="DNS:$TLS_IP"
+        fi
+        make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "$TLS_IP"
+
+        # Create a cert bundle
+        cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT
+    fi
+}
+
+
+# make_cert creates and signs a new certificate with the given commonName and CA
+# make_cert ca-dir cert-name "common-name" ["alt-name" ...]
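+# For example, ``init_cert`` above boils down to a call like:
+#
+#   make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "DNS:$TLS_IP"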
+function make_cert() {
+    local ca_dir=$1
+    local cert_name=$2
+    local common_name=$3
+    local alt_names=$4
+
+    # Generate a signing request
+    $OPENSSL req \
+        -sha1 \
+        -newkey rsa \
+        -nodes \
+        -keyout $ca_dir/private/$cert_name.key \
+        -out $ca_dir/$cert_name.csr \
+        -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}"
+
+    if [[ -z "$alt_names" ]]; then
+        alt_names="DNS:${common_name}"
+    else
+        alt_names="DNS:${common_name},${alt_names}"
+    fi
+
+    # Sign the request valid for 1 year
+    SUBJECT_ALT_NAME="$alt_names" \
+    $OPENSSL ca -config $ca_dir/signing.conf \
+        -extensions req_extensions \
+        -days 365 \
+        -notext \
+        -in $ca_dir/$cert_name.csr \
+        -out $ca_dir/$cert_name.crt \
+        -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}" \
+        -batch
+}
+
+
+# Make an intermediate CA to sign everything else
+# make_int_CA ca-dir signing-ca-dir
+function make_int_CA() {
+    local ca_dir=$1
+    local signing_ca_dir=$2
+
+    # Create the intermediate CA
+    create_CA_base $ca_dir
+    create_CA_config $ca_dir 'Intermediate CA'
+    create_signing_config $ca_dir
+
+    # Create a signing certificate request
+    $OPENSSL req -config $ca_dir/ca.conf \
+        -sha1 \
+        -newkey rsa \
+        -nodes \
+        -keyout $ca_dir/private/cacert.key \
+        -out $ca_dir/cacert.csr \
+        -outform PEM
+
+    # Sign the intermediate request valid for 1 year
+    $OPENSSL ca -config $signing_ca_dir/ca.conf \
+        -extensions ca_extensions \
+        -days 365 \
+        -notext \
+        -in $ca_dir/cacert.csr \
+        -out $ca_dir/cacert.pem \
+        -batch
+}
+
+# Make a root CA to sign other CAs
+# make_root_CA ca-dir
+function make_root_CA() {
+    local ca_dir=$1
+
+    # Create the root CA
+    create_CA_base $ca_dir
+    create_CA_config $ca_dir 'Root CA'
+
+    # Create a self-signed certificate valid for roughly 58 years (21360 days)
+    $OPENSSL req -config $ca_dir/ca.conf \
+        -x509 \
+        -nodes \
+        -newkey rsa \
+        -days 21360 \
+        -keyout $ca_dir/private/cacert.key \
+        -out $ca_dir/cacert.pem \
+        -outform PEM
+}
+
+
+# Proxy Functions
+# ===============
+
+# Starts the TLS proxy for the given IP/ports
+# start_tls_proxy front-host front-port back-host back-port
+function start_tls_proxy() {
+    local f_host=$1
+    local f_port=$2
+    local b_host=$3
+    local b_port=$4
+
+    stud $STUD_PROTO -f $f_host,$f_port -b $b_host,$b_port $DEVSTACK_CERT 2>/dev/null
+}
diff --git a/openrc b/openrc
index 4b36112e..3ef44fd1 100644
--- a/openrc
+++ b/openrc
@@ -1,62 +1,86 @@
 #!/usr/bin/env bash
+#
+# source openrc [username] [tenantname]
+#
+# Configure a set of credentials for $TENANT/$USERNAME:
+# Set OS_TENANT_NAME to override the default tenant 'demo'
+# Set OS_USERNAME to override the default user name 'demo'
+# Set ADMIN_PASSWORD to set the password for 'admin' and 'demo'
+
+# NOTE: support for the old NOVA_* novaclient environment variables has
+# been removed.
+
+if [[ -n "$1" ]]; then
+    OS_USERNAME=$1
+fi
+if [[ -n "$2" ]]; then
+    OS_TENANT_NAME=$2
+fi
+
+# Find the other rc files
+RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd)
+
+# Import common functions
+source $RC_DIR/functions
 
 # Load local configuration
-source ./stackrc
+source $RC_DIR/stackrc
 
-# Set api host endpoint
-HOST_IP=${HOST_IP:-127.0.0.1}
+# Load the last env variables if available
+if [[ -r $TOP_DIR/.stackenv ]]; then
+    source $TOP_DIR/.stackenv
+fi
+
+# Get some necessary configuration
+source $RC_DIR/lib/tls
 
-# Nova original used project_id as the *account* that owned resources (servers,
-# ip address, ...) With the addition of Keystone we have standardized on the
-# term **tenant** as the entity that owns the resources. **novaclient** still
**novaclient** still -# uses the old deprecated terms project_id. Note that this field should now be -# set to tenant_name, not tenant_id. -export NOVA_PROJECT_ID=${TENANT:-demo} +# The introduction of Keystone to the OpenStack ecosystem has standardized the +# term **tenant** as the entity that owns resources. In some places references +# still exist to the original Nova term **project** for this use. Also, +# **tenant_name** is prefered to **tenant_id**. +export OS_TENANT_NAME=${OS_TENANT_NAME:-demo} # In addition to the owning entity (tenant), nova stores the entity performing # the action as the **user**. -export NOVA_USERNAME=${USERNAME:-demo} +export OS_USERNAME=${OS_USERNAME:-demo} # With Keystone you pass the keystone password instead of an api key. -export NOVA_API_KEY=${ADMIN_PASSWORD:-secrete} +# Recent versions of novaclient use OS_PASSWORD instead of NOVA_API_KEYs +# or NOVA_PASSWORD. +export OS_PASSWORD=${ADMIN_PASSWORD:-secrete} + +# Don't put the key into a keyring by default. Testing for development is much +# easier with this off. +export OS_NO_CACHE=${OS_NO_CACHE:-1} + +# Set api HOST_IP endpoint. SERVICE_HOST may also be used to specify the endpoint, +# which is convenient for some localrc configurations. +HOST_IP=${HOST_IP:-127.0.0.1} +SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} +SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} -# With the addition of Keystone, to use an openstack cloud you should -# authenticate against keystone, which returns a **Token** and **Service -# Catalog**. The catalog contains the endpoint for all services the user/tenant -# has access to - including nova, glance, keystone, swift, ... We currently -# recommend using the 2.0 *auth api*. +# Some exercises call glance directly. On a single-node installation, Glance +# should be listening on HOST_IP. If its running elsewhere, it can be set here +GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} + +# Authenticating against an Openstack cloud using Keystone returns a **Token** +# and **Service Catalog**. The catalog contains the endpoints for all services +# the user/tenant has access to - including nova, glance, keystone, swift, ... +# We currently recommend using the 2.0 *identity api*. # -# *NOTE*: Using the 2.0 *auth api* does not mean that compute api is 2.0. We +# *NOTE*: Using the 2.0 *identity api* does not mean that compute api is 2.0. We # will use the 1.1 *compute api* -export NOVA_URL=${NOVA_URL:-http://$HOST_IP:5000/v2.0/} +export OS_AUTH_URL=$SERVICE_PROTOCOL://$SERVICE_HOST:5000/v2.0 + +# Set the pointer to our CA certificate chain. Harmless if TLS is not used. +export OS_CACERT=$INT_CA_DIR/ca-chain.pem # Currently novaclient needs you to specify the *compute api* version. This # needs to match the config of your catalog returned by Keystone. export NOVA_VERSION=${NOVA_VERSION:-1.1} - -# FIXME - why does this need to be specified? 
-export NOVA_REGION_NAME=${NOVA_REGION_NAME:-RegionOne}
-
-# Set the ec2 url so euca2ools works
-export EC2_URL=${EC2_URL:-http://$HOST_IP:8773/services/Cloud}
-
-# Access key is set in the initial keystone data to be the same as username
-export EC2_ACCESS_KEY=${USERNAME:-demo}
-
-# Secret key is set in the initial keystone data to the admin password
-export EC2_SECRET_KEY=${ADMIN_PASSWORD:-secrete}
+# In the future this will change names:
+export COMPUTE_API_VERSION=${COMPUTE_API_VERSION:-$NOVA_VERSION}
 
 # set log level to DEBUG (helps debug issues)
+# export KEYSTONECLIENT_DEBUG=1
 # export NOVACLIENT_DEBUG=1
-
-# Max time till the vm is bootable
-export BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}
-
-# Max time to wait while vm goes from build to active state
-export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10}
-
-# Max time from run instance command until it is running
-export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))}
-
-# Max time to wait for proper IP association and dis-association.
-export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}
diff --git a/rejoin-stack.sh b/rejoin-stack.sh
new file mode 100755
index 00000000..a82c73cb
--- /dev/null
+++ b/rejoin-stack.sh
@@ -0,0 +1,18 @@
+#! /usr/bin/env bash
+
+# This script rejoins an existing screen, or re-creates a
+# screen session from a previous run of stack.sh.
+
+TOP_DIR=`dirname $0`
+
+# if screenrc exists, run screen
+if [[ -e $TOP_DIR/stack-screenrc ]]; then
+    if screen -ls | egrep -q "[0-9].stack"; then
+        echo "Attaching to already started screen session.."
+        exec screen -r stack
+    fi
+    exec screen -c $TOP_DIR/stack-screenrc
+fi
+
+echo "Couldn't find $TOP_DIR/stack-screenrc file; have you run stack.sh yet?"
+exit 1
diff --git a/samples/local.sh b/samples/local.sh
new file mode 100755
index 00000000..59015259
--- /dev/null
+++ b/samples/local.sh
@@ -0,0 +1,67 @@
+#!/usr/bin/env bash
+
+# Sample ``local.sh`` for user-configurable tasks to run automatically
+# at the successful conclusion of ``stack.sh``.
+
+# NOTE: Copy this file to the root ``devstack`` directory for it to
+# work properly.
+
+# This is a collection of some of the things we have found to be useful to run
+# after ``stack.sh`` to tweak the OpenStack configuration that DevStack produces.
+# These should be considered as samples and are unsupported DevStack code.
+
+
+# Keep track of the devstack directory
+TOP_DIR=$(cd $(dirname "$0") && pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Use openrc + stackrc + localrc for settings
+source $TOP_DIR/stackrc
+
+# Destination path for installation ``DEST``
+DEST=${DEST:-/opt/stack}
+
+
+# Import ssh keys
+# ---------------
+
+# Import keys from the current user into the default OpenStack user (usually
+# ``demo``)
+
+# Get OpenStack auth
+source $TOP_DIR/openrc
+
+# Add first keypair found in localhost:$HOME/.ssh
+for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do
+    if [[ -r $i ]]; then
+        nova keypair-add --pub_key=$i `hostname`
+        break
+    fi
+done
+
+
+# Create a Flavor
+# ---------------
+
+# Get OpenStack admin auth
+source $TOP_DIR/openrc admin admin
+
+# Name of new flavor
+# set in ``localrc`` with ``DEFAULT_INSTANCE_TYPE=m1.micro``
+MI_NAME=m1.micro
+
+# Create micro flavor if not present
+if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then
+    nova flavor-create $MI_NAME 6 128 0 1
+fi
+
+
+# Other Uses
+# ----------
+
+# Add tcp/22 and icmp to default security group
+nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
+nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0
+
diff --git a/samples/localrc b/samples/localrc
new file mode 100644
index 00000000..bcaa788a
--- /dev/null
+++ b/samples/localrc
@@ -0,0 +1,89 @@
+# Sample ``localrc`` for user-configurable variables in ``stack.sh``
+
+# NOTE: Copy this file to the root ``devstack`` directory for it to
+# work properly.
+
+# ``localrc`` is a user-maintained settings file that is sourced from ``stackrc``.
+# This gives it the ability to override any variables set in ``stackrc``.
+# Also, most of the settings in ``stack.sh`` are written to only be set if no
+# value has already been set; this lets ``localrc`` effectively override the
+# default values.
+
+# This is a collection of some of the settings we have found to be useful
+# in our DevStack development environments. Additional settings are described
+# in http://devstack.org/localrc.html
+# These should be considered as samples and are unsupported DevStack code.
+
+
+# Minimal Contents
+# ----------------
+
+# While ``stack.sh`` is happy to run without ``localrc``, devlife is better when
+# there are a few minimal variables set:
+
+# If the ``*_PASSWORD`` variables are not set here you will be prompted to enter
+# values for them by ``stack.sh`` and they will be added to ``localrc``.
+ADMIN_PASSWORD=nomoresecrete
+MYSQL_PASSWORD=stackdb
+RABBIT_PASSWORD=stackqueue
+SERVICE_PASSWORD=$ADMIN_PASSWORD
+
+# ``HOST_IP`` should be set manually for best results if the NIC configuration
+# of the host is unusual, i.e. ``eth1`` has the default route but ``eth0`` is the
+# public interface. It is auto-detected in ``stack.sh`` but often is indeterminate
+# on later runs due to the IP moving from an Ethernet interface to a bridge on
+# the host. Setting it here also makes it available for ``openrc`` to include
+# when setting ``OS_AUTH_URL``.
+# ``HOST_IP`` is not set by default.
+#HOST_IP=w.x.y.z
+
+
+# Logging
+# -------
+
+# By default ``stack.sh`` output only goes to the terminal where it runs. It can
+# be configured to additionally log to a file by setting ``LOGFILE`` to the full
+# path of the destination log file. A timestamp will be appended to the given name.
+LOGFILE=$DEST/logs/stack.sh.log
+
+# Old log files are automatically removed after 7 days to keep things neat. Change
+# the number of days by setting ``LOGDAYS``.
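+# For example, keep only the last two days of logs: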
+LOGDAYS=2
+
+# Nova logs will be colorized if ``SYSLOG`` is not set; turn this off by setting
+# ``LOG_COLOR`` false.
+#LOG_COLOR=False
+
+
+# Using milestone-proposed branches
+# ---------------------------------
+
+# Uncomment these to grab the milestone-proposed branches from the repos:
+#CINDER_BRANCH=milestone-proposed
+#GLANCE_BRANCH=milestone-proposed
+#HORIZON_BRANCH=milestone-proposed
+#KEYSTONE_BRANCH=milestone-proposed
+#KEYSTONECLIENT_BRANCH=milestone-proposed
+#NOVA_BRANCH=milestone-proposed
+#NOVACLIENT_BRANCH=milestone-proposed
+#QUANTUM_BRANCH=milestone-proposed
+#SWIFT_BRANCH=milestone-proposed
+
+
+# Swift
+# -----
+
+# Swift is now used as the back-end for the S3-like object store. Nova's
+# objectstore (``n-obj`` in ``ENABLED_SERVICES``) will NOT run if Swift is
+# enabled. Setting the hash value is required and you will
+# be prompted for it if Swift is enabled so just set it to something already:
SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5
+
+# For development purposes the default of 3 replicas is usually not required.
+# Set this to 1 to save some resources:
+SWIFT_REPLICAS=1
+
+# The data for Swift is stored in the source tree by default (``$DEST/swift/data``)
+# and can be moved by setting ``SWIFT_DATA_DIR``. The directory will be created
+# if it does not exist.
+SWIFT_DATA_DIR=$DEST/data
diff --git a/stack.sh b/stack.sh
index 4461e403..331743f0 100755
--- a/stack.sh
+++ b/stack.sh
@@ -1,8 +1,9 @@
 #!/usr/bin/env bash
 
-# **stack.sh** is an opinionated openstack developer installation.
-
-# This script installs and configures *nova*, *glance*, *horizon* and *keystone*
+# ``stack.sh`` is an opinionated OpenStack developer installation. It
+# installs and configures various combinations of **Ceilometer**, **Cinder**,
+# **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Quantum**
+# and **Swift**
 
 # This script allows you to specify configuration options of what git
 # repositories to use, enabled services, network configuration and various
@@ -10,178 +11,307 @@
 # shared settings for common resources (mysql, rabbitmq) and build a multi-node
 # developer install.
 
-# To keep this script simple we assume you are running on an **Ubuntu 11.04
-# Natty** machine. It should work in a VM or physical server. Additionally we
-# put the list of *apt* and *pip* dependencies and other configuration files in
-# this repo. So start by grabbing this script and the dependencies.
+# To keep this script simple we assume you are running on a recent **Ubuntu**
+# (11.10 Oneiric or newer) or **Fedora** (F16 or newer) machine. It
+# should work in a VM or physical server. Additionally we put the list of
+# ``apt`` and ``rpm`` dependencies and other configuration files in this repo.
 
 # Learn more and get the most recent version at http://devstack.org
 
-# Sanity Check
-# ============
-
-# Warn users who aren't on natty, but allow them to override check and attempt
-# installation with ``FORCE=yes ./stack``
-if ! egrep -q 'natty|oneiric' /etc/lsb-release; then
-    echo "WARNING: this script has only been tested on natty and oneiric"
-    if [[ "$FORCE" != "yes" ]]; then
-        echo "If you wish to run this script anyway run with FORCE=yes"
-        exit 1
-    fi
-fi
-
-# Keep track of the current devstack directory.
+# Keep track of the devstack directory
 TOP_DIR=$(cd $(dirname "$0") && pwd)
 
-# stack.sh keeps the list of **apt** and **pip** dependencies in external
-# files, along with config templates and other useful files. You can find these
-# in the ``files`` directory (next to this script). We will reference this
-# directory using the ``FILES`` variable in this script.
-FILES=$TOP_DIR/files
-if [ ! -d $FILES ]; then
-    echo "ERROR: missing devstack/files - did you grab more than just stack.sh?"
-    exit 1
-fi
+# Import common functions
+source $TOP_DIR/functions
 
+# Determine what system we are running on. This provides ``os_VENDOR``,
+# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
+# and ``DISTRO``
+GetDistro
 
-# Settings
-# ========
+# Global Settings
+# ===============
 
-# This script is customizable through setting environment variables. If you
-# want to override a setting you can either::
+# ``stack.sh`` is customizable through setting environment variables. If you
+# want to override a setting you can set and export it::
 #
-#     export MYSQL_PASSWORD=anothersecret
+#     export DATABASE_PASSWORD=anothersecret
 #     ./stack.sh
 #
-# You can also pass options on a single line ``MYSQL_PASSWORD=simple ./stack.sh``
+# You can also pass options on a single line ``DATABASE_PASSWORD=simple ./stack.sh``
 #
-# Additionally, you can put any local variables into a ``localrc`` file, like::
+# Additionally, you can put any local variables into a ``localrc`` file::
 #
-#     MYSQL_PASSWORD=anothersecret
-#     MYSQL_USER=hellaroot
+#     DATABASE_PASSWORD=anothersecret
+#     DATABASE_USER=hellaroot
 #
 # We try to have sensible defaults, so you should be able to run ``./stack.sh``
-# in most cases.
-#
-# We our settings from ``stackrc``. This file is distributed with devstack and
-# contains locations for what repositories to use. If you want to use other
-# repositories and branches, you can add your own settings with another file
-# called ``localrc``
+# in most cases. ``localrc`` is not distributed with DevStack and will never
+# be overwritten by a DevStack update.
 #
-# If ``localrc`` exists, then ``stackrc`` will load those settings. This is
-# useful for changing a branch or repository to test other versions. Also you
-# can store your other settings like **MYSQL_PASSWORD** or **ADMIN_PASSWORD** instead
-# of letting devstack generate random ones for you.
-source ./stackrc
+# DevStack distributes ``stackrc`` which contains locations for the OpenStack
+# repositories and branches to configure. ``stackrc`` sources ``localrc`` to
+# allow you to safely override those settings.
+
+if [[ ! -r $TOP_DIR/stackrc ]]; then
+    echo "ERROR: missing $TOP_DIR/stackrc - did you grab more than just stack.sh?"
+    exit 1
+fi
+source $TOP_DIR/stackrc
+
+
+# Local Settings
+# --------------
+
+# Make sure the proxy config is visible to sub-processes
+export_proxy_variables
 
 # Destination path for installation ``DEST``
 DEST=${DEST:-/opt/stack}
-# Configure services to syslog instead of writing to individual log files
-SYSLOG=${SYSLOG:-False}
 
-# apt-get wrapper to just get arguments set correctly
-function apt_get() {
-    local sudo="sudo"
-    [ "$(id -u)" = "0" ] && sudo="env"
-    $sudo DEBIAN_FRONTEND=noninteractive apt-get \
-        --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
-}
+
+# Sanity Check
+# ------------
+
+# Clean up last environment var cache
+if [[ -r $TOP_DIR/.stackenv ]]; then
+    rm $TOP_DIR/.stackenv
+fi
+
+# Import common services (database, message queue) configuration
+source $TOP_DIR/lib/database
+source $TOP_DIR/lib/rpc_backend
+
+# Validate database selection
+# Since DATABASE_BACKENDS is now set, this also gets ENABLED_SERVICES
+# properly configured for the database selection.
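+# (``DATABASE_TYPE`` is normally set via a ``use_database mysql`` or
+# ``use_database postgresql`` call from ``stackrc``/``localrc``.)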
+use_database $DATABASE_TYPE || echo "Invalid database '$DATABASE_TYPE'"
+
+# Remove services which were negated in ENABLED_SERVICES
+# using the "-" prefix (e.g., "-rabbit") instead of
+# calling disable_service().
+disable_negated_services
+
+# Warn users who aren't on an explicitly supported distro, but allow them to
+# override check and attempt installation with ``FORCE=yes ./stack``
+if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18|opensuse-12.2) ]]; then
+    echo "WARNING: this script has not been tested on $DISTRO"
+    if [[ "$FORCE" != "yes" ]]; then
+        echo "If you wish to run this script anyway run with FORCE=yes"
+        exit 1
+    fi
+fi
+
+# Make sure we only have one rpc backend enabled,
+# and the specified rpc backend is available on your platform.
+check_rpc_backend
+
+# ``stack.sh`` keeps function libraries here
+# Make sure ``$TOP_DIR/lib`` directory is present
+if [ ! -d $TOP_DIR/lib ]; then
+    echo "ERROR: missing devstack/lib"
+    exit 1
+fi
+
+# ``stack.sh`` keeps the list of ``apt`` and ``rpm`` dependencies and config
+# templates and other useful files in the ``files`` subdirectory
+FILES=$TOP_DIR/files
+if [ ! -d $FILES ]; then
+    echo "ERROR: missing devstack/files"
+    exit 1
+fi
+
+SCREEN_NAME=${SCREEN_NAME:-stack}
+# Check to see if we are already running DevStack
+if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].$SCREEN_NAME"; then
+    echo "You are already running a stack.sh session."
+    echo "To rejoin this session type 'screen -x stack'."
+    echo "To destroy this session, type './unstack.sh'."
+    exit 1
+fi
+
+# Set up logging level
+VERBOSE=$(trueorfalse True $VERBOSE)
 
-# OpenStack is designed to be run as a regular user (Horizon will fail to run
-# as root, since apache refused to startup serve content from root user). If
-# stack.sh is run as root, it automatically creates a stack user with
+# root Access
+# -----------
+
+# OpenStack is designed to be run as a non-root user; Horizon will fail to run
+# as **root** since Apache will not serve content from the **root** user. If
+# ``stack.sh`` is run as **root**, it automatically creates a **stack** user with
 # sudo privileges and runs as that user.
 if [[ $EUID -eq 0 ]]; then
     ROOTSLEEP=${ROOTSLEEP:-10}
     echo "You are running this script as root."
-    echo "In $ROOTSLEEP seconds, we will create a user 'stack' and run as that user"
+    echo "In $ROOTSLEEP seconds, we will create a user '$STACK_USER' and run as that user"
     sleep $ROOTSLEEP
 
-    # since this script runs as a normal user, we need to give that user
-    # ability to run sudo
-    dpkg -l sudo || apt_get update && apt_get install sudo
-
-    if ! getent passwd stack >/dev/null; then
-        echo "Creating a user called stack"
-        useradd -U -G sudo -s /bin/bash -d $DEST -m stack
+    # Give the non-root user the ability to run as **root** via ``sudo``
+    is_package_installed sudo || install_package sudo
+    if ! getent group $STACK_USER >/dev/null; then
+        echo "Creating a group called $STACK_USER"
+        groupadd $STACK_USER
+    fi
+    if ! getent passwd $STACK_USER >/dev/null; then
+        echo "Creating a user called $STACK_USER"
+        useradd -g $STACK_USER -s /bin/bash -d $DEST -m $STACK_USER
     fi
 
-    echo "Giving stack user passwordless sudo priviledges"
-    # natty uec images sudoers does not have a '#includedir'. add one.
+ echo "Giving stack user passwordless sudo privileges" + # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || echo "#includedir /etc/sudoers.d" >> /etc/sudoers - ( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \ + ( umask 226 && echo "$STACK_USER ALL=(ALL) NOPASSWD:ALL" \ > /etc/sudoers.d/50_stack_sh ) - echo "Copying files to stack user" - STACK_DIR="$DEST/${PWD##*/}" - cp -r -f "$PWD" "$STACK_DIR" - chown -R stack "$STACK_DIR" + echo "Copying files to $STACK_USER user" + STACK_DIR="$DEST/${TOP_DIR##*/}" + cp -r -f -T "$TOP_DIR" "$STACK_DIR" + chown -R $STACK_USER "$STACK_DIR" + cd "$STACK_DIR" if [[ "$SHELL_AFTER_RUN" != "no" ]]; then - exec su -c "set -e; cd $STACK_DIR; bash stack.sh; bash" stack + exec sudo -u $STACK_USER bash -l -c "set -e; bash stack.sh; bash" else - exec su -c "set -e; cd $STACK_DIR; bash stack.sh" stack + exec sudo -u $STACK_USER bash -l -c "set -e; source stack.sh" fi exit 1 else - # Our user needs passwordless priviledges for certain commands which nova - # uses internally. - # Natty uec images sudoers does not have a '#includedir'. add one. + # We're not **root**, make sure ``sudo`` is available + is_package_installed sudo || die "Sudo is required. Re-run stack.sh as root ONE TIME ONLY to set up sudo." + + # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers + + # Set up devstack sudoers TEMPFILE=`mktemp` - cat $FILES/sudo/nova > $TEMPFILE - sed -e "s,%USER%,$USER,g" -i $TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE + # Some binaries might be under /sbin or /usr/sbin, so make sure sudo will + # see them by forcing PATH + echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE - sudo mv $TEMPFILE /etc/sudoers.d/stack_sh_nova + sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh + + # Remove old file + sudo rm -f /etc/sudoers.d/stack_sh_nova fi -# Set the destination directories for openstack projects -NOVA_DIR=$DEST/nova -HORIZON_DIR=$DEST/horizon -GLANCE_DIR=$DEST/glance -KEYSTONE_DIR=$DEST/keystone -NOVACLIENT_DIR=$DEST/python-novaclient -OPENSTACKX_DIR=$DEST/openstackx -NOVNC_DIR=$DEST/noVNC -SWIFT_DIR=$DEST/swift -SWIFT_KEYSTONE_DIR=$DEST/swift-keystone2 -QUANTUM_DIR=$DEST/quantum +# Create the destination directory and ensure it is writable by the user +sudo mkdir -p $DEST +if [ ! -w $DEST ]; then + sudo chown $STACK_USER $DEST +fi -# Default Quantum Plugin -Q_PLUGIN=${Q_PLUGIN:-openvswitch} +# Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without +# Internet access. ``stack.sh`` must have been previously run with Internet +# access to install prerequisites and fetch repositories. +OFFLINE=`trueorfalse False $OFFLINE` -# Specify which services to launch. These generally correspond to screen tabs -ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit} +# Set ``ERROR_ON_CLONE`` to ``True`` to configure ``stack.sh`` to exit if +# the destination git repository does not exist during the ``git_clone`` +# operation. 
+ERROR_ON_CLONE=`trueorfalse False $ERROR_ON_CLONE`
 
-# Name of the lvm volume group to use/create for iscsi volumes
-VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes}
+# Destination path for service data
+DATA_DIR=${DATA_DIR:-${DEST}/data}
+sudo mkdir -p $DATA_DIR
+sudo chown $STACK_USER $DATA_DIR
 
-# Nova hypervisor configuration. We default to libvirt whth **kvm** but will
-# drop back to **qemu** if we are unable to load the kvm module. Stack.sh can
-# also install an **LXC** based system.
-VIRT_DRIVER=${VIRT_DRIVER:-libvirt}
-LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
 
-# nova supports pluggable schedulers. ``SimpleScheduler`` should work in most
-# cases unless you are working on multi-zone mode.
-SCHEDULER=${SCHEDULER:-nova.scheduler.simple.SimpleScheduler}
+# Common Configuration
+# ====================
 
-# Use the first IP unless an explicit is set by ``HOST_IP`` environment variable
-if [ ! -n "$HOST_IP" ]; then
-    HOST_IP=`LC_ALL=C /sbin/ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'`
+# Set fixed and floating range here so we can make sure not to use addresses
+# from either range when attempting to guess the IP to use for the host.
+# Note that setting FIXED_RANGE may be necessary when running DevStack
+# in an OpenStack cloud that uses either of these address ranges internally.
+FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28}
+FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
+FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
+NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
+
+# Find the interface used for the default route
+HOST_IP_IFACE=${HOST_IP_IFACE:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)}
+# Search for an IP unless one is explicitly set via the ``HOST_IP`` environment variable
+if [ -z "$HOST_IP" -o "$HOST_IP" == "dhcp" ]; then
+    HOST_IP=""
+    HOST_IPS=`LC_ALL=C ip -f inet addr show ${HOST_IP_IFACE} | awk '/inet/ {split($2,parts,"/"); print parts[1]}'`
+    for IP in $HOST_IPS; do
+        # Attempt to filter out IP addresses that are part of the fixed and
+        # floating range. Note that this method only works if the ``netaddr``
+        # python library is installed. If it is not installed, an error
+        # will be printed and the first IP from the interface will be used.
+        # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct
+        # address.
+        if ! (address_in_net $IP $FIXED_RANGE || address_in_net $IP $FLOATING_RANGE); then
+            HOST_IP=$IP
+            break;
+        fi
+    done
+    if [ "$HOST_IP" == "" ]; then
+        echo "Could not determine host ip address."
+        echo "Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted"
+        exit 1
+    fi
 fi
+
+# Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints.
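+# For instance, setting ``SERVICE_HOST=localhost`` in ``localrc`` keeps the
+# generated endpoints loopback-only (an illustrative choice, not a default).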
+SERVICE_HOST=${SERVICE_HOST:-$HOST_IP}
+SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
+
+# Configure services to use syslog instead of writing to individual log files
+SYSLOG=`trueorfalse False $SYSLOG`
+SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP}
+SYSLOG_PORT=${SYSLOG_PORT:-516}
+
+# Use color for logging output (only available if syslog is not used)
+LOG_COLOR=`trueorfalse True $LOG_COLOR`
 
 # Service startup timeout
 SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
+
+# Configure Projects
+# ==================
+
+# Get project function libraries
+source $TOP_DIR/lib/tls
+source $TOP_DIR/lib/horizon
+source $TOP_DIR/lib/keystone
+source $TOP_DIR/lib/glance
+source $TOP_DIR/lib/nova
+source $TOP_DIR/lib/cinder
+source $TOP_DIR/lib/swift
+source $TOP_DIR/lib/ceilometer
+source $TOP_DIR/lib/heat
+source $TOP_DIR/lib/quantum
+source $TOP_DIR/lib/baremetal
+source $TOP_DIR/lib/ldap
+
+# Set the destination directories for OpenStack projects
+HORIZON_DIR=$DEST/horizon
+OPENSTACKCLIENT_DIR=$DEST/python-openstackclient
+NOVNC_DIR=$DEST/noVNC
+SPICE_DIR=$DEST/spice-html5
+SWIFT3_DIR=$DEST/swift3
+
+# Should cinder perform secure deletion of volumes?
+# Defaults to true, can be set to False to avoid this bug when testing:
+# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755
+CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE`
+
+# Name of the LVM volume group to use/create for iscsi volumes
+VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
+VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
+INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
 
 # Generic helper to configure passwords
 function read_password {
+    XTRACE=$(set +o | grep xtrace)
     set +o xtrace
     var=$1; msg=$2
     pw=${!var}
@@ -201,150 +331,289 @@ function read_password {
         echo '################################################################################'
         echo $msg
         echo '################################################################################'
-        echo "This value will be written to your localrc file so you don't have to enter it again."
-        echo "It is probably best to avoid spaces and weird characters."
+        echo "This value will be written to your localrc file so you don't have to enter it "
+        echo "again. Use only alphanumeric characters."
         echo "If you leave this blank, a random default value will be used."
-        echo "Enter a password now:"
-        read $var
-        pw=${!var}
+        pw=" "
+        while true; do
+            echo "Enter a password now:"
+            read -e $var
+            pw=${!var}
+            [[ "$pw" = "`echo $pw | tr -cd [:alnum:]`" ]] && break
+            echo "Invalid chars in password. Try again:"
+        done
         if [ ! $pw ]; then
            pw=`openssl rand -hex 10`
        fi
        eval "$var=$pw"
        echo "$var=$pw" >> $localrc
    fi
-    set -o xtrace
+    $XTRACE
}

 # Nova Network Configuration
 # --------------------------
 
-# FIXME: more documentation about why these are important flags. Also
-# we should make sure we use the same variable names as the flag names.
+# FIXME: more documentation about why these are important options. Also
+# we should make sure we use the same variable names as the option names.
-PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-eth0} -FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} -FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} -FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28} +if [ "$VIRT_DRIVER" = 'xenserver' ]; then + PUBLIC_INTERFACE_DEFAULT=eth3 + # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args + FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u) + GUEST_INTERFACE_DEFAULT=eth1 +elif [ "$VIRT_DRIVER" = 'baremetal' ]; then + PUBLIC_INTERFACE_DEFAULT=eth0 + FLAT_NETWORK_BRIDGE_DEFAULT=br100 + FLAT_INTERFACE=${FLAT_INTERFACE:-eth0} + FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-False} + NET_MAN=${NET_MAN:-FlatManager} + STUB_NETWORK=${STUB_NETWORK:-False} +else + PUBLIC_INTERFACE_DEFAULT=br100 + FLAT_NETWORK_BRIDGE_DEFAULT=br100 + GUEST_INTERFACE_DEFAULT=eth0 +fi + +PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT} NET_MAN=${NET_MAN:-FlatDHCPManager} -EC2_DMZ_HOST=${EC2_DMZ_HOST:-$HOST_IP} -FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-br100} -VLAN_INTERFACE=${VLAN_INTERFACE:-$PUBLIC_INTERFACE} +EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST} +FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT} +VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT} +FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-True} + +# Test floating pool and range are used for testing. They are defined +# here until the admin APIs can replace nova-manage +TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} +TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29} -# Multi-host is a mode where each compute node runs its own network node. This +# ``MULTI_HOST`` is a mode where each compute node runs its own network node. This # allows network operations and routing for a VM to occur on the server that is # running the VM - removing a SPOF and bandwidth bottleneck. -MULTI_HOST=${MULTI_HOST:-False} +MULTI_HOST=`trueorfalse False $MULTI_HOST` -# If you are using FlatDHCP on multiple hosts, set the ``FLAT_INTERFACE`` -# variable but make sure that the interface doesn't already have an -# ip or you risk breaking things. +# If you are using the FlatDHCP network mode on multiple hosts, set the +# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already +# have an IP or you risk breaking things. # # **DHCP Warning**: If your flat interface device uses DHCP, there will be a # hiccup while the network is moved from the flat interface to the flat network # bridge. This will happen when you launch your first instance. Upon launch -# you will lose all connectivity to the node, and the vm launch will probably +# you will lose all connectivity to the node, and the VM launch will probably # fail. # # If you are running on a single node and don't need to access the VMs from -# devices other than that node, you can set the flat interface to the same -# value as ``FLAT_NETWORK_BRIDGE``. This will stop the network hiccup from -# occurring. -FLAT_INTERFACE=${FLAT_INTERFACE:-eth0} +# devices other than that node, you can set ``FLAT_INTERFACE=`` +# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``. +FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT} ## FIXME(ja): should/can we check that FLAT_INTERFACE is sane? -# Using Quantum networking: -# -# Make sure that q-svc is enabled in ENABLED_SERVICES. If it is the network -# manager will be set to the QuantumManager. 
-# -# If you're planning to use the Quantum openvswitch plugin, set Q_PLUGIN to -# "openvswitch" and make sure the q-agt service is enabled in -# ENABLED_SERVICES. -# -# With Quantum networking the NET_MAN variable is ignored. +# Database Configuration +# ---------------------- -# MySQL & RabbitMQ -# ---------------- +# To select between database backends, add a line to localrc like: +# +# use_database postgresql +# +# The available database backends are defined in the ``DATABASE_BACKENDS`` +# variable defined in stackrc. By default, MySQL is enabled as the database +# backend. -# We configure Nova, Horizon, Glance and Keystone to use MySQL as their -# database server. While they share a single server, each has their own -# database and tables. +initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || echo "No database enabled" -# By default this script will install and configure MySQL. If you want to -# use an existing server, you can pass in the user/password/host parameters. -# You will need to send the same ``MYSQL_PASSWORD`` to every host if you are doing -# a multi-node devstack installation. -MYSQL_HOST=${MYSQL_HOST:-localhost} -MYSQL_USER=${MYSQL_USER:-root} -read_password MYSQL_PASSWORD "ENTER A PASSWORD TO USE FOR MYSQL." -# don't specify /db in this string, so we can use it for multiple services -BASE_SQL_CONN=${BASE_SQL_CONN:-mysql://$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST} +# RabbitMQ or Qpid +# -------------------------- # Rabbit connection info -RABBIT_HOST=${RABBIT_HOST:-localhost} -read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT." - -# Glance connection info. Note the port must be specified. -GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$HOST_IP:9292} - -# SWIFT -# ----- -# TODO: implement glance support -# TODO: add logging to different location. - -# By default the location of swift drives and objects is located inside -# the swift source directory. SWIFT_DATA_LOCATION variable allow you to redefine -# this. -SWIFT_DATA_LOCATION=${SWIFT_DATA_LOCATION:-${SWIFT_DIR}/data} - -# We are going to have the configuration files inside the source -# directory, change SWIFT_CONFIG_LOCATION if you want to adjust that. -SWIFT_CONFIG_LOCATION=${SWIFT_CONFIG_LOCATION:-${SWIFT_DIR}/config} - -# devstack will create a loop-back disk formatted as XFS to store the -# swift data. By default the disk size is 1 gigabyte. The variable -# SWIFT_LOOPBACK_DISK_SIZE specified in bytes allow you to change -# that. -SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} - -# The ring uses a configurable number of bits from a path’s MD5 hash as -# a partition index that designates a device. The number of bits kept -# from the hash is known as the partition power, and 2 to the partition -# power indicates the partition count. Partitioning the full MD5 hash -# ring allows other parts of the cluster to work in batches of items at -# once which ends up either more efficient or at least less complex than -# working with each item separately or the entire cluster all at once. -# By default we define 9 for the partition count (which mean 512). -SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} - -# We only ask for Swift Hash if we have enabled swift service. -if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then - # SWIFT_HASH is a random unique string for a swift cluster that +if is_service_enabled rabbit; then + RABBIT_HOST=${RABBIT_HOST:-localhost} + read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT." 
+fi
+
+if is_service_enabled swift; then
+    # If we are using swift3, we can default the s3 port to swift instead
+    # of nova-objectstore
+    if is_service_enabled swift3; then
+        S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080}
+    fi
+    # We only ask for Swift Hash if we have enabled swift service.
+    # ``SWIFT_HASH`` is a random unique string for a swift cluster that
     # can never change.
     read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
 fi

+# Set default port for nova-objectstore
+S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333}
+
+
 # Keystone
 # --------

-# Service Token - Openstack components need to have an admin token
-# to validate user tokens.
+# The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database. It is
+# just a string and is not a 'real' Keystone token.
 read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN."
+# Services authenticate to Identity with servicename/``SERVICE_PASSWORD``
+read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION."
 # Horizon currently truncates usernames and passwords at 20 characters
 read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)."

+# Keystone can now optionally install OpenLDAP by adding ldap to the list
+# of enabled services in the localrc file (e.g. ENABLED_SERVICES=key,ldap).
+# If OpenLDAP has already been installed but you need to clear out
+# the Keystone contents of LDAP, set KEYSTONE_CLEAR_LDAP to yes
+# (e.g. KEYSTONE_CLEAR_LDAP=yes) in the localrc file. To enable the
+# Keystone Identity Driver (keystone.identity.backends.ldap.Identity)
+# set KEYSTONE_IDENTITY_BACKEND to ldap (e.g. KEYSTONE_IDENTITY_BACKEND=ldap)
+# in the localrc file.
+
+
+# Only request an LDAP password if the service is enabled
+if is_service_enabled ldap; then
+    read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP"
+fi
+
+# Set the tenant for service accounts in Keystone
+SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
+
+
+# Log files
+# ---------

-LOGFILE=${LOGFILE:-"$PWD/stack.sh.$$.log"}
-(
-# So that errors don't compound we exit on any errors so you see only the
-# first error that occurred.
+# Draw a spinner so the user knows something is happening
+function spinner() {
+    local delay=0.75
+    local spinstr='/-\|'
+    printf "..." >&3
+    while true; do
+        local temp=${spinstr#?}
+        printf "[%c]" "$spinstr" >&3
+        local spinstr=$temp${spinstr%"$temp"}
+        sleep $delay
+        printf "\b\b\b" >&3
+    done
+}
+
+# Echo text to the log file, summary log file and stdout
+# echo_summary "something to say"
+function echo_summary() {
+    if [[ -t 3 && "$VERBOSE" != "True" ]]; then
+        kill >/dev/null 2>&1 $LAST_SPINNER_PID
+        if [ ! -z "$LAST_SPINNER_PID" ]; then
+            printf "\b\b\bdone\n" >&3
+        fi
+        echo -n $@ >&6
+        spinner &
+        LAST_SPINNER_PID=$!
+    else
+        echo $@ >&6
+    fi
+}
+
+# Echo text only to stdout, no log files
+# echo_nolog "something not for the logs"
+function echo_nolog() {
+    echo $@ >&3
+}
+
+# Set up logging for ``stack.sh``
+# Set ``LOGFILE`` to turn on logging
+# Append '.xxxxxxxx' to the given name to maintain history
+# where 'xxxxxxxx' is a representation of the date the file was created
+TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"}
+if [[ -n "$LOGFILE" || -n "$SCREEN_LOGDIR" ]]; then
+    LOGDAYS=${LOGDAYS:-7}
+    CURRENT_LOG_TIME=$(date "+$TIMESTAMP_FORMAT")
+fi
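Turning the logging below on is just a matter of setting ``LOGFILE`` before
the run; a hypothetical ``localrc`` entry (the path is illustrative)::

    # localrc
    LOGFILE=/opt/stack/logs/stack.sh.log
    LOGDAYS=3    # keep timestamped logs for 3 days instead of the default 7

Each run then writes ``stack.sh.log.<timestamp>`` and refreshes the
``stack.sh.log`` symlink, as the code below shows.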
+if [[ -n "$LOGFILE" ]]; then
+    # First clean up old log files.  Use the user-specified ``LOGFILE``
+    # as the template to search for, appending '.*' to match the date
+    # we added on earlier runs.
+    LOGDIR=$(dirname "$LOGFILE")
+    LOGNAME=$(basename "$LOGFILE")
+    mkdir -p $LOGDIR
+    find $LOGDIR -maxdepth 1 -name $LOGNAME.\* -mtime +$LOGDAYS -exec rm {} \;
+    LOGFILE=$LOGFILE.${CURRENT_LOG_TIME}
+    SUMFILE=$LOGFILE.${CURRENT_LOG_TIME}.summary
+
+    # Redirect output according to config
+
+    # Copy stdout to fd 3
+    exec 3>&1
+    if [[ "$VERBOSE" == "True" ]]; then
+        # Redirect stdout/stderr to tee to write the log file
+        exec 1> >( awk '
+                {
+                    cmd ="date +\"%Y-%m-%d %H:%M:%S \""
+                    cmd | getline now
+                    close("date +\"%Y-%m-%d %H:%M:%S \"")
+                    sub(/^/, now)
+                    print
+                    fflush()
+                }' | tee "${LOGFILE}" ) 2>&1
+        # Set up a second fd for output
+        exec 6> >( tee "${SUMFILE}" )
+    else
+        # Set fd 1 and 2 to primary logfile
+        exec 1> "${LOGFILE}" 2>&1
+        # Set fd 6 to summary logfile and stdout
+        exec 6> >( tee "${SUMFILE}" /dev/fd/3 )
+    fi
+
+    echo_summary "stack.sh log $LOGFILE"
+    # Specified logfile name always links to the most recent log
+    ln -sf $LOGFILE $LOGDIR/$LOGNAME
+    ln -sf $SUMFILE $LOGDIR/$LOGNAME.summary
+else
+    # Set up output redirection without log files
+    # Copy stdout to fd 3
+    exec 3>&1
+    if [[ "$VERBOSE" != "True" ]]; then
+        # Throw away stdout and stderr
+        exec 1>/dev/null 2>&1
+    fi
+    # Always send summary fd to original stdout
+    exec 6>&3
+fi
+
+# Set up logging of screen windows
+# Set ``SCREEN_LOGDIR`` to turn on logging of screen windows to the
+# directory specified in ``SCREEN_LOGDIR``; we will log to the file
+# ``screen-$SERVICE_NAME-$TIMESTAMP.log`` in that dir and have a link
+# ``screen-$SERVICE_NAME.log`` to the latest log file.
+# Logs are kept for as long as specified in ``LOGDAYS``.
+if [[ -n "$SCREEN_LOGDIR" ]]; then
+
+    # We make sure the directory is created.
+    if [[ -d "$SCREEN_LOGDIR" ]]; then
+        # We cleanup the old logs
+        find $SCREEN_LOGDIR -maxdepth 1 -name screen-\*.log -mtime +$LOGDAYS -exec rm {} \;
+    else
+        mkdir -p $SCREEN_LOGDIR
+    fi
+fi
+
+
+# Set Up Script Execution
+# -----------------------
+
+# Kill background processes on exit
+trap clean EXIT
+clean() {
+    local r=$?
+    kill >/dev/null 2>&1 $(jobs -p)
+    exit $r
+}
+
+
+# Exit on any errors so that errors don't compound
 trap failed ERR
 failed() {
     local r=$?
+    kill >/dev/null 2>&1 $(jobs -p)
     set +o xtrace
     [ -n "$LOGFILE" ] && echo "${0##*/} failed: full log in $LOGFILE"
     exit $r
@@ -354,769 +623,736 @@ failed() {
 # an error.  It is also useful for following along as the install occurs.
 set -o xtrace

-# create the destination directory and ensure it is writable by the user
-sudo mkdir -p $DEST
-if [ ! -w $DEST ]; then
-    sudo chown `whoami` $DEST
-fi

 # Install Packages
 # ================
-#
-# Openstack uses a fair number of other projects.
-
-
-# install apt requirements
-apt_get update
-apt_get install `cat $FILES/apts/* | cut -d\# -f1 | grep -Ev "mysql-server|rabbitmq-server|memcached"`
-
-# install python requirements
-sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors `cat $FILES/pips/*`
-
-# git clone only if directory doesn't exist already.  Since ``DEST`` might not
-# be owned by the installation user, we create the directory and change the
-# ownership to the proper user.
-function git_clone {
-
-    GIT_REMOTE=$1
-    GIT_DEST=$2
-    GIT_BRANCH=$3
-
-    # do a full clone only if the directory doesn't exist
-    if [ !
-d $GIT_DEST ]; then - git clone $GIT_REMOTE $GIT_DEST - cd $2 - # This checkout syntax works for both branches and tags - git checkout $GIT_BRANCH - elif [[ "$RECLONE" == "yes" ]]; then - # if it does exist then simulate what clone does if asked to RECLONE - cd $GIT_DEST - # set the url to pull from and fetch - git remote set-url origin $GIT_REMOTE - git fetch origin - # remove the existing ignored files (like pyc) as they cause breakage - # (due to the py files having older timestamps than our pyc, so python - # thinks the pyc files are correct using them) - find $GIT_DEST -name '*.pyc' -delete - git checkout -f origin/$GIT_BRANCH - # a local branch might not exist - git branch -D $GIT_BRANCH || true - git checkout -b $GIT_BRANCH - fi -} -# compute service -git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH -# storage service -git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH -# swift + keystone middleware -git_clone $SWIFT_KEYSTONE_REPO $SWIFT_KEYSTONE_DIR $SWIFT_KEYSTONE_BRANCH -# image catalog service -git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH -# unified auth system (manages accounts/tokens) -git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH -# a websockets/html5 or flash powered VNC console for vm instances -git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH -# django powered web control panel for openstack -git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG -# python client library to nova that horizon (and others) use -git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH -# openstackx is a collection of extensions to openstack.compute & nova -# that is *deprecated*. The code is being moved into python-novaclient & nova. -git_clone $OPENSTACKX_REPO $OPENSTACKX_DIR $OPENSTACKX_BRANCH -# quantum -git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH +# OpenStack uses a fair number of other projects. -# Initialization -# ============== +# Install package requirements +# Source it so the entire environment is available +echo_summary "Installing package prerequisites" +source $TOP_DIR/tools/install_prereqs.sh +install_rpc_backend -# setup our checkouts so they are installed into python path -# allowing ``import nova`` or ``import glance.client`` -cd $KEYSTONE_DIR; sudo python setup.py develop -cd $SWIFT_DIR; sudo python setup.py develop -cd $SWIFT_KEYSTONE_DIR; sudo python setup.py develop -cd $GLANCE_DIR; sudo python setup.py develop -cd $NOVACLIENT_DIR; sudo python setup.py develop -cd $NOVA_DIR; sudo python setup.py develop -cd $OPENSTACKX_DIR; sudo python setup.py develop -cd $HORIZON_DIR/django-openstack; sudo python setup.py develop -cd $HORIZON_DIR/openstack-dashboard; sudo python setup.py develop -cd $QUANTUM_DIR; sudo python setup.py develop - -# Add a useful screenrc. 
-# This isn't required to run OpenStack, but we do
-# it since we are going to run the services in screen for simplicity.
-cp $FILES/screenrc ~/.screenrc
-
-# Rabbit
-# ---------
-
-if [[ "$ENABLED_SERVICES" =~ "rabbit" ]]; then
-    # Install and start rabbitmq-server
-    # the temp file is necessary due to LP: #878600
-    tfile=$(mktemp)
-    apt_get install rabbitmq-server > "$tfile" 2>&1
-    cat "$tfile"
-    rm -f "$tfile"
-    # change the rabbit password since the default is "guest"
-    sudo rabbitmqctl change_password guest $RABBIT_PASSWORD
+if is_service_enabled $DATABASE_BACKENDS; then
+    install_database
 fi

-# Mysql
-# ---------
+if is_service_enabled q-agt; then
+    install_quantum_agent_packages
+fi

-if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then
-
-    # Seed configuration with mysql password so that apt-get install doesn't
-    # prompt us for a password upon install.
-    if [[ ! -e $HOME/.my.cnf ]]; then
-        cat <<EOF >$HOME/.my.cnf
-[client]
-user=$MYSQL_USER
-password=$MYSQL_PASSWORD
-host=$MYSQL_HOST
-EOF
-        chmod 0600 $HOME/.my.cnf
-    fi
+TRACK_DEPENDS=${TRACK_DEPENDS:-False}

-    # Install and start mysql-server
-    apt_get install mysql-server
-    # Update the DB to give user '$MYSQL_USER'@'%' full control of all databases:
-    sudo mysql -uroot -p$MYSQL_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$MYSQL_USER'@'%' identified by '$MYSQL_PASSWORD';"
+# Install python packages into a virtualenv so that we can track them
+if [[ $TRACK_DEPENDS = True ]] ; then
+    echo_summary "Installing Python packages into a virtualenv $DEST/.venv"
+    install_package python-virtualenv

-    # Edit /etc/mysql/my.cnf to change 'bind-address' from localhost (127.0.0.1) to any (0.0.0.0) and restart the mysql service:
-    sudo sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
-    sudo service mysql restart
+    rm -rf $DEST/.venv
+    virtualenv --system-site-packages $DEST/.venv
+    source $DEST/.venv/bin/activate
+    $DEST/.venv/bin/pip freeze > $DEST/requires-pre-pip
 fi

-# Horizon
-# ---------
+# Check Out Source
+# ----------------

-# Setup the django horizon application to serve via apache/wsgi
+echo_summary "Installing OpenStack project source"

-if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then
+# Grab clients first
+install_keystoneclient
+install_glanceclient
+install_novaclient
+# Check out the client libs that are used most
+git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH

-    # Horizon currently imports quantum even if you aren't using it.  Instead
-    # of installing quantum we can create a simple module that will pass the
-    # initial imports
-    mkdir -p $HORIZON_DIR/openstack-dashboard/quantum || true
-    touch $HORIZON_DIR/openstack-dashboard/quantum/__init__.py
-    touch $HORIZON_DIR/openstack-dashboard/quantum/client.py
+# glance, swift middleware and nova api need keystone middleware
+if is_service_enabled key g-api n-api swift; then
+    # unified auth system (manages accounts/tokens)
+    install_keystone
+fi
+if is_service_enabled swift; then
+    install_swiftclient
+    install_swift
+    if is_service_enabled swift3; then
+        # swift3 middleware to provide S3 emulation to Swift
+        git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH
+    fi
+fi

-    # ``local_settings.py`` is used to override horizon default settings.
- cp $FILES/horizon_settings.py $HORIZON_DIR/openstack-dashboard/local/local_settings.py +if is_service_enabled g-api n-api; then + # image catalog service + install_glance +fi +if is_service_enabled nova; then + # compute service + install_nova +fi +if is_service_enabled n-novnc; then + # a websockets/html5 or flash powered VNC console for vm instances + git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH +fi +if is_service_enabled n-spice; then + # a websockets/html5 or flash powered SPICE console for vm instances + git_clone $SPICE_REPO $SPICE_DIR $SPICE_BRANCH +fi +if is_service_enabled horizon; then + # dashboard + install_horizon +fi +if is_service_enabled quantum; then + install_quantum + install_quantumclient + install_quantum_third_party +fi +if is_service_enabled heat; then + install_heat + install_heatclient +fi +if is_service_enabled cinder; then + install_cinder +fi +if is_service_enabled ceilometer; then + install_ceilometerclient + install_ceilometer +fi - # Initialize the horizon database (it stores sessions and notices shown to - # users). The user system is external (keystone). - cd $HORIZON_DIR/openstack-dashboard - dashboard/manage.py syncdb - # create an empty directory that apache uses as docroot - sudo mkdir -p $HORIZON_DIR/.blackhole +# Initialization +# ============== - ## Configure apache's 000-default to run horizon - sudo cp $FILES/000-default.template /etc/apache2/sites-enabled/000-default - sudo sed -e "s,%USER%,$USER,g" -i /etc/apache2/sites-enabled/000-default - sudo sed -e "s,%HORIZON_DIR%,$HORIZON_DIR,g" -i /etc/apache2/sites-enabled/000-default - sudo service apache2 restart -fi +echo_summary "Configuring OpenStack projects" +# Set up our checkouts so they are installed into python path +# allowing ``import nova`` or ``import glance.client`` +configure_keystoneclient +configure_novaclient +setup_develop $OPENSTACKCLIENT_DIR +if is_service_enabled key g-api n-api swift; then + configure_keystone +fi +if is_service_enabled swift; then + configure_swift + configure_swiftclient + if is_service_enabled swift3; then + setup_develop $SWIFT3_DIR + fi +fi +if is_service_enabled g-api n-api; then + configure_glance +fi -# Glance -# ------ +# Do this _after_ glance is installed to override the old binary +# TODO(dtroyer): figure out when this is no longer necessary +configure_glanceclient -if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then - GLANCE_IMAGE_DIR=$DEST/glance/images - # Delete existing images - rm -rf $GLANCE_IMAGE_DIR +if is_service_enabled nova; then + configure_nova +fi +if is_service_enabled horizon; then + configure_horizon +fi +if is_service_enabled quantum; then + setup_quantumclient + setup_quantum +fi +if is_service_enabled heat; then + configure_heat + configure_heatclient +fi +if is_service_enabled cinder; then + configure_cinder +fi - # Use local glance directories - mkdir -p $GLANCE_IMAGE_DIR +if [[ $TRACK_DEPENDS = True ]] ; then + $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip + if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff ; then + cat $DEST/requires.diff + fi + echo "Ran stack.sh in depend tracking mode, bailing out now" + exit 0 +fi - # (re)create glance database - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS glance;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE glance;' +if is_service_enabled tls-proxy; then + configure_CA + init_CA + init_cert + # Add name to /etc/hosts + # don't be naive and add to existing line! 
+fi

-    # Copy over our glance configurations and update them
-    GLANCE_CONF=$GLANCE_DIR/etc/glance-registry.conf
-    cp $FILES/glance-registry.conf $GLANCE_CONF
-    sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/glance,g" -i $GLANCE_CONF
-    sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $GLANCE_CONF
-    sudo sed -e "s,%DEST%,$DEST,g" -i $GLANCE_CONF
-    sudo sed -e "s,%SYSLOG%,$SYSLOG,g" -i $GLANCE_CONF
+# Syslog
+# ------

-    GLANCE_API_CONF=$GLANCE_DIR/etc/glance-api.conf
-    cp $FILES/glance-api.conf $GLANCE_API_CONF
-    sudo sed -e "s,%DEST%,$DEST,g" -i $GLANCE_API_CONF
-    sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $GLANCE_API_CONF
-    sudo sed -e "s,%SYSLOG%,$SYSLOG,g" -i $GLANCE_API_CONF
+if [[ $SYSLOG != "False" ]]; then
+    if [[ "$SYSLOG_HOST" = "$HOST_IP" ]]; then
+        # Configure the master host to receive
+        cat <<EOF >/tmp/90-stack-m.conf
+\$ModLoad imrelp
+\$InputRELPServerRun $SYSLOG_PORT
+EOF
+        sudo mv /tmp/90-stack-m.conf /etc/rsyslog.d
+    else
+        # Set rsyslog to send to remote host
+        cat <<EOF >/tmp/90-stack-s.conf
+*.* :omrelp:$SYSLOG_HOST:$SYSLOG_PORT
+EOF
+        sudo mv /tmp/90-stack-s.conf /etc/rsyslog.d
+    fi
+    echo_summary "Starting rsyslog"
+    restart_service rsyslog
 fi

-# Nova
-# ----

-if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
-    # We are going to use a sample http middleware configuration based on the
-    # one from the keystone project to launch nova.  This paste config adds
-    # the configuration required for nova to validate keystone tokens.  We add
-    # our own service token to the configuration.
-    cp $FILES/nova-api-paste.ini $NOVA_DIR/bin
-    sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
+# Finalize queue installation
+# ---------------------------
+restart_rpc_backend
+
+
+# Configure database
+# ------------------
+
+if is_service_enabled $DATABASE_BACKENDS; then
+    configure_database
 fi

-if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then

-    # Virtualization Configuration
-    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# Configure screen
+# ----------------

-    # attempt to load modules: network block device - used to manage qcow images
-    sudo modprobe nbd || true
+if [ -z "$SCREEN_HARDSTATUS" ]; then
+    SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
+fi

-    # Check for kvm (hardware based virtualization).  If unable to initialize
-    # kvm, we drop back to the slower emulation mode (qemu).  Note: many systems
-    # come with hardware virtualization disabled in BIOS.
-    if [[ "$LIBVIRT_TYPE" == "kvm" ]]; then
-        sudo modprobe kvm || true
-        if [ ! -e /dev/kvm ]; then
-            echo "WARNING: Switching to QEMU"
-            LIBVIRT_TYPE=qemu
-        fi
-    fi
+# Clear screen rc file
+SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc
+if [[ -e $SCREENRC ]]; then
+    echo -n > $SCREENRC
+fi

-    # Install and configure **LXC** if specified.  LXC is another approach to
-    # splitting a system into many smaller parts.  LXC uses cgroups and chroot
-    # to simulate multiple systems.
-    if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
-        apt_get install lxc
-        # lxc uses cgroups (a kernel interface via virtual filesystem) configured
-        # and mounted to ``/cgroup``
-        sudo mkdir -p /cgroup
-        if ! grep -q cgroup /etc/fstab; then
-            echo none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0 | sudo tee -a /etc/fstab
-        fi
-        if ! mount -n | grep -q cgroup; then
-            sudo mount /cgroup
-        fi
-    fi
+# Create a new named screen to run processes in
+screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash
+sleep 1

-    # The user that nova runs as needs to be member of libvirtd group otherwise
-    # nova-compute will be unable to use libvirt.
- sudo usermod -a -G libvirtd `whoami` - # libvirt detects various settings on startup, as we potentially changed - # the system configuration (modules, filesystems), we need to restart - # libvirt to detect those changes. - sudo /etc/init.d/libvirt-bin restart +# Set a reasonable status bar +screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" +# Initialize the directory for service status check +init_service_check - # Instance Storage - # ~~~~~~~~~~~~~~~~ +# Keystone +# -------- - # Nova stores each instance in its own directory. - mkdir -p $NOVA_DIR/instances +if is_service_enabled key; then + echo_summary "Starting Keystone" + init_keystone + start_keystone - # You can specify a different disk to be mounted and used for backing the - # virtual machines. If there is a partition labeled nova-instances we - # mount it (ext filesystems can be labeled via e2label). - if [ -L /dev/disk/by-label/nova-instances ]; then - if ! mount -n | grep -q nova-instances; then - sudo mount -L nova-instances $NOVA_DIR/instances - sudo chown -R `whoami` $NOVA_DIR/instances - fi + # Set up a temporary admin URI for Keystone + SERVICE_ENDPOINT=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 + + if is_service_enabled tls-proxy; then + export OS_CACERT=$INT_CA_DIR/ca-chain.pem + # Until the client support is fixed, just use the internal endpoint + SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0 fi - # Clean out the instances directory. - sudo rm -rf $NOVA_DIR/instances/* + # Do the keystone-specific bits from keystone_data.sh + export OS_SERVICE_TOKEN=$SERVICE_TOKEN + export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT + create_keystone_accounts + create_nova_accounts + create_cinder_accounts + create_quantum_accounts + + # ``keystone_data.sh`` creates services, admin and demo users, and roles. + ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ + SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ + S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \ + DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES HEAT_API_CFN_PORT=$HEAT_API_CFN_PORT \ + HEAT_API_PORT=$HEAT_API_PORT \ + bash -x $FILES/keystone_data.sh + + # Set up auth creds now that keystone is bootstrapped + export OS_AUTH_URL=$SERVICE_ENDPOINT + export OS_TENANT_NAME=admin + export OS_USERNAME=admin + export OS_PASSWORD=$ADMIN_PASSWORD + unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT fi -if [[ "$ENABLED_SERVICES" =~ "n-net" ]]; then - # delete traces of nova networks from prior runs - sudo killall dnsmasq || true - rm -rf $NOVA_DIR/networks - mkdir -p $NOVA_DIR/networks + +# Horizon +# ------- + +# Set up the django horizon application to serve via apache/wsgi + +if is_service_enabled horizon; then + echo_summary "Configuring and starting Horizon" + init_horizon + start_horizon fi -# Storage Service -if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then - # We first do a bit of setup by creating the directories and - # changing the permissions so we can run it as our user. - - USER_GROUP=$(id -g) - sudo mkdir -p ${SWIFT_DATA_LOCATION}/drives - sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_LOCATION}/drives - - # We then create a loopback disk and format it to XFS. - if [[ ! 
-e ${SWIFT_DATA_LOCATION}/drives/images/swift.img ]];then - mkdir -p ${SWIFT_DATA_LOCATION}/drives/images - sudo touch ${SWIFT_DATA_LOCATION}/drives/images/swift.img - sudo chown $USER: ${SWIFT_DATA_LOCATION}/drives/images/swift.img - - dd if=/dev/zero of=${SWIFT_DATA_LOCATION}/drives/images/swift.img \ - bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} - mkfs.xfs -f -i size=1024 ${SWIFT_DATA_LOCATION}/drives/images/swift.img - fi - # After the drive being created we mount the disk with a few mount - # options to make it most efficient as possible for swift. - mkdir -p ${SWIFT_DATA_LOCATION}/drives/sdb1 - if ! egrep -q ${SWIFT_DATA_LOCATION}/drives/sdb1 /proc/mounts;then - sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ - ${SWIFT_DATA_LOCATION}/drives/images/swift.img ${SWIFT_DATA_LOCATION}/drives/sdb1 - fi +# Glance +# ------ - # We then create link to that mounted location so swift would know - # where to go. - for x in {1..4}; do sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done - - # We now have to emulate a few different servers into one we - # create all the directories needed for swift - tmpd="" - for d in ${SWIFT_DATA_LOCATION}/drives/sdb1/{1..4} \ - ${SWIFT_CONFIG_LOCATION}/{object,container,account}-server \ - ${SWIFT_DATA_LOCATION}/{1..4}/node/sdb1 /var/run/swift ;do - [[ -d $d ]] && continue - sudo install -o ${USER} -g $USER_GROUP -d $d - done +if is_service_enabled g-reg; then + echo_summary "Configuring Glance" - # We do want to make sure this is all owned by our user. - sudo chown -R $USER: ${SWIFT_DATA_LOCATION}/{1..4}/node - sudo chown -R $USER: ${SWIFT_CONFIG_LOCATION} - - # swift-init has a bug using /etc/swift until bug #885595 is fixed - # we have to create a link - sudo ln -sf ${SWIFT_CONFIG_LOCATION} /etc/swift - - # Swift use rsync to syncronize between all the different - # partitions (which make more sense when you have a multi-node - # setup) we configure it with our version of rsync. - sed -e "s/%GROUP%/${USER_GROUP}/;s/%USER%/$USER/;s,%SWIFT_DATA_LOCATION%,$SWIFT_DATA_LOCATION," $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf - sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync - - # By default Swift will be installed with the tempauth middleware - # which has some default username and password if you have - # configured keystone it will checkout the directory. - if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - swift_auth_server=keystone - - # We install the memcache server as this is will be used by the - # middleware to cache the tokens auths for a long this is needed. - apt_get install memcached - - # We need a special version of bin/swift which understand the - # OpenStack api 2.0, we download it until this is getting - # integrated in swift. - sudo curl -s -o/usr/local/bin/swift \ - 'https://review.openstack.org/gitweb?p=openstack/swift.git;a=blob_plain;f=bin/swift;hb=48bfda6e2fdf3886c98bd15649887d54b9a2574e' - else - swift_auth_server=tempauth - fi - - # We do the install of the proxy-server and swift configuration - # replacing a few directives to match our configuration. 
- sed "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s/%USER%/$USER/;s/%SERVICE_TOKEN%/${SERVICE_TOKEN}/;s/%AUTH_SERVER%/${swift_auth_server}/" \ - $FILES/swift/proxy-server.conf|sudo tee ${SWIFT_CONFIG_LOCATION}/proxy-server.conf - - sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > ${SWIFT_CONFIG_LOCATION}/swift.conf - - # We need to generate a object/account/proxy configuration - # emulating 4 nodes on different ports we have a little function - # that help us doing that. - function generate_swift_configuration() { - local server_type=$1 - local bind_port=$2 - local log_facility=$3 - local node_number - - for node_number in {1..4};do - node_path=${SWIFT_DATA_LOCATION}/${node_number} - sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \ - $FILES/swift/${server_type}-server.conf > ${SWIFT_CONFIG_LOCATION}/${server_type}-server/${node_number}.conf - bind_port=$(( ${bind_port} + 10 )) - log_facility=$(( ${log_facility} + 1 )) - done - } - generate_swift_configuration object 6010 2 - generate_swift_configuration container 6011 2 - generate_swift_configuration account 6012 2 + init_glance - # We create two helper scripts : - # - # - swift-remakerings - # Allow to recreate rings from scratch. - # - swift-startmain - # Restart your full cluster. - # - sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s/%SWIFT_PARTITION_POWER_SIZE%/$SWIFT_PARTITION_POWER_SIZE/" $FILES/swift/swift-remakerings | \ - sudo tee /usr/local/bin/swift-remakerings - sudo install -m755 $FILES/swift/swift-startmain /usr/local/bin/ - sudo chmod +x /usr/local/bin/swift-* + # Store the images in swift if enabled. + if is_service_enabled swift; then + iniset $GLANCE_API_CONF DEFAULT default_store swift + iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ + iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance + iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD + iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True + fi +fi - # We then can start rsync. - sudo /etc/init.d/rsync restart || : - # Create our ring for the object/container/account. - /usr/local/bin/swift-remakerings +# Quantum +# ------- - # And now we launch swift-startmain to get our cluster running - # ready to be tested. - /usr/local/bin/swift-startmain || : +if is_service_enabled quantum; then + echo_summary "Configuring Quantum" - unset s swift_hash swift_auth_server tmpd + configure_quantum + init_quantum fi -# Volume Service -# -------------- +# Some Quantum plugins require network controllers which are not +# a part of the OpenStack project. Configure and start them. +if is_service_enabled quantum; then + configure_quantum_third_party + init_quantum_third_party + start_quantum_third_party +fi -if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then - # - # Configure a default volume group called 'nova-volumes' for the nova-volume - # service if it does not yet exist. If you don't wish to use a file backed - # volume group, create your own volume group called 'nova-volumes' before - # invoking stack.sh. - # - # By default, the backing file is 2G in size, and is stored in /opt/stack. - # - if ! 
sudo vgdisplay | grep -q $VOLUME_GROUP; then - VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file} - VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M} - truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE - DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` - sudo vgcreate $VOLUME_GROUP $DEV - fi - # Configure iscsitarget - sudo sed 's/ISCSITARGET_ENABLE=false/ISCSITARGET_ENABLE=true/' -i /etc/default/iscsitarget - sudo /etc/init.d/iscsitarget restart -fi +# Nova +# ---- -function add_nova_flag { - echo "$1" >> $NOVA_DIR/bin/nova.conf -} +if is_service_enabled nova; then + echo_summary "Configuring Nova" + configure_nova +fi -# (re)create nova.conf -rm -f $NOVA_DIR/bin/nova.conf -add_nova_flag "--verbose" -add_nova_flag "--nodaemon" -add_nova_flag "--allow_admin_api" -add_nova_flag "--scheduler_driver=$SCHEDULER" -add_nova_flag "--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf" -add_nova_flag "--fixed_range=$FIXED_RANGE" -if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - add_nova_flag "--network_manager=nova.network.quantum.manager.QuantumManager" - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - add_nova_flag "--libvirt_vif_type=ethernet" - add_nova_flag "--libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver" +if is_service_enabled n-net q-dhcp; then + # Delete traces of nova networks from prior runs + # Do not kill any dnsmasq instance spawned by NetworkManager + netman_pid=$(pidof NetworkManager || true) + if [ -z "$netman_pid" ]; then + sudo killall dnsmasq || true + else + sudo ps h -o pid,ppid -C dnsmasq | grep -v $netman_pid | awk '{print $1}' | sudo xargs kill || true fi -else - add_nova_flag "--network_manager=nova.network.manager.$NET_MAN" -fi -if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then - add_nova_flag "--volume_group=$VOLUME_GROUP" -fi -add_nova_flag "--my_ip=$HOST_IP" -add_nova_flag "--public_interface=$PUBLIC_INTERFACE" -add_nova_flag "--vlan_interface=$VLAN_INTERFACE" -add_nova_flag "--sql_connection=$BASE_SQL_CONN/nova" -add_nova_flag "--libvirt_type=$LIBVIRT_TYPE" -add_nova_flag "--osapi_extensions_path=$OPENSTACKX_DIR/extensions" -add_nova_flag "--vncproxy_url=http://$HOST_IP:6080" -add_nova_flag "--vncproxy_wwwroot=$NOVNC_DIR/" -add_nova_flag "--api_paste_config=$NOVA_DIR/bin/nova-api-paste.ini" -add_nova_flag "--image_service=nova.image.glance.GlanceImageService" -add_nova_flag "--ec2_dmz_host=$EC2_DMZ_HOST" -add_nova_flag "--rabbit_host=$RABBIT_HOST" -add_nova_flag "--rabbit_password=$RABBIT_PASSWORD" -add_nova_flag "--glance_api_servers=$GLANCE_HOSTPORT" -add_nova_flag "--force_dhcp_release" -if [ -n "$INSTANCES_PATH" ]; then - add_nova_flag "--instances_path=$INSTANCES_PATH" -fi -if [ "$MULTI_HOST" != "False" ]; then - add_nova_flag "--multi_host" - add_nova_flag "--send_arp_for_ha" -fi -if [ "$SYSLOG" != "False" ]; then - add_nova_flag "--use_syslog" -fi - -# XenServer -# --------- -if [ "$VIRT_DRIVER" = 'xenserver' ]; then - read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." 
-    add_nova_flag "--connection_type=xenapi"
-    add_nova_flag "--xenapi_connection_url=http://169.254.0.1"
-    add_nova_flag "--xenapi_connection_username=root"
-    add_nova_flag "--xenapi_connection_password=$XENAPI_PASSWORD"
-    add_nova_flag "--flat_injected=False"
-    add_nova_flag "--flat_interface=eth1"
-    add_nova_flag "--flat_network_bridge=xenbr1"
-    add_nova_flag "--public_interface=eth3"
-else
-    add_nova_flag "--flat_network_bridge=$FLAT_NETWORK_BRIDGE"
-    if [ -n "$FLAT_INTERFACE" ]; then
-        add_nova_flag "--flat_interface=$FLAT_INTERFACE"
-    fi
+    clean_iptables
+    rm -rf ${NOVA_STATE_PATH}/networks
+    sudo mkdir -p ${NOVA_STATE_PATH}/networks
+    sudo chown -R ${USER} ${NOVA_STATE_PATH}/networks
+    # Force IP forwarding on, just in case
+    sudo sysctl -w net.ipv4.ip_forward=1
 fi

-# Nova Database
-# ~~~~~~~~~~~~~
-# All nova components talk to a central database.  We will need to do this step
-# only once for an entire cluster.
+# Storage Service
+# ---------------
+
+if is_service_enabled swift; then
+    echo_summary "Configuring Swift"
+    init_swift
+fi
+

-if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then
-    # (re)create nova database
-    mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS nova;'
-    mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE nova;'
+# Volume Service
+# --------------

-    # (re)create nova database
-    $NOVA_DIR/bin/nova-manage db sync
+if is_service_enabled cinder; then
+    echo_summary "Configuring Cinder"
+    init_cinder
 fi

+if is_service_enabled nova; then
+    echo_summary "Configuring Nova"
+    # Rebuild the config file from scratch
+    create_nova_conf
+    init_nova
+
+    # Additional Nova configuration that is dependent on other services
+    if is_service_enabled quantum; then
+        create_nova_conf_quantum
+    elif is_service_enabled n-net; then
+        create_nova_conf_nova_network
+    fi
+    # All nova-compute workers need to know the vnc configuration options
+    # These settings don't hurt anything if n-xvnc and n-novnc are disabled
+    if is_service_enabled n-cpu; then
+        NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
+        iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL"
+        XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
+        iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL"
+        SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"}
+        iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
+    fi
+    if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
+    else
+        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
+    fi

+    if is_service_enabled n-novnc || is_service_enabled n-xvnc ; then
+        # Address on which instance vncservers will listen on compute hosts.
+        # For multi-host, this should be the management IP of the compute host.
+ VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} + iniset $NOVA_CONF DEFAULT vnc_enabled true + iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" + iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + else + iniset $NOVA_CONF DEFAULT vnc_enabled false + fi -if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - # (re)create keystone database - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;' - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone;' + if is_service_enabled n-spice; then + # Address on which instance spiceservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. + SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1} + SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1} + iniset $NOVA_CONF spice enabled true + iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN" + iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" + else + iniset $NOVA_CONF spice enabled false + fi - # FIXME (anthony) keystone should use keystone.conf.example - KEYSTONE_CONF=$KEYSTONE_DIR/etc/keystone.conf - cp $FILES/keystone.conf $KEYSTONE_CONF - sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/keystone,g" -i $KEYSTONE_CONF - sudo sed -e "s,%DEST%,$DEST,g" -i $KEYSTONE_CONF + iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" + iniset_rpc_backend nova $NOVA_CONF DEFAULT + iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" + + + # XenServer + # --------- + + if [ "$VIRT_DRIVER" = 'xenserver' ]; then + echo_summary "Using XenServer virtualization driver" + read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." + iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver" + XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"} + XENAPI_USER=${XENAPI_USER:-"root"} + iniset $NOVA_CONF DEFAULT xenapi_connection_url "$XENAPI_CONNECTION_URL" + iniset $NOVA_CONF DEFAULT xenapi_connection_username "$XENAPI_USER" + iniset $NOVA_CONF DEFAULT xenapi_connection_password "$XENAPI_PASSWORD" + iniset $NOVA_CONF DEFAULT flat_injected "False" + # Need to avoid crash due to new firewall support + XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} + iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER" + + # OpenVZ + # ------ + + elif [ "$VIRT_DRIVER" = 'openvz' ]; then + echo_summary "Using OpenVZ virtualization driver" + iniset $NOVA_CONF DEFAULT compute_driver "openvz.driver.OpenVzDriver" + iniset $NOVA_CONF DEFAULT connection_type "openvz" + LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} + iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" + + # Bare Metal + # ---------- + + elif [ "$VIRT_DRIVER" = 'baremetal' ]; then + echo_summary "Using BareMetal driver" + LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"} + iniset $NOVA_CONF DEFAULT compute_driver nova.virt.baremetal.driver.BareMetalDriver + iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER + iniset $NOVA_CONF DEFAULT scheduler_host_manager nova.scheduler.baremetal_host_manager.BaremetalHostManager + # NOTE(deva): ComputeCapabilitiesFilter does not currently work with Baremetal. See bug # 1129485 + # As a work around, we disable CCFilter by explicitly enabling all the other default filters. 
+ iniset $NOVA_CONF DEFAULT scheduler_default_filters ComputeFilter,RetryFilter,AvailabilityZoneFilter,ImagePropertiesFilter + iniset $NOVA_CONF baremetal instance_type_extra_specs cpu_arch:$BM_CPU_ARCH + iniset $NOVA_CONF baremetal driver $BM_DRIVER + iniset $NOVA_CONF baremetal power_manager $BM_POWER_MANAGER + iniset $NOVA_CONF baremetal tftp_root /tftpboot + + # Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``. + for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do + # Attempt to convert flags to options + iniset $NOVA_CONF baremetal ${I//=/ } + done + + # Default + # ------- - # keystone_data.sh creates our admin user and our ``SERVICE_TOKEN``. - KEYSTONE_DATA=$KEYSTONE_DIR/bin/keystone_data.sh - cp $FILES/keystone_data.sh $KEYSTONE_DATA - sudo sed -e "s,%HOST_IP%,$HOST_IP,g" -i $KEYSTONE_DATA - sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $KEYSTONE_DATA - sudo sed -e "s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g" -i $KEYSTONE_DATA - # initialize keystone with default users/endpoints - BIN_DIR=$KEYSTONE_DIR/bin bash $KEYSTONE_DATA + else + echo_summary "Using libvirt virtualization driver" + iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver" + LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} + iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" + fi fi +# Extra things to prepare nova for baremetal, before nova starts +if is_service_enabled nova && is_baremetal; then + echo_summary "Preparing for nova baremetal" + prepare_baremetal_toolchain + configure_baremetal_nova_dirs + if [[ "$BM_USE_FAKE_ENV" = "True" ]]; then + create_fake_baremetal_env + fi +fi # Launch Services # =============== -# nova api crashes if we start it with a regular screen command, -# so send the start command by forcing text into the window. # Only run the services specified in ``ENABLED_SERVICES`` -# our screen helper to launch a service in a hidden named screen -function screen_it { - NL=`echo -ne '\015'` - if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then - screen -S stack -X screen -t $1 - # sleep to allow bash to be ready to be send the command - we are - # creating a new window in screen and then sends characters, so if - # bash isn't running by the time we send the command, nothing happens - sleep 1 - screen -S stack -p $1 -X stuff "$2$NL" - fi -} - -# create a new named screen to run processes in -screen -d -m -S stack -t stack -sleep 1 - -# launch the glance registry service -if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then - screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=etc/glance-registry.conf" +# Launch Swift Services +if is_service_enabled swift; then + echo_summary "Starting Swift" + start_swift fi -# launch the glance api and wait for it to answer before continuing -if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then - screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=etc/glance-api.conf" - echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then - echo "g-api did not start" - exit 1 - fi +# Launch the Glance services +if is_service_enabled g-api g-reg; then + echo_summary "Starting Glance" + start_glance fi -# launch the keystone and wait for it to answer before continuing -if [[ "$ENABLED_SERVICES" =~ "key" ]]; then - screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF -d" - echo "Waiting for keystone to start..." - if ! 
timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:5000; do sleep 1; done"; then - echo "keystone did not start" - exit 1 - fi +# Create an access key and secret key for nova ec2 register image +if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then + NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1) + NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1) + CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID) + ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') + SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') + iniset $NOVA_CONF DEFAULT s3_access_key "$ACCESS_KEY" + iniset $NOVA_CONF DEFAULT s3_secret_key "$SECRET_KEY" + iniset $NOVA_CONF DEFAULT s3_affix_tenant "True" fi -# launch the nova-api and wait for it to answer before continuing -if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then - screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api" - echo "Waiting for nova-api to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then - echo "nova-api did not start" - exit 1 - fi +screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver" + +# Launch the nova-api and wait for it to answer before continuing +if is_service_enabled n-api; then + echo_summary "Starting Nova API" + start_nova_api fi -# Quantum -if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - # Install deps - # FIXME add to files/apts/quantum, but don't install if not needed! - apt_get install openvswitch-switch openvswitch-datapath-dkms - - # Create database for the plugin/agent - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then - mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum;' - else - echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin." 
- exit 1 - fi - fi +if is_service_enabled q-svc; then + echo_summary "Starting Quantum" + + start_quantum_service_and_check + create_quantum_initial_network + setup_quantum_debug +elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then + # Create a small network + $NOVA_BIN_DIR/nova-manage network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS + + # Create some floating ips + $NOVA_BIN_DIR/nova-manage floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK_NAME - QUANTUM_PLUGIN_INI_FILE=$QUANTUM_DIR/quantum/plugins.ini - # Make sure we're using the openvswitch plugin - sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE - screen_it q-svc "cd $QUANTUM_DIR && export PYTHONPATH=.:$PYTHONPATH; python $QUANTUM_DIR/bin/quantum $QUANTUM_DIR/etc/quantum.conf" + # Create a second pool + $NOVA_BIN_DIR/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL fi -# Quantum agent (for compute nodes) -if [[ "$ENABLED_SERVICES" =~ "q-agt" ]]; then - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - # Set up integration bridge - OVS_BRIDGE=${OVS_BRIDGE:-br-int} - sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE - sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE - sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int - fi +if is_service_enabled quantum; then + start_quantum_agents +fi +if is_service_enabled nova; then + echo_summary "Starting Nova" + start_nova +fi +if is_service_enabled cinder; then + echo_summary "Starting Cinder" + start_cinder +fi +if is_service_enabled ceilometer; then + echo_summary "Configuring Ceilometer" + configure_ceilometer + configure_ceilometerclient + echo_summary "Starting Ceilometer" + init_ceilometer + start_ceilometer +fi - # Start up the quantum <-> openvswitch agent - screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_DIR/quantum/plugins/openvswitch/ovs_quantum_plugin.ini -v" +# Starting the nova-objectstore only if swift3 service is not enabled. +# Swift will act as s3 objectstore. +is_service_enabled swift3 || \ + screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore" + + +# Configure and launch heat engine, api and metadata +if is_service_enabled heat; then + # Initialize heat, including replacing nova flavors + echo_summary "Configuring Heat" + init_heat + echo_summary "Starting Heat" + start_heat fi -# If we're using Quantum (i.e. q-svc is enabled), network creation has to -# happen after we've started the Quantum service. -if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then - # create a small network - $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE +# Create account rc files +# ======================= - if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - echo "Not creating floating IPs (not supported by QuantumManager)" - else - # create some floating ips - $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE - fi +# Creates source able script files for easier user switching. +# This step also creates certificates for tenants and users, +# which is helpful in image bundle steps. + +if is_service_enabled nova && is_service_enabled key; then + $TOP_DIR/tools/create_userrc.sh -PA --target-dir $TOP_DIR/accrc fi -# Launching nova-compute should be as simple as running ``nova-compute`` but -# have to do a little more than that in our script. 
-# Since we add the group
-# ``libvirtd`` to our user in this script, when nova-compute is run it is
-# within the context of our original shell (so our groups won't be updated).
-# Use 'sg' to execute nova-compute as a member of the libvirtd group.
-screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_DIR/bin/nova-compute"
-screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume"
-screen_it n-net "cd $NOVA_DIR && $NOVA_DIR/bin/nova-network"
-screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler"
-screen_it n-vnc "cd $NOVNC_DIR && ./utils/nova-wsproxy.py --flagfile $NOVA_DIR/bin/nova.conf --web . 6080"
-screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log"

 # Install Images
 # ==============

 # Upload an image to glance.
 #
-# The default image is a small ***TTY*** testing image, which lets you login
-# the username/password of root/password.
-#
-# TTY also uses cloud-init, supporting login via keypair and sending scripts as
+# The default image is CirrOS, a small testing image which lets you log in as **root**.
+# CirrOS also uses ``cloud-init``, supporting login via keypair and sending scripts as
 # userdata.  See https://help.ubuntu.com/community/CloudInit for more on cloud-init
 #
-# Override ``IMAGE_URLS`` with a comma-separated list of uec images.
-#
-# * **natty**: http://uec-images.ubuntu.com/natty/current/natty-server-cloudimg-amd64.tar.gz
+# Override ``IMAGE_URLS`` with a comma-separated list of UEC images.
 # * **oneiric**: http://uec-images.ubuntu.com/oneiric/current/oneiric-server-cloudimg-amd64.tar.gz
+# * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz
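``IMAGE_URLS`` accepts any comma-separated list of image archives, so the
default can be replaced wholesale; a sketch using the Precise UEC image listed
above::

    # localrc: register an Ubuntu cloud image instead of the default
    IMAGE_URLS="http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz"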
-f $FILES/tty.tgz ]; then - wget -c http://images.ansolabs.com/tty.tgz -O $FILES/tty.tgz - fi + if is_baremetal; then + echo_summary "Creating and uploading baremetal images" - tar -zxf $FILES/tty.tgz -C $FILES/images - RVAL=`glance add -A $SERVICE_TOKEN name="tty-kernel" is_public=true container_format=aki disk_format=aki < $FILES/images/aki-tty/image` - KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` - RVAL=`glance add -A $SERVICE_TOKEN name="tty-ramdisk" is_public=true container_format=ari disk_format=ari < $FILES/images/ari-tty/image` - RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "` - glance add -A $SERVICE_TOKEN name="tty" is_public=true container_format=ami disk_format=ami kernel_id=$KERNEL_ID ramdisk_id=$RAMDISK_ID < $FILES/images/ami-tty/image + # build and upload separate deploy kernel & ramdisk + upload_baremetal_deploy $TOKEN + + # upload images, separating out the kernel & ramdisk for PXE boot + for image_url in ${IMAGE_URLS//,/ }; do + upload_baremetal_image $image_url $TOKEN + done + else + echo_summary "Uploading images" + + # Option to upload legacy ami-tty, which works with xenserver + if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then + IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz" + fi + + for image_url in ${IMAGE_URLS//,/ }; do + upload_image $image_url $TOKEN + done fi +fi + +# If we are running nova with baremetal driver, there are a few +# last-mile configuration bits to attend to, which must happen +# after n-api and n-sch have started. +# Also, creating the baremetal flavor must happen after images +# are loaded into glance, though just knowing the IDs is sufficient here +if is_service_enabled nova && is_baremetal; then + # create special flavor for baremetal if we know what images to associate + [[ -n "$BM_DEPLOY_KERNEL_ID" ]] && [[ -n "$BM_DEPLOY_RAMDISK_ID" ]] && \ + create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID + + # otherwise user can manually add it later by calling nova-baremetal-manage + # otherwise user can manually add it later by calling nova-baremetal-manage + [[ -n "$BM_FIRST_MAC" ]] && add_baremetal_node + + # NOTE: we do this here to ensure that our copy of dnsmasq is running + sudo pkill dnsmasq || true + sudo dnsmasq --conf-file= --port=0 --enable-tftp --tftp-root=/tftpboot \ + --dhcp-boot=pxelinux.0 --bind-interfaces --pid-file=/var/run/dnsmasq.pid \ + --interface=$BM_DNSMASQ_IFACE --dhcp-range=$BM_DNSMASQ_RANGE \ + ${BM_DNSMASQ_DNS:+--dhcp-option=option:dns-server,$BM_DNSMASQ_DNS} + # ensure callback daemon is running + sudo pkill nova-baremetal-deploy-helper || true + screen_it baremetal "nova-baremetal-deploy-helper" +fi + +# Save some values we generated for later use +CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT") +echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv +for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \ + SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP; do + echo $i=${!i} >>$TOP_DIR/.stackenv +done - for image_url in ${IMAGE_URLS//,/ }; do - # Downloads the image (uec ami+aki style), then extracts it. - IMAGE_FNAME=`basename "$image_url"` - IMAGE_NAME=`basename "$IMAGE_FNAME" .tar.gz` - if [ ! -f $FILES/$IMAGE_FNAME ]; then - wget -c $image_url -O $FILES/$IMAGE_FNAME - fi - # Extract ami and aki files - tar -zxf $FILES/$IMAGE_FNAME -C $FILES/images +# Run extras +# ========== - # Use glance client to add the kernel the root filesystem. 
-        # We parse the results of the first upload to get the glance ID of the
-        # kernel for use when uploading the root filesystem.
-        RVAL=`glance add -A $SERVICE_TOKEN name="$IMAGE_NAME-kernel" is_public=true container_format=aki disk_format=aki < $FILES/images/$IMAGE_NAME-vmlinuz*`
-        KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
-        glance add -A $SERVICE_TOKEN name="$IMAGE_NAME" is_public=true container_format=ami disk_format=ami kernel_id=$KERNEL_ID < $FILES/images/$IMAGE_NAME.img
+if [[ -d $TOP_DIR/extras.d ]]; then
+    for i in $TOP_DIR/extras.d/*.sh; do
+        [[ -r $i ]] && source $i stack
     done
 fi
+
+# Run local script
+# ================
+
+# Run ``local.sh`` if it exists to perform user-managed tasks
+if [[ -x $TOP_DIR/local.sh ]]; then
+    echo "Running user script $TOP_DIR/local.sh"
+    $TOP_DIR/local.sh
+fi
+
+# Check the status of running services
+service_check
+
 # Fin
 # ===
+set +o xtrace

-) 2>&1 | tee "${LOGFILE}"
+if [[ -n "$LOGFILE" ]]; then
+    exec 1>&3
+    # Force all output to stdout and logs now
+    exec 1> >( tee -a "${LOGFILE}" ) 2>&1
+else
+    # Force all output to stdout now
+    exec 1>&3
+fi

-# Check that the left side of the above pipe succeeded
-for ret in "${PIPESTATUS[@]}"; do [ $ret -eq 0 ] || exit $ret; done

-(
 # Using the cloud
-# ===============
+# ---------------

 echo ""
 echo ""
 echo ""

-# If you installed the horizon on this server, then you should be able
+# If you installed Horizon on this server, you should be able
 # to access the site using your browser.
-if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then
-    echo "horizon is now available at http://$HOST_IP/"
+if is_service_enabled horizon; then
+    echo "Horizon is now available at http://$SERVICE_HOST/"
 fi

-# If keystone is present, you can point nova cli to this server
-if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
-    echo "keystone is serving at http://$HOST_IP:5000/v2.0/"
-    echo "examples on using novaclient command line is in exercise.sh"
-    echo "the default users are: admin and demo"
-    echo "the password: $ADMIN_PASSWORD"
+# Warn that the default flavors have been changed by Heat
+if is_service_enabled heat; then
+    echo "Heat has replaced the default flavors. View them by running: nova flavor-list"
 fi

-# indicate how long this took to run (bash maintained variable 'SECONDS')
-echo "stack.sh completed in $SECONDS seconds."
+# If Keystone is present, you can point the ``nova`` CLI at this server
+if is_service_enabled key; then
+    echo "Keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/"
+    echo "Examples of using the novaclient command line are in exercise.sh"
+    echo "The default users are: admin and demo"
+    echo "The password: $ADMIN_PASSWORD"
+fi
+
+# Echo ``HOST_IP`` - useful for ``build_uec.sh``, which uses DHCP to give the instance an address
+echo "This is your host ip: $HOST_IP"
+
+# Warn that ``EXTRA_FLAGS`` needs to be converted to ``EXTRA_OPTS``
+if [[ -n "$EXTRA_FLAGS" ]]; then
+    echo_summary "WARNING: EXTRA_FLAGS is defined and may need to be converted to EXTRA_OPTS"
+fi

-) | tee -a "$LOGFILE"
+# Indicate how long this took to run (bash maintained variable ``SECONDS``)
+echo_summary "stack.sh completed in $SECONDS seconds."
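The ``extras.d`` loop near the end of ``stack.sh`` above sources each hook script
with the single argument ``stack``, in the context of ``stack.sh`` itself.  A
minimal sketch of such a hook (the file name and the ``mysvc`` service are purely
illustrative)::

    # extras.d/80-mysvc.sh - hypothetical example hook; stack.sh runs it as:
    #     source extras.d/80-mysvc.sh stack
    if [[ "$1" == "stack" ]]; then
        # Sourced, not executed, so stack.sh functions such as
        # echo_summary and is_service_enabled are available here
        if is_service_enabled mysvc; then
            echo_summary "Configuring mysvc from extras.d"
        fi
    fi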
diff --git a/stackrc b/stackrc index b541711f..91f4e2b5 100644 --- a/stackrc +++ b/stackrc @@ -1,48 +1,203 @@ +# stackrc +# +# Find the other rc files +RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) + +# Destination path for installation +DEST=/opt/stack + +# Destination for working data +DATA_DIR=${DEST}/data + +# Select the default database +DATABASE_TYPE=mysql + +# Determine stack user +if [[ $EUID -eq 0 ]]; then + STACK_USER=stack +else + STACK_USER=$(whoami) +fi + +# Specify which services to launch. These generally correspond to +# screen tabs. To change the default list, use the ``enable_service`` and +# ``disable_service`` functions in ``localrc``. +# For example, to enable Swift add this to ``localrc``: +# enable_service swift +ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,$DATABASE_TYPE + +# Set the default Nova APIs to enable +NOVA_ENABLED_APIS=ec2,osapi_compute,metadata + +# Whether to use 'dev mode' for screen windows. Dev mode works by +# stuffing text into the screen windows so that a developer can use +# ctrl-c, up-arrow, enter to restart the service. Starting services +# this way is slightly unreliable, and a bit slower, so this can +# be disabled for automated testing by setting this value to false. +SCREEN_DEV=True + +# Repositories +# ------------ + +# Base GIT Repo URL +# Another option is http://review.openstack.org/p +GIT_BASE=https://github.com + +# metering service +CEILOMETER_REPO=${GIT_BASE}/openstack/ceilometer.git +CEILOMETER_BRANCH=master + +# ceilometer client library +CEILOMETERCLIENT_REPO=${GIT_BASE}/openstack/python-ceilometerclient +CEILOMETERCLIENT_BRANCH=master + +# volume service +CINDER_REPO=${GIT_BASE}/openstack/cinder +CINDER_BRANCH=master + +# volume client +CINDERCLIENT_REPO=${GIT_BASE}/openstack/python-cinderclient +CINDERCLIENT_BRANCH=master + # compute service -NOVA_REPO=https://github.com/cloudbuilders/nova.git -NOVA_BRANCH=diablo +NOVA_REPO=${GIT_BASE}/openstack/nova.git +NOVA_BRANCH=master # storage service -SWIFT_REPO=https://github.com/openstack/swift.git -SWIFT_BRANCH=stable/diablo +SWIFT_REPO=${GIT_BASE}/openstack/swift.git +SWIFT_BRANCH=master +SWIFT3_REPO=https://github.com/fujita/swift3.git +SWIFT3_BRANCH=master -# swift and keystone integration -SWIFT_KEYSTONE_REPO=https://github.com/cloudbuilders/swift-keystone2.git -SWIFT_KEYSTONE_BRANCH=master +# python swift client library +SWIFTCLIENT_REPO=${GIT_BASE}/openstack/python-swiftclient +SWIFTCLIENT_BRANCH=master # image catalog service -GLANCE_REPO=https://github.com/cloudbuilders/glance.git -GLANCE_BRANCH=diablo +GLANCE_REPO=${GIT_BASE}/openstack/glance.git +GLANCE_BRANCH=master + +# python glance client library +GLANCECLIENT_REPO=${GIT_BASE}/openstack/python-glanceclient +GLANCECLIENT_BRANCH=master # unified auth system (manages accounts/tokens) -KEYSTONE_REPO=https://github.com/cloudbuilders/keystone.git -KEYSTONE_BRANCH=diablo +KEYSTONE_REPO=${GIT_BASE}/openstack/keystone.git +KEYSTONE_BRANCH=master # a websockets/html5 or flash powered VNC console for vm instances 
-NOVNC_REPO=https://github.com/cloudbuilders/noVNC.git
-NOVNC_BRANCH=diablo
+NOVNC_REPO=https://github.com/kanaka/noVNC.git
+NOVNC_BRANCH=master
+
+# a websockets/html5 or flash powered SPICE console for vm instances
+SPICE_REPO=http://anongit.freedesktop.org/git/spice/spice-html5.git
+SPICE_BRANCH=master

 # django powered web control panel for openstack
-HORIZON_REPO=https://github.com/openstack/horizon.git
-HORIZON_BRANCH=stable/diablo
+HORIZON_REPO=${GIT_BASE}/openstack/horizon.git
+HORIZON_BRANCH=master

 # python client library to nova that horizon (and others) use
-NOVACLIENT_REPO=https://github.com/rackspace/python-novaclient.git
+NOVACLIENT_REPO=${GIT_BASE}/openstack/python-novaclient.git
 NOVACLIENT_BRANCH=master

-# openstackx is a collection of extensions to openstack.compute & nova
-# that is *deprecated*.  The code is being moved into python-novaclient & nova.
-OPENSTACKX_REPO=https://github.com/cloudbuilders/openstackx.git
-OPENSTACKX_BRANCH=diablo
+# consolidated openstack python client
+OPENSTACKCLIENT_REPO=${GIT_BASE}/openstack/python-openstackclient.git
+OPENSTACKCLIENT_BRANCH=master
+
+# python keystone client library, used by horizon and the other clients
+KEYSTONECLIENT_REPO=${GIT_BASE}/openstack/python-keystoneclient
+KEYSTONECLIENT_BRANCH=master

 # quantum service
-QUANTUM_REPO=https://github.com/openstack/quantum
-QUANTUM_BRANCH=stable/diablo
+QUANTUM_REPO=${GIT_BASE}/openstack/quantum
+QUANTUM_BRANCH=master
+
+# quantum client
+QUANTUMCLIENT_REPO=${GIT_BASE}/openstack/python-quantumclient
+QUANTUMCLIENT_BRANCH=master
+
+# Tempest test suite
+TEMPEST_REPO=${GIT_BASE}/openstack/tempest.git
+TEMPEST_BRANCH=master
+
+# heat service
+HEAT_REPO=${GIT_BASE}/openstack/heat.git
+HEAT_BRANCH=master
+
+# python heat client library
+HEATCLIENT_REPO=${GIT_BASE}/openstack/python-heatclient.git
+HEATCLIENT_BRANCH=master
+
+# ryu service
+RYU_REPO=https://github.com/osrg/ryu.git
+RYU_BRANCH=master
+
+# diskimage-builder
+BM_IMAGE_BUILD_REPO=https://github.com/stackforge/diskimage-builder.git
+BM_IMAGE_BUILD_BRANCH=master

-# Specify a comma-separated list of uec images to download and install into glance.
-IMAGE_URLS=http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz
+# bm_poseur
+# Used to simulate a hardware environment for baremetal
+# Only used if BM_USE_FAKE_ENV is set
+BM_POSEUR_REPO=https://github.com/tripleo/bm_poseur.git
+BM_POSEUR_BRANCH=master
+
+
+# Nova hypervisor configuration.  We default to libvirt with **kvm** but will
+# drop back to **qemu** if we are unable to load the kvm module.  ``stack.sh`` can
+# also install an **LXC** or **OpenVZ** based system.
+VIRT_DRIVER=${VIRT_DRIVER:-libvirt}
+LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}

 # allow local overrides of env variables
-if [ -f ./localrc ]; then
-    source ./localrc
+if [ -f $RC_DIR/localrc ]; then
+    source $RC_DIR/localrc
 fi
+
+# Specify a comma-separated list of UEC images to download and install into glance.
+# Supported URLs here are:
+# * "uec-style" images:
+#   If the file ends in .tar.gz, uncompress the tarball and select the first
+#   .img file inside it as the image.  If present, use "*-vmlinuz*" as the kernel
+#   and "*-initrd*" as the ramdisk
+#   example: http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-amd64.tar.gz
+# * disk image (*.img, *.img.gz)
+#   If the file ends in .img, it will be uploaded and registered in glance
+#   as a disk image.  If it ends in .gz, it is uncompressed first.
+#   examples:
+#     http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-armel-disk1.img
+#     http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz
+# * OpenVZ image:
+#   OpenVZ uses its own image format and does not support UEC-style images

+#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
+#IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img" # cirros full disk image
+
+# Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
+# which may be set in ``localrc``.  Also allow ``DEFAULT_IMAGE_NAME`` and
+# ``IMAGE_URLS`` to be set directly in ``localrc``.
+case "$VIRT_DRIVER" in
+    openvz)
+        DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-11.10-x86_64}
+        IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-11.10-x86_64.tar.gz"};;
+    libvirt)
+        case "$LIBVIRT_TYPE" in
+            lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
+                DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-rootfs}
+                IMAGE_URLS=${IMAGE_URLS:-"http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz"};;
+            *) # otherwise, use the uec style image (with kernel, ramdisk, disk)
+                DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-uec}
+                IMAGE_URLS=${IMAGE_URLS:-"http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz"};;
+        esac
+        ;;
+    *) # otherwise, use the uec style image (with kernel, ramdisk, disk)
+        DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-uec}
+        IMAGE_URLS=${IMAGE_URLS:-"http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz"};;
+esac
+
+# 5GB default volume backing file size
+VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-5130M}
+
+PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"}
+PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"nova"}
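Because ``localrc`` is sourced before the image-selection case statement, and every
assignment there uses the ``FOO=${FOO:-default}`` form, a ``localrc`` can pre-seed
the image choice.  A minimal sketch, using one of the example URLs listed above
(all values illustrative)::

    # localrc - override the image defaults chosen by stackrc's case statement
    VIRT_DRIVER=libvirt
    LIBVIRT_TYPE=qemu
    DEFAULT_IMAGE_NAME=cirros-0.3.0-x86_64-uec
    IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz"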
diff --git a/tests/functions.sh b/tests/functions.sh
new file mode 100755
index 00000000..4fe64436
--- /dev/null
+++ b/tests/functions.sh
@@ -0,0 +1,337 @@
+#!/usr/bin/env bash
+
+# Tests for DevStack functions
+
+TOP=$(cd $(dirname "$0")/.. && pwd)
+
+# Import common functions
+source $TOP/functions
+
+# Import configuration
+source $TOP/openrc
+
+
+echo "Testing die_if_not_set()"
+
+bash -cx "source $TOP/functions; X=`echo Y && true`; die_if_not_set X 'not OK'"
+if [[ $? != 0 ]]; then
+    echo "die_if_not_set [X='Y' true] Failed"
+else
+    echo 'OK'
+fi
+
+bash -cx "source $TOP/functions; X=`true`; die_if_not_set X 'OK'"
+if [[ $? = 0 ]]; then
+    echo "die_if_not_set [X='' true] Failed"
+fi
+
+bash -cx "source $TOP/functions; X=`echo Y && false`; die_if_not_set X 'not OK'"
+if [[ $? != 0 ]]; then
+    echo "die_if_not_set [X='Y' false] Failed"
+else
+    echo 'OK'
+fi
+
+bash -cx "source $TOP/functions; X=`false`; die_if_not_set X 'OK'"
+if [[ $? = 0 ]]; then
+    echo "die_if_not_set [X='' false] Failed"
+fi
+
+
+echo "Testing INI functions"
+
+cat >test.ini < $ENABLED_SERVICES"
+    else
+        echo "changing $start to $finish with $add failed: $ENABLED_SERVICES"
+    fi
+}
+
+test_enable_service '' a 'a'
+test_enable_service 'a' b 'a,b'
+test_enable_service 'a,b' c 'a,b,c'
+test_enable_service 'a,b,' c 'a,b,c'
+test_enable_service 'a,b' c,d 'a,b,c,d'
+test_enable_service 'a,b' "c d" 'a,b,c,d'
+test_enable_service 'a,b,c' c 'a,b,c'
+
+test_enable_service 'a,b,-c' c 'a,b'
+test_enable_service 'a,b,c' -c 'a,b'
+
+function test_disable_service() {
+    local start="$1"
+    local del="$2"
+    local finish="$3"
+
+    ENABLED_SERVICES="$start"
+    disable_service "$del"
+    if [ "$ENABLED_SERVICES" = "$finish" ]
+    then
+        echo "OK: $start - $del -> $ENABLED_SERVICES"
+    else
+        echo "changing $start to $finish with $del failed: $ENABLED_SERVICES"
+    fi
+}
+
+echo "Testing disable_service()"
+test_disable_service 'a,b,c' a 'b,c'
+test_disable_service 'a,b,c' b 'a,c'
+test_disable_service 'a,b,c' c 'a,b'
+
+test_disable_service 'a,b,c' a 'b,c'
+test_disable_service 'b,c' b 'c'
+test_disable_service 'c' c ''
+test_disable_service '' d ''
+
+test_disable_service 'a,b,c,' c 'a,b'
+test_disable_service 'a,b' c 'a,b'
+
+
+echo "Testing disable_all_services()"
+ENABLED_SERVICES=a,b,c
+disable_all_services
+
+if [[ -z "$ENABLED_SERVICES" ]]
+then
+    echo "OK"
+else
+    echo "disabling all services FAILED: $ENABLED_SERVICES"
+fi
+
+echo "Testing disable_negated_services()"
+
+
+function test_disable_negated_services() {
+    local start="$1"
+    local finish="$2"
+
+    ENABLED_SERVICES="$start"
+    disable_negated_services
+    if [ "$ENABLED_SERVICES" = "$finish" ]
+    then
+        echo "OK: $start -> $ENABLED_SERVICES"
+    else
+        echo "changing $start to $finish failed: $ENABLED_SERVICES"
+    fi
+}
+
+test_disable_negated_services '-a' ''
+test_disable_negated_services '-a,a' ''
+test_disable_negated_services '-a,-a' ''
+test_disable_negated_services 'a,-a' ''
+test_disable_negated_services 'b,a,-a' 'b'
+test_disable_negated_services 'a,b,-a' 'b'
+test_disable_negated_services 'a,-a,b' 'b'
+
+
+echo "Testing is_package_installed()"
+
+if [[ -z "$os_PACKAGE" ]]; then
+    GetOSVersion
+fi
+
+if [[ "$os_PACKAGE" = "deb" ]]; then
+    is_package_installed dpkg
+    VAL=$?
+elif [[ "$os_PACKAGE" = "rpm" ]]; then
+    is_package_installed rpm
+    VAL=$?
+else
+    VAL=1
+fi
+if [[ "$VAL" -eq 0 ]]; then
+    echo "OK"
+else
+    echo "is_package_installed() on existing package failed"
+fi
+
+if [[ "$os_PACKAGE" = "deb" ]]; then
+    is_package_installed dpkg bash
+    VAL=$?
+elif [[ "$os_PACKAGE" = "rpm" ]]; then
+    is_package_installed rpm bash
+    VAL=$?
+else
+    VAL=1
+fi
+if [[ "$VAL" -eq 0 ]]; then
+    echo "OK"
+else
+    echo "is_package_installed() on more than one existing package failed"
+fi
+
+is_package_installed zzzZZZzzz
+VAL=$?
+if [[ "$VAL" -ne 0 ]]; then
+    echo "OK"
+else
+    echo "is_package_installed() on non-existing package failed"
+fi
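For readers following the service-list tests above, here is a paraphrased sketch of
the semantics being exercised.  This is not the canonical implementation - the real
definitions live in the top-level ``functions`` file - but it behaves the same way
on the test vectors listed above::

    # Rebuild a comma-separated list, dropping empty fields and any $2 entries
    function _strip_from_list() {
        local item out=""
        for item in ${1//,/ }; do
            [[ "$item" != "$2" ]] && out="${out:+$out,}$item"
        done
        echo "$out"
    }

    function disable_service() {
        local service
        for service in $@; do
            ENABLED_SERVICES=$(_strip_from_list "$ENABLED_SERVICES" "$service")
        done
    }

    function disable_negated_services() {
        local service
        for service in ${ENABLED_SERVICES//,/ }; do
            # a "-foo" entry disables foo and removes itself
            if [[ "$service" == -* ]]; then
                disable_service "${service#-}" "$service"
            fi
        done
    }

    function enable_service() {
        local service
        for service in $@; do
            # append only if not already present, normalizing stray commas
            if [[ ",$ENABLED_SERVICES," != *",$service,"* ]]; then
                ENABLED_SERVICES=$(_strip_from_list "$ENABLED_SERVICES,$service" "")
            fi
        done
        disable_negated_services
    }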
+if [[ "$VAL" -ne 0 ]]; then + echo "OK" +else + echo "is_package_installed() on non-existing package failed" +fi diff --git a/tools/build_bm.sh b/tools/build_bm.sh index 44cf3030..ab0ba0ef 100755 --- a/tools/build_bm.sh +++ b/tools/build_bm.sh @@ -1,7 +1,17 @@ #!/usr/bin/env bash + +# **build_bm.sh** + # Build an OpenStack install on a bare metal machine. set +x +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + # Source params source ./stackrc diff --git a/tools/build_bm_multi.sh b/tools/build_bm_multi.sh index 133d5372..f1242ee4 100755 --- a/tools/build_bm_multi.sh +++ b/tools/build_bm_multi.sh @@ -1,4 +1,7 @@ #!/usr/bin/env bash + +# **build_bm_multi.sh** + # Build an OpenStack install on several bare metal machines. SHELL_AFTER_RUN=no diff --git a/tools/build_libvirt.sh b/tools/build_libvirt.sh deleted file mode 100755 index d1928794..00000000 --- a/tools/build_libvirt.sh +++ /dev/null @@ -1,467 +0,0 @@ -#!/usr/bin/env bash - -# exit on error to stop unexpected errors -set -o errexit - -# Make sure that we have the proper version of ubuntu -UBUNTU_VERSION=`cat /etc/lsb-release | grep CODENAME | sed 's/.*=//g'` -if [ ! "oneiric" = "$UBUNTU_VERSION" ]; then - if [ ! "natty" = "$UBUNTU_VERSION" ]; then - echo "This script only works with oneiric and natty" - exit 1 - fi -fi - -# Clean up any resources that may be in use -cleanup() { - set +o errexit - unmount_images - - if [ -n "$ROOTFS" ]; then - umount $ROOTFS/dev - umount $ROOTFS - fi - - # Release NBD devices - if [ -n "$NBD" ]; then - qemu-nbd -d $NBD - fi - - # Kill ourselves to signal any calling process - trap 2; kill -2 $$ -} - -trap cleanup SIGHUP SIGINT SIGTERM - -# Echo commands -set -o xtrace - -# Keep track of the current directory -TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` - -# Where to store files and instances -WORK_DIR=${WORK_DIR:-/opt/kvmstack} - -# Where to store images -IMAGES_DIR=$WORK_DIR/images - -# Create images dir -mkdir -p $IMAGES_DIR - -# Abort if localrc is not set -if [ ! -e $TOP_DIR/localrc ]; then - echo "You must have a localrc with ALL necessary passwords defined before proceeding." - echo "See stack.sh for required passwords." - exit 1 -fi - -cd $TOP_DIR - -# Source params -source ./stackrc - -# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD`` -ROOT_PASSWORD=${ADMIN_PASSWORD:-password} - -# Base image (natty by default) -DIST_NAME=${DIST_NAME:-natty} -IMAGE_FNAME=$DIST_NAME.raw - -# Name of our instance, used by libvirt -GUEST_NAME=${GUEST_NAME:-devstack} - -# Original version of built image -BASE_IMAGE=$IMAGES_DIR/$DIST_NAME.raw - -# Copy of base image, which we pre-install with tasty treats -VM_IMAGE=$IMAGES_DIR/$DIST_NAME.$GUEST_NAME.raw - -# Mop up after previous runs -virsh destroy $GUEST_NAME || true - -# Where this vm is stored -VM_DIR=$WORK_DIR/instances/$GUEST_NAME - -# Create vm dir -mkdir -p $VM_DIR - -# Mount point into copied base image -COPY_DIR=$VM_DIR/copy -mkdir -p $COPY_DIR - -# Get the base image if it does not yet exist -if [ ! -e $BASE_IMAGE ]; then - $TOOLS_DIR/get_uec_image.sh -f raw -r 5000 $DIST_NAME $BASE_IMAGE -fi - -# Create a copy of the base image -if [ ! 
-e $VM_IMAGE ]; then - cp -p $BASE_IMAGE $VM_IMAGE -fi - -# Unmount the copied base image -function unmount_images() { - # unmount the filesystem - while df | grep -q $COPY_DIR; do - umount $COPY_DIR || echo 'ok' - sleep 1 - done -} - -# Unmount from failed runs -unmount_images - -# Ctrl-c catcher -function kill_unmount() { - unmount_images - exit 1 -} - -# Install deps if needed -dpkg -l kvm libvirt-bin kpartx || apt-get install -y --force-yes kvm libvirt-bin kpartx - -# Where Openstack code will live in image -DEST=${DEST:-/opt/stack} - -# Mount the file system -# For some reason, UEC-based images want 255 heads * 63 sectors * 512 byte sectors = 8225280 -mount -t ext4 -o loop,offset=8225280 $VM_IMAGE $COPY_DIR - -# git clone only if directory doesn't exist already. Since ``DEST`` might not -# be owned by the installation user, we create the directory and change the -# ownership to the proper user. -function git_clone { - if [ ! -d $2 ]; then - sudo mkdir $2 - sudo chown `whoami` $2 - git clone $1 $2 - cd $2 - # This checkout syntax works for both branches and tags - git checkout $3 - fi -} - -# Make sure that base requirements are installed -cp /etc/resolv.conf $COPY_DIR/etc/resolv.conf -chroot $COPY_DIR apt-get update -chroot $COPY_DIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1` -chroot $COPY_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` -chroot $COPY_DIR pip install `cat files/pips/*` - -# Clean out code repos if directed to do so -if [ "$CLEAN" = "1" ]; then - rm -rf $COPY_DIR/$DEST -fi - -# Cache openstack code -mkdir -p $COPY_DIR/$DEST -git_clone $NOVA_REPO $COPY_DIR/$DEST/nova $NOVA_BRANCH -git_clone $GLANCE_REPO $COPY_DIR/$DEST/glance $GLANCE_BRANCH -git_clone $KEYSTONE_REPO $COPY_DIR/$DESTkeystone $KEYSTONE_BRANCH -git_clone $NOVNC_REPO $COPY_DIR/$DEST/noVNC $NOVNC_BRANCH -git_clone $HORIZON_REPO $COPY_DIR/$DEST/horizon $HORIZON_BRANCH $HORIZON_TAG -git_clone $NOVACLIENT_REPO $COPY_DIR/$DEST/python-novaclient $NOVACLIENT_BRANCH -git_clone $OPENSTACKX_REPO $COPY_DIR/$DEST/openstackx $OPENSTACKX_BRANCH -git_clone $KEYSTONE_REPO $COPY_DIR/$DEST/keystone $KEYSTONE_BRANCH -git_clone $NOVNC_REPO $COPY_DIR/$DEST/noVNC $NOVNC_BRANCH - -# Back to devstack -cd $TOP_DIR - -# Unmount the filesystems -unmount_images - -# Network configuration variables -GUEST_NETWORK=${GUEST_NETWORK:-1} -GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes} - -GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50} -GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24} -GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0} -GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.$GUEST_NETWORK.1} -GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $GUEST_NETWORK`"} -GUEST_RAM=${GUEST_RAM:-1524288} -GUEST_CORES=${GUEST_CORES:-1} - -# libvirt.xml configuration -NET_XML=$VM_DIR/net.xml -cat > $NET_XML < - devstack-$GUEST_NETWORK - - - - -EOF - -if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then - virsh net-destroy devstack-$GUEST_NETWORK || true - virsh net-create $VM_DIR/net.xml -fi - -# libvirt.xml configuration -LIBVIRT_XML=$VM_DIR/libvirt.xml -cat > $LIBVIRT_XML < - $GUEST_NAME - $GUEST_RAM - - hvm - - - - - - $GUEST_CORES - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -EOF - -# Mount point for instance fs -ROOTFS=$VM_DIR/root -mkdir -p $ROOTFS - -# Clean up from previous runs -umount $ROOTFS || echo 'ok' - -# Clean up old runs -cd $VM_DIR -rm -f $VM_DIR/disk - -# Create our instance fs -qemu-img create -f qcow2 -b $VM_IMAGE disk - -# Finds the next available NBD device -# 
Exits script if error connecting or none free -# map_nbd image -# returns full nbd device path -function map_nbd { - for i in `seq 0 15`; do - if [ ! -e /sys/block/nbd$i/pid ]; then - NBD=/dev/nbd$i - # Connect to nbd and wait till it is ready - qemu-nbd -c $NBD $1 - if ! timeout 60 sh -c "while ! [ -e ${NBD}p1 ]; do sleep 1; done"; then - echo "Couldn't connect $NBD" - exit 1 - fi - break - fi - done - if [ -z "$NBD" ]; then - echo "No free NBD slots" - exit 1 - fi - echo $NBD -} - -# Make sure we have nbd-ness -modprobe nbd max_part=63 - -# Set up nbd -NBD=`map_nbd disk` -NBD_DEV=`basename $NBD` - -# Mount the instance -mount ${NBD}p1 $ROOTFS - -# Configure instance network -INTERFACES=$ROOTFS/etc/network/interfaces -cat > $INTERFACES <> $ROOTFS/etc/sudoers - -# Gracefully cp only if source file/dir exists -function cp_it { - if [ -e $1 ] || [ -d $1 ]; then - cp -pRL $1 $2 - fi -} - -# Copy over your ssh keys and env if desired -COPYENV=${COPYENV:-1} -if [ "$COPYENV" = "1" ]; then - cp_it ~/.ssh $ROOTFS/$DEST/.ssh - cp_it ~/.ssh/id_rsa.pub $ROOTFS/$DEST/.ssh/authorized_keys - cp_it ~/.gitconfig $ROOTFS/$DEST/.gitconfig - cp_it ~/.vimrc $ROOTFS/$DEST/.vimrc - cp_it ~/.bashrc $ROOTFS/$DEST/.bashrc -fi - -# pre-cache uec images -for image_url in ${IMAGE_URLS//,/ }; do - IMAGE_FNAME=`basename "$image_url"` - if [ ! -f $IMAGES_DIR/$IMAGE_FNAME ]; then - wget -c $image_url -O $IMAGES_DIR/$IMAGE_FNAME - fi - cp $IMAGES_DIR/$IMAGE_FNAME $ROOTFS/$DEST/devstack/files -done - -# Configure the runner -RUN_SH=$ROOTFS/$DEST/run.sh -cat > $RUN_SH < /$DEST/run.sh.log -echo >> /$DEST/run.sh.log -echo >> /$DEST/run.sh.log -echo "All done! Time to start clicking." >> /$DEST/run.sh.log -cat $DEST/run.sh.log -EOF -chmod 755 $RUN_SH - -# Make runner launch on boot -RC_LOCAL=$ROOTFS/etc/init.d/zlocal -cat > $RC_LOCAL < /etc/hostname -hostname $GUEST_NAME -su -c "$DEST/run.sh" stack -EOF -chmod +x $RC_LOCAL -chroot $ROOTFS sudo update-rc.d zlocal defaults 99 - -# Make our ip address hostnames look nice at the command prompt -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $ROOTFS/$DEST/.bashrc -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $ROOTFS/etc/profile - -# Give stack ownership over $DEST so it may do the work needed -chroot $ROOTFS chown -R stack $DEST - -# Set the hostname -echo $GUEST_NAME > $ROOTFS/etc/hostname - -# We need the hostname to resolve for rabbit to launch -if ! 
grep -q $GUEST_NAME $ROOTFS/etc/hosts; then - echo "$GUEST_IP $GUEST_NAME" >> $ROOTFS/etc/hosts -fi - -# GRUB 2 wants to see /dev -mount -o bind /dev $ROOTFS/dev - -# Change boot params so that we get a console log -G_DEV_UUID=`blkid -t LABEL=cloudimg-rootfs -s UUID -o value | head -1` -sed -e "s/GRUB_TIMEOUT=.*$/GRUB_TIMEOUT=3/" -i $ROOTFS/etc/default/grub -sed -e "s,GRUB_CMDLINE_LINUX_DEFAULT=.*$,GRUB_CMDLINE_LINUX_DEFAULT=\"console=ttyS0 console=tty0 ds=nocloud ubuntu-pass=pass\",g" -i $ROOTFS/etc/default/grub -sed -e 's/[#]*GRUB_TERMINAL=.*$/GRUB_TERMINAL="serial console"/' -i $ROOTFS/etc/default/grub -echo 'GRUB_SERIAL_COMMAND="serial --unit=0"' >>$ROOTFS/etc/default/grub -echo 'GRUB_DISABLE_OS_PROBER=true' >>$ROOTFS/etc/default/grub -echo "GRUB_DEVICE_UUID=$G_DEV_UUID" >>$ROOTFS/etc/default/grub - -chroot $ROOTFS update-grub -umount $ROOTFS/dev - -# Pre-generate ssh host keys and allow password login -chroot $ROOTFS dpkg-reconfigure openssh-server -sed -e 's/^PasswordAuthentication.*$/PasswordAuthentication yes/' -i $ROOTFS/etc/ssh/sshd_config - -# Unmount -umount $ROOTFS || echo 'ok' -ROOTFS="" -qemu-nbd -d $NBD -NBD="" - -# Create the instance -cd $VM_DIR && virsh create libvirt.xml - -# Tail the console log till we are done -WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1} -if [ "$WAIT_TILL_LAUNCH" = "1" ]; then - # Done creating the container, let's tail the log - echo - echo "=============================================================" - echo " -- YAY! --" - echo "=============================================================" - echo - echo "We're done launching the vm, about to start tailing the" - echo "stack.sh log. It will take a second or two to start." - echo - echo "Just CTRL-C at any time to stop tailing." - - while [ ! -e "$VM_DIR/console.log" ]; do - sleep 1 - done - - tail -F $VM_DIR/console.log & - - TAIL_PID=$! - - function kill_tail() { - kill $TAIL_PID - exit 1 - } - - # Let Ctrl-c kill tail and exit - trap kill_tail SIGINT - - set +o xtrace - - echo "Waiting stack.sh to finish..." - while ! cat $VM_DIR/console.log | grep -q 'All done' ; do - sleep 1 - done - - set -o xtrace - - kill $TAIL_PID - - if ! grep -q "^stack.sh completed in" $VM_DIR/console.log; then - exit 1 - fi - echo "" - echo "Finished - Zip-a-dee Doo-dah!" -fi diff --git a/tools/build_lxc.sh b/tools/build_lxc.sh deleted file mode 100755 index 9d8ce926..00000000 --- a/tools/build_lxc.sh +++ /dev/null @@ -1,320 +0,0 @@ -#!/usr/bin/env bash - -# Sanity check -if [ "$EUID" -ne "0" ]; then - echo "This script must be run with root privileges." - exit 1 -fi - -# Keep track of ubuntu version -UBUNTU_VERSION=`cat /etc/lsb-release | grep CODENAME | sed 's/.*=//g'` - -# Move to top devstack dir -cd .. - -# Abort if localrc is not set -if [ ! -e ./localrc ]; then - echo "You must have a localrc with ALL necessary passwords defined before proceeding." - echo "See stack.sh for required passwords." - exit 1 -fi - -# Source params -source ./stackrc - -# Store cwd -CWD=`pwd` - -# Configurable params -BRIDGE=${BRIDGE:-br0} -GUEST_NAME=${GUEST_NAME:-STACK} -GUEST_IP=${GUEST_IP:-192.168.1.50} -GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24} -GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0} -GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.1.1} -NAMESERVER=${NAMESERVER:-`cat /etc/resolv.conf | grep nameserver | head -1 | cut -d " " -f2`} -COPYENV=${COPYENV:-1} -DEST=${DEST:-/opt/stack} -WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1} - -# Param string to pass to stack.sh. 
Like "EC2_DMZ_HOST=192.168.1.1 MYSQL_USER=nova" -# By default, n-vol is disabled for lxc, as iscsitarget doesn't work properly in lxc -STACKSH_PARAMS=${STACKSH_PARAMS:-"ENABLED_SERVICES=g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit"} - -# Option to use the version of devstack on which we are currently working -USE_CURRENT_DEVSTACK=${USE_CURRENT_DEVSTACK:-1} - - -# Install deps -apt-get install -y lxc debootstrap - -# Install cgroup-bin from source, since the packaging is buggy and possibly incompatible with our setup -if ! which cgdelete | grep -q cgdelete; then - apt-get install -y g++ bison flex libpam0g-dev make - wget http://sourceforge.net/projects/libcg/files/libcgroup/v0.37.1/libcgroup-0.37.1.tar.bz2/download -O /tmp/libcgroup-0.37.1.tar.bz2 - cd /tmp && bunzip2 libcgroup-0.37.1.tar.bz2 && tar xfv libcgroup-0.37.1.tar - cd libcgroup-0.37.1 - ./configure - make install - ldconfig -fi - -# Create lxc configuration -LXC_CONF=/tmp/$GUEST_NAME.conf -cat > $LXC_CONF <> $ROOTFS/etc/sudoers - -# Copy kernel modules -mkdir -p $ROOTFS/lib/modules/`uname -r`/kernel -cp -p /lib/modules/`uname -r`/modules.dep $ROOTFS/lib/modules/`uname -r`/ -cp -pR /lib/modules/`uname -r`/kernel/net $ROOTFS/lib/modules/`uname -r`/kernel/ - -# Gracefully cp only if source file/dir exists -function cp_it { - if [ -e $1 ] || [ -d $1 ]; then - cp -pRL $1 $2 - fi -} - -# Copy over your ssh keys and env if desired -if [ "$COPYENV" = "1" ]; then - cp_it ~/.ssh $ROOTFS/$DEST/.ssh - cp_it ~/.ssh/id_rsa.pub $ROOTFS/$DEST/.ssh/authorized_keys - cp_it ~/.gitconfig $ROOTFS/$DEST/.gitconfig - cp_it ~/.vimrc $ROOTFS/$DEST/.vimrc - cp_it ~/.bashrc $ROOTFS/$DEST/.bashrc -fi - -# Make our ip address hostnames look nice at the command prompt -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $ROOTFS/$DEST/.bashrc -echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $ROOTFS/etc/profile - -# Give stack ownership over $DEST so it may do the work needed -chroot $ROOTFS chown -R stack $DEST - -# Configure instance network -INTERFACES=$ROOTFS/etc/network/interfaces -cat > $INTERFACES < $RUN_SH < /$DEST/run.sh.log -echo >> /$DEST/run.sh.log -echo >> /$DEST/run.sh.log -echo "All done! Time to start clicking." >> /$DEST/run.sh.log -EOF - -# Make the run.sh executable -chmod 755 $RUN_SH - -# Make runner launch on boot -RC_LOCAL=$ROOTFS/etc/init.d/local -cat > $RC_LOCAL <> $CHROOTCACHE/natty-dev/etc/sudoers -fi - -# clone git repositories onto the system -# ====================================== - -if [ ! -d $CHROOTCACHE/natty-stack ]; then - rsync -azH $CHROOTCACHE/natty-dev/ $CHROOTCACHE/natty-stack/ -fi - -# git clone only if directory doesn't exist already. Since ``DEST`` might not -# be owned by the installation user, we create the directory and change the -# ownership to the proper user. -function git_clone { - - # clone new copy or fetch latest changes - CHECKOUT=$CHROOTCACHE/natty-stack$2 - if [ ! 
-d $CHECKOUT ]; then - mkdir -p $CHECKOUT - git clone $1 $CHECKOUT - else - pushd $CHECKOUT - git fetch - popd - fi - - # FIXME(ja): checkout specified version (should works for branches and tags) - - pushd $CHECKOUT - # checkout the proper branch/tag - git checkout $3 - # force our local version to be the same as the remote version - git reset --hard origin/$3 - popd - - # give ownership to the stack user - chroot $CHROOTCACHE/natty-stack/ chown -R stack $2 -} - -git_clone $NOVA_REPO $DEST/nova $NOVA_BRANCH -git_clone $GLANCE_REPO $DEST/glance $GLANCE_BRANCH -git_clone $KEYSTONE_REPO $DEST/keystone $KEYSTONE_BRANCH -git_clone $NOVNC_REPO $DEST/novnc $NOVNC_BRANCH -git_clone $HORIZON_REPO $DEST/horizon $HORIZON_BRANCH $HORIZON_TAG -git_clone $NOVACLIENT_REPO $DEST/python-novaclient $NOVACLIENT_BRANCH -git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH - -chroot $CHROOTCACHE/natty-stack mkdir -p $DEST/files -wget -c http://images.ansolabs.com/tty.tgz -O $CHROOTCACHE/natty-stack$DEST/files/tty.tgz - -# Use this version of devstack? -if [ "$USE_CURRENT_DEVSTACK" = "1" ]; then - rm -rf $CHROOTCACHE/natty-stack/$DEST/devstack - cp -pr $CWD $CHROOTCACHE/natty-stack/$DEST/devstack -fi - -cp -pr $CHROOTCACHE/natty-stack $NFSDIR - -# set hostname -echo $NAME > $NFSDIR/etc/hostname -echo "127.0.0.1 localhost $NAME" > $NFSDIR/etc/hosts - -# injecting root's public ssh key if it exists -if [ -f /root/.ssh/id_rsa.pub ]; then - mkdir $NFSDIR/root/.ssh - chmod 700 $NFSDIR/root/.ssh - cp /root/.ssh/id_rsa.pub $NFSDIR/root/.ssh/authorized_keys -fi diff --git a/tools/build_pxe_env.sh b/tools/build_pxe_env.sh index 1ab51f89..e6f98b4b 100755 --- a/tools/build_pxe_env.sh +++ b/tools/build_pxe_env.sh @@ -1,5 +1,8 @@ #!/bin/bash -e -# build_pxe_env.sh - Create a PXE boot environment + +# **build_pxe_env.sh** + +# Create a PXE boot environment # # build_pxe_env.sh destdir # @@ -10,8 +13,7 @@ dpkg -l syslinux || apt-get install -y syslinux DEST_DIR=${1:-/tmp}/tftpboot -PXEDIR=${PXEDIR:-/var/cache/devstack/pxe} -OPWD=`pwd` +PXEDIR=${PXEDIR:-/opt/ramstack/pxe} PROGDIR=`dirname $0` # Clean up any resources that may be in use @@ -28,7 +30,11 @@ cleanup() { trap 2; kill -2 $$ } -trap cleanup SIGHUP SIGINT SIGTERM +trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT + +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=`cd $TOOLS_DIR/..; pwd` mkdir -p $DEST_DIR/pxelinux.cfg cd $DEST_DIR @@ -42,7 +48,7 @@ default menu.c32 prompt 0 timeout 0 -MENU TITLE PXE Boot Menu +MENU TITLE devstack PXE Boot Menu EOF @@ -54,7 +60,7 @@ fi # Get image into place if [ ! -r $PXEDIR/stack-initrd.img ]; then - cd $OPWD + cd $TOP_DIR $PROGDIR/build_ramdisk.sh $PXEDIR/stack-initrd.img fi if [ ! -r $PXEDIR/stack-initrd.gz ]; then @@ -110,3 +116,5 @@ LABEL local MENU LABEL ^Local disk LOCALBOOT 0 EOF + +trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh index 2c914dc7..2c455685 100755 --- a/tools/build_ramdisk.sh +++ b/tools/build_ramdisk.sh @@ -1,7 +1,10 @@ #!/bin/bash -# build_ramdisk.sh - Build RAM disk images -# exit on error to stop unexpected errors +# **build_ramdisk.sh** + +# Build RAM disk images + +# Exit on error to stop unexpected errors set -o errexit if [ ! "$#" -eq "1" ]; then @@ -47,7 +50,10 @@ IMG_FILE=$1 # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +. 
$TOP_DIR/functions # Store cwd CWD=`pwd` @@ -57,7 +63,7 @@ cd $TOP_DIR # Source params source ./stackrc -CACHEDIR=${CACHEDIR:-/var/cache/devstack} +CACHEDIR=${CACHEDIR:-/opt/stack/cache} DEST=${DEST:-/opt/stack} @@ -81,7 +87,7 @@ fi # Finds the next available NBD device # Exits script if error connecting or none free # map_nbd image -# returns full nbd device path +# Returns full nbd device path function map_nbd { for i in `seq 0 15`; do if [ ! -e /sys/block/nbd$i/pid ]; then @@ -102,7 +108,7 @@ function map_nbd { echo $NBD } -# prime image with as many apt/pips as we can +# Prime image with as many apt as we can DEV_FILE=$CACHEDIR/$DIST_NAME-dev.img DEV_FILE_TMP=`mktemp $DEV_FILE.XXXXXX` if [ ! -r $DEV_FILE ]; then @@ -115,22 +121,21 @@ if [ ! -r $DEV_FILE ]; then chroot $MNTDIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1` chroot $MNTDIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` - chroot $MNTDIR pip install `cat files/pips/*` # Create a stack user that is a member of the libvirtd group so that stack # is able to interact with libvirt. chroot $MNTDIR groupadd libvirtd - chroot $MNTDIR useradd stack -s /bin/bash -d $DEST -G libvirtd + chroot $MNTDIR useradd $STACK_USER -s /bin/bash -d $DEST -G libvirtd mkdir -p $MNTDIR/$DEST - chroot $MNTDIR chown stack $DEST + chroot $MNTDIR chown $STACK_USER $DEST - # a simple password - pass - echo stack:pass | chroot $MNTDIR chpasswd + # A simple password - pass + echo $STACK_USER:pass | chroot $MNTDIR chpasswd echo root:$ROOT_PASSWORD | chroot $MNTDIR chpasswd - # and has sudo ability (in the future this should be limited to only what + # And has sudo ability (in the future this should be limited to only what # stack requires) - echo "stack ALL=(ALL) NOPASSWD: ALL" >> $MNTDIR/etc/sudoers + echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> $MNTDIR/etc/sudoers umount $MNTDIR rmdir $MNTDIR @@ -140,7 +145,8 @@ if [ ! -r $DEV_FILE ]; then fi rm -f $DEV_FILE_TMP -# clone git repositories onto the system + +# Clone git repositories onto the system # ====================================== IMG_FILE_TMP=`mktemp $IMG_FILE.XXXXXX` @@ -170,35 +176,6 @@ if [ ! -r "`ls $MNTDIR/boot/vmlinuz-*-generic | head -1`" ]; then chroot $MNTDIR apt-get install -y linux-generic fi -# git clone only if directory doesn't exist already. Since ``DEST`` might not -# be owned by the installation user, we create the directory and change the -# ownership to the proper user. -function git_clone { - - # clone new copy or fetch latest changes - CHECKOUT=${MNTDIR}$2 - if [ ! 
-d $CHECKOUT ]; then - mkdir -p $CHECKOUT - git clone $1 $CHECKOUT - else - pushd $CHECKOUT - git fetch - popd - fi - - # FIXME(ja): checkout specified version (should works for branches and tags) - - pushd $CHECKOUT - # checkout the proper branch/tag - git checkout $3 - # force our local version to be the same as the remote version - git reset --hard origin/$3 - popd - - # give ownership to the stack user - chroot $MNTDIR chown -R stack $2 -} - git_clone $NOVA_REPO $DEST/nova $NOVA_BRANCH git_clone $GLANCE_REPO $DEST/glance $GLANCE_BRANCH git_clone $KEYSTONE_REPO $DEST/keystone $KEYSTONE_BRANCH @@ -210,7 +187,7 @@ git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH # Use this version of devstack rm -rf $MNTDIR/$DEST/devstack cp -pr $CWD $MNTDIR/$DEST/devstack -chroot $MNTDIR chown -R stack $DEST/devstack +chroot $MNTDIR chown -R $STACK_USER $DEST/devstack # Configure host network for DHCP mkdir -p $MNTDIR/etc/network @@ -248,7 +225,7 @@ EOF # Make the run.sh executable chmod 755 $RUN_SH -chroot $MNTDIR chown stack $DEST/run.sh +chroot $MNTDIR chown $STACK_USER $DEST/run.sh umount $MNTDIR rmdir $MNTDIR diff --git a/tools/build_tempest.sh b/tools/build_tempest.sh new file mode 100755 index 00000000..1758e7da --- /dev/null +++ b/tools/build_tempest.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +# +# **build_tempest.sh** + +# Checkout and prepare a Tempest repo: https://github.com/openstack/tempest.git + +function usage { + echo "$0 - Check out and prepare a Tempest repo" + echo "" + echo "Usage: $0" + exit 1 +} + +if [ "$1" = "-h" ]; then + usage +fi + +# Clean up any resources that may be in use +cleanup() { + set +o errexit + + # Kill ourselves to signal any calling process + trap 2; kill -2 $$ +} + +trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT + +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +. $TOP_DIR/functions + +# Abort if localrc is not set +if [ ! -e $TOP_DIR/localrc ]; then + echo "You must have a localrc with ALL necessary passwords and configuration defined before proceeding." + echo "See stack.sh for required passwords." + exit 1 +fi + +# Source params +source ./stackrc + +# Where Openstack code lives +DEST=${DEST:-/opt/stack} + +TEMPEST_DIR=$DEST/tempest + +# Install tests and prerequisites +git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH + +trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT diff --git a/tools/build_uec.sh b/tools/build_uec.sh index d95ba77d..6c4a26c2 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# **build_uec.sh** + # Make sure that we have the proper version of ubuntu (only works on oneiric) if ! egrep -q "oneiric" /etc/lsb-release; then echo "This script only works with ubuntu oneiric." @@ -8,7 +10,10 @@ fi # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +. 
$TOP_DIR/functions cd $TOP_DIR @@ -34,30 +39,37 @@ fi # Install deps if needed DEPS="kvm libvirt-bin kpartx cloud-utils curl" -apt-get install -y --force-yes $DEPS +apt_get install -y --force-yes $DEPS || true # allow this to fail gracefully for concurrent builds # Where to store files and instances -WORK_DIR=${WORK_DIR:-/opt/kvmstack} +WORK_DIR=${WORK_DIR:-/opt/uecstack} # Where to store images image_dir=$WORK_DIR/images/$DIST_NAME mkdir -p $image_dir -# Original version of built image -uec_url=http://uec-images.ubuntu.com/$DIST_NAME/current/$DIST_NAME-server-cloudimg-amd64.tar.gz -tarball=$image_dir/$(basename $uec_url) +# Start over with a clean base image, if desired +if [ $CLEAN_BASE ]; then + rm -f $image_dir/disk +fi -# download the base uec image if we haven't already -if [ ! -f $tarball ]; then - curl $uec_url -o $tarball - (cd $image_dir && tar -Sxvzf $tarball) - resize-part-image $image_dir/*.img $GUEST_SIZE $image_dir/disk - cp $image_dir/*-vmlinuz-virtual $image_dir/kernel +# Get the base image if it does not yet exist +if [ ! -e $image_dir/disk ]; then + $TOOLS_DIR/get_uec_image.sh -r $GUEST_SIZE $DIST_NAME $image_dir/disk $image_dir/kernel fi +# Copy over dev environment if COPY_ENV is set. +# This will also copy over your current devstack. +if [ $COPY_ENV ]; then + cd $TOOLS_DIR + ./copy_dev_environment_to_uec.sh $image_dir/disk +fi -# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD`` -ROOT_PASSWORD=${ADMIN_PASSWORD:-password} +# Option to warm the base image with software requirements. +if [ $WARM_CACHE ]; then + cd $TOOLS_DIR + ./warm_apts_for_uec.sh $image_dir/disk +fi # Name of our instance, used by libvirt GUEST_NAME=${GUEST_NAME:-devstack} @@ -90,9 +102,10 @@ GUEST_CORES=${GUEST_CORES:-1} # libvirt.xml configuration NET_XML=$vm_dir/net.xml +NET_NAME=${NET_NAME:-devstack-$GUEST_NETWORK} cat > $NET_XML < - devstack-$GUEST_NETWORK + $NET_NAME @@ -104,9 +117,9 @@ cat > $NET_XML < $LIBVIRT_XML < - + - + @@ -170,23 +183,48 @@ instance-type: m1.ignore local-hostname: $GUEST_NAME.local EOF -# set metadata +# set user-data cat > $vm_dir/uec/user-data< localrc <> $vm_dir/uec/user-data< localrc < /opt/stack/.ssh/authorized_keys +chown -R $STACK_USER /opt/stack +chmod 700 /opt/stack/.ssh +chmod 600 /opt/stack/.ssh/authorized_keys + +grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || + echo "#includedir /etc/sudoers.d" >> /etc/sudoers +( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \ + > /etc/sudoers.d/50_stack_sh ) +EOF +fi + +# Run stack.sh +cat >> $vm_dir/uec/user-data< $MNT_DIR/etc/network/interfaces <$MNT_DIR/etc/hostname +echo "127.0.0.1 localhost ramstack" >$MNT_DIR/etc/hosts + +# Configure the runner +RUN_SH=$MNT_DIR/$DEST/run.sh +cat > $RUN_SH < $DEST/run.sh.log +echo >> $DEST/run.sh.log +echo >> $DEST/run.sh.log +echo "All done! Time to start clicking." 
>> $DEST/run.sh.log +EOF + +# Make the run.sh executable +chmod 755 $RUN_SH +chroot $MNT_DIR chown stack $DEST/run.sh + +umount $MNT_DIR/dev +umount $MNT_DIR +rmdir $MNT_DIR +mv $DEST_FILE_TMP $DEST_FILE +rm -f $DEST_FILE_TMP + +trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT diff --git a/tools/build_usb_boot.sh b/tools/build_usb_boot.sh index e4dabc0e..85662298 100755 --- a/tools/build_usb_boot.sh +++ b/tools/build_usb_boot.sh @@ -1,5 +1,8 @@ #!/bin/bash -e -# build_usb_boot.sh - Create a syslinux boot environment + +# **build_usb_boot.sh** + +# Create a syslinux boot environment # # build_usb_boot.sh destdev # @@ -7,9 +10,7 @@ # Needs to run as root DEST_DIR=${1:-/tmp/syslinux-boot} -PXEDIR=${PXEDIR:-/var/cache/devstack/pxe} -OPWD=`pwd` -PROGDIR=`dirname $0` +PXEDIR=${PXEDIR:-/opt/ramstack/pxe} # Clean up any resources that may be in use cleanup() { @@ -29,7 +30,11 @@ cleanup() { trap 2; kill -2 $$ } -trap cleanup SIGHUP SIGINT SIGTERM +trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT + +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=`cd $TOOLS_DIR/..; pwd` if [ -b $DEST_DIR ]; then # We have a block device, install syslinux and mount it @@ -62,7 +67,7 @@ default /syslinux/menu.c32 prompt 0 timeout 0 -MENU TITLE Boot Menu +MENU TITLE devstack Boot Menu EOF @@ -74,8 +79,8 @@ fi # Get image into place if [ ! -r $PXEDIR/stack-initrd.img ]; then - cd $OPWD - $PROGDIR/build_ramdisk.sh $PXEDIR/stack-initrd.img + cd $TOP_DIR + $TOOLS_DIR/build_uec_ramdisk.sh $PXEDIR/stack-initrd.img fi if [ ! -r $PXEDIR/stack-initrd.gz ]; then gzip -1 -c $PXEDIR/stack-initrd.img >$PXEDIR/stack-initrd.gz @@ -139,3 +144,5 @@ if [ -n "$DEST_DEV" ]; then umount $DEST_DIR rmdir $DEST_DIR fi + +trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT diff --git a/tools/setup_stack_user.sh b/tools/copy_dev_environment_to_uec.sh similarity index 63% rename from tools/setup_stack_user.sh rename to tools/copy_dev_environment_to_uec.sh index 231a20f3..3fd4423f 100755 --- a/tools/setup_stack_user.sh +++ b/tools/copy_dev_environment_to_uec.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# **copy_dev_environment_to_uec.sh** + # Echo commands set -o xtrace @@ -8,11 +10,17 @@ set -o errexit # Keep track of the current directory TOOLS_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=`cd $TOOLS_DIR/..; pwd` +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) + +# Import common functions +. $TOP_DIR/functions # Change dir to top of devstack cd $TOP_DIR +# Source params +source ./stackrc + # Echo usage usage() { echo "Add stack user and keys" @@ -38,27 +46,14 @@ mkdir -p $STAGING_DIR/$DEST # Create a stack user that is a member of the libvirtd group so that stack # is able to interact with libvirt. 
 chroot $STAGING_DIR groupadd libvirtd || true
-chroot $STAGING_DIR useradd stack -s /bin/bash -d $DEST -G libvirtd || true
+chroot $STAGING_DIR useradd $STACK_USER -s /bin/bash -d $DEST -G libvirtd || true

 # Add a simple password - pass
-echo stack:pass | chroot $STAGING_DIR chpasswd
+echo $STACK_USER:pass | chroot $STAGING_DIR chpasswd

 # Configure sudo
-grep -q "^#includedir.*/etc/sudoers.d" $STAGING_DIR/etc/sudoers ||
-    echo "#includedir /etc/sudoers.d" | sudo tee -a $STAGING_DIR/etc/sudoers
-cp $TOP_DIR/files/sudo/* $STAGING_DIR/etc/sudoers.d/
-sed -e "s,%USER%,$USER,g" -i $STAGING_DIR/etc/sudoers.d/*
-
-# and has sudo ability (in the future this should be limited to only what
-# stack requires)
-echo "stack ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers
-
-# Gracefully cp only if source file/dir exists
-function cp_it {
-    if [ -e $1 ] || [ -d $1 ]; then
-        cp -pRL $1 $2
-    fi
-}
+( umask 226 && echo "$STACK_USER ALL=(ALL) NOPASSWD:ALL" \
+    > $STAGING_DIR/etc/sudoers.d/50_stack_sh )

 # Copy over your ssh keys and env if desired
 cp_it ~/.ssh $STAGING_DIR/$DEST/.ssh
@@ -67,8 +62,12 @@ cp_it ~/.gitconfig $STAGING_DIR/$DEST/.gitconfig
 cp_it ~/.vimrc $STAGING_DIR/$DEST/.vimrc
 cp_it ~/.bashrc $STAGING_DIR/$DEST/.bashrc

+# Copy devstack
+rm -rf $STAGING_DIR/$DEST/devstack
+cp_it . $STAGING_DIR/$DEST/devstack
+
 # Give stack ownership over $DEST so it may do the work needed
-chroot $STAGING_DIR chown -R stack $DEST
+chroot $STAGING_DIR chown -R $STACK_USER $DEST

 # Unmount
 umount $STAGING_DIR
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh
new file mode 100755
index 00000000..619d63f7
--- /dev/null
+++ b/tools/create_userrc.sh
@@ -0,0 +1,258 @@
+#!/usr/bin/env bash
+
+# **create_userrc.sh**
+
+# Pre-create rc files and credentials for the default users.
+
+# Warning: This script is just for development purposes
+
+ACCOUNT_DIR=./accrc
+
+display_help()
+{
+cat <<EOF
+
+This script creates certificates and sourceable rc files per tenant/user.
+
+Target account directory hierarchy:
+target_dir-|
+           |-cacert.pem
+           |-tenant1-name|
+           |             |- user1
+           |             |- user1-cert.pem
+           |             |- user1-pk.pem
+           |             |- user2
+           |             ..
+           |-tenant2-name..
+           ..
+
+Optional Arguments
+-P include the password in the rc files; with -A it assumes all users' passwords are the same
+-A try with all users
+-u create files just for the specified user
+-C create user and tenant; the specified tenant will be the user's tenant
+-r when combined with -C and the (-u) user exists, it will be the user's role in the (-C) tenant (default: Member)
+-p password for the user
+--os-username <username>
+--os-password <admin password>
+--os-tenant-name <tenant_name>
+--os-tenant-id <tenant_id>
+--os-auth-url <auth_url>
+--target-dir <target_directory>
+--skip-tenant <tenant_name>
+--debug
+
+Example:
+$0 -AP
+$0 -P -C mytenant -u myuser -p mypass
+EOF
+}
+
+if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,help,debug -- "$@")
+then
+    # Parse error
+    display_help
+    exit 1
+fi
+eval set -- $options
+ADDPASS=""
+
+# The service users are usually in the service tenant.
+# rc files for service users are out of scope.
+# Supporting a different tenant for services is out of scope.
+SKIP_TENANT=",service," # tenant names are between commas(,) +MODE="" +ROLE=Member +USER_NAME="" +USER_PASS="" +while [ $# -gt 0 ] +do + case "$1" in + -h|--help) display_help; exit 0 ;; + --os-username) export OS_USERNAME=$2; shift ;; + --os-password) export OS_PASSWORD=$2; shift ;; + --os-tenant-name) export OS_TENANT_NAME=$2; shift ;; + --os-tenant-id) export OS_TENANT_ID=$2; shift ;; + --skip-tenant) SKIP_TENANT="$SKIP_TENANT$2,"; shift ;; + --os-auth-url) export OS_AUTH_URL=$2; shift ;; + --target-dir) ACCOUNT_DIR=$2; shift ;; + --debug) set -o xtrace ;; + -u) MODE=${MODE:-one}; USER_NAME=$2; shift ;; + -p) USER_PASS=$2; shift ;; + -A) MODE=all; ;; + -P) ADDPASS="yes" ;; + -C) MODE=create; TENANT=$2; shift ;; + -r) ROLE=$2; shift ;; + (--) shift; break ;; + (-*) echo "$0: error - unrecognized option $1" >&2; display_help; exit 1 ;; + (*) echo "$0: error - unexpected argument $1" >&2; display_help; exit 1 ;; + esac + shift +done + +if [ -z "$OS_PASSWORD" ]; then + if [ -z "$ADMIN_PASSWORD" ];then + echo "The admin password is required option!" >&2 + exit 2 + else + OS_PASSWORD=$ADMIN_PASSWORD + fi +fi + +if [ -z "$OS_TENANT_NAME" -a -z "$OS_TENANT_ID" ]; then + export OS_TENANT_NAME=admin +fi + +if [ -z "$OS_USERNAME" ]; then + export OS_USERNAME=admin +fi + +if [ -z "$OS_AUTH_URL" ]; then + export OS_AUTH_URL=http://localhost:5000/v2.0/ +fi + +USER_PASS=${USER_PASS:-$OS_PASSWORD} +USER_NAME=${USER_NAME:-$OS_USERNAME} + +if [ -z "$MODE" ]; then + echo "You must specify at least -A or -u parameter!" >&2 + echo + display_help + exit 3 +fi + +export -n SERVICE_TOKEN SERVICE_ENDPOINT OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT + +EC2_URL=http://localhost:8773/service/Cloud +S3_URL=http://localhost:3333 + +ec2=`keystone endpoint-get --service ec2 | awk '/\|[[:space:]]*ec2.publicURL/ {print $4}'` +[ -n "$ec2" ] && EC2_URL=$ec2 + +s3=`keystone endpoint-get --service s3 | awk '/\|[[:space:]]*s3.publicURL/ {print $4}'` +[ -n "$s3" ] && S3_URL=$s3 + + +mkdir -p "$ACCOUNT_DIR" +ACCOUNT_DIR=`readlink -f "$ACCOUNT_DIR"` +EUCALYPTUS_CERT=$ACCOUNT_DIR/cacert.pem +mv "$EUCALYPTUS_CERT" "$EUCALYPTUS_CERT.old" &>/dev/null +if ! 
+if ! nova x509-get-root-cert "$EUCALYPTUS_CERT"; then
+    echo "Failed to update the root certificate: $EUCALYPTUS_CERT" >&2
+    mv "$EUCALYPTUS_CERT.old" "$EUCALYPTUS_CERT" &>/dev/null
+fi
+
+
+function add_entry(){
+    local user_id=$1
+    local user_name=$2
+    local tenant_id=$3
+    local tenant_name=$4
+    local user_passwd=$5
+
+    # The admin user can see every user's secret AWS keys; that does not look good
+    local line=`keystone ec2-credentials-list --user_id $user_id | grep -E "^\\|[[:space:]]*($tenant_name|$tenant_id)[[:space:]]*\\|" | head -n 1`
+    if [ -z "$line" ]; then
+        keystone ec2-credentials-create --user-id $user_id --tenant-id $tenant_id 1>&2
+        line=`keystone ec2-credentials-list --user_id $user_id | grep -E "^\\|[[:space:]]*($tenant_name|$tenant_id)[[:space:]]*\\|" | head -n 1`
+    fi
+    local ec2_access_key ec2_secret_key
+    read ec2_access_key ec2_secret_key <<< `echo $line | awk '{print $4 " " $6 }'`
+    mkdir -p "$ACCOUNT_DIR/$tenant_name"
+    local rcfile="$ACCOUNT_DIR/$tenant_name/$user_name"
+    # The cert's subject parts are the tenant ID "dash" user ID, but the CN should be the first part of the DN
+    # Generally the subject DN parts should be in reverse order, like the Issuer
+    # The serial does not seem correctly marked either
+    local ec2_cert="$rcfile-cert.pem"
+    local ec2_private_key="$rcfile-pk.pem"
+    # Try to preserve the original file on failure (best effort)
+    mv -f "$ec2_private_key" "$ec2_private_key.old" &>/dev/null
+    mv -f "$ec2_cert" "$ec2_cert.old" &>/dev/null
+    # It will not create certs when the password is incorrect
+    if ! nova --os-password "$user_passwd" --os-username "$user_name" --os-tenant-name "$tenant_name" x509-create-cert "$ec2_private_key" "$ec2_cert"; then
+        mv -f "$ec2_private_key.old" "$ec2_private_key" &>/dev/null
+        mv -f "$ec2_cert.old" "$ec2_cert" &>/dev/null
+    fi
+    cat >"$rcfile" <>"$rcfile"
+    fi
+}
+
+# Admin user expected
+function create_or_get_tenant(){
+    local tenant_name=$1
+    local tenant_id=`keystone tenant-list | awk '/\|[[:space:]]*'"$tenant_name"'[[:space:]]*\|.*\|/ {print $2}'`
+    if [ -n "$tenant_id" ]; then
+        echo $tenant_id
+    else
+        keystone tenant-create --name "$tenant_name" | awk '/\|[[:space:]]*id[[:space:]]*\|.*\|/ {print $4}'
+    fi
+}
+
+function create_or_get_role(){
+    local role_name=$1
+    local role_id=`keystone role-list| awk '/\|[[:space:]]*'"$role_name"'[[:space:]]*\|/ {print $2}'`
+    if [ -n "$role_id" ]; then
+        echo $role_id
+    else
+        keystone role-create --name "$role_name" |awk '/\|[[:space:]]*id[[:space:]]*\|.*\|/ {print $4}'
+    fi
+}
+
+# Provides an empty string when the user does not exist
+function get_user_id(){
+    local user_name=$1
+    keystone user-list | awk '/^\|[^|]*\|[[:space:]]*'"$user_name"'[[:space:]]*\|.*\|/ {print $2}'
+}
+
+if [ $MODE != "create" ]; then
+    # Looks like we can't ask for all tenants related to a specified user
+    for tenant_id_at_name in `keystone tenant-list | awk 'BEGIN {IGNORECASE = 1} /true[[:space:]]*\|$/ {print $2 "@" $4}'`; do
+        read tenant_id tenant_name <<< `echo "$tenant_id_at_name" | sed 's/@/ /'`
+        if echo $SKIP_TENANT| grep -q ",$tenant_name,"; then
+            continue;
+        fi
+        for user_id_at_name in `keystone user-list --tenant-id $tenant_id | awk 'BEGIN {IGNORECASE = 1} /true[[:space:]]*\|[^|]*\|$/ {print $2 "@" $4}'`; do
+            read user_id user_name <<< `echo "$user_id_at_name" | sed 's/@/ /'`
+            if [ $MODE = one -a "$user_name" != "$USER_NAME" ]; then
+                continue;
+            fi
+            add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS"
+        done
+    done
+else
+    tenant_name=$TENANT
+    tenant_id=`create_or_get_tenant "$TENANT"`
+    user_name=$USER_NAME
+    user_id=`get_user_id $user_name`
+    if [ -z "$user_id" ]; then
+        # New user
+        user_id=`keystone user-create --name "$user_name" --tenant-id "$tenant_id" --pass "$USER_PASS" --email "$user_name@example.com" | awk '/\|[[:space:]]*id[[:space:]]*\|.*\|/ {print $4}'`
+        # The password is on the cmd line; it is not a good thing
+        add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS"
+    else
+        # New role
+        role_id=`create_or_get_role "$ROLE"`
+        keystone user-role-add --user-id "$user_id" --tenant-id "$tenant_id" --role-id "$role_id"
+        add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS"
+    fi
+fi
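Once generated, each rc file can be sourced to switch identities.  An illustrative
session (the ``demo``/``demo`` tenant/user pair is only an example - the actual
names depend on the accounts present in Keystone)::

    # Create rc files and certificates for every enabled user,
    # embedding passwords (-P) in the generated files
    ./tools/create_userrc.sh -PA --target-dir ./accrc

    # Adopt the demo user's credentials, then use any OpenStack CLI
    source accrc/demo/demo
    nova list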
+    tenant_id=`create_or_get_tenant "$TENANT"`
+    user_name=$USER_NAME
+    user_id=`get_user_id $user_name`
+    if [ -z "$user_id" ]; then
+        # New user
+        user_id=`keystone user-create --name "$user_name" --tenant-id "$tenant_id" --pass "$USER_PASS" --email "$user_name@example.com" | awk '/\|[[:space:]]*id[[:space:]]*\|.*\|/ {print $4}'`
+        # Note: the password is passed on the command line, which is not ideal
+        add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS"
+    else
+        # Existing user: add the requested role
+        role_id=`create_or_get_role "$ROLE"`
+        keystone user-role-add --user-id "$user_id" --tenant-id "$tenant_id" --role-id "$role_id"
+        add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS"
+    fi
+fi
diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh
index 7b95aab8..3c62064a 100755
--- a/tools/get_uec_image.sh
+++ b/tools/get_uec_image.sh
@@ -1,31 +1,32 @@
 #!/bin/bash
-# get_uec_image.sh - Prepare Ubuntu images in various formats
-#
-# Supported formats: qcow (kvm), vmdk (vmserver), vdi (vbox), vhd (vpc), raw
-#
-# Required to run as root
-CACHEDIR=${CACHEDIR:-/var/cache/devstack}
-FORMAT=${FORMAT:-qcow2}
-ROOTSIZE=${ROOTSIZE:-2000}
-MIN_PKGS=${MIN_PKGS:-"apt-utils gpgv openssh-server"}
+# **get_uec_image.sh**
+
+# Download and prepare Ubuntu UEC images
+
+CACHEDIR=${CACHEDIR:-/opt/stack/cache}
+ROOTSIZE=${ROOTSIZE:-2000M}
 
 # Keep track of the current directory
 TOOLS_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=`cd $TOOLS_DIR/..; pwd`
+TOP_DIR=$(cd $TOOLS_DIR/..; pwd)
 
-# exit on error to stop unexpected errors
+# Import common functions
+. $TOP_DIR/functions
+
+# Exit on error to stop unexpected errors
 set -o errexit
+set -o xtrace
 
 usage() {
-    echo "Usage: $0 - Prepare Ubuntu images"
+    echo "Usage: $0 - Download and prepare Ubuntu UEC images"
     echo ""
-    echo "$0 [-f format] [-r rootsize] release imagefile"
+    echo "$0 [-r rootsize] release imagefile [kernel]"
     echo ""
-    echo "-f format - image format: qcow2 (default), vmdk, vdi, vhd, xen, raw, fs"
-    echo "-r size   - root fs size in MB (min 2000MB)"
-    echo "release   - Ubuntu release: jaunty - oneric"
+    echo "-r size   - root fs size (min 2000MB)"
+    echo "release   - Ubuntu release: lucid - quantal"
     echo "imagefile - output image file"
+    echo "kernel    - output kernel"
     exit 1
 }
 
@@ -38,42 +39,21 @@ cleanup() {
         rm -f $IMG_FILE_TMP
     fi
 
-    # Release NBD devices
-    if [ -n "$NBD" ]; then
-        qemu-nbd -d $NBD
-    fi
-
     # Kill ourselves to signal any calling process
     trap 2; kill -2 $$
 }
 
-# apt-get wrapper to just get arguments set correctly
-function apt_get() {
-    local sudo="sudo"
-    [ "$(id -u)" = "0" ] && sudo="env"
-    $sudo DEBIAN_FRONTEND=noninteractive apt-get \
-        --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
-}
-
-while getopts f:hmr: c; do
+while getopts hr: c; do
     case $c in
-        f) FORMAT=$OPTARG
-           ;;
         h) usage
           ;;
-        m) MINIMAL=1
-           ;;
         r) ROOTSIZE=$OPTARG
-           if [[ $ROOTSIZE < 2000 ]]; then
-               echo "root size must be greater than 2000MB"
-               exit 1
-           fi
          ;;
    esac
done
shift `expr $OPTIND - 1`

-if [ ! "$#" -eq "2" ]; then
+if [[ ! "$#" -eq "2" && !
"$#" -eq "3" ]]; then usage fi @@ -81,134 +61,51 @@ fi DIST_NAME=$1 IMG_FILE=$2 IMG_FILE_TMP=`mktemp $IMG_FILE.XXXXXX` - -case $FORMAT in - kvm|qcow2) FORMAT=qcow2 - QFORMAT=qcow2 - ;; - vmserver|vmdk) - FORMAT=vmdk - QFORMAT=vmdk - ;; - vbox|vdi) FORMAT=vdi - QFORMAT=vdi - ;; - vhd|vpc) FORMAT=vhd - QFORMAT=vpc - ;; - xen) FORMAT=raw - QFORMAT=raw - ;; - raw) FORMAT=raw - QFORMAT=raw - ;; - *) echo "Unknown format: $FORMAT" - usage -esac +KERNEL=$3 case $DIST_NAME in + quantal) ;; + precise) ;; oneiric) ;; natty) ;; maverick) ;; lucid) ;; - karmic) ;; - jaunty) ;; *) echo "Unknown release: $DIST_NAME" usage ;; esac -trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT - -# Check for dependencies +trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT -if [ ! -x "`which qemu-img`" -o ! -x "`which qemu-nbd`" ]; then +# Check dependencies +if [ ! -x "`which qemu-img`" -o -z "`dpkg -l | grep cloud-utils`" ]; then # Missing KVM? - apt_get install qemu-kvm + apt_get install qemu-kvm cloud-utils fi -# Prepare the base image +# Find resize script +RESIZE=`which resize-part-image || which uec-resize-image` +if [ -z "$RESIZE" ]; then + echo "resize tool from cloud-utils not found" + exit 1 +fi # Get the UEC image UEC_NAME=$DIST_NAME-server-cloudimg-amd64 -if [ ! -e $CACHEDIR/$UEC_NAME-disk1.img ]; then - mkdir -p $CACHEDIR - (cd $CACHEDIR && wget -N http://uec-images.ubuntu.com/$DIST_NAME/current/$UEC_NAME-disk1.img) -fi - -if [ "$FORMAT" = "qcow2" ]; then - # Just copy image - cp -p $CACHEDIR/$UEC_NAME-disk1.img $IMG_FILE_TMP -else - # Convert image - qemu-img convert -O $QFORMAT $CACHEDIR/$UEC_NAME-disk1.img $IMG_FILE_TMP +if [ ! -d $CACHEDIR/$DIST_NAME ]; then + mkdir -p $CACHEDIR/$DIST_NAME fi - -# Resize the image if necessary -if [ $ROOTSIZE -gt 2000 ]; then - # Resize the container - qemu-img resize $IMG_FILE_TMP +$((ROOTSIZE - 2000))M +if [ ! -e $CACHEDIR/$DIST_NAME/$UEC_NAME.tar.gz ]; then + (cd $CACHEDIR/$DIST_NAME && wget -N http://uec-images.ubuntu.com/$DIST_NAME/current/$UEC_NAME.tar.gz) + (cd $CACHEDIR/$DIST_NAME && tar Sxvzf $UEC_NAME.tar.gz) fi -# Finds the next available NBD device -# Exits script if error connecting or none free -# map_nbd image -# returns full nbd device path -function map_nbd { - for i in `seq 0 15`; do - if [ ! -e /sys/block/nbd$i/pid ]; then - NBD=/dev/nbd$i - # Connect to nbd and wait till it is ready - qemu-nbd -c $NBD $1 - if ! timeout 60 sh -c "while ! 
[ -e ${NBD}p1 ]; do sleep 1; done"; then
-            echo "Couldn't connect $NBD"
-            exit 1
-        fi
-        break
-    fi
-    done
-    if [ -z "$NBD" ]; then
-        echo "No free NBD slots"
-        exit 1
-    fi
-    echo $NBD
-}
+$RESIZE $CACHEDIR/$DIST_NAME/$UEC_NAME.img ${ROOTSIZE} $IMG_FILE_TMP
+mv $IMG_FILE_TMP $IMG_FILE
 
-# Set up nbd
-modprobe nbd max_part=63
-NBD=`map_nbd $IMG_FILE_TMP`
-
-# Resize partition 1 to full size of the disk image
-echo "d
-n
-p
-1
-2
-
-t
-83
-a
-1
-w
-" | fdisk $NBD
-e2fsck -f -p ${NBD}p1
-resize2fs ${NBD}p1
-
-# Do some preliminary installs
-MNTDIR=`mktemp -d mntXXXXXXXX`
-mount -t ext4 ${NBD}p1 $MNTDIR
-
-# Install our required packages
-cp -p files/sources.list $MNTDIR/etc/apt/sources.list
-sed -e "s,%DIST%,$DIST_NAME,g" -i $MNTDIR/etc/apt/sources.list
-cp -p /etc/resolv.conf $MNTDIR/etc/resolv.conf
-chroot $MNTDIR apt-get update
-chroot $MNTDIR apt-get install -y $MIN_PKGS
-rm -f $MNTDIR/etc/resolv.conf
-
-umount $MNTDIR
-rmdir $MNTDIR
-qemu-nbd -d $NBD
-NBD=""
+# Copy kernel to destination
+if [ -n "$KERNEL" ]; then
+    cp -p $CACHEDIR/$DIST_NAME/*-vmlinuz-virtual $KERNEL
+fi
 
-mv $IMG_FILE_TMP $IMG_FILE
+trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT
diff --git a/tools/info.sh b/tools/info.sh
new file mode 100755
index 00000000..ef1f3380
--- /dev/null
+++ b/tools/info.sh
@@ -0,0 +1,159 @@
+#!/usr/bin/env bash
+
+# **info.sh**
+
+# Produce a report on the state of devstack installs
+#
+# Output fields are separated with '|' chars
+# Output types are git,localrc,os,pip,pkg:
+#
+#   git|<project>|<branch>[<sha1>]
+#   localrc|<var>=<value>
+#   os|<var>=<value>
+#   pip|<package>|<version>
+#   pkg|<package>|<version>
+
+function usage {
+    echo "$0 - Report on the devstack configuration"
+    echo ""
+    echo "Usage: $0"
+    exit 1
+}
+
+if [ "$1" = "-h" ]; then
+    usage
+fi
+
+# Keep track of the current directory
+TOOLS_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $TOOLS_DIR/..; pwd)
+cd $TOP_DIR
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Source params
+source $TOP_DIR/stackrc
+
+DEST=${DEST:-/opt/stack}
+FILES=$TOP_DIR/files
+if [[ ! -d $FILES ]]; then
+    echo "ERROR: missing devstack/files - did you grab more than just stack.sh?"
+    exit 1
+fi
+
+
+# OS
+# --
+
+# Determine what OS we're using
+GetDistro
+
+echo "os|distro=$DISTRO"
+echo "os|vendor=$os_VENDOR"
+echo "os|release=$os_RELEASE"
+if [ -n "$os_UPDATE" ]; then
+    echo "os|version=$os_UPDATE"
+fi
+
+
+# Repos
+# -----
+
+# git_report <dir>
+function git_report() {
+    local dir=$1
+    local proj ref branch head
+    if [[ -d $dir/.git ]]; then
+        pushd $dir >/dev/null
+        proj=$(basename $dir)
+        ref=$(git symbolic-ref HEAD)
+        branch=${ref##refs/heads/}
+        head=$(git show-branch --sha1-name $branch | cut -d' ' -f1)
+        echo "git|${proj}|${branch}${head}"
+        popd >/dev/null
+    fi
+}
+
+for i in $DEST/*; do
+    if [[ -d $i ]]; then
+        git_report $i
+    fi
+done
+
+
+# Packages
+# --------
+
+# - We are going to check packages only for the services needed.
+# - We parse the package files and detect metadata.
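+#
+#   A hypothetical files/apts entry, to illustrate the metadata being parsed
+#   (NOPRIME defers the install to a service's own setup step; dist: limits
+#   the package to the named releases):
+#
+#       qemu-kvm           # NOPRIME
+#       python-mysqldb     # dist:oneiric,precise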
+ +if is_ubuntu; then + PKG_DIR=$FILES/apts +elif is_fedora; then + PKG_DIR=$FILES/rpms +elif is_suse; then + PKG_DIR=$FILES/rpms-suse +else + exit_distro_not_supported "list of packages" +fi + +for p in $(get_packages $PKG_DIR); do + if [[ "$os_PACKAGE" = "deb" ]]; then + ver=$(dpkg -s $p 2>/dev/null | grep '^Version: ' | cut -d' ' -f2) + elif [[ "$os_PACKAGE" = "rpm" ]]; then + ver=$(rpm -q --queryformat "%{VERSION}-%{RELEASE}\n" $p) + else + exit_distro_not_supported "finding version of a package" + fi + echo "pkg|${p}|${ver}" +done + + +# Pips +# ---- + +CMD_PIP=$(get_pip_command) + +# Pip tells us what is currently installed +FREEZE_FILE=$(mktemp --tmpdir freeze.XXXXXX) +$CMD_PIP freeze >$FREEZE_FILE 2>/dev/null + +# Loop through our requirements and look for matches +while read line; do + if [[ -n "$line" ]]; then + if [[ "$line" =~ \+(.*)@(.*)#egg=(.*) ]]; then + # Handle URLs + p=${BASH_REMATCH[1]} + ver=${BASH_REMATCH[2]} + elif [[ "$line" =~ (.*)[=\<\>]=(.*) ]]; then + # Normal pip packages + p=${BASH_REMATCH[1]} + ver=${BASH_REMATCH[2]} + else + # Unhandled format in freeze file + #echo "unknown: $p" + continue + fi + echo "pip|${p}|${ver}" + else + # No match in freeze file + #echo "unknown: $p" + continue + fi +done <$FREEZE_FILE + +rm $FREEZE_FILE + + +# localrc +# ------- + +# Dump localrc with 'localrc|' prepended and comments and passwords left out +if [[ -r $TOP_DIR/localrc ]]; then + sed -e ' + /PASSWORD/d; + /^#/d; + s/^/localrc\|/; + ' $TOP_DIR/localrc +fi diff --git a/tools/install_openvpn.sh b/tools/install_openvpn.sh old mode 100644 new mode 100755 index a3a2346f..2f52aa14 --- a/tools/install_openvpn.sh +++ b/tools/install_openvpn.sh @@ -1,5 +1,8 @@ #!/bin/bash -# install_openvpn.sh - Install OpenVPN and generate required certificates + +# **install_openvpn.sh** + +# Install OpenVPN and generate required certificates # # install_openvpn.sh --client name # install_openvpn.sh --server [name] @@ -10,18 +13,42 @@ # --server mode configures the host with a running OpenVPN server instance # --client mode creates a tarball of a client configuration for this server +# Get config file +if [ -e localrc ]; then + . localrc +fi +if [ -e vpnrc ]; then + . vpnrc +fi + +# Do some IP manipulation +function cidr2netmask() { + set -- $(( 5 - ($1 / 8) )) 255 255 255 255 $(( (255 << (8 - ($1 % 8))) & 255 )) 0 0 0 + if [[ $1 -gt 1 ]]; then + shift $1 + else + shift + fi + echo ${1-0}.${2-0}.${3-0}.${4-0} +} + +FIXED_NET=`echo $FIXED_RANGE | cut -d'/' -f1` +FIXED_CIDR=`echo $FIXED_RANGE | cut -d'/' -f2` +FIXED_MASK=`cidr2netmask $FIXED_CIDR` + # VPN Config VPN_SERVER=${VPN_SERVER:-`ifconfig eth0 | awk "/inet addr:/ { print \$2 }" | cut -d: -f2`} # 50.56.12.212 VPN_PROTO=${VPN_PROTO:-tcp} VPN_PORT=${VPN_PORT:-6081} -VPN_DEV=${VPN_DEV:-tun} -VPN_CLIENT_NET=${VPN_CLIENT_NET:-172.16.28.0} -VPN_CLIENT_MASK=${VPN_CLIENT_MASK:-255.255.255.0} -VPN_LOCAL_NET=${VPN_LOCAL_NET:-10.0.0.0} -VPN_LOCAL_MASK=${VPN_LOCAL_MASK:-255.255.0.0} +VPN_DEV=${VPN_DEV:-tap0} +VPN_BRIDGE=${VPN_BRIDGE:-br100} +VPN_BRIDGE_IF=${VPN_BRIDGE_IF:-$FLAT_INTERFACE} +VPN_CLIENT_NET=${VPN_CLIENT_NET:-$FIXED_NET} +VPN_CLIENT_MASK=${VPN_CLIENT_MASK:-$FIXED_MASK} +VPN_CLIENT_DHCP="${VPN_CLIENT_DHCP:-net.1 net.254}" VPN_DIR=/etc/openvpn -CA_DIR=/etc/openvpn/easy-rsa +CA_DIR=$VPN_DIR/easy-rsa usage() { echo "$0 - OpenVPN install and certificate generation" @@ -39,14 +66,24 @@ if [ -z $1 ]; then fi # Install OpenVPN -if [ ! -x `which openvpn` ]; then +VPN_EXEC=`which openvpn` +if [ -z "$VPN_EXEC" -o ! 
-x "$VPN_EXEC" ]; then apt-get install -y openvpn bridge-utils fi if [ ! -d $CA_DIR ]; then cp -pR /usr/share/doc/openvpn/examples/easy-rsa/2.0/ $CA_DIR fi -OPWD=`pwd` +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $TOOLS_DIR/.. && pwd) + +WEB_DIR=$TOP_DIR/../vpn +if [[ ! -d $WEB_DIR ]]; then + mkdir -p $WEB_DIR +fi +WEB_DIR=$(cd $TOP_DIR/../vpn && pwd) + cd $CA_DIR source ./vars @@ -73,21 +110,51 @@ do_server() { (cd $CA_DIR/keys; cp $NAME.crt $NAME.key ca.crt dh1024.pem ta.key $VPN_DIR ) + cat >$VPN_DIR/br-up <$VPN_DIR/br-down <$VPN_DIR/$NAME.conf </dev/null || echo "0") +DELTA=$(($NOW - $LAST_RUN)) +if [[ $DELTA -lt $PREREQ_RERUN_SECONDS && -z "$FORCE_PREREQ" ]]; then + echo "Re-run time has not expired ($(($PREREQ_RERUN_SECONDS - $DELTA)) seconds remaining); exiting..." + return 0 +fi + +# Make sure the proxy config is visible to sub-processes +export_proxy_variables + + +# Install Packages +# ================ + +# Install package requirements +if is_ubuntu; then + install_package $(get_packages $FILES/apts) +elif is_fedora; then + install_package $(get_packages $FILES/rpms) +elif is_suse; then + install_package $(get_packages $FILES/rpms-suse) +else + exit_distro_not_supported "list of packages" +fi + +if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then + if is_ubuntu || is_fedora; then + install_package rsyslog-relp + elif is_suse; then + install_package rsyslog-module-relp + else + exit_distro_not_supported "rsyslog-relp installation" + fi +fi + + +# Mark end of run +# --------------- + +date "+%s" >$PREREQ_RERUN_MARKER +date >>$PREREQ_RERUN_MARKER diff --git a/tools/jenkins/README.md b/tools/jenkins/README.md new file mode 100644 index 00000000..371017db --- /dev/null +++ b/tools/jenkins/README.md @@ -0,0 +1,38 @@ +Getting Started With Jenkins and Devstack +========================================= +This little corner of devstack is to show how to get an Openstack jenkins +environment up and running quickly, using the rcb configuration methodology. + + +To create a jenkins server +-------------------------- + + cd tools/jenkins/jenkins_home + ./build_jenkins.sh + +This will create a jenkins environment configured with sample test scripts that run against xen and kvm. + +Configuring XS +-------------- +In order to make the tests for XS work, you must install xs 5.6 on a separate machine, +and install the the jenkins public key on that server. You then need to create the +/var/lib/jenkins/xenrc on your jenkins server like so: + + MYSQL_PASSWORD=secrete + SERVICE_TOKEN=secrete + ADMIN_PASSWORD=secrete + RABBIT_PASSWORD=secrete + # This is the password for your guest (for both stack and root users) + GUEST_PASSWORD=secrete + # Do not download the usual images yet! + IMAGE_URLS="" + FLOATING_RANGE=192.168.1.224/28 + VIRT_DRIVER=xenserver + # Explicitly set multi-host + MULTI_HOST=1 + # Give extra time for boot + ACTIVE_TIMEOUT=45 + # IMPORTANT: This is the ip of your xenserver + XEN_IP=10.5.5.1 + # IMPORTANT: The following must be set to your dom0 root password! + XENAPI_PASSWORD='MY_XEN_ROOT_PW' diff --git a/tools/jenkins/adapters/euca.sh b/tools/jenkins/adapters/euca.sh new file mode 100755 index 00000000..b49ce9f2 --- /dev/null +++ b/tools/jenkins/adapters/euca.sh @@ -0,0 +1,8 @@ +#!/bin/bash +# Echo commands, exit on error +set -o xtrace +set -o errexit + +TOP_DIR=$(cd ../../.. 
&& pwd) +HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2` +ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./euca.sh' diff --git a/tools/jenkins/adapters/floating_ips.sh b/tools/jenkins/adapters/floating_ips.sh new file mode 100755 index 00000000..a97f9357 --- /dev/null +++ b/tools/jenkins/adapters/floating_ips.sh @@ -0,0 +1,8 @@ +#!/bin/bash +# Echo commands, exit on error +set -o xtrace +set -o errexit + +TOP_DIR=$(cd ../../.. && pwd) +HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2` +ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./floating_ips.sh' diff --git a/tools/jenkins/adapters/swift.sh b/tools/jenkins/adapters/swift.sh new file mode 100755 index 00000000..c1362ee4 --- /dev/null +++ b/tools/jenkins/adapters/swift.sh @@ -0,0 +1,8 @@ +#!/bin/bash +# Echo commands, exit on error +set -o xtrace +set -o errexit + +TOP_DIR=$(cd ../../.. && pwd) +HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2` +ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./swift.sh' diff --git a/tools/jenkins/adapters/volumes.sh b/tools/jenkins/adapters/volumes.sh new file mode 100755 index 00000000..ec292097 --- /dev/null +++ b/tools/jenkins/adapters/volumes.sh @@ -0,0 +1,8 @@ +#!/bin/bash +# Echo commands, exit on error +set -o xtrace +set -o errexit + +TOP_DIR=$(cd ../../.. && pwd) +HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2` +ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./volumes.sh' diff --git a/tools/jenkins/build_configuration.sh b/tools/jenkins/build_configuration.sh new file mode 100755 index 00000000..e295ef20 --- /dev/null +++ b/tools/jenkins/build_configuration.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +EXECUTOR_NUMBER=$1 +CONFIGURATION=$2 +ADAPTER=$3 +RC=$4 + +function usage() { + echo "Usage: $0 - Build a configuration" + echo "" + echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]" + exit 1 +} + +# Validate inputs +if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = "" || "$ADAPTER" = "" ]]; then + usage +fi + +# Execute configuration script +cd configurations && ./$CONFIGURATION.sh $EXECUTOR_NUMBER $CONFIGURATION $ADAPTER "$RC" diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh new file mode 100755 index 00000000..d9a160ad --- /dev/null +++ b/tools/jenkins/configurations/kvm.sh @@ -0,0 +1,64 @@ +#!/bin/bash + +# exit on error to stop unexpected errors +set -o errexit +set -o xtrace + +EXECUTOR_NUMBER=$1 +CONFIGURATION=$2 +ADAPTER=$3 +RC=$4 + +function usage() { + echo "Usage: $0 - Build a test configuration" + echo "" + echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]" + exit 1 +} + +# Validate inputs +if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = "" || "$ADAPTER" = "" ]]; then + usage +fi + +# This directory +CUR_DIR=$(cd $(dirname "$0") && pwd) + +# devstack directory +cd ../../.. 
+TOP_DIR=$(pwd)
+
+# Deps
+apt-get install -y --force-yes libvirt-bin || true
+
+# Name test instance based on executor
+BASE_NAME=executor-`printf "%02d" $EXECUTOR_NUMBER`
+GUEST_NAME=$BASE_NAME.$ADAPTER
+virsh list | grep $BASE_NAME | cut -d " " -f1 | xargs -n 1 virsh destroy || true
+virsh net-list | grep $BASE_NAME | cut -d " " -f1 | xargs -n 1 virsh net-destroy || true
+
+# Configure localrc
+cat << EOF >localrc
+RECLONE=yes
+GUEST_NETWORK=$EXECUTOR_NUMBER
+GUEST_NAME=$GUEST_NAME
+FLOATING_RANGE=192.168.$EXECUTOR_NUMBER.128/27
+GUEST_CORES=1
+GUEST_RAM=12574720
+MYSQL_PASSWORD=chicken
+RABBIT_PASSWORD=chicken
+SERVICE_TOKEN=chicken
+SERVICE_PASSWORD=chicken
+ADMIN_PASSWORD=chicken
+USERNAME=admin
+TENANT=admin
+NET_NAME=$BASE_NAME
+ACTIVE_TIMEOUT=45
+BOOT_TIMEOUT=45
+$RC
+EOF
+cd tools
+sudo ./build_uec.sh
+
+# Make the address of the instances available to test runners
+echo HEAD=`cat /var/lib/libvirt/dnsmasq/$BASE_NAME.leases | cut -d " " -f3` > $TOP_DIR/addresses
diff --git a/tools/jenkins/configurations/xs.sh b/tools/jenkins/configurations/xs.sh
new file mode 100755
index 00000000..864f9491
--- /dev/null
+++ b/tools/jenkins/configurations/xs.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+set -o errexit
+set -o xtrace
+
+
+EXECUTOR_NUMBER=$1
+CONFIGURATION=$2
+ADAPTER=$3
+RC=$4
+
+function usage() {
+    echo "Usage: $0 - Build a test configuration"
+    echo ""
+    echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]"
+    exit 1
+}
+
+# Validate inputs
+if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = "" || "$ADAPTER" = "" ]]; then
+    usage
+fi
+
+# Configuration of xenrc
+XENRC=/var/lib/jenkins/xenrc
+if [ ! -e $XENRC ]; then
+    echo "/var/lib/jenkins/xenrc is not present! See README.md"
+    exit 1
+fi
+
+# Move to top of devstack
+cd ../../..
+
+# Use xenrc as the start of our localrc
+cp $XENRC localrc
+
+# Set the PUB_IP
+PUB_IP=192.168.1.1$EXECUTOR_NUMBER
+echo "PUB_IP=$PUB_IP" >> localrc
+
+# Overrides
+echo "$RC" >> localrc
+
+# Source localrc
+. localrc
+
+# Make host ip available to tester
+echo "HEAD=$PUB_IP" > addresses
+
+# Build configuration
+REMOTE_DEVSTACK=/root/devstack
+ssh root@$XEN_IP "rm -rf $REMOTE_DEVSTACK"
+scp -pr . root@$XEN_IP:$REMOTE_DEVSTACK
+ssh root@$XEN_IP "cd $REMOTE_DEVSTACK/tools/xen && ./build_domU.sh"
diff --git a/tools/jenkins/jenkins_home/.gitignore b/tools/jenkins/jenkins_home/.gitignore
new file mode 100644
index 00000000..d831d01c
--- /dev/null
+++ b/tools/jenkins/jenkins_home/.gitignore
@@ -0,0 +1,3 @@
+builds
+workspace
+*.sw*
diff --git a/tools/jenkins/jenkins_home/build_jenkins.sh b/tools/jenkins/jenkins_home/build_jenkins.sh
new file mode 100755
index 00000000..e0e774ee
--- /dev/null
+++ b/tools/jenkins/jenkins_home/build_jenkins.sh
@@ -0,0 +1,108 @@
+#!/bin/bash
+
+# Echo commands, exit on error
+set -o xtrace
+set -o errexit
+
+# Make sure only root can run our script
+if [[ $EUID -ne 0 ]]; then
+    echo "This script must be run as root"
+    exit 1
+fi
+
+# This directory
+CUR_DIR=$(cd $(dirname "$0") && pwd)
+
+# Configure trunk jenkins!
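+# (This pulls Jenkins from the upstream jenkins-ci.org apt repository rather
+# than the distro archive, and adds the repo's key so apt can verify it.)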
+echo "deb http://pkg.jenkins-ci.org/debian binary/" > /etc/apt/sources.list.d/jenkins.list +wget -q -O - http://pkg.jenkins-ci.org/debian/jenkins-ci.org.key | sudo apt-key add - +apt-get update + + +# Clean out old jenkins - useful if you are having issues upgrading +CLEAN_JENKINS=${CLEAN_JENKINS:-no} +if [ "$CLEAN_JENKINS" = "yes" ]; then + apt-get remove jenkins jenkins-common +fi + +# Install software +DEPS="jenkins cloud-utils" +apt-get install -y --force-yes $DEPS + +# Install jenkins +if [ ! -e /var/lib/jenkins ]; then + echo "Jenkins installation failed" + exit 1 +fi + +# Make sure user has configured a jenkins ssh pubkey +if [ ! -e /var/lib/jenkins/.ssh/id_rsa.pub ]; then + echo "Public key for jenkins is missing. This is used to ssh into your instances." + echo "Please run "su -c ssh-keygen jenkins" before proceeding" + exit 1 +fi + +# Setup sudo +JENKINS_SUDO=/etc/sudoers.d/jenkins +cat > $JENKINS_SUDO < $JENKINS_GITCONF < + + 4 + Jenkins + jenkins@rcb.me + +EOF + +# Add build numbers +JOBS=`ls jobs` +for job in ${JOBS// / }; do + if [ ! -e jobs/$job/nextBuildNumber ]; then + echo 1 > jobs/$job/nextBuildNumber + fi +done + +# Set ownership to jenkins +chown -R jenkins $CUR_DIR + +# Make sure this directory is accessible to jenkins +if ! su -c "ls $CUR_DIR" jenkins; then + echo "Your devstack directory is not accessible by jenkins." + echo "There is a decent chance you are trying to run this from a directory in /root." + echo "If so, try moving devstack elsewhere (eg. /opt/devstack)." + exit 1 +fi + +# Move aside old jobs, if present +if [ ! -h /var/lib/jenkins/jobs ]; then + echo "Installing jobs symlink" + if [ -d /var/lib/jenkins/jobs ]; then + mv /var/lib/jenkins/jobs /var/lib/jenkins/jobs.old + fi +fi + +# Set up jobs symlink +rm -f /var/lib/jenkins/jobs +ln -s $CUR_DIR/jobs /var/lib/jenkins/jobs + +# List of plugins +PLUGINS=http://hudson-ci.org/downloads/plugins/build-timeout/1.6/build-timeout.hpi,http://mirrors.jenkins-ci.org/plugins/git/1.1.12/git.hpi,http://hudson-ci.org/downloads/plugins/global-build-stats/1.2/global-build-stats.hpi,http://hudson-ci.org/downloads/plugins/greenballs/1.10/greenballs.hpi,http://download.hudson-labs.org/plugins/console-column-plugin/1.0/console-column-plugin.hpi + +# Configure plugins +for plugin in ${PLUGINS//,/ }; do + name=`basename $plugin` + dest=/var/lib/jenkins/plugins/$name + if [ ! -e $dest ]; then + curl -L $plugin -o $dest + fi +done + +# Restart jenkins +/etc/init.d/jenkins stop || true +/etc/init.d/jenkins start diff --git a/tools/jenkins/jenkins_home/clean.sh b/tools/jenkins/jenkins_home/clean.sh new file mode 100755 index 00000000..eb03022a --- /dev/null +++ b/tools/jenkins/jenkins_home/clean.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# This script is not yet for general consumption. + +set -o errexit + +if [ ! "$FORCE" = "yes" ]; then + echo "FORCE not set to 'yes'. Make sure this is something you really want to do. Exiting." 
+ exit 1 +fi + +virsh list | cut -d " " -f1 | grep -v "-" | egrep -e "[0-9]" | xargs -n 1 virsh destroy || true +virsh net-list | grep active | cut -d " " -f1 | xargs -n 1 virsh net-destroy || true +killall dnsmasq || true +if [ "$CLEAN" = "yes" ]; then + rm -rf jobs +fi +rm /var/lib/jenkins/jobs +git checkout -f +git fetch +git merge origin/jenkins +./build_jenkins.sh diff --git a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml new file mode 100644 index 00000000..94c51f51 --- /dev/null +++ b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml @@ -0,0 +1,82 @@ + + + + + false + + + + + RC + + + + + + + + 2 + + + origin + +refs/heads/*:refs/remotes/origin/* + git://github.com/cloudbuilders/devstack.git + + + + + master + + + false + false + false + false + false + false + false + + Default + + + + + + + false + + + true + false + false + false + + false + + + ADAPTER + + euca + floating_ips + + + + + + sed -i 's/) 2>&1 | tee "${LOGFILE}"/)/' stack.sh + + + set -o errexit +cd tools/jenkins +sudo ./build_configuration.sh $EXECUTOR_NUMBER kvm $ADAPTER "$RC" + + + set -o errexit +cd tools/jenkins +./run_test.sh $EXECUTOR_NUMBER $ADAPTER $RC "$RC" + + + + + false + diff --git a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/euca/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/euca/config.xml new file mode 100644 index 00000000..0be70a5c --- /dev/null +++ b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/euca/config.xml @@ -0,0 +1,15 @@ + + + false + + + false + false + false + false + + false + + + + \ No newline at end of file diff --git a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/floatingips/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/floatingips/config.xml new file mode 100644 index 00000000..0be70a5c --- /dev/null +++ b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/floatingips/config.xml @@ -0,0 +1,15 @@ + + + false + + + false + false + false + false + + false + + + + \ No newline at end of file diff --git a/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml new file mode 100644 index 00000000..49a57f04 --- /dev/null +++ b/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml @@ -0,0 +1,88 @@ + + + + In order for this to work, you must create a /var/lib/jenkins/xenrc file as described in README.md + false + + + + + RC + + + + + + + + 2 + + + origin + +refs/heads/*:refs/remotes/origin/* + git://github.com/cloudbuilders/devstack.git + + + + + master + + + false + false + false + false + false + false + false + + Default + + + + + + + false + + + true + false + false + false + + false + + + ADAPTER + + euca + floating_ips + + + + + + sed -i 's/) 2>&1 | tee "${LOGFILE}"/)/' stack.sh + + + set -o errexit +cd tools/jenkins +sudo ./build_configuration.sh $EXECUTOR_NUMBER xs $ADAPTER "$RC" + + + #!/bin/bash +set -o errexit +set -o xtrace + +. localrc + +# Unlike kvm, ssh to the xen host to run tests, in case the test instance is launch with a host only network +ssh root@$XEN_IP "cd devstack && . 
localrc && cd tools/jenkins && ./run_test.sh $EXECUTOR_NUMBER $ADAPTER '$RC'" + + + + + + true + diff --git a/tools/jenkins/jenkins_home/print_summary.py b/tools/jenkins/jenkins_home/print_summary.py new file mode 100755 index 00000000..ea943e1c --- /dev/null +++ b/tools/jenkins/jenkins_home/print_summary.py @@ -0,0 +1,45 @@ +#!/usr/bin/python +import urllib +import json +import sys + + +def print_usage(): + print ("Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]" + % sys.argv[0]) + sys.exit() + + +def fetch_blob(url): + return json.loads(urllib.urlopen(url + '/api/json').read()) + + +if len(sys.argv) < 2: + print_usage() + +BASE_URL = sys.argv[1] + +root = fetch_blob(BASE_URL) +results = {} +for job_url in root['jobs']: + job = fetch_blob(job_url['url']) + if job.get('activeConfigurations'): + (tag, name) = job['name'].split('-') + if not results.get(tag): + results[tag] = {} + if not results[tag].get(name): + results[tag][name] = [] + + for config_url in job['activeConfigurations']: + config = fetch_blob(config_url['url']) + + log_url = '' + if config.get('lastBuild'): + log_url = config['lastBuild']['url'] + 'console' + + results[tag][name].append({'test': config['displayName'], + 'status': config['color'], + 'logUrl': log_url, + 'healthReport': config['healthReport']}) + +print json.dumps(results) diff --git a/tools/jenkins/run_test.sh b/tools/jenkins/run_test.sh new file mode 100755 index 00000000..46495637 --- /dev/null +++ b/tools/jenkins/run_test.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +EXECUTOR_NUMBER=$1 +ADAPTER=$2 +RC=$3 + +function usage() { + echo "Usage: $0 - Run a test" + echo "" + echo "$0 [EXECUTOR_NUMBER] [ADAPTER] [RC (optional)]" + exit 1 +} + +# Validate inputs +if [[ "$EXECUTOR_NUMBER" = "" || "$ADAPTER" = "" ]]; then + usage +fi + +# Execute configuration script +cd adapters && ./$ADAPTER.sh $EXECUTOR_NUMBER $ADAPTER "$RC" diff --git a/tools/lxc_network_hostonlyplusnat.sh b/tools/lxc_network_hostonlyplusnat.sh deleted file mode 100755 index 4e29ed70..00000000 --- a/tools/lxc_network_hostonlyplusnat.sh +++ /dev/null @@ -1,93 +0,0 @@ -#!/bin/bash - -# Print some usage info -function usage { - echo "Usage: $0 [OPTION] [host_ip]" - echo "Set up temporary networking for LXC" - echo "" - echo " -n, --dry-run Just print the commands that would execute." - echo " -h, --help Print this usage message." - echo "" - exit -} - -# Allow passing the ip address on the command line. -function process_option { - case "$1" in - -h|--help) usage;; - -n|--dry-run) dry_run=1;; - *) host_ip="$1" - esac -} - -# Set up some defaults -host_ip= -dry_run=0 -bridge=br0 -DRIER= - -# Process the args -for arg in "$@"; do - process_option $arg -done - -if [ $dry_run ]; then - DRIER=echo -fi - -if [ "$UID" -ne "0" ]; then - echo "This script must be run with root privileges." - exit 1 -fi - -# Check for bridge-utils. -BRCTL=`which brctl` -if [ ! -x "$BRCTL" ]; then - echo "This script requires you to install bridge-utils." - echo "Try: sudo apt-get install bridge-utils." - exit 1 -fi - -# Scare off the nubs. -echo "=====================================================" -echo -echo "WARNING" -echo -echo "This script will modify your current network setup," -echo "this can be a scary thing and it is recommended that" -echo "you have something equivalent to physical access to" -echo "this machine before continuing in case your network" -echo "gets all funky." -echo -echo "If you don't want to continue, hit CTRL-C now." 
- -if [ -z "$host_ip" ]; -then - echo "Otherwise, please type in your host's ip address and" - echo "hit enter." - echo - echo "=====================================================" - read host_ip -else - echo "Otherwise hit enter." - echo - echo "=====================================================" - read accept -fi - - -# Add a bridge interface, this will choke if there is already -# a bridge named $bridge -$DRIER $BRCTL addbr $bridge -$DRIER ip addr add 192.168.1.1/24 dev $bridge -if [ $dry_run ]; then - echo "echo 1 > /proc/sys/net/ipv4/ip_forward" -else - echo 1 > /proc/sys/net/ipv4/ip_forward -fi -$DRIER ifconfig $bridge up - -# Set up the NAT for the instances -$DRIER iptables -t nat -A POSTROUTING -s 192.168.1.0/24 -j SNAT --to-source $host_ip -$DRIER iptables -I FORWARD -s 192.168.1.0/24 -j ACCEPT - diff --git a/tools/make_cert.sh b/tools/make_cert.sh new file mode 100755 index 00000000..cb93e57c --- /dev/null +++ b/tools/make_cert.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# **make_cert.sh** + +# Create a CA hierarchy (if necessary) and server certificate +# +# This mimics the CA structure that DevStack sets up when ``tls_proxy`` is enabled +# but in the curent directory unless ``DATA_DIR`` is set + +ENABLE_TLS=True +DATA_DIR=${DATA_DIR:-`pwd`/ca-data} + +ROOT_CA_DIR=$DATA_DIR/root +INT_CA_DIR=$DATA_DIR/int + +# Import common functions +source $TOP_DIR/functions + +# Import TLS functions +source lib/tls + +function usage { + echo "$0 - Create CA and/or certs" + echo "" + echo "Usage: $0 commonName [orgUnit]" + exit 1 +} + +CN=$1 +if [ -z "$CN" ]]; then + usage +fi +ORG_UNIT_NAME=${2:-$ORG_UNIT_NAME} + +# Useful on OS/X +if [[ `uname -s` == 'Darwin' && -d /usr/local/Cellar/openssl ]]; then + # set up for brew-installed modern OpenSSL + OPENSSL_CONF=/usr/local/etc/openssl/openssl.cnf + OPENSSL=/usr/local/Cellar/openssl/*/bin/openssl +fi + +DEVSTACK_CERT_NAME=$CN +DEVSTACK_HOSTNAME=$CN +DEVSTACK_CERT=$DATA_DIR/$DEVSTACK_CERT_NAME.pem + +# Make sure the CA is set up +configure_CA +init_CA + +# Create the server cert +make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME + +# Create a cert bundle +cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT + diff --git a/tools/make_image.sh b/tools/make_image.sh deleted file mode 100755 index a69f5e33..00000000 --- a/tools/make_image.sh +++ /dev/null @@ -1,187 +0,0 @@ -#!/bin/bash -# make_image.sh - Create Ubuntu images in various formats -# -# Supported formats: qcow (kvm), vmdk (vmserver), vdi (vbox), vhd (vpc), raw -# -# Requires sudo to root - -ROOTSIZE=${ROOTSIZE:-8192} -SWAPSIZE=${SWAPSIZE:-1024} -MIN_PKGS=${MIN_PKGS:-"apt-utils gpgv openssh-server"} - -usage() { - echo "Usage: $0 - Create Ubuntu images" - echo "" - echo "$0 [-m] [-r rootsize] [-s swapsize] release format" - echo "$0 -C [-m] release chrootdir" - echo "$0 -I [-r rootsize] [-s swapsize] chrootdir format" - echo "" - echo "-C - Create the initial chroot dir" - echo "-I - Create the final image from a chroot" - echo "-m - minimal installation" - echo "-r size - root fs size in MB" - echo "-s size - swap fs size in MB" - echo "release - Ubuntu release: jaunty - oneric" - echo "format - image format: qcow2, vmdk, vdi, vhd, xen, raw, fs" - exit 1 -} - -while getopts CIhmr:s: c; do - case $c in - C) CHROOTONLY=1 - ;; - I) IMAGEONLY=1 - ;; - h) usage - ;; - m) MINIMAL=1 - ;; - r) ROOTSIZE=$OPTARG - ;; - s) SWAPSIZE=$OPTARG - ;; - esac -done -shift `expr $OPTIND - 1` - -if [ ! 
"$#" -eq "2" -o -n "$CHROOTONLY" -a -n "$IMAGEONLY" ]; then - usage -fi - -# Default args -RELEASE=$1 -FORMAT=$2 -CHROOTDIR="" - -if [ -n "$CHROOTONLY" ]; then - RELEASE=$1 - CHROOTDIR=$2 - FORMAT="pass" -fi - -if [ -n "$IMAGEONLY" ]; then - CHROOTDIR=$1 - FORMAT=$2 - RELEASE="pass" -fi - -# Make sure that we have the proper version of ubuntu -UBUNTU_VERSION=`cat /etc/lsb-release | grep CODENAME | sed 's/.*=//g'` -if [ "$UBUNTU_VERSION" = "natty" -a "$RELEASE" = "oneiric" ]; then - echo "natty installs can't build oneiric images" - exit 1 -fi - -case $FORMAT in - kvm|qcow2) FORMAT=qcow2 - QFORMAT=qcow2 - HYPER=kvm - ;; - vmserver|vmdk) - FORMAT=vmdk - QFORMAT=vmdk - HYPER=vmserver - ;; - vbox|vdi) FORMAT=vdi - QFORMAT=vdi - HYPER=kvm - ;; - vhd|vpc) FORMAT=vhd - QFORMAT=vpc - HYPER=kvm - ;; - xen) FORMAT=raw - QFORMAT=raw - HYPER=xen - ;; - raw) FORMAT=raw - QFORMAT=raw - HYPER=kvm - ;; - pass) ;; - *) echo "Unknown format: $FORMAT" - usage -esac - -case $RELEASE in - oneiric) ;; - natty) ;; - maverick) ;; - lucid) ;; - karmic) ;; - jaunty) ;; - pass) ;; - *) echo "Unknown release: $RELEASE" - usage - ;; -esac - -# Install stuff if necessary -if [ -z `which vmbuilder` ]; then - sudo apt-get install -y ubuntu-vm-builder -fi - -if [ -n "$CHROOTONLY" ]; then - # Build a chroot directory - HYPER=kvm - if [ "$MINIMAL" = 1 ]; then - ARGS="--variant=minbase" - for i in $MIN_PKGS; do - ARGS="$ARGS --addpkg=$i" - done - fi - sudo vmbuilder $HYPER ubuntu $ARGS \ - --suite $RELEASE \ - --only-chroot \ - --chroot-dir=$CHROOTDIR \ - --overwrite \ - --addpkg=$MIN_PKGS \ - - sudo cp -p files/sources.list $CHROOTDIR/etc/apt/sources.list - sed -e "s,%DIST%,$RELEASE,g" -i $CHROOTDIR/etc/apt/sources.list - sudo chroot $CHROOTDIR apt-get update - - exit 0 -fi - -# Build the image -TMPDIR=tmp -TMPDISK=`mktemp imgXXXXXXXX` -SIZE=$[$ROOTSIZE+$SWAPSIZE+1] -dd if=/dev/null of=$TMPDISK bs=1M seek=$SIZE count=1 - -if [ -n "$IMAGEONLY" ]; then - # Build image from chroot - sudo vmbuilder $HYPER ubuntu $ARGS \ - --existing-chroot=$CHROOTDIR \ - --overwrite \ - --rootsize=$ROOTSIZE \ - --swapsize=$SWAPSIZE \ - --tmpfs - \ - --raw=$TMPDISK \ - -else - # Do the whole shebang in one pass - ARGS="--variant=minbase" - for i in $MIN_PKGS; do - ARGS="$ARGS --addpkg=$i" - done - sudo vmbuilder $HYPER ubuntu $ARGS \ - --suite $RELEASE \ - --overwrite \ - --rootsize=$ROOTSIZE \ - --swapsize=$SWAPSIZE \ - --tmpfs - \ - --raw=$TMPDISK \ - -fi - -if [ "$FORMAT" = "raw" ]; then - # Get image - mv $TMPDISK $RELEASE.$FORMAT -else - # Convert image - qemu-img convert -O $QFORMAT $TMPDISK $RELEASE.$FORMAT - rm $TMPDISK -fi -rm -rf ubuntu-$HYPER diff --git a/tools/upload_image.sh b/tools/upload_image.sh index da73f16a..dd21c9f2 100755 --- a/tools/upload_image.sh +++ b/tools/upload_image.sh @@ -1,90 +1,42 @@ -#!/bin/bash -# upload_image.sh - Upload Ubuntu images (create if necessary) in various formats -# Supported formats: qcow (kvm), vmdk (vmserver), vdi (vbox), vhd (vpc) -# Requires sudo to root - -usage() { - echo "$0 - Upload images to OpenStack" +#!/usr/bin/env bash +# upload_image.sh - Retrieve and upload an image into Glance +# +# upload_image.sh +# +# Assumes credentials are set via OS_* environment variables + +function usage { + echo "$0 - Retrieve and upload an image into Glance" + echo "" + echo "Usage: $0 [...]" echo "" - echo "$0 [-h host] [-p port] release format" + echo "Assumes credentials are set via OS_* environment variables" exit 1 } -HOST=${HOST:-localhost} -PORT=${PORT:-9292} -DEST=${DEST:-/opt/stack} - -while 
getopts h:p: c; do - case $c in - h) HOST=$OPTARG - ;; - p) PORT=$OPTARG - ;; - esac -done -shift `expr $OPTIND - 1` +# Keep track of the current directory +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $TOOLS_DIR/..; pwd) -RELEASE=$1 -FORMAT=$2 +# Import common functions +source $TOP_DIR/functions -case $FORMAT in - kvm|qcow2) FORMAT=qcow2 - TARGET=kvm - ;; - vmserver|vmdk) - FORMAT=vmdk - TARGET=vmserver - ;; - vbox|vdi) TARGET=kvm - FORMAT=vdi - ;; - vhd|vpc) TARGET=kvm - FORMAT=vhd - ;; - *) echo "Unknown format: $FORMAT" - usage -esac +# Import configuration +source $TOP_DIR/openrc "" "" "" "" -case $RELEASE in - natty) ;; - maverick) ;; - lucid) ;; - karmic) ;; - jaunty) ;; - *) if [ ! -r $RELEASE.$FORMAT ]; then - echo "Unknown release: $RELEASE" - usage - fi - ;; -esac +# Find the cache dir +FILES=$TOP_DIR/files -GLANCE=`which glance` -if [ -z "$GLANCE" ]; then - if [ -x "$DEST/glance/bin/glance" ]; then - # Look for stack.sh's install - GLANCE="$DEST/glance/bin/glance" - else - # Install Glance client in $DEST - echo "Glance not found, must install client" - OWD=`pwd` - cd $DEST - sudo apt-get install python-pip python-eventlet python-routes python-greenlet python-argparse python-sqlalchemy python-wsgiref python-pastedeploy python-xattr - sudo pip install kombu - sudo git clone https://github.com/cloudbuilders/glance.git - cd glance - sudo python setup.py develop - cd $OWD - GLANCE=`which glance` - fi +if [[ -z "$1" ]]; then + usage fi -# Create image if it doesn't exist -if [ ! -r $RELEASE.$FORMAT ]; then - DIR=`dirname $0` - echo "$RELEASE.$FORMAT not found, creating..." - $DIR/make_image.sh $RELEASE $FORMAT -fi +# Get a token to authenticate to glance +TOKEN=$(keystone token-get | grep ' id ' | get_field 2) + +# Glance connection info. Note the port must be specified. +GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_HOST:9292} -# Upload the image -echo "Uploading image $RELEASE.$FORMAT to $HOST" -$GLANCE add name=$RELEASE.$FORMAT is_public=true disk_format=$FORMAT --host $HOST --port $PORT <$RELEASE.$FORMAT +for IMAGE in "$*"; do + upload_image $IMAGE $TOKEN +done diff --git a/tools/warm_apts_and_pips.sh b/tools/warm_apts_for_uec.sh similarity index 90% rename from tools/warm_apts_and_pips.sh rename to tools/warm_apts_for_uec.sh index ec7e916c..3c15f52e 100755 --- a/tools/warm_apts_and_pips.sh +++ b/tools/warm_apts_for_uec.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +# **warm_apts_for_uec.sh** + # Echo commands set -o xtrace @@ -30,7 +32,7 @@ fi if [ ! 
-d files/apts ]; then
     echo "Please run this script from devstack/tools/"
     exit 1
-fi
+fi
 
 # Mount the image
 STAGING_DIR=/tmp/`echo $1 | sed "s/\//_/g"`.stage
@@ -46,8 +48,6 @@
 cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf
 chroot $STAGING_DIR apt-get update
 chroot $STAGING_DIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
 chroot $STAGING_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` || true
-mkdir -p $STAGING_DIR/var/cache/pip
-PIP_DOWNLOAD_CACHE=/var/cache/pip chroot $STAGING_DIR pip install `cat files/pips/*` || true
 
 # Unmount
 umount $STAGING_DIR
diff --git a/tools/xen/README.md b/tools/xen/README.md
index 63350ea7..1cd45cff 100644
--- a/tools/xen/README.md
+++ b/tools/xen/README.md
@@ -1,44 +1,49 @@
 Getting Started With XenServer 5.6 and Devstack
 ===============================================
 The purpose of the code in this directory is to help developers bootstrap
-a XenServer 5.6 + Openstack development environment. This file gives
+a XenServer 5.6 (or greater) + Openstack development environment. This file gives
 some pointers on how to get started.
 
+XenServer is a Type 1 hypervisor, so it needs to be installed on bare metal.
+The OpenStack services are configured to run within a "privileged" virtual
+machine on the XenServer host (called OS domU). The VM uses the XAPI toolstack
+to communicate with the host.
+
 Step 1: Install Xenserver
 ------------------------
-Install XenServer 5.6 on a clean box. You can get XenServer by signing
+Install XenServer 5.6+ on a clean box. You can get XenServer by signing
 up for an account on citrix.com, and then visiting:
 https://www.citrix.com/English/ss/downloads/details.asp?downloadId=2311504&productId=683148
 
+For details on installation, see: http://wiki.openstack.org/XenServer/Install
+
 Here are some sample Xenserver network settings for when you are just
-getting started (I use settings like this with a lappy + cheap wifi router):
+getting started (settings like this have been used with a laptop + cheap wifi router):
 
   * XenServer Host IP: 192.168.1.10
   * XenServer Netmask: 255.255.255.0
   * XenServer Gateway: 192.168.1.1
   * XenServer DNS: 192.168.1.1
 
-Step 2: Prepare DOM0
--------------------
-At this point, your server is missing some critical software that you will
-need to run devstack (like git). Do this to install required software:
-
-    wget --no-check-certificate https://github.com/cloudbuilders/devstack/raw/xen/tools/xen/prepare_dom0.sh
-    chmod 755 prepare_dom0.sh
-    ./prepare_dom0.sh
+Step 2: Download devstack
+--------------------------
+On your XenServer host, run the following commands as root:
 
-This script will also clone devstack in /root/devstack
+    wget --no-check-certificate https://github.com/openstack-dev/devstack/zipball/master
+    unzip -o master -d ./devstack
+    cd devstack/*/
 
-Step 3: Configure your localrc
------------------------------
+Step 3: Configure your localrc inside the devstack directory
+------------------------------------------------------------
 Devstack uses a localrc for user-specific configuration. Note that
 the XENAPI_PASSWORD must be your dom0 root password.
 Of course, use real passwords if this machine is exposed.
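+
+A minimal localrc might look like the following (the values shown are
+placeholders; substitute your own secrets):
+
+    MYSQL_PASSWORD=my_super_secret
+    SERVICE_TOKEN=my_super_secret
+    ADMIN_PASSWORD=my_super_secret
+    RABBIT_PASSWORD=my_super_secret
+    GUEST_PASSWORD=my_super_secret
+    XENAPI_PASSWORD=my_xen_root_password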
- cat > /root/devstack/localrc < ./localrc <> /etc/sysconfig/network -fi - -# Also, enable ip forwarding in rc.local, since the above trick isn't working -if ! grep -q "echo 1 >/proc/sys/net/ipv4/ip_forward" /etc/rc.local; then - echo "echo 1 >/proc/sys/net/ipv4/ip_forward" >> /etc/rc.local -fi - -# Enable ip forwarding at runtime as well -echo 1 > /proc/sys/net/ipv4/ip_forward - -# Directory where we stage the build -STAGING_DIR=$TOP_DIR/stage - -# Option to clean out old stuff -CLEAN=${CLEAN:-0} -if [ "$CLEAN" = "1" ]; then - rm -rf $STAGING_DIR -fi - -# Download our base image. This image is made using prepare_guest.sh -BASE_IMAGE_URL=${BASE_IMAGE_URL:-http://images.ansolabs.com/xen/stage.tgz} -if [ ! -e $STAGING_DIR ]; then - if [ ! -e /tmp/stage.tgz ]; then - wget $BASE_IMAGE_URL -O /tmp/stage.tgz - fi - tar xfz /tmp/stage.tgz - cd $TOP_DIR -fi - -# Free up precious disk space -rm -f /tmp/stage.tgz - -# Make sure we have a stage -if [ ! -d $STAGING_DIR/etc ]; then - echo "Stage is not properly set up!" - exit 1 -fi - -# Directory where our conf files are stored -FILES_DIR=$TOP_DIR/files -TEMPLATES_DIR=$TOP_DIR/templates - -# Directory for supporting script files -SCRIPT_DIR=$TOP_DIR/scripts - -# Version of ubuntu with which we are working -UBUNTU_VERSION=`cat $STAGING_DIR/etc/lsb-release | grep "DISTRIB_CODENAME=" | sed "s/DISTRIB_CODENAME=//"` -KERNEL_VERSION=`ls $STAGING_DIR/boot/vmlinuz* | head -1 | sed "s/.*vmlinuz-//"` - -# Setup fake grub -rm -rf $STAGING_DIR/boot/grub/ -mkdir -p $STAGING_DIR/boot/grub/ -cp $TEMPLATES_DIR/menu.lst.in $STAGING_DIR/boot/grub/menu.lst -sed -e "s,@KERNEL_VERSION@,$KERNEL_VERSION,g" -i $STAGING_DIR/boot/grub/menu.lst - -# Setup fstab, tty, and other system stuff -cp $FILES_DIR/fstab $STAGING_DIR/etc/fstab -cp $FILES_DIR/hvc0.conf $STAGING_DIR/etc/init/ - -# Put the VPX into UTC. -rm -f $STAGING_DIR/etc/localtime - -# Configure dns (use same dns as dom0) -cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf - -# Copy over devstack -rm -f /tmp/devstack.tar -tar --exclude='stage' --exclude='xen/xvas' --exclude='xen/nova' -cvf /tmp/devstack.tar $TOP_DIR/../../../devstack -cd $STAGING_DIR/opt/stack/ -tar xf /tmp/devstack.tar -cd $TOP_DIR - -# Configure OVA -VDI_SIZE=$(($VDI_MB*1024*1024)) -PRODUCT_BRAND=${PRODUCT_BRAND:-openstack} -PRODUCT_VERSION=${PRODUCT_VERSION:-001} -BUILD_NUMBER=${BUILD_NUMBER:-001} -LABEL="$PRODUCT_BRAND $PRODUCT_VERSION-$BUILD_NUMBER" -OVA=$STAGING_DIR/tmp/ova.xml -cp $TEMPLATES_DIR/ova.xml.in $OVA -sed -e "s,@VDI_SIZE@,$VDI_SIZE,g" -i $OVA -sed -e "s,@PRODUCT_BRAND@,$PRODUCT_BRAND,g" -i $OVA -sed -e "s,@PRODUCT_VERSION@,$PRODUCT_VERSION,g" -i $OVA -sed -e "s,@BUILD_NUMBER@,$BUILD_NUMBER,g" -i $OVA - -# Directory for xvas -XVA_DIR=$TOP_DIR/xvas - -# Create xva dir -mkdir -p $XVA_DIR - -# Clean nova if desired -if [ "$CLEAN" = "1" ]; then - rm -rf $TOP_DIR/nova -fi - -# Checkout nova -if [ ! 
-d $TOP_DIR/nova ]; then - git clone git://github.com/cloudbuilders/nova.git - git checkout diablo -fi - -# Run devstack on launch -cat <$STAGING_DIR/etc/rc.local -GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ DO_TGZ=0 bash /opt/stack/devstack/tools/xen/prepare_guest.sh -su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" stack -exit 0 -EOF - -# Install plugins -cp -pr $TOP_DIR/nova/plugins/xenserver/xenapi/etc/xapi.d /etc/ -chmod a+x /etc/xapi.d/plugins/* -yum --enablerepo=base install -y parted -mkdir -p /boot/guest - -# Set local storage il8n -SR_UUID=`xe sr-list --minimal name-label="Local storage"` -xe sr-param-set uuid=$SR_UUID other-config:i18n-key=local-storage - - -# Shutdown previous runs -DO_SHUTDOWN=${DO_SHUTDOWN:-1} -if [ "$DO_SHUTDOWN" = "1" ]; then - # Shutdown all domU's that created previously - xe vm-list --minimal name-label="$LABEL" | xargs ./scripts/uninstall-os-vpx.sh - - # Destroy any instances that were launched - for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do - echo "Shutting down nova instance $uuid" - xe vm-unpause uuid=$uuid || true - xe vm-shutdown uuid=$uuid - xe vm-destroy uuid=$uuid - done -fi - -# Path to head xva. By default keep overwriting the same one to save space -USE_SEPARATE_XVAS=${USE_SEPARATE_XVAS:-0} -if [ "$USE_SEPARATE_XVAS" = "0" ]; then - XVA=$XVA_DIR/$UBUNTU_VERSION.xva -else - XVA=$XVA_DIR/$UBUNTU_VERSION.$GUEST_NAME.xva -fi - -# Clean old xva. In the future may not do this every time. -rm -f $XVA - -# Configure the hostname -echo $GUEST_NAME > $STAGING_DIR/etc/hostname - -# Hostname must resolve for rabbit -cat <$STAGING_DIR/etc/hosts -$MGT_IP $GUEST_NAME -127.0.0.1 localhost localhost.localdomain -EOF - -# Configure the network -INTERFACES=$STAGING_DIR/etc/network/interfaces -cp $TEMPLATES_DIR/interfaces.in $INTERFACES -sed -e "s,@ETH1_IP@,$VM_IP,g" -i $INTERFACES -sed -e "s,@ETH1_NETMASK@,$VM_NETMASK,g" -i $INTERFACES -sed -e "s,@ETH2_IP@,$MGT_IP,g" -i $INTERFACES -sed -e "s,@ETH2_NETMASK@,$MGT_NETMASK,g" -i $INTERFACES -sed -e "s,@ETH3_IP@,$PUB_IP,g" -i $INTERFACES -sed -e "s,@ETH3_NETMASK@,$PUB_NETMASK,g" -i $INTERFACES - -# Gracefully cp only if source file/dir exists -function cp_it { - if [ -e $1 ] || [ -d $1 ]; then - cp -pRL $1 $2 - fi -} - -# Copy over your ssh keys and env if desired -COPYENV=${COPYENV:-1} -if [ "$COPYENV" = "1" ]; then - cp_it ~/.ssh $STAGING_DIR/opt/stack/.ssh - cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/opt/stack/.ssh/authorized_keys - cp_it ~/.gitconfig $STAGING_DIR/opt/stack/.gitconfig - cp_it ~/.vimrc $STAGING_DIR/opt/stack/.vimrc - cp_it ~/.bashrc $STAGING_DIR/opt/stack/.bashrc -fi - -# Configure run.sh -cat <$STAGING_DIR/opt/stack/run.sh -#!/bin/bash -cd /opt/stack/devstack -killall screen -UPLOAD_LEGACY_TTY=yes HOST_IP=$PUB_IP VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=1 $STACKSH_PARAMS ./stack.sh -EOF -chmod 755 $STAGING_DIR/opt/stack/run.sh - -# Create xva -if [ ! -e $XVA ]; then - rm -rf /tmp/mkxva* - UID=0 $SCRIPT_DIR/mkxva -o $XVA -t xva -x $OVA $STAGING_DIR $VDI_MB /tmp/ -fi - -# Start guest -$TOP_DIR/scripts/install-os-vpx.sh -f $XVA -v $VM_BR -m $MGT_BR -p $PUB_BR - -# If we have copied our ssh credentials, use ssh to monitor while the installation runs -WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1} -if [ "$WAIT_TILL_LAUNCH" = "1" ] && [ -e ~/.ssh/id_rsa.pub ] && [ "$COPYENV" = "1" ]; then - # Done creating the container, let's tail the log - echo - echo "=============================================================" - echo " -- YAY! 
--" - echo "=============================================================" - echo - echo "We're done launching the vm, about to start tailing the" - echo "stack.sh log. It will take a second or two to start." - echo - echo "Just CTRL-C at any time to stop tailing." - - set +o xtrace - - while ! ssh -q stack@$PUB_IP "[ -e run.sh.log ]"; do - sleep 1 - done - - ssh stack@$PUB_IP 'tail -f run.sh.log' & - - TAIL_PID=$! - - function kill_tail() { - kill $TAIL_PID - exit 1 - } - - # Let Ctrl-c kill tail and exit - trap kill_tail SIGINT - - echo "Waiting stack.sh to finish..." - while ! ssh -q stack@$PUB_IP "grep -q 'stack.sh completed in' run.sh.log"; do - sleep 1 - done - - kill $TAIL_PID - - if ssh -q stack@$PUB_IP "grep -q 'stack.sh failed' run.sh.log"; then - exit 1 - fi - echo "" - echo "Finished - Zip-a-dee Doo-dah!" - echo "You can then visit the OpenStack Dashboard" - echo "at http://$PUB_IP, and contact other services at the usual ports." -else - echo "################################################################################" - echo "" - echo "All Finished!" - echo "Now, you can monitor the progress of the stack.sh installation by " - echo "tailing /opt/stack/run.sh.log from within your domU." - echo "" - echo "ssh into your domU now: 'ssh stack@$PUB_IP' using your password" - echo "and then do: 'tail -f /opt/stack/run.sh.log'" - echo "" - echo "When the script completes, you can then visit the OpenStack Dashboard" - echo "at http://$PUB_IP, and contact other services at the usual ports." - -fi diff --git a/tools/xen/build_domU_multi.sh b/tools/xen/build_domU_multi.sh index 130bec5b..0285f42e 100755 --- a/tools/xen/build_domU_multi.sh +++ b/tools/xen/build_domU_multi.sh @@ -17,19 +17,19 @@ FLOATING_RANGE=${FLOATING_RANGE:-192.168.1.196/30} COMMON_VARS="$STACKSH_PARAMS MYSQL_HOST=$HEAD_MGT_IP RABBIT_HOST=$HEAD_MGT_IP GLANCE_HOSTPORT=$HEAD_MGT_IP:9292 FLOATING_RANGE=$FLOATING_RANGE" # Helper to launch containers -function build_domU { - GUEST_NAME=$1 PUB_IP=$2 MGT_IP=$3 DO_SHUTDOWN=$4 TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $5" ./build_domU.sh +function build_xva { + GUEST_NAME=$1 PUB_IP=$2 MGT_IP=$3 DO_SHUTDOWN=$4 TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $5" ./build_xva.sh } # Launch the head node - headnode uses a non-ip domain name, # because rabbit won't launch with an ip addr hostname :( -build_domU HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit" +build_xva HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit" # Wait till the head node is up -while ! curl -L http://$HEAD_PUB_IP | grep -q username; do - echo "Waiting for head node ($HEAD_PUB_IP) to start..." - sleep 5 -done +#while ! curl -L http://$HEAD_PUB_IP | grep -q username; do +# echo "Waiting for head node ($HEAD_PUB_IP) to start..." 
+# sleep 5 +#done # Build the HA compute host -build_domU $COMPUTE_PUB_IP $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api" +build_xva COMPUTENODE $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api" diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh new file mode 100755 index 00000000..b0fd003d --- /dev/null +++ b/tools/xen/build_xva.sh @@ -0,0 +1,145 @@ +#!/bin/bash + +# This script is run by install_os_domU.sh +# +# It modifies the ubuntu image created by install_os_domU.sh +# and previously moodified by prepare_guest_template.sh +# +# This script is responsible for: +# - pushing in the DevStack code +# - creating run.sh, to run the code on boot +# It does this by mounting the disk image of the VM. +# +# The resultant image is then templated and started +# by install_os_domU.sh + +# Exit on errors +set -o errexit +# Echo commands +set -o xtrace + +# This directory +TOP_DIR=$(cd $(dirname "$0") && pwd) + +# Include onexit commands +. $TOP_DIR/scripts/on_exit.sh + +# Source params - override xenrc params in your localrc to suite your taste +source xenrc + +# +# Parameters +# +GUEST_NAME="$1" + +# +# Mount the VDI +# +STAGING_DIR=$($TOP_DIR/scripts/manage-vdi open $GUEST_NAME 0 1 | grep -o "/tmp/tmp.[[:alnum:]]*") +add_on_exit "$TOP_DIR/scripts/manage-vdi close $GUEST_NAME 0 1" + +# Make sure we have a stage +if [ ! -d $STAGING_DIR/etc ]; then + echo "Stage is not properly set up!" + exit 1 +fi + +# Only support DHCP for now - don't support how different versions of Ubuntu handle resolv.conf +if [ "$MGT_IP" != "dhcp" ] && [ "$PUB_IP" != "dhcp" ]; then + echo "Configuration without DHCP not supported" + exit 1 +fi + +# Copy over devstack +rm -f /tmp/devstack.tar +cd $TOP_DIR/../../ +tar --exclude='stage' --exclude='xen/xvas' --exclude='xen/nova' -cvf /tmp/devstack.tar . 
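+# (Copying via a tarball keeps the tree self-contained and honors the
+# --exclude list above, so build artifacts never end up inside the guest.)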
+mkdir -p $STAGING_DIR/opt/stack/devstack +tar xf /tmp/devstack.tar -C $STAGING_DIR/opt/stack/devstack +cd $TOP_DIR + +# Run devstack on launch +cat <$STAGING_DIR/etc/rc.local +# network restart required for getting the right gateway +/etc/init.d/networking restart +chown -R $STACK_USER /opt/stack +su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" $STACK_USER +exit 0 +EOF + +# Configure the hostname +echo $GUEST_NAME > $STAGING_DIR/etc/hostname + +# Hostname must resolve for rabbit +HOSTS_FILE_IP=$PUB_IP +if [ $MGT_IP != "dhcp" ]; then + HOSTS_FILE_IP=$MGT_IP +fi +cat <$STAGING_DIR/etc/hosts +$HOSTS_FILE_IP $GUEST_NAME +127.0.0.1 localhost localhost.localdomain +EOF + +# Configure the network +INTERFACES=$STAGING_DIR/etc/network/interfaces +TEMPLATES_DIR=$TOP_DIR/templates +cp $TEMPLATES_DIR/interfaces.in $INTERFACES +if [ $VM_IP == "dhcp" ]; then + echo 'eth1 on dhcp' + sed -e "s,iface eth1 inet static,iface eth1 inet dhcp,g" -i $INTERFACES + sed -e '/@ETH1_/d' -i $INTERFACES +else + sed -e "s,@ETH1_IP@,$VM_IP,g" -i $INTERFACES + sed -e "s,@ETH1_NETMASK@,$VM_NETMASK,g" -i $INTERFACES +fi + +if [ $MGT_IP == "dhcp" ]; then + echo 'eth2 on dhcp' + sed -e "s,iface eth2 inet static,iface eth2 inet dhcp,g" -i $INTERFACES + sed -e '/@ETH2_/d' -i $INTERFACES +else + sed -e "s,@ETH2_IP@,$MGT_IP,g" -i $INTERFACES + sed -e "s,@ETH2_NETMASK@,$MGT_NETMASK,g" -i $INTERFACES +fi + +if [ $PUB_IP == "dhcp" ]; then + echo 'eth3 on dhcp' + sed -e "s,iface eth3 inet static,iface eth3 inet dhcp,g" -i $INTERFACES + sed -e '/@ETH3_/d' -i $INTERFACES +else + sed -e "s,@ETH3_IP@,$PUB_IP,g" -i $INTERFACES + sed -e "s,@ETH3_NETMASK@,$PUB_NETMASK,g" -i $INTERFACES +fi + +if [ "$ENABLE_GI" == "true" ]; then + cat <>$INTERFACES +auto eth0 +iface eth0 inet dhcp +EOF +fi + +# Gracefully cp only if source file/dir exists +function cp_it { + if [ -e $1 ] || [ -d $1 ]; then + cp -pRL $1 $2 + fi +} + +# Copy over your ssh keys and env if desired +COPYENV=${COPYENV:-1} +if [ "$COPYENV" = "1" ]; then + cp_it ~/.ssh $STAGING_DIR/opt/stack/.ssh + cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/opt/stack/.ssh/authorized_keys + cp_it ~/.gitconfig $STAGING_DIR/opt/stack/.gitconfig + cp_it ~/.vimrc $STAGING_DIR/opt/stack/.vimrc + cp_it ~/.bashrc $STAGING_DIR/opt/stack/.bashrc +fi + +# Configure run.sh +cat <$STAGING_DIR/opt/stack/run.sh +#!/bin/bash +cd /opt/stack/devstack +killall screen +VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=$MULTI_HOST HOST_IP_IFACE=$HOST_IP_IFACE $STACKSH_PARAMS ./stack.sh +EOF +chmod 755 $STAGING_DIR/opt/stack/run.sh diff --git a/tools/xen/devstackubuntupreseed.cfg b/tools/xen/devstackubuntupreseed.cfg new file mode 100644 index 00000000..d8caaeed --- /dev/null +++ b/tools/xen/devstackubuntupreseed.cfg @@ -0,0 +1,470 @@ +### Contents of the preconfiguration file (for squeeze) +### Localization +# Preseeding only locale sets language, country and locale. +d-i debian-installer/locale string en_US + +# The values can also be preseeded individually for greater flexibility. +#d-i debian-installer/language string en +#d-i debian-installer/country string NL +#d-i debian-installer/locale string en_GB.UTF-8 +# Optionally specify additional locales to be generated. +#d-i localechooser/supported-locales en_US.UTF-8, nl_NL.UTF-8 + +# Keyboard selection. +# Disable automatic (interactive) keymap detection. 
+d-i console-setup/ask_detect boolean false +#d-i keyboard-configuration/modelcode string pc105 +d-i keyboard-configuration/layoutcode string us +# To select a variant of the selected layout (if you leave this out, the +# basic form of the layout will be used): +#d-i keyboard-configuration/variantcode string dvorak + +### Network configuration +# Disable network configuration entirely. This is useful for cdrom +# installations on non-networked devices where the network questions, +# warning and long timeouts are a nuisance. +#d-i netcfg/enable boolean false + +# netcfg will choose an interface that has link if possible. This makes it +# skip displaying a list if there is more than one interface. +d-i netcfg/choose_interface select auto + +# To pick a particular interface instead: +#d-i netcfg/choose_interface select eth1 + +# If you have a slow dhcp server and the installer times out waiting for +# it, this might be useful. +#d-i netcfg/dhcp_timeout string 60 + +# If you prefer to configure the network manually, uncomment this line and +# the static network configuration below. +#d-i netcfg/disable_autoconfig boolean true + +# If you want the preconfiguration file to work on systems both with and +# without a dhcp server, uncomment these lines and the static network +# configuration below. +#d-i netcfg/dhcp_failed note +#d-i netcfg/dhcp_options select Configure network manually + +# Static network configuration. +#d-i netcfg/get_nameservers string 192.168.1.1 +#d-i netcfg/get_ipaddress string 192.168.1.42 +#d-i netcfg/get_netmask string 255.255.255.0 +#d-i netcfg/get_gateway string 192.168.1.1 +#d-i netcfg/confirm_static boolean true + +# Any hostname and domain names assigned from dhcp take precedence over +# values set here. However, setting the values still prevents the questions +# from being shown, even if values come from dhcp. +d-i netcfg/get_hostname string stack +d-i netcfg/get_domain string stackpass + +# Disable that annoying WEP key dialog. +d-i netcfg/wireless_wep string +# The wacky dhcp hostname that some ISPs use as a password of sorts. +#d-i netcfg/dhcp_hostname string radish + +# If non-free firmware is needed for the network or other hardware, you can +# configure the installer to always try to load it, without prompting. Or +# change to false to disable asking. +#d-i hw-detect/load_firmware boolean true + +### Network console +# Use the following settings if you wish to make use of the network-console +# component for remote installation over SSH. This only makes sense if you +# intend to perform the remainder of the installation manually. +#d-i anna/choose_modules string network-console +#d-i network-console/password password r00tme +#d-i network-console/password-again password r00tme + +### Mirror settings +# If you select ftp, the mirror/country string does not need to be set. +#d-i mirror/protocol string ftp +d-i mirror/country string manual +d-i mirror/http/hostname string archive.ubuntu.com +d-i mirror/http/directory string /ubuntu +d-i mirror/http/proxy string + +# Alternatively: by default, the installer uses CC.archive.ubuntu.com where +# CC is the ISO-3166-2 code for the selected country. You can preseed this +# so that it does so without asking. +#d-i mirror/http/mirror select CC.archive.ubuntu.com + +# Suite to install. +#d-i mirror/suite string squeeze +# Suite to use for loading installer components (optional). +#d-i mirror/udeb/suite string squeeze +# Components to use for loading installer components (optional). 
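+# (udeb components only control which installer modules are loaded; the
+# apt sources of the installed system are preseeded separately in the
+# "Apt setup" section further down.)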
+#d-i mirror/udeb/components multiselect main, restricted + +### Clock and time zone setup +# Controls whether or not the hardware clock is set to UTC. +d-i clock-setup/utc boolean true + +# You may set this to any valid setting for $TZ; see the contents of +# /usr/share/zoneinfo/ for valid values. +d-i time/zone string US/Pacific + +# Controls whether to use NTP to set the clock during the install +d-i clock-setup/ntp boolean true +# NTP server to use. The default is almost always fine here. +d-i clock-setup/ntp-server string 0.us.pool.ntp.org + +### Partitioning +## Partitioning example +# If the system has free space you can choose to only partition that space. +# This is only honoured if partman-auto/method (below) is not set. +# Alternatives: custom, some_device, some_device_crypto, some_device_lvm. +#d-i partman-auto/init_automatically_partition select biggest_free + +# Alternatively, you may specify a disk to partition. If the system has only +# one disk the installer will default to using that, but otherwise the device +# name must be given in traditional, non-devfs format (so e.g. /dev/hda or +# /dev/sda, and not e.g. /dev/discs/disc0/disc). +# For example, to use the first SCSI/SATA hard disk: +#d-i partman-auto/disk string /dev/sda +# In addition, you'll need to specify the method to use. +# The presently available methods are: +# - regular: use the usual partition types for your architecture +# - lvm: use LVM to partition the disk +# - crypto: use LVM within an encrypted partition +d-i partman-auto/method string regular + +# If one of the disks that are going to be automatically partitioned +# contains an old LVM configuration, the user will normally receive a +# warning. This can be preseeded away... +d-i partman-lvm/device_remove_lvm boolean true +# The same applies to pre-existing software RAID array: +d-i partman-md/device_remove_md boolean true +# And the same goes for the confirmation to write the lvm partitions. +d-i partman-lvm/confirm boolean true + +# For LVM partitioning, you can select how much of the volume group to use +# for logical volumes. +#d-i partman-auto-lvm/guided_size string max +#d-i partman-auto-lvm/guided_size string 10GB +#d-i partman-auto-lvm/guided_size string 50% + +# You can choose one of the three predefined partitioning recipes: +# - atomic: all files in one partition +# - home: separate /home partition +# - multi: separate /home, /usr, /var, and /tmp partitions +d-i partman-auto/choose_recipe select atomic + +# Or provide a recipe of your own... +# If you have a way to get a recipe file into the d-i environment, you can +# just point at it. +#d-i partman-auto/expert_recipe_file string /hd-media/recipe + +# If not, you can put an entire recipe into the preconfiguration file in one +# (logical) line. This example creates a small /boot partition, suitable +# swap, and uses the rest of the space for the root partition: +#d-i partman-auto/expert_recipe string \ +# boot-root :: \ +# 40 50 100 ext3 \ +# $primary{ } $bootable{ } \ +# method{ format } format{ } \ +# use_filesystem{ } filesystem{ ext3 } \ +# mountpoint{ /boot } \ +# . \ +# 500 10000 1000000000 ext3 \ +# method{ format } format{ } \ +# use_filesystem{ } filesystem{ ext3 } \ +# mountpoint{ / } \ +# . \ +# 64 512 300% linux-swap \ +# method{ swap } format{ } \ +# . + +# If you just want to change the default filesystem from ext3 to something +# else, you can do that without providing a full recipe. 
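+# For reference, each entry in the recipes above is "<minimum size>
+# <priority> <maximum size> <filesystem>", with sizes in megabytes; a huge
+# maximum such as 1000000000 effectively means "grow to fill the disk" and
+# "300%" means three times the machine's RAM. As an untested illustration,
+# a minimal root-only recipe would look like:
+#d-i partman-auto/expert_recipe string \
+#      root-only :: \
+#              2048 50 1000000000 ext3 \
+#                      method{ format } format{ } \
+#                      use_filesystem{ } filesystem{ ext3 } \
+#                      mountpoint{ / } \
+#              .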
+d-i partman/default_filesystem string ext3
+
+# The full recipe format is documented in the file partman-auto-recipe.txt
+# included in the 'debian-installer' package or available from D-I source
+# repository. This also documents how to specify settings such as file
+# system labels, volume group names and which physical devices to include
+# in a volume group.
+
+# This makes partman automatically partition without confirmation, provided
+# that you told it what to do using one of the methods above.
+d-i partman-partitioning/confirm_write_new_label boolean true
+d-i partman/choose_partition select finish
+d-i partman/confirm boolean true
+d-i partman/confirm_nooverwrite boolean true
+
+## Partitioning using RAID
+# The method should be set to "raid".
+#d-i partman-auto/method string raid
+# Specify the disks to be partitioned. They will all get the same layout,
+# so this will only work if the disks are the same size.
+#d-i partman-auto/disk string /dev/sda /dev/sdb
+
+# Next you need to specify the physical partitions that will be used.
+#d-i partman-auto/expert_recipe string \
+#      multiraid ::                                         \
+#              1000 5000 4000 raid                          \
+#                      $primary{ } method{ raid }           \
+#              .                                            \
+#              64 512 300% raid                             \
+#                      method{ raid }                       \
+#              .                                            \
+#              500 10000 1000000000 raid                    \
+#                      method{ raid }                       \
+#              .
+
+# Last you need to specify how the previously defined partitions will be
+# used in the RAID setup. Remember to use the correct partition numbers
+# for logical partitions. RAID levels 0, 1, 5, 6 and 10 are supported;
+# devices are separated using "#".
+# Parameters are:
+# <raidtype> <devcount> <sparecount> <fstype> <mountpoint> \
+#          <devices> <sparedevices>
+
+#d-i partman-auto-raid/recipe string \
+#    1 2 0 ext3 /                    \
+#          /dev/sda1#/dev/sdb1       \
+#    .                               \
+#    1 2 0 swap -                    \
+#          /dev/sda5#/dev/sdb5       \
+#    .                               \
+#    0 2 0 ext3 /home                \
+#          /dev/sda6#/dev/sdb6       \
+#    .
+
+# For additional information see the file partman-auto-raid-recipe.txt
+# included in the 'debian-installer' package or available from D-I source
+# repository.
+
+# This makes partman automatically partition without confirmation.
+d-i partman-md/confirm boolean true
+d-i partman-partitioning/confirm_write_new_label boolean true
+d-i partman/choose_partition select finish
+d-i partman/confirm boolean true
+d-i partman/confirm_nooverwrite boolean true
+
+## Controlling how partitions are mounted
+# The default is to mount by UUID, but you can also choose "traditional" to
+# use traditional device names, or "label" to try filesystem labels before
+# falling back to UUIDs.
+#d-i partman/mount_style select uuid
+
+### Base system installation
+# Configure APT to not install recommended packages by default. Use of this
+# option can result in an incomplete system and should only be used by very
+# experienced users.
+#d-i base-installer/install-recommends boolean false
+
+# The kernel image (meta) package to be installed; "none" can be used if no
+# kernel is to be installed.
+#d-i base-installer/kernel/image string linux-generic
+
+### Account setup
+# Skip creation of a root account (normal user account will be able to
+# use sudo). The default is false; preseed this to true if you want to set
+# a root password.
+d-i passwd/root-login boolean true
+# Alternatively, to skip creation of a normal user account.
+d-i passwd/make-user boolean false
+
+# Root password, either in clear text
+d-i passwd/root-password password stackpass
+d-i passwd/root-password-again password stackpass
+# or encrypted using an MD5 hash.
+#d-i passwd/root-password-crypted password [MD5 hash]
+
+# To create a normal user account.
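+# (Unused in this DevStack install, since passwd/make-user is preseeded to
+# false above; kept for reference.)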
+#d-i passwd/user-fullname string Ubuntu User +#d-i passwd/username string ubuntu +# Normal user's password, either in clear text +#d-i passwd/user-password password insecure +#d-i passwd/user-password-again password insecure +# or encrypted using an MD5 hash. +#d-i passwd/user-password-crypted password [MD5 hash] +# Create the first user with the specified UID instead of the default. +#d-i passwd/user-uid string 1010 +# The installer will warn about weak passwords. If you are sure you know +# what you're doing and want to override it, uncomment this. +d-i user-setup/allow-password-weak boolean true + +# The user account will be added to some standard initial groups. To +# override that, use this. +#d-i passwd/user-default-groups string audio cdrom video + +# Set to true if you want to encrypt the first user's home directory. +d-i user-setup/encrypt-home boolean false + +### Apt setup +# You can choose to install restricted and universe software, or to install +# software from the backports repository. +#d-i apt-setup/restricted boolean true +#d-i apt-setup/universe boolean true +#d-i apt-setup/backports boolean true +# Uncomment this if you don't want to use a network mirror. +#d-i apt-setup/use_mirror boolean false +# Select which update services to use; define the mirrors to be used. +# Values shown below are the normal defaults. +#d-i apt-setup/services-select multiselect security +#d-i apt-setup/security_host string security.ubuntu.com +#d-i apt-setup/security_path string /ubuntu + +# Additional repositories, local[0-9] available +#d-i apt-setup/local0/repository string \ +# http://local.server/ubuntu squeeze main +#d-i apt-setup/local0/comment string local server +# Enable deb-src lines +#d-i apt-setup/local0/source boolean true +# URL to the public key of the local repository; you must provide a key or +# apt will complain about the unauthenticated repository and so the +# sources.list line will be left commented out +#d-i apt-setup/local0/key string http://local.server/key + +# By default the installer requires that repositories be authenticated +# using a known gpg key. This setting can be used to disable that +# authentication. Warning: Insecure, not recommended. +#d-i debian-installer/allow_unauthenticated boolean true + +### Package selection +#tasksel tasksel/first multiselect ubuntu-desktop +#tasksel tasksel/first multiselect lamp-server, print-server +#tasksel tasksel/first multiselect kubuntu-desktop +tasksel tasksel/first multiselect openssh-server + +# Individual additional packages to install +#d-i pkgsel/include string openssh-server build-essential +# Whether to upgrade packages after debootstrap. +# Allowed values: none, safe-upgrade, full-upgrade +#d-i pkgsel/upgrade select none + +# Language pack selection +#d-i pkgsel/language-packs multiselect de, en, zh + +# Policy for applying updates. May be "none" (no automatic updates), +# "unattended-upgrades" (install security updates automatically), or +# "landscape" (manage system with Landscape). +d-i pkgsel/update-policy select unattended-upgrades + +# Some versions of the installer can report back on what software you have +# installed, and what software you use. The default is not to report back, +# but sending reports helps the project determine what software is most +# popular and include it on CDs. 
+#popularity-contest popularity-contest/participate boolean false + +# By default, the system's locate database will be updated after the +# installer has finished installing most packages. This may take a while, so +# if you don't want it, you can set this to "false" to turn it off. +d-i pkgsel/updatedb boolean false + +### Boot loader installation +# Grub is the default boot loader (for x86). If you want lilo installed +# instead, uncomment this: +#d-i grub-installer/skip boolean true +# To also skip installing lilo, and install no bootloader, uncomment this +# too: +#d-i lilo-installer/skip boolean true + +# With a few exceptions for unusual partitioning setups, GRUB 2 is now the +# default. If you need GRUB Legacy for some particular reason, then +# uncomment this: +#d-i grub-installer/grub2_instead_of_grub_legacy boolean false + +# This is fairly safe to set, it makes grub install automatically to the MBR +# if no other operating system is detected on the machine. +d-i grub-installer/only_debian boolean true + +# This one makes grub-installer install to the MBR if it also finds some other +# OS, which is less safe as it might not be able to boot that other OS. +d-i grub-installer/with_other_os boolean true + +# Alternatively, if you want to install to a location other than the mbr, +# uncomment and edit these lines: +#d-i grub-installer/only_debian boolean false +#d-i grub-installer/with_other_os boolean false +#d-i grub-installer/bootdev string (hd0,0) +# To install grub to multiple disks: +#d-i grub-installer/bootdev string (hd0,0) (hd1,0) (hd2,0) + +# Optional password for grub, either in clear text +#d-i grub-installer/password password r00tme +#d-i grub-installer/password-again password r00tme +# or encrypted using an MD5 hash, see grub-md5-crypt(8). +#d-i grub-installer/password-crypted password [MD5 hash] + +# Use the following option to add additional boot parameters for the +# installed system (if supported by the bootloader installer). +# Note: options passed to the installer will be added automatically. +#d-i debian-installer/add-kernel-opts string nousb + +### Finishing up the installation +# During installations from serial console, the regular virtual consoles +# (VT1-VT6) are normally disabled in /etc/inittab. Uncomment the next +# line to prevent this. +d-i finish-install/keep-consoles boolean true + +# Avoid that last message about the install being complete. +d-i finish-install/reboot_in_progress note + +# This will prevent the installer from ejecting the CD during the reboot, +# which is useful in some situations. +#d-i cdrom-detect/eject boolean false + +# This is how to make the installer shutdown when finished, but not +# reboot into the installed system. +#d-i debian-installer/exit/halt boolean true +# This will power off the machine instead of just halting it. +#d-i debian-installer/exit/poweroff boolean true + +### X configuration +# X can detect the right driver for some cards, but if you're preseeding, +# you override whatever it chooses. Still, vesa will work most places. +#xserver-xorg xserver-xorg/config/device/driver select vesa + +# A caveat with mouse autodetection is that if it fails, X will retry it +# over and over. So if it's preseeded to be done, there is a possibility of +# an infinite loop if the mouse is not autodetected. +#xserver-xorg xserver-xorg/autodetect_mouse boolean true + +# Monitor autodetection is recommended. +xserver-xorg xserver-xorg/autodetect_monitor boolean true +# Uncomment if you have an LCD display. 
+#xserver-xorg xserver-xorg/config/monitor/lcd boolean true
+# X has three configuration paths for the monitor. Here's how to preseed
+# the "medium" path, which is always available. The "simple" path may not
+# be available, and the "advanced" path asks too many questions.
+xserver-xorg xserver-xorg/config/monitor/selection-method \
+       select medium
+xserver-xorg xserver-xorg/config/monitor/mode-list \
+       select 1024x768 @ 60 Hz
+
+### Preseeding other packages
+# Depending on what software you choose to install, or if things go wrong
+# during the installation process, it's possible that other questions may
+# be asked. You can preseed those too, of course. To get a list of every
+# possible question that could be asked during an install, do an
+# installation, and then run these commands:
+#   debconf-get-selections --installer > file
+#   debconf-get-selections >> file
+
+
+#### Advanced options
+### Running custom commands during the installation
+# d-i preseeding is inherently not secure. Nothing in the installer checks
+# for attempts at buffer overflows or other exploits of the values of a
+# preconfiguration file like this one. Only use preconfiguration files from
+# trusted locations! To drive that home, and because it's generally useful,
+# here's a way to run any shell command you'd like inside the installer,
+# automatically.
+
+# This first command is run as early as possible, just after
+# preseeding is read.
+#d-i preseed/early_command string anna-install some-udeb
+# This command is run immediately before the partitioner starts. It may be
+# useful to apply dynamic partitioner preseeding that depends on the state
+# of the disks (which may not be visible when preseed/early_command runs).
+#d-i partman/early_command \
+#       string debconf-set partman-auto/disk "$(list-devices disk | head -n1)"
+# This command is run just before the install finishes, but when there is
+# still a usable /target directory. You can chroot to /target and use it
+# directly, or use the apt-install and in-target commands to easily install
+# packages and run commands in the target system.
+#d-i preseed/late_command string apt-install zsh; in-target chsh -s /bin/zsh
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
new file mode 100755
index 00000000..0e275705
--- /dev/null
+++ b/tools/xen/install_os_domU.sh
@@ -0,0 +1,434 @@
+#!/bin/bash
+
+# This script is a top-level script
+# It must be run on a XenServer or XCP machine
+#
+# It creates a DomU VM that runs OpenStack services
+#
+# For more details see: README.md
+
+# Exit on errors
+set -o errexit
+# Echo commands
+set -o xtrace
+
+# Abort if localrc is not set
+if [ ! -e ../../localrc ]; then
+    echo "You must have a localrc with ALL necessary passwords defined before proceeding."
+    echo "See the xen README for required passwords."
+    exit 1
+fi
+
+# This directory
+TOP_DIR=$(cd $(dirname "$0") && pwd)
+
+# Source lower level functions
+. $TOP_DIR/../../functions
+
+# Include onexit commands
+. 
$TOP_DIR/scripts/on_exit.sh + + +# +# Get Settings +# + +# Source params - override xenrc params in your localrc to suit your taste +source xenrc + +xe_min() +{ + local cmd="$1" + shift + xe "$cmd" --minimal "$@" +} + + +# +# Prepare Dom0 +# including installing XenAPI plugins +# + +cd $TOP_DIR +if [ -f ./master ] +then + rm -rf ./master + rm -rf ./nova +fi + +# get nova +NOVA_ZIPBALL_URL=${NOVA_ZIPBALL_URL:-$(echo $NOVA_REPO | sed "s:\.git$::;s:$:/zipball/$NOVA_BRANCH:g")} +wget $NOVA_ZIPBALL_URL -O nova-zipball --no-check-certificate +unzip -o nova-zipball -d ./nova + +# install xapi plugins +XAPI_PLUGIN_DIR=/etc/xapi.d/plugins/ +if [ ! -d $XAPI_PLUGIN_DIR ]; then + # the following is needed when using xcp-xapi + XAPI_PLUGIN_DIR=/usr/lib/xcp/plugins/ +fi +cp -pr ./nova/*/plugins/xenserver/xenapi/etc/xapi.d/plugins/* $XAPI_PLUGIN_DIR + +# Install the netwrap xapi plugin to support agent control of dom0 networking +if [[ "$ENABLED_SERVICES" =~ "q-agt" && "$Q_PLUGIN" = "openvswitch" ]]; then + if [ -f ./quantum ]; then + rm -rf ./quantum + fi + # get quantum + QUANTUM_ZIPBALL_URL=${QUANTUM_ZIPBALL_URL:-$(echo $QUANTUM_REPO | sed "s:\.git$::;s:$:/zipball/$QUANTUM_BRANCH:g")} + wget $QUANTUM_ZIPBALL_URL -O quantum-zipball --no-check-certificate + unzip -o quantum-zipball -d ./quantum + cp -pr ./quantum/*/quantum/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/* $XAPI_PLUGIN_DIR +fi + +chmod a+x ${XAPI_PLUGIN_DIR}* + +mkdir -p /boot/guest + + +# +# Configure Networking +# + +# Helper to create networks +# Uses echo trickery to return network uuid +function create_network() { + br=$1 + dev=$2 + vlan=$3 + netname=$4 + if [ -z $br ] + then + pif=$(xe_min pif-list device=$dev VLAN=$vlan) + if [ -z $pif ] + then + net=$(xe network-create name-label=$netname) + else + net=$(xe_min network-list PIF-uuids=$pif) + fi + echo $net + return 0 + fi + if [ ! $(xe_min network-list params=bridge | grep -w --only-matching $br) ] + then + echo "Specified bridge $br does not exist" + echo "If you wish to use defaults, please keep the bridge name empty" + exit 1 + else + net=$(xe_min network-list bridge=$br) + echo $net + fi +} + +function errorcheck() { + rc=$? + if [ $rc -ne 0 ] + then + exit $rc + fi +} + +# Create host, vm, mgmt, pub networks on XenServer +VM_NET=$(create_network "$VM_BR" "$VM_DEV" "$VM_VLAN" "vmbr") +errorcheck +MGT_NET=$(create_network "$MGT_BR" "$MGT_DEV" "$MGT_VLAN" "mgtbr") +errorcheck +PUB_NET=$(create_network "$PUB_BR" "$PUB_DEV" "$PUB_VLAN" "pubbr") +errorcheck + +# Helper to create vlans +function create_vlan() { + dev=$1 + vlan=$2 + net=$3 + # VLAN -1 refers to no VLAN (physical network) + if [ $vlan -eq -1 ] + then + return + fi + if [ -z $(xe_min vlan-list tag=$vlan) ] + then + pif=$(xe_min pif-list network-uuid=$net) + # We created a brand new network this time + if [ -z $pif ] + then + pif=$(xe_min pif-list device=$dev VLAN=-1) + xe vlan-create pif-uuid=$pif vlan=$vlan network-uuid=$net + else + echo "VLAN does not exist but PIF attached to this network" + echo "How did we reach here?" 
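+            # A PIF is already attached to this network even though its VLAN
+            # was never created; bail out rather than guess at the intent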
+            exit 1
+        fi
+    fi
+}
+
+# Create vlans for vm and management
+create_vlan $PUB_DEV $PUB_VLAN $PUB_NET
+create_vlan $VM_DEV $VM_VLAN $VM_NET
+create_vlan $MGT_DEV $MGT_VLAN $MGT_NET
+
+# Get final bridge names
+if [ -z $VM_BR ]; then
+    VM_BR=$(xe_min network-list uuid=$VM_NET params=bridge)
+fi
+if [ -z $MGT_BR ]; then
+    MGT_BR=$(xe_min network-list uuid=$MGT_NET params=bridge)
+fi
+if [ -z $PUB_BR ]; then
+    PUB_BR=$(xe_min network-list uuid=$PUB_NET params=bridge)
+fi
+
+# dom0 ip, XenAPI is assumed to be listening
+HOST_IP=${HOST_IP:-`ifconfig xenbr0 | grep "inet addr" | cut -d ":" -f2 | sed "s/ .*//"`}
+
+# Set up ip forwarding, but skip on xcp-xapi
+if [ -a /etc/sysconfig/network ]; then
+    if ! grep -q "FORWARD_IPV4=YES" /etc/sysconfig/network; then
+        # FIXME: This doesn't work on reboot!
+        echo "FORWARD_IPV4=YES" >> /etc/sysconfig/network
+    fi
+fi
+# Also, enable ip forwarding in rc.local, since the above trick isn't working
+if ! grep -q "echo 1 >/proc/sys/net/ipv4/ip_forward" /etc/rc.local; then
+    echo "echo 1 >/proc/sys/net/ipv4/ip_forward" >> /etc/rc.local
+fi
+# Enable ip forwarding at runtime as well
+echo 1 > /proc/sys/net/ipv4/ip_forward
+
+
+#
+# Shutdown previous runs
+#
+
+DO_SHUTDOWN=${DO_SHUTDOWN:-1}
+CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false}
+if [ "$DO_SHUTDOWN" = "1" ]; then
+    # Shut down all domUs that were created previously
+    clean_templates_arg=""
+    if $CLEAN_TEMPLATES; then
+        clean_templates_arg="--remove-templates"
+    fi
+    ./scripts/uninstall-os-vpx.sh $clean_templates_arg
+
+    # Destroy any instances that were launched
+    for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do
+        echo "Shutting down nova instance $uuid"
+        xe vm-unpause uuid=$uuid || true
+        xe vm-shutdown uuid=$uuid || true
+        xe vm-destroy uuid=$uuid
+    done
+
+    # Destroy orphaned vdis
+    for uuid in `xe vdi-list | grep -1 Glance | grep uuid | sed "s/.*\: //g"`; do
+        xe vdi-destroy uuid=$uuid
+    done
+fi
+
+
+#
+# Create Ubuntu VM template
+# and/or create VM from template
+#
+
+GUEST_NAME=${GUEST_NAME:-"DevStackOSDomU"}
+TNAME="devstack_template"
+SNAME_PREPARED="template_prepared"
+SNAME_FIRST_BOOT="before_first_boot"
+
+function wait_for_VM_to_halt() {
+    set +x
+    echo "Waiting for the VM to halt. Progress in-VM can be checked with vncviewer:"
+    mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.')
+    domid=$(xe vm-list name-label="$GUEST_NAME" params=dom-id minimal=true)
+    port=$(xenstore-read /local/domain/$domid/console/vnc-port)
+    echo "vncviewer -via $mgmt_ip localhost:${port:2}"
+    while true
+    do
+        state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted)
+        if [ -n "$state" ]
+        then
+            break
+        else
+            echo -n "."
+            sleep 20
+        fi
+    done
+    set -x
+}
+
+templateuuid=$(xe template-list name-label="$TNAME")
+if [ -z "$templateuuid" ]; then
+    #
+    # Install Ubuntu over network
+    #
+
+    # always update the preseed file, in case we have a newer one
+    PRESEED_URL=${PRESEED_URL:-""}
+    if [ -z "$PRESEED_URL" ]; then
+        PRESEED_URL="${HOST_IP}/devstackubuntupreseed.cfg"
+        HTTP_SERVER_LOCATION="/opt/xensource/www"
+        if [ ! 
-e $HTTP_SERVER_LOCATION ]; then + HTTP_SERVER_LOCATION="/var/www/html" + mkdir -p $HTTP_SERVER_LOCATION + fi + cp -f $TOP_DIR/devstackubuntupreseed.cfg $HTTP_SERVER_LOCATION + MIRROR=${MIRROR:-""} + if [ -n "$MIRROR" ]; then + sed -e "s,d-i mirror/http/hostname string .*,d-i mirror/http/hostname string $MIRROR," \ + -i "${HTTP_SERVER_LOCATION}/devstackubuntupreseed.cfg" + fi + fi + + # Update the template + $TOP_DIR/scripts/install_ubuntu_template.sh $PRESEED_URL + + # create a new VM with the given template + # creating the correct VIFs and metadata + $TOP_DIR/scripts/install-os-vpx.sh -t "$UBUNTU_INST_TEMPLATE_NAME" -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -r $OSDOMU_MEM_MB -k "flat_network_bridge=${VM_BR}" + + # wait for install to finish + wait_for_VM_to_halt + + # set VM to restart after a reboot + vm_uuid=$(xe_min vm-list name-label="$GUEST_NAME") + xe vm-param-set actions-after-reboot=Restart uuid="$vm_uuid" + + # + # Prepare VM for DevStack + # + + # Install XenServer tools, and other such things + $TOP_DIR/prepare_guest_template.sh "$GUEST_NAME" + + # start the VM to run the prepare steps + xe vm-start vm="$GUEST_NAME" + + # Wait for prep script to finish and shutdown system + wait_for_VM_to_halt + + # Make template from VM + snuuid=$(xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_PREPARED") + xe snapshot-clone uuid=$snuuid new-name-label="$TNAME" +else + # + # Template already installed, create VM from template + # + vm_uuid=$(xe vm-install template="$TNAME" new-name-label="$GUEST_NAME") +fi + + +# +# Inject DevStack inside VM disk +# +$TOP_DIR/build_xva.sh "$GUEST_NAME" + +# create a snapshot before the first boot +# to allow a quick re-run with the same settings +xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_FIRST_BOOT" + + +# +# Run DevStack VM +# +xe vm-start vm="$GUEST_NAME" + + +# +# Find IP and optionally wait for stack.sh to complete +# + +function find_ip_by_name() { + local guest_name="$1" + local interface="$2" + local period=10 + max_tries=10 + i=0 + while true + do + if [ $i -ge $max_tries ]; then + echo "Timed out waiting for devstack ip address" + exit 11 + fi + + devstackip=$(xe vm-list --minimal \ + name-label=$guest_name \ + params=networks | sed -ne "s,^.*${interface}/ip: \([0-9.]*\).*\$,\1,p") + if [ -z "$devstackip" ] + then + sleep $period + ((i++)) + else + echo $devstackip + break + fi + done +} + +function ssh_no_check() { + ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$@" +} + +# Note the XenServer needs to be on the chosen +# network, so XenServer can access Glance API +if [ $HOST_IP_IFACE == "eth2" ]; then + DOMU_IP=$MGT_IP + if [ $MGT_IP == "dhcp" ]; then + DOMU_IP=$(find_ip_by_name $GUEST_NAME 2) + fi +else + DOMU_IP=$PUB_IP + if [ $PUB_IP == "dhcp" ]; then + DOMU_IP=$(find_ip_by_name $GUEST_NAME 3) + fi +fi + +# If we have copied our ssh credentials, use ssh to monitor while the installation runs +WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1} +COPYENV=${COPYENV:-1} +if [ "$WAIT_TILL_LAUNCH" = "1" ] && [ -e ~/.ssh/id_rsa.pub ] && [ "$COPYENV" = "1" ]; then + echo "We're done launching the vm, about to start tailing the" + echo "stack.sh log. It will take a second or two to start." + echo + echo "Just CTRL-C at any time to stop tailing." + + # wait for log to appear + while ! ssh_no_check -q stack@$DOMU_IP "[ -e run.sh.log ]"; do + sleep 10 + done + + set +x + echo -n "Waiting for startup script to finish" + while [ `ssh_no_check -q stack@$DOMU_IP pgrep -c run.sh` -ge 1 ] + do + sleep 10 + echo -n "." 
+ done + echo "done!" + set -x + + # output the run.sh.log + ssh_no_check -q stack@$DOMU_IP 'cat run.sh.log' + + # Fail if the expected text is not found + ssh_no_check -q stack@$DOMU_IP 'cat run.sh.log' | grep -q 'stack.sh completed in' + + set +x + echo "################################################################################" + echo "" + echo "All Finished!" + echo "You can visit the OpenStack Dashboard" + echo "at http://$DOMU_IP, and contact other services at the usual ports." +else + set +x + echo "################################################################################" + echo "" + echo "All Finished!" + echo "Now, you can monitor the progress of the stack.sh installation by " + echo "tailing /opt/stack/run.sh.log from within your domU." + echo "" + echo "ssh into your domU now: 'ssh stack@$DOMU_IP' using your password" + echo "and then do: 'tail -f /opt/stack/run.sh.log'" + echo "" + echo "When the script completes, you can then visit the OpenStack Dashboard" + echo "at http://$DOMU_IP, and contact other services at the usual ports." +fi diff --git a/tools/xen/prepare_dom0.sh b/tools/xen/prepare_dom0.sh deleted file mode 100755 index ce16ada4..00000000 --- a/tools/xen/prepare_dom0.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh -set -o xtrace -set -o errexit - -# Install basics for vi and git -yum -y --enablerepo=base install gcc make vim-enhanced zlib-devel openssl-devel - -# Simple but usable vimrc -if [ ! -e /root/.vimrc ]; then - cat > /root/.vimrc <$STAGING_DIR/etc/apt/sources.list -deb http://us.archive.ubuntu.com/ubuntu/ oneiric main restricted -deb-src http://us.archive.ubuntu.com/ubuntu/ oneiric main restricted -deb http://us.archive.ubuntu.com/ubuntu/ oneiric-updates main restricted -deb-src http://us.archive.ubuntu.com/ubuntu/ oneiric-updates main restricted -deb http://us.archive.ubuntu.com/ubuntu/ oneiric universe -deb http://us.archive.ubuntu.com/ubuntu/ oneiric-updates universe -deb http://us.archive.ubuntu.com/ubuntu/ oneiric multiverse -deb http://us.archive.ubuntu.com/ubuntu/ oneiric-updates multiverse -EOF +XS_TOOLS_PATH=${XS_TOOLS_PATH:-"/root/xs-tools.deb"} +STACK_USER=${STACK_USER:-stack} # Install basics chroot $STAGING_DIR apt-get update -chroot $STAGING_DIR apt-get install -y linux-image-$KERNEL_VERSION chroot $STAGING_DIR apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool chroot $STAGING_DIR apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo chroot $STAGING_DIR pip install xenapi -# Install guest utilities -XEGUEST=xe-guest-utilities_5.6.100-651_amd64.deb -wget http://images.ansolabs.com/xen/$XEGUEST -O $XEGUEST -cp $XEGUEST $STAGING_DIR/root -chroot $STAGING_DIR dpkg -i /root/$XEGUEST +# Install XenServer guest utilities +cp $XS_TOOLS_PATH ${STAGING_DIR}${XS_TOOLS_PATH} +chroot $STAGING_DIR dpkg -i $XS_TOOLS_PATH chroot $STAGING_DIR update-rc.d -f xe-linux-distribution remove chroot $STAGING_DIR update-rc.d xe-linux-distribution defaults @@ -53,12 +47,12 @@ rm -f $STAGING_DIR/etc/localtime # Add stack user chroot $STAGING_DIR groupadd libvirtd -chroot $STAGING_DIR useradd stack -s /bin/bash -d /opt/stack -G 
libvirtd
-echo stack:$GUEST_PASSWORD | chroot $STAGING_DIR chpasswd
-echo "stack ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers
+chroot $STAGING_DIR useradd $STACK_USER -s /bin/bash -d /opt/stack -G libvirtd
+echo $STACK_USER:$GUEST_PASSWORD | chroot $STAGING_DIR chpasswd
+echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers
 
 # Give ownership of /opt/stack to stack user
-chroot $STAGING_DIR chown -R stack /opt/stack
+chroot $STAGING_DIR chown -R $STACK_USER /opt/stack
 
 # Make our ip address hostnames look nice at the command prompt
 echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $STAGING_DIR/opt/stack/.bashrc
@@ -86,3 +80,12 @@ if [ "$DO_TGZ" = "1" ]; then
     rm -f stage.tgz
     tar cfz stage.tgz stage
 fi
+
+# remove self from rc.local
+# so this script is not run again
+rm -rf /etc/rc.local
+mv /etc/rc.local.preparebackup /etc/rc.local
+cp $STAGING_DIR/etc/rc.local $STAGING_DIR/etc/rc.local.backup
+
+# shutdown to notify we are done
+shutdown -h now
diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh
new file mode 100755
index 00000000..19bd2f84
--- /dev/null
+++ b/tools/xen/prepare_guest_template.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+# This script is run by install_os_domU.sh
+#
+# Parameters:
+# - $GUEST_NAME - hostname for the DomU VM
+#
+# It modifies the ubuntu image created by install_os_domU.sh
+#
+# This script is responsible for customizing the fresh ubuntu
+# image so on boot it runs the prepare_guest.sh script
+# that modifies the VM so it is ready to run stack.sh.
+# It does this by mounting the disk image of the VM.
+#
+# The resultant image is started by install_os_domU.sh,
+# and once the VM has shutdown, build_xva.sh is run
+
+# Exit on errors
+set -o errexit
+# Echo commands
+set -o xtrace
+
+# This directory
+TOP_DIR=$(cd $(dirname "$0") && pwd)
+
+# Include onexit commands
+. $TOP_DIR/scripts/on_exit.sh
+
+# Source params - override xenrc params in your localrc to suit your taste
+source xenrc
+
+#
+# Parameters
+#
+GUEST_NAME="$1"
+
+# Mount the VDI
+STAGING_DIR=$($TOP_DIR/scripts/manage-vdi open $GUEST_NAME 0 1 | grep -o "/tmp/tmp.[[:alnum:]]*")
+add_on_exit "$TOP_DIR/scripts/manage-vdi close $GUEST_NAME 0 1"
+
+# Make sure we have a stage
+if [ ! -d $STAGING_DIR/etc ]; then
+    echo "Stage is not properly set up!"
+    exit 1
+fi
+
+# Copy XenServer tools deb into the VM
+ISO_DIR="/opt/xensource/packages/iso"
+XS_TOOLS_FILE_NAME="xs-tools.deb"
+XS_TOOLS_PATH="/root/$XS_TOOLS_FILE_NAME"
+if [ -e "$ISO_DIR" ]; then
+    TOOLS_ISO=$(ls -1 $ISO_DIR/xs-tools-*.iso | head -1)
+    TMP_DIR=/tmp/temp.$RANDOM
+    mkdir -p $TMP_DIR
+    mount -o loop $TOOLS_ISO $TMP_DIR
+    DEB_FILE=$(ls $TMP_DIR/Linux/*amd64.deb)
+    echo "Copying XenServer tools into VM from: $DEB_FILE"
+    cp $DEB_FILE "${STAGING_DIR}${XS_TOOLS_PATH}"
+    umount $TMP_DIR
+    rm -rf $TMP_DIR
+else
+    echo "WARNING: no XenServer tools found, falling back to 5.6 tools"
+    TOOLS_URL="https://github.com/downloads/citrix-openstack/warehouse/xe-guest-utilities_5.6.100-651_amd64.deb"
+    wget $TOOLS_URL -O $XS_TOOLS_FILE_NAME
+    cp $XS_TOOLS_FILE_NAME "${STAGING_DIR}${XS_TOOLS_PATH}"
+    rm -rf $XS_TOOLS_FILE_NAME
+fi
+
+# Copy prepare_guest.sh to VM
+mkdir -p $STAGING_DIR/opt/stack/
+cp $TOP_DIR/prepare_guest.sh $STAGING_DIR/opt/stack/prepare_guest.sh
+
+# backup rc.local
+cp $STAGING_DIR/etc/rc.local $STAGING_DIR/etc/rc.local.preparebackup
+
+# run prepare_guest.sh on boot
+cat <<EOF >$STAGING_DIR/etc/rc.local
+GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ \
+    DO_TGZ=0 XS_TOOLS_PATH=$XS_TOOLS_PATH \
+    bash /opt/stack/prepare_guest.sh > /opt/stack/prepare_guest.log 2>&1
+EOF
diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh
index 9aebb138..241296bd 100755
--- a/tools/xen/scripts/install-os-vpx.sh
+++ b/tools/xen/scripts/install-os-vpx.sh
@@ -19,7 +19,7 @@
 
 set -eux
 
-. /etc/xensource-inventory
+[[ -f "/etc/xensource-inventory" ]] && source "/etc/xensource-inventory" || source "/etc/xcp/inventory"
 
 NAME="XenServer OpenStack VPX"
 DATA_VDI_SIZE="500MiB"
@@ -38,7 +38,7 @@ usage()
 cat << EOF
 
   Usage: $0 [-f FILE_PATH] [-d DISK_SIZE] [-v BRIDGE_NAME] [-m BRIDGE_NAME] [-p BRIDGE_NAME]
-            [-k PARAMS] [-r RAM] [-i|-c] [-w] [-b]
+            [-k PARAMS] [-r RAM] [-i|-c] [-w] [-b] [-l NAME_LABEL] [-t TEMPLATE_NW_INSTALL]
 
   Installs XenServer OpenStack VPX.
 
@@ -60,6 +60,8 @@ cat << EOF
     -k params           Specifies kernel parameters.
     -r MiB              Specifies RAM used by the VPX, in MiB.
                         By default it will take the value from the XVA.
+    -l name             Specifies the name label for the VM.
+    -t template         Network install an openstack domU from this template
 
   EXAMPLES:
 
@@ -87,7 +89,7 @@ EOF
 
 get_params()
 {
-  while getopts "hicwbf:d:v:m:p:k:r:" OPTION;
+  while getopts "hicwbf:d:v:m:p:k:r:l:t:" OPTION;
   do
     case $OPTION in
       h) usage
@@ -126,6 +128,12 @@ get_params()
       v)
          BRIDGE_V=$OPTARG
          ;;
+      l)
+         NAME_LABEL=$OPTARG
+         ;;
+      t)
+         TEMPLATE_NAME=$OPTARG
+         ;;
       ?)
          usage
          exit
@@ -242,7 +250,7 @@ create_management_vif()
 
 
 # This installs the interface for public traffic, only if a bridge is specified
-# The interface is not configured at this stage, but it will be, once the admin 
+# The interface is not configured at this stage, but it will be, once the admin
 # tasks are complete for the services of this VPX
 create_public_vif()
 {
@@ -324,17 +332,11 @@ set_kernel_params()
 {
   local v="$1"
   local args=$KERNEL_PARAMS
-  local cmdline=$(cat /proc/cmdline)
-  for word in $cmdline
-  do
-    if echo "$word" | grep -q "geppetto"
-    then
-      args="$word $args"
-    fi
-  done
   if [ "$args" != "" ]
   then
     echo "Passing Geppetto args to VPX: $args."
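+    # Keep any PV-args already set on the VM (e.g. by the template) and
+    # append the requested kernel parameters instead of overwriting them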
+    pvargs=$(xe vm-param-get param-name=PV-args uuid="$v")
+    args="$pvargs $args"
     xe vm-param-set PV-args="$args" uuid="$v"
   fi
 }
@@ -418,6 +420,20 @@
 then
   destroy_vifs "$vm_uuid"
   set_all "$vm_uuid"
+elif [ "$TEMPLATE_NAME" ]
+then
+  echo $TEMPLATE_NAME
+  vm_uuid=$(xe_min vm-install template="$TEMPLATE_NAME" new-name-label="$NAME_LABEL")
+  destroy_vifs "$vm_uuid"
+  set_auto_start "$vm_uuid"
+  create_gi_vif "$vm_uuid"
+  create_vm_vif "$vm_uuid"
+  create_management_vif "$vm_uuid"
+  create_public_vif "$vm_uuid"
+  set_kernel_params "$vm_uuid"
+  xe vm-param-set other-config:os-vpx=true uuid="$vm_uuid"
+  xe vm-param-set actions-after-reboot=Destroy uuid="$vm_uuid"
+  set_memory "$vm_uuid"
 else
   if [ ! -f "$VPX_FILE" ]
   then
@@ -443,7 +459,7 @@ else
 
   renumber_system_disk "$vm_uuid"
 
-  nl=$(xe_min vm-list params=name-label uuid=$vm_uuid)
+  nl=${NAME_LABEL:-$(xe_min vm-list params=name-label uuid=$vm_uuid)}
   xe vm-param-set \
     "name-label=${nl/ import/}" \
     other-config:os-vpx=true \
diff --git a/tools/xen/scripts/install_ubuntu_template.sh b/tools/xen/scripts/install_ubuntu_template.sh
new file mode 100755
index 00000000..43b6decd
--- /dev/null
+++ b/tools/xen/scripts/install_ubuntu_template.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+#
+# This creates an Ubuntu Server 32bit or 64bit template
+# on XenServer 5.6.x, 6.0.x and 6.1.x
+# The template does a net install only
+#
+# Based on a script by: David Markey
+#
+
+# Exit on errors
+set -o errexit
+# Echo commands
+set -o xtrace
+
+# This directory
+BASE_DIR=$(cd $(dirname "$0") && pwd)
+
+# For default settings see xenrc
+source $BASE_DIR/../xenrc
+
+# Get the params
+preseed_url=$1
+
+# Delete template or skip template creation as required
+previous_template=$(xe template-list name-label="$UBUNTU_INST_TEMPLATE_NAME" \
+    params=uuid --minimal)
+if [ -n "$previous_template" ]; then
+    if $CLEAN_TEMPLATES; then
+        xe template-param-clear param-name=other-config uuid=$previous_template
+        xe template-uninstall template-uuid=$previous_template force=true
+    else
+        echo "Template $UBUNTU_INST_TEMPLATE_NAME already present"
+        exit 0
+    fi
+fi
+
+# Get built-in template
+builtin_name="Debian Squeeze 6.0 (32-bit)"
+builtin_uuid=$(xe template-list name-label="$builtin_name" --minimal)
+if [[ -z $builtin_uuid ]]; then
+    echo "Can't find the Debian Squeeze 32-bit template on your XenServer."
+ exit 1 +fi + +# Clone built-in template to create new template +new_uuid=$(xe vm-clone uuid=$builtin_uuid \ + new-name-label="$UBUNTU_INST_TEMPLATE_NAME") +disk_size=$(($OSDOMU_VDI_GB * 1024 * 1024 * 1024)) + +# Some of these settings can be found in example preseed files +# however these need to be answered before the netinstall +# is ready to fetch the preseed file, and as such must be here +# to get a fully automated install +pvargs="-- quiet console=hvc0 partman/default_filesystem=ext3 \ +console-setup/ask_detect=false locale=${UBUNTU_INST_LOCALE} \ +keyboard-configuration/layoutcode=${UBUNTU_INST_KEYBOARD} \ +netcfg/choose_interface=${HOST_IP_IFACE} \ +netcfg/get_hostname=os netcfg/get_domain=os auto \ +url=${preseed_url}" + +if [ "$NETINSTALLIP" != "dhcp" ]; then + netcfgargs="netcfg/disable_autoconfig=true \ +netcfg/get_nameservers=${UBUNTU_INST_NAMESERVERS} \ +netcfg/get_ipaddress=${UBUNTU_INST_IP} \ +netcfg/get_netmask=${UBUNTU_INST_NETMASK} \ +netcfg/get_gateway=${UBUNTU_INST_GATEWAY} \ +netcfg/confirm_static=true" + pvargs="${pvargs} ${netcfgargs}" +fi + +xe template-param-set uuid=$new_uuid \ + other-config:install-methods=http \ + other-config:install-repository="$UBUNTU_INST_REPOSITORY" \ + PV-args="$pvargs" \ + other-config:debian-release="$UBUNTU_INST_RELEASE" \ + other-config:default_template=true \ + other-config:disks='' \ + other-config:install-arch="$UBUNTU_INST_ARCH" + +echo "Ubuntu template installed uuid:$new_uuid" diff --git a/tools/xen/scripts/manage-vdi b/tools/xen/scripts/manage-vdi new file mode 100755 index 00000000..05c4b074 --- /dev/null +++ b/tools/xen/scripts/manage-vdi @@ -0,0 +1,86 @@ +#!/bin/bash + +set -eux + +action="$1" +vm="$2" +device="${3-0}" +part="${4-}" + +function xe_min() { + local cmd="$1" + shift + xe "$cmd" --minimal "$@" +} + +function run_udev_settle() { + which_udev=$(which udevsettle) || true + if [ -n "$which_udev" ]; then + udevsettle + else + udevadm settle + fi +} + +vm_uuid=$(xe_min vm-list name-label="$vm") +vdi_uuid=$(xe_min vbd-list params=vdi-uuid vm-uuid="$vm_uuid" \ + userdevice="$device") + +dom0_uuid=$(xe_min vm-list is-control-domain=true) + +function get_mount_device() { + vbd_uuid=$1 + + dev=$(xe_min vbd-list params=device uuid="$vbd_uuid") + if [[ "$dev" =~ "sm/" ]]; then + DEBIAN_FRONTEND=noninteractive \ + apt-get --option "Dpkg::Options::=--force-confold" --assume-yes \ + install kpartx &> /dev/null || true + mapping=$(kpartx -av "/dev/$dev" | sed -ne 's,^add map \([a-z0-9\-]*\).*$,\1,p' | sed -ne "s,^\(.*${part}\)\$,\1,p") + if [ -z "$mapping" ]; then + echo "Failed to find mapping" + exit -1 + fi + echo "/dev/mapper/${mapping}" + else + echo "/dev/$dev$part" + fi +} + +function clean_dev_mappings() { + dev=$(xe_min vbd-list params=device uuid="$vbd_uuid") + if [[ "$dev" =~ "sm/" ]]; then + kpartx -dv "/dev/$dev" + fi +} + +function open_vdi() { + vbd_uuid=$(xe vbd-create vm-uuid="$dom0_uuid" vdi-uuid="$vdi_uuid" \ + device=autodetect) + mp=$(mktemp -d) + xe vbd-plug uuid="$vbd_uuid" + + run_udev_settle + + mount_device=$(get_mount_device "$vbd_uuid") + mount "$mount_device" "$mp" + echo "Your vdi is mounted at $mp" +} + +function close_vdi() { + vbd_uuid=$(xe_min vbd-list vm-uuid="$dom0_uuid" vdi-uuid="$vdi_uuid") + mount_device=$(get_mount_device "$vbd_uuid") + run_udev_settle + umount "$mount_device" + + clean_dev_mappings + + xe vbd-unplug uuid=$vbd_uuid + xe vbd-destroy uuid=$vbd_uuid +} + +if [ "$action" == "open" ]; then + open_vdi +elif [ "$action" == "close" ]; then + close_vdi +fi diff --git 
a/tools/xen/scripts/mkxva b/tools/xen/scripts/mkxva index dcdee61a..a316da2d 100755 --- a/tools/xen/scripts/mkxva +++ b/tools/xen/scripts/mkxva @@ -177,7 +177,7 @@ EOF /sbin/mkfs.ext3 -I 128 -m0 -F "$partition" /sbin/e2label "$partition" vpxroot make_fs_inner "$staging" "$partition" "" - + # Now run grub on the image we've created CLEAN_MOUNTPOINT=$(mktemp -d "$TMPDIR/mkfs-XXXXXX") @@ -203,7 +203,7 @@ EOF $SUDO umount "$CLEAN_MOUNTPOINT" CLEAN_MOUNTPOINT= - + # Grub expects a disk with name /dev/xxxx with a first partition # named /dev/xxxx1, so we give it what it wants using symlinks # Note: /dev is linked to the real /dev of the build machine, so @@ -214,14 +214,14 @@ EOF rm -f "$disk_part1_name" ln -s "$CLEAN_LOSETUP" "$disk_name" ln -s "$partition" "$disk_part1_name" - + # Feed commands into the grub shell to setup the disk grub --no-curses --device-map=/dev/null </dev/null @@ -359,7 +359,7 @@ done # cleanup -if [ -z "${DO_NOT_CLEANUP:-}" ] ; then +if [ -z "${DO_NOT_CLEANUP:-}" ] ; then rm -rf "$XVA_TARBALL_STAGING" rm -f "$FS_TMPFILE" fi diff --git a/tools/xen/scripts/on_exit.sh b/tools/xen/scripts/on_exit.sh new file mode 100755 index 00000000..a4db39c2 --- /dev/null +++ b/tools/xen/scripts/on_exit.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +set -e +set -o xtrace + +declare -a on_exit_hooks + +on_exit() +{ + for i in $(seq $((${#on_exit_hooks[*]} - 1)) -1 0) + do + eval "${on_exit_hooks[$i]}" + done +} + +add_on_exit() +{ + local n=${#on_exit_hooks[*]} + on_exit_hooks[$n]="$*" + if [[ $n -eq 0 ]] + then + trap on_exit EXIT + fi +} diff --git a/tools/xen/scripts/templatedelete.sh b/tools/xen/scripts/templatedelete.sh new file mode 100755 index 00000000..66765b24 --- /dev/null +++ b/tools/xen/scripts/templatedelete.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +#Usage: ./templatedelete.sh + +templateuuid="$1" + +xe template-param-set other-config:default_template=false uuid="$templateuuid" +xe template-param-set is-a-template=false uuid="$templateuuid" +xe vm-destroy uuid="$templateuuid" diff --git a/tools/xen/scripts/uninstall-os-vpx.sh b/tools/xen/scripts/uninstall-os-vpx.sh index a82f3a05..0feaec79 100755 --- a/tools/xen/scripts/uninstall-os-vpx.sh +++ b/tools/xen/scripts/uninstall-os-vpx.sh @@ -17,19 +17,19 @@ # under the License. # -remove_data= -if [ "$1" = "--remove-data" ] -then - remove_data=1 -fi +set -ex -set -eu +# By default, don't remove the templates +REMOVE_TEMPLATES=${REMOVE_TEMPLATES:-"false"} +if [ "$1" = "--remove-templates" ]; then + REMOVE_TEMPLATES=true +fi xe_min() { local cmd="$1" shift - /opt/xensource/bin/xe "$cmd" --minimal "$@" + xe "$cmd" --minimal "$@" } destroy_vdi() @@ -39,11 +39,8 @@ destroy_vdi() local dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice) local vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid) - if [ "$type" = 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ] - then - echo -n "Destroying data disk... " + if [ "$type" == 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ]; then xe vdi-destroy uuid=$vdi_uuid - echo "done." fi } @@ -52,50 +49,36 @@ uninstall() local vm_uuid="$1" local power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state) - if [ "$power_state" != "halted" ] - then - echo -n "Shutting down VM... " + if [ "$power_state" != "halted" ]; then xe vm-shutdown vm=$vm_uuid force=true - echo "done." 
fi - if [ "$remove_data" = "1" ] - then - for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g') - do - destroy_vdi "$v" - done - fi + for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do + destroy_vdi "$v" + done - echo -n "Deleting VM... " xe vm-uninstall vm=$vm_uuid force=true >/dev/null - echo "done." } uninstall_template() { local vm_uuid="$1" - if [ "$remove_data" = "1" ] - then - for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g') - do - destroy_vdi "$v" - done - fi + for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do + destroy_vdi "$v" + done - echo -n "Deleting template... " xe template-uninstall template-uuid=$vm_uuid force=true >/dev/null - echo "done." } - -for u in $(xe_min vm-list other-config:os-vpx=true | sed -e 's/,/ /g') -do +# remove the VMs and their disks +for u in $(xe_min vm-list other-config:os-vpx=true | sed -e 's/,/ /g'); do uninstall "$u" done -for u in $(xe_min template-list other-config:os-vpx=true | sed -e 's/,/ /g') -do - uninstall_template "$u" -done +# remove the templates +if [ "$REMOVE_TEMPLATES" == "true" ]; then + for u in $(xe_min template-list other-config:os-vpx=true | sed -e 's/,/ /g'); do + uninstall_template "$u" + done +fi diff --git a/tools/xen/templates/interfaces.in b/tools/xen/templates/interfaces.in index 49c3d681..74b41ccf 100644 --- a/tools/xen/templates/interfaces.in +++ b/tools/xen/templates/interfaces.in @@ -1,8 +1,15 @@ auto lo iface lo inet loopback -auto eth0 -iface eth0 inet dhcp +# If eth3 is static, the order should not matter +# and eth0 will have the default gateway. If not, +# we probably want the default gateway to be +# what is on the public interface. Hence changed +# the order here. +auto eth3 +iface eth3 inet static + address @ETH3_IP@ + netmask @ETH3_NETMASK@ auto eth1 iface eth1 inet static @@ -14,8 +21,3 @@ auto eth2 iface eth2 inet static address @ETH2_IP@ netmask @ETH2_NETMASK@ - -auto eth3 -iface eth3 inet static - address @ETH3_IP@ - netmask @ETH3_NETMASK@ diff --git a/tools/xen/templates/ova.xml.in b/tools/xen/templates/ova.xml.in index 8443dcb8..01041e20 100644 --- a/tools/xen/templates/ova.xml.in +++ b/tools/xen/templates/ova.xml.in @@ -5,7 +5,7 @@ @PRODUCT_BRAND@ @PRODUCT_VERSION@-@BUILD_NUMBER@ - + diff --git a/tools/xen/xenrc b/tools/xen/xenrc new file mode 100644 index 00000000..1a5a2a93 --- /dev/null +++ b/tools/xen/xenrc @@ -0,0 +1,76 @@ +#!/bin/bash + +# +# XenServer specific defaults for the /tools/xen/ scripts +# Similar to stackrc, you can override these in your localrc +# + +# Name of this guest +GUEST_NAME=${GUEST_NAME:-DevStackOSDomU} + +# Size of image +VDI_MB=${VDI_MB:-5000} +OSDOMU_MEM_MB=1024 +OSDOMU_VDI_GB=8 + +# VM Password +GUEST_PASSWORD=${GUEST_PASSWORD:-secrete} + +# Host Interface, i.e. the interface on the nova vm you want to expose the +# services on. 
Usually eth2 (management network) or eth3 (public network) and
+# not eth0 (private network with XenServer host) or eth1 (VM traffic network)
+# This is also used as the interface for the Ubuntu install
+HOST_IP_IFACE=${HOST_IP_IFACE:-eth3}
+
+#
+# Our nova host's network info
+#
+
+# A host-only ip that lets the interface come up, otherwise unused
+VM_IP=${VM_IP:-10.255.255.255}
+MGT_IP=${MGT_IP:-172.16.100.55}
+PUB_IP=${PUB_IP:-192.168.1.55}
+
+# Public network
+PUB_BR=${PUB_BR:-"xenbr0"}
+PUB_DEV=${PUB_DEV:-eth0}
+PUB_VLAN=${PUB_VLAN:--1}
+PUB_NETMASK=${PUB_NETMASK:-255.255.255.0}
+
+# VM network params
+VM_NETMASK=${VM_NETMASK:-255.255.255.0}
+VM_BR=${VM_BR:-""}
+VM_VLAN=${VM_VLAN:-100}
+VM_DEV=${VM_DEV:-eth0}
+
+# MGMT network params
+MGT_NETMASK=${MGT_NETMASK:-255.255.255.0}
+MGT_BR=${MGT_BR:-""}
+MGT_VLAN=${MGT_VLAN:-101}
+MGT_DEV=${MGT_DEV:-eth0}
+
+# Decide if you should enable eth0,
+# the guest installer network
+# You need to disable this on xcp-xapi on Ubuntu 12.04
+ENABLE_GI=true
+
+# Ubuntu install settings
+UBUNTU_INST_RELEASE="oneiric"
+UBUNTU_INST_TEMPLATE_NAME="Ubuntu 11.10 (64-bit) for DevStack"
+# For 12.04 use "precise" and update template name
+# However, for 12.04, you should be using
+# XenServer 6.1 and later or XCP 1.6 or later
+# 11.10 is only really supported with XenServer 6.0.2 and later
+UBUNTU_INST_ARCH="amd64"
+UBUNTU_INST_REPOSITORY="http://archive.ubuntu.net/ubuntu"
+UBUNTU_INST_LOCALE="en_US"
+UBUNTU_INST_KEYBOARD="us"
+# network configuration for HOST_IP_IFACE during install
+UBUNTU_INST_IP="dhcp"
+UBUNTU_INST_NAMESERVERS=""
+UBUNTU_INST_NETMASK=""
+UBUNTU_INST_GATEWAY=""
+
+# Load stackrc defaults
+# then override with settings from localrc
+cd ../.. && source ./stackrc && cd $TOP_DIR
diff --git a/unstack.sh b/unstack.sh
new file mode 100755
index 00000000..a086d5c6
--- /dev/null
+++ b/unstack.sh
@@ -0,0 +1,112 @@
+#!/usr/bin/env bash
+
+# **unstack.sh**
+
+# Stops that which is started by ``stack.sh`` (mostly)
+# mysql and rabbit are left running as OpenStack code refreshes
+# do not require them to be restarted.
+#
+# Stop all processes by setting ``UNSTACK_ALL`` or specifying ``--all``
+# on the command line
+
+# Keep track of the current devstack directory.
+TOP_DIR=$(cd $(dirname "$0") && pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import database library
+source $TOP_DIR/lib/database
+
+# Load local configuration
+source $TOP_DIR/stackrc
+
+# Destination path for service data
+DATA_DIR=${DATA_DIR:-${DEST}/data}
+
+# Get project function libraries
+source $TOP_DIR/lib/baremetal
+source $TOP_DIR/lib/cinder
+source $TOP_DIR/lib/horizon
+source $TOP_DIR/lib/swift
+source $TOP_DIR/lib/quantum
+
+# Determine what system we are running on. 
This provides ``os_VENDOR``, +# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME`` +GetOSVersion + +if [[ "$1" == "--all" ]]; then + UNSTACK_ALL=${UNSTACK_ALL:-1} +fi + +# Run extras +# ========== + +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i unstack + done +fi + +if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then + source $TOP_DIR/openrc + teardown_quantum_debug +fi + +# Shut down devstack's screen to get the bulk of OpenStack services in one shot +SCREEN=$(which screen) +if [[ -n "$SCREEN" ]]; then + SESSION=$(screen -ls | awk '/[0-9].stack/ { print $1 }') + if [[ -n "$SESSION" ]]; then + screen -X -S $SESSION quit + fi +fi + +# Swift runs daemons +if is_service_enabled swift; then + stop_swift + cleanup_swift +fi + +# Apache has the WSGI processes +if is_service_enabled horizon; then + stop_horizon +fi + +# Kill TLS proxies +if is_service_enabled tls-proxy; then + killall stud +fi + +# baremetal might have created a fake environment +if is_service_enabled baremetal && [[ "$BM_USE_FAKE_ENV" = "True" ]]; then + cleanup_fake_baremetal_env +fi + +SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/* + +# Get the iSCSI volumes +if is_service_enabled cinder; then + cleanup_cinder +fi + +if [[ -n "$UNSTACK_ALL" ]]; then + # Stop MySQL server + if is_service_enabled mysql; then + stop_service mysql + fi + + if is_service_enabled postgresql; then + stop_service postgresql + fi + + # Stop rabbitmq-server + if is_service_enabled rabbit; then + stop_service rabbitmq-server + fi +fi + +if is_service_enabled quantum; then + stop_quantum + stop_quantum_third_party +fi