neutron-lbaas-8.0.0/0000775000567000056710000000000012701410110015415 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/devstack/0000775000567000056710000000000012701410110017221 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/devstack/settings0000664000567000056710000000301512701407726021026 0ustar jenkinsjenkins00000000000000# settings for LBaaS devstack pluginlib/neutron_plugins/services/loadbalancer # For backward compatibility, treat q-lbaas and q-lbaasv1 the same. # In the future, the q-lbaas may default to q-lbaasv2 AGENT_LBAASV1_BINARY="$NEUTRON_BIN_DIR/neutron-lbaas-agent" AGENT_LBAASV2_BINARY="$NEUTRON_BIN_DIR/neutron-lbaasv2-agent" LBAAS_V1="q-lbaas q-lbaasv1" LBAAS_V2="q-lbaasv2" LBAAS_ANY="$LBAAS_V1 $LBAAS_V2" BARBICAN="barbican-svc" AUTH_URI=${AUTH_URI:-"http://127.0.0.1:5000/v2.0"} ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-"admin"} ADMIN_USER=${ADMIN_USER:-"admin"} ADMIN_PASSWORD=${ADMIN_PASSWORD:-"password"} AUTH_VERSION=${AUTH_VERSION:-"2"} LBAAS_AGENT_CONF_PATH=/etc/neutron/services/loadbalancer/haproxy LBAAS_AGENT_CONF_FILENAME=$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini LBAASV1_PLUGIN=${LBAASV1_PLUGIN:-"neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPlugin"} LBAASV2_PLUGIN=${LBAASV2_PLUGIN:-"neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPluginv2"} NEUTRON_LBAAS_DIR=$DEST/neutron-lbaas NEUTRON_LBAAS_CONF=$NEUTRON_CONF_DIR/neutron_lbaas.conf NEUTRON_LBAAS_SERVICE_PROVIDERV2_OCTAVIA=${NEUTRON_LBAAS_SERVICE_PROVIDERV2_OCTAVIA:-"LOADBALANCERV2:Octavia:neutron_lbaas.drivers.octavia.driver.OctaviaDriver:default"} NEUTRON_LBAAS_SERVICE_PROVIDERV1=${NEUTRON_LBAAS_SERVICE_PROVIDERV1:-"LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default"} NEUTRON_LBAAS_SERVICE_PROVIDERV2=${NEUTRON_LBAAS_SERVICE_PROVIDERV2:-${NEUTRON_LBAAS_SERVICE_PROVIDERV2_OCTAVIA}} neutron-lbaas-8.0.0/devstack/plugin.sh0000664000567000056710000001355212701407726021104 0ustar 
jenkinsjenkins00000000000000# function definitions for neutron-lbaas devstack plugin function neutron_lbaas_install { setup_develop $NEUTRON_LBAAS_DIR neutron_agent_lbaas_install_agent_packages } function neutron_agent_lbaas_install_agent_packages { if is_ubuntu; then if [[ ${OFFLINE} == False ]]; then BACKPORT="deb http://archive.ubuntu.com/ubuntu trusty-backports main restricted universe multiverse" BACKPORT_EXISTS=$(grep ^ /etc/apt/sources.list /etc/apt/sources.list.d/* | grep "${BACKPORT}") || true if [[ -z "${BACKPORT_EXISTS}" ]]; then sudo add-apt-repository "${BACKPORT}" -y fi sudo apt-get update sudo apt-get install haproxy -t trusty-backports fi fi if is_fedora || is_suse; then install_package haproxy fi } function neutron_lbaas_configure_common { if is_service_enabled $LBAAS_V1 && is_service_enabled $LBAAS_V2; then die $LINENO "Do not enable both Version 1 and Version 2 of LBaaS." fi # Uses oslo config generator to generate LBaaS sample configuration files (cd $NEUTRON_LBAAS_DIR && exec ./tools/generate_config_file_samples.sh) cp $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf.sample $NEUTRON_LBAAS_CONF if is_service_enabled $LBAAS_V1; then inicomment $NEUTRON_LBAAS_CONF service_providers service_provider iniadd $NEUTRON_LBAAS_CONF service_providers service_provider $NEUTRON_LBAAS_SERVICE_PROVIDERV1 elif is_service_enabled $LBAAS_V2; then inicomment $NEUTRON_LBAAS_CONF service_providers service_provider iniadd $NEUTRON_LBAAS_CONF service_providers service_provider $NEUTRON_LBAAS_SERVICE_PROVIDERV2 fi if is_service_enabled $LBAAS_V1; then _neutron_service_plugin_class_add $LBAASV1_PLUGIN iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES elif is_service_enabled $LBAAS_V2; then _neutron_service_plugin_class_add $LBAASV2_PLUGIN iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES fi # Ensure config is set up properly for authentication neutron-lbaas iniset $NEUTRON_LBAAS_CONF service_auth auth_uri $AUTH_URI iniset 
$NEUTRON_LBAAS_CONF service_auth admin_tenant_name $ADMIN_TENANT_NAME iniset $NEUTRON_LBAAS_CONF service_auth admin_user $ADMIN_USER iniset $NEUTRON_LBAAS_CONF service_auth admin_password $ADMIN_PASSWORD iniset $NEUTRON_LBAAS_CONF service_auth auth_version $AUTH_VERSION # Ensure config is set up properly for authentication neutron iniset $NEUTRON_CONF service_auth auth_uri $AUTH_URI iniset $NEUTRON_CONF service_auth admin_tenant_name $ADMIN_TENANT_NAME iniset $NEUTRON_CONF service_auth admin_user $ADMIN_USER iniset $NEUTRON_CONF service_auth admin_password $ADMIN_PASSWORD iniset $NEUTRON_CONF service_auth auth_version $AUTH_VERSION _neutron_deploy_rootwrap_filters $NEUTRON_LBAAS_DIR $NEUTRON_BIN_DIR/neutron-db-manage --service lbaas --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head } function neutron_lbaas_configure_agent { mkdir -p $LBAAS_AGENT_CONF_PATH cp $NEUTRON_LBAAS_DIR/etc/lbaas_agent.ini.sample $LBAAS_AGENT_CONF_FILENAME # ovs_use_veth needs to be set before the plugin configuration # occurs to allow plugins to override the setting. iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT ovs_use_veth $Q_OVS_USE_VETH neutron_plugin_setup_interface_driver $LBAAS_AGENT_CONF_FILENAME if is_fedora; then iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT user_group "nobody" iniset $LBAAS_AGENT_CONF_FILENAME haproxy user_group "nobody" fi } function neutron_lbaas_start { local is_run_process=True if is_service_enabled $LBAAS_V1; then LBAAS_VERSION="q-lbaas" AGENT_LBAAS_BINARY=${AGENT_LBAASV1_BINARY} elif is_service_enabled $LBAAS_V2; then LBAAS_VERSION="q-lbaasv2" AGENT_LBAAS_BINARY=${AGENT_LBAASV2_BINARY} # Octavia doesn't need the LBaaS V2 service running. If Octavia is the # only provider then don't run the process. 
if [[ "$NEUTRON_LBAAS_SERVICE_PROVIDERV2" == "$NEUTRON_LBAAS_SERVICE_PROVIDERV2_OCTAVIA" ]]; then is_run_process=False fi fi if [[ "$is_run_process" == "True" ]] ; then run_process $LBAAS_VERSION "python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_LBAAS_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME" fi } function neutron_lbaas_stop { pids=$(ps aux | awk '/haproxy/ { print $2 }') [ ! -z "$pids" ] && sudo kill $pids } function neutron_lbaas_cleanup { # delete all namespaces created by neutron-lbaas for ns in $(sudo ip netns list | grep -o -E '(qlbaas|nlbaas)-[0-9a-f-]*'); do sudo ip netns delete ${ns} done } # check for service enabled if is_service_enabled $LBAAS_ANY; then if ! is_service_enabled q-svc; then die "The neutron q-svc service must be enabled to use $LBAAS_ANY" fi if [[ "$1" == "stack" && "$2" == "install" ]]; then # Perform installation of service source echo_summary "Installing neutron-lbaas" neutron_agent_lbaas_install_agent_packages neutron_lbaas_install elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then # Configure after the other layer 1 and 2 services have been configured echo_summary "Configuring neutron-lbaas" neutron_lbaas_configure_common neutron_lbaas_configure_agent elif [[ "$1" == "stack" && "$2" == "extra" ]]; then # Initialize and start the LBaaS service echo_summary "Initializing neutron-lbaas" neutron_lbaas_start fi fi if [[ "$1" == "unstack" ]]; then # Shut down LBaaS services neutron_lbaas_stop fi if [[ "$1" == "clean" ]]; then # Remove state and transient data # Remember clean.sh first calls unstack.sh neutron_lbaas_cleanup fi neutron-lbaas-8.0.0/devstack/README.md0000664000567000056710000000440212701407726020523 0ustar jenkinsjenkins00000000000000This directory contains the neutron-lbaas devstack plugin. 
To configure the neutron load balancer, in the [[local|localrc]] section, you will need to enable the neutron-lbaas devstack plugin and enable the LBaaS service by editing the [[local|localrc]] section of your local.conf file. Octavia is the LBaaS V2 reference service provider and is used in the examples below. Enabling another service provider, such as the Haproxy, is also supported. This can be done by enabling the driver plugin, if applicable, and setting the appropriate service provider value for NEUTRON_LBAAS_SERVICE_PROVIDERV2. In addition, you can enable multiple service providers by enabling the applicable driver plugins and space-delimiting the service provider values in NEUTRON_LBAAS_SERVICE_PROVIDERV2. 1) Enable the plugins To enable the plugin, add a line of the form: enable_plugin neutron-lbaas [GITREF] enable_plugin octavia [GITREF] where is the URL of a neutron-lbaas repository is the URL of a octavia repository [GITREF] is an optional git ref (branch/ref/tag). The default is master. For example enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas stable/liberty enable_plugin octavia https://git.openstack.org/openstack/octavia stable/liberty 2) Enable the LBaaS services To enable the LBaaS services, add lines in the form: ENABLED_SERVICES+= ENABLED_SERVICES+= where is "q-lbaasv1" for LBaaS Version 1, or "q-lbaasv2" for LBaaS Version 2. "q-lbaas" is synonymous with "q-lbaasv1". are "octavia" the Octavia driver, "o-cw" the Octavia Controller Worker, "o-hk" the Octavia housekeeping manager, "o-hm" the Octavia Health Manager, and "o-api" the Octavia API service. to the [[local|localrc]] section of local.conf For example # For LBaaS V2 ENABLED_SERVICES+=,q-lbaasv2 ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api For more information, see the "Externally Hosted Plugins" section of http://docs.openstack.org/developer/devstack/plugins.html. 
neutron-lbaas-8.0.0/devstack/files/0000775000567000056710000000000012701410110020323 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/devstack/files/debs/0000775000567000056710000000000012701410110021240 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/devstack/files/debs/neutron-lbaas0000664000567000056710000000003312701407726023754 0ustar jenkinsjenkins00000000000000software-properties-common neutron-lbaas-8.0.0/devstack/samples/0000775000567000056710000000000012701410110020665 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/devstack/samples/local.conf0000775000567000056710000000550212701407726022656 0ustar jenkinsjenkins00000000000000# Sample ``local.conf`` that builds a devstack with neutron LBaaS Version 2 # NOTE: Copy this file to the root DevStack directory for it to work properly. # ``local.conf`` is a user-maintained settings file that is sourced from ``stackrc``. # This gives it the ability to override any variables set in ``stackrc``. # Also, most of the settings in ``stack.sh`` are written to only be set if no # value has already been set; this lets ``local.conf`` effectively override the # default values. # The ``localrc`` section replaces the old ``localrc`` configuration file. # Note that if ``localrc`` is present it will be used in favor of this section. [[local|localrc]] # The name of the RECLONE environment variable is a bit misleading. It doesn't actually # reclone repositories, rather it uses git fetch to make sure the repos are current. RECLONE=True # Load Barbican (optional) # enable_plugin barbican https://review.openstack.org/openstack/barbican # Load the external LBaaS plugin. 
enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas enable_plugin octavia https://github.com/openstack/octavia LIBS_FROM_GIT+=python-neutronclient DATABASE_PASSWORD=password ADMIN_PASSWORD=password SERVICE_PASSWORD=password SERVICE_TOKEN=password RABBIT_PASSWORD=password # Enable Logging LOGFILE=$DEST/logs/stack.sh.log VERBOSE=True LOG_COLOR=True SCREEN_LOGDIR=$DEST/logs # Pre-requisite ENABLED_SERVICES=rabbit,mysql,key # Horizon ENABLED_SERVICES+=,horizon # Nova ENABLED_SERVICES+=,n-api,n-crt,n-obj,n-cpu,n-cond,n-sch IMAGE_URLS+=",http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img" ENABLED_SERVICES+=,g-api,g-reg # Neutron ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta # Enable LBaaS V2 ENABLED_SERVICES+=,q-lbaasv2 # Cinder ENABLED_SERVICES+=,c-api,c-vol,c-sch # Octavia ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api # enable DVR Q_PLUGIN=ml2 Q_ML2_TENANT_NETWORK_TYPE=vxlan Q_DVR_MODE=dvr_snat LOGFILE=$DEST/logs/stack.sh.log # Old log files are automatically removed after 7 days to keep things neat. Change # the number of days by setting ``LOGDAYS``. LOGDAYS=2 # Swift # ----- # Swift is now used as the back-end for the S3-like object store. If Nova's # objectstore (``n-obj`` in ``ENABLED_SERVICES``) is enabled, it will NOT # run if Swift is enabled. Setting the hash value is required and you will # be prompted for it if Swift is enabled so just set it to something already: SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5 # For development purposes the default of 3 replicas is usually not required. # Set this to 1 to save some resources: SWIFT_REPLICAS=1 # The data for Swift is stored by default in (``$DEST/data/swift``), # or (``$DATA_DIR/swift``) if ``DATA_DIR`` has been set, and can be # moved by setting ``SWIFT_DATA_DIR``. The directory will be created # if it does not exist. 
SWIFT_DATA_DIR=$DEST/data neutron-lbaas-8.0.0/devstack/samples/local.sh0000775000567000056710000000736112701407726022350 0ustar jenkinsjenkins00000000000000#!/usr/bin/env bash # Sample ``local.sh`` that configures two simple webserver instances and sets # up a Neutron LBaaS Version 2 loadbalancer. # Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0") && pwd) BOOT_DELAY=60 # Import common functions source ${TOP_DIR}/functions # Use openrc + stackrc for settings source ${TOP_DIR}/stackrc # Destination path for installation ``DEST`` DEST=${DEST:-/opt/stack} # Additional Variables IMAGE_NAME="cirros" SUBNET_NAME="private-subnet" if is_service_enabled nova; then # Get OpenStack demo user auth source ${TOP_DIR}/openrc demo demo # Create an SSH key to use for the instances HOST=$(echo $HOSTNAME | cut -d"." -f1) DEVSTACK_LBAAS_SSH_KEY_NAME=${HOST}_DEVSTACK_LBAAS_SSH_KEY_RSA DEVSTACK_LBAAS_SSH_KEY_DIR=${TOP_DIR} DEVSTACK_LBAAS_SSH_KEY=${DEVSTACK_LBAAS_SSH_KEY_DIR}/${DEVSTACK_LBAAS_SSH_KEY_NAME} rm -f ${DEVSTACK_LBAAS_SSH_KEY}.pub ${DEVSTACK_LBAAS_SSH_KEY} ssh-keygen -b 2048 -t rsa -f ${DEVSTACK_LBAAS_SSH_KEY} -N "" nova keypair-add --pub_key=${DEVSTACK_LBAAS_SSH_KEY}.pub ${DEVSTACK_LBAAS_SSH_KEY_NAME} # Add tcp/22,80 and icmp to default security group nova secgroup-add-rule default tcp 22 22 0.0.0.0/0 nova secgroup-add-rule default tcp 80 80 0.0.0.0/0 nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0 # Get Image id IMAGE_ID=$(glance image-list | awk -v image=${IMAGE_NAME} '$0 ~ image {print $2}' | head -1) # Get Network id NET_ID=$(neutron subnet-show ${SUBNET_NAME} | awk '/network_id/ {print $4}') # Boot some instances NOVA_BOOT_ARGS="--key-name ${DEVSTACK_LBAAS_SSH_KEY_NAME} --image ${IMAGE_ID} --flavor 1 --nic net-id=$NET_ID" nova boot ${NOVA_BOOT_ARGS} node1 nova boot ${NOVA_BOOT_ARGS} node2 echo "Waiting ${BOOT_DELAY} seconds for instances to boot" sleep ${BOOT_DELAY} # Get Instances IP Addresses SUBNET_ID=$(neutron subnet-show ${SUBNET_NAME} | awk '/ 
id / {print $4}') IP1=$(neutron port-list --device_owner compute:None -c fixed_ips | grep ${SUBNET_ID} | cut -d'"' -f8 | sed -n 1p) IP2=$(neutron port-list --device_owner compute:None -c fixed_ips | grep ${SUBNET_ID} | cut -d'"' -f8 | sed -n 2p) ssh-keygen -R ${IP1} ssh-keygen -R ${IP2} # Run a simple web server on the instances scp -i ${DEVSTACK_LBAAS_SSH_KEY} -o StrictHostKeyChecking=no ${TOP_DIR}/webserver.sh cirros@${IP1}:webserver.sh scp -i ${DEVSTACK_LBAAS_SSH_KEY} -o StrictHostKeyChecking=no ${TOP_DIR}/webserver.sh cirros@${IP2}:webserver.sh screen_process node1 "ssh -i ${DEVSTACK_LBAAS_SSH_KEY} -o StrictHostKeyChecking=no cirros@${IP1} ./webserver.sh" screen_process node2 "ssh -i ${DEVSTACK_LBAAS_SSH_KEY} -o StrictHostKeyChecking=no cirros@${IP2} ./webserver.sh" fi function wait_for_lb_active { echo "Waiting for $1 to become ACTIVE..." status=$(neutron lbaas-loadbalancer-show $1 | awk '/provisioning_status/ {print $4}') while [ "$status" != "ACTIVE" ] do sleep 2 status=$(neutron lbaas-loadbalancer-show $1 | awk '/provisioning_status/ {print $4}') if [ $status == "ERROR" ] then echo "$1 ERRORED. Exiting." 
exit 1; fi done } if is_service_enabled q-lbaasv2; then neutron lbaas-loadbalancer-create --name lb1 ${SUBNET_NAME} wait_for_lb_active "lb1" neutron lbaas-listener-create --loadbalancer lb1 --protocol HTTP --protocol-port 80 --name listener1 sleep 10 neutron lbaas-pool-create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1 sleep 10 neutron lbaas-member-create --subnet ${SUBNET_NAME} --address ${IP1} --protocol-port 80 pool1 sleep 10 neutron lbaas-member-create --subnet ${SUBNET_NAME} --address ${IP2} --protocol-port 80 pool1 fi neutron-lbaas-8.0.0/devstack/samples/README-Vagrant.md0000664000567000056710000000225512701407726023573 0ustar jenkinsjenkins00000000000000This file describes how to use Vagrant (http://www.vagrantup.com) to create a devstack virtual machine that contains two nova instances running a simple web server and a working Neutron LBaaS Version 2 load balancer. 1) Install vagrant on your host machine. Vagrant is available for Windows, Mac OS, and most Linux distributions. Download and install the package appropriate for your system. On Ubuntu, simply type: sudo apt-get install vagrant 2) copy 'Vagrantfile' from this directory to any appropriate directory and run 'vagrant up': mkdir $HOME/lbaas-vagrant # or any other appropriate directory cp Vagrantfile $HOME/lbaas-vagrant cd $HOME/lbaas-vagrant vagrant up 3) Wait for the vagrant VM to boot and install, typically 20-30 minutes 4) SSH into the vagrant box vagrant ssh 5) Determine the loadbalancer IP: source openrc admin admin neutron lbaas-loadbalancer-show lb1 | grep vip_address 6) make HTTP requests to test your load balancer: curl where is the VIP address for lb1. The subsequent invocations of "curl " should demonstrate that the load balancer is alternating between two member nodes. 
neutron-lbaas-8.0.0/devstack/samples/README.md0000664000567000056710000000167012701407726022173 0ustar jenkinsjenkins00000000000000This directory contains sample files for configuring neutron LBaaS using devstack. By copying these files into the main devstack directory (not the neutron-lbaas/devstack directory directly above this one), and running stack.sh, you will create a fully functioning OpenStack installation running a neutron-lbaas load balancer. 1) Copy the files into place: cp local.conf local.sh webserver.sh where is the main devstack directory. Note: this is not neutron-lbaas/devstack. 2) Build your devstack: cd ./stack.sh 3) Test your loadbalancer: a) Determine the loadbalancer IP: source openrc admin admin neutron lbaas-loadbalancer-show lb1 | grep vip_address curl where is the VIP address for lb1. The subsequent invocations of "curl " should demonstrate that the load balancer is alternating between two member nodes. neutron-lbaas-8.0.0/devstack/samples/webserver.sh0000775000567000056710000000042612701407726023255 0ustar jenkinsjenkins00000000000000#!/bin/sh MYIP=$(/sbin/ifconfig eth0|grep 'inet addr'|awk -F: '{print $2}'| awk '{print $1}'); OUTPUT_STR="Welcome to $MYIP\r" OUTPUT_LEN=${#OUTPUT_STR} while true; do echo -e "HTTP/1.0 200 OK\r\nContent-Length: ${OUTPUT_LEN}\r\n\r\n${OUTPUT_STR}" | sudo nc -l -p 80 done neutron-lbaas-8.0.0/devstack/samples/Vagrantfile0000775000567000056710000000644512701407726023111 0ustar jenkinsjenkins00000000000000# -*- mode: ruby -*- # vi: set ft=ruby : # All Vagrant configuration is done below. The "2" in Vagrant.configure # configures the configuration version (we support older styles for # backwards compatibility). Please don't change it unless you know what # you're doing. Vagrant.configure(2) do |config| # The most common configuration options are documented and commented below. # For a complete reference, please see the online documentation at # https://docs.vagrantup.com. 
# Every Vagrant development environment requires a box. You can search for # boxes at https://atlas.hashicorp.com/search. config.vm.box = "ubuntu/trusty64" # Disable automatic box update checking. If you disable this, then # boxes will only be checked for updates when the user runs # `vagrant box outdated`. This is not recommended. # config.vm.box_check_update = false # Create a forwarded port mapping which allows access to a specific port # within the machine from a port on the host machine. In the example below, # accessing "localhost:8080" will access port 80 on the guest machine. #config.vm.network "forwarded_port", guest: 80, host: 8080 # Create a private network, which allows host-only access to the machine # using a specific IP. # config.vm.network "private_network", ip: "192.168.33.10" # Create a public network, which generally matched to bridged network. # Bridged networks make the machine appear as another physical device on # your network. # config.vm.network "public_network" # Share an additional folder to the guest VM. The first argument is # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third # argument is a set of non-required options. # config.vm.synced_folder "../data", "/vagrant_data" # Provider-specific configuration so you can fine-tune various # backing providers for Vagrant. These expose provider-specific options. # Example for VirtualBox: # config.vm.provider "virtualbox" do |vb| # Display the VirtualBox GUI when booting the machine vb.gui = true # Customize the amount of memory on the VM: vb.memory = "8192" end # # View the documentation for the provider you are using for more # information on available options # Define a Vagrant Push strategy for pushing to Atlas. Other push strategies # such as FTP and Heroku are also available. See the documentation at # https://docs.vagrantup.com/v2/push/atlas.html for more information. 
# config.push.define "atlas" do |push| # push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME" # end # Enable provisioning with a shell script. Additional provisioners such as # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the # documentation for more information about their specific syntax and use. config.vm.provision "shell", privileged: false, inline: <<-SHELL #!/usr/bin/env bash sudo apt-get update sudo apt-get -y upgrade sudo apt-get -y install git git clone https://git.openstack.org/openstack-dev/devstack git clone https://git.openstack.org/openstack/neutron-lbaas git clone https://github.com/openstack/octavia cd neutron-lbaas/devstack/samples cp local.* webserver.sh ~/devstack cd ~/devstack ./stack.sh SHELL end neutron-lbaas-8.0.0/etc/0000775000567000056710000000000012701410110016170 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/etc/neutron/0000775000567000056710000000000012701410110017662 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/etc/neutron/rootwrap.d/0000775000567000056710000000000012701410110021761 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/etc/neutron/rootwrap.d/lbaas-haproxy.filters0000664000567000056710000000123112701407726026145 0ustar jenkinsjenkins00000000000000# neutron-rootwrap command filters for nodes on which neutron is # expected to control network # # This file should be owned by (and only-writeable by) the root user # format seems to be # cmd-name: filter-name, raw-command, user, args [Filters] # haproxy haproxy: CommandFilter, haproxy, root # lbaas-agent uses kill as well, that's handled by the generic KillFilter kill_haproxy_usr: KillFilter, root, /usr/sbin/haproxy, -9, -HUP ovs-vsctl: CommandFilter, ovs-vsctl, root mm-ctl: CommandFilter, mm-ctl, root # ip_lib ip: IpFilter, ip, root ip_exec: IpNetnsExecFilter, ip, root route: CommandFilter, route, root # arping arping: CommandFilter, arping, root 
neutron-lbaas-8.0.0/etc/oslo-config-generator/0000775000567000056710000000000012701410110022373 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/etc/oslo-config-generator/neutron_lbaas.conf0000664000567000056710000000014112701407726026115 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/neutron_lbaas.conf.sample wrap_width = 79 namespace = neutron.lbaas neutron-lbaas-8.0.0/etc/oslo-config-generator/services_lbaas.conf0000664000567000056710000000015212701407726026250 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/services_lbaas.conf.sample wrap_width = 79 namespace = neutron.lbaas.service neutron-lbaas-8.0.0/etc/oslo-config-generator/lbaas_agent.ini0000664000567000056710000000017112701407726025356 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/lbaas_agent.ini.sample wrap_width = 79 namespace = neutron.lbaas.agent namespace = oslo.log neutron-lbaas-8.0.0/etc/README.txt0000664000567000056710000000047512701407726017717 0ustar jenkinsjenkins00000000000000To generate the sample neutron LBaaS configuration files, run the following command from the top level of the neutron LBaaS directory: tox -e genconfig If a 'tox' environment is unavailable, then you can run the following script instead to generate the configuration files: ./tools/generate_config_file_samples.sh neutron-lbaas-8.0.0/babel.cfg0000664000567000056710000000002112701407726017157 0ustar jenkinsjenkins00000000000000[python: **.py] neutron-lbaas-8.0.0/.coveragerc0000664000567000056710000000014612701407726017562 0ustar jenkinsjenkins00000000000000[run] branch = True source = neutron_lbaas omit = neutron_lbaas/tests* [report] ignore_errors = True neutron-lbaas-8.0.0/setup.cfg0000664000567000056710000000661212701410110017243 0ustar jenkinsjenkins00000000000000[metadata] name = neutron-lbaas summary = OpenStack Networking Load Balancing as a Service description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = 
http://www.openstack.org/ classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 [files] packages = neutron_lbaas data_files = etc/neutron/rootwrap.d = etc/neutron/rootwrap.d/lbaas-haproxy.filters [global] setup-hooks = pbr.hooks.setup_hook [entry_points] console_scripts = neutron-lbaas-agent = neutron_lbaas.services.loadbalancer.agent.agent:main neutron-lbaasv2-agent = neutron_lbaas.agent.agent:main device_drivers = # These are for backwards compat with Juno loadbalancer service provider configuration values neutron.services.loadbalancer.drivers.a10networks.driver_v1.ThunderDriver = neutron_lbaas.services.loadbalancer.drivers.a10networks.driver_v1:ThunderDriver neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver = neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver:HaproxyOnHostPluginDriver neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver = neutron_lbaas.services.loadbalancer.drivers.haproxy.namespace_driver:HaproxyNSDriver neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver = neutron_lbaas.services.loadbalancer.drivers.netscaler.netscaler_driver:NetScalerPluginDriver neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver = neutron_lbaas.services.loadbalancer.drivers.radware.driver:LoadBalancerDriver loadbalancer_schedulers = neutron_lbaas.agent_scheduler.ChanceScheduler = neutron_lbaas.agent_scheduler:ChanceScheduler pool_schedulers = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler = neutron_lbaas.services.loadbalancer.agent_scheduler:ChanceScheduler neutron.services.loadbalancer.agent_scheduler.LeastPoolAgentScheduler = 
neutron_lbaas.services.loadbalancer.agent_scheduler:LeastPoolAgentScheduler neutron.service_plugins = lbaasv2 = neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPluginv2 neutron.db.alembic_migrations = neutron-lbaas = neutron_lbaas.db.migration:alembic_migrations neutron_lbaas.cert_manager.backend = barbican = neutron_lbaas.common.cert_manager.barbican_cert_manager local = neutron_lbaas.common.cert_manager.local_cert_manager oslo.config.opts = neutron.lbaas = neutron_lbaas.opts:list_opts neutron.lbaas.agent = neutron_lbaas.opts:list_agent_opts neutron.lbaas.service = neutron_lbaas.opts:list_service_opts neutron_lbaas.cert_manager.barbican_auth = barbican_acl_auth = neutron_lbaas.common.cert_manager.barbican_auth.barbican_acl:BarbicanACLAuth [build_sphinx] all_files = 1 build-dir = doc/build source-dir = doc/source [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = neutron_lbaas/locale/neutron_lbaas.pot [compile_catalog] directory = neutron_lbaas/locale domain = neutron_lbaas [update_catalog] domain = neutron_lbaas output_dir = neutron_lbaas/locale input_file = neutron_lbaas/locale/neutron_lbaas.pot [wheel] universal = 1 [egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 neutron-lbaas-8.0.0/tools/0000775000567000056710000000000012701410110016555 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/tools/pretty_tox.sh0000775000567000056710000000065612701407726021367 0ustar jenkinsjenkins00000000000000#! /bin/sh TESTRARGS=$1 #This is for supporting tempest tests in tox as the neutron-lbaas tempest tests fail when run in parallel CONCURRENCY=${OS_TESTR_CONCURRENCY:-} if [ -n "$CONCURRENCY" ] then CONCURRENCY="--concurrency=$CONCURRENCY" fi exec 3>&1 status=$(exec 4>&1 >&3; (python setup.py testr --slowest --testr-args="--subunit $TESTRARGS $CONCURRENCY"; echo $? 
>&4 ) | $(dirname $0)/subunit-trace.py -f) && exit $status neutron-lbaas-8.0.0/tools/check_i18n.py0000664000567000056710000001243412701407726021072 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import print_function import compiler import imp import os.path import sys def is_localized(node): """Check message wrapped by _()""" if isinstance(node.parent, compiler.ast.CallFunc): if isinstance(node.parent.node, compiler.ast.Name): if node.parent.node.name == '_': return True return False class ASTWalker(compiler.visitor.ASTVisitor): def default(self, node, *args): for child in node.getChildNodes(): child.parent = node compiler.visitor.ASTVisitor.default(self, node, *args) class Visitor(object): def __init__(self, filename, i18n_msg_predicates, msg_format_checkers, debug): self.filename = filename self.debug = debug self.error = 0 self.i18n_msg_predicates = i18n_msg_predicates self.msg_format_checkers = msg_format_checkers with open(filename) as f: self.lines = f.readlines() def visitConst(self, node): if not isinstance(node.value, str): return if is_localized(node): for (checker, msg) in self.msg_format_checkers: if checker(node): print('%s:%d %s: %s Error: %s' % (self.filename, node.lineno, self.lines[node.lineno - 1][:-1], checker.__name__, msg), file=sys.stderr) self.error = 1 return if debug: print('%s:%d %s: %s' % (self.filename, node.lineno, self.lines[node.lineno - 1][:-1], "Pass")) else: 
for (predicate, action, msg) in self.i18n_msg_predicates: if predicate(node): if action == 'skip': if debug: print('%s:%d %s: %s' % (self.filename, node.lineno, self.lines[node.lineno - 1][:-1], "Pass")) return elif action == 'error': print('%s:%d %s: %s Error: %s' % (self.filename, node.lineno, self.lines[node.lineno - 1][:-1], predicate.__name__, msg), file=sys.stderr) self.error = 1 return elif action == 'warn': print('%s:%d %s: %s' % (self.filename, node.lineno, self.lines[node.lineno - 1][:-1], "Warn: %s" % msg)) return print('Predicate with wrong action!', file=sys.stderr) def is_file_in_black_list(black_list, f): for f in black_list: if os.path.abspath(input_file).startswith( os.path.abspath(f)): return True return False def check_i18n(input_file, i18n_msg_predicates, msg_format_checkers, debug): input_mod = compiler.parseFile(input_file) v = compiler.visitor.walk(input_mod, Visitor(input_file, i18n_msg_predicates, msg_format_checkers, debug), ASTWalker()) return v.error if __name__ == '__main__': input_path = sys.argv[1] cfg_path = sys.argv[2] try: cfg_mod = imp.load_source('', cfg_path) except Exception: print("Load cfg module failed", file=sys.stderr) sys.exit(1) i18n_msg_predicates = cfg_mod.i18n_msg_predicates msg_format_checkers = cfg_mod.msg_format_checkers black_list = cfg_mod.file_black_list debug = False if len(sys.argv) > 3: if sys.argv[3] == '-d': debug = True if os.path.isfile(input_path): sys.exit(check_i18n(input_path, i18n_msg_predicates, msg_format_checkers, debug)) error = 0 for dirpath, dirs, files in os.walk(input_path): for f in files: if not f.endswith('.py'): continue input_file = os.path.join(dirpath, f) if is_file_in_black_list(black_list, input_file): continue if check_i18n(input_file, i18n_msg_predicates, msg_format_checkers, debug): error = 1 sys.exit(error) neutron-lbaas-8.0.0/tools/subunit-trace.py0000775000567000056710000002454412701407726021753 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2014 
Hewlett-Packard Development Company, L.P. # Copyright 2014 Samsung Electronics # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Trace a subunit stream in reasonable detail and high accuracy.""" import argparse import functools import os import re import sys import mimeparse import subunit import testtools DAY_SECONDS = 60 * 60 * 24 FAILS = [] RESULTS = {} class Starts(testtools.StreamResult): def __init__(self, output): super(Starts, self).__init__() self._output = output def startTestRun(self): self._neednewline = False self._emitted = set() def status(self, test_id=None, test_status=None, test_tags=None, runnable=True, file_name=None, file_bytes=None, eof=False, mime_type=None, route_code=None, timestamp=None): super(Starts, self).status( test_id, test_status, test_tags=test_tags, runnable=runnable, file_name=file_name, file_bytes=file_bytes, eof=eof, mime_type=mime_type, route_code=route_code, timestamp=timestamp) if not test_id: if not file_bytes: return if not mime_type or mime_type == 'test/plain;charset=utf8': mime_type = 'text/plain; charset=utf-8' primary, sub, parameters = mimeparse.parse_mime_type(mime_type) content_type = testtools.content_type.ContentType( primary, sub, parameters) content = testtools.content.Content( content_type, lambda: [file_bytes]) text = content.as_text() if text and text[-1] not in '\r\n': self._neednewline = True self._output.write(text) elif test_status == 'inprogress' and test_id not in self._emitted: if 
self._neednewline: self._neednewline = False self._output.write('\n') worker = '' for tag in test_tags or (): if tag.startswith('worker-'): worker = '(' + tag[7:] + ') ' if timestamp: timestr = timestamp.isoformat() else: timestr = '' self._output.write('%s: %s%s [start]\n' % (timestr, worker, test_id)) self._emitted.add(test_id) def cleanup_test_name(name, strip_tags=True, strip_scenarios=False): """Clean up the test name for display. By default we strip out the tags in the test because they don't help us in identifying the test that is run to it's result. Make it possible to strip out the testscenarios information (not to be confused with tempest scenarios) however that's often needed to indentify generated negative tests. """ if strip_tags: tags_start = name.find('[') tags_end = name.find(']') if tags_start > 0 and tags_end > tags_start: newname = name[:tags_start] newname += name[tags_end + 1:] name = newname if strip_scenarios: tags_start = name.find('(') tags_end = name.find(')') if tags_start > 0 and tags_end > tags_start: newname = name[:tags_start] newname += name[tags_end + 1:] name = newname return name def get_duration(timestamps): start, end = timestamps if not start or not end: duration = '' else: delta = end - start duration = '%d.%06ds' % ( delta.days * DAY_SECONDS + delta.seconds, delta.microseconds) return duration def find_worker(test): for tag in test['tags']: if tag.startswith('worker-'): return int(tag[7:]) return 'NaN' # Print out stdout/stderr if it exists, always def print_attachments(stream, test, all_channels=False): """Print out subunit attachments. Print out subunit attachments that contain content. This runs in 2 modes, one for successes where we print out just stdout and stderr, and an override that dumps all the attachments. 
""" channels = ('stdout', 'stderr') for name, detail in test['details'].items(): # NOTE(sdague): the subunit names are a little crazy, and actually # are in the form pythonlogging:'' (with the colon and quotes) name = name.split(':')[0] if detail.content_type.type == 'test': detail.content_type.type = 'text' if (all_channels or name in channels) and detail.as_text(): title = "Captured %s:" % name stream.write("\n%s\n%s\n" % (title, ('~' * len(title)))) # indent attachment lines 4 spaces to make them visually # offset for line in detail.as_text().split('\n'): stream.write(" %s\n" % line) def show_outcome(stream, test, print_failures=False, failonly=False): global RESULTS status = test['status'] # TODO(sdague): ask lifeless why on this? if status == 'exists': return worker = find_worker(test) name = cleanup_test_name(test['id']) duration = get_duration(test['timestamps']) if worker not in RESULTS: RESULTS[worker] = [] RESULTS[worker].append(test) # don't count the end of the return code as a fail if name == 'process-returncode': return if status == 'fail': FAILS.append(test) stream.write('{%s} %s [%s] ... FAILED\n' % ( worker, name, duration)) if not print_failures: print_attachments(stream, test, all_channels=True) elif not failonly: if status == 'success': stream.write('{%s} %s [%s] ... ok\n' % ( worker, name, duration)) print_attachments(stream, test) elif status == 'skip': stream.write('{%s} %s ... SKIPPED: %s\n' % ( worker, name, test['details']['reason'].as_text())) else: stream.write('{%s} %s [%s] ... %s\n' % ( worker, name, duration, test['status'])) if not print_failures: print_attachments(stream, test, all_channels=True) stream.flush() def print_fails(stream): """Print summary failure report. Currently unused, however there remains debate on inline vs. at end reporting, so leave the utility function for later use. 
""" if not FAILS: return stream.write("\n==============================\n") stream.write("Failed %s tests - output below:" % len(FAILS)) stream.write("\n==============================\n") for f in FAILS: stream.write("\n%s\n" % f['id']) stream.write("%s\n" % ('-' * len(f['id']))) print_attachments(stream, f, all_channels=True) stream.write('\n') def count_tests(key, value): count = 0 for k, v in RESULTS.items(): for item in v: if key in item: if re.search(value, item[key]): count += 1 return count def run_time(): runtime = 0.0 for k, v in RESULTS.items(): for test in v: runtime += float(get_duration(test['timestamps']).strip('s')) return runtime def worker_stats(worker): tests = RESULTS[worker] num_tests = len(tests) delta = tests[-1]['timestamps'][1] - tests[0]['timestamps'][0] return num_tests, delta def print_summary(stream): stream.write("\n======\nTotals\n======\n") stream.write("Run: %s in %s sec.\n" % (count_tests('status', '.*'), run_time())) stream.write(" - Passed: %s\n" % count_tests('status', 'success')) stream.write(" - Skipped: %s\n" % count_tests('status', 'skip')) stream.write(" - Failed: %s\n" % count_tests('status', 'fail')) # we could have no results, especially as we filter out the process-codes if RESULTS: stream.write("\n==============\nWorker Balance\n==============\n") for w in range(max(RESULTS.keys()) + 1): if w not in RESULTS: stream.write( " - WARNING: missing Worker %s! 
" "Race in testr accounting.\n" % w) else: num, time = worker_stats(w) stream.write(" - Worker %s (%s tests) => %ss\n" % (w, num, time)) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--no-failure-debug', '-n', action='store_true', dest='print_failures', help='Disable printing failure ' 'debug information in realtime') parser.add_argument('--fails', '-f', action='store_true', dest='post_fails', help='Print failure debug ' 'information after the stream is proccesed') parser.add_argument('--failonly', action='store_true', dest='failonly', help="Don't print success items", default=( os.environ.get('TRACE_FAILONLY', False) is not False)) return parser.parse_args() def main(): args = parse_args() stream = subunit.ByteStreamToStreamResult( sys.stdin, non_subunit_name='stdout') starts = Starts(sys.stdout) outcomes = testtools.StreamToDict( functools.partial(show_outcome, sys.stdout, print_failures=args.print_failures, failonly=args.failonly )) summary = testtools.StreamSummary() result = testtools.CopyStreamResult([starts, outcomes, summary]) result.startTestRun() try: stream.run(result) finally: result.stopTestRun() if count_tests('status', '.*') == 0: print("The test run didn't actually run any tests") return 1 if args.post_fails: print_fails(sys.stdout) print_summary(sys.stdout) return (0 if summary.wasSuccessful() else 1) if __name__ == '__main__': sys.exit(main()) neutron-lbaas-8.0.0/tools/generate_config_file_samples.sh0000775000567000056710000000144012701407726025000 0ustar jenkinsjenkins00000000000000#!/bin/sh # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. set -e GEN_CMD=oslo-config-generator if ! type "$GEN_CMD" > /dev/null; then echo "ERROR: $GEN_CMD not installed on the system." exit 1 fi for file in `ls etc/oslo-config-generator/*`; do $GEN_CMD --config-file=$file done set -x neutron-lbaas-8.0.0/tools/check_i18n_test_case.txt0000664000567000056710000000264512701407726023316 0ustar jenkinsjenkins00000000000000# test-case for check_i18n.py # python check_i18n.py check_i18n.txt -d # message format checking # capital checking msg = _("hello world, error") msg = _("hello world_var, error") msg = _('file_list xyz, pass') msg = _("Hello world, pass") # format specifier checking msg = _("Hello %s world %d, error") msg = _("Hello %s world, pass") msg = _("Hello %(var1)s world %(var2)s, pass") # message has been localized # is_localized msg = _("Hello world, pass") msg = _("Hello world, pass") % var LOG.debug(_('Hello world, pass')) LOG.info(_('Hello world, pass')) raise x.y.Exception(_('Hello world, pass')) raise Exception(_('Hello world, pass')) # message need be localized # is_log_callfunc LOG.debug('hello world, error') LOG.debug('hello world, error' % xyz) sys.append('hello world, warn') # is_log_i18n_msg_with_mod LOG.debug(_('Hello world, error') % xyz) # default warn msg = 'hello world, warn' msg = 'hello world, warn' % var # message needn't be localized # skip only one word msg = '' msg = "hello,pass" # skip dict msg = {'hello world, pass': 1} # skip list msg = ["hello world, pass"] # skip subscript msg['hello world, pass'] # skip xml marker msg = ", pass" # skip sql statement msg = "SELECT * FROM xyz WHERE hello=1, 
pass" msg = "select * from xyz, pass" # skip add statement msg = 'hello world' + e + 'world hello, pass' # skip doc string """ Hello world, pass """ class Msg: pass neutron-lbaas-8.0.0/tools/install_venv_common.py0000664000567000056710000001350712701407726023234 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Provides methods needed by installation script for OpenStack development virtual environments. Since this script is used to bootstrap a virtualenv from the system's Python environment, it should be kept strictly compatible with Python 2.6. Synced in from openstack-common """ from __future__ import print_function import optparse import os import subprocess import sys class InstallVenv(object): def __init__(self, root, venv, requirements, test_requirements, py_version, project): self.root = root self.venv = venv self.requirements = requirements self.test_requirements = test_requirements self.py_version = py_version self.project = project def die(self, message, *args): print(message % args, file=sys.stderr) sys.exit(1) def check_python_version(self): if sys.version_info < (2, 6): self.die("Need Python Version >= 2.6") def run_command_with_code(self, cmd, redirect_output=True, check_exit_code=True): """Runs a command in an out-of-process shell. Returns the output of that command. Working directory is self.root. 
""" if redirect_output: stdout = subprocess.PIPE else: stdout = None proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) output = proc.communicate()[0] if check_exit_code and proc.returncode != 0: self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) return (output, proc.returncode) def run_command(self, cmd, redirect_output=True, check_exit_code=True): return self.run_command_with_code(cmd, redirect_output, check_exit_code)[0] def get_distro(self): if (os.path.exists('/etc/fedora-release') or os.path.exists('/etc/redhat-release')): return Fedora( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) else: return Distro( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) def check_dependencies(self): self.get_distro().install_virtualenv() def create_virtualenv(self, no_site_packages=True): """Creates the virtual environment and installs PIP. Creates the virtual environment and installs PIP only into the virtual environment. 
""" if not os.path.isdir(self.venv): print('Creating venv...', end=' ') if no_site_packages: self.run_command(['virtualenv', '-q', '--no-site-packages', self.venv]) else: self.run_command(['virtualenv', '-q', self.venv]) print('done.') else: print("venv already exists...") pass def pip_install(self, *args): self.run_command(['tools/with_venv.sh', 'pip', 'install', '--upgrade'] + list(args), redirect_output=False) def install_dependencies(self): print('Installing dependencies with pip (this can take a while)...') # First things first, make sure our venv has the latest pip and # setuptools and pbr self.pip_install('pip>=1.4') self.pip_install('setuptools') self.pip_install('pbr') self.pip_install('-r', self.requirements, '-r', self.test_requirements) def parse_args(self, argv): """Parses command-line arguments.""" parser = optparse.OptionParser() parser.add_option('-n', '--no-site-packages', action='store_true', help="Do not inherit packages from global Python " "install.") return parser.parse_args(argv[1:])[0] class Distro(InstallVenv): def check_cmd(self, cmd): return bool(self.run_command(['which', cmd], check_exit_code=False).strip()) def install_virtualenv(self): if self.check_cmd('virtualenv'): return if self.check_cmd('easy_install'): print('Installing virtualenv via easy_install...', end=' ') if self.run_command(['easy_install', 'virtualenv']): print('Succeeded') return else: print('Failed') self.die('ERROR: virtualenv not found.\n\n%s development' ' requires virtualenv, please install it using your' ' favorite package management tool' % self.project) class Fedora(Distro): """This covers all Fedora-based distributions. 
Includes: Fedora, RHEL, CentOS, Scientific Linux """ def check_pkg(self, pkg): return self.run_command_with_code(['rpm', '-q', pkg], check_exit_code=False)[1] == 0 def install_virtualenv(self): if self.check_cmd('virtualenv'): return if not self.check_pkg('python-virtualenv'): self.die("Please install 'python-virtualenv'.") super(Fedora, self).install_virtualenv() neutron-lbaas-8.0.0/tools/install_venv.py0000664000567000056710000000440012701407726021654 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2010 OpenStack Foundation. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Installation script for Neutron's development virtualenv """ from __future__ import print_function import os import sys import install_venv_common as install_venv def print_help(): help = """ Neutron development environment setup is complete. Neutron development uses virtualenv to track and manage Python dependencies while in development and testing. To activate the Neutron virtualenv for the extent of your current shell session you can run: $ source .venv/bin/activate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running: $ tools/with_venv.sh Also, make test will automatically use the virtualenv. 
""" print(help) def main(argv): root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) venv = os.path.join(root, '.venv') pip_requires = os.path.join(root, 'requirements.txt') test_requires = os.path.join(root, 'test-requirements.txt') py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) project = 'Neutron' install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, py_version, project) options = install.parse_args(argv) install.check_python_version() install.check_dependencies() install.create_virtualenv(no_site_packages=options.no_site_packages) install.install_dependencies() print_help() if __name__ == '__main__': main(sys.argv) neutron-lbaas-8.0.0/tools/with_venv.sh0000775000567000056710000000132312701407726021147 0ustar jenkinsjenkins00000000000000#!/bin/bash # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
TOOLS=`dirname $0` VENV=$TOOLS/../.venv source $VENV/bin/activate && "$@" neutron-lbaas-8.0.0/tools/i18n_cfg.py0000664000567000056710000000664312701407726020561 0ustar jenkinsjenkins00000000000000import compiler import re def is_log_callfunc(n): """LOG.xxx('hello %s' % xyz) and LOG('hello')""" if isinstance(n.parent, compiler.ast.Mod): n = n.parent if isinstance(n.parent, compiler.ast.CallFunc): if isinstance(n.parent.node, compiler.ast.Getattr): if isinstance(n.parent.node.getChildNodes()[0], compiler.ast.Name): if n.parent.node.getChildNodes()[0].name == 'LOG': return True return False def is_log_i18n_msg_with_mod(n): """LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)""" if not isinstance(n.parent.parent, compiler.ast.Mod): return False n = n.parent.parent if isinstance(n.parent, compiler.ast.CallFunc): if isinstance(n.parent.node, compiler.ast.Getattr): if isinstance(n.parent.node.getChildNodes()[0], compiler.ast.Name): if n.parent.node.getChildNodes()[0].name == 'LOG': return True return False def is_wrong_i18n_format(n): """Check _('hello %s' % xyz)""" if isinstance(n.parent, compiler.ast.Mod): n = n.parent if isinstance(n.parent, compiler.ast.CallFunc): if isinstance(n.parent.node, compiler.ast.Name): if n.parent.node.name == '_': return True return False """ Used for check message need be localized or not. 
(predicate_func, action, message) """ i18n_msg_predicates = [ # Skip ['hello world', 1] (lambda n: isinstance(n.parent, compiler.ast.List), 'skip', ''), # Skip {'hellow world', 1} (lambda n: isinstance(n.parent, compiler.ast.Dict), 'skip', ''), # Skip msg['hello world'] (lambda n: isinstance(n.parent, compiler.ast.Subscript), 'skip', ''), # Skip doc string (lambda n: isinstance(n.parent, compiler.ast.Discard), 'skip', ''), # Skip msg = "hello", in normal, message should more than one word (lambda n: len(n.value.strip().split(' ')) <= 1, 'skip', ''), # Skip msg = 'hello world' + vars + 'world hello' (lambda n: isinstance(n.parent, compiler.ast.Add), 'skip', ''), # Skip xml markers msg = "" (lambda n: len(re.compile("").findall(n.value)) > 0, 'skip', ''), # Skip sql statement (lambda n: len( re.compile("^SELECT.*FROM", flags=re.I).findall(n.value)) > 0, 'skip', ''), # LOG.xxx() (is_log_callfunc, 'error', 'Message must be localized'), # _('hello %s' % xyz) should be _('hello %s') % xyz (is_wrong_i18n_format, 'error', ("Message format was wrong, _('hello %s' % xyz) " "should be _('hello %s') % xyz")), # default (lambda n: True, 'warn', 'Message might need localized') ] """ Used for checking message format. 
(checker_func, message) """ msg_format_checkers = [ # If message contain more than on format specifier, it should use # mapping key (lambda n: len(re.compile("%[bcdeEfFgGnosxX]").findall(n.value)) > 1, "The message shouldn't contain more than one format specifier"), # Check capital (lambda n: n.value.split(' ')[0].count('_') == 0 and n.value[0].isalpha() and n.value[0].islower(), "First letter must be capital"), (is_log_i18n_msg_with_mod, 'LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)') ] file_black_list = ["./neutron/tests/unit", "./neutron/openstack", "./neutron/plugins/bigswitch/tests"] neutron-lbaas-8.0.0/tools/clean.sh0000775000567000056710000000027412701407726020224 0ustar jenkinsjenkins00000000000000#!/bin/bash rm -rf ./*.deb ./*.tar.gz ./*.dsc ./*.changes rm -rf */*.deb rm -rf ./plugins/**/build/ ./plugins/**/dist rm -rf ./plugins/**/lib/neutron_*_plugin.egg-info ./plugins/neutron-* neutron-lbaas-8.0.0/tools/tox_install.sh0000775000567000056710000000306412701407727021503 0ustar jenkinsjenkins00000000000000#!/bin/sh # Many of neutron's repos suffer from the problem of depending on neutron, # but it not existing on pypi. # This wrapper for tox's package installer will use the existing package # if it exists, else use zuul-cloner if that program exists, else grab it # from neutron master via a hard-coded URL. That last case should only # happen with devs running unit tests locally. # From the tox.ini config page: # install_command=ARGV # default: # pip install {opts} {packages} ZUUL_CLONER=/usr/zuul-env/bin/zuul-cloner BRANCH_NAME=stable/mitaka neutron_installed=$(echo "import neutron" | python 2>/dev/null ; echo $?) 
set -e CONSTRAINTS_FILE=$1 shift install_cmd="pip install" if [ $CONSTRAINTS_FILE != "unconstrained" ]; then install_cmd="$install_cmd -c$CONSTRAINTS_FILE" fi if [ $neutron_installed -eq 0 ]; then echo "ALREADY INSTALLED" > /tmp/tox_install.txt echo "Neutron already installed; using existing package" elif [ -x "$ZUUL_CLONER" ]; then export ZUUL_BRANCH=${ZUUL_BRANCH-$BRANCH} echo "ZUUL CLONER" > /tmp/tox_install.txt cwd=$(/bin/pwd) cd /tmp $ZUUL_CLONER --cache-dir \ /opt/git \ --branch $BRANCH_NAME \ git://git.openstack.org \ openstack/neutron cd openstack/neutron $install_cmd -e . cd "$cwd" else echo "PIP HARDCODE" > /tmp/tox_install.txt if [ -z "$NEUTRON_PIP_LOCATION" ]; then NEUTRON_PIP_LOCATION="git+https://git.openstack.org/openstack/neutron@$BRANCH_NAME#egg=neutron" fi $install_cmd -U -e ${NEUTRON_PIP_LOCATION} fi $install_cmd -U $* exit $? neutron-lbaas-8.0.0/doc/0000775000567000056710000000000012701410110016162 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/doc/source/0000775000567000056710000000000012701410110017462 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/doc/source/index.rst0000664000567000056710000000041012701407726021341 0ustar jenkinsjenkins00000000000000.. documentation master file Neutron LBaaS Documentation =========================== Under Construction Dashboards ========== There is a collection of dashboards to help developers and reviewers located here. .. toctree:: :maxdepth: 2 dashboards/index neutron-lbaas-8.0.0/doc/source/conf.py0000664000567000056710000001753512701407726021017 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Copyright (c) 2010 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # Keystone documentation build configuration file, created by # sphinx-quickstart on Tue May 18 13:50:15 2010. # # This file is execfile()'d with the current directory set to it's containing # dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import subprocess import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. BASE_DIR = os.path.dirname(os.path.abspath(__file__)) ROOT_DIR = os.path.abspath(os.path.join(BASE_DIR, "..", "..")) sys.path.insert(0, ROOT_DIR) # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'sphinx.ext.todo', 'oslosphinx'] todo_include_todos = True # Add any paths that contain templates here, relative to this directory. templates_path = [] if os.getenv('HUDSON_PUBLISH_DOCS'): templates_path = ['_ga', '_templates'] else: templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. 
master_doc = 'index' # General information about the project. project = u'Neutron LBaaS' copyright = u'2011-present, OpenStack Foundation.' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # Version info from neutron_lbaas.version import version_info as neutron_lbaas_version release = neutron_lbaas_version.release_string() # The short X.Y version. version = neutron_lbaas_version.version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. # unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['neutron_lbaas.'] # -- Options for man page output -------------------------------------------- # Grouping the document tree for man pages. 
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' #man_pages = [ # ('man/neutron-server', 'neutron-server', u'Neutron Server', # [u'OpenStack'], 1) #] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = ['_theme'] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", "-n1"] html_last_updated_fmt = subprocess.Popen( git_cmd, stdout=subprocess.PIPE).communicate()[0] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. 
#html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. #htmlhelp_basename = 'neutrondoc' # -- Options for LaTeX output ------------------------------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, # documentclass [howto/manual]). #latex_documents = [ # ('index', 'Neutron.tex', u'Neutron Documentation', # u'Neutron development team', 'manual'), #] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. 
#latex_use_modindex = True neutron-lbaas-8.0.0/doc/source/dashboards/0000775000567000056710000000000012701410110021574 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/doc/source/dashboards/index.rst0000664000567000056710000000015412701407726023460 0ustar jenkinsjenkins00000000000000Neutron LBaaS Graphite Pages ============================ .. toctree:: :maxdepth: 1 check.dashboard neutron-lbaas-8.0.0/doc/source/dashboards/check.dashboard.rst0000664000567000056710000001167112701407726025362 0ustar jenkinsjenkins00000000000000 Neutron LBaaS Check Pipeline Thumbnails ======================================= Click to see full size figure. .. raw:: html
Failure Percentage - Last 10 Days - V2 API and Scenario Jobs
neutron-lbaas-8.0.0/LICENSE0000664000567000056710000002363712701407726016460 0ustar jenkinsjenkins00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
neutron-lbaas-8.0.0/ChangeLog0000664000567000056710000014377112701410107017212 0ustar jenkinsjenkins00000000000000CHANGES ======= 8.0.0 ----- * Constraint requirements using mitaka upper-constraints.txt file 8.0.0.0rc2 ---------- * Update devstack plugin for dependent packages * Update .gitreview for stable/mitaka 8.0.0.0rc1 ---------- * Fix lbaas-lb-status when health monitor disabled * Delete tempest fork * Fix the neutron-lbaas namespace driver gate * Updated from global requirements * Tag the alembic migration revisions for Mitaka * Make all tox targets constrained * Uncouple lbaas object models from neutron core plugin results * Add model sync test * Fix model vs migration mismatches in nsxv tables * Fix model vs migration mismatches for L7 rules and policies * Fix delete of session_persistence with pool update 8.0.0.0b3 --------- * Revert "Adds Cascade option for load balancer drivers" * Adds Cascade Delete for LoadBalancers to Octavia Driver * Adds Cascade option for load balancer drivers * Track alembic heads * Remove synchronous haproxy driver * L7 support for Radware LBaaS v2 driver * Remove unused pngmath Sphinx extension * L7 capability extension implementation for lbaas v2 * fix mismatch between v1 keyword and v2 driver in gate_hook * Fix shared-pools lbaas_listeners migration * Set netscaler_ncc_password as secret to prevent it from being logged * Updated from global requirements * Neutron LBaaS leaking admin_password into logs * Updated from global requirements * Expand gate hooks to allow for more than just octavia testing * Add check for listener's admin_state_up status * LBaaS: make use of neutron_lib exceptions * Shared pools support * Updated from global requirements * Updated from global requirements * Updated from global requirements * Migrate some stuff to neutron-lib * Updated from global requirements * Updated from global requirements * Enable debug logging in octavia scenario tests * Temporary gate fix for tempest library * Updated from 
global requirements * Optimize API gate devstack build time * Janky hack to get gate passing * Update translation setup * Don't always run Octavia amphroa image build * Add an explicit BRANCH_NAME to tox_install.sh * Set config values and fix casual failing of nc service in scenario tests * Make sample web server close connections * Updated from global requirements * Encode certificate data for processing * Adding "region and endpoint_type" parameters to barbican_acl.py * Implementing EventStreamer reciever for octavia * Refactor BarbicanAuth to allow for configurable auth method * Updated from global requirements * Allow overriding neutron git location, to allow local clones * Remove unused compute clients * Remove commented-out import from test client 8.0.0.0b2 --------- * Remove invalid fields of healthmonitor when its type is TCP/PING * Fix timing bug that causes false negative tests * Updated from global requirements * VMWare NSXv LBaaSv2 driver * Fix lbaas unit tests, new requirement used in neutron base class * Clean up removed hacking rule from [flake8] ignore lists * Remove vim header from source files * Add scenario test for load balancer's session persistence * Use the correct _ method from neutron_lbaas._i18n * Replaces __builtin__ with six.moves.builtins * Updated from global requirements * Pass service admin auth from the Octavia driver * Update admin auth data for devstack * Replace deprecated library function os.popen() with subprocess * Do not send project_id to Octavia API on PUTs * Remove OpenSSL from cert_parser * Fix unit tests failing because of neutron changes * Update and enable auth in devstack plugin * Use assertTrue/False instead of assertEqual(T/F) * Updated from global requirements * Keep py3.X compatibility for urllib.urlencode * Fix API tests * LOG.warn -> LOG.warning * Trivial: Remove unused logging import * Updated from global requirements * Fix db refresh race condition bug * Change status to DISABLED if admin_state_up if false * 
Updated from global requirements * Avoid duplicating tenant check when creating resources * Validate vip_id as uuid * Fix a typo from UPPER_CONTRAINTS_FILE to UPPER_CONSTRAINTS_FILE * Use keystoneauth to prevent logging sensitive data * Deprecated tox -downloadcache option removed * Updated from global requirements * [LBaaS v2] Validate name, description and tenant_id attributes length * Remove Neutron LBaaS static example configuration files * Updated from global requirements * Pass config info in neutron-lbaas api v2 tempest tests * Automatically generate neutron LBaaS configuration files * Updated from global requirements * Added constraints tox targets * Radware V2: ensure that password is 'secret' * Consume LBaaS v2 plugin queue in RPC workers * Radware: ensure that password is 'secret' * Fixing Radware LBaaS v2 driver LB deletion * Updated from global requirements * Send project_id/tenant_id to octavia api * Add reno for release notes management * Sync db models to migrations * Remove version from setup.cfg * Validate connection limit for LBaaS 8.0.0.0b1 --------- * Add flavor option to loadbalancerv2 creation * Put py34 first in the env order of tox * VMware LBaaSv1 method uses invalid context * Add jobs dashboard to docs * Updated from global requirements * Switch to internal _i18n pattern, as per oslo_i18n guidelines * Remove dependency on neutron for topics * Remove pylint from pep8 * Setup for translation * Support for Name field in Members and HMs * Openstack typo * Allow API tests to run in the gate * Disable automatic config file discovery for pep8 target * Correct the lbaasv2 pool create code * Updated from global requirements * Fix some inconsistency in docstrings * Add service_plugins entry_point for LBaaS v2 plugin * Revert "Revert "Remove TEMPEST_CONFIG_DIR in the api tox env"" * Revert "Remove TEMPEST_CONFIG_DIR in the api tox env" * Remove TEMPEST_CONFIG_DIR in the api tox env * Use assertFalse instead of assertEqual * Use Octavia 
stable/liberty branch in DevStack README * Set ZUUL_BRANCH using BRANCH if not available * Fixing TLS configuration issues * remove default=None for config options * LBaaS V2 service not needed with Octavia driver * Update local.conf to use Cirros 0.3.4 * Load class should be instantiate with obj * Change haproxy namespace driver to use stevedore alias * Avoiding usage of mutables as default arguments * Refine the error message for timeout in test * Updated from global requirements * Switch to using neutron.common.utils:replace_file() * Adds a Barbican option to local.conf * Fix default device_driver entry in lbaas_agent.ini * Updated from global requirements * Use noop octavia driver for minimal job too * Updated from global requirements * Removed new=True argument from create_connection * Add ability for octavia to allocate vip * Tempest tests for Health monitor using testscenarios * Include alembic versions directory to the package * Updated from global requirements * Enable debug logs for octavia in neutron gate * api tests: don't hide exception when creating load balancer * Updated from global requirements * Updated from global requirements * Remove the embrane driver * [LBaaS v2] Validate http_method and url_path for health monitor * lbaasv2: validate tenant-id is non-empty in api * Updated from global requirements * use assertIs(Not)None instead of assert(Not)Equal * Fix order of arguments in assertEqual * Use assert(Not)In instead of assertTrue/False 7.0.0 ----- * [LBaaS v2] Improve API tests performance * Update the devstack readme.md for Octavia as ref * Filter get_pool_members to return members from the desired pool * Allow updating TLS refs * Improve resource cleanup in API v2 tests * Do not update apt if devstack is in OFFLINE mode * Updated from global requirements * Allow updating TLS refs * Filter get_pool_members to return members from the desired pool * Tag the alembic migration revisions for Liberty * tox.ini: check migrations for the subproject 
only * Tag the alembic migration revisions for Liberty * Update the devstack readme.md for Octavia as ref * Use stable/liberty branch for neutron dep * Kill HEADS file 7.0.0.0rc1 ---------- * Update defaultbranch in .gitreview to stable/liberty * Open Mitaka development * Updated from global requirements * Fix scenario tests for octavia driver * The minimal test suite is running too many tests * Fix typo in test names for test_health_monitors_non_admin.py in API v2 * Set Octavia as the reference LBaaSv2 driver * Change ignore-errors to ignore_errors * Use admin context in octavia driver, improve logs * Split up gate tests for LBaaSv2 * Various fixes for tempest tests with octavia * Updated from global requirements * Add device_id when a loadbalancer is created instead of at listener create * Use "session_persistence" in data models for uniformity with API attribute * Adding barbican to gate hook * Fix health monitor ops in lbaas octavia driver * Removed +x flag from alembic script * Wait for lb delete on tempest resource_cleanup * Add an optional member status of NO_MONITOR * Remove fall-back logic to service provider registration 7.0.0.0b3 --------- * Updated from global requirements * Removing unused dependency: discover * Adopt the migration chaining and branch names from neutron * Registering Barbican consumers * Updated from global requirements * Switch to using os-testr's copy of subunit2html * Change sni_container plural from ids to refs * Fix loading CertManager method * Change tls and sni listener API attributes to ref * Fix incorrect module import * Updated from global requirements * Clean up test_listener_basic scenario * change if to else in in haproxy namespace drivers * Octavia driver correctly reports statuses * Updated from global requirements * Add basic listener scenario tests * Octavia driver * Remove ipv6_pd_enabled from subnet data model * Remove load_admin_roles from calls to get_admin_context * Remove requirements.txt from tox.ini * Use 
oslo.log library instead of system logging module * Refactor v2 scenario tests * Fix typos in namespace_driver.py and some tests * Updated from global requirements * Fix for code redundancy and healthmonitor scenario tests * Update for NetScaler v2 driver * Updated from global requirements * Updating devstack plugin for authtoken * Fixes private key loading issue * Fixing Radware LBaaS v2 driver bugs * Killed existing downgrade rules in migration scripts 7.0.0.0b2 --------- * Register provider configuration with ServiceTypeManager * Migrate to multi branch directory structure * Fix for AttributeError * Register alembic_migrations at install time * Prevent deletion of a subnet with lbaas v1 pool * Stop use of oslo_utils.timeutils.strtime() * Install HAProxy 1.5 on Ubuntu Devstack * Updated from global requirements * script.py.mako: added license text, removed downgrade section * Remove quantum untracked files from .gitignore * Fix for post_test_hook.sh * Scenario test for healthmonitor * Fixing Radware LBaaS v2 driver LB deletion * Fixes healthmonitor admin_state_up not working issue * Forcing cert_parser.dump_private_key to use PKCS8 * ensure_dir: switch to neutron.common.utils * pep8: re-enable neutron-db-manage check_migration * Tempest tests for Members using testscenarios * Tempest tests for Listener using testscenarios * Moved the contents related to DataDrivenTests to a new directory * Modified tox.ini to run the ddt tests * Updated from global requirements * Remove pylint env from tox.ini so tox runs * Updated from global requirements * New job for LBaaS V2 scenario tests * Updated from global requirements * Fixing Radware unit tests after mock module update * Fix test failures caused by mock update * Added admin/non_admin api tests * Allow users to run 'tox -epy34' * COMMON_PREFIXES cleanup - patch 2/5 * Vip delegation * Updated from global requirements * Delete neutron_lbaas.agent.common_rpc * Use message for DeviceNotFoundOnAgent * Agent code 
restructuring in neutron tree * Neutron LBaaS: Load Balancer Basic Scenario Test * Add v1 quota tests in-tree * Switch to oslo.service * Stop doing any magic cloning of neutron during CI * Ignored several invalid tests 7.0.0.0b1 --------- * Neutron_LBaaS: Update README.rst for Tests * Modified tox.ini and post_test_hook.sh * Updated from global requirements * Switch to oslo_utils.uuidutils * Introduced tempest API tests for openstack/neutron-lbaas * Switching from the ppa to neutron backports * Update version for Liberty 7.0.0a0 ------- * Run as root for killing haproxy * Admin API tempest for healthmonitor * Adding code to prevent vip port deletion from port api * Updated from global requirements * Fix typos * Remove unused modules from the test-requirements.txt file * Updated from global requirements * Nuke a useless and incorrect comment from requirements.txt * Add a new lbaas agent Scheduler, LeastPoolAgentScheduler * Cleaner mechanism for enabling lbaasv2 in devstack-gate * Get apiv1 tests passing * Remove unused 'tempest' tox env * Alias the jenkins job args to match the right tox envs * Python 3: use six.iteritems instead of dict.items * Remove unused default value * Enable random hash seeds * Do not assume http expected_codes ordering * Pull lbaasv1 tempest tests in-tree, use installed neutron for v1/v2 job * Haproxy socket bind failing for IPv6 vip address * Bugfixes and minor improvements to local.sh * Move xrange to six.moves.range for pep8 issue * Updated from global requirements * Fix neutron-lbaas tests * Fix scheduler unit tests * Ignore new N324 style requirement * Updated from global requirements * Fix a pid file name error * Updated from global requirements * Updated from global requirements * Fix tenant_id mismatch in test object creation 2015.1.0 -------- * Add HaproxyNSDriver to lbaas entry points * Updated from global requirements 2015.1.0rc2 ----------- * Fixing LBaaS object deletion on Radwrae back-end * Updated from global requirements * 
Add Vagrant file that can bring up a working LBaaS devstack * Add devstack scripts that set up a working loadbalancer * Admin/Non-admin API tempest tests * update .gitreview for stable/kilo * Add Kilo release milestone * Add Kilo release milestone * Pin neutron to stable/kilo * Update subnet data model to include ipv6_pd_enabled * Use TLS container UUIDs in Radware LBaaS * Use listener and pool UUIDs in Radware LBaaS * Fixing Radware LBaaS v2 driver entity deletion * Switch from neutron.common.log.log to oslo_log.helpers.log_method_call * Modified the tempest.config file 2015.1.0rc1 ----------- * changes to skip negative test cases with invalid/empty tenant_id * Updated from global requirements * Rename imports based on neutron tests reorganization * Use id attribute of the object in assert_modifications_allowed * Open Liberty development * Add HaproxyNSDriver to lbaas entry points * Fixes passphrase encoding error * Changes to wait for loadbalancer status after any CRUD operation (Changes required to support async client operations) * Fixes cert_parser intermediate validation * Implement KEMPtechnologies lbaas driver v2 shim * Included more API tests for neutron_lbaas * Remove test workaround for subnetpool_id * Fixes encrypted private key failures * Updating .coveragerc to exclude the tests * Update subnet data model to include subnetpool_id * VMWare Edge loadbalancer v1 driver * NetScaler driver for LBaaS V2 extension * Fixed db update_status to handle healthmonitor * Neutron LBaaS v2 Radware driver implementation * Updated from global requirements * VMWare Edge driver database model and migration 2015.1.0b3 ---------- * Fixes sni_containers access issues * Implemented dynamic loadbalancer status tree * Fixes certificate concatenation issue * tests: stop overwriting neutron BaseTestCase configuration files * Brocade Driver for lbaas v2 data model * Added api tests for session-persistence * Fixes barbican cert manager to support v2/v3 auth * Change default cert 
manager to barbican * LBaaS reference implementation driver to support TLS * TLS capability extension implementation for lbaas v2 * Install HAProxy 1.5 on Ubuntu Devstack * Hooks for lbaasv2 api tempest job * Validate health_monitor id when associating to pool * Fixes session persistence validation error * Bug fixes for cert manager * Neutron LBaaS: Creating Health Monitor API Tests * Neutron LBaaS: Adding test cases for Members * Neutron LBaaS: Creating Pool API Tests * Neutron LBaaS: Initial Listeners API tests * Neutron LBaaS: Initial Load Balancer Tests * Added tox tempest environment * Error correction of neutron_lbaas.conf * A10 Networks LBaaS v2 Driver * Updated from global requirements * Driver helper methods correctly update LB status * Allow LBaaS service provider to be configurable in devstack * Updating gitignore file * Fix invocation of lbaas agent * Updating agent mgr and namespace driver * Haproxy Namespace Driver * Completion of Agent Manager * Agent, device driver interface, and callback API * Added agent callbacks * oslo.log: clean up remaining usages of incubator module * Updated from global requirements * Fixed synchronous driver's import redirect * Return all data for list pools * Fixed bug where session persistence was removed * Neutron LBaaS: Base Test Case Class * Move pylint checks to pep8 testenv * Removed lockutils-wrapper from tox targets * Migrate to oslo.log * Listener, Pool, Member, and HealthMonitor Managers * Finish Agent Scheduler and LB Agent Driver Piece * Moving base driver code to a new location * Add devstack plugin for neutron-lbaas * Setup LBaaS V2 Agent Scheduler and Driver * Updated from global requirements * Stop storing and passing root_helper * New directory structure * Exposing the vip_port_id attribute of a load balancer * Neutron LBaaS: Initial Health Monitors Test Client * Neutron LBaaS: Added Members Test Client * LBaaS: Enable coverage testing * Add index on tenant_id * Updated from global requirements * Neutron 
LBaaS: Initial Pools Test Client * Nuetron LBaaS: Initial Listeners Test Client * Neutron LBaaS: Initial Load Balancers Test Client * Certificate Management Interface + Implementations * Enable a bunch of pylint rules, get stuff passing * Prevent config reads in tests and fix v1/v2 mutex 2015.1.0b2 ---------- * Updating Jinja config and loadbalancerdbv2 * Initial Tempest Directory * Common TLS utilities * Simplify user workflow creating loadbalancers * Implement managers for synchronous haproxy driver * Implement synchronous haproxy driver methods * Move config and extensions to service repo * Pass root_helper to ip_lib by keyword argument to prep for removal * oslo: migrate to namespace-less import paths * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements * Migrate to oslo.concurrency * Updated from global requirements * Update hacking to 0.10 * Updated from global requirements * Updated from global requirements * Block subnet gateway IP to be used as LB VIP * Rename array of LB constants to better align with usage * Reordered neutron import statements in files * Moved lbaas-haproxy.filters from main neutron repo * Cleaned up requirements.txt * Sync with global requirements * Do not list SQLAlchemy < 0.9.7 in requirments.txt * Persistence lbaas http cookie * Fixing HEAD file to have correct migration * Do not list neutron in requirements.txt * LBaaS: Unit tests using policy.conf * Changed testr config to read environment variables for output * Backward compatibility for lbaas * Haproxy driver should respect vip/pool admin state 2015.1.0b1 ---------- * Update documentation files for LBaaS * Kill oslo-incubator files * Fixing the tests to run again * Initializing alembic for separate chain * Fix python neutron paths for neutron_lbaas * Post-split, get jenkins tests passing * Point gitreview at correct repo * Split lbaas services code into neutron-lbaas * Workflow documentation is 
now in infra-manual * tox.ini: Prevent casual addition of bash dependency * Updated from global requirements * Convert several uses of RpcCallback * Get rid of py26 references: OrderedDict, httplib, xml testing * Updated the README.rst * pretty_tox.sh: Portablity improvement * test_dhcp_agent: Fix no-op tests * Enable undefined-loop-variable pylint check * Fix incorrect exception order in _execute_request * Migrate to oslo.i18n * Migrate to oslo.middleware * Migrate to oslo.utils * Remove Python 2.6 classifier * Remove ryu plugin * Updated from global requirements * Show progress output while running unit tests * Drop RpcProxy usage from LBaaS code * Enforce log hints in neutron.services.loadbalancer * enable H401 hacking check * enable H237 check * Updated from global requirements * Validate loadbalancing method when updating a pool * Updated from global requirements * Update i18n translation for neutron.agents log msg's * enable F812 check for flake8 * enable F811 check for flake8 * Support pudb as a different post mortem debugger * switch to oslo.serialization * Add rootwrap filters for ofagent * Remove openvswitch core plugin entry point * Updated from global requirements * Updated from global requirements * Remove XML support * enable F402 check for flake8 * enable E713 in pep8 tests * Hyper-V: Remove useless use of "else" clause on for loop * Enable no-name-in-module pylint check * Updated from global requirements * Remove duplicate import of constants module * Switch run-time import to using importutils.import_module * Enable assignment-from-no-return pylint check * tox.ini: Avoid using bash where unnecessary * Ensure test_agent_manager handles random hashseeds * Empty files should not contain copyright or license * Remove single occurrence of lost-exception warning * Updated fileutils and its dependencies * remove E251 exemption from pep8 check * mock.assert_called_once() is not a valid method * Add pylint tox environment and disable all existing warnings * 
Updated from global requirements * Ignore top-level hidden dirs/files by default * Drop sslutils and versionutils modules * Removed kombu from requirements * Updated from global requirements * Updated from global requirements * Remove sslutils from openstack.common * remove openvswitch plugin * remove linuxbridge plugin * Open Kilo development * Implement ModelsMigrationsSync test from oslo.db * Fix entrypoint of OneConvergencePlugin plugin * Set dsvm-functional job to use system packages * Separate Configuration from Freescale SDN ML2 mechanism Driver * Remove @author(s) from copyright statements * Updated from global requirements * Adds ipset support for Security Groups * Remove faulty .assert_has_calls([]) * UTs: Disable auto deletion of ports/subnets/nets * Add requests_mock to test-requirements.txt * Removed kombu from requirements * Supply missing cisco_cfg_agent.ini file * Remove unused arg to config.setup_logging() * Updated from global requirements * Work toward Python 3.4 support and testing * Revert "Cisco DFA ML2 Mechanism Driver" * Big Switch: Separate L3 functions into L3 service * Remove reference to cisco_cfg_agent.ini from setup.cfg again * Adds router service plugin for CSR1kv * Support for extensions in ML2 * Cisco DFA ML2 Mechanism Driver * Adding mechanism driver in ML2 plugin for Nuage Networks * Fix state_path in tests * Remove ovs dependency in embrane plugin * Use lockutils module for tox functional env * Inline "for val in [ref]" statements * Prefer "val !=/== ref" over "val (not) in [ref]" in conditions * Updated from global requirements * Add specific docs build option to tox * A10 Networks LBaaS v1 Driver * Fix bigswitch setup.cfg lines * Reorder operations in create_vip * Remove auto-generation of db schema from models at startup * Updated from global requirements * Use jsonutils instead of stdlib json * Opencontrail plug-in implementation for core resources * Radware: When a pip is needed, reuse the Port * Remove redundant topic from 
rpc calls * Add a tox test environment for random hashseed testing * Updated from global requirements * Move from Python logging to Openstack logging * Remove reference to cisco_cfg_agent.ini from setup.cfg * Fix spelling mistakes * Removed configobj from test requirements * Use correct section for log message if interface_driver import fails * Fix deprecated opt in haproxy driver * Updated from global requirements * Functional tests work fine with random PYTHONHASHSEED * Set python hash seed to 0 in tox.ini * Configuration agent for Cisco devices * Updated from global requirements * Correct misspelled variable name * Move loadbalancer vip port creation outside of transaction * ML2 mechanism driver for SR-IOV capable NIC based switching, Part 2 * This patch changes the name of directory from mech_arista to arista * ML2 mechanism driver for SR-IOV capable NIC based switching, Part 1 * Move plugin.delete_port call out of transaction * Allow to import _LC, _LE, _LI and _LW functions directly * Make readme reference git.openstack.org not github * Bump hacking to version 0.9.2 * Use auth_token from keystonemiddleware * Change all occurences of no_delete to do_delete * Extract CommonDBMixin to a separate file * Remove reference to setuptools_git * Add a gate-specific tox env for functional tests * Add CONTRIBUTING.rst * Updated from global requirements * Updated from global requirements * Updated from global requirements * Fix example for running individual tests * Switch to using of oslo.db * remove unsupported middleware * Fix re-creation of the pool directory * Add config for performance gate job * LBaaS new object model logging no-op driver * Synced log module and its dependencies from olso-incubator * don't ignore rules that are already enforced * Moved rpc_compat.py code back into rpc.py * Updated from global requirements * Updated from global requirements * ofagent: move main module from ryu repository * Remove the useless vim modelines * Removed 'rpc' and 
'notifier' incubator modules * Removed create_rpc_dispatcher methods * Use openstack.common.lockutils module for locks in tox functional tests * Renamed consume_in_thread -> consume_in_threads * Port to oslo.messaging * Updated from global requirements * Ignore emacs checkpoint files * Configure agents using neutron.common.config.init (formerly .parse) * Added missing core_plugins symbolic names * Fix pool statistics for LBaaS Haproxy driver * Introduced rpc_compat.create_connection() * Copy-paste RPC Service class for backwards compatibility * Introduce RpcCallback class * Adding static routes data for members * remove pep8 E122 exemption and correct style * remove E112 hacking exemption and fix errors * Updated from global requirements * Check the validation of 'delay' and 'timeout' * Monkey patch threading module as early as possible * Added RpcProxy class * Use import from six.moves to import the queue module * Freescale SDN Mechanism Driver for ML2 Plugin * Remove run-time version checking for openvswitch features * Removes unnecessary Embrane module-level mocks * Radware LBaaS driver is able to flip to a secondary backend node * Sync periodic_task from oslo-incubator * Added missing plugin .ini files to setup.cfg * Updated from global requirements * Synced jsonutils from oslo-incubator * Disallow 'timeout' in health_monitor to be negative * Remove redundant default=None for config options * Cisco APIC ML2 mechanism driver, part 2 * NSX: get rid of the last Nicira/NVP bits * Add missing translation support * Add mailmap entry * LBaaS VIP doesn't work after delete and re-add * Updated from global requirements * Remove explicit dependency on amqplib * Remove duplicate module-rgx line in .pylintrc * Fix importing module in test_netscaler_driver * Adding tenant-id while creating Radware ADC service * Fix H302 violations * Fix H302 violations in plugins package * Fix H302 violations in unit tests * lbaas on a network without gateway * Improve help strings for 
radware LbaaS driver * tests/unit: refactor reading neutron.conf.test * Don't print duplicate messages on SystemExit * Updated from global requirements * LBaaS: remove orphan haproxy instances on agent start * LBaaS: Set correct nullable parameter for agent_id * Add 2-leg configuration to Radware LBaaS Driver * Fix H302 violations in db package and services * Updated from global requirements * Fix LBaaS Haproxy occurs error if no member is added * Exclude .ropeproject from flake8 checks * Enable flake8 E711 and E712 checking * Updated from global requirements * Sync service and systemd modules from oslo-incubator * Move bash whitelisting to pep8 testenv * Fix Jenkins translation jobs * ignore build directory for pep8 * Enable hacking H301 check * Updated from global requirements * Remove last parts of Quantum compatibility shim * Cancelling thread start while unit tests running * UT: do not hide an original error in test resource ctxtmgr * Open Juno development * Start using oslosphinx theme for docs * LBaaS: make device driver decide whether to deploy instance * Updated from global requirements * Log received pool.status * rename ACTIVE_PENDING to ACTIVE_PENDING_STATUSES * Return meaningful error message on pool creation error * Avoid creating FixedIntervalLoopingCall in agent UT * Get rid of additional db contention on fetching VIP * Fix typo in lbaas agent exception message * add HEAD sentinel file that contains migration revision * Fix usage of save_and_reraise_exception * LBaaS: small cleanup in agent device driver interface * Remove individual cfg.CONF.resets from tests * Bugfix and refactoring for ovs_lib flow methods * Skip radware failing test for now * Embrane LBaaS Driver * Fix unittest failure in radware lbaas driver * Removes calls to mock.patch.stopall in unit tests * Updated from global requirements * Fix bug:range() is not same in py3.x and py2.x * Updated from global requirements * Updated from global requirements * Fix unittest failure in radware 
lbaas driver * One Convergence Neutron Plugin l3 ext support * One Convergence Neutron Plugin Implementation * BigSwitch: Add SSL Certificate Validation * Updated from global requirements * Add OpenDaylight ML2 MechanismDriver * Implementaion of Mechanism driver for Brocade VDX cluster of switches * replace rest of q_exc to n_exc in code base * Implement Mellanox ML2 MechanismDriver * Implement OpenFlow Agent mechanism driver * Finish off rebranding of the Nicira NVP plugin * BigSwitch: Add agent to support neutron sec groups * Adds the new IBM SDN-VE plugin * Updated from global requirements * Update License Headers to replace Nicira with VMware * stats table needs columns to be bigint * Developer documentation * LBaaS: check for associations before deleting health monitor * tests/service: consolidate setUp/tearDown logic * options: consolidate options definitions * Rename Neutron core/service plugins for VMware NSX * Updated from global requirements * Sync minimum requirements * Implements an LBaaS driver for NetScaler devices * Copy cache package from oslo-incubator * tests/unit: Initialize core plugin in TestL3GwModeMixin * Remove dependent module py3kcompat * Use save_and_reraise_exception when reraise exception * add router_id to response for CRU on fw/vip objs * Add migration support from agent to NSX dhcp/metadata services * Remove psutil dependency * LBaaS: move agent based driver files into a separate dir * mailmap: update .mailmap * Return request-id in API response * Prepare for multiple cisco ML2 mech drivers * Support building wheels (PEP-427) * Use oslo.rootwrap library instead of local copy * Enables BigSwitch/Restproxy ML2 VLAN driver * Add an explicit tox job for functional tests * Base ML2 bulk support on the loaded drivers * Extending quota support for neutron LBaaS entities * Enable hacking H233 rule * Update RPC code from oslo * Configure plugins by name * Update lockutils and fixture in openstack.common * LBaaS: handle NotFound exceptions in 
update_status callback * Rename nicira configuration elements to match new naming structure * Remove unused imports * Rename check_nvp_config utility tool * Fix NoSuchOptError in lbaas agent test * Corrects broken format strings in check_i18n.py * LBaaS: synchronize haproxy deploy/undeploy_instance methods * Updates tox.ini to use new features * LBaaS: fix handling pending create/update members and health monitors * Updated from global requirements * Sync global requirements to pin sphinx to sphinx>=1.1.2,<1.2 * Remove start index 0 in range() * LBaaS: unify haproxy-on-host plugin driver and agent * Add fwaas_driver.ini to setup.cfg * Add vpnaas and debug filters to setup.cfg * Fix misspells * Updates .gitignore * Enforce unique constraint on neutron pool members * Update Zhenguo Niu's mailmap * Replace stubout with fixtures * Ensure get_pid_to_kill works with rootwrap script * Adds tests, fixes Radware LBaaS driver as a result * LBaaS: fix reported binary name of a loadbalancer agent * Apply six for metaclass * Move Loadbalancer Noop driver to the unit tests * Updated from global requirements * Cleanup HACKING.rst * Remove confusing comment and code for LBaaS * Fix import log_handler error with publish_errors set * LBaaS UT: use constants vs magic numbers for http error codes * LBaaS: Fix incorrect pool status change * Updated from global requirements * Removing workflows from the Radware driver code * LBaaS: when returning VIP include session_persistence even if None * fixes test_kill_pids_in_file conflicts * Updated from global requirements * Cleanup and make HACKING.rst DRYer * Add support for managing async processes * Remove obsolete redhat-eventlet.patch * Radware LBaaS driver implementation * Open Icehouse development * Updated from global requirements * add portbinding host into vip port * Require oslo.config 1.2.0 final * LBaaS: include inactive members when retrieving logical config * Use built-in print() instead of print statement * Support advanced NVP 
LBaaS Service * Fix haproxy agent unit test to be runnable alone by tox * Add l2 population base classes * LBaaS: Fix healthmonitor disassociation for non-admin * Fix message i18n error * Install metering_agent.ini and vpn_agent.ini * fix conversion type missing * Enclose command args in with_venv.sh * ML2 Mechanism Driver for Cisco Nexus * LBaaS: make haproxy stats parsing more safe * LBaaS: add status of pool-monitor association to the pool return dict * Reference driver implementation (IPsec) for VPNaaS * Implement ML2 port binding * Arista ML2 Mechanism driver * LBaaS integration with service type framework * ML2 Mechanism Driver for Tail-f Network Control System (NCS) * Default to not capturing log output in tests * LBaaS: update status of members according to health statistics * Add Neutron l3 metering agent * Add list of pool ids to HealthMonitor dict * Update mailmap * Fix wrong example in HACKING.rst * Bumps hacking to 0.7.0 * remove binaries under bin * Fixes Windows setup dependency bug * Fix test_update_status unit test in Loadbalancer test suite * LBaaS: throw proper exception on duplicating monitor association * Restore Babel to requirements.txt * Remove DHCP lease logic * Remove last vestiges of nose * Updated from global requirements * sync some configuration items with codes * Ignore pbr*.egg directory * Fix H102, H103 Apache 2.0 license hacking check error * LBaaS: pass the complete healthmonitor object to driver * Cleanup remaining code that used 'status' fields of HealthMonitor * Remove openstack.common.exception usage * Avoid overwrite value of deprecated name in conf * Adds Babel dependency missing from 555d27c * Remove status* fields from HealthMonitor model * Fix the alphabetical order in requirement files * LBaaS: add status field to PoolMonitorAssociation table * Remove comments from requirements.txt (workaround pbr bug) * remove netifaces dependency of ryu-agent * LBaaS: add delete_health_monitor() to driver API * Add agent scheduling for 
LBaaS namespace agent * Add decorator helping to log method calls * Remove use_namespaces option from etc/lbaas_agent.ini * modernize quantum config in proper place * Add gre tunneling support for the ML2 plugin * Add VXLAN tunneling support for the ML2 plugin * xenapi - rename quantum to neutron * LBaaS: update DB pool stats received from lbaas agent * Enable logging before using it * Fix issue with pip installing oslo.config-1.2.0 * Initial Modular L2 Mechanism Driver implementation * Add status description field for lbaas objects * Add cover/ to .gitignore * Improve lbaas haproxy plugin_driver test coverage * fix some missing change from quantum to neutron * git remove old non-working packaging files * Rename Quantum to Neutron * Rename quantum to neutron in .gitreview * Sync install_venv_common from oslo * Update to use OSLO db * Require greenlet 0.3.2 (or later) * Remove single-version-externally-managed in setup.cfg * Fix single-version-externally-mananged typo in setup.cfg * Allow use of lowercase section names in conf files * Require pbr 0.5.16 or newer * Update to the latest stevedore * Rename agent_loadbalancer directory to loadbalancer * Remove unit tests that are no longer run * Update with latest OSLO code * Remove explicit distribute depend * Fix and enable H90x tests * Remove generic Exception when using assertRaises * Add *.swo/swp to .gitignore * python3: Introduce py33 to tox.ini * Rename README to README.rst * Rename requires files to standard names * Initial Modular L2 plugin implementation * Revert dependency on oslo.config 1.2.0 * Perform a sync with oslo-incubator * Require oslo.config 1.2.0a2 * update mailmap * Revert "Fix ./run_tests.sh --pep8" * Move to pbr * Docstrings formatted according to pep257 * relax amqplib and kombu version requirements * Fix ./run_tests.sh --pep8 * blueprint mellanox-quantum-plugin * Update flake8 pinned versions * Let the cover venv run individual tests * Copy the RHEL6 eventlet workaround from Oslo * Remove 
locals() from strings substitutions * Enable automatic validation of many HACKING rules * Shorten the path of the nicira nvp plugin * Allow pdb debugging in manually-invoked tests * Reformat openstack-common.conf * Switch to flake8 from pep8 * Parallelize quantum unit testing: * blueprint cisco-single-config * Add lbaas_agent files to setup.py * Add VIRTUAL_ENV key to enviroment passed to patch_tox_env * Pin SQLAlchemy to 0.7.x * Sync latest Oslo components for updated copyright * drop rfc.sh * Replace "OpenStack LLC" with "OpenStack Foundation" * First havana commit * remove references to netstack in setup.py * Switch to final 1.1.0 oslo.config release * Update to Quantum Client 2.2.0 * Update tox.ini to support RHEL 6.x * Switch to oslo.config * Add common test base class to hold common things * Pin pep8 to 1.3.3 * Enable HA proxy to work with fedora * Add initial testr support * LBaaS Agent Reference Implementation * Bump python-quantumclient version to 2.1.2 * Add scheduling feature basing on agent management extension * Remove compat cfg wrapper * Unpin PasteDeploy dependency version * Use testtools instead of unittest or unittest2 * Add midonet to setup.py * Sync latest install_venv_common.py with olso * Add check-nvp-config utility * Add unit test for ryu-agent * Use oslo-config-2013.1b3 * Adds Brocade Plugin implementation * Synchronize code from oslo * PLUMgrid quantum plugin * Update .coveragerc * Allow tools/install_venv_common.py to be run from within the source directory * Updated to latest oslo-version code * Use install_venv_common.py from oslo * Cisco plugin cleanup * Use babel to generate translation file * Update WebOb version to >=1.2 * Update latest OSLO * Adding multi switch support to the Cisco Nexus plugin * Adds support for deploying Quantum on Windows * Latest OSLO updates * Port to argparse based cfg * Add migration support to Quantum * Undo change to require WebOb 1.2.3, instead, require only >=1.0.8 * .gitignore cleanup * Upgrade WebOb 
to 1.2.3 * Logging module cleanup * Add OVS cleanup utility * Add tox artifacts to .gitignore * Add restproxy.ini to config_path in setup.py * Add script for checking i18n message * l3 agent rpc * Add metadata_agent.ini to config_path in setup.py * Remove __init__.py from bin/ and tools/ * add metadata proxy support for Quantum Networks * Use auth_token middleware in keystoneclient * Add QUANTUM_ prefix for env used by quantum-debug * Make tox.ini run pep8 checks on bin * Explicitly include versioninfo in tarball * Import lockutils and fileutils from openstack-common * Updated openstack-common setup and version code * Ensure that the anyjson version is correct * Add eventlet_backdoor and threadgroup from openstack-common * Add loopingcall from openstack-common * Added service from openstack-common * Drop lxml dependency * Add uuidutils module * Import order clean-up * pin sqlalchemy to 0.7 * Correct Intended Audience * Add OpenStack trove classifier for PyPI * Improve unit test times * l3_nat_agent was renamed to l3_agent and this was missed * Support for several HA RabbitMQ servers * add missing files from setup.py * Create .mailmap file * Lower webob dep from v1.2.0 to v1.0.8 * Implements agent for Quantum Networking testing * Create utility to clean-up netns * Update rootwrap; track changes in nova/cinder * Execute unit tests for Cisco plugin with Quantum tests * Add lease expiration script support for dnsmasq * Add nosehtmloutput as a test dependency * quantum l3 + floating IP support * Updates pip requirements * NEC OpenFlow plugin support * remove old gflags config code * RPC support for OVS Plugin and Agent * Initial implemention of MetaPlugin * RPC support for Linux Bridge Plugin and Agent * Exempt openstack-common from pep8 check * fix bug lp:1025526,update iniparser.py to accept empty value * Introduce files from openstack common * fix bug lp:1019230,update rpc from openstack-common * implement dhcp agent for quantum * Use setuptools git plugin for file 
inclusion * Remove paste configuration details to a seperate file. blueprint use-common-cfg * Implements the blueprint use-common-cfg for the quantum service. More specifically uses global CONF for the quantum.conf file * Add authZ through incorporation of policy checks * Bug #1013967 - Quantum is breaking on tests with pep 1.3 * Use openstack.common.exception * API v2: mprove validation of post/put, rename few attributes * Add API v2 support * Fix up test running to match jenkins expectation * Add build_sphinx options * Quantum should use openstack.common.jsonutils * Remove hardcoded version for pep8 from tools/test-requires * Quantum should use openstack.common.importutils * PEP8 fixes * Bug #1002605 * Parse linuxbridge plugins using openstack.common.cfg * Add HACKING.rst to tarball generation bug 1001220 * Include AUTHORS in release package * Change Resource.__call__() to not leak internal errors * Removed simplejson from pip-requires * Remove dependency on python-quantumclient * Add sphinx to the test build deps * Add HACKING.rst coding style doc * bug 963152: add a few missing files to sdist tarball * Fix path to python-quantumclient * Split out pip requires and aligned tox file * Fix missing files in sdist package [bug 954906] * Downgraded required version of WebOb to 1.0.8 * more files missing in sdist tarball * make sure pip-requires is included in setup.py sdist * remove pep8 and strict lxml version from setup.py * plugin: introduce ryu plugin * bug 934459: pip no longer supports -E * blueprint quantum-ovs-tunnel-agent * Initial commit: nvp plugin * Cleanup the source distribution * blueprint quantum-linux-bridge-plugin * Remove quantum CLI console script * Bug 925372: remove deprecated webob attributes (and also specify stable webob version in pip-requires) essex-3 ------- * Make tox config work * Pin versions to standard versions * Split out quantum.client and quantum.common * Quantum was missing depend on lxml * moving batch config out of quantum-server 
repo * Getting ready for the client split * Removed erroneous print from setup.py * Base version.py on glance * Fix lp bug 897882 * Install a good version of pip in the venv * Rename .quantum-venv to .venv * Remove plugin pip-requires essex-2 ------- * Bug #890028 * Fix for bug 900316 * Second round of packaging changes * Changes to make pip-based tests work with jenkins * Fix for bug 888811 * Fix for Bug #888820 - pip-requires file support for plugins essex-1 ------- * blueprint quantum-packaging * Add .gitreview config file for gerrit * Add code-coverage support to run_tests.sh (lp860160) 2011.3 ------ * Add rfc.sh to help with gerrit workflow * merge tyler's unit tests for cisco plugin changes lp845140 * merge salv's no-cheetah CLI branch lp 842190 * merge sumit's branch for lp837752 * Merging latest from lp:quantum * Merging lo:~salvatore-orlando/quantum/quantum-api-auth * Updating CLI for not using Cheetah anymore. Now using a mechanism based on Python built-in templates * Merging Sumit's changes including fixes for multinic support, and CLI module for working with extensions * Merging from Cisco branch * Merging from lp:quantum * merge cisco consolidated plugin changes * Merging lp:~salvatore-orlando/quantum/bug834449 * merge trunk * Merging from lp:quantum * merge salvatore's new cli code * Addressing comments from Dan * Merging from quantum * merge cisco extensions branch * Merging from Sumit's branch, changes to VIF-driver and Scheduler; extension action names have been changed in response to Salvatore's review comments in the extensions branch review * Syncing with Cisco extensions branch * Merging from Sumit's branch, import ordering related changes * Merging the Cisco branch * Finishing cli work Fixing bug with XML deserialization * Merging lp:~salvatore-orlando/quantum/quantum-api-alignment * merge latest quantum branch and resolve conflicts * Merging lp:~asomya/quantum/lp833163 Fix for Bug #833163: Pep8 violations in recent packaging changes that were 
merged into trunk (Critical) * PEP8 fixes for setup.py * Merging lp:~cisco-openstack/quantum/802dot1qbh-vifdriver-scheduler * Merging lp:~cisco-openstack/quantum/l2network-plugin-persistence * Merging lp:quantum * merging with lp:quantum * Making Keystone version configurable * Merging branch: lp:~danwent/quantum/test-refactor * Syncing with lp:quantum * Merging fixes and changes batch-config script. Thanks lp:danwent ! * Merging lp:~asomya/quantum/lp824145 Fix for Bug#824145 : Adding a setup script for quantum * merge trunk pep8 fixes adapting CLI to API v1.0 Fixing wsgi to avoid failure with extensions * merge trunk * Pulling in changes from lp:quantum * Merging Cisco's contribution to Quantum. Thanks to various folks at Cisco Systems, Quantum will have plugins to integrate with Cisco UCS blade servers using 802.1Qbh, Cisco Nexus family of switches and the ability for Quantum plugin to have multiple switches/devices within a single Quantum plugin * Merging from Sumit's branch pylint fixes and incorporating review comments * Mergin from cisco brach * Merging from lp:quantum * Introducting cheetah Updating list_nets in CLI Writing unit tests for list_nets Stubbing out with FakeConnection now * Merging quantum extenions framework into trunk. Thanks rajaram vinkesh, deepak & santhosh for the great work! * lp Bug#824145 : Adding a setup script for quantum * skeleton for cli unit tests * merge trunk * Merged quantum trunk * - Adding setup script * force batch_config.py to use json, as XML has issues (see bug: 798262) * update batch_config.py to use new client lib, hooray for deleting code * Merging changes addressing Bug # 802772. Thanks lp:danwent ! * Merging bugfix for Bug 822890 - Added License file for Quantum code distribution * L2 Network Plugin Framework merge * Adding Apache Version 2.0 license file. 
This is the official license agreement under which Quantum code is available to the Open Source community * merge * merge heckj's pip-requires fixes * updates to pip-requires for CI * Merged quantum trunk * Merging changes from lp:quantum * Completing API spec alignment Unit tests aligned with changes in the API spec * Merging the brand new Quantum-client-library feature * Merging lp:quantum updates * persistence of l2network & ucs plugins using mysql - db_conn.ini - configuration details of making a connection to the database - db_test_plugin.py - contains abstraction methods for storing database values in a dict and unit test cases for DB testing - l2network_db.py - db methods for l2network models - l2network_models.py - class definitions for the l2 network tables - ucs_db.py - db methods for ucs models - ucs_models.py - class definition for the ucs tables dynamic loading of the 2nd layer plugin db's based on passed arguments Create, Delete, Get, Getall, Update database methods at - Quantum, L2Network and Ucs Unit test cases for create, delete, getall and update operations for L2Network and Ucs plugins pep8 checks done branch based off revision 34 plugin-framework * Merged from trunk * merged the latest changes from plugin-framework branch - revision 39 conforming to the new cisco plugin directory structure and moving all db related modules into cisco/db folder updated db_test_plugin.py - added import of cisco constants module - added LOG.getLogger for logging component name - updated import module paths for l2network_models/db and ucs_models/db to use the new directory structure - updated (rearranged) imports section to obey openstack alphabetical placement convention updated db_conn.ini - updated database name from cisco_naas to quantum_l2network unit test cases ran successfully and pep8 checks done again * merge branch for to fix bug817826 * Merging the latest changes from lp:quantum * fix bug 817826 and similar error in batch_config.py * merge Salvatore's api 
branch with fixes for tests. Tweaking branch to remove unwanted bin/quantum.py as part of merge * Santhosh/Rajaram|latest merge from quantum and made extensions use options to load plugin * Apply fix for bug #797419 merging lp:~salvatore-orlando/quantum/bug797419 * Merging branch lp:~netstack/quantum/quantum-unit-tests * Merged from quantum trunk * Adapated plugin infrastructure to allow API to pass options to plugins Now using in-memory sqlite db for tests on FakePlugin teardown() now 'resets' the in-memory db Adding unit tests for APIs * Adding Routes>=1.12.3 to tools/pip-requires * Merging dan wendlandt's bugfixes for Bug #800466 and improvements that enable Quantum to seamlessly run on KVM! * more pep8 goodness * refactor batch_config, allow multiple attaches with the empty string * merge and pep8 cleanup * Merging latest changes from parent repo - lp:network-service , Parent repo had approved merge proposal for merging lp:~santhom/network-service/quantum_testing_framework , which has now been merged into lp:network-service * Merging pep8 and functional test related changes lp:~santhom/network-service/quantum_testing_framework branch * add example to usage string for batch_config.py * Bug fixes and clean-up, including supporting libvirt * Santhosh/Vinkesh | Added the testing framework. Moved the smoketest to tests/functional * Pushing initial started code based on Glance project and infrstructure work done by the melange team * Merging in latest changes from lp:quantum neutron-lbaas-8.0.0/PKG-INFO0000664000567000056710000000267712701410110016526 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: neutron-lbaas Version: 8.0.0 Summary: OpenStack Networking Load Balancing as a Service Home-page: http://www.openstack.org/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: Welcome! ======== This package contains the code for the Neutron Load Balancer as a Service (LBaaS) service. 
This includes third-party drivers. This package requires Neutron to run. External Resources: =================== The homepage for Neutron is: http://launchpad.net/neutron. Use this site for asking for help, and filing bugs. We use a single Launchpad page for all Neutron projects. Code is available on git.openstack.org at: . Please refer to Neutron documentation for more information: `Neutron README.rst `_ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 neutron-lbaas-8.0.0/MANIFEST.in0000664000567000056710000000042212701407726017174 0ustar jenkinsjenkins00000000000000include AUTHORS include README.rst include ChangeLog include LICENSE include neutron_lbaas/db/migration/alembic_migrations/script.py.mako recursive-include neutron_lbaas/db/migration/alembic_migrations/versions * exclude .gitignore exclude .gitreview global-exclude *.pyc neutron-lbaas-8.0.0/tox.ini0000664000567000056710000001026512701407727016760 0ustar jenkinsjenkins00000000000000[tox] envlist = py34,py27,pep8 minversion = 1.6 skipsdist = True [testenv] setenv = VIRTUAL_ENV={envdir} usedevelop = True install_command = {toxinidir}/tools/tox_install.sh {env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/mitaka} {opts} {packages} deps = -r{toxinidir}/test-requirements.txt whitelist_externals = sh commands = sh tools/pretty_tox.sh '{posargs}' # there is also secret magic in pretty_tox.sh which lets you run in a fail only # mode. To do this define the TRACE_FAILONLY environmental variable. 
[testenv:releasenotes] # TODO(ihrachys): remove once infra supports constraints for this target install_command = {toxinidir}/tools/tox_install.sh unconstrained {opts} {packages} commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:pep8] commands = flake8 # pylint --rcfile=.pylintrc --output-format=colorized {posargs:neutron_lbaas} neutron-db-manage --subproject neutron-lbaas --config-file neutron_lbaas/tests/etc/neutron.conf check_migration {[testenv:genconfig]commands} whitelist_externals = sh [testenv:i18n] commands = python ./tools/check_i18n.py ./neutron_lbaas ./tools/i18n_cfg.py [testenv:cover] # TODO(ihrachys): remove once infra supports constraints for this target install_command = {toxinidir}/tools/tox_install.sh unconstrained {opts} {packages} commands = python setup.py testr --coverage --coverage-package-name=neutron_lbaas --testr-args='{posargs}' [testenv:venv] # TODO(ihrachys): remove once infra supports constraints for this target install_command = {toxinidir}/tools/tox_install.sh unconstrained {opts} {packages} commands = {posargs} [testenv:docs] commands = python setup.py build_sphinx [testenv:py34] commands = python -m testtools.run \ neutron_lbaas.tests.unit.common.cert_manager.test_barbican [flake8] # E125 continuation line does not distinguish itself from next logical line # E126 continuation line over-indented for hanging indent # E128 continuation line under-indented for visual indent # E129 visually indented line with same indent as next logical line # E265 block comment should start with ‘# ‘ # H405 multi line docstring summary not separated with an empty line # TODO(marun) H404 multi line docstring should start with a summary # N324 contextlib.nested is deprecated ignore = E125,E126,E128,E129,E265,H404,H405,N324 show-source = true builtins = _ exclude = 
.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,tools,.ropeproject,rally-scenarios,neutron_lbaas/tests/tempest/lib,neutron_lbaas/tests/tempest/v1/api [hacking] import_exceptions = neutron_lbaas._i18n local-check-factory = neutron.hacking.checks.factory # If you are running the tests locally you should set the env variable # TEMPEST_CONFIG_DIR=/opt/stack/tempest/etc [testenv:apiv1] sitepackages = True passenv = TEMPEST_CONFIG_DIR setenv = OS_TEST_PATH={toxinidir}/neutron_lbaas/tests/tempest/v1/api OS_TESTR_CONCURRENCY=1 deps = {[testenv]deps} git+https://git.openstack.org/openstack/tempest#egg=tempest # If you are running the tests locally you should set the env variable # TEMPEST_CONFIG_DIR=/opt/stack/tempest/etc [testenv:apiv2] sitepackages = True passenv = TEMPEST_CONFIG_DIR setenv = OS_TEST_PATH={toxinidir}/neutron_lbaas/tests/tempest/v2/api OS_TESTR_CONCURRENCY=1 deps = {[testenv]deps} git+https://git.openstack.org/openstack/tempest#egg=tempest # If you are running the tests locally you should set the env variable # TEMPEST_CONFIG_DIR=/opt/stack/tempest/etc [testenv:scenario] sitepackages = True passenv = TEMPEST_CONFIG_DIR setenv = OS_TEST_PATH={toxinidir}/neutron_lbaas/tests/tempest/v2/scenario OS_TESTR_CONCURRENCY=1 deps = {[testenv]deps} git+https://git.openstack.org/openstack/tempest#egg=tempest # If you are running the tests locally you should set the env variable # TEMPEST_CONFIG_DIR=/opt/stack/tempest/etc [testenv:ddt] sitepackages = True passenv = TEMPEST_CONFIG_DIR setenv = OS_TEST_PATH={toxinidir}/neutron_lbaas/tests/tempest/v2/ddt OS_TESTR_CONCURRENCY=1 deps = {[testenv]deps} git+https://git.openstack.org/openstack/tempest#egg=tempest [testenv:genconfig] commands = {toxinidir}/tools/generate_config_file_samples.sh neutron-lbaas-8.0.0/.testr.conf0000664000567000056710000000047112701407726017530 0ustar jenkinsjenkins00000000000000[DEFAULT] test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} 
OS_LOG_CAPTURE=${OS_LOG_CAPTURE:-1} ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./neutron_lbaas/tests/unit} $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list neutron-lbaas-8.0.0/neutron_lbaas/0000775000567000056710000000000012701410110020251 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/agent_scheduler.py0000664000567000056710000001277212701407726024013 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import random from neutron.db import agents_db from neutron.db import agentschedulers_db from neutron.db import model_base from oslo_log import log as logging import six import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.orm import joinedload from neutron_lbaas._i18n import _LW from neutron_lbaas.extensions import lbaas_agentschedulerv2 from neutron_lbaas.services.loadbalancer import constants as lb_const LOG = logging.getLogger(__name__) class LoadbalancerAgentBinding(model_base.BASEV2): """Represents binding between neutron loadbalancer and agents.""" __tablename__ = "lbaas_loadbalanceragentbindings" loadbalancer_id = sa.Column( sa.String(36), sa.ForeignKey("lbaas_loadbalancers.id", ondelete='CASCADE'), primary_key=True) agent = orm.relation(agents_db.Agent) agent_id = sa.Column( sa.String(36), sa.ForeignKey("agents.id", ondelete='CASCADE'), nullable=False) class LbaasAgentSchedulerDbMixin(agentschedulers_db.AgentSchedulerDbMixin, lbaas_agentschedulerv2 .LbaasAgentSchedulerPluginBase): agent_notifiers = {} def get_agent_hosting_loadbalancer(self, context, loadbalancer_id, active=None): query = context.session.query(LoadbalancerAgentBinding) query = query.options(joinedload('agent')) binding = query.get(loadbalancer_id) if (binding and self.is_eligible_agent( active, binding.agent)): return {'agent': self._make_agent_dict(binding.agent)} def get_lbaas_agents(self, context, active=None, filters=None): query = context.session.query(agents_db.Agent) query = query.filter_by(agent_type=lb_const.AGENT_TYPE_LOADBALANCERV2) if active is not None: query = query.filter_by(admin_state_up=active) if filters: for key, value in six.iteritems(filters): column = getattr(agents_db.Agent, key, None) if column: query = query.filter(column.in_(value)) return [agent for agent in query if self.is_eligible_agent(active, agent)] def list_loadbalancers_on_lbaas_agent(self, context, id): query = context.session.query( LoadbalancerAgentBinding.loadbalancer_id) query = 
query.filter_by(agent_id=id) loadbalancer_ids = [item[0] for item in query] if loadbalancer_ids: lbs = self.get_loadbalancers(context, filters={'id': loadbalancer_ids}) return lbs return [] def get_lbaas_agent_candidates(self, device_driver, active_agents): candidates = [] for agent in active_agents: agent_conf = self.get_configuration_dict(agent) if device_driver in agent_conf['device_drivers']: candidates.append(agent) return candidates class ChanceScheduler(object): """Allocate a loadbalancer agent for a vip in a random way.""" def schedule(self, plugin, context, loadbalancer, device_driver): """Schedule the load balancer to an active loadbalancer agent if there is no enabled agent hosting it. """ with context.session.begin(subtransactions=True): lbaas_agent = plugin.db.get_agent_hosting_loadbalancer( context, loadbalancer.id) if lbaas_agent: LOG.debug('Load balancer %(loadbalancer_id)s ' 'has already been hosted' ' by lbaas agent %(agent_id)s', {'loadbalancer_id': loadbalancer.id, 'agent_id': lbaas_agent['id']}) return active_agents = plugin.db.get_lbaas_agents(context, active=True) if not active_agents: LOG.warning( _LW('No active lbaas agents for load balancer %s'), loadbalancer.id) return candidates = plugin.db.get_lbaas_agent_candidates(device_driver, active_agents) if not candidates: LOG.warning(_LW('No lbaas agent supporting device driver %s'), device_driver) return chosen_agent = random.choice(candidates) binding = LoadbalancerAgentBinding() binding.agent = chosen_agent binding.loadbalancer_id = loadbalancer.id context.session.add(binding) LOG.debug( 'Load balancer %(loadbalancer_id)s is scheduled ' 'to lbaas agent %(agent_id)s', { 'loadbalancer_id': loadbalancer.id, 'agent_id': chosen_agent['id']} ) return chosen_agent neutron-lbaas-8.0.0/neutron_lbaas/tests/0000775000567000056710000000000012701410110021413 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/etc/0000775000567000056710000000000012701410110022166 5ustar 
jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/etc/neutron.conf0000664000567000056710000000005712701407726024554 0ustar jenkinsjenkins00000000000000[DEFAULT] [database] connection = 'sqlite://' neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/0000775000567000056710000000000012701410110023074 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/etc/0000775000567000056710000000000012701410110023647 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/etc/__init__.py0000664000567000056710000000000012701407726025771 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/__init__.py0000664000567000056710000000000012701407726025216 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/lib/0000775000567000056710000000000012701410110023642 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/lib/__init__.py0000664000567000056710000000000012701407726025764 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/lib/services/0000775000567000056710000000000012701410110025465 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/lib/services/__init__.py0000664000567000056710000000000012701407726027607 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/lib/services/network/0000775000567000056710000000000012701410110027156 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/lib/services/network/__init__.py0000664000567000056710000000000012701407726031300 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/lib/services/network/json/0000775000567000056710000000000012701410110030127 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/lib/services/network/json/network_client.py0000664000567000056710000006214712701407726033565 0ustar 
jenkinsjenkins00000000000000# Copyright 2016 Rackspace Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time from six.moves.urllib import parse from tempest.lib.common import rest_client from tempest.lib.common.utils import misc from tempest.lib import exceptions as lib_exc from tempest import exceptions class NetworkClientJSON(rest_client.RestClient): """ Tempest REST client for Neutron. Uses v2 of the Neutron API, since the V1 API has been removed from the code base. 
Implements create, delete, update, list and show for the basic Neutron abstractions (networks, sub-networks, routers, ports and floating IP): Implements add/remove interface to router using subnet ID / port ID It also implements list, show, update and reset for OpenStack Networking quotas """ version = '2.0' uri_prefix = "v2.0" def get_uri(self, plural_name): # get service prefix from resource name # The following list represents resource names that do not require # changing underscore to a hyphen hyphen_exceptions = ["health_monitors", "firewall_rules", "firewall_policies"] # the following map is used to construct proper URI # for the given neutron resource service_resource_prefix_map = { 'networks': '', 'subnets': '', 'subnetpools': '', 'ports': '', 'pools': 'lb', 'vips': 'lb', 'health_monitors': 'lb', 'members': 'lb', 'ipsecpolicies': 'vpn', 'vpnservices': 'vpn', 'ikepolicies': 'vpn', 'ipsec-site-connections': 'vpn', 'metering_labels': 'metering', 'metering_label_rules': 'metering', 'firewall_rules': 'fw', 'firewall_policies': 'fw', 'firewalls': 'fw' } service_prefix = service_resource_prefix_map.get( plural_name) if plural_name not in hyphen_exceptions: plural_name = plural_name.replace("_", "-") if service_prefix: uri = '%s/%s/%s' % (self.uri_prefix, service_prefix, plural_name) else: uri = '%s/%s' % (self.uri_prefix, plural_name) return uri def pluralize(self, resource_name): # get plural from map or just add 's' # map from resource name to a plural name # needed only for those which can't be constructed as name + 's' resource_plural_map = { 'security_groups': 'security_groups', 'security_group_rules': 'security_group_rules', 'ipsecpolicy': 'ipsecpolicies', 'ikepolicy': 'ikepolicies', 'ipsec_site_connection': 'ipsec-site-connections', 'quotas': 'quotas', 'firewall_policy': 'firewall_policies' } return resource_plural_map.get(resource_name, resource_name + 's') def _lister(self, plural_name): def _list(**filters): uri = self.get_uri(plural_name) if filters: 
uri += '?' + parse.urlencode(filters, doseq=1) resp, body = self.get(uri) result = {plural_name: self.deserialize_list(body)} self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, result) return _list def _deleter(self, resource_name): def _delete(resource_id): plural = self.pluralize(resource_name) uri = '%s/%s' % (self.get_uri(plural), resource_id) resp, body = self.delete(uri) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp, body) return _delete def _shower(self, resource_name): def _show(resource_id, **fields): # fields is a dict which key is 'fields' and value is a # list of field's name. An example: # {'fields': ['id', 'name']} plural = self.pluralize(resource_name) uri = '%s/%s' % (self.get_uri(plural), resource_id) if fields: uri += '?' + parse.urlencode(fields, doseq=1) resp, body = self.get(uri) body = self.deserialize_single(body) self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body) return _show def _creater(self, resource_name): def _create(**kwargs): plural = self.pluralize(resource_name) uri = self.get_uri(plural) post_data = self.serialize({resource_name: kwargs}) resp, body = self.post(uri, post_data) body = self.deserialize_single(body) self.expected_success(201, resp.status) return rest_client.ResponseBody(resp, body) return _create def _updater(self, resource_name): def _update(res_id, **kwargs): plural = self.pluralize(resource_name) uri = '%s/%s' % (self.get_uri(plural), res_id) post_data = self.serialize({resource_name: kwargs}) resp, body = self.put(uri, post_data) body = self.deserialize_single(body) self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body) return _update def __getattr__(self, name): method_prefixes = ["list_", "delete_", "show_", "create_", "update_"] method_functors = [self._lister, self._deleter, self._shower, self._creater, self._updater] for index, prefix in enumerate(method_prefixes): prefix_len = len(prefix) 
if name[:prefix_len] == prefix: return method_functors[index](name[prefix_len:]) raise AttributeError(name) # Subnetpool methods def create_subnetpool(self, post_data): body = self.serialize_list(post_data, "subnetpools", "subnetpool") uri = self.get_uri("subnetpools") resp, body = self.post(uri, body) body = {'subnetpool': self.deserialize_list(body)} self.expected_success(201, resp.status) return rest_client.ResponseBody(resp, body) def get_subnetpool(self, id): uri = self.get_uri("subnetpools") subnetpool_uri = '%s/%s' % (uri, id) resp, body = self.get(subnetpool_uri) body = {'subnetpool': self.deserialize_list(body)} self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body) def delete_subnetpool(self, id): uri = self.get_uri("subnetpools") subnetpool_uri = '%s/%s' % (uri, id) resp, body = self.delete(subnetpool_uri) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp, body) def list_subnetpools(self): uri = self.get_uri("subnetpools") resp, body = self.get(uri) body = {'subnetpools': self.deserialize_list(body)} self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body) def update_subnetpool(self, id, post_data): body = self.serialize_list(post_data, "subnetpools", "subnetpool") uri = self.get_uri("subnetpools") subnetpool_uri = '%s/%s' % (uri, id) resp, body = self.put(subnetpool_uri, body) body = {'subnetpool': self.deserialize_list(body)} self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body) # Common methods that are hard to automate def create_bulk_network(self, names, shared=False): network_list = [{'name': name, 'shared': shared} for name in names] post_data = {'networks': network_list} body = self.serialize_list(post_data, "networks", "network") uri = self.get_uri("networks") resp, body = self.post(uri, body) body = {'networks': self.deserialize_list(body)} self.expected_success(201, resp.status) return rest_client.ResponseBody(resp, body) def 
create_bulk_subnet(self, subnet_list): post_data = {'subnets': subnet_list} body = self.serialize_list(post_data, 'subnets', 'subnet') uri = self.get_uri('subnets') resp, body = self.post(uri, body) body = {'subnets': self.deserialize_list(body)} self.expected_success(201, resp.status) return rest_client.ResponseBody(resp, body) def create_bulk_port(self, port_list): post_data = {'ports': port_list} body = self.serialize_list(post_data, 'ports', 'port') uri = self.get_uri('ports') resp, body = self.post(uri, body) body = {'ports': self.deserialize_list(body)} self.expected_success(201, resp.status) return rest_client.ResponseBody(resp, body) def wait_for_resource_deletion(self, resource_type, id): """Waits for a resource to be deleted.""" start_time = int(time.time()) while True: if self.is_resource_deleted(resource_type, id): return if int(time.time()) - start_time >= self.build_timeout: raise exceptions.TimeoutException time.sleep(self.build_interval) def is_resource_deleted(self, resource_type, id): method = 'show_' + resource_type try: getattr(self, method)(id) except AttributeError: raise Exception("Unknown resource type %s " % resource_type) except lib_exc.NotFound: return True return False def wait_for_resource_status(self, fetch, status, interval=None, timeout=None): """ @summary: Waits for a network resource to reach a status @param fetch: the callable to be used to query the resource status @type fecth: callable that takes no parameters and returns the resource @param status: the status that the resource has to reach @type status: String @param interval: the number of seconds to wait between each status query @type interval: Integer @param timeout: the maximum number of seconds to wait for the resource to reach the desired status @type timeout: Integer """ if not interval: interval = self.build_interval if not timeout: timeout = self.build_timeout start_time = time.time() while time.time() - start_time <= timeout: resource = fetch() if resource['status'] 
== status: return time.sleep(interval) # At this point, the wait has timed out message = 'Resource %s' % (str(resource)) message += ' failed to reach status %s' % status message += ' (current: %s)' % resource['status'] message += ' within the required time %s' % timeout caller = misc.find_test_caller() if caller: message = '(%s) %s' % (caller, message) raise exceptions.TimeoutException(message) def deserialize_single(self, body): return json.loads(body) def deserialize_list(self, body): res = json.loads(body) # expecting response in form # {'resources': [ res1, res2] } => when pagination disabled # {'resources': [..], 'resources_links': {}} => if pagination enabled for k in res.keys(): if k.endswith("_links"): continue return res[k] def serialize(self, data): return json.dumps(data) def serialize_list(self, data, root=None, item=None): return self.serialize(data) def update_quotas(self, tenant_id, **kwargs): put_body = {'quota': kwargs} body = json.dumps(put_body) uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id) resp, body = self.put(uri, body) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body['quota']) def reset_quotas(self, tenant_id): uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id) resp, body = self.delete(uri) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp, body) def create_router(self, name, admin_state_up=True, **kwargs): post_body = {'router': kwargs} post_body['router']['name'] = name post_body['router']['admin_state_up'] = admin_state_up body = json.dumps(post_body) uri = '%s/routers' % (self.uri_prefix) resp, body = self.post(uri, body) self.expected_success(201, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def _update_router(self, router_id, set_enable_snat, **kwargs): uri = '%s/routers/%s' % (self.uri_prefix, router_id) resp, body = self.get(uri) self.expected_success(200, resp.status) body = json.loads(body) update_body = {} 
update_body['name'] = kwargs.get('name', body['router']['name']) update_body['admin_state_up'] = kwargs.get( 'admin_state_up', body['router']['admin_state_up']) cur_gw_info = body['router']['external_gateway_info'] if cur_gw_info: # TODO(kevinbenton): setting the external gateway info is not # allowed for a regular tenant. If the ability to update is also # merged, a test case for this will need to be added similar to # the SNAT case. cur_gw_info.pop('external_fixed_ips', None) if not set_enable_snat: cur_gw_info.pop('enable_snat', None) update_body['external_gateway_info'] = kwargs.get( 'external_gateway_info', body['router']['external_gateway_info']) if 'distributed' in kwargs: update_body['distributed'] = kwargs['distributed'] update_body = dict(router=update_body) update_body = json.dumps(update_body) resp, body = self.put(uri, update_body) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def update_router(self, router_id, **kwargs): """Update a router leaving enable_snat to its default value.""" # If external_gateway_info contains enable_snat the request will fail # with 404 unless executed with admin client, and therefore we instruct # _update_router to not set this attribute # NOTE(salv-orlando): The above applies as long as Neutron's default # policy is to restrict enable_snat usage to admins only. return self._update_router(router_id, set_enable_snat=False, **kwargs) def update_router_with_snat_gw_info(self, router_id, **kwargs): """Update a router passing also the enable_snat attribute. This method must be execute with admin credentials, otherwise the API call will return a 404 error. 
""" return self._update_router(router_id, set_enable_snat=True, **kwargs) def add_router_interface_with_subnet_id(self, router_id, subnet_id): uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix, router_id) update_body = {"subnet_id": subnet_id} update_body = json.dumps(update_body) resp, body = self.put(uri, update_body) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def add_router_interface_with_port_id(self, router_id, port_id): uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix, router_id) update_body = {"port_id": port_id} update_body = json.dumps(update_body) resp, body = self.put(uri, update_body) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def remove_router_interface_with_subnet_id(self, router_id, subnet_id): uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix, router_id) update_body = {"subnet_id": subnet_id} update_body = json.dumps(update_body) resp, body = self.put(uri, update_body) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def remove_router_interface_with_port_id(self, router_id, port_id): uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix, router_id) update_body = {"port_id": port_id} update_body = json.dumps(update_body) resp, body = self.put(uri, update_body) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def associate_health_monitor_with_pool(self, health_monitor_id, pool_id): post_body = { "health_monitor": { "id": health_monitor_id, } } body = json.dumps(post_body) uri = '%s/lb/pools/%s/health_monitors' % (self.uri_prefix, pool_id) resp, body = self.post(uri, body) self.expected_success(201, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def disassociate_health_monitor_with_pool(self, health_monitor_id, pool_id): uri 
= '%s/lb/pools/%s/health_monitors/%s' % (self.uri_prefix, pool_id, health_monitor_id) resp, body = self.delete(uri) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp, body) def list_router_interfaces(self, uuid): uri = '%s/ports?device_id=%s' % (self.uri_prefix, uuid) resp, body = self.get(uri) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def update_agent(self, agent_id, agent_info): """ :param agent_info: Agent update information. E.g {"admin_state_up": True} """ uri = '%s/agents/%s' % (self.uri_prefix, agent_id) agent = {"agent": agent_info} body = json.dumps(agent) resp, body = self.put(uri, body) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def list_pools_hosted_by_one_lbaas_agent(self, agent_id): uri = '%s/agents/%s/loadbalancer-pools' % (self.uri_prefix, agent_id) resp, body = self.get(uri) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def show_lbaas_agent_hosting_pool(self, pool_id): uri = ('%s/lb/pools/%s/loadbalancer-agent' % (self.uri_prefix, pool_id)) resp, body = self.get(uri) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def list_routers_on_l3_agent(self, agent_id): uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id) resp, body = self.get(uri) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def list_l3_agents_hosting_router(self, router_id): uri = '%s/routers/%s/l3-agents' % (self.uri_prefix, router_id) resp, body = self.get(uri) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def add_router_to_l3_agent(self, agent_id, router_id): uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id) post_body = {"router_id": router_id} body = 
json.dumps(post_body) resp, body = self.post(uri, body) self.expected_success(201, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def remove_router_from_l3_agent(self, agent_id, router_id): uri = '%s/agents/%s/l3-routers/%s' % ( self.uri_prefix, agent_id, router_id) resp, body = self.delete(uri) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp, body) def list_dhcp_agent_hosting_network(self, network_id): uri = '%s/networks/%s/dhcp-agents' % (self.uri_prefix, network_id) resp, body = self.get(uri) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def list_networks_hosted_by_one_dhcp_agent(self, agent_id): uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id) resp, body = self.get(uri) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def remove_network_from_dhcp_agent(self, agent_id, network_id): uri = '%s/agents/%s/dhcp-networks/%s' % (self.uri_prefix, agent_id, network_id) resp, body = self.delete(uri) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp, body) def create_ikepolicy(self, name, **kwargs): post_body = { "ikepolicy": { "name": name, } } for key, val in kwargs.items(): post_body['ikepolicy'][key] = val body = json.dumps(post_body) uri = '%s/vpn/ikepolicies' % (self.uri_prefix) resp, body = self.post(uri, body) self.expected_success(201, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def update_extra_routes(self, router_id, nexthop, destination): uri = '%s/routers/%s' % (self.uri_prefix, router_id) put_body = { 'router': { 'routes': [{'nexthop': nexthop, "destination": destination}] } } body = json.dumps(put_body) resp, body = self.put(uri, body) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def delete_extra_routes(self, router_id): uri = 
'%s/routers/%s' % (self.uri_prefix, router_id) null_routes = None put_body = { 'router': { 'routes': null_routes } } body = json.dumps(put_body) resp, body = self.put(uri, body) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def list_lb_pool_stats(self, pool_id): uri = '%s/lb/pools/%s/stats' % (self.uri_prefix, pool_id) resp, body = self.get(uri) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def add_dhcp_agent_to_network(self, agent_id, network_id): post_body = {'network_id': network_id} body = json.dumps(post_body) uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id) resp, body = self.post(uri, body) self.expected_success(201, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def insert_firewall_rule_in_policy(self, firewall_policy_id, firewall_rule_id, insert_after="", insert_before=""): uri = '%s/fw/firewall_policies/%s/insert_rule' % (self.uri_prefix, firewall_policy_id) body = { "firewall_rule_id": firewall_rule_id, "insert_after": insert_after, "insert_before": insert_before } body = json.dumps(body) resp, body = self.put(uri, body) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def remove_firewall_rule_from_policy(self, firewall_policy_id, firewall_rule_id): uri = '%s/fw/firewall_policies/%s/remove_rule' % (self.uri_prefix, firewall_policy_id) update_body = {"firewall_rule_id": firewall_rule_id} update_body = json.dumps(update_body) resp, body = self.put(uri, update_body) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/lib/services/network/json/__init__.py0000664000567000056710000000000012701407726032251 0ustar 
jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/README.rst0000664000567000056710000000333212701407726024607 0ustar jenkinsjenkins00000000000000Welcome! ======== This contains the Tempest testing code for the Neutron Load Balancer as a Service (LBaaS) service. The tests currently require Tempest to be installed with a working devstack instance. It is assumed that you also have Neutron with the Neutron LBaaS service installed. Please see ``/neutron-lbaas/devstack/README.md`` for the required devstack configuration settings for Neutron-LBaaS. API and SCENARIO Testing with Tempest: -------------------------------------- Included in the repo are Tempest tests. If you are familiar with the Tempest Testing Framework continue on, otherwise please see the Tempest README : https://github.com/openstack/tempest/blob/master/README.rst 1. Using Devstack ^^^^^^^^^^^^^^^^^ If you have a running devstack environment, tempest will be automatically configured and placed in ``/opt/stack/tempest``. It will have a configuration file, tempest.conf, already set up to work with your devstack installation. Tests can be run in the following way but you need to have devstack running for apiv1 tests :: $> tox -e apiv1 for apiv2 tests :: $> tox -e apiv2 for scenario tests :: $> tox -e scenario 2. Not using Devstack ^^^^^^^^^^^^^^^^^^^^^ 6/19/2015 - As we do not have an external OpenStack environment with Neutron_LBaaS V2 to test with, this is TBD 3. Packages tempest vs. tempest-lib ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ As of 6/19/2015, tests are being migrated to tempest-lib, and while both that library and these tests are in-progress, a specific subset of tempest is also included in this repo at neutron_lbaas/tests/tempest/lib. 
External Resources: =================== For more information on the Tempest testing framework see: neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v2/0000775000567000056710000000000012701410110023423 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v2/__init__.py0000664000567000056710000000000012701407726025545 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v2/clients/0000775000567000056710000000000012701410110025064 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v2/clients/members_client.py0000664000567000056710000000512512701407726030454 0ustar jenkinsjenkins00000000000000# Copyright 2014, 2016 Rackspace US Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_serialization import jsonutils from six.moves.urllib import parse from tempest.lib.common import rest_client class MembersClientJSON(rest_client.RestClient): """ Tests Members API """ def list_members(self, pool_id, params=None): """ List all Members """ url = 'v2.0/lbaas/pools/{0}/members'.format(pool_id) if params: url = "{0}?{1}".format(url, parse.urlencode(params)) resp, body = self.get(url) body = jsonutils.loads(body) self.expected_success(200, resp.status) return rest_client.ResponseBodyList(resp, body['members']) def get_member(self, pool_id, member_id, params=None): url = 'v2.0/lbaas/pools/{0}/members/{1}'.format(pool_id, member_id) if params: url = '{0}?{1}'.format(url, parse.urlencode(params)) resp, body = self.get(url) body = jsonutils.loads(body) self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body["member"]) def create_member(self, pool_id, **kwargs): url = 'v2.0/lbaas/pools/{0}/members'.format(pool_id) post_body = jsonutils.dumps({"member": kwargs}) resp, body = self.post(url, post_body) body = jsonutils.loads(body) self.expected_success(201, resp.status) return rest_client.ResponseBody(resp, body["member"]) def update_member(self, pool_id, member_id, **kwargs): url = 'v2.0/lbaas/pools/{0}/members/{1}'.format(pool_id, member_id) put_body = jsonutils.dumps({"member": kwargs}) resp, body = self.put(url, put_body) body = jsonutils.loads(body) self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body["member"]) def delete_member(self, pool_id, member_id, **kwargs): url = 'v2.0/lbaas/pools/{0}/members/{1}'.format(pool_id, member_id) resp, body = self.delete(url) self.expected_success(204, resp.status) neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v2/clients/listeners_client.py0000664000567000056710000000526512701407726031037 0ustar jenkinsjenkins00000000000000# Copyright 2015, 2016 Rackspace US Inc. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from six.moves.urllib import parse from tempest.lib.common import rest_client class ListenersClientJSON(rest_client.RestClient): """ Tests Listeners API """ def list_listeners(self, params=None): """List all listeners.""" url = 'v2.0/lbaas/listeners' if params: url = '{0}?{1}'.format(url, parse.urlencode(params)) resp, body = self.get(url) body = jsonutils.loads(body) self.expected_success(200, resp.status) return rest_client.ResponseBodyList(resp, body['listeners']) def get_listener(self, listener_id, params=None): """Get listener details.""" url = 'v2.0/lbaas/listeners/{0}'.format(listener_id) if params: url = '{0}?{1}'.format(url, parse.urlencode(params)) resp, body = self.get(url) body = jsonutils.loads(body) self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body['listener']) def create_listener(self, **kwargs): """Create a listener build.""" post_body = jsonutils.dumps({'listener': kwargs}) resp, body = self.post('v2.0/lbaas/listeners', post_body) body = jsonutils.loads(body) self.expected_success(201, resp.status) return rest_client.ResponseBody(resp, body['listener']) def update_listener(self, listener_id, **kwargs): """Update an listener build.""" put_body = jsonutils.dumps({'listener': kwargs}) resp, body = self.put('v2.0/lbaas/listeners/{0}' .format(listener_id), put_body) body = jsonutils.loads(body) self.expected_success(200, resp.status) return 
rest_client.ResponseBody(resp, body['listener']) def delete_listener(self, listener_id): """Delete an existing listener build.""" resp, body = self.delete("v2.0/lbaas/listeners/{0}" .format(listener_id)) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp, body) neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v2/clients/load_balancers_client.py0000664000567000056710000000733112701407726031754 0ustar jenkinsjenkins00000000000000# Copyright 2014, 2016 Rackspace US Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_serialization import jsonutils from six.moves.urllib import parse from tempest.lib.common import rest_client class LoadBalancersClientJSON(rest_client.RestClient): """ Tests Load Balancers API """ def list_load_balancers(self, params=None): """List all load balancers.""" url = 'v2.0/lbaas/loadbalancers' if params: url = '{0}?{1}'.format(url, parse.urlencode(params)) resp, body = self.get(url) body = jsonutils.loads(body) self.expected_success(200, resp.status) return rest_client.ResponseBodyList(resp, body['loadbalancers']) def get_load_balancer(self, load_balancer_id, params=None): """Get load balancer details.""" url = 'v2.0/lbaas/loadbalancers/{0}'.format(load_balancer_id) if params: url = '{0}?{1}'.format(url, parse.urlencode(params)) resp, body = self.get(url) body = jsonutils.loads(body) self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body['loadbalancer']) def create_load_balancer(self, **kwargs): """Create a load balancer build.""" post_body = jsonutils.dumps({'loadbalancer': kwargs}) resp, body = self.post('v2.0/lbaas/loadbalancers', post_body) body = jsonutils.loads(body) self.expected_success(201, resp.status) return rest_client.ResponseBody(resp, body['loadbalancer']) def update_load_balancer(self, load_balancer_id, **kwargs): """Update a load balancer build.""" put_body = jsonutils.dumps({'loadbalancer': kwargs}) resp, body = self.put('v2.0/lbaas/loadbalancers/{0}' .format(load_balancer_id), put_body) body = jsonutils.loads(body) self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body['loadbalancer']) def delete_load_balancer(self, load_balancer_id): """Delete an existing load balancer build.""" resp, body = self.delete('v2.0/lbaas/loadbalancers/{0}' .format(load_balancer_id)) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp, body) def get_load_balancer_status_tree(self, load_balancer_id, params=None): """Get a load balancer's status tree.""" url = 
'v2.0/lbaas/loadbalancers/{0}/statuses'.format(load_balancer_id) if params: url = '{0}?{1}'.format(url, parse.urlencode(params)) resp, body = self.get(url) body = jsonutils.loads(body) self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body['statuses']) def get_load_balancer_stats(self, load_balancer_id, params=None): """Get a load balancer's stats.""" url = 'v2.0/lbaas/loadbalancers/{0}/stats'.format(load_balancer_id) if params: url = '{0}?{1}'.format(url, parse.urlencode(params)) resp, body = self.get(url) body = jsonutils.loads(body) self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body['stats']) neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v2/clients/__init__.py0000664000567000056710000000000012701407726027206 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v2/clients/pools_client.py0000664000567000056710000000510112701407726030150 0ustar jenkinsjenkins00000000000000# Copyright 2015, 2016 Rackspace Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_serialization import jsonutils from six.moves.urllib import parse from tempest.lib.common import rest_client class PoolsClientJSON(rest_client.RestClient): """ Test Pools API """ def list_pools(self, params=None): """List all pools""" url = 'v2.0/lbaas/pools' if params: url = '{0}?{1}'.format(url, parse.urlencode(params)) resp, body = self.get(url) body = jsonutils.loads(body) self.expected_success(200, resp.status) return rest_client.ResponseBodyList(resp, body['pools']) def get_pool(self, pool_id, params=None): """List details of a pool""" url = 'v2.0/lbaas/pools/{pool_id}'.format(pool_id=pool_id) if params: url = '{0}?{1}'.format(url, parse.urlencode(params)) resp, body = self.get(url) body = jsonutils.loads(body) self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body['pool']) def create_pool(self, **kwargs): """Create a pool""" url = 'v2.0/lbaas/pools' post_body = jsonutils.dumps({'pool': kwargs}) resp, body = self.post(url, post_body) body = jsonutils.loads(body) self.expected_success(201, resp.status) return rest_client.ResponseBody(resp, body['pool']) def update_pool(self, pool_id, **kwargs): """Update a pool""" url = 'v2.0/lbaas/pools/{pool_id}'.format(pool_id=pool_id) post_body = jsonutils.dumps({'pool': kwargs}) resp, body = self.put(url, post_body) body = jsonutils.loads(body) self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body['pool']) def delete_pool(self, pool_id): """Delete Pool""" url = 'v2.0/lbaas/pools/{pool_id}'.format(pool_id=pool_id) resp, body = self.delete(url) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp, body) neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v2/clients/health_monitors_client.py0000664000567000056710000000547112701407726032225 0ustar jenkinsjenkins00000000000000# Copyright 2014, 2016 Rackspace US Inc. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from six.moves.urllib import parse from tempest.lib.common import rest_client class HealthMonitorsClientJSON(rest_client.RestClient): """ Tests Health Monitors API """ def list_health_monitors(self, params=None): """List all health monitors.""" url = 'v2.0/lbaas/healthmonitors' if params: url = "{0}?{1}".format(url, parse.urlencode(params)) resp, body = self.get(url) body = jsonutils.loads(body) self.expected_success(200, resp.status) return rest_client.ResponseBodyList(resp, body['healthmonitors']) def get_health_monitor(self, health_monitor_id, params=None): """Get health monitor details.""" url = 'v2.0/lbaas/healthmonitors/{0}'.format(health_monitor_id) if params: url = '{0}?{1}'.format(url, parse.urlencode(params)) resp, body = self.get(url) body = jsonutils.loads(body) self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body["healthmonitor"]) def create_health_monitor(self, **kwargs): """Create a health monitor.""" url = 'v2.0/lbaas/healthmonitors' post_body = jsonutils.dumps({"healthmonitor": kwargs}) resp, body = self.post(url, post_body) body = jsonutils.loads(body) self.expected_success(201, resp.status) return rest_client.ResponseBody(resp, body["healthmonitor"]) def update_health_monitor(self, health_monitor_id, **kwargs): """Update a health monitor.""" url = 'v2.0/lbaas/healthmonitors/{0}'.format(health_monitor_id) put_body = 
jsonutils.dumps({"healthmonitor": kwargs}) resp, body = self.put(url, put_body) body = jsonutils.loads(body) self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body["healthmonitor"]) def delete_health_monitor(self, health_monitor_id): """Delete an existing health monitor.""" url = 'v2.0/lbaas/healthmonitors/{0}'.format(health_monitor_id) resp, body = self.delete(url) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp, body) neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v2/api/0000775000567000056710000000000012701410110024174 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v2/api/test_members_admin.py0000664000567000056710000000670212701407726030437 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # Copyright 2016 Rackspace Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import config from tempest.lib.common.utils import data_utils from tempest.lib import exceptions as ex from tempest import test from neutron_lbaas.tests.tempest.v2.api import base CONF = config.CONF class MemberTestJSON(base.BaseAdminTestCase): """ Test the member creation operation in admin scope in Neutron-LBaaS API using the REST client for members: """ @classmethod def resource_setup(cls): super(MemberTestJSON, cls).resource_setup() if not test.is_extension_enabled("lbaas", "network"): msg = "lbaas extension not enabled." 
raise cls.skipException(msg) network_name = data_utils.rand_name('network-') cls.network = cls.create_network(network_name) cls.subnet = cls.create_subnet(cls.network) cls.tenant_id = cls.subnet.get('tenant_id') cls.subnet_id = cls.subnet.get('id') cls.load_balancer = cls._create_active_load_balancer( tenant_id=cls.tenant_id, vip_subnet_id=cls.subnet.get('id')) cls.load_balancer_id = cls.load_balancer.get("id") cls._wait_for_load_balancer_status(cls.load_balancer_id) cls.listener = cls._create_listener( loadbalancer_id=cls.load_balancer.get('id'), protocol='HTTP', protocol_port=80) cls.listener_id = cls.listener.get('id') cls.pool = cls._create_pool(protocol='HTTP', tenant_id=cls.tenant_id, lb_algorithm='ROUND_ROBIN', listener_id=cls.listener_id) cls.pool_id = cls.pool.get('id') @classmethod def resource_cleanup(cls): super(MemberTestJSON, cls).resource_cleanup() @test.attr(type='smoke') def test_create_member_invalid_tenant_id(self): """Test create member with invalid tenant_id""" member_opts = {} member_opts['address'] = "127.0.0.1" member_opts['protocol_port'] = 80 member_opts['subnet_id'] = self.subnet_id member_opts['tenant_id'] = "$232!$pw" member = self._create_member(self.pool_id, **member_opts) self.addCleanup(self._delete_member, self.pool_id, member['id']) self.assertEqual(member['subnet_id'], self.subnet_id) self.assertEqual(member['tenant_id'], "$232!$pw") @test.attr(type='negative') def test_create_member_empty_tenant_id(self): """Test create member with an empty tenant_id should fail""" member_opts = {} member_opts['address'] = "127.0.0.1" member_opts['protocol_port'] = 80 member_opts['subnet_id'] = self.subnet_id member_opts['tenant_id'] = "" self.assertRaises(ex.BadRequest, self._create_member, self.pool_id, **member_opts) neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v2/api/test_members_non_admin.py0000664000567000056710000004750012701407726031312 0ustar jenkinsjenkins00000000000000# Copyright 2015, 2016 Rackspace US Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import config from tempest.lib.common.utils import data_utils from tempest.lib import exceptions as ex from tempest import test from neutron_lbaas.tests.tempest.v2.api import base CONF = config.CONF class MemberTestJSON(base.BaseTestCase): """ Test the following operations in Neutron-LBaaS API using the REST client for members: list members of a pool create a member of a Pool update a pool member delete a member """ @classmethod def resource_setup(cls): super(MemberTestJSON, cls).resource_setup() if not test.is_extension_enabled("lbaas", "network"): msg = "lbaas extension not enabled." 
raise cls.skipException(msg) network_name = data_utils.rand_name('network-') cls.network = cls.create_network(network_name) cls.subnet = cls.create_subnet(cls.network) cls.tenant_id = cls.subnet.get('tenant_id') cls.subnet_id = cls.subnet.get('id') cls.load_balancer = cls._create_active_load_balancer( tenant_id=cls.tenant_id, vip_subnet_id=cls.subnet.get('id')) cls.load_balancer_id = cls.load_balancer.get("id") cls.listener = cls._create_listener( loadbalancer_id=cls.load_balancer.get('id'), protocol='HTTP', protocol_port=80) cls.listener_id = cls.listener.get('id') cls.pool = cls._create_pool(protocol='HTTP', tenant_id=cls.tenant_id, lb_algorithm='ROUND_ROBIN', listener_id=cls.listener_id) cls.pool_id = cls.pool.get('id') @classmethod def resource_cleanup(cls): super(MemberTestJSON, cls).resource_cleanup() @test.attr(type='smoke') def test_list_empty_members(self): """Test that pool members are empty.""" members = self.members_client.list_members(self.pool_id) self.assertEmpty(members, msg='Initial pool was supposed to be empty') @test.attr(type='smoke') def test_list_3_members(self): """Test that we can list members. 
""" member_ips_exp = set([u"127.0.0.0", u"127.0.0.1", u"127.0.0.2"]) for ip in member_ips_exp: member_opts = self.build_member_opts() member_opts["address"] = ip member = self._create_member(self.pool_id, **member_opts) self.addCleanup(self._delete_member, self.pool_id, member['id']) members = self.members_client.list_members(self.pool_id) self.assertEqual(3, len(members)) for member in members: self.assertEqual(member["tenant_id"], self.tenant_id) self.assertEqual(member["protocol_port"], 80) self.assertEqual(member["subnet_id"], self.subnet_id) found_member_ips = set([m["address"] for m in members]) self.assertEqual(found_member_ips, member_ips_exp) @test.attr(type='smoke') def test_add_member(self): """Test that we can add a single member.""" expect_empty_members = self.members_client.list_members(self.pool_id) self.assertEmpty(expect_empty_members) member_opts = self.build_member_opts() member = self._create_member(self.pool_id, **member_opts) member_id = member.get("id") self.addCleanup(self._delete_member, self.pool_id, member_id) self.assertEqual(member_opts["address"], member["address"]) self.assertEqual(self.tenant_id, member["tenant_id"]) self.assertEqual(80, member["protocol_port"]) self.assertEqual(self.subnet_id, member["subnet_id"]) # Should have default values for admin_state_up and weight self.assertTrue(member["admin_state_up"]) self.assertEqual(1, member["weight"]) @test.attr(type='smoke') def test_get_member(self): """Test that we can fetch a member by id.""" member_opts = self.build_member_opts() member_id = self._create_member(self.pool_id, **member_opts)["id"] self.addCleanup(self._delete_member, self.pool_id, member_id) member = self.members_client.get_member(self.pool_id, member_id) self.assertEqual(member_id, member["id"]) self.assertEqual(member_opts["address"], member["address"]) self.assertEqual(member_opts["tenant_id"], member["tenant_id"]) self.assertEqual(member_opts["protocol_port"], member["protocol_port"]) 
self.assertEqual(member_opts["subnet_id"], member["subnet_id"]) @test.attr(type='smoke') def test_create_member_missing_required_field_tenant_id(self): """Test if a non_admin user can create a member with tenant_id missing """ member_opts = {} member_opts['address'] = "127.0.0.1" member_opts['protocol_port'] = 80 member_opts['subnet_id'] = self.subnet_id member = self._create_member(self.pool_id, **member_opts) self.addCleanup(self._delete_member, self.pool_id, member['id']) @test.attr(type='negative') def test_create_member_missing_required_field_address(self): """Test create a member with missing field address""" member_opts = {} member_opts['protocol_port'] = 80 member_opts['subnet_id'] = self.subnet_id self.assertRaises(ex.BadRequest, self._create_member, self.pool_id, **member_opts) @test.attr(type='negative') def test_create_member_missing_required_field_protocol_port(self): """Test create a member with missing field protocol_port""" member_opts = {} member_opts['address'] = "127.0.0.1" member_opts['subnet_id'] = self.subnet_id self.assertRaises(ex.BadRequest, self._create_member, self.pool_id, **member_opts) @test.attr(type='negative') def test_create_member_missing_required_field_subnet_id(self): """Test create a member with missing field subnet_id """ member_opts = {} member_opts['protocol_port'] = 80 member_opts['address'] = "127.0.0.1" self.assertRaises(ex.BadRequest, self._create_member, self.pool_id, **member_opts) @test.attr(type='negative') def test_raises_BadRequest_when_missing_attrs_during_member_create(self): """Test failure on missing attributes on member create.""" member_opts = {} self.assertRaises(ex.BadRequest, self._create_member, self.pool_id, **member_opts) @test.attr(type='negative') def test_create_member_invalid_tenant_id(self): """Test create member with invalid tenant_id""" member_opts = {} member_opts['address'] = "127.0.0.1" member_opts['protocol_port'] = 80 member_opts['subnet_id'] = self.subnet_id member_opts['tenant_id'] = 
"$232!$pw" self.assertRaises(ex.BadRequest, self._create_member, self.pool_id, **member_opts) @test.attr(type='negative') def test_create_member_invalid_address(self): """Test create member with invalid address""" member_opts = {} member_opts['address'] = "127$% True | | 6 | | True | True --> False | | 7 | | True | False --> True | | 8 | | True | False --> False | | 9 | | False | True --> True | | 10 | | False | True --> False | | 11 | | False | False --> True | | 12 | | False | False --> False | |-----|------------------|------------------|-------------------------| """ # set up the scenarios scenario_lb_T = ('lb_T', {'lb_flag': True}) scenario_lb_F = ('lb_F', {'lb_flag': False}) scenario_listener_T = ('listener_T', {'listener_flag': True}) scenario_listener_F = ('listener_F', {'listener_flag': False}) scenario_lis_to_flag_T = ('listener_to_flag_T', {'listener_to_flag': True}) scenario_lis_to_flag_F = ('listener_to_flag_F', {'listener_to_flag': False}) # The following command creates 4 unique scenarios scenario_create_member = testscenarios.multiply_scenarios( [scenario_lb_T, scenario_lb_F], [scenario_listener_T, scenario_listener_F]) # The following command creates 8 unique scenarios scenario_update_member = testscenarios.multiply_scenarios( [scenario_lis_to_flag_T, scenario_lis_to_flag_F], scenario_create_member) class CreateListenerAdminStateTests(base_ddt.AdminStateTests): scenarios = scenario_create_member @classmethod def resource_setup(cls): super(CreateListenerAdminStateTests, cls).resource_setup() @classmethod def resource_cleanup(cls): super(CreateListenerAdminStateTests, cls).resource_cleanup() @classmethod def setup_load_balancer(cls, **kwargs): super(CreateListenerAdminStateTests, cls).setup_load_balancer(**kwargs) def test_create_listener_with_lb_and_listener_admin_states_up(self): """Test create a listener. Create a listener with various combinations of values for admin_state_up field of the listener and the load-balancer. 
""" self.resource_setup_load_balancer(self.lb_flag) self.resource_setup_listener(self.listener_flag) self.check_operating_status() self._delete_listener(self.listener_id) self._delete_load_balancer(self.load_balancer_id) class UpdateListenerAdminStateTests(base_ddt.AdminStateTests): scenarios = scenario_update_member @classmethod def resource_setup(cls): super(UpdateListenerAdminStateTests, cls).resource_setup() @classmethod def resource_cleanup(cls): super(UpdateListenerAdminStateTests, cls).resource_cleanup() @classmethod def setup_load_balancer(cls, **kwargs): super(UpdateListenerAdminStateTests, cls).setup_load_balancer(**kwargs) def test_update_listener_with_listener_admin_state_up(self): """Test updating a listener. Update a listener with various combinations of admin_state_up field of the listener and the load-balancer. """ self.resource_setup_load_balancer(self.lb_flag) self.resource_setup_listener(self.listener_flag) self.check_operating_status() self.listener = (self._update_listener( self.listener_id, name='new_name', admin_state_up=self.listener_to_flag)) self.check_operating_status() self._delete_listener(self.listener_id) self._delete_load_balancer(self.load_balancer_id) neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v2/ddt/test_members_admin_state_up.py0000664000567000056710000001374112701407726032346 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest import config import testscenarios from neutron_lbaas.tests.tempest.v2.ddt import base_ddt CONF = config.CONF """ Tests the following operations in the Neutron-LBaaS API using the REST client with various combinations of values for the admin_state_up field of lb, listener, pool and member. create member update member """ # set up the scenarios scenario_lb_T = ('lb_T', {'lb_flag': True}) scenario_lb_F = ('lb_F', {'lb_flag': False}) scenario_listener_T = ('listener_T', {'listener_flag': True}) scenario_listener_F = ('listener_F', {'listener_flag': False}) scenario_pool_T = ('pool_T', {'pool_flag': True}) scenario_pool_F = ('pool_F', {'pool_flag': False}) scenario_member_T = ('member_T', {'member_flag': True}) scenario_member_F = ('member_F', {'member_flag': False}) scenario_mem_to_flag_T = ('member_to_flag_T', {'member_to_flag': True}) scenario_mem_to_flag_F = ('member_to_flag_F', {'member_to_flag': False}) # The following command creates 16 unique scenarios scenario_create_member = testscenarios.multiply_scenarios( [scenario_lb_T, scenario_lb_F], [scenario_listener_T, scenario_listener_F], [scenario_pool_T, scenario_pool_F], [scenario_member_T, scenario_member_F]) # The following command creates 32 unique scenarios scenario_update_member = testscenarios.multiply_scenarios( [scenario_mem_to_flag_T, scenario_mem_to_flag_F], scenario_create_member) class CreateMemberAdminStateTests(base_ddt.AdminStateTests): scenarios = scenario_create_member @classmethod def resource_setup(cls): super(CreateMemberAdminStateTests, cls).resource_setup() @classmethod def resource_cleanup(cls): super(CreateMemberAdminStateTests, cls).resource_cleanup() def setUp(self): """Set up load balancer, listener, pool and member.""" super(CreateMemberAdminStateTests, self).setUp() self.resource_setup_load_balancer(self.lb_flag) self.resource_setup_listener(self.listener_flag) self.resource_setup_pool(self.pool_flag) self.resource_setup_member(self.member_flag) def tearDown(self): 
"""Tearing down pools, listener and lb resources""" self._delete_member(self.pool_id, self.member_id) self._delete_pool(self.pool_id) self._delete_listener(self.listener_id) self._delete_load_balancer(self.load_balancer_id) super(CreateMemberAdminStateTests, self).tearDown() @classmethod def resource_setup_load_balancer(cls, admin_state_up_flag): (super(CreateMemberAdminStateTests, cls). resource_setup_load_balancer(admin_state_up_flag)) @classmethod def resource_setup_listener(cls, admin_state_up_flag): (super(CreateMemberAdminStateTests, cls). resource_setup_listener(admin_state_up_flag)) @classmethod def resource_setup_pool(cls, admin_state_up_flag): (super(CreateMemberAdminStateTests, cls). resource_setup_pool(admin_state_up_flag)) @classmethod def resource_setup_member(cls, admin_state_up_flag): (super(CreateMemberAdminStateTests, cls). resource_setup_member(admin_state_up_flag)) def test_create_member_with_admin_state_up(self): """Test create a member. """ self.check_operating_status() class UpdateMemberAdminStateTests(base_ddt.AdminStateTests): scenarios = scenario_update_member @classmethod def resource_setup(cls): super(UpdateMemberAdminStateTests, cls).resource_setup() @classmethod def resource_cleanup(cls): super(UpdateMemberAdminStateTests, cls).resource_cleanup() def setUp(self): """Set up load balancer, listener, pool and member resources.""" super(UpdateMemberAdminStateTests, self).setUp() self.resource_setup_load_balancer(self.lb_flag) self.resource_setup_listener(self.listener_flag) self.resource_setup_pool(self.pool_flag) self.resource_setup_member(self.member_flag) def tearDown(self): """Tearing down member, pool, listener and lb resources.""" self._delete_member(self.pool_id, self.member_id) self._delete_pool(self.pool_id) self._delete_listener(self.listener_id) self._delete_load_balancer(self.load_balancer_id) super(UpdateMemberAdminStateTests, self).tearDown() @classmethod def resource_setup_load_balancer(cls, admin_state_up_flag): 
(super(UpdateMemberAdminStateTests, cls). resource_setup_load_balancer(admin_state_up_flag)) @classmethod def resource_setup_listener(cls, admin_state_up_flag): (super(UpdateMemberAdminStateTests, cls). resource_setup_listener(admin_state_up_flag)) @classmethod def resource_setup_pool(cls, admin_state_up_flag): (super(UpdateMemberAdminStateTests, cls). resource_setup_pool(admin_state_up_flag)) @classmethod def resource_setup_member(cls, admin_state_up_flag): (super(UpdateMemberAdminStateTests, cls). resource_setup_member(admin_state_up_flag)) def test_update_member_with_admin_state_up(self): """Test update a member. """ self.create_member_kwargs = {'admin_state_up': self.member_to_flag} self.member = self._update_member(self.pool_id, self.member_id, **self.create_member_kwargs) self.check_operating_status() neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v2/scenario/0000775000567000056710000000000012701410110025226 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v2/scenario/test_healthmonitor_basic.py0000664000567000056710000000352412701407726032704 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest import test from neutron_lbaas.tests.tempest.v2.scenario import base class TestHealthMonitorBasic(base.BaseTestCase): @test.services('compute', 'network') def test_health_monitor_basic(self): """This test checks load balancing with health monitor. The following is the scenario outline: 1. Create two instances. 2. SSH to the instances and start two servers: primary and secondary. 3. Create a load balancer, with two members and with ROUND_ROBIN algorithm, associate the VIP with a floating ip. 4. Create a health monitor. 5. Send NUM requests to the floating ip and check that they are shared between the two servers. 6. Disable the primary server and validate the traffic is being sent only to the secondary server. """ self._create_servers() self._start_servers() self._create_load_balancer() self._create_health_monitor() self._check_load_balancing() # stopping the primary server self._stop_server() # Asserting the traffic is sent only to the secondary server self._traffic_validation_after_stopping_server() neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v2/scenario/test_session_persistence.py0000664000567000056710000000462612701407726032761 0ustar jenkinsjenkins00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest import test

from neutron_lbaas.tests.tempest.v2.scenario import base


class TestSessionPersistence(base.BaseTestCase):
    # Scenario test: exercises every session persistence mode (SOURCE_IP,
    # HTTP_COOKIE, APP_COOKIE) and the transition back to plain ROUND_ROBIN.

    @test.services('compute', 'network')
    def test_session_persistence(self):
        """This test checks load balancing with session persistence.

        The following is the scenario outline:
        1. Boot two instances.
        2. SSH to the instance and start two servers.
        3. Create a pool with SOURCE_IP session persistence type.
        4. Create a load balancer with two members and with ROUND_ROBIN
           algorithm.
        5. Send 10 requests to the floating ip, associated with the VIP,
           and make sure all the requests from the same ip
           are processed by the same member of the pool.
        6. Change session persistence type of the pool to HTTP_COOKIE.
        7. Check that this session persistence type also forces all
           the requests containing the same cookie to hit the same
           member of the pool.
        8. Change session persistence type of the pool to APP_COOKIE.
        9. Perform the same check.
        10. Turn session persistence off and check that the requests
            are again distributed according to the ROUND_ROBIN algorithm.
        """
        # NOTE(review): only one server is booted here although the docstring
        # says two; with a single backend both members share its IP on
        # different ports (see base._create_members) — confirm intended.
        self._create_server('server1')
        self._start_servers()
        self._create_load_balancer(persistence_type="SOURCE_IP")
        self._check_source_ip_persistence()
        self._update_pool_session_persistence("HTTP_COOKIE")
        self._check_cookie_session_persistence()
        self._update_pool_session_persistence("APP_COOKIE",
                                              cookie_name="JSESSIONID")
        self._check_cookie_session_persistence()
        # Passing no arguments clears session persistence entirely.
        self._update_pool_session_persistence()
        self._check_load_balancing()

# --- next archive member: base.py (header preserved) ---
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2016 Rackspace Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import cookielib
import shlex
import socket
import subprocess
import tempfile
import time

from oslo_log import log as logging
import six
from six.moves.urllib import error
from six.moves.urllib import request as urllib2
from tempest.common import waiters
from tempest import config
from tempest import exceptions
from tempest.lib import exceptions as lib_exc
from tempest.scenario import manager
from tempest.services.network import resources as net_resources
from tempest import test

from neutron_lbaas._i18n import _
from neutron_lbaas.tests.tempest.v2.clients import health_monitors_client
from neutron_lbaas.tests.tempest.v2.clients import listeners_client
from neutron_lbaas.tests.tempest.v2.clients import load_balancers_client
from neutron_lbaas.tests.tempest.v2.clients import members_client
from neutron_lbaas.tests.tempest.v2.clients import pools_client

# NOTE(review): the module rebinds the name 'config' to the CONF object, so
# 'config.network.…' below reads configuration values, not the module.
config = config.CONF

LOG = logging.getLogger(__name__)


def _setup_config_args(auth_provider):
    """Set up ServiceClient arguments using config settings.

    Returns a positional-argument list matching the constructor signature of
    the v2 LBaaS JSON clients (see the *ClientJSON instantiations in
    BaseTestCase.setUp, which unpack this list with *args).
    """
    service = config.network.catalog_type
    region = config.network.region or config.identity.region
    endpoint_type = config.network.endpoint_type
    build_interval = config.network.build_interval
    build_timeout = config.network.build_timeout

    # The disable_ssl appears in identity
    disable_ssl_certificate_validation = (
        config.identity.disable_ssl_certificate_validation)
    ca_certs = None

    # Trace in debug section
    trace_requests = config.debug.trace_requests

    return [auth_provider, service, region, endpoint_type,
            build_interval, build_timeout,
            disable_ssl_certificate_validation, ca_certs,
            trace_requests]


class BaseTestCase(manager.NetworkScenarioTest):
    """Common plumbing for the LBaaS v2 scenario tests.

    Boots backend servers, wires up load balancer / listener / pool /
    members, and provides traffic-validation helpers used by the concrete
    scenario test classes in this package.
    """

    def setUp(self):
        super(BaseTestCase, self).setUp()
        # Bookkeeping for resources created during a single test run.
        self.servers_keypairs = {}
        self.servers = {}
        self.members = []
        self.floating_ips = {}
        self.servers_floating_ips = {}
        self.server_ips = {}
        self.port1 = 80
        self.port2 = 88
        # Number of HTTP requests sent per traffic check.
        self.num = 50
        self.server_fixed_ips = {}
        self._create_security_group_for_test()
        self._set_net_and_subnet()
        mgr = self.get_client_manager()
        auth_provider = mgr.auth_provider
        self.client_args = _setup_config_args(auth_provider)
        self.load_balancers_client = (
            load_balancers_client.LoadBalancersClientJSON(*self.client_args))
        self.listeners_client = (
            listeners_client.ListenersClientJSON(*self.client_args))
        self.pools_client = pools_client.PoolsClientJSON(*self.client_args)
        self.members_client = members_client.MembersClientJSON(
            *self.client_args)
        self.health_monitors_client = (
            health_monitors_client.HealthMonitorsClientJSON(
                *self.client_args))

    @classmethod
    def skip_checks(cls):
        # Skip the whole class when LBaaS v2 or reachable networking is
        # unavailable in the deployment under test.
        super(BaseTestCase, cls).skip_checks()
        cfg = config.network
        if not test.is_extension_enabled('lbaasv2', 'network'):
            msg = 'LBaaS Extension is not enabled'
            raise cls.skipException(msg)
        if not (cfg.tenant_networks_reachable or cfg.public_network_id):
            msg = ('Either tenant_networks_reachable must be "true", or '
                   'public_network_id must be defined.')
            raise cls.skipException(msg)

    def _set_net_and_subnet(self):
        """
        Query and set appropriate network and subnet attributes to be used
        for the test. Existing tenant networks are used if they are found.
        The configured private network and associated subnet is used as a
        fallback in absence of tenant networking.
        """
        try:
            tenant_net = self._list_networks(tenant_id=self.tenant_id)[0]
        except IndexError:
            tenant_net = None

        if tenant_net:
            tenant_subnet = self._list_subnets(tenant_id=self.tenant_id)[0]
            self.subnet = net_resources.DeletableSubnet(
                client=self.network_client,
                **tenant_subnet)
            self.network = tenant_net
        else:
            self.network = self._get_network_by_name(
                config.compute.fixed_network_name)
            # We are assuming that the first subnet associated
            # with the fixed network is the one we want. In the future, we
            # should instead pull a subnet id from config, which is set by
            # devstack/admin/etc.
            subnet = self._list_subnets(network_id=self.network['id'])[0]
            self.subnet = net_resources.AttributeDict(subnet)

    def _create_security_group_for_test(self):
        # Security group opening ingress TCP on both backend ports.
        self.security_group = self._create_security_group(
            tenant_id=self.tenant_id)
        self._create_security_group_rules_for_port(self.port1)
        self._create_security_group_rules_for_port(self.port2)

    def _create_security_group_rules_for_port(self, port):
        # Allow ingress TCP traffic to a single port.
        rule = {
            'direction': 'ingress',
            'protocol': 'tcp',
            'port_range_min': port,
            'port_range_max': port,
        }
        self._create_security_group_rule(
            secgroup=self.security_group,
            tenant_id=self.tenant_id,
            **rule)

    def _ipv6_subnet(self, address6_mode):
        # Replace the test network/subnet with a router-attached IPv6 one.
        router = self._get_router(tenant_id=self.tenant_id)
        self.network = self._create_network(tenant_id=self.tenant_id)
        self.subnet = self._create_subnet(network=self.network,
                                          namestart='sub6',
                                          ip_version=6,
                                          ipv6_ra_mode=address6_mode,
                                          ipv6_address_mode=address6_mode)
        self.subnet.add_to_router(router_id=router['id'])
        self.addCleanup(self.subnet.delete)

    def _create_server(self, name):
        # Boot one backend VM, wait for ACTIVE, and record how to reach it
        # (floating IP when tenant networks are unreachable, fixed IP
        # otherwise).
        keypair = self.create_keypair()
        security_groups = [{'name': self.security_group['name']}]
        create_kwargs = {
            'networks': [
                {'uuid': self.network['id']},
            ],
            'key_name': keypair['name'],
            'security_groups': security_groups,
            'name': name
        }
        net_name = self.network['name']
        server = self.create_server(**create_kwargs)
        waiters.wait_for_server_status(self.servers_client,
                                       server['id'], 'ACTIVE')
        server = self.servers_client.show_server(server['id'])
        server = server['server']
        self.servers_keypairs[server['id']] = keypair
        if (config.network.public_network_id and not
                config.network.tenant_networks_reachable):
            public_network_id = config.network.public_network_id
            floating_ip = self.create_floating_ip(
                server, public_network_id)
            self.floating_ips[floating_ip] = server
            self.server_ips[server['id']] = floating_ip.floating_ip_address
        else:
            self.server_ips[server['id']] =\
                server['addresses'][net_name][0]['addr']
        # Fixed IP is always tracked; it is what the pool members use.
        self.server_fixed_ips[server['id']] =\
            server['addresses'][net_name][0]['addr']
        self.assertTrue(self.servers_keypairs)
        return server

    def _create_servers(self):
        # Boot the 'primary' and 'secondary' backend pair.
        for count in range(2):
            self.server = self._create_server(name=("server%s" % (count + 1)))
            if count == 0:
                self.servers['primary'] = self.server['id']
            else:
                self.servers['secondary'] = self.server['id']
        self.assertEqual(len(self.servers_keypairs), 2)

    def _stop_server(self):
        # Stop only the primary backend (used by the health monitor test).
        for name, value in six.iteritems(self.servers):
            if name == 'primary':
                self.servers_client.stop_server(value)
                waiters.wait_for_server_status(self.servers_client,
                                               value, 'SHUTOFF')

    def _start_server(self):
        # Restart the primary backend and wait until it is ACTIVE again.
        for name, value in six.iteritems(self.servers):
            if name == 'primary':
                self.servers_client.start(value)
                waiters.wait_for_server_status(self.servers_client, value,
                                               'ACTIVE')

    def _start_servers(self):
        """
        Start two backends

        1. SSH to the instance
        2. Start two http backends listening on ports 80 and 88 respectively
        """
        for server_id, ip in six.iteritems(self.server_ips):
            private_key = self.servers_keypairs[server_id]['private_key']
            server = self.servers_client.show_server(server_id)['server']
            server_name = server['name']
            username = config.validation.image_ssh_user
            ssh_client = self.get_remote_client(
                ip_address=ip,
                private_key=private_key)

            # Write a backend's response into a file; the response embeds the
            # server name so traffic checks can tell the backends apart, and
            # a JSESSIONID cookie for the persistence tests.
            resp = ('echo -ne "HTTP/1.1 200 OK\r\nContent-Length: 7\r\n'
                    'Set-Cookie:JSESSIONID=%(s_id)s\r\nConnection: close\r\n'
                    'Content-Type: text/html; '
                    'charset=UTF-8\r\n\r\n%(server)s"; cat >/dev/null')

            with tempfile.NamedTemporaryFile() as script:
                script.write(resp % {'s_id': server_name[-1],
                                     'server': server_name})
                script.flush()
                with tempfile.NamedTemporaryFile() as key:
                    key.write(private_key)
                    key.flush()
                    self.copy_file_to_host(script.name,
                                           "/tmp/script1",
                                           ip,
                                           username, key.name)

            # Start netcat
            start_server = ('while true; do '
                            'sudo nc -ll -p %(port)s -e sh /tmp/%(script)s; '
                            'done > /dev/null &')
            cmd = start_server % {'port': self.port1,
                                  'script': 'script1'}
            ssh_client.exec_command(cmd)

            # Single-server mode: emulate the second backend on port2 of the
            # same VM so the pool can still have two distinct members.
            if len(self.server_ips) == 1:
                with tempfile.NamedTemporaryFile() as script:
                    script.write(resp % {'s_id': 2,
                                         'server': 'server2'})
                    script.flush()
                    with tempfile.NamedTemporaryFile() as key:
                        key.write(private_key)
                        key.flush()
                        self.copy_file_to_host(script.name,
                                               "/tmp/script2", ip,
                                               username, key.name)
                cmd = start_server % {'port': self.port2,
                                      'script': 'script2'}
                ssh_client.exec_command(cmd)

    def _create_listener(self, load_balancer_id):
        """Create a listener with HTTP protocol listening on port 80."""
        self.listener = self.listeners_client.create_listener(
            loadbalancer_id=load_balancer_id,
            protocol='HTTP', protocol_port=80)
        self.assertTrue(self.listener)
        self.addCleanup(self._cleanup_listener, self.listener.get('id'),
                        load_balancer_id=load_balancer_id)
        return self.listener

    def _create_health_monitor(self):
        """Create an HTTP health monitor attached to the test pool."""
        self.hm = self.health_monitors_client.create_health_monitor(
            type='HTTP', max_retries=5, delay=3, timeout=5,
            pool_id=self.pool['id'])
        self.assertTrue(self.hm)
        self.addCleanup(self._cleanup_health_monitor,
                        self.hm.get('id'),
                        load_balancer_id=self.load_balancer['id'])

    def _create_pool(self, listener_id, persistence_type=None,
                     cookie_name=None):
        """Create a pool with ROUND_ROBIN algorithm."""
        pool = {
            "listener_id": listener_id,
            "lb_algorithm": "ROUND_ROBIN",
            "protocol": "HTTP"
        }
        if persistence_type:
            pool.update({'session_persistence': {'type': persistence_type}})
        # NOTE(review): a non-None cookie_name REPLACES the whole
        # session_persistence dict set above, dropping 'type' — looks like it
        # should merge instead; confirm against the callers before changing.
        if cookie_name:
            pool.update({'session_persistence': {"cookie_name": cookie_name}})
        self.pool = self.pools_client.create_pool(**pool)
        self.assertTrue(self.pool)
        self.addCleanup(self._cleanup_pool, self.pool['id'],
                        load_balancer_id=self.load_balancer['id'])
        return self.pool

    def _cleanup_load_balancer(self, load_balancer_id):
        self.delete_wrapper(self.load_balancers_client.delete_load_balancer,
                            load_balancer_id)
        self._wait_for_load_balancer_status(load_balancer_id, delete=True)

    def _cleanup_listener(self, listener_id, load_balancer_id=None):
        self.delete_wrapper(self.listeners_client.delete_listener, listener_id)
        if load_balancer_id:
            self._wait_for_load_balancer_status(load_balancer_id)

    def _cleanup_pool(self, pool_id, load_balancer_id=None):
        self.delete_wrapper(self.pools_client.delete_pool, pool_id)
        if load_balancer_id:
            self._wait_for_load_balancer_status(load_balancer_id)

    def _cleanup_health_monitor(self, hm_id, load_balancer_id=None):
        self.delete_wrapper(self.health_monitors_client.delete_health_monitor,
                            hm_id)
        if load_balancer_id:
            self._wait_for_load_balancer_status(load_balancer_id)

    def _create_members(self, load_balancer_id=None, pool_id=None,
                        subnet_id=None):
        """
        Create two members.

        In case there is only one server, create both members with the same
        ip but with different ports to listen on.
        """
        for server_id, ip in six.iteritems(self.server_fixed_ips):
            if len(self.server_fixed_ips) == 1:
                member1 = self.members_client.create_member(
                    pool_id=pool_id,
                    address=ip,
                    protocol_port=self.port1,
                    subnet_id=subnet_id)
                self._wait_for_load_balancer_status(load_balancer_id)
                member2 = self.members_client.create_member(
                    pool_id=pool_id,
                    address=ip,
                    protocol_port=self.port2,
                    subnet_id=subnet_id)
                self._wait_for_load_balancer_status(load_balancer_id)
                self.members.extend([member1, member2])
            else:
                member = self.members_client.create_member(
                    pool_id=pool_id,
                    address=ip,
                    protocol_port=self.port1,
                    subnet_id=subnet_id)
                self._wait_for_load_balancer_status(load_balancer_id)
                self.members.append(member)
        self.assertTrue(self.members)

    def _assign_floating_ip_to_lb_vip(self, lb):
        # Attach a floating IP to the load balancer's VIP port so traffic
        # checks can reach it from outside the tenant network.
        public_network_id = config.network.public_network_id
        port_id = lb.vip_port_id
        floating_ip = self.create_floating_ip(lb, public_network_id,
                                              port_id=port_id)
        self.floating_ips.setdefault(lb.id, [])
        self.floating_ips[lb.id].append(floating_ip)
        # Check for floating ip status before you check load-balancer
        self.check_floating_ip_status(floating_ip, "ACTIVE")

    def _create_load_balancer(self, ip_version=4, persistence_type=None):
        # Build the whole LB stack: load balancer -> listener -> pool ->
        # members, waiting for the LB to go ACTIVE between each step.
        self.create_lb_kwargs = {'tenant_id': self.tenant_id,
                                 'vip_subnet_id': self.subnet['id']}
        self.load_balancer = self.load_balancers_client.create_load_balancer(
            **self.create_lb_kwargs)
        load_balancer_id = self.load_balancer['id']
        self.addCleanup(self._cleanup_load_balancer, load_balancer_id)
        self._wait_for_load_balancer_status(load_balancer_id)

        listener = self._create_listener(load_balancer_id=load_balancer_id)
        self._wait_for_load_balancer_status(load_balancer_id)

        self.pool = self._create_pool(listener_id=listener.get('id'),
                                      persistence_type=persistence_type)
        self._wait_for_load_balancer_status(load_balancer_id)

        self._create_members(load_balancer_id=load_balancer_id,
                             pool_id=self.pool['id'],
                             subnet_id=self.subnet['id'])

        self.vip_ip = self.load_balancer.get('vip_address')

        # if the ipv4 is used for lb, then fetch the right values from
        # tempest.conf file
        if ip_version == 4:
            if (config.network.public_network_id and not
                    config.network.tenant_networks_reachable):
                load_balancer = net_resources.AttributeDict(self.load_balancer)
                self._assign_floating_ip_to_lb_vip(load_balancer)
                self.vip_ip = self.floating_ips[
                    load_balancer.id][0]['floating_ip_address']

        # Currently the ovs-agent is not enforcing security groups on the
        # vip port - see https://bugs.launchpad.net/neutron/+bug/1163569
        # However the linuxbridge-agent does, and it is necessary to add a
        # security group with a rule that allows tcp port 80 to the vip port.
        # self.network_client.update_port(
        self.ports_client.update_port(
            self.load_balancer.get('vip_port_id'),
            security_groups=[self.security_group.id])

    def _wait_for_load_balancer_status(self, load_balancer_id,
                                       provisioning_status='ACTIVE',
                                       operating_status='ONLINE',
                                       delete=False):
        # Poll until the LB reaches the requested provisioning/operating
        # statuses, raising on ERROR or on overall timeout. When delete=True,
        # a NotFound means the LB is gone and is treated as success.
        interval_time = 1
        timeout = 600
        end_time = time.time() + timeout
        while time.time() < end_time:
            try:
                lb = self.load_balancers_client.get_load_balancer(
                    load_balancer_id)
            except lib_exc.NotFound as e:
                if delete:
                    return
                else:
                    raise e
            if (lb.get('provisioning_status') == provisioning_status and
                    lb.get('operating_status') == operating_status):
                break
            elif (lb.get('provisioning_status') == 'ERROR' or
                    lb.get('operating_status') == 'ERROR'):
                raise Exception(
                    _("Wait for load balancer for load balancer: {lb_id} "
                      "ran for {timeout} seconds and an ERROR was encountered "
                      "with provisioning status: {provisioning_status} and "
                      "operating status: {operating_status}").format(
                          timeout=timeout,
                          lb_id=lb.get('id'),
                          provisioning_status=provisioning_status,
                          operating_status=operating_status))
            time.sleep(interval_time)
        else:
            # while/else: only reached when the loop exhausts without break.
            raise Exception(
                _("Wait for load balancer ran for {timeout} seconds and did "
                  "not observe {lb_id} reach {provisioning_status} "
                  "provisioning status and {operating_status} "
                  "operating status.").format(
                      timeout=timeout,
                      lb_id=lb.get('id'),
                      provisioning_status=provisioning_status,
                      operating_status=operating_status))
        return lb

    def _wait_for_pool_session_persistence(self, pool_id, sp_type=None):
        # Poll until the pool reports the requested persistence type (or no
        # persistence at all when sp_type is None).
        interval_time = 1
        timeout = 10
        end_time = time.time() + timeout
        while time.time() < end_time:
            pool = self.pools_client.get_pool(pool_id)
            sp = pool.get('session_persistence', None)
            if (not (sp_type or sp) or
                    pool['session_persistence']['type'] == sp_type):
                return pool
            time.sleep(interval_time)
        raise Exception(
            _("Wait for pool ran for {timeout} seconds and did "
              "not observe {pool_id} update session persistence type "
              "to {type}.").format(
                  timeout=timeout,
                  pool_id=pool_id,
                  type=sp_type))

    def _check_load_balancing(self):
        """
        1. Send NUM requests on the floating ip associated with the VIP
        2. Check that the requests are shared between the two servers
        """
        self._check_connection(self.vip_ip)
        counters = self._send_requests(self.vip_ip, ["server1", "server2"])
        for member, counter in six.iteritems(counters):
            self.assertGreater(counter, 0,
                               'Member %s never balanced' % member)

    def _check_connection(self, check_ip, port=80):
        # Block until an HTTP GET against check_ip:port returns 200, or the
        # configured ping_timeout elapses.
        def try_connect(check_ip, port):
            try:
                resp = urllib2.urlopen("http://{0}:{1}/".format(check_ip,
                                                                port))
                if resp.getcode() == 200:
                    return True
                return False
            except IOError:
                return False
            except error.HTTPError:
                return False
        timeout = config.validation.ping_timeout
        start = time.time()
        while not try_connect(check_ip, port):
            if (time.time() - start) > timeout:
                message = "Timed out trying to connect to %s" % check_ip
                raise exceptions.TimeoutException(message)

    def _send_requests(self, vip_ip, servers):
        # Fire self.num requests at the VIP and count which backend answered
        # each one (the response body is the backend's server name).
        counters = dict.fromkeys(servers, 0)
        for i in range(self.num):
            try:
                server = urllib2.urlopen("http://{0}/".format(vip_ip),
                                         None, 2).read()
                counters[server] += 1
            # HTTP exception means fail of server, so don't increase counter
            # of success and continue connection tries
            except (error.HTTPError, error.URLError,
                    socket.timeout):
                continue
        return counters

    def _traffic_validation_after_stopping_server(self):
        """Check that the requests are sent to the only ACTIVE server."""
        counters = self._send_requests(self.vip_ip, ["server1", "server2"])

        # Assert that no traffic is sent to server1.
        for member, counter in six.iteritems(counters):
            if member == 'server1':
                self.assertEqual(counter, 0,
                                 'Member %s is not balanced' % member)

    def _check_load_balancing_after_deleting_resources(self):
        """
        Check that the requests are not sent to any servers

        Assert that no traffic is sent to any servers
        """
        counters = self._send_requests(self.vip_ip, ["server1", "server2"])
        for member, counter in six.iteritems(counters):
            self.assertEqual(counter, 0, 'Member %s is balanced' % member)

    def _check_source_ip_persistence(self):
        """Check source ip session persistence.

        Verify that all requests from our ip are answered by the same server
        that handled it the first time.
        """
        # Check that backends are reachable
        self._check_connection(self.vip_ip)

        resp = []
        for count in range(10):
            resp.append(
                urllib2.urlopen("http://{0}/".format(self.vip_ip)).read())
        self.assertEqual(len(set(resp)), 1)

    def _update_pool_session_persistence(self, persistence_type=None,
                                         cookie_name=None):
        """Update a pool with new session persistence type and cookie name."""
        # NOTE(review): passing cookie_name without persistence_type raises
        # KeyError on update_data — callers always pass both; confirm.
        update_data = {}
        if persistence_type:
            update_data = {"session_persistence": {
                "type": persistence_type}}
        if cookie_name:
            update_data['session_persistence'].update(
                {"cookie_name": cookie_name})
        self.pools_client.update_pool(self.pool['id'], **update_data)
        self.pool = self._wait_for_pool_session_persistence(self.pool['id'],
                                                            persistence_type)
        self._wait_for_load_balancer_status(self.load_balancer['id'])
        if persistence_type:
            self.assertEqual(persistence_type,
                             self.pool['session_persistence']['type'])
        if cookie_name:
            self.assertEqual(cookie_name,
                             self.pool['session_persistence']['cookie_name'])

    def _check_cookie_session_persistence(self):
        """Check cookie persistence types by injecting cookies in requests."""
        # Send first request and get cookie from the server's response
        cj = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        opener.open("http://{0}/".format(self.vip_ip))

        resp = []
        # Send 10 subsequent requests with the cookie inserted in the headers.
        for count in range(10):
            request = urllib2.Request("http://{0}/".format(self.vip_ip))
            cj.add_cookie_header(request)
            response = urllib2.urlopen(request)
            resp.append(response.read())
        self.assertEqual(len(set(resp)), 1, message=resp)

    def copy_file_to_host(self, file_from, dest, host, username, pkey):
        # scp a local file to a backend VM; logs (but does not raise) on a
        # non-zero scp exit status and returns scp's stdout either way.
        dest = "%s@%s:%s" % (username, host, dest)
        cmd = ("scp -v -o UserKnownHostsFile=/dev/null "
               "-o StrictHostKeyChecking=no "
               "-i %(pkey)s %(file1)s %(dest)s" % {'pkey': pkey,
                                                   'file1': file_from,
                                                   'dest': dest})
        args = shlex.split(cmd.encode('utf-8'))
        subprocess_args = {'stdout': subprocess.PIPE,
                           'stderr': subprocess.STDOUT}
        proc = subprocess.Popen(args, **subprocess_args)
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            LOG.error(("Command {0} returned with exit status {1},"
                       "output {2}, error {3}").format(cmd, proc.returncode,
                                                       stdout, stderr))
        return stdout

# --- next archive member: test_listener_basic.py (header preserved) ---
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import test

from neutron_lbaas.tests.tempest.v2.scenario import base


class TestListenerBasic(base.BaseTestCase):
    """
    This test checks load balancing and validates traffic
    The following is the scenario outline:
    1. Create an instance
    2. SSH to the instance and start two servers: primary, secondary
    3. Create a load balancer, listener and pool with two members using
       ROUND_ROBIN algorithm, associate the VIP with a floating ip
    4. Send NUM requests to the floating ip and check that they are shared
       between the two servers.
    5. Delete listener and validate the traffic is not sent to any members
    """

    def _delete_listener(self):
        """Delete a listener to test listener scenario."""
        # Pool must be removed first: it is a child of the listener.
        self._cleanup_pool(self.pool['id'], self.load_balancer['id'])
        self._cleanup_listener(self.listener['id'], self.load_balancer['id'])

    @test.services('compute', 'network')
    def test_listener_basic(self):
        self._create_server('server1')
        self._start_servers()
        self._create_load_balancer()
        self._check_load_balancing()
        # With the listener gone, no backend should receive any traffic.
        self._delete_listener()
        self._check_load_balancing_after_deleting_resources()

# --- next archive member: test_load_balancer_basic.py (header preserved) ---
# Copyright 2015 Rackspace Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import test

from neutron_lbaas.tests.tempest.v2.scenario import base


class TestLoadBalancerBasic(base.BaseTestCase):
    # Smoke-level scenario: a single backend VM exposes two HTTP servers so
    # the two pool members can still be distinguished (see base class).

    @test.services('compute', 'network')
    def test_load_balancer_basic(self):
        """This test checks basic load balancing.

        The following is the scenario outline:
        1. Create an instance.
        2. SSH to the instance and start two servers.
        3. Create a load balancer with two members and with ROUND_ROBIN
           algorithm.
        4. Associate the VIP with a floating ip.
        5. Send NUM requests to the floating ip and check that they are shared
           between the two servers.
        """
        self._create_server('server1')
        self._start_servers()
        self._create_load_balancer()
        self._check_load_balancing()

# --- next archive member: v1/api/clients.py (header preserved) ---
# Copyright 2012 OpenStack Foundation
# Copyright 2016 Rackspace Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common import cred_provider
from tempest import config
from tempest import manager
from tempest.services.identity.v2.json.tenants_client import \
    TenantsClient

from neutron_lbaas.tests.tempest.lib.services.network.json.network_client import \
    NetworkClientJSON

CONF = config.CONF


class Manager(manager.Manager):
    """
    Top level manager for OpenStack tempest clients
    """

    # Shared kwargs applied to every client this manager builds.
    default_params = {
        'disable_ssl_certificate_validation':
            CONF.identity.disable_ssl_certificate_validation,
        'ca_certs': CONF.identity.ca_certificates_file,
        'trace_requests': CONF.debug.trace_requests
    }

    # NOTE: Tempest uses timeout values of compute API if project specific
    # timeout values don't exist.
    default_params_with_timeout_values = {
        'build_interval': CONF.compute.build_interval,
        'build_timeout': CONF.compute.build_timeout
    }
    default_params_with_timeout_values.update(default_params)

    def __init__(self, credentials=None, service=None):
        super(Manager, self).__init__(credentials=credentials)
        self._set_identity_clients()
        # Neutron network client for the v1 LBaaS API tests.
        self.network_client = NetworkClientJSON(
            self.auth_provider,
            CONF.network.catalog_type,
            CONF.network.region or CONF.identity.region,
            endpoint_type=CONF.network.endpoint_type,
            build_interval=CONF.network.build_interval,
            build_timeout=CONF.network.build_timeout,
            **self.default_params)

    def _set_identity_clients(self):
        # Build the Keystone v2 admin tenants client used by the tests.
        params = {
            'service': CONF.identity.catalog_type,
            'region': CONF.identity.region,
        }
        params.update(self.default_params_with_timeout_values)

        params_v2_admin = params.copy()
        params_v2_admin['endpoint_type'] = CONF.identity.v2_admin_endpoint_type
        self.tenants_client = TenantsClient(
            self.auth_provider, **params_v2_admin)


class AdminManager(Manager):
    """
    Manager object that uses the admin credentials for its
    managed client objects
    """

    def __init__(self, service=None):
        super(AdminManager, self).__init__(
            credentials=cred_provider.get_configured_credentials(
                'identity_admin'),
            service=service)
neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v1/api/__init__.py0000664000567000056710000000000012701407726026315 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v1/api/test_load_balancer.py0000664000567000056710000005063012701407727030402 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2016 Rackspace Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import test from tempest.lib.common.utils import data_utils from tempest.lib import decorators from neutron_lbaas.tests.tempest.v1.api import base class LoadBalancerTestJSON(base.BaseNetworkTest): """ Tests the following operations in the Neutron API using the REST client for Neutron: create vIP, and Pool show vIP list vIP update vIP delete vIP update pool delete pool show pool list pool health monitoring operations """ @classmethod def resource_setup(cls): super(LoadBalancerTestJSON, cls).resource_setup() if not test.is_extension_enabled('lbaas', 'network'): msg = "lbaas extension not enabled." 
raise cls.skipException(msg) cls.network = cls.create_network() cls.name = cls.network['name'] cls.subnet = cls.create_subnet(cls.network) pool_name = data_utils.rand_name('pool-') vip_name = data_utils.rand_name('vip-') cls.pool = cls.create_pool(pool_name, "ROUND_ROBIN", "HTTP", cls.subnet) cls.vip = cls.create_vip(name=vip_name, protocol="HTTP", protocol_port=80, subnet=cls.subnet, pool=cls.pool) cls.member = cls.create_member(80, cls.pool, cls._ip_version) cls.member_address = ("10.0.9.47" if cls._ip_version == 4 else "2015::beef") cls.health_monitor = cls.create_health_monitor(delay=4, max_retries=3, Type="TCP", timeout=1) def _check_list_with_filter(self, obj_name, attr_exceptions, **kwargs): create_obj = getattr(self.client, 'create_' + obj_name) delete_obj = getattr(self.client, 'delete_' + obj_name) list_objs = getattr(self.client, 'list_' + obj_name + 's') body = create_obj(**kwargs) obj = body[obj_name] self.addCleanup(delete_obj, obj['id']) for key, value in obj.iteritems(): # It is not relevant to filter by all arguments. 
That is why # there is a list of attr to except if key not in attr_exceptions: body = list_objs(**{key: value}) objs = [v[key] for v in body[obj_name + 's']] self.assertIn(value, objs) @test.attr(type='smoke') @decorators.idempotent_id('c96dbfab-4a80-4e74-a535-e950b5bedd47') def test_list_vips(self): # Verify the vIP exists in the list of all vIPs body = self.client.list_vips() vips = body['vips'] self.assertIn(self.vip['id'], [v['id'] for v in vips]) @test.attr(type='smoke') @decorators.idempotent_id('b8853f65-5089-4e69-befd-041a143427ff') def test_list_vips_with_filter(self): name = data_utils.rand_name('vip-') body = self.client.create_pool(name=data_utils.rand_name("pool-"), lb_method="ROUND_ROBIN", protocol="HTTPS", subnet_id=self.subnet['id']) pool = body['pool'] self.addCleanup(self.client.delete_pool, pool['id']) attr_exceptions = ['status', 'session_persistence', 'status_description'] self._check_list_with_filter( 'vip', attr_exceptions, name=name, protocol="HTTPS", protocol_port=81, subnet_id=self.subnet['id'], pool_id=pool['id'], description=data_utils.rand_name('description-'), admin_state_up=False) @test.attr(type='smoke') @decorators.idempotent_id('27f56083-9af9-4a48-abe9-ca1bcc6c9035') def test_create_update_delete_pool_vip(self): # Creates a vip name = data_utils.rand_name('vip-') address = self.subnet['allocation_pools'][0]['end'] body = self.client.create_pool( name=data_utils.rand_name("pool-"), lb_method='ROUND_ROBIN', protocol='HTTP', subnet_id=self.subnet['id']) pool = body['pool'] body = self.client.create_vip(name=name, protocol="HTTP", protocol_port=80, subnet_id=self.subnet['id'], pool_id=pool['id'], address=address) vip = body['vip'] vip_id = vip['id'] # Confirm VIP's address correctness with a show body = self.client.show_vip(vip_id) vip = body['vip'] self.assertEqual(address, vip['address']) # Verification of vip update new_name = "New_vip" new_description = "New description" persistence_type = "HTTP_COOKIE" update_data = 
{"session_persistence": { "type": persistence_type}} body = self.client.update_vip(vip_id, name=new_name, description=new_description, connection_limit=10, admin_state_up=False, **update_data) updated_vip = body['vip'] self.assertEqual(new_name, updated_vip['name']) self.assertEqual(new_description, updated_vip['description']) self.assertEqual(10, updated_vip['connection_limit']) self.assertFalse(updated_vip['admin_state_up']) self.assertEqual(persistence_type, updated_vip['session_persistence']['type']) self.client.delete_vip(vip['id']) self.client.wait_for_resource_deletion('vip', vip['id']) # Verification of pool update new_name = "New_pool" body = self.client.update_pool(pool['id'], name=new_name, description="new_description", lb_method='LEAST_CONNECTIONS') updated_pool = body['pool'] self.assertEqual(new_name, updated_pool['name']) self.assertEqual('new_description', updated_pool['description']) self.assertEqual('LEAST_CONNECTIONS', updated_pool['lb_method']) self.client.delete_pool(pool['id']) @test.attr(type='smoke') @decorators.idempotent_id('0435a95e-1d19-4d90-9e9f-3b979e9ad089') def test_show_vip(self): # Verifies the details of a vip body = self.client.show_vip(self.vip['id']) vip = body['vip'] for key, value in vip.iteritems(): # 'status' should not be confirmed in api tests if key != 'status': self.assertEqual(self.vip[key], value) @test.attr(type='smoke') @decorators.idempotent_id('6e7a7d31-8451-456d-b24a-e50479ce42a7') def test_show_pool(self): # Here we need to new pool without any dependence with vips body = self.client.create_pool(name=data_utils.rand_name("pool-"), lb_method='ROUND_ROBIN', protocol='HTTP', subnet_id=self.subnet['id']) pool = body['pool'] self.addCleanup(self.client.delete_pool, pool['id']) # Verifies the details of a pool body = self.client.show_pool(pool['id']) shown_pool = body['pool'] for key, value in pool.iteritems(): # 'status' should not be confirmed in api tests if key != 'status': self.assertEqual(value, 
shown_pool[key]) @test.attr(type='smoke') @decorators.idempotent_id('d1ab1ffa-e06a-487f-911f-56418cb27727') def test_list_pools(self): # Verify the pool exists in the list of all pools body = self.client.list_pools() pools = body['pools'] self.assertIn(self.pool['id'], [p['id'] for p in pools]) @test.attr(type='smoke') @decorators.idempotent_id('27cc4c1a-caac-4273-b983-2acb4afaad4f') def test_list_pools_with_filters(self): attr_exceptions = ['status', 'vip_id', 'members', 'provider', 'status_description'] self._check_list_with_filter( 'pool', attr_exceptions, name=data_utils.rand_name("pool-"), lb_method="ROUND_ROBIN", protocol="HTTPS", subnet_id=self.subnet['id'], description=data_utils.rand_name('description-'), admin_state_up=False) @test.attr(type='smoke') @decorators.idempotent_id('282d0dfd-5c3a-4c9b-b39c-c99782f39193') def test_list_members(self): # Verify the member exists in the list of all members body = self.client.list_members() members = body['members'] self.assertIn(self.member['id'], [m['id'] for m in members]) @test.attr(type='smoke') @decorators.idempotent_id('243b5126-24c6-4879-953e-7c7e32d8a57f') def test_list_members_with_filters(self): attr_exceptions = ['status', 'status_description'] self._check_list_with_filter('member', attr_exceptions, address=self.member_address, protocol_port=80, pool_id=self.pool['id']) @test.attr(type='smoke') @decorators.idempotent_id('fb833ee8-9e69-489f-b540-a409762b78b2') def test_create_update_delete_member(self): # Creates a member body = self.client.create_member(address=self.member_address, protocol_port=80, pool_id=self.pool['id']) member = body['member'] # Verification of member update body = self.client.update_member(member['id'], admin_state_up=False) updated_member = body['member'] self.assertFalse(updated_member['admin_state_up']) # Verification of member delete self.client.delete_member(member['id']) @test.attr(type='smoke') @decorators.idempotent_id('893cd71f-a7dd-4485-b162-f6ab9a534914') def 
test_show_member(self): # Verifies the details of a member body = self.client.show_member(self.member['id']) member = body['member'] for key, value in member.iteritems(): # 'status' should not be confirmed in api tests if key != 'status': self.assertEqual(self.member[key], value) @test.attr(type='smoke') @decorators.idempotent_id('8e5822c5-68a4-4224-8d6c-a617741ebc2d') def test_list_health_monitors(self): # Verify the health monitor exists in the list of all health monitors body = self.client.list_health_monitors() health_monitors = body['health_monitors'] self.assertIn(self.health_monitor['id'], [h['id'] for h in health_monitors]) @test.attr(type='smoke') @decorators.idempotent_id('49bac58a-511c-4875-b794-366698211d25') def test_list_health_monitors_with_filters(self): attr_exceptions = ['status', 'status_description', 'pools'] self._check_list_with_filter('health_monitor', attr_exceptions, delay=5, max_retries=4, type="TCP", timeout=2) @test.attr(type='smoke') @decorators.idempotent_id('e8ce05c4-d554-4d1e-a257-ad32ce134bb5') def test_create_update_delete_health_monitor(self): # Creates a health_monitor body = self.client.create_health_monitor(delay=4, max_retries=3, type="TCP", timeout=1) health_monitor = body['health_monitor'] # Verification of health_monitor update body = (self.client.update_health_monitor (health_monitor['id'], admin_state_up=False)) updated_health_monitor = body['health_monitor'] self.assertFalse(updated_health_monitor['admin_state_up']) # Verification of health_monitor delete body = self.client.delete_health_monitor(health_monitor['id']) @test.attr(type='smoke') @decorators.idempotent_id('d3e1aebc-06c2-49b3-9816-942af54012eb') def test_create_health_monitor_http_type(self): hm_type = "HTTP" body = self.client.create_health_monitor(delay=4, max_retries=3, type=hm_type, timeout=1) health_monitor = body['health_monitor'] self.addCleanup(self.client.delete_health_monitor, health_monitor['id']) self.assertEqual(hm_type, health_monitor['type']) 
@test.attr(type='smoke') @decorators.idempotent_id('0eff9f67-90fb-4bb1-b4ed-c5fda99fff0c') def test_update_health_monitor_http_method(self): body = self.client.create_health_monitor(delay=4, max_retries=3, type="HTTP", timeout=1) health_monitor = body['health_monitor'] self.addCleanup(self.client.delete_health_monitor, health_monitor['id']) body = (self.client.update_health_monitor (health_monitor['id'], http_method="POST", url_path="/home/user", expected_codes="290")) updated_health_monitor = body['health_monitor'] self.assertEqual("POST", updated_health_monitor['http_method']) self.assertEqual("/home/user", updated_health_monitor['url_path']) self.assertEqual("290", updated_health_monitor['expected_codes']) @test.attr(type='smoke') @decorators.idempotent_id('08e126ab-1407-483f-a22e-b11cc032ca7c') def test_show_health_monitor(self): # Verifies the details of a health_monitor body = self.client.show_health_monitor(self.health_monitor['id']) health_monitor = body['health_monitor'] for key, value in health_monitor.iteritems(): # 'status' should not be confirmed in api tests if key != 'status': self.assertEqual(self.health_monitor[key], value) @test.attr(type='smoke') @decorators.idempotent_id('87f7628e-8918-493d-af50-0602845dbb5b') def test_associate_disassociate_health_monitor_with_pool(self): # Verify that a health monitor can be associated with a pool self.client.associate_health_monitor_with_pool( self.health_monitor['id'], self.pool['id']) body = self.client.show_health_monitor( self.health_monitor['id']) health_monitor = body['health_monitor'] body = self.client.show_pool(self.pool['id']) pool = body['pool'] self.assertIn(pool['id'], [p['pool_id'] for p in health_monitor['pools']]) self.assertIn(health_monitor['id'], pool['health_monitors']) # Verify that a health monitor can be disassociated from a pool (self.client.disassociate_health_monitor_with_pool (self.health_monitor['id'], self.pool['id'])) body = self.client.show_pool(self.pool['id']) pool = 
body['pool'] body = self.client.show_health_monitor( self.health_monitor['id']) health_monitor = body['health_monitor'] self.assertNotIn(health_monitor['id'], pool['health_monitors']) self.assertNotIn(pool['id'], [p['pool_id'] for p in health_monitor['pools']]) @test.attr(type='smoke') @decorators.idempotent_id('525fc7dc-be24-408d-938d-822e9783e027') def test_get_lb_pool_stats(self): # Verify the details of pool stats body = self.client.list_lb_pool_stats(self.pool['id']) stats = body['stats'] self.assertIn("bytes_in", stats) self.assertIn("total_connections", stats) self.assertIn("active_connections", stats) self.assertIn("bytes_out", stats) @test.attr(type='smoke') @decorators.idempotent_id('66236be2-5121-4047-8cde-db4b83b110a5') def test_update_list_of_health_monitors_associated_with_pool(self): (self.client.associate_health_monitor_with_pool (self.health_monitor['id'], self.pool['id'])) self.client.update_health_monitor( self.health_monitor['id'], admin_state_up=False) body = self.client.show_pool(self.pool['id']) health_monitors = body['pool']['health_monitors'] for health_monitor_id in health_monitors: body = self.client.show_health_monitor(health_monitor_id) self.assertFalse(body['health_monitor']['admin_state_up']) (self.client.disassociate_health_monitor_with_pool (self.health_monitor['id'], self.pool['id'])) @test.attr(type='smoke') @decorators.idempotent_id('44ec9b40-b501-41e2-951f-4fc673b15ac0') def test_update_admin_state_up_of_pool(self): self.client.update_pool(self.pool['id'], admin_state_up=False) body = self.client.show_pool(self.pool['id']) pool = body['pool'] self.assertFalse(pool['admin_state_up']) @test.attr(type='smoke') @decorators.idempotent_id('466a9d4c-37c6-4ea2-b807-133437beb48c') def test_show_vip_associated_with_pool(self): body = self.client.show_pool(self.pool['id']) pool = body['pool'] body = self.client.show_vip(pool['vip_id']) vip = body['vip'] self.assertEqual(self.vip['name'], vip['name']) self.assertEqual(self.vip['id'], 
vip['id']) @test.attr(type='smoke') @decorators.idempotent_id('7b97694e-69d0-4151-b265-e1052a465aa8') def test_show_members_associated_with_pool(self): body = self.client.show_pool(self.pool['id']) members = body['pool']['members'] for member_id in members: body = self.client.show_member(member_id) self.assertIsNotNone(body['member']['status']) self.assertEqual(member_id, body['member']['id']) self.assertIsNotNone(body['member']['admin_state_up']) @test.attr(type='smoke') @decorators.idempotent_id('73ed6f27-595b-4b2c-969c-dbdda6b8ab34') def test_update_pool_related_to_member(self): # Create new pool body = self.client.create_pool(name=data_utils.rand_name("pool-"), lb_method='ROUND_ROBIN', protocol='HTTP', subnet_id=self.subnet['id']) new_pool = body['pool'] self.addCleanup(self.client.delete_pool, new_pool['id']) # Update member with new pool's id body = self.client.update_member(self.member['id'], pool_id=new_pool['id']) # Confirm with show that pool_id change body = self.client.show_member(self.member['id']) member = body['member'] self.assertEqual(member['pool_id'], new_pool['id']) # Update member with old pool id, this is needed for clean up body = self.client.update_member(self.member['id'], pool_id=self.pool['id']) @test.attr(type='smoke') @decorators.idempotent_id('cf63f071-bbe3-40ba-97a0-a33e11923162') def test_update_member_weight(self): self.client.update_member(self.member['id'], weight=2) body = self.client.show_member(self.member['id']) member = body['member'] self.assertEqual(2, member['weight']) @decorators.skip_because(bug="1402007") class LoadBalancerIpV6TestJSON(LoadBalancerTestJSON): _ip_version = 6 neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v1/api/base.py0000664000567000056710000004440512701407726025511 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2016 Rackspace Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from tempest import config from tempest import exceptions from tempest.lib.common.utils import data_utils from tempest.lib import exceptions as lib_exc from tempest import test from neutron_lbaas.tests.tempest.v1.api import clients CONF = config.CONF class BaseNetworkTest(test.BaseTestCase): """ Base class for the Neutron tests that use the Tempest Neutron REST client Per the Neutron API Guide, API v1.x was removed from the source code tree (docs.openstack.org/api/openstack-network/2.0/content/Overview-d1e71.html) Therefore, v2.x of the Neutron API is assumed. It is also assumed that the following options are defined in the [network] section of etc/tempest.conf: tenant_network_cidr with a block of cidr's from which smaller blocks can be allocated for tenant networks tenant_network_mask_bits with the mask bits to be used to partition the block defined by tenant-network_cidr Finally, it is assumed that the following option is defined in the [service_available] section of etc/tempest.conf neutron as True """ force_tenant_isolation = False credentials = ['primary'] # Default to ipv4. 
_ip_version = 4 @classmethod def get_client_manager(cls, credential_type=None, roles=None, force_new=None): manager = test.BaseTestCase.get_client_manager( credential_type=credential_type, roles=roles, force_new=force_new) # Neutron uses a different clients manager than the one in the Tempest return clients.Manager(manager.credentials) @classmethod def skip_checks(cls): # Create no network resources for these test. cls.set_network_resources() super(BaseNetworkTest, cls).resource_setup() if not CONF.service_available.neutron: raise cls.skipException("Neutron support is required") if cls._ip_version == 6 and not CONF.network_feature_enabled.ipv6: raise cls.skipException("IPv6 Tests are disabled.") @classmethod def setup_credentials(cls): # Create no network resources for these test. cls.set_network_resources() super(BaseNetworkTest, cls).setup_credentials() @classmethod def setup_clients(cls): super(BaseNetworkTest, cls).setup_clients() cls.client = cls.os.network_client @classmethod def resource_setup(cls): cls.networks = [] cls.shared_networks = [] cls.subnets = [] cls.ports = [] cls.routers = [] cls.pools = [] cls.vips = [] cls.members = [] cls.health_monitors = [] cls.vpnservices = [] cls.ikepolicies = [] cls.floating_ips = [] cls.metering_labels = [] cls.metering_label_rules = [] cls.fw_rules = [] cls.fw_policies = [] cls.ipsecpolicies = [] cls.ethertype = "IPv" + str(cls._ip_version) @classmethod def resource_cleanup(cls): if CONF.service_available.neutron: # Clean up ipsec policies for ipsecpolicy in cls.ipsecpolicies: cls._try_delete_resource(cls.client.delete_ipsecpolicy, ipsecpolicy['id']) # Clean up firewall policies for fw_policy in cls.fw_policies: cls._try_delete_resource(cls.client.delete_firewall_policy, fw_policy['id']) # Clean up firewall rules for fw_rule in cls.fw_rules: cls._try_delete_resource(cls.client.delete_firewall_rule, fw_rule['id']) # Clean up ike policies for ikepolicy in cls.ikepolicies: 
cls._try_delete_resource(cls.client.delete_ikepolicy, ikepolicy['id']) # Clean up vpn services for vpnservice in cls.vpnservices: cls._try_delete_resource(cls.client.delete_vpnservice, vpnservice['id']) # Clean up floating IPs for floating_ip in cls.floating_ips: cls._try_delete_resource(cls.client.delete_floatingip, floating_ip['id']) # Clean up routers for router in cls.routers: cls._try_delete_resource(cls.delete_router, router) # Clean up health monitors for health_monitor in cls.health_monitors: cls._try_delete_resource(cls.client.delete_health_monitor, health_monitor['id']) # Clean up members for member in cls.members: cls._try_delete_resource(cls.client.delete_member, member['id']) # Clean up vips for vip in cls.vips: cls._try_delete_resource(cls.client.delete_vip, vip['id']) # Clean up pools for pool in cls.pools: cls._try_delete_resource(cls.client.delete_pool, pool['id']) # Clean up metering label rules for metering_label_rule in cls.metering_label_rules: cls._try_delete_resource( cls.admin_client.delete_metering_label_rule, metering_label_rule['id']) # Clean up metering labels for metering_label in cls.metering_labels: cls._try_delete_resource( cls.admin_client.delete_metering_label, metering_label['id']) # Clean up ports for port in cls.ports: cls._try_delete_resource(cls.client.delete_port, port['id']) # Clean up subnets for subnet in cls.subnets: cls._try_delete_resource(cls.client.delete_subnet, subnet['id']) # Clean up networks for network in cls.networks: cls._try_delete_resource(cls.client.delete_network, network['id']) # Clean up shared networks for network in cls.shared_networks: cls._try_delete_resource(cls.admin_client.delete_network, network['id']) super(BaseNetworkTest, cls).resource_cleanup() @classmethod def _try_delete_resource(self, delete_callable, *args, **kwargs): """Cleanup resources in case of test-failure Some resources are explicitly deleted by the test. 
If the test failed to delete a resource, this method will execute the appropriate delete methods. Otherwise, the method ignores NotFound exceptions thrown for resources that were correctly deleted by the test. :param delete_callable: delete method :param args: arguments for delete method :param kwargs: keyword arguments for delete method """ try: delete_callable(*args, **kwargs) # if resource is not found, this means it was deleted in the test except lib_exc.NotFound: pass @classmethod def create_network(cls, network_name=None): """Wrapper utility that returns a test network.""" network_name = network_name or data_utils.rand_name('test-network-') body = cls.client.create_network(name=network_name) network = body['network'] cls.networks.append(network) return network @classmethod def create_shared_network(cls, network_name=None): network_name = network_name or data_utils.rand_name('sharednetwork-') post_body = {'name': network_name, 'shared': True} body = cls.admin_client.create_network(**post_body) network = body['network'] cls.shared_networks.append(network) return network @classmethod def create_subnet(cls, network, gateway='', cidr=None, mask_bits=None, ip_version=None, client=None, **kwargs): """Wrapper utility that returns a test subnet.""" # allow tests to use admin client if not client: client = cls.client # The cidr and mask_bits depend on the ip version. 
ip_version = ip_version if ip_version is not None else cls._ip_version gateway_not_set = gateway == '' if ip_version == 4: cidr = cidr or netaddr.IPNetwork(CONF.network.tenant_network_cidr) mask_bits = mask_bits or CONF.network.tenant_network_mask_bits elif ip_version == 6: cidr = ( cidr or netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)) mask_bits = mask_bits or CONF.network.tenant_network_v6_mask_bits # Find a cidr that is not in use yet and create a subnet with it for subnet_cidr in cidr.subnet(mask_bits): if gateway_not_set: gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1) else: gateway_ip = gateway try: body = client.create_subnet( network_id=network['id'], cidr=str(subnet_cidr), ip_version=ip_version, gateway_ip=gateway_ip, **kwargs) break except lib_exc.BadRequest as e: is_overlapping_cidr = 'overlaps with another subnet' in str(e) if not is_overlapping_cidr: raise else: message = 'Available CIDR for subnet creation could not be found' raise exceptions.BuildErrorException(message) subnet = body['subnet'] cls.subnets.append(subnet) return subnet @classmethod def create_port(cls, network, **kwargs): """Wrapper utility that returns a test port.""" body = cls.client.create_port(network_id=network['id'], **kwargs) port = body['port'] cls.ports.append(port) return port @classmethod def update_port(cls, port, **kwargs): """Wrapper utility that updates a test port.""" body = cls.client.update_port(port['id'], **kwargs) return body['port'] @classmethod def create_router(cls, router_name=None, admin_state_up=False, external_network_id=None, enable_snat=None, **kwargs): ext_gw_info = {} if external_network_id: ext_gw_info['network_id'] = external_network_id if enable_snat: ext_gw_info['enable_snat'] = enable_snat body = cls.client.create_router( router_name, external_gateway_info=ext_gw_info, admin_state_up=admin_state_up, **kwargs) router = body['router'] cls.routers.append(router) return router @classmethod def create_floatingip(cls, external_network_id): 
"""Wrapper utility that returns a test floating IP.""" body = cls.client.create_floatingip( floating_network_id=external_network_id) fip = body['floatingip'] cls.floating_ips.append(fip) return fip @classmethod def create_pool(cls, name, lb_method, protocol, subnet): """Wrapper utility that returns a test pool.""" body = cls.client.create_pool( name=name, lb_method=lb_method, protocol=protocol, subnet_id=subnet['id']) pool = body['pool'] cls.pools.append(pool) return pool @classmethod def update_pool(cls, name): """Wrapper utility that returns a test pool.""" body = cls.client.update_pool(name=name) pool = body['pool'] return pool @classmethod def create_vip(cls, name, protocol, protocol_port, subnet, pool): """Wrapper utility that returns a test vip.""" body = cls.client.create_vip(name=name, protocol=protocol, protocol_port=protocol_port, subnet_id=subnet['id'], pool_id=pool['id']) vip = body['vip'] cls.vips.append(vip) return vip @classmethod def update_vip(cls, name): body = cls.client.update_vip(name=name) vip = body['vip'] return vip @classmethod def create_member(cls, protocol_port, pool, ip_version=None): """Wrapper utility that returns a test member.""" ip_version = ip_version if ip_version is not None else cls._ip_version member_address = "fd00::abcd" if ip_version == 6 else "10.0.9.46" body = cls.client.create_member(address=member_address, protocol_port=protocol_port, pool_id=pool['id']) member = body['member'] cls.members.append(member) return member @classmethod def update_member(cls, admin_state_up): body = cls.client.update_member(admin_state_up=admin_state_up) member = body['member'] return member @classmethod def create_health_monitor(cls, delay, max_retries, Type, timeout): """Wrapper utility that returns a test health monitor.""" body = cls.client.create_health_monitor(delay=delay, max_retries=max_retries, type=Type, timeout=timeout) health_monitor = body['health_monitor'] cls.health_monitors.append(health_monitor) return health_monitor 
@classmethod def update_health_monitor(cls, admin_state_up): body = cls.client.update_vip(admin_state_up=admin_state_up) health_monitor = body['health_monitor'] return health_monitor @classmethod def create_router_interface(cls, router_id, subnet_id): """Wrapper utility that returns a router interface.""" interface = cls.client.add_router_interface_with_subnet_id( router_id, subnet_id) return interface @classmethod def create_vpnservice(cls, subnet_id, router_id): """Wrapper utility that returns a test vpn service.""" body = cls.client.create_vpnservice( subnet_id=subnet_id, router_id=router_id, admin_state_up=True, name=data_utils.rand_name("vpnservice-")) vpnservice = body['vpnservice'] cls.vpnservices.append(vpnservice) return vpnservice @classmethod def create_ikepolicy(cls, name): """Wrapper utility that returns a test ike policy.""" body = cls.client.create_ikepolicy(name=name) ikepolicy = body['ikepolicy'] cls.ikepolicies.append(ikepolicy) return ikepolicy @classmethod def create_firewall_rule(cls, action, protocol): """Wrapper utility that returns a test firewall rule.""" body = cls.client.create_firewall_rule( name=data_utils.rand_name("fw-rule"), action=action, protocol=protocol) fw_rule = body['firewall_rule'] cls.fw_rules.append(fw_rule) return fw_rule @classmethod def create_firewall_policy(cls): """Wrapper utility that returns a test firewall policy.""" body = cls.client.create_firewall_policy( name=data_utils.rand_name("fw-policy")) fw_policy = body['firewall_policy'] cls.fw_policies.append(fw_policy) return fw_policy @classmethod def delete_router(cls, router): body = cls.client.list_router_interfaces(router['id']) interfaces = body['ports'] for i in interfaces: try: cls.client.remove_router_interface_with_subnet_id( router['id'], i['fixed_ips'][0]['subnet_id']) except lib_exc.NotFound: pass cls.client.delete_router(router['id']) @classmethod def create_ipsecpolicy(cls, name): """Wrapper utility that returns a test ipsec policy.""" body = 
cls.client.create_ipsecpolicy(name=name) ipsecpolicy = body['ipsecpolicy'] cls.ipsecpolicies.append(ipsecpolicy) return ipsecpolicy class BaseAdminNetworkTest(BaseNetworkTest): credentials = ['primary', 'admin'] @classmethod def setup_clients(cls): super(BaseAdminNetworkTest, cls).setup_clients() cls.admin_client = cls.os_adm.network_client cls.identity_admin_client = cls.os_adm.tenants_client @classmethod def create_metering_label(cls, name, description): """Wrapper utility that returns a test metering label.""" body = cls.admin_client.create_metering_label( description=description, name=data_utils.rand_name("metering-label")) metering_label = body['metering_label'] cls.metering_labels.append(metering_label) return metering_label @classmethod def create_metering_label_rule(cls, remote_ip_prefix, direction, metering_label_id): """Wrapper utility that returns a test metering label rule.""" body = cls.admin_client.create_metering_label_rule( remote_ip_prefix=remote_ip_prefix, direction=direction, metering_label_id=metering_label_id) metering_label_rule = body['metering_label_rule'] cls.metering_label_rules.append(metering_label_rule) return metering_label_rule neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v1/api/admin/0000775000567000056710000000000012701410110025263 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v1/api/admin/__init__.py0000664000567000056710000000000012701407726027405 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v1/api/admin/test_quotas.py0000664000567000056710000000716312701407727030243 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2016 Rackspace Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import test from tempest.lib.common.utils import data_utils from neutron_lbaas.tests.tempest.v1.api import base class QuotasTest(base.BaseAdminNetworkTest): _interface = 'json' """ Tests the following operations in the Neutron API using the REST client for Neutron: list quotas for tenants who have non-default quota values show quotas for a specified tenant update quotas for a specified tenant reset quotas to default values for a specified tenant v2.0 of the API is assumed. It is also assumed that the per-tenant quota extension API is configured in /etc/neutron/neutron.conf as follows: quota_driver = neutron.db.quota_db.DbQuotaDriver """ @classmethod def skip_checks(cls): super(QuotasTest, cls).skip_checks() if not test.is_extension_enabled('quotas', 'network'): msg = "quotas extension not enabled." 
raise cls.skipException(msg) def _check_quotas(self, new_quotas): # Add a tenant to conduct the test test_tenant = data_utils.rand_name('test_tenant_') test_description = data_utils.rand_name('desc_') tenant = self.identity_admin_client.create_tenant( name=test_tenant, description=test_description) tenant_id = tenant['tenant']['id'] self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id) # Change quotas for tenant quota_set = self.admin_client.update_quotas(tenant_id, **new_quotas) self.addCleanup(self.admin_client.reset_quotas, tenant_id) for key, value in new_quotas.iteritems(): self.assertEqual(value, quota_set[key]) # Confirm our tenant is listed among tenants with non default quotas non_default_quotas = self.admin_client.list_quotas() found = False for qs in non_default_quotas['quotas']: if qs['tenant_id'] == tenant_id: found = True self.assertTrue(found) # Confirm from API quotas were changed as requested for tenant quota_set = self.admin_client.show_quotas(tenant_id) quota_set = quota_set['quota'] for key, value in new_quotas.iteritems(): self.assertEqual(value, quota_set[key]) # Reset quotas to default and confirm self.admin_client.reset_quotas(tenant_id) non_default_quotas = self.admin_client.list_quotas() for q in non_default_quotas['quotas']: self.assertNotEqual(tenant_id, q['tenant_id']) @test.attr(type='gate') def test_quotas(self): new_quotas = {'network': 0, 'security_group': 0} self._check_quotas(new_quotas) @test.requires_ext(extension='lbaas', service='network') @test.attr(type='gate') def test_lbaas_quotas(self): new_quotas = {'vip': 1, 'pool': 2, 'member': 3, 'health_monitor': 4} self._check_quotas(new_quotas) neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v1/api/admin/test_lbaas_agent_scheduler.py0000664000567000056710000000553012701407726033220 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # Copyright 2016 Rackspace Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import test from tempest.lib.common.utils import data_utils from neutron_lbaas.tests.tempest.v1.api import base class LBaaSAgentSchedulerTestJSON(base.BaseAdminNetworkTest): """ Tests the following operations in the Neutron API using the REST client for Neutron: List pools the given LBaaS agent is hosting. Show a LBaaS agent hosting the given pool. v2.0 of the Neutron API is assumed. It is also assumed that the following options are defined in the [networki-feature-enabled] section of etc/tempest.conf: api_extensions """ @classmethod def resource_setup(cls): super(LBaaSAgentSchedulerTestJSON, cls).resource_setup() if not test.is_extension_enabled('lbaas_agent_scheduler', 'network'): msg = "LBaaS Agent Scheduler Extension not enabled." 
raise cls.skipException(msg) cls.network = cls.create_network() cls.subnet = cls.create_subnet(cls.network) pool_name = data_utils.rand_name('pool-') cls.pool = cls.create_pool(pool_name, "ROUND_ROBIN", "HTTP", cls.subnet) @test.attr(type='smoke') @test.idempotent_id('e5ea8b15-4f44-4350-963c-e0fcb533ee79') def test_list_pools_on_lbaas_agent(self): found = False body = self.admin_client.list_agents( agent_type="Loadbalancer agent") agents = body['agents'] for a in agents: msg = 'Load Balancer agent expected' self.assertEqual(a['agent_type'], 'Loadbalancer agent', msg) body = ( self.admin_client.list_pools_hosted_by_one_lbaas_agent( a['id'])) pools = body['pools'] if self.pool['id'] in [p['id'] for p in pools]: found = True msg = 'Unable to find Load Balancer agent hosting pool' self.assertTrue(found, msg) @test.attr(type='smoke') @test.idempotent_id('e2745593-fd79-4b98-a262-575fd7865796') def test_show_lbaas_agent_hosting_pool(self): body = self.admin_client.show_lbaas_agent_hosting_pool( self.pool['id']) self.assertEqual('Loadbalancer agent', body['agent']['agent_type']) neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v1/api/admin/test_load_balancer_admin_actions.py0000664000567000056710000001216612701407726034363 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis.inc # Copyright 2016 Rackspace Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest import test from tempest.lib.common.utils import data_utils from tempest.lib import decorators from neutron_lbaas.tests.tempest.v1.api import base class LoadBalancerAdminTestJSON(base.BaseAdminNetworkTest): """ Test admin actions for load balancer. Create VIP for another tenant Create health monitor for another tenant """ @classmethod def resource_setup(cls): super(LoadBalancerAdminTestJSON, cls).resource_setup() if not test.is_extension_enabled('lbaas', 'network'): msg = "lbaas extension not enabled." raise cls.skipException(msg) cls.force_tenant_isolation = True manager = cls.get_client_manager() cls.client = manager.network_client cls.tenant_id = manager.credentials.tenant_id cls.network = cls.create_network() cls.subnet = cls.create_subnet(cls.network) cls.pool = cls.create_pool(data_utils.rand_name('pool-'), "ROUND_ROBIN", "HTTP", cls.subnet) @test.attr(type='smoke') @decorators.idempotent_id('6b0a20d8-4fcd-455e-b54f-ec4db5199518') def test_create_vip_as_admin_for_another_tenant(self): name = data_utils.rand_name('vip-') body = self.admin_client.create_pool( name=data_utils.rand_name('pool-'), lb_method="ROUND_ROBIN", protocol="HTTP", subnet_id=self.subnet['id'], tenant_id=self.tenant_id) pool = body['pool'] self.addCleanup(self.admin_client.delete_pool, pool['id']) body = self.admin_client.create_vip(name=name, protocol="HTTP", protocol_port=80, subnet_id=self.subnet['id'], pool_id=pool['id'], tenant_id=self.tenant_id) vip = body['vip'] self.addCleanup(self.admin_client.delete_vip, vip['id']) self.assertIsNotNone(vip['id']) self.assertEqual(self.tenant_id, vip['tenant_id']) body = self.client.show_vip(vip['id']) show_vip = body['vip'] self.assertEqual(vip['id'], show_vip['id']) self.assertEqual(vip['name'], show_vip['name']) @test.attr(type='smoke') @decorators.idempotent_id('74552cfc-ab78-4fb6-825b-f67bca379921') def test_create_health_monitor_as_admin_for_another_tenant(self): body = ( self.admin_client.create_health_monitor(delay=4, 
max_retries=3, type="TCP", timeout=1, tenant_id=self.tenant_id)) health_monitor = body['health_monitor'] self.addCleanup(self.admin_client.delete_health_monitor, health_monitor['id']) self.assertIsNotNone(health_monitor['id']) self.assertEqual(self.tenant_id, health_monitor['tenant_id']) body = self.client.show_health_monitor(health_monitor['id']) show_health_monitor = body['health_monitor'] self.assertEqual(health_monitor['id'], show_health_monitor['id']) @test.attr(type='smoke') @decorators.idempotent_id('266a192d-3c22-46c4-a8fb-802450301e82') def test_create_pool_from_admin_user_other_tenant(self): body = self.admin_client.create_pool( name=data_utils.rand_name('pool-'), lb_method="ROUND_ROBIN", protocol="HTTP", subnet_id=self.subnet['id'], tenant_id=self.tenant_id) pool = body['pool'] self.addCleanup(self.admin_client.delete_pool, pool['id']) self.assertIsNotNone(pool['id']) self.assertEqual(self.tenant_id, pool['tenant_id']) @test.attr(type='smoke') @decorators.idempotent_id('158bb272-b9ed-4cfc-803c-661dac46f783') def test_create_member_from_admin_user_other_tenant(self): body = self.admin_client.create_member(address="10.0.9.47", protocol_port=80, pool_id=self.pool['id'], tenant_id=self.tenant_id) member = body['member'] self.addCleanup(self.admin_client.delete_member, member['id']) self.assertIsNotNone(member['id']) self.assertEqual(self.tenant_id, member['tenant_id']) neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v1/scenario/0000775000567000056710000000000012701410110025225 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/tempest/v1/scenario/__init__.py0000664000567000056710000000000012701407726027347 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/__init__.py0000664000567000056710000000000012701407726023535 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/contrib/0000775000567000056710000000000012701410110023053 5ustar 
jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/contrib/gate_hook.sh0000775000567000056710000000502612701407726025400 0ustar jenkinsjenkins00000000000000#!/bin/bash set -ex GATE_DEST=$BASE/new DEVSTACK_PATH=$GATE_DEST/devstack # Sort out our gate args . `dirname "$0"`/decode_args.sh testenv=${lbaastest:-"apiv2"} if [ "$lbaasversion" = "lbaasv1" ]; then testenv="apiv1" elif [ "$lbaasversion" = "lbaasv2" ]; then if [ "$lbaasenv" = "healthmonitor" ] || [ "$lbaasenv" = "listener" ] || [ "$lbaasenv" = "loadbalancer" ] || [ "$lbaasenv" = "member" ] || [ "$lbaasenv" = "minimal" ] || [ "$lbaasenv" = "pool" ]; then testenv="apiv2" elif [ "$lbaasenv" = "scenario" ]; then testenv="scenario" fi fi export DEVSTACK_LOCAL_CONFIG+=" enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas enable_plugin barbican https://git.openstack.org/openstack/barbican " if [ "$lbaasdriver" = "octavia" ]; then export DEVSTACK_LOCAL_CONFIG+=" enable_plugin octavia https://git.openstack.org/openstack/octavia " fi if [ "$testenv" != "scenario" ]; then export DEVSTACK_LOCAL_CONFIG+=" DISABLE_AMP_IMAGE_BUILD=True " # Not needed for API tests ENABLED_SERVICES+="-horizon,-ceilometer-acentral,-ceilometer-acompute," ENABLED_SERVICES+="-ceilometer-alarm-evaluator,-ceilometer-alarm-notifier," ENABLED_SERVICES+="-ceilometer-anotification,-ceilometer-api," ENABLED_SERVICES+="-ceilometer-collector," fi # These are not needed with either v1 or v2 ENABLED_SERVICES+="-c-api,-c-bak,-c-sch,-c-vol,-cinder" ENABLED_SERVICES+=",-s-account,-s-container,-s-object,-s-proxy" if [ "$testenv" != "apiv1" ]; then # Override enabled services, so we can turn on lbaasv2. # While we're at it, disable cinder and swift, since we don't need them. 
ENABLED_SERVICES+=",q-lbaasv2,-q-lbaas" if [ "$lbaasdriver" = "octavia" ]; then ENABLED_SERVICES+=",octavia,o-cw,o-hk,o-hm,o-api" fi if [ "$lbaasdriver" = "namespace" ]; then export DEVSTACK_LOCAL_CONFIG+=" NEUTRON_LBAAS_SERVICE_PROVIDERV2=LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default " fi fi export ENABLED_SERVICES if [ "$lbaasdriver" = "octavia" -a "$testenv" = "apiv2" ]; then cat > $DEVSTACK_PATH/local.conf < $DEVSTACK_PATH/local.conf < ./testrepository.subunit $SCRIPTS_DIR/subunit2html ./testrepository.subunit testr_results.html gzip -9 ./testrepository.subunit gzip -9 ./testr_results.html sudo mv ./*.gz /opt/stack/logs/ fi } owner=tempest # Set owner permissions according to job's requirements. cd $NEUTRON_LBAAS_DIR sudo chown -R $owner:stack $NEUTRON_LBAAS_DIR if [ "$lbaasdriver" = "octavia" ]; then sudo chown -R $owner:stack $OCTAVIA_DIR fi sudo_env=" OS_TESTR_CONCURRENCY=1" # Configure the api and scenario tests to use the tempest.conf set by devstack sudo_env+=" TEMPEST_CONFIG_DIR=$TEMPEST_CONFIG_DIR" if [ "$testenv" = "apiv2" ]; then sudo_env+=" OS_TEST_PATH=$NEUTRON_LBAAS_DIR/neutron_lbaas/tests/tempest/v2/api" elif [ "$testenv" = "apiv1" ]; then sudo_env+=" OS_TEST_PATH=$NEUTRON_LBAAS_DIR/neutron_lbaas/tests/tempest/v1/api" elif [ "$testenv" = "scenario" ]; then sudo_env+=" OS_TEST_PATH=$NEUTRON_LBAAS_DIR/neutron_lbaas/tests/tempest/v2/scenario" else echo "ERROR: unsupported testenv: $testenv" exit 1 fi # Run tests echo "Running neutron lbaas $testenv test suite" set +e sudo -H -u $owner $sudo_env tox -e $testenv -- $test_subset # sudo -H -u $owner $sudo_env testr init # sudo -H -u $owner $sudo_env testr run testr_exit_code=$? 
set -e # Collect and parse results generate_testr_results exit $testr_exit_code neutron-lbaas-8.0.0/neutron_lbaas/tests/contrib/decode_args.sh0000664000567000056710000000150212701407726025667 0ustar jenkinsjenkins00000000000000#!/bin/bash # This file is meant to be sourced by the other hooks # Legacy values for $1 and $2: # $1 - lbaasv2, lbaasv1 (lbaasversion) # $2 - scenario, minimal, api, healthmonitor, listener, loadbalancer, member, pool (lbaastest) # Args being phased in: # $1 - same # $2 - test-driver, with any missing -driver being "octavia" # scenario-octavia # minimal-octavia # api-namespace # api-{thirdparty} # healthmonitor-octavia # listener-octavia # loadbalancer-octavia # member-octavia # pool-octavia lbaasversion="$1" lbaastest="$2" lbaasenv=$(echo "$lbaastest" | perl -ne '/^(.*)-([^-]+)$/ && print "$1";') if [ -z "$lbaasenv" ]; then lbaasenv=$lbaastest fi lbaasdriver=$(echo "$lbaastest" | perl -ne '/^(.*)-([^-]+)$/ && print "$2";') if [ -z "$lbaasdriver" ]; then lbaasdriver='octavia' fi neutron-lbaas-8.0.0/neutron_lbaas/tests/base.py0000664000567000056710000002167212701407726022732 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import mock from neutron.db import servicetype_db as st_db from neutron.tests import base as n_base from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit.extensions import base as ext_base from neutron.tests.unit.extensions import test_quotasv2 from neutron.tests.unit import testlib_api from testtools import matchers class BaseTestCase(n_base.BaseTestCase): pass class NeutronDbPluginV2TestCase( test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def set_override(self, lbaas_provider): # override the default service provider self.service_providers = ( mock.patch.object(st_db.ServiceTypeManager, 'get_service_providers').start()) self.service_providers.return_value = ( self._to_provider_dicts(lbaas_provider)) # need to reload provider configuration st_db.ServiceTypeManager._instance = None def new_list_request(self, resource, fmt=None, params=None, id=None, subresource=None): return self._req( 'GET', resource, None, fmt, params=params, subresource=subresource, id=id ) def new_show_request(self, resource, id, fmt=None, subresource=None, sub_id=None, fields=None): if fields: params = "&".join(["fields=%s" % x for x in fields]) else: params = None return self._req('GET', resource, None, fmt, id=id, params=params, subresource=subresource, sub_id=sub_id) def new_update_request(self, resource, data, id, fmt=None, subresource=None, context=None, sub_id=None): return self._req( 'PUT', resource, data, fmt, id=id, subresource=subresource, context=context, sub_id=sub_id ) def _to_provider_dicts(self, lbaas_provider): provider_dicts = [] for provider in lbaas_provider: bits = provider.split(':') p = { 'service_type': bits[0], 'name': bits[1], 'driver': bits[2] } if len(bits) == 4: p['default'] = True provider_dicts.append(p) return provider_dicts def _test_list_with_sort(self, resource, items, sorts, resources=None, query_params='', id=None, subresource=None, subresources=None): query_str = query_params for key, direction in sorts: query_str = 
query_str + "&sort_key=%s&sort_dir=%s" % (key, direction) if not resources: resources = '%ss' % resource if subresource and not subresources: subresources = '%ss' % subresource req = self.new_list_request(resources, params=query_str, id=id, subresource=subresources) api = self._api_for_resource(resources) res = self.deserialize(self.fmt, req.get_response(api)) if subresource: resource = subresource if subresources: resources = subresources resource = resource.replace('-', '_') resources = resources.replace('-', '_') expected_res = [item[resource]['id'] for item in items] self.assertEqual(expected_res, [n['id'] for n in res[resources]]) def _test_list_with_pagination(self, resource, items, sort, limit, expected_page_num, resources=None, query_params='', verify_key='id', id=None, subresource=None, subresources=None): if not resources: resources = '%ss' % resource if subresource and not subresources: subresources = '%ss' % subresource query_str = query_params + '&' if query_params else '' query_str = query_str + ("limit=%s&sort_key=%s&" "sort_dir=%s") % (limit, sort[0], sort[1]) req = self.new_list_request(resources, params=query_str, id=id, subresource=subresources) items_res = [] page_num = 0 api = self._api_for_resource(resources) if subresource: resource = subresource if subresources: resources = subresources resource = resource.replace('-', '_') resources = resources.replace('-', '_') while req: page_num = page_num + 1 res = self.deserialize(self.fmt, req.get_response(api)) self.assertThat(len(res[resources]), matchers.LessThan(limit + 1)) items_res = items_res + res[resources] req = None if '%s_links' % resources in res: for link in res['%s_links' % resources]: if link['rel'] == 'next': content_type = 'application/%s' % self.fmt req = testlib_api.create_request(link['href'], '', content_type) self.assertEqual(len(res[resources]), limit) self.assertEqual(expected_page_num, page_num) self.assertEqual([item[resource][verify_key] for item in items], [n[verify_key] 
for n in items_res]) def _test_list_with_pagination_reverse(self, resource, items, sort, limit, expected_page_num, resources=None, query_params='', id=None, subresource=None, subresources=None): if not resources: resources = '%ss' % resource if subresource and not subresources: subresources = '%ss' % subresource resource = resource.replace('-', '_') api = self._api_for_resource(resources) if subresource: marker = items[-1][subresource]['id'] else: marker = items[-1][resource]['id'] query_str = query_params + '&' if query_params else '' query_str = query_str + ("limit=%s&page_reverse=True&" "sort_key=%s&sort_dir=%s&" "marker=%s") % (limit, sort[0], sort[1], marker) req = self.new_list_request(resources, params=query_str, id=id, subresource=subresources) if subresource: resource = subresource if subresources: resources = subresources item_res = [items[-1][resource]] page_num = 0 resources = resources.replace('-', '_') while req: page_num = page_num + 1 res = self.deserialize(self.fmt, req.get_response(api)) self.assertThat(len(res[resources]), matchers.LessThan(limit + 1)) res[resources].reverse() item_res = item_res + res[resources] req = None if '%s_links' % resources in res: for link in res['%s_links' % resources]: if link['rel'] == 'previous': content_type = 'application/%s' % self.fmt req = testlib_api.create_request(link['href'], '', content_type) self.assertEqual(len(res[resources]), limit) self.assertEqual(expected_page_num, page_num) expected_res = [item[resource]['id'] for item in items] expected_res.reverse() self.assertEqual(expected_res, [n['id'] for n in item_res]) class ExtensionTestCase(ext_base.ExtensionTestCase): pass class QuotaExtensionTestCase(test_quotasv2.QuotaExtensionTestCase): pass neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/0000775000567000056710000000000012701410110022372 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/__init__.py0000664000567000056710000000000012701407726024514 0ustar 
jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/0000775000567000056710000000000012701410110024215 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/__init__.py0000664000567000056710000000000012701407726026337 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/0000775000567000056710000000000012701410110026624 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/test_loadbalancer_quota_ext.py0000664000567000056710000002104412701407726034761 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron import context from neutron import quota from neutron.tests.unit.api.v2 import test_base from oslo_config import cfg from neutron_lbaas.tests import base _get_path = test_base._get_path class LBaaSQuotaExtensionTestCase(base.QuotaExtensionTestCase): def setUp(self): super(LBaaSQuotaExtensionTestCase, self).setUp() cfg.CONF.set_override( 'quota_items', ['vip', 'pool', 'member', 'health_monitor', 'extra1', 'loadbalancer', 'listener', 'healthmonitor'], group='QUOTAS') quota.register_resources_from_config() class LBaaSQuotaExtensionDbTestCase(LBaaSQuotaExtensionTestCase): fmt = 'json' def setUp(self): cfg.CONF.set_override( 'quota_driver', 'neutron.db.quota_db.DbQuotaDriver', group='QUOTAS') super(LBaaSQuotaExtensionDbTestCase, self).setUp() def test_quotas_loaded_right(self): res = self.api.get(_get_path('quotas', fmt=self.fmt)) quota = self.deserialize(res) self.assertEqual([], quota['quotas']) self.assertEqual(200, res.status_int) def test_quotas_default_values(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env) quota = self.deserialize(res) self.assertEqual(10, quota['quota']['vip']) self.assertEqual(10, quota['quota']['pool']) self.assertEqual(-1, quota['quota']['member']) self.assertEqual(-1, quota['quota']['health_monitor']) self.assertEqual(-1, quota['quota']['extra1']) self.assertEqual(10, quota['quota']['loadbalancer']) self.assertEqual(-1, quota['quota']['listener']) self.assertEqual(-1, quota['quota']['healthmonitor']) def test_show_quotas_with_admin(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id + '2', is_admin=True)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env) self.assertEqual(200, res.status_int) quota = self.deserialize(res) self.assertEqual(10, quota['quota']['vip']) self.assertEqual(10, quota['quota']['pool']) self.assertEqual(-1, 
quota['quota']['member']) self.assertEqual(-1, quota['quota']['health_monitor']) self.assertEqual(10, quota['quota']['loadbalancer']) self.assertEqual(-1, quota['quota']['listener']) self.assertEqual(-1, quota['quota']['healthmonitor']) def test_show_quotas_with_owner_tenant(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=False)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env) self.assertEqual(200, res.status_int) quota = self.deserialize(res) self.assertEqual(10, quota['quota']['vip']) self.assertEqual(10, quota['quota']['pool']) self.assertEqual(-1, quota['quota']['member']) self.assertEqual(-1, quota['quota']['health_monitor']) self.assertEqual(10, quota['quota']['loadbalancer']) self.assertEqual(-1, quota['quota']['listener']) self.assertEqual(-1, quota['quota']['healthmonitor']) def test_update_quotas_to_unlimited(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=True)} quotas = {'quota': {'pool': -1}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env, expect_errors=False) self.assertEqual(200, res.status_int) quotas = {'quota': {'loadbalancer': -1}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env, expect_errors=False) self.assertEqual(200, res.status_int) def test_update_quotas_exceeding_current_limit(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id, is_admin=True)} quotas = {'quota': {'pool': 120}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env, expect_errors=False) self.assertEqual(200, res.status_int) quotas = {'quota': {'loadbalancer': 120}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env, expect_errors=False) self.assertEqual(200, 
res.status_int) def test_update_quotas_with_admin(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id + '2', is_admin=True)} quotas = {'quota': {'pool': 100}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env) self.assertEqual(200, res.status_int) quotas = {'quota': {'loadbalancer': 100}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), extra_environ=env) self.assertEqual(200, res.status_int) env2 = {'neutron.context': context.Context('', tenant_id)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env2) quota = self.deserialize(res) self.assertEqual(10, quota['quota']['vip']) self.assertEqual(100, quota['quota']['pool']) self.assertEqual(-1, quota['quota']['member']) self.assertEqual(-1, quota['quota']['health_monitor']) self.assertEqual(100, quota['quota']['loadbalancer']) self.assertEqual(-1, quota['quota']['listener']) self.assertEqual(-1, quota['quota']['healthmonitor']) class LBaaSQuotaExtensionCfgTestCase( LBaaSQuotaExtensionTestCase): def setUp(self): cfg.CONF.set_override( 'quota_driver', 'neutron.quota.ConfDriver', group='QUOTAS') super(LBaaSQuotaExtensionCfgTestCase, self).setUp() def test_quotas_default_values(self): tenant_id = 'tenant_id1' env = {'neutron.context': context.Context('', tenant_id)} res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt), extra_environ=env) quota = self.deserialize(res) self.assertEqual(10, quota['quota']['vip']) self.assertEqual(10, quota['quota']['pool']) self.assertEqual(-1, quota['quota']['member']) self.assertEqual(-1, quota['quota']['health_monitor']) self.assertEqual(-1, quota['quota']['extra1']) self.assertEqual(10, quota['quota']['loadbalancer']) self.assertEqual(-1, quota['quota']['listener']) self.assertEqual(-1, quota['quota']['healthmonitor']) def test_update_quotas_forbidden(self): tenant_id = 'tenant_id1' quotas = {'quota': 
{'pool': 100}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), expect_errors=True) self.assertEqual(403, res.status_int) quotas = {'quota': {'loadbalancer': 100}} res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt), self.serialize(quotas), expect_errors=True) self.assertEqual(403, res.status_int) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/__init__.py0000664000567000056710000000000012701407726030746 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/0000775000567000056710000000000012701410110030302 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/netscaler/0000775000567000056710000000000012701410110032262 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/netscaler/__init__.py0000664000567000056710000000000012701407726034404 0ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/netscaler/test_ncc_client.pyneutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/netscaler/test_ncc_client0000664000567000056710000002304412701407726035373 0ustar jenkinsjenkins00000000000000# Copyright 2014 Citrix Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from neutron.tests.unit import testlib_api import requests from neutron_lbaas.services.loadbalancer.drivers.netscaler import ncc_client from neutron_lbaas.services.loadbalancer.drivers.netscaler \ import netscaler_driver NCC_CLIENT_CLASS = ('neutron_lbaas.services.loadbalancer.drivers' '.netscaler.ncc_client.NSClient') TESTURI_SCHEME = 'http' TESTURI_HOSTNAME = '1.1.1.1' TESTURI_PORT = 4433 TESTURI_PATH = '/ncc_service/1.0' TESTURI = '%s://%s:%s%s' % (TESTURI_SCHEME, TESTURI_HOSTNAME, TESTURI_PORT, TESTURI_PATH) TEST_USERNAME = 'user211' TEST_PASSWORD = '@30xHl5cT' TEST_TENANT_ID = '9c5245a2-0432-9d4c-4829-9bd7028603a1' TESTVIP_ID = '52ab5d71-6bb2-457f-8414-22a4ba55efec' class TestNSClient(testlib_api.WebTestCase): """A Unit test for the NetScaler NCC client module.""" def setUp(self): self.log = mock.patch.object(ncc_client, 'LOG').start() super(TestNSClient, self).setUp() # mock the requests.request function call self.request_method_mock = mock.Mock() requests.request = self.request_method_mock self.testclient = self._get_nsclient() self.testclient.login = mock.Mock() self.testclient.login.side_effect = self.mock_auth_func( self.testclient) nfe_mock = mock.patch.object( ncc_client.NCCException, "is_not_found_exception").start() nfe_mock.return_value = True def mock_auth_func(self, ncc_test_client): ncc_test_client.auth = "SessId=123456789" def test_instantiate_nsclient_with_empty_uri(self): """Asserts that a call with empty URI will raise an exception.""" self.assertRaises(ncc_client.NCCException, ncc_client.NSClient, '', TEST_USERNAME, TEST_PASSWORD) def test_create_resource_with_no_connection(self): """Asserts that a call with no connection will raise an exception.""" # mock a connection object that fails to establish a connection self.request_method_mock.side_effect = ( requests.exceptions.ConnectionError()) resource_path = netscaler_driver.VIPS_RESOURCE resource_name = netscaler_driver.VIP_RESOURCE resource_body = 
self._get_testvip_httpbody_for_create() # call method under test: create_resource() and assert that # it raises an exception self.assertRaises(ncc_client.NCCException, self.testclient.create_resource, TEST_TENANT_ID, resource_path, resource_name, resource_body) def test_create_resource_with_error(self): """Asserts that a failed create call raises an exception.""" # create a mock object to represent a valid http response # with a failure status code. fake_response = requests.Response() fake_response.status_code = requests.codes.unavailable fake_response.headers = [] requests.request.return_value = fake_response resource_path = netscaler_driver.VIPS_RESOURCE resource_name = netscaler_driver.VIP_RESOURCE resource_body = self._get_testvip_httpbody_for_create() # call method under test: create_resource # and assert that it raises the expected exception. self.assertRaises(ncc_client.NCCException, self.testclient.create_resource, TEST_TENANT_ID, resource_path, resource_name, resource_body) def test_create_resource(self): """Asserts that a correct call will succeed.""" # obtain the mock object that corresponds to the call of request() fake_response = requests.Response() fake_response.status_code = requests.codes.created fake_response.headers = [] self.request_method_mock.return_value = fake_response resource_path = netscaler_driver.VIPS_RESOURCE resource_name = netscaler_driver.VIP_RESOURCE resource_body = self._get_testvip_httpbody_for_create() # call method under test: create_resource() self.testclient.create_resource(TEST_TENANT_ID, resource_path, resource_name, resource_body) # assert that request() was called # with the expected params. 
resource_url = "%s/%s" % (self.testclient.service_uri, resource_path) self.request_method_mock.assert_called_once_with( 'POST', url=resource_url, headers=mock.ANY, data=mock.ANY) def test_update_resource_with_error(self): """Asserts that a failed update call raises an exception.""" # create a valid http response with a failure status code. fake_response = requests.Response() fake_response.status_code = requests.codes.unavailable fake_response.headers = [] # obtain the mock object that corresponds to the call of request() self.request_method_mock.return_value = fake_response resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE, TESTVIP_ID) resource_name = netscaler_driver.VIP_RESOURCE resource_body = self._get_testvip_httpbody_for_update() # call method under test: update_resource() and # assert that it raises the expected exception. self.assertRaises(ncc_client.NCCException, self.testclient.update_resource, TEST_TENANT_ID, resource_path, resource_name, resource_body) def test_update_resource(self): """Asserts that a correct update call will succeed.""" # create a valid http response with a successful status code. fake_response = requests.Response() fake_response.status_code = requests.codes.ok fake_response.headers = [] # obtain the mock object that corresponds to the call of request() self.request_method_mock.return_value = fake_response resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE, TESTVIP_ID) resource_name = netscaler_driver.VIP_RESOURCE resource_body = self._get_testvip_httpbody_for_update() # call method under test: update_resource. self.testclient.update_resource(TEST_TENANT_ID, resource_path, resource_name, resource_body) resource_url = "%s/%s" % (self.testclient.service_uri, resource_path) # assert that requests.request() was called with the # expected params. 
self.request_method_mock.assert_called_once_with( 'PUT', url=resource_url, headers=mock.ANY, data=mock.ANY) def test_delete_resource_with_error(self): """Asserts that a failed delete call raises an exception.""" # create a valid http response with a failure status code. fake_response = requests.Response() fake_response.status_code = requests.codes.unavailable fake_response.headers = [] self.request_method_mock.return_value = fake_response resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE, TESTVIP_ID) # call method under test: create_resource self.assertRaises(ncc_client.NCCException, self.testclient.remove_resource, TEST_TENANT_ID, resource_path) def test_delete_resource(self): """Asserts that a correct delete call will succeed.""" # create a valid http response with a failure status code. fake_response = requests.Response() fake_response.status_code = requests.codes.ok fake_response.headers = [] # obtain the mock object that corresponds to the call of request() self.request_method_mock.return_value = fake_response resource_path = "%s/%s" % (netscaler_driver.VIPS_RESOURCE, TESTVIP_ID) resource_url = "%s/%s" % (self.testclient.service_uri, resource_path) # call method under test: create_resource self.testclient.remove_resource(TEST_TENANT_ID, resource_path) # assert that httplib.HTTPConnection request() was called with the # expected params self.request_method_mock.assert_called_once_with( 'DELETE', url=resource_url, headers=mock.ANY, data=mock.ANY) def _get_nsclient(self): return ncc_client.NSClient(TESTURI, TEST_USERNAME, TEST_PASSWORD) def _get_testvip_httpbody_for_create(self): body = { 'name': 'vip1', 'address': '10.0.0.3', 'pool_id': 'da477c13-24cd-4c9f-8c19-757a61ef3b9d', 'protocol': 'HTTP', 'protocol_port': 80, 'admin_state_up': True, } return body def _get_testvip_httpbody_for_update(self): body = {} body['name'] = 'updated vip1' body['admin_state_up'] = False return body ././@LongLink0000000000000000000000000000015600000000000011217 Lustar 
00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/netscaler/test_netscaler_driver.pyneutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/netscaler/test_netscaler_0000664000567000056710000011300212701407726035403 0ustar jenkinsjenkins00000000000000# Copyright 2014 Citrix Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import mock from neutron import context from neutron import manager from neutron.plugins.common import constants from neutron_lib import exceptions from neutron_lbaas.db.loadbalancer import loadbalancer_db from neutron_lbaas.services.loadbalancer.drivers.netscaler import ncc_client from neutron_lbaas.services.loadbalancer.drivers.netscaler \ import netscaler_driver from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer LBAAS_DRIVER_CLASS = ('neutron_lbaas.services.loadbalancer.drivers' '.netscaler.netscaler_driver' '.NetScalerPluginDriver') NCC_CLIENT_CLASS = ('neutron_lbaas.services.loadbalancer.drivers' '.netscaler.ncc_client' '.NSClient') LBAAS_PROVIDER_NAME = 'netscaler' LBAAS_PROVIDER = ('LOADBALANCER:%s:%s:default' % (LBAAS_PROVIDER_NAME, LBAAS_DRIVER_CLASS)) #Test data TESTVIP_ID = '52ab5d71-6bb2-457f-8414-22a4ba55efec' TESTPOOL_ID = 'da477c13-24cd-4c9f-8c19-757a61ef3b9d' TESTMEMBER_ID = '84dea8bc-3416-4fb0-83f9-2ca6e7173bee' TESTMONITOR_ID = '9b9245a2-0413-4f15-87ef-9a41ef66048c' TESTVIP_PORT_ID = '327d9662-ade9-4c74-aaf6-c76f145c1180' TESTPOOL_PORT_ID = 
'132c1dbb-d3d8-45aa-96e3-71f2ea51651e' TESTPOOL_SNATIP_ADDRESS = '10.0.0.50' TESTPOOL_SNAT_PORT = { 'id': TESTPOOL_PORT_ID, 'fixed_ips': [{'ip_address': TESTPOOL_SNATIP_ADDRESS}] } TESTVIP_IP = '10.0.1.100' TESTMEMBER_IP = '10.0.0.5' class TestLoadBalancerPluginBase(test_db_loadbalancer .LoadBalancerPluginDbTestCase): def setUp(self): # mock the NSClient class (REST client) client_mock_cls = mock.patch(NCC_CLIENT_CLASS).start() #mock the REST methods of the NSClient class self.client_mock_instance = client_mock_cls.return_value self.create_resource_mock = self.client_mock_instance.create_resource self.create_resource_mock.side_effect = mock_create_resource_func self.update_resource_mock = self.client_mock_instance.update_resource self.update_resource_mock.side_effect = mock_update_resource_func self.retrieve_resource_mock = (self.client_mock_instance .retrieve_resource) self.retrieve_resource_mock.side_effect = mock_retrieve_resource_func self.remove_resource_mock = self.client_mock_instance.remove_resource self.remove_resource_mock.side_effect = mock_remove_resource_func super(TestLoadBalancerPluginBase, self).setUp( lbaas_provider=LBAAS_PROVIDER) loaded_plugins = manager.NeutronManager().get_service_plugins() self.plugin_instance = loaded_plugins[constants.LOADBALANCER] class TestNetScalerPluginDriver(TestLoadBalancerPluginBase): """Unit tests for the NetScaler LBaaS driver module.""" def setUp(self): mock.patch.object(netscaler_driver, 'LOG').start() super(TestNetScalerPluginDriver, self).setUp() self.plugin_instance.drivers[LBAAS_PROVIDER_NAME] = ( netscaler_driver.NetScalerPluginDriver(self.plugin_instance)) self.driver = self.plugin_instance.drivers[LBAAS_PROVIDER_NAME] self.context = context.get_admin_context() def test_create_vip(self): with contextlib.nested( self.subnet(), mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') ) as (subnet, mock_get_subnet): mock_get_subnet.return_value = subnet['subnet'] with 
self.pool(provider=LBAAS_PROVIDER_NAME) as pool: testvip = self._build_testvip_contents(subnet['subnet'], pool['pool']) expectedvip = self._build_expectedvip_contents( testvip, subnet['subnet']) # mock the LBaaS plugin update_status(). self._mock_update_status() # reset the create_resource() mock self.create_resource_mock.reset_mock() # execute the method under test self.driver.create_vip(self.context, testvip) # First, assert that create_resource was called once # with expected params. self.create_resource_mock.assert_called_once_with( None, netscaler_driver.VIPS_RESOURCE, netscaler_driver.VIP_RESOURCE, expectedvip) #Finally, assert that the vip object is now ACTIVE self.mock_update_status_obj.assert_called_once_with( mock.ANY, loadbalancer_db.Vip, expectedvip['id'], constants.ACTIVE) def test_create_vip_without_connection(self): with contextlib.nested( self.subnet(), mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') ) as (subnet, mock_get_subnet): mock_get_subnet.return_value = subnet['subnet'] with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: testvip = self._build_testvip_contents(subnet['subnet'], pool['pool']) expectedvip = self._build_expectedvip_contents( testvip, subnet['subnet']) errorcode = ncc_client.NCCException.CONNECTION_ERROR self.create_resource_mock.side_effect = ( ncc_client.NCCException(errorcode)) # mock the plugin's update_status() self._mock_update_status() # reset the create_resource() mock self.create_resource_mock.reset_mock() # execute the method under test. self.driver.create_vip(self.context, testvip) # First, assert that update_resource was called once # with expected params. 
self.create_resource_mock.assert_called_once_with( None, netscaler_driver.VIPS_RESOURCE, netscaler_driver.VIP_RESOURCE, expectedvip) #Finally, assert that the vip object is in ERROR state self.mock_update_status_obj.assert_called_once_with( mock.ANY, loadbalancer_db.Vip, testvip['id'], constants.ERROR) def test_update_vip(self): with contextlib.nested( self.subnet(), mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') ) as (subnet, mock_get_subnet): mock_get_subnet.return_value = subnet['subnet'] with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: with self.vip(pool=pool, subnet=subnet) as vip: updated_vip = self._build_updated_testvip_contents( vip['vip'], subnet['subnet'], pool['pool']) expectedvip = self._build_updated_expectedvip_contents( updated_vip, subnet['subnet'], pool['pool']) # mock the plugin's update_status() self._mock_update_status() # reset the update_resource() mock self.update_resource_mock.reset_mock() # execute the method under test self.driver.update_vip(self.context, updated_vip, updated_vip) vip_resource_path = "%s/%s" % ( (netscaler_driver.VIPS_RESOURCE, vip['vip']['id'])) # First, assert that update_resource was called once # with expected params. 
(self.update_resource_mock .assert_called_once_with( None, vip_resource_path, netscaler_driver.VIP_RESOURCE, expectedvip)) #Finally, assert that the vip object is now ACTIVE self.mock_update_status_obj.assert_called_once_with( mock.ANY, loadbalancer_db.Vip, vip['vip']['id'], constants.ACTIVE) def test_delete_vip(self): with contextlib.nested( self.subnet(), mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') ) as (subnet, mock_get_subnet): mock_get_subnet.return_value = subnet['subnet'] with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: with contextlib.nested( self.vip(pool=pool, subnet=subnet), mock.patch.object(self.driver.plugin, '_delete_db_vip') ) as (vip, mock_delete_db_vip): mock_delete_db_vip.return_value = None #reset the remove_resource() mock self.remove_resource_mock.reset_mock() # execute the method under test self.driver.delete_vip(self.context, vip['vip']) vip_resource_path = "%s/%s" % ( (netscaler_driver.VIPS_RESOURCE, vip['vip']['id'])) # Assert that remove_resource() was called once # with expected params. 
(self.remove_resource_mock .assert_called_once_with(None, vip_resource_path)) def test_create_pool(self): with contextlib.nested( self.subnet(), mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet'), mock.patch.object(self.driver.plugin._core_plugin, 'get_ports'), mock.patch.object(self.driver.plugin._core_plugin, 'create_port') ) as (subnet, mock_get_subnet, mock_get_ports, mock_create_port): mock_get_subnet.return_value = subnet['subnet'] mock_get_ports.return_value = None mock_create_port.return_value = TESTPOOL_SNAT_PORT testpool = self._build_testpool_contents(subnet['subnet']) expectedpool = self._build_expectedpool_contents(testpool, subnet['subnet']) #reset the create_resource() mock self.create_resource_mock.reset_mock() # mock the plugin's update_status() self._mock_update_status() # execute the method under test self.driver.create_pool(self.context, testpool) # First, assert that create_resource was called once # with expected params. (self.create_resource_mock .assert_called_once_with(None, netscaler_driver.POOLS_RESOURCE, netscaler_driver.POOL_RESOURCE, expectedpool)) #Finally, assert that the pool object is now ACTIVE self.mock_update_status_obj.assert_called_once_with( mock.ANY, loadbalancer_db.Pool, expectedpool['id'], constants.ACTIVE) def test_create_pool_with_error(self): with contextlib.nested( self.subnet(), mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet'), mock.patch.object(self.driver.plugin._core_plugin, 'get_ports'), mock.patch.object(self.driver.plugin._core_plugin, 'create_port') ) as (subnet, mock_get_subnet, mock_get_ports, mock_create_port): mock_get_subnet.return_value = subnet['subnet'] mock_get_ports.return_value = None mock_create_port.return_value = TESTPOOL_SNAT_PORT errorcode = ncc_client.NCCException.CONNECTION_ERROR self.create_resource_mock.side_effect = (ncc_client .NCCException(errorcode)) testpool = self._build_testpool_contents(subnet['subnet']) expectedpool = 
self._build_expectedpool_contents(testpool, subnet['subnet']) # mock the plugin's update_status() self._mock_update_status() #reset the create_resource() mock self.create_resource_mock.reset_mock() # execute the method under test. self.driver.create_pool(self.context, testpool) # Also assert that create_resource was called once # with expected params. (self.create_resource_mock .assert_called_once_with(None, netscaler_driver.POOLS_RESOURCE, netscaler_driver.POOL_RESOURCE, expectedpool)) #Finally, assert that the pool object is in ERROR state self.mock_update_status_obj.assert_called_once_with( mock.ANY, loadbalancer_db.Pool, expectedpool['id'], constants.ERROR) def test_create_pool_with_snatportcreate_failure(self): with contextlib.nested( self.subnet(), mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet'), mock.patch.object(self.driver.plugin._core_plugin, 'get_ports'), mock.patch.object(self.driver.plugin._core_plugin, 'create_port') ) as (subnet, mock_get_subnet, mock_get_ports, mock_create_port): mock_get_subnet.return_value = subnet['subnet'] mock_get_ports.return_value = None mock_create_port.side_effect = exceptions.NeutronException() testpool = self._build_testpool_contents(subnet['subnet']) #reset the create_resource() mock self.create_resource_mock.reset_mock() # execute the method under test. 
self.assertRaises(exceptions.NeutronException, self.driver.create_pool, self.context, testpool) def test_update_pool(self): with contextlib.nested( self.subnet(), mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') ) as (subnet, mock_get_subnet): mock_get_subnet.return_value = subnet['subnet'] with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: updated_pool = self._build_updated_testpool_contents( pool['pool'], subnet['subnet']) expectedpool = self._build_updated_expectedpool_contents( updated_pool, subnet['subnet']) # mock the plugin's update_status() self._mock_update_status() # reset the update_resource() mock self.update_resource_mock.reset_mock() # execute the method under test. self.driver.update_pool(self.context, pool['pool'], updated_pool) pool_resource_path = "%s/%s" % ( (netscaler_driver.POOLS_RESOURCE, pool['pool']['id'])) # First, assert that update_resource was called once # with expected params. (self.update_resource_mock .assert_called_once_with(None, pool_resource_path, netscaler_driver.POOL_RESOURCE, expectedpool)) #Finally, assert that the pool object is now ACTIVE self.mock_update_status_obj.assert_called_once_with( mock.ANY, loadbalancer_db.Pool, pool['pool']['id'], constants.ACTIVE) def test_delete_pool(self): with contextlib.nested( self.subnet(), mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') ) as (subnet, mock_get_subnet): mock_get_subnet.return_value = subnet['subnet'] with contextlib.nested( self.pool(provider=LBAAS_PROVIDER_NAME), mock.patch.object(self.driver.plugin._core_plugin, 'delete_port'), mock.patch.object(self.driver.plugin._core_plugin, 'get_ports'), mock.patch.object(self.driver.plugin, 'get_pools'), mock.patch.object(self.driver.plugin, '_delete_db_pool') ) as (pool, mock_delete_port, mock_get_ports, mock_get_pools, mock_delete_db_pool): mock_delete_port.return_value = None mock_get_ports.return_value = [{'id': TESTPOOL_PORT_ID}] mock_get_pools.return_value = [] 
mock_delete_db_pool.return_value = None #reset the remove_resource() mock self.remove_resource_mock.reset_mock() # execute the method under test. self.driver.delete_pool(self.context, pool['pool']) pool_resource_path = "%s/%s" % ( (netscaler_driver.POOLS_RESOURCE, pool['pool']['id'])) # Assert that delete_resource was called # once with expected params. (self.remove_resource_mock .assert_called_once_with(None, pool_resource_path)) def test_create_member(self): with contextlib.nested( self.subnet(), mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') ) as (subnet, mock_get_subnet): mock_get_subnet.return_value = subnet['subnet'] with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: testmember = self._build_testmember_contents(pool['pool']) expectedmember = self._build_expectedmember_contents( testmember) # mock the plugin's update_status() self._mock_update_status() #reset the create_resource() mock self.create_resource_mock.reset_mock() # execute the method under test. self.driver.create_member(self.context, testmember) # First, assert that create_resource was called once # with expected params. 
(self.create_resource_mock .assert_called_once_with( None, netscaler_driver.POOLMEMBERS_RESOURCE, netscaler_driver.POOLMEMBER_RESOURCE, expectedmember)) #Finally, assert that the member object is now ACTIVE self.mock_update_status_obj.assert_called_once_with( mock.ANY, loadbalancer_db.Member, expectedmember['id'], constants.ACTIVE) def test_update_member(self): with contextlib.nested( self.subnet(), mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') ) as (subnet, mock_get_subnet): mock_get_subnet.return_value = subnet['subnet'] with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: with self.member(pool_id=pool['pool']['id']) as member: updatedmember = (self._build_updated_testmember_contents( member['member'])) expectedmember = (self ._build_updated_expectedmember_contents( updatedmember)) # mock the plugin's update_status() self._mock_update_status() # reset the update_resource() mock self.update_resource_mock.reset_mock() # execute the method under test self.driver.update_member(self.context, member['member'], updatedmember) member_resource_path = "%s/%s" % ( (netscaler_driver.POOLMEMBERS_RESOURCE, member['member']['id'])) # First, assert that update_resource was called once # with expected params. 
(self.update_resource_mock .assert_called_once_with( None, member_resource_path, netscaler_driver.POOLMEMBER_RESOURCE, expectedmember)) #Finally, assert that the member object is now ACTIVE self.mock_update_status_obj.assert_called_once_with( mock.ANY, loadbalancer_db.Member, member['member']['id'], constants.ACTIVE) def test_delete_member(self): with contextlib.nested( self.subnet(), mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') ) as (subnet, mock_get_subnet): mock_get_subnet.return_value = subnet['subnet'] with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: with contextlib.nested( self.member(pool_id=pool['pool']['id']), mock.patch.object(self.driver.plugin, '_delete_db_member') ) as (member, mock_delete_db_member): mock_delete_db_member.return_value = None # reset the remove_resource() mock self.remove_resource_mock.reset_mock() # execute the method under test self.driver.delete_member(self.context, member['member']) member_resource_path = "%s/%s" % ( (netscaler_driver.POOLMEMBERS_RESOURCE, member['member']['id'])) # Assert that delete_resource was called once # with expected params. (self.remove_resource_mock .assert_called_once_with(None, member_resource_path)) def test_create_pool_health_monitor(self): with contextlib.nested( self.subnet(), mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') ) as (subnet, mock_get_subnet): mock_get_subnet.return_value = subnet['subnet'] with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: testhealthmonitor = self._build_testhealthmonitor_contents( pool['pool']) expectedhealthmonitor = ( self._build_expectedhealthmonitor_contents( testhealthmonitor)) with mock.patch.object(self.driver.plugin, 'update_pool_health_monitor') as mhm: # reset the create_resource() mock self.create_resource_mock.reset_mock() # execute the method under test. 
self.driver.create_pool_health_monitor(self.context, testhealthmonitor, pool['pool']['id']) # First, assert that create_resource was called once # with expected params. resource_path = "%s/%s/%s" % ( netscaler_driver.POOLS_RESOURCE, pool['pool']['id'], netscaler_driver.MONITORS_RESOURCE) (self.create_resource_mock .assert_called_once_with( None, resource_path, netscaler_driver.MONITOR_RESOURCE, expectedhealthmonitor)) # Finally, assert that the healthmonitor object is # now ACTIVE. (mhm.assert_called_once_with( mock.ANY, expectedhealthmonitor['id'], pool['pool']['id'], constants.ACTIVE, "")) def test_update_pool_health_monitor(self): with contextlib.nested( self.subnet(), mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') ) as (subnet, mock_get_subnet): mock_get_subnet.return_value = subnet['subnet'] with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: with self.health_monitor( pool_id=pool['pool']['id'] ) as (health_monitor): updatedhealthmonitor = ( self._build_updated_testhealthmonitor_contents( health_monitor['health_monitor'])) expectedhealthmonitor = ( self._build_updated_expectedhealthmonitor_contents( updatedhealthmonitor)) with mock.patch.object(self.driver.plugin, 'update_pool_health_monitor')as mhm: # reset the update_resource() mock self.update_resource_mock.reset_mock() # execute the method under test. self.driver.update_pool_health_monitor( self.context, health_monitor['health_monitor'], updatedhealthmonitor, pool['pool']['id']) monitor_resource_path = "%s/%s" % ( (netscaler_driver.MONITORS_RESOURCE, health_monitor['health_monitor']['id'])) # First, assert that update_resource was called once # with expected params. 
self.update_resource_mock.assert_called_once_with( None, monitor_resource_path, netscaler_driver.MONITOR_RESOURCE, expectedhealthmonitor) #Finally, assert that the member object is now ACTIVE (mhm.assert_called_once_with( mock.ANY, health_monitor['health_monitor']['id'], pool['pool']['id'], constants.ACTIVE, "")) def test_delete_pool_health_monitor(self): with contextlib.nested( self.subnet(), mock.patch.object(self.driver.plugin._core_plugin, 'get_subnet') ) as (subnet, mock_get_subnet): mock_get_subnet.return_value = subnet['subnet'] with self.pool(provider=LBAAS_PROVIDER_NAME) as pool: with contextlib.nested( self.health_monitor(pool_id=pool['pool']['id']), mock.patch.object(self.driver.plugin, '_delete_db_pool_health_monitor') ) as (health_monitor, mock_delete_db_monitor): mock_delete_db_monitor.return_value = None # reset the remove_resource() mock self.remove_resource_mock.reset_mock() # execute the method under test. self.driver.delete_pool_health_monitor( self.context, health_monitor['health_monitor'], pool['pool']['id']) monitor_resource_path = "%s/%s/%s/%s" % ( netscaler_driver.POOLS_RESOURCE, pool['pool']['id'], netscaler_driver.MONITORS_RESOURCE, health_monitor['health_monitor']['id']) # Assert that delete_resource was called once # with expected params. 
self.remove_resource_mock.assert_called_once_with( None, monitor_resource_path) def _build_testvip_contents(self, subnet, pool): vip_obj = dict(id=TESTVIP_ID, name='testvip', description='a test vip', tenant_id=self._tenant_id, subnet_id=subnet['id'], address=TESTVIP_IP, port_id=TESTVIP_PORT_ID, pool_id=pool['id'], protocol='HTTP', protocol_port=80, connection_limit=1000, admin_state_up=True, status='PENDING_CREATE', status_description='') return vip_obj def _build_expectedvip_contents(self, testvip, subnet): expectedvip = dict(id=testvip['id'], name=testvip['name'], description=testvip['description'], tenant_id=testvip['tenant_id'], subnet_id=testvip['subnet_id'], address=testvip['address'], network_id=subnet['network_id'], port_id=testvip['port_id'], pool_id=testvip['pool_id'], protocol=testvip['protocol'], protocol_port=testvip['protocol_port'], connection_limit=testvip['connection_limit'], admin_state_up=testvip['admin_state_up']) return expectedvip def _build_updated_testvip_contents(self, testvip, subnet, pool): #update some updateable fields of the vip testvip['name'] = 'udpated testvip' testvip['description'] = 'An updated version of test vip' testvip['connection_limit'] = 2000 return testvip def _build_updated_expectedvip_contents(self, testvip, subnet, pool): expectedvip = dict(name=testvip['name'], description=testvip['description'], connection_limit=testvip['connection_limit'], admin_state_up=testvip['admin_state_up'], pool_id=testvip['pool_id']) return expectedvip def _build_testpool_contents(self, subnet): pool_obj = dict(id=TESTPOOL_ID, name='testpool', description='a test pool', tenant_id=self._tenant_id, subnet_id=subnet['id'], protocol='HTTP', vip_id=None, admin_state_up=True, lb_method='ROUND_ROBIN', status='PENDING_CREATE', status_description='', members=[], health_monitors=[], health_monitors_status=None, provider=LBAAS_PROVIDER_NAME) return pool_obj def _build_expectedpool_contents(self, testpool, subnet): expectedpool = 
dict(id=testpool['id'], name=testpool['name'], description=testpool['description'], tenant_id=testpool['tenant_id'], subnet_id=testpool['subnet_id'], network_id=subnet['network_id'], protocol=testpool['protocol'], vip_id=testpool['vip_id'], lb_method=testpool['lb_method'], snat_ip=TESTPOOL_SNATIP_ADDRESS, port_id=TESTPOOL_PORT_ID, admin_state_up=testpool['admin_state_up']) return expectedpool def _build_updated_testpool_contents(self, testpool, subnet): updated_pool = dict(testpool.items()) updated_pool['name'] = 'udpated testpool' updated_pool['description'] = 'An updated version of test pool' updated_pool['lb_method'] = 'LEAST_CONNECTIONS' updated_pool['admin_state_up'] = True updated_pool['provider'] = LBAAS_PROVIDER_NAME updated_pool['status'] = 'PENDING_UPDATE' updated_pool['status_description'] = '' updated_pool['members'] = [] updated_pool["health_monitors"] = [] updated_pool["health_monitors_status"] = None return updated_pool def _build_updated_expectedpool_contents(self, testpool, subnet): expectedpool = dict(name=testpool['name'], description=testpool['description'], lb_method=testpool['lb_method'], admin_state_up=testpool['admin_state_up']) return expectedpool def _build_testmember_contents(self, pool): member_obj = dict( id=TESTMEMBER_ID, tenant_id=self._tenant_id, pool_id=pool['id'], address=TESTMEMBER_IP, protocol_port=8080, weight=2, admin_state_up=True, status='PENDING_CREATE', status_description='') return member_obj def _build_expectedmember_contents(self, testmember): expectedmember = dict( id=testmember['id'], tenant_id=testmember['tenant_id'], pool_id=testmember['pool_id'], address=testmember['address'], protocol_port=testmember['protocol_port'], weight=testmember['weight'], admin_state_up=testmember['admin_state_up']) return expectedmember def _build_updated_testmember_contents(self, testmember): updated_member = dict(testmember.items()) updated_member.update( weight=3, admin_state_up=True, status='PENDING_CREATE', status_description='' ) 
return updated_member def _build_updated_expectedmember_contents(self, testmember): expectedmember = dict(weight=testmember['weight'], pool_id=testmember['pool_id'], admin_state_up=testmember['admin_state_up']) return expectedmember def _build_testhealthmonitor_contents(self, pool): monitor_obj = dict( id=TESTMONITOR_ID, tenant_id=self._tenant_id, type='TCP', delay=10, timeout=5, max_retries=3, admin_state_up=True, pools=[]) pool_obj = dict(status='PENDING_CREATE', status_description=None, pool_id=pool['id']) monitor_obj['pools'].append(pool_obj) return monitor_obj def _build_expectedhealthmonitor_contents(self, testhealthmonitor): expectedmonitor = dict(id=testhealthmonitor['id'], tenant_id=testhealthmonitor['tenant_id'], type=testhealthmonitor['type'], delay=testhealthmonitor['delay'], timeout=testhealthmonitor['timeout'], max_retries=testhealthmonitor['max_retries'], admin_state_up=( testhealthmonitor['admin_state_up'])) return expectedmonitor def _build_updated_testhealthmonitor_contents(self, testmonitor): updated_monitor = dict(testmonitor.items()) updated_monitor.update( delay=30, timeout=3, max_retries=5, admin_state_up=True ) return updated_monitor def _build_updated_expectedhealthmonitor_contents(self, testmonitor): expectedmonitor = dict(delay=testmonitor['delay'], timeout=testmonitor['timeout'], max_retries=testmonitor['max_retries'], admin_state_up=testmonitor['admin_state_up']) return expectedmonitor def _mock_update_status(self): #patch the plugin's update_status() method with a mock object self.mock_update_status_patcher = mock.patch.object( self.driver.plugin, 'update_status') self.mock_update_status_obj = self.mock_update_status_patcher.start() def mock_create_resource_func(*args, **kwargs): return 201, {} def mock_update_resource_func(*args, **kwargs): return 202, {} def mock_retrieve_resource_func(*args, **kwargs): return 200, {} def mock_remove_resource_func(*args, **kwargs): return 200, {} 
neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/test_agent_driver_base.py0000664000567000056710000007724212701407726035415 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import mock from neutron import context from neutron.db import servicetype_db as st_db from neutron.extensions import portbindings from neutron import manager from neutron.plugins.common import constants from neutron.tests.unit import testlib_api from oslo_utils import uuidutils import six from six import moves from webob import exc from neutron_lbaas.db.loadbalancer import loadbalancer_db as ldb from neutron_lbaas.extensions import loadbalancer from neutron_lbaas.services.loadbalancer.drivers.common \ import agent_driver_base from neutron_lbaas.tests import base from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer class TestLoadBalancerPluginBase( test_db_loadbalancer.LoadBalancerPluginDbTestCase): def setUp(self): def reset_device_driver(): agent_driver_base.AgentDriverBase.device_driver = None self.addCleanup(reset_device_driver) self.mock_importer = mock.patch.object( agent_driver_base, 'importutils').start() # needed to reload provider configuration st_db.ServiceTypeManager._instance = None agent_driver_base.AgentDriverBase.device_driver = 'dummy' super(TestLoadBalancerPluginBase, self).setUp( lbaas_provider=('LOADBALANCER:lbaas:neutron_lbaas.services.' 
'loadbalancer.drivers.common.agent_driver_base.' 'AgentDriverBase:default')) # we need access to loaded plugins to modify models loaded_plugins = manager.NeutronManager().get_service_plugins() self.plugin_instance = loaded_plugins[constants.LOADBALANCER] class TestLoadBalancerCallbacks(TestLoadBalancerPluginBase): def setUp(self): super(TestLoadBalancerCallbacks, self).setUp() self.callbacks = agent_driver_base.LoadBalancerCallbacks( self.plugin_instance ) get_lbaas_agents_patcher = mock.patch( 'neutron_lbaas.services.loadbalancer.agent_scheduler' '.LbaasAgentSchedulerDbMixin.get_lbaas_agents') get_lbaas_agents_patcher.start() def test_get_ready_devices(self): with self.vip() as vip: with mock.patch('neutron_lbaas.services.loadbalancer.' 'agent_scheduler.LbaasAgentSchedulerDbMixin.' 'list_pools_on_lbaas_agent') as mock_agent_pools: mock_agent_pools.return_value = { 'pools': [{'id': vip['vip']['pool_id']}]} ready = self.callbacks.get_ready_devices( context.get_admin_context(), ) self.assertEqual([vip['vip']['pool_id']], ready) def test_get_ready_devices_multiple_vips_and_pools(self): ctx = context.get_admin_context() # add 3 pools and 2 vips directly to DB # to create 2 "ready" devices and one pool without vip pools = [] for i in moves.range(3): pools.append(ldb.Pool(id=uuidutils.generate_uuid(), subnet_id=self._subnet_id, protocol="HTTP", lb_method="ROUND_ROBIN", status=constants.ACTIVE, admin_state_up=True)) ctx.session.add(pools[i]) vip0 = ldb.Vip(id=uuidutils.generate_uuid(), protocol_port=80, protocol="HTTP", pool_id=pools[0].id, status=constants.ACTIVE, admin_state_up=True, connection_limit=3) ctx.session.add(vip0) pools[0].vip_id = vip0.id vip1 = ldb.Vip(id=uuidutils.generate_uuid(), protocol_port=80, protocol="HTTP", pool_id=pools[1].id, status=constants.ACTIVE, admin_state_up=True, connection_limit=3) ctx.session.add(vip1) pools[1].vip_id = vip1.id ctx.session.flush() self.assertEqual(3, ctx.session.query(ldb.Pool).count()) self.assertEqual(2, 
ctx.session.query(ldb.Vip).count()) with mock.patch('neutron_lbaas.services.loadbalancer.agent_scheduler' '.LbaasAgentSchedulerDbMixin' '.list_pools_on_lbaas_agent') as mock_agent_pools: mock_agent_pools.return_value = {'pools': [{'id': pools[0].id}, {'id': pools[1].id}, {'id': pools[2].id}]} ready = self.callbacks.get_ready_devices(ctx) self.assertEqual(3, len(ready)) self.assertIn(pools[0].id, ready) self.assertIn(pools[1].id, ready) self.assertIn(pools[2].id, ready) # cleanup ctx.session.query(ldb.Pool).delete() ctx.session.query(ldb.Vip).delete() def test_get_ready_devices_inactive_vip(self): with self.vip() as vip: # set the vip inactive need to use plugin directly since # status is not tenant mutable self.plugin_instance.update_vip( context.get_admin_context(), vip['vip']['id'], {'vip': {'status': constants.INACTIVE}} ) with mock.patch('neutron_lbaas.services.loadbalancer.' 'agent_scheduler.LbaasAgentSchedulerDbMixin.' 'list_pools_on_lbaas_agent') as mock_agent_pools: mock_agent_pools.return_value = { 'pools': [{'id': vip['vip']['pool_id']}]} ready = self.callbacks.get_ready_devices( context.get_admin_context(), ) self.assertEqual([vip['vip']['pool_id']], ready) def test_get_ready_devices_inactive_pool(self): with self.vip() as vip: # set the pool inactive need to use plugin directly since # status is not tenant mutable self.plugin_instance.update_pool( context.get_admin_context(), vip['vip']['pool_id'], {'pool': {'status': constants.INACTIVE}} ) with mock.patch('neutron_lbaas.services.loadbalancer.' 'agent_scheduler.LbaasAgentSchedulerDbMixin.' 
'list_pools_on_lbaas_agent') as mock_agent_pools: mock_agent_pools.return_value = { 'pools': [{'id': vip['vip']['pool_id']}]} ready = self.callbacks.get_ready_devices( context.get_admin_context(), ) self.assertFalse(ready) def test_get_logical_device_non_active(self): with self.pool() as pool: ctx = context.get_admin_context() for status in ('INACTIVE', 'PENDING_CREATE', 'PENDING_UPDATE'): self.plugin_instance.update_status( ctx, ldb.Pool, pool['pool']['id'], status) pool['pool']['status'] = status expected = { 'pool': pool['pool'], 'members': [], 'healthmonitors': [], 'driver': 'dummy' } logical_config = self.callbacks.get_logical_device( ctx, pool['pool']['id'] ) self.assertEqual(expected, logical_config) def test_get_logical_device_active(self): with self.pool() as pool: with self.vip(pool=pool) as vip: with self.member(pool_id=vip['vip']['pool_id']) as member: ctx = context.get_admin_context() # activate objects self.plugin_instance.update_status( ctx, ldb.Pool, pool['pool']['id'], 'ACTIVE') self.plugin_instance.update_status( ctx, ldb.Member, member['member']['id'], 'ACTIVE') self.plugin_instance.update_status( ctx, ldb.Vip, vip['vip']['id'], 'ACTIVE') # build the expected port = self.plugin_instance._core_plugin.get_port( ctx, vip['vip']['port_id'] ) subnet = self.plugin_instance._core_plugin.get_subnet( ctx, vip['vip']['subnet_id'] ) port['fixed_ips'][0]['subnet'] = subnet # reload pool to add members and vip pool = self.plugin_instance.get_pool( ctx, pool['pool']['id'] ) pool['status'] = constants.ACTIVE vip['vip']['status'] = constants.ACTIVE vip['vip']['port'] = port member['member']['status'] = constants.ACTIVE expected = { 'pool': pool, 'vip': vip['vip'], 'members': [member['member']], 'healthmonitors': [], 'driver': 'dummy' } logical_config = self.callbacks.get_logical_device( ctx, pool['id'] ) self.assertEqual(expected, logical_config) def test_get_logical_device_inactive_member(self): with self.pool() as pool: with self.vip(pool=pool) as vip: with 
self.member(pool_id=vip['vip']['pool_id']) as member: ctx = context.get_admin_context() self.plugin_instance.update_status(ctx, ldb.Pool, pool['pool']['id'], 'ACTIVE') self.plugin_instance.update_status(ctx, ldb.Vip, vip['vip']['id'], 'ACTIVE') self.plugin_instance.update_status(ctx, ldb.Member, member['member']['id'], 'INACTIVE') logical_config = self.callbacks.get_logical_device( ctx, pool['pool']['id']) member['member']['status'] = constants.INACTIVE self.assertEqual([member['member']], logical_config['members']) def test_get_logical_device_pending_create_member(self): with self.pool() as pool: with self.vip(pool=pool) as vip: with self.member(pool_id=vip['vip']['pool_id']) as member: ctx = context.get_admin_context() self.plugin_instance.update_status(ctx, ldb.Pool, pool['pool']['id'], 'ACTIVE') self.plugin_instance.update_status(ctx, ldb.Vip, vip['vip']['id'], 'ACTIVE') member = self.plugin_instance.get_member( ctx, member['member']['id']) self.assertEqual('PENDING_CREATE', member['status']) logical_config = self.callbacks.get_logical_device( ctx, pool['pool']['id']) self.assertEqual([member], logical_config['members']) def test_get_logical_device_pending_create_health_monitor(self): with self.health_monitor() as monitor: with self.pool() as pool: with self.vip(pool=pool) as vip: ctx = context.get_admin_context() self.plugin_instance.update_status(ctx, ldb.Pool, pool['pool']['id'], 'ACTIVE') self.plugin_instance.update_status(ctx, ldb.Vip, vip['vip']['id'], 'ACTIVE') self.plugin_instance.create_pool_health_monitor( ctx, monitor, pool['pool']['id']) pool = self.plugin_instance.get_pool( ctx, pool['pool']['id']) monitor = self.plugin_instance.get_health_monitor( ctx, monitor['health_monitor']['id']) self.assertEqual( 'PENDING_CREATE', pool['health_monitors_status'][0]['status']) logical_config = self.callbacks.get_logical_device( ctx, pool['id']) self.assertEqual([monitor], logical_config['healthmonitors']) def _update_port_test_helper(self, expected, func, 
**kwargs): core = self.plugin_instance._core_plugin with self.pool() as pool: with self.vip(pool=pool) as vip: with self.member(pool_id=vip['vip']['pool_id']): ctx = context.get_admin_context() func(ctx, port_id=vip['vip']['port_id'], **kwargs) db_port = core.get_port(ctx, vip['vip']['port_id']) for k, v in six.iteritems(expected): self.assertEqual(v, db_port[k]) def test_plug_vip_port(self): exp = { 'device_owner': 'neutron:' + constants.LOADBALANCER, 'admin_state_up': True } self._update_port_test_helper( exp, self.callbacks.plug_vip_port, host='host' ) def test_plug_vip_port_mock_with_host(self): exp = { 'device_owner': 'neutron:' + constants.LOADBALANCER, 'admin_state_up': True, portbindings.HOST_ID: 'host' } with mock.patch.object( self.plugin._core_plugin, 'update_port') as mock_update_port: with self.pool() as pool: with self.vip(pool=pool) as vip: ctx = context.get_admin_context() self.callbacks.plug_vip_port( ctx, port_id=vip['vip']['port_id'], host='host') mock_update_port.assert_called_once_with( ctx, vip['vip']['port_id'], {'port': testlib_api.SubDictMatch(exp)}) def test_unplug_vip_port(self): exp = { 'device_owner': '', 'device_id': '', 'admin_state_up': False } self._update_port_test_helper( exp, self.callbacks.unplug_vip_port, host='host' ) def test_pool_deployed(self): with self.pool() as pool: with self.vip(pool=pool) as vip: with self.member(pool_id=vip['vip']['pool_id']) as member: ctx = context.get_admin_context() p = self.plugin_instance.get_pool(ctx, pool['pool']['id']) self.assertEqual('PENDING_CREATE', p['status']) v = self.plugin_instance.get_vip(ctx, vip['vip']['id']) self.assertEqual('PENDING_CREATE', v['status']) m = self.plugin_instance.get_member( ctx, member['member']['id']) self.assertEqual('PENDING_CREATE', m['status']) self.callbacks.pool_deployed(ctx, pool['pool']['id']) p = self.plugin_instance.get_pool(ctx, pool['pool']['id']) self.assertEqual('ACTIVE', p['status']) v = self.plugin_instance.get_vip(ctx, vip['vip']['id']) 
self.assertEqual('ACTIVE', v['status']) m = self.plugin_instance.get_member( ctx, member['member']['id']) self.assertEqual('ACTIVE', m['status']) def test_update_status_pool(self): with self.pool() as pool: pool_id = pool['pool']['id'] ctx = context.get_admin_context() p = self.plugin_instance.get_pool(ctx, pool_id) self.assertEqual('PENDING_CREATE', p['status']) self.callbacks.update_status(ctx, 'pool', pool_id, 'ACTIVE') p = self.plugin_instance.get_pool(ctx, pool_id) self.assertEqual('ACTIVE', p['status']) def test_update_status_pool_deleted_already(self): with mock.patch.object(agent_driver_base, 'LOG') as mock_log: pool_id = 'deleted_pool' ctx = context.get_admin_context() self.assertRaises(loadbalancer.PoolNotFound, self.plugin_instance.get_pool, ctx, pool_id) self.callbacks.update_status(ctx, 'pool', pool_id, 'ACTIVE') self.assertTrue(mock_log.warning.called) def test_update_status_health_monitor(self): with contextlib.nested( self.health_monitor(), self.pool() ) as (hm, pool): pool_id = pool['pool']['id'] ctx = context.get_admin_context() self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id) hm_id = hm['health_monitor']['id'] h = self.plugin_instance.get_pool_health_monitor(ctx, hm_id, pool_id) self.assertEqual('PENDING_CREATE', h['status']) self.callbacks.update_status( ctx, 'health_monitor', {'monitor_id': hm_id, 'pool_id': pool_id}, 'ACTIVE') h = self.plugin_instance.get_pool_health_monitor(ctx, hm_id, pool_id) self.assertEqual('ACTIVE', h['status']) class TestLoadBalancerAgentApi(base.BaseTestCase): def setUp(self): super(TestLoadBalancerAgentApi, self).setUp() self.api = agent_driver_base.LoadBalancerAgentApi('topic') def test_init(self): self.assertEqual('topic', self.api.client.target.topic) def _call_test_helper(self, method_name, method_args): with contextlib.nested( mock.patch.object(self.api.client, 'cast'), mock.patch.object(self.api.client, 'prepare'), ) as ( rpc_mock, prepare_mock ): prepare_mock.return_value = self.api.client 
getattr(self.api, method_name)(mock.sentinel.context, host='host', **method_args) prepare_args = {'server': 'host'} prepare_mock.assert_called_once_with(**prepare_args) if method_name == 'agent_updated': method_args = {'payload': method_args} rpc_mock.assert_called_once_with(mock.sentinel.context, method_name, **method_args) def test_agent_updated(self): self._call_test_helper('agent_updated', {'admin_state_up': 'test'}) def test_create_pool(self): self._call_test_helper('create_pool', {'pool': 'test', 'driver_name': 'dummy'}) def test_update_pool(self): self._call_test_helper('update_pool', {'old_pool': 'test', 'pool': 'test'}) def test_delete_pool(self): self._call_test_helper('delete_pool', {'pool': 'test'}) def test_create_vip(self): self._call_test_helper('create_vip', {'vip': 'test'}) def test_update_vip(self): self._call_test_helper('update_vip', {'old_vip': 'test', 'vip': 'test'}) def test_delete_vip(self): self._call_test_helper('delete_vip', {'vip': 'test'}) def test_create_member(self): self._call_test_helper('create_member', {'member': 'test'}) def test_update_member(self): self._call_test_helper('update_member', {'old_member': 'test', 'member': 'test'}) def test_delete_member(self): self._call_test_helper('delete_member', {'member': 'test'}) def test_create_monitor(self): self._call_test_helper('create_pool_health_monitor', {'health_monitor': 'test', 'pool_id': 'test'}) def test_update_monitor(self): self._call_test_helper('update_pool_health_monitor', {'old_health_monitor': 'test', 'health_monitor': 'test', 'pool_id': 'test'}) def test_delete_monitor(self): self._call_test_helper('delete_pool_health_monitor', {'health_monitor': 'test', 'pool_id': 'test'}) class TestLoadBalancerPluginNotificationWrapper(TestLoadBalancerPluginBase): def setUp(self): self.log = mock.patch.object(agent_driver_base, 'LOG') api_cls = mock.patch.object(agent_driver_base, 'LoadBalancerAgentApi').start() super(TestLoadBalancerPluginNotificationWrapper, self).setUp() 
self.mock_api = api_cls.return_value self.mock_get_driver = mock.patch.object(self.plugin_instance, '_get_driver') self.mock_get_driver.return_value = (agent_driver_base. AgentDriverBase( self.plugin_instance )) def test_create_vip(self): with self.subnet() as subnet: with self.pool(subnet=subnet) as pool: with self.vip(pool=pool, subnet=subnet) as vip: self.mock_api.create_vip.assert_called_once_with( mock.ANY, vip['vip'], 'host' ) def test_update_vip(self): with self.subnet() as subnet: with self.pool(subnet=subnet) as pool: with self.vip(pool=pool, subnet=subnet) as vip: ctx = context.get_admin_context() old_vip = vip['vip'].copy() vip['vip'].pop('status') new_vip = self.plugin_instance.update_vip( ctx, vip['vip']['id'], vip ) self.mock_api.update_vip.assert_called_once_with( mock.ANY, old_vip, new_vip, 'host' ) self.assertEqual( constants.PENDING_UPDATE, new_vip['status'] ) def test_delete_vip(self): with self.subnet() as subnet: with self.pool(subnet=subnet) as pool: with self.vip(pool=pool, subnet=subnet, do_delete=False) as vip: ctx = context.get_admin_context() self.plugin_instance.delete_vip(ctx, vip['vip']['id']) vip['vip']['status'] = 'PENDING_DELETE' self.mock_api.delete_vip.assert_called_once_with( mock.ANY, vip['vip'], 'host' ) def test_create_pool(self): with self.pool() as pool: self.mock_api.create_pool.assert_called_once_with( mock.ANY, pool['pool'], mock.ANY, 'dummy' ) def test_update_pool_non_active(self): with self.pool() as pool: pool['pool']['status'] = 'INACTIVE' ctx = context.get_admin_context() orig_pool = pool['pool'].copy() del pool['pool']['provider'] self.plugin_instance.update_pool(ctx, pool['pool']['id'], pool) self.mock_api.delete_pool.assert_called_once_with( mock.ANY, orig_pool, 'host') def test_update_pool_no_vip_id(self): with self.pool() as pool: ctx = context.get_admin_context() orig_pool = pool['pool'].copy() del pool['pool']['provider'] updated = self.plugin_instance.update_pool( ctx, pool['pool']['id'], pool) 
self.mock_api.update_pool.assert_called_once_with( mock.ANY, orig_pool, updated, 'host') def test_update_pool_with_vip_id(self): with self.pool() as pool: with self.vip(pool=pool) as vip: ctx = context.get_admin_context() old_pool = pool['pool'].copy() old_pool['vip_id'] = vip['vip']['id'] del pool['pool']['provider'] updated = self.plugin_instance.update_pool( ctx, pool['pool']['id'], pool) self.mock_api.update_pool.assert_called_once_with( mock.ANY, old_pool, updated, 'host') def test_delete_pool(self): with self.pool(do_delete=False) as pool: req = self.new_delete_request('pools', pool['pool']['id']) res = req.get_response(self.ext_api) self.assertEqual(exc.HTTPNoContent.code, res.status_int) pool['pool']['status'] = 'PENDING_DELETE' self.mock_api.delete_pool.assert_called_once_with( mock.ANY, pool['pool'], 'host') def test_create_member(self): with self.pool() as pool: pool_id = pool['pool']['id'] with self.member(pool_id=pool_id) as member: self.mock_api.create_member.assert_called_once_with( mock.ANY, member['member'], 'host') def test_update_member(self): with self.pool() as pool: pool_id = pool['pool']['id'] with self.member(pool_id=pool_id) as member: ctx = context.get_admin_context() updated = self.plugin_instance.update_member( ctx, member['member']['id'], member) self.mock_api.update_member.assert_called_once_with( mock.ANY, member['member'], updated, 'host') def test_update_member_new_pool(self): with self.pool() as pool1: pool1_id = pool1['pool']['id'] with self.pool() as pool2: pool2_id = pool2['pool']['id'] with self.member(pool_id=pool1_id) as member: self.mock_api.create_member.reset_mock() ctx = context.get_admin_context() old_member = member['member'].copy() member['member']['pool_id'] = pool2_id updated = self.plugin_instance.update_member( ctx, member['member']['id'], member) self.mock_api.delete_member.assert_called_once_with( mock.ANY, old_member, 'host') self.mock_api.create_member.assert_called_once_with( mock.ANY, updated, 'host') def 
test_delete_member(self): with self.pool() as pool: pool_id = pool['pool']['id'] with self.member(pool_id=pool_id, do_delete=False) as member: req = self.new_delete_request('members', member['member']['id']) res = req.get_response(self.ext_api) self.assertEqual(exc.HTTPNoContent.code, res.status_int) member['member']['status'] = 'PENDING_DELETE' self.mock_api.delete_member.assert_called_once_with( mock.ANY, member['member'], 'host') def test_create_pool_health_monitor(self): with contextlib.nested( self.health_monitor(), self.pool(), ) as (hm, pool): pool_id = pool['pool']['id'] ctx = context.get_admin_context() self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id) # hm now has a ref to the pool with which it is associated hm = self.plugin.get_health_monitor( ctx, hm['health_monitor']['id']) self.mock_api.create_pool_health_monitor.assert_called_once_with( mock.ANY, hm, pool_id, 'host') def test_delete_pool_health_monitor(self): with contextlib.nested( self.pool(), self.health_monitor() ) as (pool, hm): pool_id = pool['pool']['id'] ctx = context.get_admin_context() self.plugin_instance.create_pool_health_monitor(ctx, hm, pool_id) # hm now has a ref to the pool with which it is associated hm = self.plugin.get_health_monitor( ctx, hm['health_monitor']['id']) hm['pools'][0]['status'] = 'PENDING_DELETE' self.plugin_instance.delete_pool_health_monitor( ctx, hm['id'], pool_id) self.mock_api.delete_pool_health_monitor.assert_called_once_with( mock.ANY, hm, pool_id, 'host') def test_update_health_monitor_associated_with_pool(self): with contextlib.nested( self.health_monitor(type='HTTP'), self.pool() ) as (monitor, pool): data = { 'health_monitor': { 'id': monitor['health_monitor']['id'], 'tenant_id': self._tenant_id } } req = self.new_create_request( 'pools', data, fmt=self.fmt, id=pool['pool']['id'], subresource='health_monitors') res = req.get_response(self.ext_api) self.assertEqual(exc.HTTPCreated.code, res.status_int) # hm now has a ref to the pool with 
which it is associated ctx = context.get_admin_context() hm = self.plugin.get_health_monitor( ctx, monitor['health_monitor']['id']) self.mock_api.create_pool_health_monitor.assert_called_once_with( mock.ANY, hm, pool['pool']['id'], 'host' ) self.mock_api.reset_mock() data = {'health_monitor': {'delay': 20, 'timeout': 20, 'max_retries': 2, 'admin_state_up': False}} updated = hm.copy() updated.update(data['health_monitor']) req = self.new_update_request("health_monitors", data, monitor['health_monitor']['id']) req.get_response(self.ext_api) self.mock_api.update_pool_health_monitor.assert_called_once_with( mock.ANY, hm, updated, pool['pool']['id'], 'host') neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/__init__.py0000664000567000056710000000000012701407726032424 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/vmware/0000775000567000056710000000000012701410110031603 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/vmware/test_edge_driver.pyneutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/vmware/test_edge_driver.p0000664000567000056710000005714612701407727035343 0ustar jenkinsjenkins00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import mock from neutron import context from neutron.plugins.common import constants from neutron import manager from neutron_lbaas.db.loadbalancer import loadbalancer_db as lb_db from neutron_lbaas.services.loadbalancer.drivers.vmware import db from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer EDGE_PROVIDER = ('LOADBALANCER:vmwareedge:neutron_lbaas.services.' 'loadbalancer.drivers.vmware.edge_driver.' 'EdgeLoadbalancerDriver:default') HEALTHMON_ID = 'cb297614-66c9-4048-8838-7e87231569ae' POOL_ID = 'b3dfb476-6fdf-4ddd-b6bd-e86ae78dc30b' TENANT_ID = 'f9135d3a908842bd8d785816c2c90d36' SUBNET_ID = 'c8924d77-ff57-406f-a13c-a8c5def01fc9' VIP_ID = 'f6393b95-34b0-4299-9001-cbc21e32bf03' VIP_PORT_ID = '49c547e3-6775-42ea-a607-91e8f1a07432' MEMBER_ID = '90dacafd-9c11-4af7-9d89-234e2d1fedb1' EDGE_ID = 'edge-x' EDGE_POOL_ID = '111' EDGE_VSE_ID = '222' APP_PROFILE_ID = '333' EDGE_MON_ID = '444' EDGE_FW_RULE_ID = '555' class TestLoadBalancerPluginBase( test_db_loadbalancer.LoadBalancerPluginDbTestCase): def setUp(self): super(TestLoadBalancerPluginBase, self).setUp( lbaas_provider=EDGE_PROVIDER) loaded_plugins = manager.NeutronManager().get_service_plugins() self.service_plugin = loaded_plugins[constants.LOADBALANCER] self.edge_driver = self.service_plugin.drivers['vmwareedge'] self.service_plugin._core_plugin.nsx_v = mock.Mock() class TestEdgeLoadBalancerPlugin(TestLoadBalancerPluginBase): def setUp(self): super(TestEdgeLoadBalancerPlugin, self).setUp() self.context = context.get_admin_context() def test_create_pool_successful(self): pool = {'id': POOL_ID} with contextlib.nested( mock.patch.object(db, 'add_nsxv_edge_pool_mapping'), mock.patch.object(self.edge_driver, 'pool_successful') ) as (mock_add_pool, mock_pool_successful): self.edge_driver.create_pool_successful(self.context, pool, EDGE_ID, EDGE_POOL_ID) mock_add_pool.assert_called_with(self.context, POOL_ID, EDGE_ID, EDGE_POOL_ID) 
mock_pool_successful.assert_called_with(self.context, pool) def test_delete_pool_successful(self): pool = {'id': POOL_ID} with contextlib.nested( mock.patch.object(self.service_plugin, '_delete_db_pool'), mock.patch.object(db, 'delete_nsxv_edge_pool_mapping') ) as (mock_del_db_pool, mock_del_mapping): self.edge_driver.delete_pool_successful(self.context, pool) mock_del_db_pool.assert_called_with(self.context, POOL_ID) mock_del_mapping.assert_called_with(self.context, POOL_ID) def test_pool_successful(self): pool = {'id': POOL_ID} with mock.patch.object(self.service_plugin, 'update_status') as ( mock_update_status): self.edge_driver.pool_successful(self.context, pool) mock_update_status.assert_called_with(self.context, lb_db.Pool, pool['id'], constants.ACTIVE) def test_pool_failed(self): pool = {'id': POOL_ID} with mock.patch.object(self.service_plugin, 'update_status') as ( mock_update_status): self.edge_driver.pool_failed(self.context, pool) mock_update_status.assert_called_with(self.context, lb_db.Pool, pool['id'], constants.ERROR) def test_create_pool(self): lbaas_pool = { 'status': 'PENDING_CREATE', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'status_description': None, 'id': POOL_ID, 'vip_id': None, 'name': 'testpool', 'admin_state_up': True, 'subnet_id': SUBNET_ID, 'tenant_id': TENANT_ID, 'health_monitors_status': [], 'provider': 'vmwareedge'} with mock.patch.object(self.service_plugin._core_plugin.nsx_v, 'create_pool') as mock_create_pool: self.edge_driver.create_pool(self.context, lbaas_pool) mock_create_pool.assert_called_with(self.context, lbaas_pool) def test_update_pool(self): from_pool = { 'status': 'ACTIVE', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'status_description': None, 'id': POOL_ID, 'vip_id': None, 'name': 'testpool2', 'admin_state_up': True, 'subnet_id': SUBNET_ID, 'tenant_id': TENANT_ID, 'health_monitors_status': [], 
'provider': 'vmwareedge'} to_pool = { 'status': 'PENDING_UPDATE', 'lb_method': 'LEAST_CONNECTIONS', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'status_description': None, 'id': POOL_ID, 'vip_id': None, 'name': 'testpool2', 'admin_state_up': True, 'subnet_id': SUBNET_ID, 'tenant_id': TENANT_ID, 'health_monitors_status': [], 'provider': 'vmwareedge'} mapping = {'edge_id': EDGE_ID, 'edge_pool_id': EDGE_POOL_ID} with contextlib.nested( mock.patch.object(db, 'get_nsxv_edge_pool_mapping'), mock.patch.object(self.service_plugin._core_plugin.nsx_v, 'update_pool') ) as (mock_get_mapping, mock_update_pool): mock_get_mapping.return_value = mapping self.edge_driver.update_pool(self.context, from_pool, to_pool) mock_update_pool.assert_called_with(self.context, from_pool, to_pool, mapping) def test_delete_pool(self): lbaas_pool = { 'status': 'PENDING_CREATE', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'description': '', 'health_monitors': [], 'members': [], 'status_description': None, 'id': POOL_ID, 'vip_id': None, 'name': 'testpool', 'admin_state_up': True, 'subnet_id': SUBNET_ID, 'tenant_id': TENANT_ID, 'health_monitors_status': [], 'provider': 'vmwareedge'} mapping = {'edge_id': EDGE_ID, 'edge_pool_id': EDGE_POOL_ID} with contextlib.nested( mock.patch.object(db, 'get_nsxv_edge_pool_mapping'), mock.patch.object(self.service_plugin, 'get_pool', return_value={}), mock.patch.object(self.service_plugin._core_plugin.nsx_v, 'delete_pool') ) as (mock_get_mapping, mock_get_pool, mock_delete_pool): mock_get_mapping.return_value = mapping self.edge_driver.delete_pool(self.context, lbaas_pool) mock_delete_pool.assert_called_with(self.context, lbaas_pool, mapping) def test_create_vip_successful(self): vip = {'pool_id': POOL_ID} with contextlib.nested( mock.patch.object(db, 'add_nsxv_edge_vip_mapping'), mock.patch.object(self.edge_driver, 'vip_successful') ) as (mock_add_vip_mapping, mock_vip_successful): self.edge_driver.create_vip_successful( 
self.context, vip, EDGE_ID, APP_PROFILE_ID, EDGE_VSE_ID, EDGE_FW_RULE_ID) mock_add_vip_mapping.assert_called_with( self.context, POOL_ID, EDGE_ID, APP_PROFILE_ID, EDGE_VSE_ID, EDGE_FW_RULE_ID) mock_vip_successful.assert_called_with(self.context, vip) def test_delete_vip_successful(self): vip = {'pool_id': POOL_ID, 'id': VIP_ID} with contextlib.nested( mock.patch.object(db, 'delete_nsxv_edge_vip_mapping'), mock.patch.object(self.service_plugin, '_delete_db_vip') ) as (mock_del_vip_mapping, mock_del_vip): self.edge_driver.delete_vip_successful(self.context, vip) mock_del_vip_mapping.assert_called_with(self.context, POOL_ID) mock_del_vip.assert_called_with(self.context, VIP_ID) def test_vip_successful(self): vip = {'pool_id': POOL_ID, 'id': VIP_ID} with mock.patch.object(self.service_plugin, 'update_status') as ( mock_update_status): self.edge_driver.vip_successful(self.context, vip) mock_update_status.assert_called_with( self.context, lb_db.Vip, VIP_ID, constants.ACTIVE) def test_vip_failed(self): vip = {'pool_id': POOL_ID, 'id': VIP_ID} with mock.patch.object(self.service_plugin, 'update_status') as ( mock_update_status): self.edge_driver.vip_failed(self.context, vip) mock_update_status.assert_called_with( self.context, lb_db.Vip, VIP_ID, constants.ERROR) def test_create_vip(self): lbaas_vip = { 'status': 'PENDING_CREATE', 'protocol': 'HTTP', 'description': '', 'address': '10.0.0.8', 'protocol_port': 555, 'port_id': VIP_PORT_ID, 'id': VIP_ID, 'status_description': None, 'name': 'testvip1', 'admin_state_up': True, 'subnet_id': SUBNET_ID, 'tenant_id': TENANT_ID, 'connection_limit': -1, 'pool_id': POOL_ID, 'session_persistence': {'type': 'SOURCE_IP'}} mapping = {'edge_id': EDGE_ID, 'edge_pool_id': EDGE_POOL_ID} with contextlib.nested( mock.patch.object(db, 'get_nsxv_edge_pool_mapping'), mock.patch.object(self.service_plugin._core_plugin.nsx_v, 'create_vip') ) as (mock_get_mapping, mock_create_vip): mock_get_mapping.return_value = mapping 
self.edge_driver.create_vip(self.context, lbaas_vip) mock_create_vip.assert_called_with(self.context, lbaas_vip, mapping) def test_update_vip(self): vip_from = { 'status': 'ACTIVE', 'protocol': 'HTTP', 'description': '', 'address': '10.0.0.8', 'protocol_port': 555L, 'port_id': VIP_PORT_ID, 'id': VIP_ID, 'status_description': None, 'name': 'testvip1', 'admin_state_up': True, 'subnet_id': SUBNET_ID, 'tenant_id': TENANT_ID, 'connection_limit': -1L, 'pool_id': POOL_ID, 'session_persistence': {'type': 'SOURCE_IP'}} vip_to = { 'status': 'PENDING_UPDATE', 'protocol': 'HTTP', 'description': '', 'address': '10.0.0.8', 'protocol_port': 555L, 'port_id': VIP_PORT_ID, 'id': VIP_ID, 'status_description': None, 'name': 'testvip1', 'admin_state_up': True, 'subnet_id': SUBNET_ID, 'tenant_id': TENANT_ID, 'connection_limit': -1, 'pool_id': POOL_ID, 'session_persistence': {'type': 'HTTP_COOKIE'}} pool_mapping = {'edge_id': EDGE_ID, 'edge_pool_id': EDGE_POOL_ID} vip_mapping = {'edge_id': EDGE_ID, 'edge_vse_id': EDGE_VSE_ID, 'edge_app_profile_id': APP_PROFILE_ID} with contextlib.nested( mock.patch.object(db, 'get_nsxv_edge_pool_mapping'), mock.patch.object(db, 'get_nsxv_edge_vip_mapping'), mock.patch.object(self.service_plugin._core_plugin.nsx_v, 'update_vip') ) as (mock_get_pool_mapping, mock_get_vip_mapping, mock_upd_vip): mock_get_pool_mapping.return_value = pool_mapping mock_get_vip_mapping.return_value = vip_mapping self.edge_driver.update_vip(self.context, vip_from, vip_to) mock_upd_vip.assert_called_with(self.context, vip_from, vip_to, pool_mapping, vip_mapping) def test_delete_vip(self): lbaas_vip = { 'status': 'PENDING_DELETE', 'protocol': 'HTTP', 'description': '', 'address': '10.0.0.11', 'protocol_port': 555L, 'port_id': VIP_PORT_ID, 'id': VIP_ID, 'status_description': None, 'name': 'testvip', 'admin_state_up': True, 'subnet_id': SUBNET_ID, 'tenant_id': TENANT_ID, 'connection_limit': -1L, 'pool_id': POOL_ID, 'session_persistence': None} mapping = {'edge_id': EDGE_ID, 
'edge_vse_id': EDGE_VSE_ID, 'edge_app_profile_id': APP_PROFILE_ID, 'edge_fw_rule_id': EDGE_FW_RULE_ID} with contextlib.nested( mock.patch.object(db, 'get_nsxv_edge_vip_mapping'), mock.patch.object(self.service_plugin._core_plugin.nsx_v, 'delete_vip') ) as (mock_get_mapping, mock_del_vip): mock_get_mapping.return_value = mapping self.edge_driver.delete_vip(self.context, lbaas_vip) mock_del_vip.assert_called_with(self.context, lbaas_vip, mapping) def test_member_successful(self): member = {'id': MEMBER_ID} with mock.patch.object(self.service_plugin, 'update_status') as ( mock_update_status): self.edge_driver.member_successful(self.context, member) mock_update_status.assert_called_with( self.context, lb_db.Member, member['id'], constants.ACTIVE) def test_member_failed(self): member = {'id': MEMBER_ID} with mock.patch.object(self.service_plugin, 'update_status') as ( mock_update_status): self.edge_driver.member_failed(self.context, member) mock_update_status.assert_called_with( self.context, lb_db.Member, member['id'], constants.ERROR) def test_create_member(self): lbaas_member = { 'admin_state_up': True, 'status': 'PENDING_CREATE', 'status_description': None, 'weight': 5, 'address': '10.0.0.4', 'tenant_id': TENANT_ID, 'protocol_port': 555, 'id': MEMBER_ID, 'pool_id': POOL_ID} mapping = {'edge_id': EDGE_ID, 'edge_pool_id': EDGE_POOL_ID} with contextlib.nested( mock.patch.object(db, 'get_nsxv_edge_pool_mapping'), mock.patch.object(self.service_plugin._core_plugin.nsx_v, 'create_member') ) as (mock_get_mapping, mock_create_member): mock_get_mapping.return_value = mapping self.edge_driver.create_member(self.context, lbaas_member) mock_create_member.assert_called_with(self.context, lbaas_member, mapping) def test_update_member(self): member_from = { 'admin_state_up': True, 'status': 'PENDING_UPDATE', 'status_description': None, 'weight': 5, 'address': '10.0.0.4', 'tenant_id': TENANT_ID, 'protocol_port': 555, 'id': MEMBER_ID, 'pool_id': POOL_ID} member_to = { 
'admin_state_up': True, 'status': 'ACTIVE', 'status_description': None, 'weight': 10, 'address': '10.0.0.4', 'tenant_id': TENANT_ID, 'protocol_port': 555, 'id': MEMBER_ID, 'pool_id': POOL_ID} mapping = {'edge_id': EDGE_ID, 'edge_pool_id': EDGE_POOL_ID} with contextlib.nested( mock.patch.object(db, 'get_nsxv_edge_pool_mapping'), mock.patch.object(self.service_plugin._core_plugin.nsx_v, 'update_member') ) as (mock_get_mapping, mock_update_member): mock_get_mapping.return_value = mapping self.edge_driver.update_member(self.context, member_from, member_to) mock_update_member.assert_called_with(self.context, member_from, member_to, mapping) def test_delete_member(self): lbaas_member = { 'admin_state_up': True, 'status': 'PENDING_DELETE', 'status_description': None, 'weight': 5, 'address': '10.0.0.4', 'tenant_id': TENANT_ID, 'protocol_port': 555, 'id': MEMBER_ID, 'pool_id': POOL_ID} mapping = {'edge_id': EDGE_ID, 'edge_pool_id': EDGE_POOL_ID} with contextlib.nested( mock.patch.object(db, 'get_nsxv_edge_pool_mapping'), mock.patch.object(self.service_plugin._core_plugin.nsx_v, 'delete_member') ) as (mock_get_mapping, mock_delete_member): mock_get_mapping.return_value = mapping self.edge_driver.delete_member(self.context, lbaas_member) mock_delete_member.assert_called_with(self.context, lbaas_member, mapping) def test_create_pool_health_monitor_successful(self): hmon = {'id': HEALTHMON_ID} with contextlib.nested( mock.patch.object(db, 'add_nsxv_edge_monitor_mapping'), mock.patch.object(self.edge_driver, 'pool_health_monitor_successful') ) as (mock_add_pool_mon_mapping, mock_pool_hmon_successful): self.edge_driver.create_pool_health_monitor_successful( self.context, hmon, POOL_ID, EDGE_ID, EDGE_MON_ID) mock_add_pool_mon_mapping.assert_called_with( self.context, HEALTHMON_ID, EDGE_ID, EDGE_MON_ID) mock_pool_hmon_successful.assert_called_with(self.context, hmon, POOL_ID) def test_delete_pool_health_monitor_successful(self): hmon = {'id': HEALTHMON_ID, 'pool_id': POOL_ID} 
hmon_mapping = {'edge_id': EDGE_ID} with contextlib.nested( mock.patch.object(db, 'delete_nsxv_edge_monitor_mapping'), mock.patch.object(self.service_plugin, '_delete_db_pool_health_monitor') ) as (mock_del_pool_hmon_mapping, mock_del_db_pool_hmon): self.edge_driver.delete_pool_health_monitor_successful( self.context, hmon, POOL_ID, hmon_mapping) mock_del_pool_hmon_mapping.assert_called_with( self.context, HEALTHMON_ID, EDGE_ID) mock_del_db_pool_hmon.assert_called_with( self.context, HEALTHMON_ID, POOL_ID) def test_pool_health_monitor_successful(self): hmon = {'id': HEALTHMON_ID} with mock.patch.object(self.service_plugin, 'update_pool_health_monitor') as ( mock_update_hmon): self.edge_driver.pool_health_monitor_successful(self.context, hmon, POOL_ID) mock_update_hmon.assert_called_with( self.context, HEALTHMON_ID, POOL_ID, constants.ACTIVE, '') def test_pool_health_monitor_failed(self): hmon = {'id': HEALTHMON_ID} with mock.patch.object(self.service_plugin, 'update_pool_health_monitor') as ( mock_update_hmon): self.edge_driver.pool_health_monitor_failed(self.context, hmon, POOL_ID) mock_update_hmon.assert_called_with( self.context, HEALTHMON_ID, POOL_ID, constants.ERROR, '') def test_create_pool_health_monitor(self): hmon = { 'admin_state_up': True, 'tenant_id': TENANT_ID, 'delay': 5L, 'max_retries': 5L, 'timeout': 5L, 'pools': [ {'status': 'PENDING_CREATE', 'status_description': None, 'pool_id': POOL_ID}], 'type': 'PING', 'id': HEALTHMON_ID} pool_mapping = {'edge_id': EDGE_ID, 'edge_pool_id': EDGE_POOL_ID} with contextlib.nested( mock.patch.object(db, 'get_nsxv_edge_pool_mapping'), mock.patch.object(db, 'get_nsxv_edge_monitor_mapping'), mock.patch.object(self.service_plugin._core_plugin.nsx_v, 'create_pool_health_monitor') ) as (mock_get_pool_mapping, mock_get_mon_mapping, mock_create_pool_hm): mock_get_pool_mapping.return_value = pool_mapping mock_get_mon_mapping.return_value = None self.edge_driver.create_pool_health_monitor(self.context, hmon, POOL_ID) 
mock_create_pool_hm.assert_called_with(self.context, hmon, POOL_ID, pool_mapping, None) def test_update_pool_health_monitor(self): from_hmon = { 'admin_state_up': True, 'tenant_id': TENANT_ID, 'delay': 5L, 'max_retries': 5L, 'timeout': 5L, 'pools': [ {'status': 'PENDING_UPDATE', 'status_description': None, 'pool_id': POOL_ID}], 'type': 'PING', 'id': HEALTHMON_ID} to_hmon = { 'admin_state_up': True, 'tenant_id': TENANT_ID, 'delay': 5L, 'max_retries': 10L, 'timeout': 5L, 'pools': [ {'status': 'ACTIVE', 'status_description': None, 'pool_id': POOL_ID}], 'type': 'PING', 'id': HEALTHMON_ID} pool_mapping = {'edge_id': EDGE_ID, 'edge_pool_id': EDGE_POOL_ID} mon_mapping = {'edge_id': EDGE_ID, 'edge_monitor_id': EDGE_MON_ID} with contextlib.nested( mock.patch.object(db, 'get_nsxv_edge_pool_mapping'), mock.patch.object(db, 'get_nsxv_edge_monitor_mapping'), mock.patch.object(self.service_plugin._core_plugin.nsx_v, 'update_pool_health_monitor') ) as (mock_get_pool_mapping, mock_get_mon_mapping, mock_upd_pool_hm): mock_get_pool_mapping.return_value = pool_mapping mock_get_mon_mapping.return_value = mon_mapping self.edge_driver.update_pool_health_monitor( self.context, from_hmon, to_hmon, POOL_ID) mock_upd_pool_hm.assert_called_with( self.context, from_hmon, to_hmon, POOL_ID, mon_mapping) def test_delete_pool_health_monitor(self): hmon = { 'admin_state_up': True, 'tenant_id': TENANT_ID, 'delay': 5L, 'max_retries': 5L, 'timeout': 5L, 'pools': [ {'status': 'PENDING_DELETE', 'status_description': None, 'pool_id': POOL_ID}], 'type': 'PING', 'id': HEALTHMON_ID} pool_mapping = {'edge_id': EDGE_ID, 'edge_pool_id': EDGE_POOL_ID} mon_mapping = {'edge_id': EDGE_ID, 'edge_monitor_id': EDGE_MON_ID} with contextlib.nested( mock.patch.object(db, 'get_nsxv_edge_pool_mapping'), mock.patch.object(db, 'get_nsxv_edge_monitor_mapping'), mock.patch.object(self.service_plugin._core_plugin.nsx_v, 'delete_pool_health_monitor') ) as (mock_get_pool_mapping, mock_get_mon_mapping, mock_del_pool_hm): 
mock_get_pool_mapping.return_value = pool_mapping mock_get_mon_mapping.return_value = mon_mapping self.edge_driver.delete_pool_health_monitor(self.context, hmon, POOL_ID) mock_del_pool_hm.assert_called_with(self.context, hmon, POOL_ID, pool_mapping, mon_mapping) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/vmware/__init__.py0000664000567000056710000000000012701407726033725 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/radware/0000775000567000056710000000000012701410110031727 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/radware/test_plugin_driver.pyneutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/radware/test_plugin_drive0000664000567000056710000013265712701407726035441 0ustar jenkinsjenkins00000000000000# Copyright 2013 Radware LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re import contextlib import mock from neutron.api.v2 import attributes from neutron import context from neutron import manager from neutron.plugins.common import constants from oslo_config import cfg from oslo_serialization import jsonutils from six.moves import queue as Queue from neutron_lbaas.extensions import loadbalancer from neutron_lbaas.services.loadbalancer.drivers.radware import driver from neutron_lbaas.services.loadbalancer.drivers.radware \ import exceptions as r_exc from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer GET_200 = ('/api/workflow/', '/api/service/', '/api/workflowTemplate') SERVER_DOWN_CODES = (-1, 301, 307) class QueueMock(Queue.Queue): def __init__(self, completion_handler): self.completion_handler = completion_handler super(QueueMock, self).__init__() def put_nowait(self, oper): self.completion_handler(oper) def _recover_function_mock(action, resource, data, headers, binary=False): pass def rest_call_function_mock(action, resource, data, headers, binary=False): if rest_call_function_mock.RESPOND_WITH_ERROR: return 400, 'error_status', 'error_description', None if rest_call_function_mock.RESPOND_WITH_SERVER_DOWN in SERVER_DOWN_CODES: val = rest_call_function_mock.RESPOND_WITH_SERVER_DOWN return val, 'error_status', 'error_description', None if action == 'GET': return _get_handler(resource) elif action == 'DELETE': return _delete_handler(resource) elif action == 'POST': return _post_handler(resource, binary) else: return 0, None, None, None def _get_handler(resource): if resource == GET_200[2]: if rest_call_function_mock.TEMPLATES_MISSING: data = jsonutils.loads('[]') else: data = jsonutils.loads( '[{"name":"openstack_l2_l3"},{"name":"openstack_l4"}]' ) return 200, '', '', data if resource in GET_200: return 200, '', '', '' else: data = jsonutils.loads('{"complete":"True", "success": "True"}') return 202, '', '', data def _delete_handler(resource): return 404, '', '', {'message': 'Not Found'} def 
_post_handler(resource, binary): if re.search(r'/api/workflow/.+/action/.+', resource): data = jsonutils.loads('{"uri":"some_uri"}') return 202, '', '', data elif re.search(r'/api/service\?name=.+', resource): data = jsonutils.loads('{"links":{"actions":{"provision":"someuri"}}}') return 201, '', '', data elif binary: return 201, '', '', '' else: return 202, '', '', '' RADWARE_PROVIDER = ('LOADBALANCER:radware:neutron_lbaas.services.' 'loadbalancer.drivers.radware.driver.' 'LoadBalancerDriver:default') class TestLoadBalancerPluginBase( test_db_loadbalancer.LoadBalancerPluginDbTestCase): def setUp(self): super(TestLoadBalancerPluginBase, self).setUp( lbaas_provider=RADWARE_PROVIDER) loaded_plugins = manager.NeutronManager().get_service_plugins() self.plugin_instance = loaded_plugins[constants.LOADBALANCER] class TestLoadBalancerPlugin(TestLoadBalancerPluginBase): def setUp(self): super(TestLoadBalancerPlugin, self).setUp() rest_call_function_mock.__dict__.update( {'RESPOND_WITH_ERROR': False}) rest_call_function_mock.__dict__.update( {'TEMPLATES_MISSING': False}) rest_call_function_mock.__dict__.update( {'RESPOND_WITH_SERVER_DOWN': 200}) self.operation_completer_start_mock = mock.Mock( return_value=None) self.operation_completer_join_mock = mock.Mock( return_value=None) self.driver_rest_call_mock = mock.Mock( side_effect=rest_call_function_mock) self.flip_servers_mock = mock.Mock( return_value=None) self.recover_mock = mock.Mock( side_effect=_recover_function_mock) radware_driver = self.plugin_instance.drivers['radware'] radware_driver.completion_handler.start = ( self.operation_completer_start_mock) radware_driver.completion_handler.join = ( self.operation_completer_join_mock) self.orig_call = radware_driver.rest_client.call self.orig__call = radware_driver.rest_client._call radware_driver.rest_client.call = self.driver_rest_call_mock radware_driver.rest_client._call = self.driver_rest_call_mock radware_driver.rest_client._flip_servers = self.flip_servers_mock 
radware_driver.rest_client._recover = self.recover_mock radware_driver.completion_handler.rest_client.call = ( self.driver_rest_call_mock) radware_driver.queue = QueueMock( radware_driver.completion_handler.handle_operation_completion) self.addCleanup(radware_driver.completion_handler.join) def test_get_pip(self): """Call _get_pip twice and verify that a Port is created once.""" port_dict = {'fixed_ips': [{'subnet_id': '10.10.10.10', 'ip_address': '11.11.11.11'}]} port_data = { 'tenant_id': 'tenant_id', 'name': 'port_name', 'network_id': 'network_id', 'mac_address': attributes.ATTR_NOT_SPECIFIED, 'admin_state_up': False, 'device_id': '', 'device_owner': 'neutron:' + constants.LOADBALANCER, 'fixed_ips': [{'subnet_id': '10.10.10.10'}] } self.plugin_instance._core_plugin.get_ports = mock.Mock( return_value=[]) self.plugin_instance._core_plugin.create_port = mock.Mock( return_value=port_dict) radware_driver = self.plugin_instance.drivers['radware'] radware_driver._get_pip(context.get_admin_context(), 'tenant_id', 'port_name', 'network_id', '10.10.10.10') self.plugin_instance._core_plugin.get_ports.assert_called_once_with( mock.ANY, filters={'name': ['port_name']}) self.plugin_instance._core_plugin.create_port.assert_called_once_with( mock.ANY, {'port': port_data}) self.plugin_instance._core_plugin.create_port.reset_mock() self.plugin_instance._core_plugin.get_ports.reset_mock() self.plugin_instance._core_plugin.get_ports.return_value = [port_dict] radware_driver._get_pip(context.get_admin_context(), 'tenant_id', 'port_name', 'network_id', '10.10.10.10') self.plugin_instance._core_plugin.get_ports.assert_called_once_with( mock.ANY, filters={'name': ['port_name']}) self.assertFalse(self.plugin_instance._core_plugin.create_port.called) def test_rest_client_recover_was_called(self): """Call the real REST client and verify _recover is called.""" radware_driver = self.plugin_instance.drivers['radware'] radware_driver.rest_client.call = self.orig_call 
radware_driver.rest_client._call = self.orig__call self.assertRaises(r_exc.RESTRequestFailure, radware_driver._verify_workflow_templates) self.recover_mock.assert_called_once_with('GET', '/api/workflowTemplate', None, None, False) def test_rest_client_flip_servers(self): radware_driver = self.plugin_instance.drivers['radware'] server = radware_driver.rest_client.server sec_server = radware_driver.rest_client.secondary_server radware_driver.rest_client._flip_servers() self.assertEqual(server, radware_driver.rest_client.secondary_server) self.assertEqual(sec_server, radware_driver.rest_client.server) def test_verify_workflow_templates_server_down(self): """Test the rest call failure when backend is down.""" for value in SERVER_DOWN_CODES: rest_call_function_mock.__dict__.update( {'RESPOND_WITH_SERVER_DOWN': value}) self.assertRaises(r_exc.RESTRequestFailure, self.plugin_instance.drivers['radware']. _verify_workflow_templates) def test_verify_workflow_templates(self): """Test the rest call failure handling by Exception raising.""" rest_call_function_mock.__dict__.update( {'TEMPLATES_MISSING': True}) self.assertRaises(r_exc.WorkflowMissing, self.plugin_instance.drivers['radware']. 
_verify_workflow_templates) def test_create_vip_failure(self): """Test the rest call failure handling by Exception raising.""" with self.network() as network: with self.subnet(network=network) as subnet: with self.pool(do_delete=False, provider='radware', subnet_id=subnet['subnet']['id']) as pool: vip_data = { 'name': 'vip1', 'subnet_id': subnet['subnet']['id'], 'pool_id': pool['pool']['id'], 'description': '', 'protocol_port': 80, 'protocol': 'HTTP', 'connection_limit': -1, 'admin_state_up': True, 'status': constants.PENDING_CREATE, 'tenant_id': self._tenant_id, 'session_persistence': '' } rest_call_function_mock.__dict__.update( {'RESPOND_WITH_ERROR': True}) self.assertRaises(r_exc.RESTRequestFailure, self.plugin_instance.create_vip, context.get_admin_context(), {'vip': vip_data}) def test_create_vip(self): with self.subnet() as subnet: with self.pool(provider='radware', subnet_id=subnet['subnet']['id']) as pool: vip_data = { 'name': 'vip1', 'subnet_id': subnet['subnet']['id'], 'pool_id': pool['pool']['id'], 'description': '', 'protocol_port': 80, 'protocol': 'HTTP', 'connection_limit': -1, 'admin_state_up': True, 'status': constants.PENDING_CREATE, 'tenant_id': self._tenant_id, 'session_persistence': '' } vip = self.plugin_instance.create_vip( context.get_admin_context(), {'vip': vip_data}) # Test creation REST calls calls = [ mock.call('GET', u'/api/service/srv_' + subnet['subnet']['network_id'], None, None), mock.call('POST', u'/api/service?name=srv_' + subnet['subnet']['network_id'] + '&tenant=' + vip['tenant_id'], mock.ANY, driver.CREATE_SERVICE_HEADER), mock.call('GET', u'/api/workflow/l2_l3_' + subnet['subnet']['network_id'], None, None), mock.call('POST', '/api/workflow/l2_l3_' + subnet['subnet']['network_id'] + '/action/setup_l2_l3', mock.ANY, driver.TEMPLATE_HEADER), mock.call('POST', 'someuri', None, driver.PROVISION_HEADER), mock.call('POST', '/api/workflowTemplate/' + 'openstack_l4' + '?name=' + pool['pool']['id'], mock.ANY, driver.TEMPLATE_HEADER), 
mock.call('POST', '/api/workflowTemplate/' + 'openstack_l2_l3' + '?name=l2_l3_' + subnet['subnet']['network_id'], mock.ANY, driver.TEMPLATE_HEADER), mock.call('POST', '/api/workflow/' + pool['pool']['id'] + '/action/BaseCreate', mock.ANY, driver.TEMPLATE_HEADER), mock.call('GET', '/api/workflow/' + pool['pool']['id'], None, None) ] self.driver_rest_call_mock.assert_has_calls(calls, any_order=True) #Test DB new_vip = self.plugin_instance.get_vip( context.get_admin_context(), vip['id'] ) self.assertEqual(constants.ACTIVE, new_vip['status']) # Delete VIP self.plugin_instance.delete_vip( context.get_admin_context(), vip['id']) # Test deletion REST calls calls = [ mock.call('DELETE', u'/api/workflow/' + pool['pool']['id'], None, None) ] self.driver_rest_call_mock.assert_has_calls( calls, any_order=True) def test_create_vip_2_leg(self): """Test creation of a VIP where Alteon VIP and PIP are different.""" with self.subnet(cidr='10.0.0.0/24') as subnet: with self.subnet(cidr='10.0.1.0/24') as pool_sub: with self.pool(provider='radware', subnet_id=pool_sub['subnet']['id']) as pool: vip_data = { 'name': 'vip1', 'subnet_id': subnet['subnet']['id'], 'pool_id': pool['pool']['id'], 'description': '', 'protocol_port': 80, 'protocol': 'HTTP', 'connection_limit': -1, 'admin_state_up': True, 'status': constants.PENDING_CREATE, 'tenant_id': self._tenant_id, 'session_persistence': '' } vip = self.plugin_instance.create_vip( context.get_admin_context(), {'vip': vip_data}) name_suffix = '%s_%s' % (subnet['subnet']['network_id'], pool_sub['subnet']['network_id']) # Test creation REST calls calls = [ mock.call('GET', '/api/workflowTemplate', None, None), mock.call('GET', '/api/service/srv_' + name_suffix, None, None), mock.call('POST', '/api/service?name=srv_' + name_suffix + '&tenant=' + vip['tenant_id'], mock.ANY, driver.CREATE_SERVICE_HEADER), mock.call('POST', 'someuri', None, driver.PROVISION_HEADER), mock.call('GET', '/api/workflow/l2_l3_' + name_suffix, None, None), 
mock.call('POST', '/api/workflowTemplate/' + 'openstack_l2_l3' + '?name=l2_l3_' + name_suffix, mock.ANY, driver.TEMPLATE_HEADER), mock.call('POST', '/api/workflow/l2_l3_' + name_suffix + '/action/setup_l2_l3', mock.ANY, driver.TEMPLATE_HEADER), mock.call('GET', '/api/workflow/' + pool['pool']['id'], None, None), mock.call('POST', '/api/workflowTemplate/' + 'openstack_l4' + '?name=' + pool['pool']['id'], mock.ANY, driver.TEMPLATE_HEADER), mock.call('POST', '/api/workflow/' + pool['pool']['id'] + '/action/BaseCreate', mock.ANY, driver.TEMPLATE_HEADER) ] self.driver_rest_call_mock.assert_has_calls(calls) #Test DB new_vip = self.plugin_instance.get_vip( context.get_admin_context(), vip['id'] ) self.assertEqual(constants.ACTIVE, new_vip['status']) # Test that PIP neutron port was created pip_port_filter = { 'name': ['pip_' + vip['id']], } plugin = manager.NeutronManager.get_plugin() num_ports = plugin.get_ports_count( context.get_admin_context(), filters=pip_port_filter) self.assertTrue(num_ports > 0) # Delete VIP self.plugin_instance.delete_vip( context.get_admin_context(), vip['id']) # Test deletion REST calls calls = [ mock.call('DELETE', u'/api/workflow/' + pool['pool']['id'], None, None) ] self.driver_rest_call_mock.assert_has_calls(calls) def test_update_vip(self): with self.subnet() as subnet: with self.pool(provider='radware', do_delete=False, subnet_id=subnet['subnet']['id']) as pool: vip_data = { 'name': 'vip1', 'subnet_id': subnet['subnet']['id'], 'pool_id': pool['pool']['id'], 'description': '', 'protocol_port': 80, 'protocol': 'HTTP', 'connection_limit': -1, 'admin_state_up': True, 'status': constants.PENDING_CREATE, 'tenant_id': self._tenant_id, 'session_persistence': '' } vip = self.plugin_instance.create_vip( context.get_admin_context(), {'vip': vip_data}) vip_data['status'] = constants.PENDING_UPDATE self.plugin_instance.update_vip( context.get_admin_context(), vip['id'], {'vip': vip_data}) # Test REST calls calls = [ mock.call('POST', '/api/workflow/' 
+ pool['pool']['id'] + '/action/BaseCreate', mock.ANY, driver.TEMPLATE_HEADER), ] self.driver_rest_call_mock.assert_has_calls( calls, any_order=True) updated_vip = self.plugin_instance.get_vip( context.get_admin_context(), vip['id']) self.assertEqual(constants.ACTIVE, updated_vip['status']) # delete VIP self.plugin_instance.delete_vip( context.get_admin_context(), vip['id']) def test_update_vip_2_leg(self): """Test update of a VIP where Alteon VIP and PIP are different.""" with self.subnet(cidr='10.0.0.0/24') as subnet: with self.subnet(cidr='10.0.1.0/24') as pool_subnet: with self.pool(provider='radware', subnet_id=pool_subnet['subnet']['id']) as pool: vip_data = { 'name': 'vip1', 'subnet_id': subnet['subnet']['id'], 'pool_id': pool['pool']['id'], 'description': '', 'protocol_port': 80, 'protocol': 'HTTP', 'connection_limit': -1, 'admin_state_up': True, 'status': constants.PENDING_CREATE, 'tenant_id': self._tenant_id, 'session_persistence': '' } vip = self.plugin_instance.create_vip( context.get_admin_context(), {'vip': vip_data}) self.plugin_instance.update_vip( context.get_admin_context(), vip['id'], {'vip': vip_data}) # Test REST calls calls = [ mock.call('POST', '/api/workflow/' + pool['pool']['id'] + '/action/BaseCreate', mock.ANY, driver.TEMPLATE_HEADER), ] self.driver_rest_call_mock.assert_has_calls(calls) updated_vip = self.plugin_instance.get_vip( context.get_admin_context(), vip['id']) self.assertEqual(constants.ACTIVE, updated_vip['status']) # delete VIP self.plugin_instance.delete_vip( context.get_admin_context(), vip['id']) def test_delete_vip_failure(self): plugin = self.plugin_instance with self.network() as network: with self.subnet(network=network) as subnet: with self.pool(do_delete=False, provider='radware', subnet_id=subnet['subnet']['id']) as pool: with contextlib.nested( self.member(pool_id=pool['pool']['id'], do_delete=False), self.member(pool_id=pool['pool']['id'], address='192.168.1.101', do_delete=False), 
self.health_monitor(do_delete=False), self.vip(pool=pool, subnet=subnet, do_delete=False) ) as (mem1, mem2, hm, vip): plugin.create_pool_health_monitor( context.get_admin_context(), hm, pool['pool']['id'] ) rest_call_function_mock.__dict__.update( {'RESPOND_WITH_ERROR': True}) plugin.delete_vip( context.get_admin_context(), vip['vip']['id']) u_vip = plugin.get_vip( context.get_admin_context(), vip['vip']['id']) u_pool = plugin.get_pool( context.get_admin_context(), pool['pool']['id']) u_mem1 = plugin.get_member( context.get_admin_context(), mem1['member']['id']) u_mem2 = plugin.get_member( context.get_admin_context(), mem2['member']['id']) u_phm = plugin.get_pool_health_monitor( context.get_admin_context(), hm['health_monitor']['id'], pool['pool']['id']) self.assertEqual(constants.ERROR, u_vip['status']) self.assertEqual(constants.ACTIVE, u_pool['status']) self.assertEqual(constants.ACTIVE, u_mem1['status']) self.assertEqual(constants.ACTIVE, u_mem2['status']) self.assertEqual(constants.ACTIVE, u_phm['status']) def test_delete_vip(self): with self.subnet() as subnet: with self.pool(provider='radware', do_delete=False, subnet_id=subnet['subnet']['id']) as pool: vip_data = { 'name': 'vip1', 'subnet_id': subnet['subnet']['id'], 'pool_id': pool['pool']['id'], 'description': '', 'protocol_port': 80, 'protocol': 'HTTP', 'connection_limit': -1, 'admin_state_up': True, 'status': constants.PENDING_CREATE, 'tenant_id': self._tenant_id, 'session_persistence': '' } vip = self.plugin_instance.create_vip( context.get_admin_context(), {'vip': vip_data}) self.plugin_instance.delete_vip( context.get_admin_context(), vip['id']) calls = [ mock.call('DELETE', '/api/workflow/' + pool['pool']['id'], None, None) ] self.driver_rest_call_mock.assert_has_calls( calls, any_order=True) self.assertRaises(loadbalancer.VipNotFound, self.plugin_instance.get_vip, context.get_admin_context(), vip['id']) def test_delete_vip_2_leg(self): """Test deletion of a VIP where Alteon VIP and PIP are 
different.""" self.driver_rest_call_mock.reset_mock() with self.subnet(cidr='10.0.0.0/24') as subnet: with self.subnet(cidr='10.0.1.0/24') as pool_subnet: with self.pool(provider='radware', do_delete=False, subnet_id=pool_subnet['subnet']['id']) as pool: vip_data = { 'name': 'vip1', 'subnet_id': subnet['subnet']['id'], 'pool_id': pool['pool']['id'], 'description': '', 'protocol_port': 80, 'protocol': 'HTTP', 'connection_limit': -1, 'admin_state_up': True, 'status': constants.PENDING_CREATE, 'tenant_id': self._tenant_id, 'session_persistence': '' } vip = self.plugin_instance.create_vip( context.get_admin_context(), {'vip': vip_data}) self.plugin_instance.delete_vip( context.get_admin_context(), vip['id']) calls = [ mock.call('DELETE', '/api/workflow/' + pool['pool']['id'], None, None) ] self.driver_rest_call_mock.assert_has_calls(calls) # Test that PIP neutron port was deleted pip_port_filter = { 'name': ['pip_' + vip['id']], } plugin = manager.NeutronManager.get_plugin() num_ports = plugin.get_ports_count( context.get_admin_context(), filters=pip_port_filter) self.assertTrue(num_ports == 0) self.assertRaises(loadbalancer.VipNotFound, self.plugin_instance.get_vip, context.get_admin_context(), vip['id']) def test_update_pool(self): with self.subnet(): with self.pool() as pool: del pool['pool']['provider'] del pool['pool']['status'] self.plugin_instance.update_pool( context.get_admin_context(), pool['pool']['id'], pool) pool_db = self.plugin_instance.get_pool( context.get_admin_context(), pool['pool']['id']) self.assertEqual(constants.PENDING_UPDATE, pool_db['status']) def test_delete_pool_with_vip(self): with self.subnet() as subnet: with self.pool(provider='radware', do_delete=False, subnet_id=subnet['subnet']['id']) as pool: with self.vip(pool=pool, subnet=subnet): self.assertRaises(loadbalancer.PoolInUse, self.plugin_instance.delete_pool, context.get_admin_context(), pool['pool']['id']) def test_create_member_with_vip(self): with self.subnet() as subnet: with 
self.pool(provider='radware', subnet_id=subnet['subnet']['id']) as p: with self.vip(pool=p, subnet=subnet): with self.member(pool_id=p['pool']['id']): calls = [ mock.call( 'POST', '/api/workflow/' + p['pool']['id'] + '/action/BaseCreate', mock.ANY, driver.TEMPLATE_HEADER ), mock.call( 'POST', '/api/workflow/' + p['pool']['id'] + '/action/BaseCreate', mock.ANY, driver.TEMPLATE_HEADER ) ] self.driver_rest_call_mock.assert_has_calls( calls, any_order=True) def test_create_member_on_different_subnets(self): with contextlib.nested( self.subnet(), self.subnet(cidr='20.0.0.0/24'), self.subnet(cidr='30.0.0.0/24') ) as (vip_sub, pool_sub, member_sub): with self.pool(provider='radware', subnet_id=pool_sub['subnet']['id']) as pool: with contextlib.nested( self.port(subnet=vip_sub, fixed_ips=[{'ip_address': '10.0.0.2'}]), self.port(subnet=pool_sub, fixed_ips=[{'ip_address': '20.0.0.2'}]), self.port(subnet=member_sub, fixed_ips=[{'ip_address': '30.0.0.2'}]) ): with contextlib.nested( self.member(pool_id=pool['pool']['id'], address='10.0.0.2'), self.member(pool_id=pool['pool']['id'], address='20.0.0.2'), self.member(pool_id=pool['pool']['id'], address='30.0.0.2') ) as (member_vip, member_pool, member_out): with self.vip(pool=pool, subnet=vip_sub): calls = [ mock.call( 'POST', '/api/workflow/' + pool['pool']['id'] + '/action/BaseCreate', mock.ANY, driver.TEMPLATE_HEADER ) ] self.driver_rest_call_mock.assert_has_calls( calls, any_order=True) mock_calls = self.driver_rest_call_mock.mock_calls params = mock_calls[-2][1][2]['parameters'] member_subnet_array = params['member_subnet_array'] member_mask_array = params['member_mask_array'] member_gw_array = params['member_gw_array'] self.assertEqual(['10.0.0.0', '255.255.255.255', '30.0.0.0'], member_subnet_array) self.assertEqual(['255.255.255.0', '255.255.255.255', '255.255.255.0'], member_mask_array) self.assertEqual( [pool_sub['subnet']['gateway_ip'], '255.255.255.255', pool_sub['subnet']['gateway_ip']], member_gw_array) def 
test_create_member_on_different_subnet_no_port(self): with contextlib.nested( self.subnet(), self.subnet(cidr='20.0.0.0/24'), self.subnet(cidr='30.0.0.0/24') ) as (vip_sub, pool_sub, member_sub): with self.pool(provider='radware', subnet_id=pool_sub['subnet']['id']) as pool: with self.member(pool_id=pool['pool']['id'], address='30.0.0.2'): with self.vip(pool=pool, subnet=vip_sub): calls = [ mock.call( 'POST', '/api/workflow/' + pool['pool']['id'] + '/action/BaseCreate', mock.ANY, driver.TEMPLATE_HEADER ) ] self.driver_rest_call_mock.assert_has_calls( calls, any_order=True) mock_calls = self.driver_rest_call_mock.mock_calls params = mock_calls[-2][1][2]['parameters'] member_subnet_array = params['member_subnet_array'] member_mask_array = params['member_mask_array'] member_gw_array = params['member_gw_array'] self.assertEqual(['30.0.0.2'], member_subnet_array) self.assertEqual(['255.255.255.255'], member_mask_array) self.assertEqual([pool_sub['subnet']['gateway_ip']], member_gw_array) def test_create_member_on_different_subnet_multiple_ports(self): cfg.CONF.set_override("allow_overlapping_ips", 'true') with self.network() as other_net: with contextlib.nested( self.subnet(), self.subnet(cidr='20.0.0.0/24'), self.subnet(cidr='30.0.0.0/24'), self.subnet(network=other_net, cidr='30.0.0.0/24') ) as (vip_sub, pool_sub, member_sub1, member_sub2): with self.pool(provider='radware', subnet_id=pool_sub['subnet']['id']) as pool: with contextlib.nested( self.port(subnet=member_sub1, fixed_ips=[{'ip_address': '30.0.0.2'}]), self.port(subnet=member_sub2, fixed_ips=[{'ip_address': '30.0.0.2'}])): with self.member(pool_id=pool['pool']['id'], address='30.0.0.2'): with self.vip(pool=pool, subnet=vip_sub): calls = [ mock.call( 'POST', '/api/workflow/' + pool['pool']['id'] + '/action/BaseCreate', mock.ANY, driver.TEMPLATE_HEADER ) ] self.driver_rest_call_mock.assert_has_calls( calls, any_order=True) calls = self.driver_rest_call_mock.mock_calls params = calls[-2][1][2]['parameters'] 
m_sub_array = params['member_subnet_array'] m_mask_array = params['member_mask_array'] m_gw_array = params['member_gw_array'] self.assertEqual(['30.0.0.2'], m_sub_array) self.assertEqual(['255.255.255.255'], m_mask_array) self.assertEqual( [pool_sub['subnet']['gateway_ip']], m_gw_array) def test_update_member_with_vip(self): with self.subnet() as subnet: with self.pool(provider='radware', subnet_id=subnet['subnet']['id']) as p: with self.member(pool_id=p['pool']['id']) as member: with self.vip(pool=p, subnet=subnet): self.plugin_instance.update_member( context.get_admin_context(), member['member']['id'], member ) calls = [ mock.call( 'POST', '/api/workflow/' + p['pool']['id'] + '/action/BaseCreate', mock.ANY, driver.TEMPLATE_HEADER ), mock.call( 'POST', '/api/workflow/' + p['pool']['id'] + '/action/BaseCreate', mock.ANY, driver.TEMPLATE_HEADER ) ] self.driver_rest_call_mock.assert_has_calls( calls, any_order=True) updated_member = self.plugin_instance.get_member( context.get_admin_context(), member['member']['id'] ) updated_member = self.plugin_instance.get_member( context.get_admin_context(), member['member']['id'] ) self.assertEqual(constants.ACTIVE, updated_member['status']) def test_update_member_without_vip(self): with self.subnet(): with self.pool(provider='radware') as pool: with self.member(pool_id=pool['pool']['id']) as member: member['member']['status'] = constants.PENDING_UPDATE updated_member = self.plugin_instance.update_member( context.get_admin_context(), member['member']['id'], member ) self.assertEqual(constants.PENDING_UPDATE, updated_member['status']) def test_delete_member_with_vip(self): with self.subnet() as subnet: with self.pool(provider='radware', subnet_id=subnet['subnet']['id']) as p: with self.member(pool_id=p['pool']['id'], do_delete=False) as m: with self.vip(pool=p, subnet=subnet): # Reset mock and # wait for being sure the member # Changed status from PENDING-CREATE # to ACTIVE self.plugin_instance.delete_member( 
context.get_admin_context(), m['member']['id'] ) name, args, kwargs = ( self.driver_rest_call_mock.mock_calls[-2] ) deletion_post_graph = str(args[2]) self.assertTrue(re.search( r'.*\'member_address_array\': \[\].*', deletion_post_graph )) calls = [ mock.call( 'POST', '/api/workflow/' + p['pool']['id'] + '/action/BaseCreate', mock.ANY, driver.TEMPLATE_HEADER ) ] self.driver_rest_call_mock.assert_has_calls( calls, any_order=True) self.assertRaises(loadbalancer.MemberNotFound, self.plugin_instance.get_member, context.get_admin_context(), m['member']['id']) def test_delete_member_without_vip(self): with self.subnet(): with self.pool(provider='radware') as p: with self.member(pool_id=p['pool']['id'], do_delete=False) as m: self.plugin_instance.delete_member( context.get_admin_context(), m['member']['id'] ) self.assertRaises(loadbalancer.MemberNotFound, self.plugin_instance.get_member, context.get_admin_context(), m['member']['id']) def test_create_hm_with_vip(self): with self.subnet() as subnet: with self.health_monitor() as hm: with self.pool(provider='radware', subnet_id=subnet['subnet']['id']) as pool: with self.vip(pool=pool, subnet=subnet): self.plugin_instance.create_pool_health_monitor( context.get_admin_context(), hm, pool['pool']['id'] ) # Test REST calls calls = [ mock.call( 'POST', '/api/workflow/' + pool['pool']['id'] + '/action/BaseCreate', mock.ANY, driver.TEMPLATE_HEADER ), mock.call( 'POST', '/api/workflow/' + pool['pool']['id'] + '/action/BaseCreate', mock.ANY, driver.TEMPLATE_HEADER ) ] self.driver_rest_call_mock.assert_has_calls( calls, any_order=True) phm = self.plugin_instance.get_pool_health_monitor( context.get_admin_context(), hm['health_monitor']['id'], pool['pool']['id'] ) self.assertEqual(constants.ACTIVE, phm['status']) def test_delete_pool_hm_with_vip(self): with self.subnet() as subnet: with self.health_monitor(do_delete=False) as hm: with self.pool(provider='radware', subnet_id=subnet['subnet']['id']) as pool: with self.vip(pool=pool, 
subnet=subnet): self.plugin_instance.create_pool_health_monitor( context.get_admin_context(), hm, pool['pool']['id'] ) self.plugin_instance.delete_pool_health_monitor( context.get_admin_context(), hm['health_monitor']['id'], pool['pool']['id'] ) name, args, kwargs = ( self.driver_rest_call_mock.mock_calls[-2] ) deletion_post_graph = str(args[2]) self.assertTrue(re.search( r'.*\'hm_uuid_array\': \[\].*', deletion_post_graph )) calls = [ mock.call( 'POST', '/api/workflow/' + pool['pool']['id'] + '/action/BaseCreate', mock.ANY, driver.TEMPLATE_HEADER ) ] self.driver_rest_call_mock.assert_has_calls( calls, any_order=True) self.assertRaises( loadbalancer.PoolMonitorAssociationNotFound, self.plugin_instance.get_pool_health_monitor, context.get_admin_context(), hm['health_monitor']['id'], pool['pool']['id'] ) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/radware/__init__.py0000664000567000056710000000000012701407726034051 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/a10networks/0000775000567000056710000000000012701410110032460 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/a10networks/__init__.py0000664000567000056710000000000012701407726034602 0ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/a10networks/test_driver_v1.pyneutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/a10networks/test_driver_v0000664000567000056710000001505012701407726035306 0ustar jenkinsjenkins00000000000000# Copyright 2014, Doug Wiegley (dougwig), A10 Networks # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import mock from neutron import context from neutron_lbaas.db.loadbalancer import loadbalancer_db as lb_db from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer with mock.patch.dict(sys.modules, {'a10_neutron_lbaas': mock.Mock()}): from neutron_lbaas.services.loadbalancer.drivers.a10networks \ import driver_v1 def fake_model(id): return { 'id': id, 'tenant_id': "tennant-was-a-great-doctor" } def fake_member(id): return { 'id': id, 'tenant_id': "vippyvip", 'address': '1.1.1.1' } class TestA10ThunderDriver(test_db_loadbalancer.LoadBalancerPluginDbTestCase): def setUp(self): super(TestA10ThunderDriver, self).setUp() self.context = context.get_admin_context() self.plugin = mock.Mock() self.driver = driver_v1.ThunderDriver(self.plugin) self.driver.a10 = mock.Mock() self.m = fake_model('p1') def test__hm_binding_count(self): n = self.driver._hm_binding_count(self.context, 'hm01') self.assertEqual(0, n) def test__member_count(self): self.m = fake_member('mem1') n = self.driver._member_count(self.context, self.m) self.assertEqual(0, n) def test__member_get_ip(self): self.m = fake_member('mem1') z = self.driver._member_get_ip(self.context, self.m, False) self.assertEqual('1.1.1.1', z) z = self.driver._member_get_ip(self.context, self.m, True) self.assertEqual('1.1.1.1', z) def test__pool_get_hm(self): self.driver._pool_get_hm(self.context, 'hm01') self.plugin.get_health_monitor.assert_called_once_with( self.context, 'hm01') def test__pool_get_tenant_id(self): z = self.driver._pool_get_tenant_id(self.context, 'pool1') self.assertEqual('', z) def 
test__pool_get_vip_id(self): z = self.driver._pool_get_vip_id(self.context, 'pool1') self.assertEqual('', z) def test__pool_total(self): n = self.driver._pool_total(self.context, tenant_id='whatareyoudoingdave') self.assertEqual(0, n) def test__active(self): self.driver._active(self.context, 'vip', 'vip1') self.plugin.update_status.assert_called_once_with( self.context, lb_db.Vip, 'vip1', 'ACTIVE') def test__failed(self): self.driver._failed(self.context, 'vip', 'vip2-1-2') self.plugin.update_status.assert_called_once_with( self.context, lb_db.Vip, 'vip2-1-2', 'ERROR') def test__db_delete(self): self.driver._db_delete(self.context, 'pool', 'myid0101') self.plugin._delete_db_pool.assert_called_once_with( self.context, 'myid0101') def test__hm_active(self): self.driver._hm_active(self.context, 'hm01', 'pool1') self.plugin.update_pool_health_monitor.assert_called_once_with( self.context, 'hm01', 'pool1', 'ACTIVE') def test__hm_failed(self): self.driver._hm_failed(self.context, 'hm01', 'pool1') self.plugin.update_pool_health_monitor.assert_called_once_with( self.context, 'hm01', 'pool1', 'ERROR') def test__hm_db_delete(self): self.driver._hm_db_delete(self.context, 'hm01', 'pool2') self.plugin._delete_db_pool_health_monitor.assert_called_once_with( self.context, 'hm01', 'pool2') def test_create_vip(self): self.driver.create_vip(self.context, self.m) self.driver.a10.vip.create.assert_called_once_with( self.context, self.m) def test_update_vip(self): self.driver.update_vip(self.context, self.m, self.m) self.driver.a10.vip.update.assert_called_once_with( self.context, self.m, self.m) def test_delete_vip(self): self.driver.delete_vip(self.context, self.m) self.driver.a10.vip.delete.assert_called_once_with( self.context, self.m) def test_create_pool(self): self.driver.create_pool(self.context, self.m) self.driver.a10.pool.create.assert_called_once_with( self.context, self.m) def test_update_pool(self): self.driver.update_pool(self.context, self.m, self.m) 
self.driver.a10.pool.update.assert_called_once_with( self.context, self.m, self.m) def test_delete_pool(self): self.driver.delete_pool(self.context, self.m) self.driver.a10.pool.delete.assert_called_once_with( self.context, self.m) def test_stats(self): self.driver.stats(self.context, self.m['id']) self.driver.a10.pool.stats.assert_called_once_with( self.context, self.m['id']) def test_create_member(self): self.driver.create_member(self.context, self.m) self.driver.a10.member.create.assert_called_once_with( self.context, self.m) def test_update_member(self): self.driver.update_member(self.context, self.m, self.m) self.driver.a10.member.update.assert_called_once_with( self.context, self.m, self.m) def test_delete_member(self): self.driver.delete_member(self.context, self.m) self.driver.a10.member.delete.assert_called_once_with( self.context, self.m) def test_update_pool_health_monitor(self): self.driver.update_pool_health_monitor(self.context, self.m, self.m, 'pool1') self.driver.a10.hm.update.assert_called_once_with( self.context, self.m, self.m, 'pool1') def test_create_pool_health_monitor(self): self.driver.create_pool_health_monitor(self.context, self.m, 'pool1') self.driver.a10.hm.create.assert_called_once_with( self.context, self.m, 'pool1') def test_delete_pool_health_monitor(self): self.driver.delete_pool_health_monitor(self.context, self.m, 'pool1') self.driver.a10.hm.delete.assert_called_once_with( self.context, self.m, 'pool1') neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/test_driver_base.py0000664000567000056710000001562512701407726034234 0ustar jenkinsjenkins00000000000000# Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import mock from neutron.api.v2 import attributes from neutron import context as ncontext from neutron.plugins.common import constants from neutron_lbaas.drivers import driver_mixins from neutron_lbaas.extensions import loadbalancerv2 from neutron_lbaas.services.loadbalancer import constants as lb_const from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancerv2 class DummyManager(driver_mixins.BaseManagerMixin): def __init__(self, driver): super(DummyManager, self).__init__(driver) self.driver = driver self._db_delete_method = None @property def db_delete_method(self): return self._db_delete_method def delete(self, context, obj): pass def update(self, context, obj_old, obj): pass def create(self, context, obj): pass class TestBaseManager(test_db_loadbalancerv2.LbaasPluginDbTestCase): def _setup_db_data(self, context): hm = self.plugin.db.create_healthmonitor( context, {'admin_state_up': True, 'type': lb_const.HEALTH_MONITOR_HTTP, 'delay': 1, 'timeout': 1, 'max_retries': 1}) lb = self.plugin.db.create_loadbalancer( context, {'vip_address': '10.0.0.1', 'vip_subnet_id': self.subnet_id, 'admin_state_up': True}) pool = self.plugin.db.create_pool( context, {'protocol': lb_const.PROTOCOL_HTTP, 'session_persistence': None, 'lb_algorithm': lb_const.LB_METHOD_ROUND_ROBIN, 'admin_state_up': True, 'healthmonitor_id': hm.id, 'loadbalancer_id': lb.id}) self.plugin.db.create_pool_member( context, {'address': '10.0.0.1', 'protocol_port': 80, 'admin_state_up': True}, pool.id) listener = self.plugin.db.create_listener( context, {'protocol_port': 80, 'protocol': 
lb_const.PROTOCOL_HTTP, 'admin_state_up': True, 'loadbalancer_id': lb.id, 'default_pool_id': pool.id, 'sni_container_ids': []}) return listener def setUp(self): super(TestBaseManager, self).setUp() self.context = ncontext.get_admin_context() self.driver = mock.Mock() self.driver.plugin = self.plugin self.manager = DummyManager(self.driver) network = self._make_network(self.fmt, 'test-net', True) self.subnet = self._make_subnet( self.fmt, network, gateway=attributes.ATTR_NOT_SPECIFIED, cidr='10.0.0.0/24') self.subnet_id = self.subnet['subnet']['id'] self.listener = self._setup_db_data(self.context) class TestLBManager(TestBaseManager): def setUp(self): super(TestLBManager, self).setUp() self.manager._db_delete_method = self.plugin.db.delete_loadbalancer def test_success_completion(self): self.manager.successful_completion(self.context, self.listener.loadbalancer) lb = self.plugin.db.get_loadbalancer(self.context, self.listener.loadbalancer.id) self.assertEqual(constants.ACTIVE, lb.provisioning_status) self.assertEqual(lb_const.ONLINE, lb.operating_status) def test_success_completion_delete(self): self.plugin.db.delete_listener(self.context, self.listener.id) self.manager.successful_completion(self.context, self.listener.loadbalancer, delete=True) self.assertRaises(loadbalancerv2.EntityNotFound, self.plugin.db.get_loadbalancer, self.context, self.listener.loadbalancer.id) def test_failed_completion(self): self.manager.failed_completion(self.context, self.listener.loadbalancer) lb = self.plugin.db.get_loadbalancer(self.context, self.listener.loadbalancer.id) self.assertEqual(constants.ERROR, lb.provisioning_status) self.assertEqual(lb_const.OFFLINE, lb.operating_status) listener = self.plugin.db.get_listener(self.context, self.listener.id) self.assertEqual(constants.PENDING_CREATE, listener.provisioning_status) self.assertEqual(lb_const.OFFLINE, listener.operating_status) class TestListenerManager(TestBaseManager): """This should also cover Pool, Member, and Health 
Monitor cases.""" def setUp(self): super(TestListenerManager, self).setUp() self.manager._db_delete_method = self.plugin.db.delete_listener def test_success_completion(self): self.manager.successful_completion(self.context, self.listener) listener = self.plugin.db.get_listener(self.context, self.listener.id) self.assertEqual(constants.ACTIVE, listener.provisioning_status) self.assertEqual(lb_const.ONLINE, listener.operating_status) self.assertEqual(constants.ACTIVE, listener.loadbalancer.provisioning_status) # because the load balancer's original operating status was OFFLINE self.assertEqual(lb_const.OFFLINE, listener.loadbalancer.operating_status) def test_success_completion_delete(self): self.manager.successful_completion(self.context, self.listener, delete=True) self.assertRaises(loadbalancerv2.EntityNotFound, self.plugin.db.get_listener, self.context, self.listener.loadbalancer.id) def test_failed_completion(self): self.manager.failed_completion(self.context, self.listener) lb = self.plugin.db.get_loadbalancer(self.context, self.listener.loadbalancer.id) self.assertEqual(constants.ACTIVE, lb.provisioning_status) self.assertEqual(lb_const.OFFLINE, lb.operating_status) listener = self.plugin.db.get_listener(self.context, self.listener.id) self.assertEqual(constants.ERROR, listener.provisioning_status) self.assertEqual(lb_const.OFFLINE, listener.operating_status) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/haproxy/0000775000567000056710000000000012701410110031774 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/haproxy/__init__.py0000664000567000056710000000000012701407726034116 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/haproxy/test_cfg.py0000664000567000056710000002610712701407726034175 0ustar jenkinsjenkins00000000000000# Copyright 2013 Mirantis, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import mock from neutron_lbaas.services.loadbalancer.drivers.haproxy import cfg from neutron_lbaas.tests import base class TestHaproxyCfg(base.BaseTestCase): def test_save_config(self): with contextlib.nested( mock.patch('neutron_lbaas.services.loadbalancer.' 'drivers.haproxy.cfg._build_global'), mock.patch('neutron_lbaas.services.loadbalancer.' 'drivers.haproxy.cfg._build_defaults'), mock.patch('neutron_lbaas.services.loadbalancer.' 'drivers.haproxy.cfg._build_frontend'), mock.patch('neutron_lbaas.services.loadbalancer.' 
'drivers.haproxy.cfg._build_backend'), mock.patch('neutron.common.utils.replace_file') ) as (b_g, b_d, b_f, b_b, replace): test_config = ['globals', 'defaults', 'frontend', 'backend'] b_g.return_value = [test_config[0]] b_d.return_value = [test_config[1]] b_f.return_value = [test_config[2]] b_b.return_value = [test_config[3]] cfg.save_config('test_path', mock.Mock()) replace.assert_called_once_with('test_path', '\n'.join(test_config)) def test_build_global(self): expected_opts = ['global', '\tdaemon', '\tuser nobody', '\tgroup test_group', '\tlog /dev/log local0', '\tlog /dev/log local1 notice', '\tstats socket test_path mode 0666 level user'] opts = cfg._build_global(mock.Mock(), 'test_path', 'test_group') self.assertEqual(expected_opts, list(opts)) def test_build_defaults(self): expected_opts = ['defaults', '\tlog global', '\tretries 3', '\toption redispatch', '\ttimeout connect 5000', '\ttimeout client 50000', '\ttimeout server 50000'] opts = cfg._build_defaults(mock.Mock()) self.assertEqual(expected_opts, list(opts)) def test_build_frontend(self): test_config = {'vip': {'id': 'vip_id', 'protocol': 'HTTP', 'port': {'fixed_ips': [ {'ip_address': '10.0.0.2'}] }, 'protocol_port': 80, 'connection_limit': 2000, 'admin_state_up': True, }, 'pool': {'id': 'pool_id'}} expected_opts = ['frontend vip_id', '\toption tcplog', '\tbind 10.0.0.2:80', '\tmode http', '\tdefault_backend pool_id', '\tmaxconn 2000', '\toption forwardfor'] opts = cfg._build_frontend(test_config) self.assertEqual(expected_opts, list(opts)) test_config['vip']['connection_limit'] = -1 expected_opts.remove('\tmaxconn 2000') opts = cfg._build_frontend(test_config) self.assertEqual(expected_opts, list(opts)) test_config['vip']['admin_state_up'] = False expected_opts.append('\tdisabled') opts = cfg._build_frontend(test_config) self.assertEqual(expected_opts, list(opts)) def test_build_backend(self): test_config = {'pool': {'id': 'pool_id', 'protocol': 'HTTP', 'lb_method': 'ROUND_ROBIN', 'admin_state_up': 
True}, 'members': [{'status': 'ACTIVE', 'admin_state_up': True, 'id': 'member1_id', 'address': '10.0.0.3', 'protocol_port': 80, 'weight': 1}, {'status': 'INACTIVE', 'admin_state_up': True, 'id': 'member2_id', 'address': '10.0.0.4', 'protocol_port': 80, 'weight': 1}, {'status': 'PENDING_CREATE', 'admin_state_up': True, 'id': 'member3_id', 'address': '10.0.0.5', 'protocol_port': 80, 'weight': 1}], 'healthmonitors': [{'admin_state_up': True, 'delay': 3, 'max_retries': 4, 'timeout': 2, 'type': 'TCP'}], 'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}} expected_opts = ['backend pool_id', '\tmode http', '\tbalance roundrobin', '\toption forwardfor', '\ttimeout check 2s', '\tcookie SRV insert indirect nocache', '\tserver member1_id 10.0.0.3:80 weight 1 ' 'check inter 3s fall 4 cookie member1_id', '\tserver member2_id 10.0.0.4:80 weight 1 ' 'check inter 3s fall 4 cookie member2_id', '\tserver member3_id 10.0.0.5:80 weight 1 ' 'check inter 3s fall 4 cookie member3_id'] opts = cfg._build_backend(test_config) self.assertEqual(expected_opts, list(opts)) test_config['pool']['admin_state_up'] = False expected_opts.append('\tdisabled') opts = cfg._build_backend(test_config) self.assertEqual(expected_opts, list(opts)) def test_get_server_health_option(self): test_config = {'healthmonitors': [{'admin_state_up': False, 'delay': 3, 'max_retries': 4, 'timeout': 2, 'type': 'TCP', 'http_method': 'GET', 'url_path': '/', 'expected_codes': '200'}]} self.assertEqual(('', []), cfg._get_server_health_option(test_config)) self.assertEqual(('', []), cfg._get_server_health_option(test_config)) test_config['healthmonitors'][0]['admin_state_up'] = True expected = (' check inter 3s fall 4', ['timeout check 2s']) self.assertEqual(expected, cfg._get_server_health_option(test_config)) test_config['healthmonitors'][0]['type'] = 'HTTPS' expected = (' check inter 3s fall 4', ['timeout check 2s', 'option httpchk GET /', 'http-check expect rstatus 200', 'option ssl-hello-chk']) 
self.assertEqual(expected, cfg._get_server_health_option(test_config)) def test_has_http_cookie_persistence(self): config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}} self.assertTrue(cfg._has_http_cookie_persistence(config)) config = {'vip': {'session_persistence': {'type': 'SOURCE_IP'}}} self.assertFalse(cfg._has_http_cookie_persistence(config)) config = {'vip': {'session_persistence': {}}} self.assertFalse(cfg._has_http_cookie_persistence(config)) def test_get_session_persistence(self): config = {'vip': {'session_persistence': {'type': 'SOURCE_IP'}}} self.assertEqual(['stick-table type ip size 10k', 'stick on src'], cfg._get_session_persistence(config)) config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}, 'members': []} self.assertEqual([], cfg._get_session_persistence(config)) config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}} self.assertEqual([], cfg._get_session_persistence(config)) config = {'vip': {'session_persistence': {'type': 'HTTP_COOKIE'}}, 'members': [{'id': 'member1_id'}]} self.assertEqual(['cookie SRV insert indirect nocache'], cfg._get_session_persistence(config)) config = {'vip': {'session_persistence': {'type': 'APP_COOKIE', 'cookie_name': 'test'}}} self.assertEqual(['appsession test len 56 timeout 3h'], cfg._get_session_persistence(config)) config = {'vip': {'session_persistence': {'type': 'APP_COOKIE'}}} self.assertEqual([], cfg._get_session_persistence(config)) config = {'vip': {'session_persistence': {'type': 'UNSUPPORTED'}}} self.assertEqual([], cfg._get_session_persistence(config)) def test_expand_expected_codes(self): exp_codes = '' self.assertEqual(set([]), cfg._expand_expected_codes(exp_codes)) exp_codes = '200' self.assertEqual(set(['200']), cfg._expand_expected_codes(exp_codes)) exp_codes = '200, 201' self.assertEqual(set(['200', '201']), cfg._expand_expected_codes(exp_codes)) exp_codes = '200, 201,202' self.assertEqual(set(['200', '201', '202']), cfg._expand_expected_codes(exp_codes)) 
exp_codes = '200-202' self.assertEqual(set(['200', '201', '202']), cfg._expand_expected_codes(exp_codes)) exp_codes = '200-202, 205' self.assertEqual(set(['200', '201', '202', '205']), cfg._expand_expected_codes(exp_codes)) exp_codes = '200, 201-203' self.assertEqual(set(['200', '201', '202', '203']), cfg._expand_expected_codes(exp_codes)) exp_codes = '200, 201-203, 205' self.assertEqual(set(['200', '201', '202', '203', '205']), cfg._expand_expected_codes(exp_codes)) exp_codes = '201-200, 205' self.assertEqual(set(['205']), cfg._expand_expected_codes(exp_codes)) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/haproxy/sample_configs/0000775000567000056710000000000012701410110034765 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/haproxy/sample_configs/__init__.pyneutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/haproxy/sample_configs/__0000664000567000056710000000000012701407726035276 0ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/haproxy/sample_configs/sample_configs.pyneutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/haproxy/sample_configs/sa0000664000567000056710000002574212701407726035350 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # import collections RET_PERSISTENCE = { 'type': 'HTTP_COOKIE', 'cookie_name': 'HTTP_COOKIE'} HASHSEED_ORDERED_CODES = list({'404', '405', '500'}) PIPED_CODES = '|'.join(HASHSEED_ORDERED_CODES) RET_MONITOR = { 'id': 'sample_monitor_id_1', 'type': 'HTTP', 'delay': 30, 'timeout': 31, 'max_retries': 3, 'http_method': 'GET', 'url_path': '/index.html', 'expected_codes': PIPED_CODES, 'admin_state_up': True} RET_MEMBER_1 = { 'id': 'sample_member_id_1', 'address': '10.0.0.99', 'protocol_port': 82, 'weight': 13, 'subnet_id': '10.0.0.1/24', 'admin_state_up': True, 'provisioning_status': 'ACTIVE'} RET_MEMBER_2 = { 'id': 'sample_member_id_2', 'address': '10.0.0.98', 'protocol_port': 82, 'weight': 13, 'subnet_id': '10.0.0.1/24', 'admin_state_up': True, 'provisioning_status': 'ACTIVE'} RET_POOL = { 'id': 'sample_pool_id_1', 'protocol': 'http', 'lb_algorithm': 'roundrobin', 'members': [RET_MEMBER_1, RET_MEMBER_2], 'health_monitor': RET_MONITOR, 'session_persistence': RET_PERSISTENCE, 'admin_state_up': True, 'provisioning_status': 'ACTIVE'} RET_DEF_TLS_CONT = {'id': 'cont_id_1', 'allencompassingpem': 'imapem'} RET_SNI_CONT_1 = {'id': 'cont_id_2', 'allencompassingpem': 'imapem2'} RET_SNI_CONT_2 = {'id': 'cont_id_3', 'allencompassingpem': 'imapem3'} RET_LISTENER = { 'id': 'sample_listener_id_1', 'protocol_port': '80', 'protocol': 'HTTP', 'protocol_mode': 'http', 'default_pool': RET_POOL, 'connection_limit': 98} RET_LISTENER_TLS = { 'id': 'sample_listener_id_1', 'protocol_port': '443', 'protocol_mode': 'HTTP', 'protocol': 'TERMINATED_HTTPS', 'default_pool': RET_POOL, 'connection_limit': 98, 'default_tls_container_id': 'cont_id_1', 'default_tls_path': '/v2/sample_loadbalancer_id_1/cont_id_1.pem', 'default_tls_container': RET_DEF_TLS_CONT} RET_LISTENER_TLS_SNI = { 'id': 'sample_listener_id_1', 'protocol_port': '443', 'protocol_mode': 'http', 'protocol': 'TERMINATED_HTTPS', 
'default_pool': RET_POOL, 'connection_limit': 98, 'default_tls_container_id': 'cont_id_1', 'default_tls_path': '/v2/sample_loadbalancer_id_1/cont_id_1.pem', 'default_tls_container': RET_DEF_TLS_CONT, 'crt_dir': '/v2/sample_loadbalancer_id_1', 'sni_container_ids': ['cont_id_2', 'cont_id_3'], 'sni_containers': [RET_SNI_CONT_1, RET_SNI_CONT_2]} RET_LB = { 'name': 'test-lb', 'vip_address': '10.0.0.2', 'listeners': [RET_LISTENER], 'pools': [RET_POOL]} RET_LB_TLS = { 'name': 'test-lb', 'vip_address': '10.0.0.2', 'listeners': [RET_LISTENER_TLS], 'pools': [RET_POOL]} RET_LB_TLS_SNI = { 'name': 'test-lb', 'vip_address': '10.0.0.2', 'listeners': [RET_LISTENER_TLS_SNI], 'pools': [RET_POOL]} def sample_loadbalancer_tuple(proto=None, monitor=True, persistence=True, persistence_type=None, tls=False, sni=False): proto = 'HTTP' if proto is None else proto in_lb = collections.namedtuple( 'loadbalancer', 'id, name, vip_address, protocol, vip_port, ' 'listeners, pools') return in_lb( id='sample_loadbalancer_id_1', name='test-lb', vip_address='10.0.0.2', protocol=proto, vip_port=sample_vip_port_tuple(), listeners=[sample_listener_tuple(proto=proto, monitor=monitor, persistence=persistence, persistence_type=persistence_type, tls=tls, sni=sni)], pools=[sample_pool_tuple(proto=proto, monitor=monitor, persistence=persistence, persistence_type=persistence_type)] ) def sample_vip_port_tuple(): vip_port = collections.namedtuple('vip_port', 'fixed_ips') ip_address = collections.namedtuple('ip_address', 'ip_address') in_address = ip_address(ip_address='10.0.0.2') return vip_port(fixed_ips=[in_address]) def sample_listener_tuple(proto=None, monitor=True, persistence=True, persistence_type=None, tls=False, sni=False): proto = 'HTTP' if proto is None else proto port = '443' if proto is 'HTTPS' or proto is 'TERMINATED_HTTPS' else '80' in_listener = collections.namedtuple( 'listener', 'id, tenant_id, protocol_port, protocol, default_pool, ' 'connection_limit, admin_state_up, 
default_tls_container_id, ' 'sni_container_ids, default_tls_container, ' 'sni_containers, loadbalancer_id') return in_listener( id='sample_listener_id_1', tenant_id='sample_tenant_id', protocol_port=port, protocol=proto, default_pool=sample_pool_tuple( proto=proto, monitor=monitor, persistence=persistence, persistence_type=persistence_type), connection_limit=98, admin_state_up=True, default_tls_container_id='cont_id_1' if tls else '', sni_container_ids=['cont_id_2', 'cont_id_3'] if sni else [], default_tls_container=sample_tls_container_tuple( id='cont_id_1', certificate='--imapem1--\n', private_key='--imakey1--\n', intermediates=[ '--imainter1--\n', '--imainter1too--\n'], primary_cn='fakeCNM' ) if tls else '', sni_containers=[ sample_tls_sni_container_tuple( tls_container_id='cont_id_2', tls_container=sample_tls_container_tuple( id='cont_id_2', certificate='--imapem2--\n', private_key='--imakey2--\n', intermediates=[ '--imainter2--\n', '--imainter2too--\n'], primary_cn='fakeCN')), sample_tls_sni_container_tuple( tls_container_id='cont_id_3', tls_container=sample_tls_container_tuple( id='cont_id_3', certificate='--imapem3--\n', private_key='--imakey3--\n', intermediates=[ '--imainter3--\n', '--imainter3too--\n'], primary_cn='fakeCN2'))] if sni else [], loadbalancer_id='sample_loadbalancer_id_1' ) def sample_tls_sni_container_tuple(tls_container=None, tls_container_id=None): sc = collections.namedtuple('sni_container', 'tls_container,' 'tls_container_id') return sc(tls_container=tls_container, tls_container_id=tls_container_id) def sample_tls_container_tuple(id='cont_id_1', certificate=None, private_key=None, intermediates=None, primary_cn=None): intermediates = intermediates or [] sc = collections.namedtuple( 'tls_cert', 'id, certificate, private_key, intermediates, primary_cn') return sc(id=id, certificate=certificate, private_key=private_key, intermediates=intermediates or [], primary_cn=primary_cn) def sample_pool_tuple(proto=None, monitor=True, 
persistence=True, persistence_type=None, hm_admin_state=True): proto = 'HTTP' if proto is None else proto in_pool = collections.namedtuple( 'pool', 'id, protocol, lb_algorithm, members, healthmonitor,' 'session_persistence, admin_state_up, provisioning_status') mon = (sample_health_monitor_tuple(proto=proto, admin_state=hm_admin_state) if monitor is True else None) persis = sample_session_persistence_tuple( persistence_type=persistence_type) if persistence is True else None return in_pool( id='sample_pool_id_1', protocol=proto, lb_algorithm='ROUND_ROBIN', members=[sample_member_tuple('sample_member_id_1', '10.0.0.99'), sample_member_tuple('sample_member_id_2', '10.0.0.98')], healthmonitor=mon, session_persistence=persis, admin_state_up=True, provisioning_status='ACTIVE') def sample_member_tuple(id, ip, admin_state_up=True, status='ACTIVE'): in_member = collections.namedtuple('member', 'id, address, protocol_port, ' 'weight, subnet_id, ' 'admin_state_up, provisioning_status') return in_member( id=id, address=ip, protocol_port=82, weight=13, subnet_id='10.0.0.1/24', admin_state_up=admin_state_up, provisioning_status=status) def sample_session_persistence_tuple(persistence_type=None): spersistence = collections.namedtuple('SessionPersistence', 'type, cookie_name') pt = 'HTTP_COOKIE' if persistence_type is None else persistence_type return spersistence(type=pt, cookie_name=pt) def sample_health_monitor_tuple(proto='HTTP', admin_state=True): proto = 'HTTP' if proto is 'TERMINATED_HTTPS' else proto monitor = collections.namedtuple( 'monitor', 'id, type, delay, timeout, max_retries, http_method, ' 'url_path, expected_codes, admin_state_up') return monitor(id='sample_monitor_id_1', type=proto, delay=30, timeout=31, max_retries=3, http_method='GET', url_path='/index.html', expected_codes='500, 405, 404', admin_state_up=admin_state) def sample_base_expected_config(backend, frontend=None): if frontend is None: frontend = ("frontend sample_listener_id_1\n" " option tcplog\n" " 
maxconn 98\n" " option forwardfor\n" " bind 10.0.0.2:80\n" " mode http\n" " default_backend sample_pool_id_1\n\n") return ("# Configuration for test-lb\n" "global\n" " daemon\n" " user nobody\n" " group nogroup\n" " log /dev/log local0\n" " log /dev/log local1 notice\n" " stats socket /sock_path mode 0666 level user\n\n" "defaults\n" " log global\n" " retries 3\n" " option redispatch\n" " timeout connect 5000\n" " timeout client 50000\n" " timeout server 50000\n\n" + frontend + backend) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/haproxy/test_jinja_cfg.py0000664000567000056710000005650112701407726035351 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import mock from neutron.tests import base from neutron_lbaas.common.cert_manager import cert_manager from neutron_lbaas.common.tls_utils import cert_parser from neutron_lbaas.services.loadbalancer import data_models from neutron_lbaas.services.loadbalancer.drivers.haproxy import jinja_cfg from neutron_lbaas.tests.unit.services.loadbalancer.drivers.haproxy.\ sample_configs import sample_configs class TestHaproxyCfg(base.BaseTestCase): def test_save_config(self): with contextlib.nested( mock.patch('neutron_lbaas.services.loadbalancer.' 
'drivers.haproxy.jinja_cfg.render_loadbalancer_obj'), mock.patch('neutron.common.utils.replace_file') ) as (r_t, replace): r_t.return_value = 'fake_rendered_template' lb = mock.Mock() jinja_cfg.save_config('test_conf_path', lb, 'test_sock_path', 'nogroup', 'fake_state_path') r_t.assert_called_once_with(lb, 'nogroup', 'test_sock_path', 'fake_state_path') replace.assert_called_once_with('test_conf_path', 'fake_rendered_template') def test_get_template(self): template = jinja_cfg._get_template() self.assertEqual('haproxy.loadbalancer.j2', template.name) def test_render_template_tls_termination(self): lb = sample_configs.sample_loadbalancer_tuple( proto='TERMINATED_HTTPS', tls=True, sni=True) fe = ("frontend sample_listener_id_1\n" " option tcplog\n" " redirect scheme https if !{ ssl_fc }\n" " maxconn 98\n" " option forwardfor\n" " bind 10.0.0.2:443" " ssl crt /v2/sample_listener_id_1/fakeCNM.pem" " crt /v2/sample_listener_id_1\n" " mode http\n" " default_backend sample_pool_id_1\n\n") be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31\n" " option httpchk GET /index.html\n" " http-check expect rstatus %s\n" " server sample_member_id_1 10.0.0.99:82" " weight 13 check inter 30s fall 3 cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82" " weight 13 check inter 30s fall 3 cookie " "sample_member_id_2\n\n" % sample_configs.PIPED_CODES) with mock.patch('os.makedirs'): with mock.patch('os.listdir'): with mock.patch.object(jinja_cfg, 'n_utils'): with mock.patch.object( jinja_cfg, '_process_tls_certificates') as crt: crt.return_value = { 'tls_cert': lb.listeners[0] .default_tls_container, 'sni_certs': [lb.listeners[0] .sni_containers[0].tls_container]} rendered_obj = jinja_cfg.render_loadbalancer_obj( lb, 'nogroup', '/sock_path', '/v2') self.assertEqual( sample_configs.sample_base_expected_config( frontend=fe, backend=be), rendered_obj) def 
test_render_template_tls_termination_no_sni(self): lb = sample_configs.sample_loadbalancer_tuple( proto='TERMINATED_HTTPS', tls=True) fe = ("frontend sample_listener_id_1\n" " option tcplog\n" " redirect scheme https if !{ ssl_fc }\n" " maxconn 98\n" " option forwardfor\n" " bind 10.0.0.2:443" " ssl crt /v2/sample_listener_id_1/fakeCNM.pem\n" " mode http\n" " default_backend sample_pool_id_1\n\n") be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31\n" " option httpchk GET /index.html\n" " http-check expect rstatus %s\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 cookie sample_member_id_2\n\n" % sample_configs.PIPED_CODES) with mock.patch('os.makedirs'): with mock.patch('neutron.common.utils.replace_file'): with mock.patch('os.listdir'): with mock.patch.object(jinja_cfg, 'n_utils'): with mock.patch.object( jinja_cfg, '_process_tls_certificates') as crt: crt.return_value = { 'tls_cert': lb.listeners[0] .default_tls_container, 'sni_certs': []} rendered_obj = jinja_cfg.render_loadbalancer_obj( lb, 'nogroup', '/sock_path', '/v2') self.assertEqual( sample_configs.sample_base_expected_config( frontend=fe, backend=be), rendered_obj) def test_render_template_http(self): be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31\n" " option httpchk GET /index.html\n" " http-check expect rstatus %s\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 cookie sample_member_id_2\n\n" % sample_configs.PIPED_CODES) rendered_obj = jinja_cfg.render_loadbalancer_obj( sample_configs.sample_loadbalancer_tuple(), 'nogroup', '/sock_path', '/v2') self.assertEqual( 
sample_configs.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_https(self): fe = ("frontend sample_listener_id_1\n" " option tcplog\n" " maxconn 98\n" " bind 10.0.0.2:443\n" " mode tcp\n" " default_backend sample_pool_id_1\n\n") be = ("backend sample_pool_id_1\n" " mode tcp\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " timeout check 31\n" " option httpchk GET /index.html\n" " http-check expect rstatus %s\n" " option ssl-hello-chk\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3 cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3 cookie sample_member_id_2\n\n" % sample_configs.PIPED_CODES) rendered_obj = jinja_cfg.render_loadbalancer_obj( sample_configs.sample_loadbalancer_tuple(proto='HTTPS'), 'nogroup', '/sock_path', '/v2') self.assertEqual(sample_configs.sample_base_expected_config( frontend=fe, backend=be), rendered_obj) def test_render_template_no_monitor_http(self): be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " "cookie sample_member_id_2\n\n") rendered_obj = jinja_cfg.render_loadbalancer_obj( sample_configs.sample_loadbalancer_tuple( proto='HTTP', monitor=False), 'nogroup', '/sock_path', '/v2') self.assertEqual(sample_configs.sample_base_expected_config( backend=be), rendered_obj) def test_render_template_no_monitor_https(self): fe = ("frontend sample_listener_id_1\n" " option tcplog\n" " maxconn 98\n" " bind 10.0.0.2:443\n" " mode tcp\n" " default_backend sample_pool_id_1\n\n") be = ("backend sample_pool_id_1\n" " mode tcp\n" " balance roundrobin\n" " cookie SRV insert indirect nocache\n" " server sample_member_id_1 10.0.0.99:82 weight 13 " "cookie sample_member_id_1\n" " server sample_member_id_2 10.0.0.98:82 weight 13 " 
"cookie sample_member_id_2\n\n") rendered_obj = jinja_cfg.render_loadbalancer_obj( sample_configs.sample_loadbalancer_tuple( proto='HTTPS', monitor=False), 'nogroup', '/sock_path', '/v2') self.assertEqual(sample_configs.sample_base_expected_config( frontend=fe, backend=be), rendered_obj) def test_render_template_no_persistence_https(self): fe = ("frontend sample_listener_id_1\n" " option tcplog\n" " maxconn 98\n" " bind 10.0.0.2:443\n" " mode tcp\n" " default_backend sample_pool_id_1\n\n") be = ("backend sample_pool_id_1\n" " mode tcp\n" " balance roundrobin\n" " server sample_member_id_1 10.0.0.99:82 weight 13\n" " server sample_member_id_2 10.0.0.98:82 weight 13\n\n") rendered_obj = jinja_cfg.render_loadbalancer_obj( sample_configs.sample_loadbalancer_tuple( proto='HTTPS', monitor=False, persistence=False), 'nogroup', '/sock_path', '/v2') self.assertEqual(sample_configs.sample_base_expected_config( frontend=fe, backend=be), rendered_obj) def test_render_template_no_persistence_http(self): be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " server sample_member_id_1 10.0.0.99:82 weight 13\n" " server sample_member_id_2 10.0.0.98:82 weight 13\n\n") rendered_obj = jinja_cfg.render_loadbalancer_obj( sample_configs.sample_loadbalancer_tuple( proto='HTTP', monitor=False, persistence=False), 'nogroup', '/sock_path', '/v2') self.assertEqual(sample_configs.sample_base_expected_config( backend=be), rendered_obj) def test_render_template_sourceip_persistence(self): be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " stick-table type ip size 10k\n" " stick on src\n" " timeout check 31\n" " option httpchk GET /index.html\n" " http-check expect rstatus %s\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3\n\n" % sample_configs.PIPED_CODES) rendered_obj = jinja_cfg.render_loadbalancer_obj( 
sample_configs.sample_loadbalancer_tuple( persistence_type='SOURCE_IP'), 'nogroup', '/sock_path', '/v2') self.assertEqual( sample_configs.sample_base_expected_config(backend=be), rendered_obj) def test_render_template_appsession_persistence(self): with mock.patch('os.makedirs') as md: with mock.patch.object(jinja_cfg, 'n_utils'): md.return_value = '/data/dirs/' be = ("backend sample_pool_id_1\n" " mode http\n" " balance roundrobin\n" " appsession APP_COOKIE len 56 timeout 3h\n" " timeout check 31\n" " option httpchk GET /index.html\n" " http-check expect rstatus %s\n" " server sample_member_id_1 10.0.0.99:82 " "weight 13 check inter 30s fall 3\n" " server sample_member_id_2 10.0.0.98:82 " "weight 13 check inter 30s fall 3\n\n" % sample_configs.PIPED_CODES) rendered_obj = jinja_cfg.render_loadbalancer_obj( sample_configs.sample_loadbalancer_tuple( persistence_type='APP_COOKIE'), 'nogroup', '/sock_path', '/v2') self.assertEqual( sample_configs.sample_base_expected_config(backend=be), rendered_obj) def test_retrieve_crt_path(self): with mock.patch('os.makedirs'): with mock.patch('os.path.isdir') as isdir: with mock.patch.object(jinja_cfg, '_retrieve_crt_path') as rcp: isdir.return_value = True rcp.return_value = '/v2/loadbalancers/lb_id_1/' \ 'cont_id_1.pem' ret = jinja_cfg._retrieve_crt_path( '/v2/loadbalancers', 'lb_id_1', 'cont_id_1') self.assertEqual( '/v2/loadbalancers/lb_id_1/cont_id_1.pem', ret) def test_store_listener_crt(self): l = sample_configs.sample_listener_tuple(tls=True, sni=True) with mock.patch('os.makedirs'): with mock.patch('neutron.common.utils.replace_file'): ret = jinja_cfg._store_listener_crt( '/v2/loadbalancers', l, l.default_tls_container) self.assertEqual( '/v2/loadbalancers/sample_listener_id_1/fakeCNM.pem', ret) def test_process_tls_certificates(self): sl = sample_configs.sample_listener_tuple(tls=True, sni=True) tls = data_models.TLSContainer(primary_cn='fakeCN', certificate='imaCert', private_key='imaPrivateKey', 
intermediates=['imainter1', 'imainter2']) cert = mock.Mock(spec=cert_manager.Cert) cert.get_private_key.return_value = tls.private_key cert.get_certificate.return_value = tls.certificate cert.get_intermediates.return_value = tls.intermediates with contextlib.nested( mock.patch.object(jinja_cfg, '_map_cert_tls_container'), mock.patch.object(jinja_cfg, '_store_listener_crt'), mock.patch.object(cert_parser, 'get_host_names'), mock.patch.object(jinja_cfg, 'CERT_MANAGER_PLUGIN') ) as (map, store_cert, get_host_names, cert_mgr): map.return_value = tls cert_mgr_mock = mock.Mock(spec=cert_manager.CertManager) cert_mgr_mock.get_cert.return_value = cert cert_mgr.CertManager.return_value = cert_mgr_mock get_host_names.return_value = {'cn': 'fakeCN'} jinja_cfg._process_tls_certificates(sl) # Ensure get_cert is called three times calls_certs = [ mock.call(sl.default_tls_container.id), mock.call('cont_id_2'), mock.call('cont_id_3')] cert_mgr_mock.get_cert.call_args_list == calls_certs # Ensure store_cert is called three times calls_ac = [mock.call('/v2/', 'sample_listener_id_1', tls), mock.call('/v2/', 'sample_listener_id_1', tls), mock.call('/v2/', 'sample_listener_id_1', tls)] store_cert.call_args_list == calls_ac def test_get_primary_cn(self): cert = mock.MagicMock() with mock.patch.object(cert_parser, 'get_host_names') as cp: cp.return_value = {'cn': 'fakeCN'} cn = jinja_cfg._get_primary_cn(cert.get_certificate()) self.assertEqual('fakeCN', cn) def test_map_cert_tls_container(self): tls = data_models.TLSContainer(primary_cn='fakeCN', certificate='imaCert', private_key='imaPrivateKey', intermediates=['imainter1', 'imainter2']) cert = mock.MagicMock() cert.get_private_key.return_value = tls.private_key cert.get_certificate.return_value = tls.certificate cert.get_intermediates.return_value = tls.intermediates cert.get_private_key_passphrase.return_value = 'passphrase' with mock.patch.object(cert_parser, 'get_host_names') as cp: with mock.patch.object(cert_parser, 
'dump_private_key') as dp: cp.return_value = {'cn': 'fakeCN'} dp.return_value = 'imaPrivateKey' self.assertEqual(tls.primary_cn, jinja_cfg._map_cert_tls_container( cert).primary_cn) self.assertEqual(tls.certificate, jinja_cfg._map_cert_tls_container( cert).certificate) self.assertEqual(tls.private_key, jinja_cfg._map_cert_tls_container( cert).private_key) self.assertEqual(tls.intermediates, jinja_cfg._map_cert_tls_container( cert).intermediates) def test_build_pem(self): expected = 'imainter\nimainter2\nimacert\nimakey' tls_tupe = sample_configs.sample_tls_container_tuple( certificate='imacert', private_key='imakey', intermediates=['imainter', 'imainter2']) self.assertEqual(expected, jinja_cfg._build_pem(tls_tupe)) def test_transform_session_persistence(self): in_persistence = sample_configs.sample_session_persistence_tuple() ret = jinja_cfg._transform_session_persistence(in_persistence) self.assertEqual(sample_configs.RET_PERSISTENCE, ret) def test_transform_health_monitor(self): in_persistence = sample_configs.sample_health_monitor_tuple() ret = jinja_cfg._transform_health_monitor(in_persistence) self.assertEqual(sample_configs.RET_MONITOR, ret) def test_transform_member(self): in_member = sample_configs.sample_member_tuple('sample_member_id_1', '10.0.0.99') ret = jinja_cfg._transform_member(in_member) self.assertEqual(sample_configs.RET_MEMBER_1, ret) def test_transform_pool(self): in_pool = sample_configs.sample_pool_tuple() ret = jinja_cfg._transform_pool(in_pool) self.assertEqual(sample_configs.RET_POOL, ret) def test_transform_pool_admin_state_down(self): in_pool = sample_configs.sample_pool_tuple(hm_admin_state=False) ret = jinja_cfg._transform_pool(in_pool) result = sample_configs.RET_POOL result['health_monitor'] = '' self.assertEqual(result, ret) def test_transform_listener(self): in_listener = sample_configs.sample_listener_tuple() ret = jinja_cfg._transform_listener(in_listener, '/v2') self.assertEqual(sample_configs.RET_LISTENER, ret) def 
test_transform_loadbalancer(self): in_lb = sample_configs.sample_loadbalancer_tuple() ret = jinja_cfg._transform_loadbalancer(in_lb, '/v2') self.assertEqual(sample_configs.RET_LB, ret) def test_include_member(self): ret = jinja_cfg._include_member( sample_configs.sample_member_tuple('sample_member_id_1', '10.0.0.99')) self.assertTrue(ret) def test_include_member_invalid_status(self): ret = jinja_cfg._include_member( sample_configs.sample_member_tuple('sample_member_id_1', '10.0.0.99', status='PENDING')) self.assertFalse(ret) def test_include_member_invalid_admin_state(self): ret = jinja_cfg._include_member( sample_configs.sample_member_tuple('sample_member_id_1', '10.0.0.99', admin_state_up=False)) self.assertFalse(ret) def test_expand_expected_codes(self): exp_codes = '' self.assertEqual(set([]), jinja_cfg._expand_expected_codes(exp_codes)) exp_codes = '200' self.assertEqual(set(['200']), jinja_cfg._expand_expected_codes(exp_codes)) exp_codes = '200, 201' self.assertEqual(set(['200', '201']), jinja_cfg._expand_expected_codes(exp_codes)) exp_codes = '200, 201,202' self.assertEqual(set(['200', '201', '202']), jinja_cfg._expand_expected_codes(exp_codes)) exp_codes = '200-202' self.assertEqual(set(['200', '201', '202']), jinja_cfg._expand_expected_codes(exp_codes)) exp_codes = '200-202, 205' self.assertEqual(set(['200', '201', '202', '205']), jinja_cfg._expand_expected_codes(exp_codes)) exp_codes = '200, 201-203' self.assertEqual(set(['200', '201', '202', '203']), jinja_cfg._expand_expected_codes(exp_codes)) exp_codes = '200, 201-203, 205' self.assertEqual(set(['200', '201', '202', '203', '205']), jinja_cfg._expand_expected_codes(exp_codes)) exp_codes = '201-200, 205' self.assertEqual(set(['205']), jinja_cfg._expand_expected_codes(exp_codes)) ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 
00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/haproxy/test_namespace_driver.pyneutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/drivers/haproxy/test_namespace_dr0000664000567000056710000006512512701407726035433 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import mock from neutron_lib import exceptions import six from neutron_lbaas.services.loadbalancer.drivers.haproxy \ import namespace_driver from neutron_lbaas.tests import base class TestHaproxyNSDriver(base.BaseTestCase): def setUp(self): super(TestHaproxyNSDriver, self).setUp() conf = mock.Mock() conf.haproxy.loadbalancer_state_path = '/the/path' conf.interface_driver = 'intdriver' conf.haproxy.user_group = 'test_group' conf.haproxy.send_gratuitous_arp = 3 self.conf = conf self.rpc_mock = mock.Mock() with mock.patch( 'neutron.common.utils.load_class_by_alias_or_classname'): self.driver = namespace_driver.HaproxyNSDriver( conf, self.rpc_mock ) self.vif_driver = mock.Mock() self.driver.vif_driver = self.vif_driver self.fake_config = { 'pool': {'id': 'pool_id', 'status': 'ACTIVE', 'admin_state_up': True}, 'vip': {'id': 'vip_id', 'port': {'id': 'port_id'}, 'address': '10.0.0.2', 'status': 'ACTIVE', 'admin_state_up': True} } def _ip_mock_call(self, ns=None): kwargs = {} if ns: kwargs['namespace'] = ns return mock.call(**kwargs) def test_get_name(self): 
self.assertEqual(namespace_driver.DRIVER_NAME, self.driver.get_name()) def test_create(self): with mock.patch.object(self.driver, '_plug') as plug: with mock.patch.object(self.driver, '_spawn') as spawn: self.driver.create(self.fake_config) plug.assert_called_once_with( 'qlbaas-pool_id', {'id': 'port_id'}, '10.0.0.2' ) spawn.assert_called_once_with(self.fake_config) def test_update(self): with contextlib.nested( mock.patch.object(self.driver, '_get_state_file_path'), mock.patch.object(self.driver, '_spawn'), mock.patch.object(six.moves.builtins, 'open') ) as (gsp, spawn, mock_open): mock_open.return_value = ['5'] self.driver.update(self.fake_config) mock_open.assert_called_once_with(gsp.return_value, 'r') spawn.assert_called_once_with(self.fake_config, ['-sf', '5']) def test_spawn(self): with contextlib.nested( mock.patch.object(namespace_driver.hacfg, 'save_config'), mock.patch.object(self.driver, '_get_state_file_path'), mock.patch('neutron.agent.linux.ip_lib.IPWrapper') ) as (mock_save, gsp, ip_wrap): gsp.side_effect = lambda x, y: y self.driver._spawn(self.fake_config) mock_save.assert_called_once_with('conf', self.fake_config, 'sock', 'test_group') cmd = ['haproxy', '-f', 'conf', '-p', 'pid'] ip_wrap.assert_has_calls([ self._ip_mock_call('qlbaas-pool_id'), mock.call().netns.execute(cmd) ]) def test_undeploy_instance(self): with contextlib.nested( mock.patch.object(self.driver, '_get_state_file_path'), mock.patch.object(namespace_driver, 'kill_pids_in_file'), mock.patch.object(self.driver, '_unplug'), mock.patch('neutron.agent.linux.ip_lib.IPWrapper'), mock.patch('os.path.isdir'), mock.patch('shutil.rmtree') ) as (gsp, kill, unplug, ip_wrap, isdir, rmtree): gsp.side_effect = lambda x, y: '/pool/' + y self.driver.pool_to_port_id['pool_id'] = 'port_id' isdir.return_value = True self.driver.undeploy_instance('pool_id', delete_namespace=True) kill.assert_called_once_with('/pool/pid') unplug.assert_called_once_with('qlbaas-pool_id', 'port_id') 
isdir.assert_called_once_with('/pool') rmtree.assert_called_once_with('/pool') ip_wrap.assert_has_calls([ self._ip_mock_call('qlbaas-pool_id'), mock.call().garbage_collect_namespace() ]) def test_undeploy_instance_with_ns_cleanup(self): with contextlib.nested( mock.patch.object(self.driver, '_get_state_file_path'), mock.patch.object(self.driver, 'vif_driver'), mock.patch.object(namespace_driver, 'kill_pids_in_file'), mock.patch('neutron.agent.linux.ip_lib.IPWrapper'), mock.patch('os.path.isdir'), mock.patch('shutil.rmtree') ) as (gsp, vif, kill, ip_wrap, isdir, rmtree): device = mock.Mock() device_name = 'port_device' device.name = device_name ip_wrap.return_value.get_devices.return_value = [device] self.driver.undeploy_instance('pool_id', cleanup_namespace=True) vif.unplug.assert_called_once_with(device_name, namespace='qlbaas-pool_id') def test_remove_orphans(self): with contextlib.nested( mock.patch.object(self.driver, 'exists'), mock.patch.object(self.driver, 'undeploy_instance'), mock.patch('os.listdir'), mock.patch('os.path.exists') ) as (exists, undeploy, listdir, path_exists): known = ['known1', 'known2'] unknown = ['unknown1', 'unknown2'] listdir.return_value = known + unknown exists.side_effect = lambda x: x == 'unknown2' self.driver.remove_orphans(known) undeploy.assert_called_once_with('unknown2', cleanup_namespace=True) def test_exists(self): with contextlib.nested( mock.patch.object(self.driver, '_get_state_file_path'), mock.patch('neutron.agent.linux.ip_lib.IPWrapper'), mock.patch('socket.socket'), mock.patch('os.path.exists'), ) as (gsp, ip_wrap, socket, path_exists): gsp.side_effect = lambda x, y, z: '/pool/' + y ip_wrap.return_value.netns.exists.return_value = True path_exists.return_value = True self.driver.exists('pool_id') ip_wrap.assert_has_calls([ self._ip_mock_call(), mock.call().netns.exists('qlbaas-pool_id') ]) self.assertTrue(self.driver.exists('pool_id')) def test_get_stats(self): raw_stats = ('# 
pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,' 'dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,' 'act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,' 'sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,' 'check_status,check_code,check_duration,hrsp_1xx,' 'hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,' 'req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,\n' '8e271901-69ed-403e-a59b-f53cf77ef208,BACKEND,1,2,3,4,0,' '10,7764,2365,0,0,,0,0,0,0,UP,1,1,0,,0,103780,0,,1,2,0,,0' ',,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,\n\n' 'a557019b-dc07-4688-9af4-f5cf02bb6d4b,' '32a6c2a3-420a-44c3-955d-86bd2fc6871e,0,0,0,1,,7,1120,' '224,,0,,0,0,0,0,UP,1,1,0,0,1,2623,303,,1,2,1,,7,,2,0,,' '1,L7OK,200,98,0,7,0,0,0,0,0,,,,0,0,\n' 'a557019b-dc07-4688-9af4-f5cf02bb6d4b,' 'd9aea044-8867-4e80-9875-16fb808fa0f9,0,0,0,2,,12,0,0,,' '0,,0,0,8,4,DOWN,1,1,0,9,2,308,675,,1,2,2,,4,,2,0,,2,' 'L4CON,,2999,0,0,0,0,0,0,0,,,,0,0,\n') raw_stats_empty = ('# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,' 'bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,' 'status,weight,act,bck,chkfail,chkdown,lastchg,' 'downtime,qlimit,pid,iid,sid,throttle,lbtot,' 'tracked,type,rate,rate_lim,rate_max,check_status,' 'check_code,check_duration,hrsp_1xx,hrsp_2xx,' 'hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,' 'req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,' '\n') with contextlib.nested( mock.patch.object(self.driver, '_get_state_file_path'), mock.patch('socket.socket'), mock.patch('os.path.exists'), ) as (gsp, socket, path_exists): gsp.side_effect = lambda x, y, z: '/pool/' + y path_exists.return_value = True socket.return_value = socket socket.recv.return_value = raw_stats exp_stats = {'connection_errors': '0', 'active_connections': '3', 'current_sessions': '3', 'bytes_in': '7764', 'max_connections': '4', 'max_sessions': '4', 'bytes_out': '2365', 'response_errors': '0', 'total_sessions': '10', 'total_connections': '10', 'members': { '32a6c2a3-420a-44c3-955d-86bd2fc6871e': { 'status': 'ACTIVE', 
'health': 'L7OK', 'failed_checks': '0' }, 'd9aea044-8867-4e80-9875-16fb808fa0f9': { 'status': 'INACTIVE', 'health': 'L4CON', 'failed_checks': '9' } } } stats = self.driver.get_stats('pool_id') self.assertEqual(exp_stats, stats) socket.recv.return_value = raw_stats_empty self.assertEqual({'members': {}}, self.driver.get_stats('pool_id')) path_exists.return_value = False socket.reset_mock() self.assertEqual({}, self.driver.get_stats('pool_id')) self.assertFalse(socket.called) def test_plug(self): test_port = {'id': 'port_id', 'network_id': 'net_id', 'mac_address': 'mac_addr', 'fixed_ips': [{'ip_address': '10.0.0.2', 'subnet': {'cidr': '10.0.0.0/24', 'gateway_ip': '10.0.0.1'}}]} test_address = '10.0.0.2' with contextlib.nested( mock.patch('neutron.agent.linux.ip_lib.device_exists'), mock.patch('netaddr.IPNetwork'), mock.patch('neutron.agent.linux.ip_lib.IPWrapper'), ) as (dev_exists, ip_net, ip_wrap): self.vif_driver.get_device_name.return_value = 'test_interface' dev_exists.return_value = False ip_net.return_value = ip_net ip_net.prefixlen = 24 self.driver._plug('test_ns', test_port, test_address) self.rpc_mock.plug_vip_port.assert_called_once_with( test_port['id']) self.assertTrue(dev_exists.called) self.vif_driver.plug.assert_called_once_with('net_id', 'port_id', 'test_interface', 'mac_addr', namespace='test_ns') self.vif_driver.init_l3.assert_called_once_with( 'test_interface', ['10.0.0.2/24'], namespace='test_ns' ) cmd = ['route', 'add', 'default', 'gw', '10.0.0.1'] cmd_arping = ['arping', '-U', '-I', 'test_interface', '-c', self.conf.haproxy.send_gratuitous_arp, '10.0.0.2'] ip_wrap.assert_has_calls([ self._ip_mock_call('test_ns'), mock.call().netns.execute(cmd, check_exit_code=False), mock.call().netns.execute(cmd_arping, check_exit_code=False), ]) dev_exists.return_value = True self.assertRaises(exceptions.PreexistingDeviceFailure, self.driver._plug, 'test_ns', test_port, test_address, False) def test_plug_not_send_gratuitous_arp(self): 
self.conf.haproxy.send_gratuitous_arp = 0 test_port = {'id': 'port_id', 'network_id': 'net_id', 'mac_address': 'mac_addr', 'fixed_ips': [{'ip_address': '10.0.0.2', 'subnet': {'cidr': '10.0.0.0/24', 'gateway_ip': '10.0.0.1'}}]} test_address = '10.0.0.2' with contextlib.nested( mock.patch('neutron.agent.linux.ip_lib.device_exists'), mock.patch('netaddr.IPNetwork'), mock.patch('neutron.agent.linux.ip_lib.IPWrapper'), ) as (dev_exists, ip_net, ip_wrap): self.vif_driver.get_device_name.return_value = 'test_interface' dev_exists.return_value = False ip_net.return_value = ip_net ip_net.prefixlen = 24 self.driver._plug('test_ns', test_port, test_address) cmd = ['route', 'add', 'default', 'gw', '10.0.0.1'] expected = [ self._ip_mock_call('test_ns'), mock.call().netns.execute(cmd, check_exit_code=False)] self.assertEqual(expected, ip_wrap.mock_calls) def test_plug_no_gw(self): test_port = {'id': 'port_id', 'network_id': 'net_id', 'mac_address': 'mac_addr', 'fixed_ips': [{'ip_address': '10.0.0.2', 'subnet': {'cidr': '10.0.0.0/24'}}]} test_address = '10.0.0.2' with contextlib.nested( mock.patch('neutron.agent.linux.ip_lib.device_exists'), mock.patch('netaddr.IPNetwork'), mock.patch('neutron.agent.linux.ip_lib.IPWrapper'), ) as (dev_exists, ip_net, ip_wrap): self.vif_driver.get_device_name.return_value = 'test_interface' dev_exists.return_value = False ip_net.return_value = ip_net ip_net.prefixlen = 24 self.driver._plug('test_ns', test_port, test_address) self.rpc_mock.plug_vip_port.assert_called_once_with( test_port['id']) self.assertTrue(dev_exists.called) self.vif_driver.plug.assert_called_once_with('net_id', 'port_id', 'test_interface', 'mac_addr', namespace='test_ns') self.vif_driver.init_l3.assert_called_once_with( 'test_interface', ['10.0.0.2/24'], namespace='test_ns' ) self.assertFalse(ip_wrap.called) dev_exists.return_value = True self.assertRaises(exceptions.PreexistingDeviceFailure, self.driver._plug, 'test_ns', test_port, test_address, False) def 
test_plug_gw_in_host_routes(self): test_port = {'id': 'port_id', 'network_id': 'net_id', 'mac_address': 'mac_addr', 'fixed_ips': [{'ip_address': '10.0.0.2', 'subnet': {'cidr': '10.0.0.0/24', 'host_routes': [{'destination': '0.0.0.0/0', 'nexthop': '10.0.0.1'}]}}]} test_address = '10.0.0.2' with contextlib.nested( mock.patch('neutron.agent.linux.ip_lib.device_exists'), mock.patch('netaddr.IPNetwork'), mock.patch('neutron.agent.linux.ip_lib.IPWrapper'), ) as (dev_exists, ip_net, ip_wrap): self.vif_driver.get_device_name.return_value = 'test_interface' dev_exists.return_value = False ip_net.return_value = ip_net ip_net.prefixlen = 24 self.driver._plug('test_ns', test_port, test_address) self.rpc_mock.plug_vip_port.assert_called_once_with( test_port['id']) self.assertTrue(dev_exists.called) self.vif_driver.plug.assert_called_once_with('net_id', 'port_id', 'test_interface', 'mac_addr', namespace='test_ns') self.vif_driver.init_l3.assert_called_once_with( 'test_interface', ['10.0.0.2/24'], namespace='test_ns' ) cmd = ['route', 'add', 'default', 'gw', '10.0.0.1'] ip_wrap.assert_has_calls([ self._ip_mock_call('test_ns'), mock.call().netns.execute(cmd, check_exit_code=False), ]) def test_unplug(self): self.vif_driver.get_device_name.return_value = 'test_interface' self.driver._unplug('test_ns', 'port_id') self.rpc_mock.unplug_vip_port.assert_called_once_with('port_id') self.vif_driver.unplug('test_interface', namespace='test_ns') def test_kill_pids_in_file(self): with contextlib.nested( mock.patch('os.path.exists'), mock.patch.object(six.moves.builtins, 'open'), mock.patch('neutron.agent.linux.utils.execute'), mock.patch.object(namespace_driver.LOG, 'exception'), ) as (path_exists, mock_open, mock_execute, mock_log): file_mock = mock.MagicMock() mock_open.return_value = file_mock file_mock.__enter__.return_value = file_mock file_mock.__iter__.return_value = iter(['123']) path_exists.return_value = False namespace_driver.kill_pids_in_file('test_path') 
path_exists.assert_called_once_with('test_path') self.assertFalse(mock_open.called) self.assertFalse(mock_execute.called) path_exists.return_value = True mock_execute.side_effect = RuntimeError namespace_driver.kill_pids_in_file('test_path') self.assertTrue(mock_log.called) mock_execute.assert_called_once_with( ['kill', '-9', '123'], run_as_root=True) def test_get_state_file_path(self): with mock.patch('os.makedirs') as mkdir: path = self.driver._get_state_file_path('pool_id', 'conf') self.assertEqual('/the/path/pool_id/conf', path) mkdir.assert_called_once_with('/the/path/pool_id', 0o755) def test_deploy_instance(self): with mock.patch.object(self.driver, 'exists') as exists: with mock.patch.object(self.driver, 'update') as update: self.driver.deploy_instance(self.fake_config) exists.assert_called_once_with(self.fake_config['pool']['id']) update.assert_called_once_with(self.fake_config) def test_deploy_instance_non_existing(self): with mock.patch.object(self.driver, 'exists') as exists: with mock.patch.object(self.driver, 'create') as create: exists.return_value = False self.driver.deploy_instance(self.fake_config) exists.assert_called_once_with(self.fake_config['pool']['id']) create.assert_called_once_with(self.fake_config) def test_deploy_instance_vip_status_non_active(self): with mock.patch.object(self.driver, 'exists') as exists: self.fake_config['vip']['status'] = 'NON_ACTIVE' self.driver.deploy_instance(self.fake_config) self.assertFalse(exists.called) def test_deploy_instance_vip_admin_state_down(self): with mock.patch.object(self.driver, 'exists') as exists: self.fake_config['vip']['admin_state_up'] = False self.driver.deploy_instance(self.fake_config) self.assertFalse(exists.called) def test_deploy_instance_no_vip(self): with mock.patch.object(self.driver, 'exists') as exists: del self.fake_config['vip'] self.driver.deploy_instance(self.fake_config) self.assertFalse(exists.called) def test_deploy_instance_pool_status_non_active(self): with 
mock.patch.object(self.driver, 'exists') as exists: self.fake_config['pool']['status'] = 'NON_ACTIVE' self.driver.deploy_instance(self.fake_config) self.assertFalse(exists.called) def test_deploy_instance_pool_admin_state_down(self): with mock.patch.object(self.driver, 'exists') as exists: with mock.patch.object(self.driver, 'update') as update: self.fake_config['pool']['admin_state_up'] = False self.driver.deploy_instance(self.fake_config) exists.assert_called_once_with(self.fake_config['pool']['id']) update.assert_called_once_with(self.fake_config) def test_refresh_device(self): with contextlib.nested( mock.patch.object(self.driver, 'deploy_instance'), mock.patch.object(self.driver, 'undeploy_instance') ) as (deploy, undeploy): pool_id = 'pool_id1' self.driver._refresh_device(pool_id) self.rpc_mock.get_logical_device.assert_called_once_with(pool_id) deploy.assert_called_once_with( self.rpc_mock.get_logical_device.return_value) self.assertFalse(undeploy.called) def test_refresh_device_not_deployed(self): with contextlib.nested( mock.patch.object(self.driver, 'deploy_instance'), mock.patch.object(self.driver, 'exists'), mock.patch.object(self.driver, 'undeploy_instance') ) as (deploy, exists, undeploy): pool_id = 'pool_id1' deploy.return_value = False exists.return_value = True self.driver._refresh_device(pool_id) undeploy.assert_called_once_with(pool_id) def test_refresh_device_non_existing(self): with contextlib.nested( mock.patch.object(self.driver, 'deploy_instance'), mock.patch.object(self.driver, 'exists'), mock.patch.object(self.driver, 'undeploy_instance') ) as (deploy, exists, undeploy): pool_id = 'pool_id1' deploy.return_value = False exists.return_value = False self.driver._refresh_device(pool_id) self.assertFalse(undeploy.called) def test_create_vip(self): with mock.patch.object(self.driver, '_refresh_device') as refresh: self.driver.create_vip({'pool_id': '1'}) refresh.assert_called_once_with('1') def test_update_vip(self): with 
mock.patch.object(self.driver, '_refresh_device') as refresh: self.driver.update_vip({}, {'pool_id': '1'}) refresh.assert_called_once_with('1') def test_delete_vip(self): with mock.patch.object(self.driver, 'undeploy_instance') as undeploy: self.driver.delete_vip({'pool_id': '1'}) undeploy.assert_called_once_with('1') def test_create_pool(self): with mock.patch.object(self.driver, '_refresh_device') as refresh: self.driver.create_pool({'id': '1'}) self.assertFalse(refresh.called) def test_update_pool(self): with mock.patch.object(self.driver, '_refresh_device') as refresh: self.driver.update_pool({}, {'id': '1'}) refresh.assert_called_once_with('1') def test_delete_pool_existing(self): with contextlib.nested( mock.patch.object(self.driver, 'undeploy_instance'), mock.patch.object(self.driver, 'exists'), ) as (undeploy, exists): exists.return_value = True self.driver.delete_pool({'id': '1'}) undeploy.assert_called_once_with('1', delete_namespace=True) def test_delete_pool_non_existing(self): with contextlib.nested( mock.patch.object(self.driver, 'undeploy_instance'), mock.patch.object(self.driver, 'exists'), ) as (undeploy, exists): exists.return_value = False self.driver.delete_pool({'id': '1'}) self.assertFalse(undeploy.called) def test_create_member(self): with mock.patch.object(self.driver, '_refresh_device') as refresh: self.driver.create_member({'pool_id': '1'}) refresh.assert_called_once_with('1') def test_update_member(self): with mock.patch.object(self.driver, '_refresh_device') as refresh: self.driver.update_member({}, {'pool_id': '1'}) refresh.assert_called_once_with('1') def test_delete_member(self): with mock.patch.object(self.driver, '_refresh_device') as refresh: self.driver.delete_member({'pool_id': '1'}) refresh.assert_called_once_with('1') def test_create_pool_health_monitor(self): with mock.patch.object(self.driver, '_refresh_device') as refresh: self.driver.create_pool_health_monitor('', '1') refresh.assert_called_once_with('1') def 
test_update_pool_health_monitor(self): with mock.patch.object(self.driver, '_refresh_device') as refresh: self.driver.update_pool_health_monitor('', '', '1') refresh.assert_called_once_with('1') def test_delete_pool_health_monitor(self): with mock.patch.object(self.driver, '_refresh_device') as refresh: self.driver.delete_pool_health_monitor('', '1') refresh.assert_called_once_with('1') neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/test_agent_scheduler.py0000664000567000056710000002367112701407726033425 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from neutron.api import extensions from neutron.api.v2 import attributes from neutron import context from neutron.extensions import agent from neutron import manager from neutron.plugins.common import constants as plugin_const from neutron.tests.common import helpers from neutron.tests.unit.api import test_extensions from neutron.tests.unit.db import test_agentschedulers_db from neutron.tests.unit.extensions import test_agent from neutron_lib import constants from oslo_config import cfg import six from webob import exc from neutron_lbaas.extensions import lbaas_agentscheduler from neutron_lbaas.extensions import loadbalancer from neutron_lbaas.tests import base from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer LBAAS_HOSTA = 'hosta' class AgentSchedulerTestMixIn(test_agentschedulers_db.AgentSchedulerTestMixIn): def _list_pools_hosted_by_lbaas_agent(self, agent_id, expected_code=exc.HTTPOk.code, admin_context=True): path = "/agents/%s/%s.%s" % (agent_id, lbaas_agentscheduler.LOADBALANCER_POOLS, self.fmt) return self._request_list(path, expected_code=expected_code, admin_context=admin_context) def _get_lbaas_agent_hosting_pool(self, pool_id, expected_code=exc.HTTPOk.code, admin_context=True): path = "/lb/pools/%s/%s.%s" % (pool_id, lbaas_agentscheduler.LOADBALANCER_AGENT, self.fmt) return self._request_list(path, expected_code=expected_code, admin_context=admin_context) class LBaaSAgentSchedulerTestCase(test_agent.AgentDBTestMixIn, AgentSchedulerTestMixIn, test_db_loadbalancer.LoadBalancerTestMixin, base.NeutronDbPluginV2TestCase): fmt = 'json' plugin_str = 'neutron.plugins.ml2.plugin.Ml2Plugin' def setUp(self): # Save the global RESOURCE_ATTRIBUTE_MAP self.saved_attr_map = {} for res, attrs in six.iteritems(attributes.RESOURCE_ATTRIBUTE_MAP): self.saved_attr_map[res] = attrs.copy() service_plugins = { 'lb_plugin_name': test_db_loadbalancer.DB_LB_PLUGIN_KLASS} # default provider should support agent scheduling 
self.set_override([('LOADBALANCER:lbaas:neutron_lbaas.services.' 'loadbalancer.drivers.haproxy.plugin_driver.' 'HaproxyOnHostPluginDriver:default')]) super(LBaaSAgentSchedulerTestCase, self).setUp( self.plugin_str, service_plugins=service_plugins) ext_mgr = extensions.PluginAwareExtensionManager.get_instance() self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) self.adminContext = context.get_admin_context() # Add the resources to the global attribute map # This is done here as the setup process won't # initialize the main API router which extends # the global attribute map attributes.RESOURCE_ATTRIBUTE_MAP.update( agent.RESOURCE_ATTRIBUTE_MAP) self.addCleanup(self.restore_attribute_map) def restore_attribute_map(self): # Restore the original RESOURCE_ATTRIBUTE_MAP attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map def test_report_states(self): self._register_agent_states(lbaas_agents=True) agents = self._list_agents() self.assertEqual(6, len(agents['agents'])) def test_pool_scheduling_on_pool_creation(self): self._register_agent_states(lbaas_agents=True) with self.pool() as pool: lbaas_agent = self._get_lbaas_agent_hosting_pool( pool['pool']['id']) self.assertIsNotNone(lbaas_agent) self.assertEqual(constants.AGENT_TYPE_LOADBALANCER, lbaas_agent['agent']['agent_type']) pools = self._list_pools_hosted_by_lbaas_agent( lbaas_agent['agent']['id']) self.assertEqual(1, len(pools['pools'])) self.assertEqual(pool['pool'], pools['pools'][0]) def test_schedule_pool_with_disabled_agent(self): lbaas_hosta = { 'binary': 'neutron-loadbalancer-agent', 'host': LBAAS_HOSTA, 'topic': 'LOADBALANCER_AGENT', 'configurations': {'device_drivers': ['haproxy_ns']}, 'agent_type': constants.AGENT_TYPE_LOADBALANCER} helpers._register_agent(lbaas_hosta) with self.pool() as pool: lbaas_agent = self._get_lbaas_agent_hosting_pool( pool['pool']['id']) self.assertIsNotNone(lbaas_agent) agents = self._list_agents() self._disable_agent(agents['agents'][0]['id']) pool = {'pool': 
{'name': 'test', 'subnet_id': 'test', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'admin_state_up': True, 'tenant_id': 'test', 'description': 'test'}} lbaas_plugin = manager.NeutronManager.get_service_plugins()[ plugin_const.LOADBALANCER] self.assertRaises(loadbalancer.NoEligibleBackend, lbaas_plugin.create_pool, self.adminContext, pool) pools = lbaas_plugin.get_pools(self.adminContext) self.assertEqual('ERROR', pools[0]['status']) self.assertEqual('No eligible backend', pools[0]['status_description']) def test_schedule_pool_with_down_agent(self): lbaas_hosta = { 'binary': 'neutron-loadbalancer-agent', 'host': LBAAS_HOSTA, 'topic': 'LOADBALANCER_AGENT', 'configurations': {'device_drivers': ['haproxy_ns']}, 'agent_type': constants.AGENT_TYPE_LOADBALANCER} helpers._register_agent(lbaas_hosta) is_agent_down_str = 'neutron.db.agents_db.AgentDbMixin.is_agent_down' with mock.patch(is_agent_down_str) as mock_is_agent_down: mock_is_agent_down.return_value = False with self.pool() as pool: lbaas_agent = self._get_lbaas_agent_hosting_pool( pool['pool']['id']) self.assertIsNotNone(lbaas_agent) with mock.patch(is_agent_down_str) as mock_is_agent_down: mock_is_agent_down.return_value = True pool = {'pool': {'name': 'test', 'subnet_id': 'test', 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'provider': 'lbaas', 'admin_state_up': True, 'tenant_id': 'test', 'description': 'test'}} lbaas_plugin = manager.NeutronManager.get_service_plugins()[ plugin_const.LOADBALANCER] self.assertRaises(loadbalancer.NoEligibleBackend, lbaas_plugin.create_pool, self.adminContext, pool) pools = lbaas_plugin.get_pools(self.adminContext) self.assertEqual('ERROR', pools[0]['status']) self.assertEqual('No eligible backend', pools[0]['status_description']) def test_pool_unscheduling_on_pool_deletion(self): self._register_agent_states(lbaas_agents=True) with self.pool(do_delete=False) as pool: lbaas_agent = self._get_lbaas_agent_hosting_pool( pool['pool']['id']) self.assertIsNotNone(lbaas_agent) 
self.assertEqual(constants.AGENT_TYPE_LOADBALANCER, lbaas_agent['agent']['agent_type']) pools = self._list_pools_hosted_by_lbaas_agent( lbaas_agent['agent']['id']) self.assertEqual(1, len(pools['pools'])) self.assertEqual(pool['pool'], pools['pools'][0]) req = self.new_delete_request('pools', pool['pool']['id']) res = req.get_response(self.ext_api) self.assertEqual(exc.HTTPNoContent.code, res.status_int) pools = self._list_pools_hosted_by_lbaas_agent( lbaas_agent['agent']['id']) self.assertEqual(0, len(pools['pools'])) def test_pool_scheduling_non_admin_access(self): self._register_agent_states(lbaas_agents=True) with self.pool() as pool: self._get_lbaas_agent_hosting_pool( pool['pool']['id'], expected_code=exc.HTTPForbidden.code, admin_context=False) self._list_pools_hosted_by_lbaas_agent( 'fake_id', expected_code=exc.HTTPForbidden.code, admin_context=False) class LeastPoolAgentSchedulerTestCase(LBaaSAgentSchedulerTestCase): def setUp(self): # Setting LeastPoolAgentScheduler as scheduler cfg.CONF.set_override( 'loadbalancer_pool_scheduler_driver', 'neutron_lbaas.services.loadbalancer.' 'agent_scheduler.LeastPoolAgentScheduler') super(LeastPoolAgentSchedulerTestCase, self).setUp() neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/agent/0000775000567000056710000000000012701410110027722 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/agent/__init__.py0000664000567000056710000000000012701407726032044 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/agent/test_api.py0000664000567000056710000000545712701407726032142 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import copy import mock from neutron_lbaas.services.loadbalancer.agent import agent_api as api from neutron_lbaas.tests import base class TestApiCache(base.BaseTestCase): def setUp(self): super(TestApiCache, self).setUp() self.api = api.LbaasAgentApi('topic', mock.sentinel.context, 'host') def test_init(self): self.assertEqual('host', self.api.host) self.assertEqual(mock.sentinel.context, self.api.context) def _test_method(self, method, **kwargs): add_host = ('get_ready_devices', 'plug_vip_port', 'unplug_vip_port', 'update_pool_stats') expected_kwargs = copy.copy(kwargs) if method in add_host: expected_kwargs['host'] = self.api.host with contextlib.nested( mock.patch.object(self.api.client, 'call'), mock.patch.object(self.api.client, 'prepare'), ) as ( rpc_mock, prepare_mock ): prepare_mock.return_value = self.api.client rpc_mock.return_value = 'foo' rv = getattr(self.api, method)(**kwargs) self.assertEqual('foo', rv) prepare_args = {} prepare_mock.assert_called_once_with(**prepare_args) rpc_mock.assert_called_once_with(mock.sentinel.context, method, **expected_kwargs) def test_get_ready_devices(self): self._test_method('get_ready_devices') def test_get_logical_device(self): self._test_method('get_logical_device', pool_id='pool_id') def test_pool_destroyed(self): self._test_method('pool_destroyed', pool_id='pool_id') def test_pool_deployed(self): self._test_method('pool_deployed', pool_id='pool_id') def test_update_status(self): self._test_method('update_status', obj_type='type', obj_id='id', status='status') def test_plug_vip_port(self): 
self._test_method('plug_vip_port', port_id='port_id') def test_unplug_vip_port(self): self._test_method('unplug_vip_port', port_id='port_id') def test_update_pool_stats(self): self._test_method('update_pool_stats', pool_id='id', stats='stats') neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/agent/test_agent.py0000664000567000056710000000331712701407726032460 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import mock from oslo_config import cfg from neutron_lbaas.services.loadbalancer.agent import agent from neutron_lbaas.tests import base class TestLbaasService(base.BaseTestCase): def test_start(self): with mock.patch.object( agent.n_rpc.Service, 'start' ) as mock_start: mgr = mock.Mock() cfg.CONF.periodic_interval = mock.Mock(return_value=10) agent_service = agent.LbaasAgentService('host', 'topic', mgr) agent_service.start() self.assertTrue(mock_start.called) def test_main(self): logging_str = 'neutron.agent.common.config.setup_logging' with contextlib.nested( mock.patch(logging_str), mock.patch.object(agent.service, 'launch'), mock.patch('sys.argv'), mock.patch.object(agent.manager, 'LbaasAgentManager'), mock.patch.object(cfg.CONF, 'register_opts') ) as (mock_logging, mock_launch, sys_argv, mgr_cls, ro): agent.main() mock_launch.assert_called_once_with(mock.ANY, mock.ANY) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/agent/test_agent_manager.py0000664000567000056710000005137512701407726034161 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import mock from neutron.plugins.common import constants from neutron_lbaas.services.loadbalancer.agent import agent_manager as manager from neutron_lbaas.services.loadbalancer import constants as l_const from neutron_lbaas.tests import base class TestManager(base.BaseTestCase): def setUp(self): super(TestManager, self).setUp() mock_conf = mock.Mock() mock_conf.device_driver = ['devdriver'] self.mock_importer = mock.patch.object(manager, 'importutils').start() rpc_mock_cls = mock.patch( 'neutron_lbaas.services.loadbalancer.agent.agent_api.LbaasAgentApi' ).start() # disable setting up periodic state reporting mock_conf.AGENT.report_interval = 0 self.mgr = manager.LbaasAgentManager(mock_conf) self.rpc_mock = rpc_mock_cls.return_value self.log = mock.patch.object(manager, 'LOG').start() self.driver_mock = mock.Mock() self.mgr.device_drivers = {'devdriver': self.driver_mock} self.mgr.instance_mapping = {'1': 'devdriver', '2': 'devdriver'} self.mgr.needs_resync = False def test_initialize_service_hook(self): with mock.patch.object(self.mgr, 'sync_state') as sync: self.mgr.initialize_service_hook(mock.Mock()) sync.assert_called_once_with() def test_periodic_resync_needs_sync(self): with mock.patch.object(self.mgr, 'sync_state') as sync: self.mgr.needs_resync = True self.mgr.periodic_resync(mock.Mock()) sync.assert_called_once_with() def test_periodic_resync_no_sync(self): with mock.patch.object(self.mgr, 'sync_state') as sync: self.mgr.needs_resync = False self.mgr.periodic_resync(mock.Mock()) self.assertFalse(sync.called) def test_collect_stats(self): self.mgr.collect_stats(mock.Mock()) self.rpc_mock.update_pool_stats.assert_has_calls([ mock.call('1', mock.ANY), mock.call('2', mock.ANY) ], any_order=True) def test_collect_stats_exception(self): self.driver_mock.get_stats.side_effect = Exception self.mgr.collect_stats(mock.Mock()) self.assertFalse(self.rpc_mock.called) self.assertTrue(self.mgr.needs_resync) self.assertTrue(self.log.exception.called) def 
_sync_state_helper(self, ready, reloaded, destroyed): with contextlib.nested( mock.patch.object(self.mgr, '_reload_pool'), mock.patch.object(self.mgr, '_destroy_pool') ) as (reload, destroy): self.rpc_mock.get_ready_devices.return_value = ready self.mgr.sync_state() self.assertEqual(len(reloaded), len(reload.mock_calls)) self.assertEqual(len(destroyed), len(destroy.mock_calls)) reload.assert_has_calls([mock.call(i) for i in reloaded], any_order=True) destroy.assert_has_calls([mock.call(i) for i in destroyed], any_order=True) self.assertFalse(self.mgr.needs_resync) def test_sync_state_all_known(self): self._sync_state_helper(['1', '2'], ['1', '2'], []) def test_sync_state_all_unknown(self): self.mgr.instance_mapping = {} self._sync_state_helper(['1', '2'], ['1', '2'], []) def test_sync_state_destroy_all(self): self._sync_state_helper([], [], ['1', '2']) def test_sync_state_both(self): self.mgr.instance_mapping = {'1': 'devdriver'} self._sync_state_helper(['2'], ['2'], ['1']) def test_sync_state_exception(self): self.rpc_mock.get_ready_devices.side_effect = Exception self.mgr.sync_state() self.assertTrue(self.log.exception.called) self.assertTrue(self.mgr.needs_resync) def test_reload_pool(self): config = {'driver': 'devdriver'} self.rpc_mock.get_logical_device.return_value = config pool_id = 'new_id' self.assertNotIn(pool_id, self.mgr.instance_mapping) self.mgr._reload_pool(pool_id) self.driver_mock.deploy_instance.assert_called_once_with(config) self.assertIn(pool_id, self.mgr.instance_mapping) self.rpc_mock.pool_deployed.assert_called_once_with(pool_id) def test_reload_pool_driver_not_found(self): config = {'driver': 'unknown_driver'} self.rpc_mock.get_logical_device.return_value = config pool_id = 'new_id' self.assertNotIn(pool_id, self.mgr.instance_mapping) self.mgr._reload_pool(pool_id) self.assertTrue(self.log.error.called) self.assertFalse(self.driver_mock.deploy_instance.called) self.assertNotIn(pool_id, self.mgr.instance_mapping) 
self.assertFalse(self.rpc_mock.pool_deployed.called) def test_reload_pool_exception_on_driver(self): config = {'driver': 'devdriver'} self.rpc_mock.get_logical_device.return_value = config self.driver_mock.deploy_instance.side_effect = Exception pool_id = 'new_id' self.assertNotIn(pool_id, self.mgr.instance_mapping) self.mgr._reload_pool(pool_id) self.driver_mock.deploy_instance.assert_called_once_with(config) self.assertNotIn(pool_id, self.mgr.instance_mapping) self.assertFalse(self.rpc_mock.pool_deployed.called) self.assertTrue(self.log.exception.called) self.assertTrue(self.mgr.needs_resync) def test_destroy_pool(self): pool_id = '1' self.assertIn(pool_id, self.mgr.instance_mapping) self.mgr._destroy_pool(pool_id) self.driver_mock.undeploy_instance.assert_called_once_with( pool_id, delete_namespace=True) self.assertNotIn(pool_id, self.mgr.instance_mapping) self.rpc_mock.pool_destroyed.assert_called_once_with(pool_id) self.assertFalse(self.mgr.needs_resync) def test_destroy_pool_exception_on_driver(self): pool_id = '1' self.assertIn(pool_id, self.mgr.instance_mapping) self.driver_mock.undeploy_instance.side_effect = Exception self.mgr._destroy_pool(pool_id) self.driver_mock.undeploy_instance.assert_called_once_with( pool_id, delete_namespace=True) self.assertIn(pool_id, self.mgr.instance_mapping) self.assertFalse(self.rpc_mock.pool_destroyed.called) self.assertTrue(self.log.exception.called) self.assertTrue(self.mgr.needs_resync) def test_get_driver_unknown_device(self): self.assertRaises(manager.DeviceNotFoundOnAgent, self.mgr._get_driver, 'unknown') def test_remove_orphans(self): self.mgr.remove_orphans() orphans = {'1': "Fake", '2': "Fake"} self.driver_mock.remove_orphans.assert_called_once_with(orphans.keys()) def test_create_vip(self): vip = {'id': 'id1', 'pool_id': '1', 'admin_state_up': True} self.mgr.create_vip(mock.Mock(), vip) self.driver_mock.create_vip.assert_called_once_with(vip) self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'], 
constants.ACTIVE) def test_create_vip_with_admin_down(self): vip = {'id': 'id1', 'pool_id': '1', 'admin_state_up': False} self.mgr.create_vip(mock.Mock(), vip) self.driver_mock.create_vip.assert_called_once_with(vip) self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'], l_const.DISABLED) def test_create_vip_failed(self): vip = {'id': 'id1', 'pool_id': '1', 'admin_state_up': True} self.driver_mock.create_vip.side_effect = Exception self.mgr.create_vip(mock.Mock(), vip) self.driver_mock.create_vip.assert_called_once_with(vip) self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'], constants.ERROR) def test_update_vip(self): old_vip = {'id': 'id1', 'admin_state_up': True} vip = {'id': 'id1', 'pool_id': '1', 'admin_state_up': True} self.mgr.update_vip(mock.Mock(), old_vip, vip) self.driver_mock.update_vip.assert_called_once_with(old_vip, vip) self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'], constants.ACTIVE) def test_update_vip_with_admin_down(self): old_vip = {'id': 'id1', 'admin_state_up': True} vip = {'id': 'id1', 'pool_id': '1', 'admin_state_up': False} self.mgr.update_vip(mock.Mock(), old_vip, vip) self.driver_mock.update_vip.assert_called_once_with(old_vip, vip) self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'], l_const.DISABLED) def test_update_vip_failed(self): old_vip = {'id': 'id1', 'admin_state_up': True} vip = {'id': 'id1', 'pool_id': '1', 'admin_state_up': True} self.driver_mock.update_vip.side_effect = Exception self.mgr.update_vip(mock.Mock(), old_vip, vip) self.driver_mock.update_vip.assert_called_once_with(old_vip, vip) self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'], constants.ERROR) def test_delete_vip(self): vip = {'id': 'id1', 'pool_id': '1'} self.mgr.delete_vip(mock.Mock(), vip) self.driver_mock.delete_vip.assert_called_once_with(vip) def test_create_pool(self): pool = {'id': 'id1', 'admin_state_up': True} self.assertNotIn(pool['id'], 
self.mgr.instance_mapping) self.mgr.create_pool(mock.Mock(), pool, 'devdriver') self.driver_mock.create_pool.assert_called_once_with(pool) self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'], constants.ACTIVE) self.assertIn(pool['id'], self.mgr.instance_mapping) def test_create_pool_with_admin_down(self): pool = {'id': 'id1', 'admin_state_up': False} self.assertNotIn(pool['id'], self.mgr.instance_mapping) self.mgr.create_pool(mock.Mock(), pool, 'devdriver') self.driver_mock.create_pool.assert_called_once_with(pool) self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'], l_const.DISABLED) self.assertIn(pool['id'], self.mgr.instance_mapping) def test_create_pool_failed(self): pool = {'id': 'id1', 'admin_state_up': True} self.assertNotIn(pool['id'], self.mgr.instance_mapping) self.driver_mock.create_pool.side_effect = Exception self.mgr.create_pool(mock.Mock(), pool, 'devdriver') self.driver_mock.create_pool.assert_called_once_with(pool) self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'], constants.ERROR) self.assertNotIn(pool['id'], self.mgr.instance_mapping) def test_update_pool(self): old_pool = {'id': '1', 'admin_state_up': True} pool = {'id': '1', 'admin_state_up': True} self.mgr.update_pool(mock.Mock(), old_pool, pool) self.driver_mock.update_pool.assert_called_once_with(old_pool, pool) self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'], constants.ACTIVE) def test_update_pool_with_admin_down(self): old_pool = {'id': '1', 'admin_state_up': True} pool = {'id': '1', 'admin_state_up': False} self.mgr.update_pool(mock.Mock(), old_pool, pool) self.driver_mock.update_pool.assert_called_once_with(old_pool, pool) self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'], l_const.DISABLED) def test_update_pool_failed(self): old_pool = {'id': '1', 'admin_state_up': True} pool = {'id': '1', 'admin_state_up': True} self.driver_mock.update_pool.side_effect = Exception 
self.mgr.update_pool(mock.Mock(), old_pool, pool) self.driver_mock.update_pool.assert_called_once_with(old_pool, pool) self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'], constants.ERROR) def test_delete_pool(self): pool = {'id': '1'} self.assertIn(pool['id'], self.mgr.instance_mapping) self.mgr.delete_pool(mock.Mock(), pool) self.driver_mock.delete_pool.assert_called_once_with(pool) self.assertNotIn(pool['id'], self.mgr.instance_mapping) def test_create_member(self): member = {'id': 'id1', 'pool_id': '1', 'admin_state_up': True} self.mgr.create_member(mock.Mock(), member) self.driver_mock.create_member.assert_called_once_with(member) self.rpc_mock.update_status.assert_called_once_with('member', member['id'], constants.ACTIVE) def test_create_member_with_admin_down(self): member = {'id': 'id1', 'pool_id': '1', 'admin_state_up': False} self.mgr.create_member(mock.Mock(), member) self.driver_mock.create_member.assert_called_once_with(member) self.rpc_mock.update_status.assert_called_once_with('member', member['id'], l_const.DISABLED) def test_create_member_failed(self): member = {'id': 'id1', 'pool_id': '1', 'admin_state_up': True} self.driver_mock.create_member.side_effect = Exception self.mgr.create_member(mock.Mock(), member) self.driver_mock.create_member.assert_called_once_with(member) self.rpc_mock.update_status.assert_called_once_with('member', member['id'], constants.ERROR) def test_update_member(self): old_member = {'id': 'id1', 'admin_state_up': True} member = {'id': 'id1', 'pool_id': '1', 'admin_state_up': True} self.mgr.update_member(mock.Mock(), old_member, member) self.driver_mock.update_member.assert_called_once_with(old_member, member) self.rpc_mock.update_status.assert_called_once_with('member', member['id'], constants.ACTIVE) def test_update_member_with_admin_down(self): old_member = {'id': 'id1', 'admin_state_up': True} member = {'id': 'id1', 'pool_id': '1', 'admin_state_up': False} self.mgr.update_member(mock.Mock(), 
old_member, member) self.driver_mock.update_member.assert_called_once_with(old_member, member) self.rpc_mock.update_status.assert_called_once_with('member', member['id'], l_const.DISABLED) def test_update_member_failed(self): old_member = {'id': 'id1', 'admin_state_up': True} member = {'id': 'id1', 'pool_id': '1', 'admin_state_up': True} self.driver_mock.update_member.side_effect = Exception self.mgr.update_member(mock.Mock(), old_member, member) self.driver_mock.update_member.assert_called_once_with(old_member, member) self.rpc_mock.update_status.assert_called_once_with('member', member['id'], constants.ERROR) def test_delete_member(self): member = {'id': 'id1', 'pool_id': '1'} self.mgr.delete_member(mock.Mock(), member) self.driver_mock.delete_member.assert_called_once_with(member) def test_create_monitor(self): monitor = {'id': 'id1', 'admin_state_up': True} assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'} self.mgr.create_pool_health_monitor(mock.Mock(), monitor, '1') self.driver_mock.create_pool_health_monitor.assert_called_once_with( monitor, '1') self.rpc_mock.update_status.assert_called_once_with('health_monitor', assoc_id, constants.ACTIVE) def test_create_monitor_with_admin_down(self): monitor = {'id': 'id1', 'admin_state_up': False} assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'} self.mgr.create_pool_health_monitor(mock.Mock(), monitor, '1') self.driver_mock.create_pool_health_monitor.assert_called_once_with( monitor, '1') self.rpc_mock.update_status.assert_called_once_with('health_monitor', assoc_id, l_const.DISABLED) def test_create_monitor_failed(self): monitor = {'id': 'id1', 'admin_state_up': True} assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'} self.driver_mock.create_pool_health_monitor.side_effect = Exception self.mgr.create_pool_health_monitor(mock.Mock(), monitor, '1') self.driver_mock.create_pool_health_monitor.assert_called_once_with( monitor, '1') self.rpc_mock.update_status.assert_called_once_with('health_monitor', 
assoc_id, constants.ERROR) def test_update_monitor(self): monitor = {'id': 'id1', 'admin_state_up': True} assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'} self.mgr.update_pool_health_monitor(mock.Mock(), monitor, monitor, '1') self.driver_mock.update_pool_health_monitor.assert_called_once_with( monitor, monitor, '1') self.rpc_mock.update_status.assert_called_once_with('health_monitor', assoc_id, constants.ACTIVE) def test_update_monitor_with_admin_down(self): monitor = {'id': 'id1', 'admin_state_up': False} assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'} self.mgr.update_pool_health_monitor(mock.Mock(), monitor, monitor, '1') self.driver_mock.update_pool_health_monitor.assert_called_once_with( monitor, monitor, '1') self.rpc_mock.update_status.assert_called_once_with('health_monitor', assoc_id, l_const.DISABLED) def test_update_monitor_failed(self): monitor = {'id': 'id1', 'admin_state_up': True} assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'} self.driver_mock.update_pool_health_monitor.side_effect = Exception self.mgr.update_pool_health_monitor(mock.Mock(), monitor, monitor, '1') self.driver_mock.update_pool_health_monitor.assert_called_once_with( monitor, monitor, '1') self.rpc_mock.update_status.assert_called_once_with('health_monitor', assoc_id, constants.ERROR) def test_delete_monitor(self): monitor = {'id': 'id1'} self.mgr.delete_pool_health_monitor(mock.Mock(), monitor, '1') self.driver_mock.delete_pool_health_monitor.assert_called_once_with( monitor, '1') def test_agent_disabled(self): payload = {'admin_state_up': False} self.mgr.agent_updated(mock.Mock(), payload) self.driver_mock.undeploy_instance.assert_has_calls( [mock.call('1', delete_namespace=True), mock.call('2', delete_namespace=True)], any_order=True ) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/test_data_models.py0000664000567000056710000000540712701407726032542 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the 
"License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import mock import testscenarios from neutron_lbaas.services.loadbalancer import data_models from neutron_lbaas.tests import base from neutron_lbaas.tests import tools load_tests = testscenarios.load_tests_apply_scenarios class TestBaseDataModel(base.BaseTestCase): def _get_fake_model_cls(self, fields_): class FakeModel(data_models.BaseDataModel): fields = fields_ def __init__(self, **kwargs): for k, v in kwargs.items(): setattr(self, k, v) return FakeModel def test_from_dict(self): fields_ = ['field1', 'field2'] dict_ = {field: tools.get_random_string() for field in fields_} model_cls = self._get_fake_model_cls(fields_) model = model_cls.from_dict(dict_) for field in fields_: self.assertEqual(dict_[field], getattr(model, field)) def test_from_dict_filters_by_fields(self): fields_ = ['field1', 'field2'] dict_ = {field: tools.get_random_string() for field in fields_} dict_['foo'] = 'bar' model_cls = self._get_fake_model_cls(fields_) model = model_cls.from_dict(dict_) self.assertFalse(hasattr(model, 'foo')) def _get_models(): models = [] for name, obj in inspect.getmembers(data_models): if inspect.isclass(obj): if issubclass(obj, data_models.BaseDataModel): if type(obj) != data_models.BaseDataModel: models.append(obj) return models class TestModels(base.BaseTestCase): scenarios = [ (model.__name__, {'model': model}) for model in _get_models() ] @staticmethod def _get_iterable_mock(*args, **kwargs): m = mock.create_autospec(dict, spec_set=True) def _get_empty_iterator(*args, 
**kwargs): return iter([]) m.__iter__ = _get_empty_iterator m.pop = _get_empty_iterator return m def test_from_dict_filters_by_fields(self): dict_ = {field: self._get_iterable_mock() for field in self.model.fields} dict_['foo'] = 'bar' model = self.model.from_dict(dict_) self.assertFalse(hasattr(model, 'foo')) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/services/loadbalancer/test_loadbalancer_plugin.py0000664000567000056710000014021712701407726034252 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import mock from neutron.api.v2 import attributes as attr from neutron.plugins.common import constants from neutron.tests.unit.api.v2 import test_base from oslo_utils import uuidutils from webob import exc from neutron_lbaas.extensions import loadbalancer from neutron_lbaas.extensions import loadbalancerv2 from neutron_lbaas.extensions import sharedpools from neutron_lbaas.tests import base _uuid = uuidutils.generate_uuid _get_path = test_base._get_path class LoadBalancerExtensionTestCase(base.ExtensionTestCase): fmt = 'json' def setUp(self): super(LoadBalancerExtensionTestCase, self).setUp() self._setUpExtension( 'neutron_lbaas.extensions.loadbalancer.LoadBalancerPluginBase', constants.LOADBALANCER, loadbalancer.RESOURCE_ATTRIBUTE_MAP, loadbalancer.Loadbalancer, 'lb', use_quota=True) def test_vip_create(self): vip_id = _uuid() data = {'vip': {'name': 'vip1', 'description': 'descr_vip1', 'subnet_id': _uuid(), 'address': '127.0.0.1', 'protocol_port': 80, 'protocol': 'HTTP', 'pool_id': _uuid(), 'session_persistence': {'type': 'HTTP_COOKIE'}, 'connection_limit': 100, 'admin_state_up': True, 'tenant_id': _uuid()}} return_value = copy.copy(data['vip']) return_value.update({'status': "ACTIVE", 'id': vip_id}) instance = self.plugin.return_value instance.create_vip.return_value = return_value res = self.api.post(_get_path('lb/vips', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_vip.assert_called_with(mock.ANY, vip=data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('vip', res) self.assertEqual(return_value, res['vip']) def test_vip_create_with_connection_limit_smaller_than_min_value(self): data = {'vip': {'name': 'vip1', 'description': 'descr_vip1', 'subnet_id': _uuid(), 'address': '127.0.0.1', 'protocol_port': 80, 'protocol': 'HTTP', 'pool_id': _uuid(), 'session_persistence': {'type': 'HTTP_COOKIE'}, 'connection_limit': -4, 'admin_state_up': True, 'tenant_id': 
_uuid()}} res = self.api.post(_get_path('lb/vips', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_vip_list(self): vip_id = _uuid() return_value = [{'name': 'vip1', 'admin_state_up': True, 'tenant_id': _uuid(), 'id': vip_id}] instance = self.plugin.return_value instance.get_vips.return_value = return_value res = self.api.get(_get_path('lb/vips', fmt=self.fmt)) instance.get_vips.assert_called_with(mock.ANY, fields=mock.ANY, filters=mock.ANY) self.assertEqual(exc.HTTPOk.code, res.status_int) def test_vip_update(self): vip_id = _uuid() update_data = {'vip': {'admin_state_up': False}} return_value = {'name': 'vip1', 'admin_state_up': False, 'tenant_id': _uuid(), 'status': "ACTIVE", 'id': vip_id} instance = self.plugin.return_value instance.update_vip.return_value = return_value res = self.api.put(_get_path('lb/vips', id=vip_id, fmt=self.fmt), self.serialize(update_data)) instance.update_vip.assert_called_with(mock.ANY, vip_id, vip=update_data) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('vip', res) self.assertEqual(return_value, res['vip']) def test_vip_update_with_connection_limit_smaller_than_min_value(self): vip_id = _uuid() data = {'vip': {'connection_limit': -4}} res = self.api.put(_get_path('lb/vips', id=vip_id, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt, expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_vip_get(self): vip_id = _uuid() return_value = {'name': 'vip1', 'admin_state_up': False, 'tenant_id': _uuid(), 'status': "ACTIVE", 'id': vip_id} instance = self.plugin.return_value instance.get_vip.return_value = return_value res = self.api.get(_get_path('lb/vips', id=vip_id, fmt=self.fmt)) instance.get_vip.assert_called_with(mock.ANY, vip_id, fields=mock.ANY) self.assertEqual(exc.HTTPOk.code, res.status_int) res = 
self.deserialize(res) self.assertIn('vip', res) self.assertEqual(return_value, res['vip']) def test_vip_delete(self): self._test_entity_delete('vip') def test_pool_create(self): pool_id = _uuid() hm_id = _uuid() data = {'pool': {'name': 'pool1', 'description': 'descr_pool1', 'subnet_id': _uuid(), 'protocol': 'HTTP', 'lb_method': 'ROUND_ROBIN', 'health_monitors': [hm_id], 'admin_state_up': True, 'tenant_id': _uuid()}} return_value = copy.copy(data['pool']) return_value['provider'] = 'lbaas' return_value.update({'status': "ACTIVE", 'id': pool_id}) instance = self.plugin.return_value instance.create_pool.return_value = return_value res = self.api.post(_get_path('lb/pools', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) data['pool']['provider'] = attr.ATTR_NOT_SPECIFIED instance.create_pool.assert_called_with(mock.ANY, pool=data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('pool', res) self.assertEqual(return_value, res['pool']) def test_pool_list(self): pool_id = _uuid() return_value = [{'name': 'pool1', 'admin_state_up': True, 'tenant_id': _uuid(), 'id': pool_id}] instance = self.plugin.return_value instance.get_pools.return_value = return_value res = self.api.get(_get_path('lb/pools', fmt=self.fmt)) instance.get_pools.assert_called_with(mock.ANY, fields=mock.ANY, filters=mock.ANY) self.assertEqual(exc.HTTPOk.code, res.status_int) def test_pool_update(self): pool_id = _uuid() update_data = {'pool': {'admin_state_up': False}} return_value = {'name': 'pool1', 'admin_state_up': False, 'tenant_id': _uuid(), 'status': "ACTIVE", 'id': pool_id} instance = self.plugin.return_value instance.update_pool.return_value = return_value res = self.api.put(_get_path('lb/pools', id=pool_id, fmt=self.fmt), self.serialize(update_data)) instance.update_pool.assert_called_with(mock.ANY, pool_id, pool=update_data) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) 
self.assertIn('pool', res) self.assertEqual(return_value, res['pool']) def test_pool_get(self): pool_id = _uuid() return_value = {'name': 'pool1', 'admin_state_up': False, 'tenant_id': _uuid(), 'status': "ACTIVE", 'id': pool_id} instance = self.plugin.return_value instance.get_pool.return_value = return_value res = self.api.get(_get_path('lb/pools', id=pool_id, fmt=self.fmt)) instance.get_pool.assert_called_with(mock.ANY, pool_id, fields=mock.ANY) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('pool', res) self.assertEqual(return_value, res['pool']) def test_pool_delete(self): self._test_entity_delete('pool') def test_pool_stats(self): pool_id = _uuid() stats = {'stats': 'dummy'} instance = self.plugin.return_value instance.stats.return_value = stats path = _get_path('lb/pools', id=pool_id, action="stats", fmt=self.fmt) res = self.api.get(path) instance.stats.assert_called_with(mock.ANY, pool_id) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('stats', res) self.assertEqual(stats['stats'], res['stats']) def test_member_create(self): member_id = _uuid() data = {'member': {'pool_id': _uuid(), 'address': '127.0.0.1', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': _uuid()}} return_value = copy.copy(data['member']) return_value.update({'status': "ACTIVE", 'id': member_id}) instance = self.plugin.return_value instance.create_member.return_value = return_value res = self.api.post(_get_path('lb/members', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_member.assert_called_with(mock.ANY, member=data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('member', res) self.assertEqual(return_value, res['member']) def test_member_list(self): member_id = _uuid() return_value = [{'name': 'member1', 'admin_state_up': True, 'tenant_id': _uuid(), 'id': member_id}] instance = 
self.plugin.return_value instance.get_members.return_value = return_value res = self.api.get(_get_path('lb/members', fmt=self.fmt)) instance.get_members.assert_called_with(mock.ANY, fields=mock.ANY, filters=mock.ANY) self.assertEqual(exc.HTTPOk.code, res.status_int) def test_member_update(self): member_id = _uuid() update_data = {'member': {'admin_state_up': False}} return_value = {'admin_state_up': False, 'tenant_id': _uuid(), 'status': "ACTIVE", 'id': member_id} instance = self.plugin.return_value instance.update_member.return_value = return_value res = self.api.put(_get_path('lb/members', id=member_id, fmt=self.fmt), self.serialize(update_data)) instance.update_member.assert_called_with(mock.ANY, member_id, member=update_data) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('member', res) self.assertEqual(return_value, res['member']) def test_member_get(self): member_id = _uuid() return_value = {'admin_state_up': False, 'tenant_id': _uuid(), 'status': "ACTIVE", 'id': member_id} instance = self.plugin.return_value instance.get_member.return_value = return_value res = self.api.get(_get_path('lb/members', id=member_id, fmt=self.fmt)) instance.get_member.assert_called_with(mock.ANY, member_id, fields=mock.ANY) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('member', res) self.assertEqual(return_value, res['member']) def test_member_delete(self): self._test_entity_delete('member') def test_health_monitor_create(self): health_monitor_id = _uuid() data = {'health_monitor': {'type': 'HTTP', 'delay': 2, 'timeout': 1, 'max_retries': 3, 'http_method': 'GET', 'url_path': '/path', 'expected_codes': '200-300', 'admin_state_up': True, 'tenant_id': _uuid()}} return_value = copy.copy(data['health_monitor']) return_value.update({'status': "ACTIVE", 'id': health_monitor_id}) instance = self.plugin.return_value instance.create_health_monitor.return_value = return_value res = 
self.api.post(_get_path('lb/health_monitors', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_health_monitor.assert_called_with(mock.ANY, health_monitor=data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('health_monitor', res) self.assertEqual(return_value, res['health_monitor']) def test_health_monitor_create_with_timeout_negative(self): data = {'health_monitor': {'type': 'HTTP', 'delay': 2, 'timeout': -1, 'max_retries': 3, 'http_method': 'GET', 'url_path': '/path', 'expected_codes': '200-300', 'admin_state_up': True, 'tenant_id': _uuid()}} res = self.api.post(_get_path('lb/health_monitors', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt, expect_errors=True) self.assertEqual(400, res.status_int) def test_health_monitor_list(self): health_monitor_id = _uuid() return_value = [{'type': 'HTTP', 'admin_state_up': True, 'tenant_id': _uuid(), 'id': health_monitor_id}] instance = self.plugin.return_value instance.get_health_monitors.return_value = return_value res = self.api.get(_get_path('lb/health_monitors', fmt=self.fmt)) instance.get_health_monitors.assert_called_with( mock.ANY, fields=mock.ANY, filters=mock.ANY) self.assertEqual(exc.HTTPOk.code, res.status_int) def test_health_monitor_update(self): health_monitor_id = _uuid() update_data = {'health_monitor': {'admin_state_up': False}} return_value = {'type': 'HTTP', 'admin_state_up': False, 'tenant_id': _uuid(), 'status': "ACTIVE", 'id': health_monitor_id} instance = self.plugin.return_value instance.update_health_monitor.return_value = return_value res = self.api.put(_get_path('lb/health_monitors', id=health_monitor_id, fmt=self.fmt), self.serialize(update_data)) instance.update_health_monitor.assert_called_with( mock.ANY, health_monitor_id, health_monitor=update_data) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('health_monitor', res) 
self.assertEqual(return_value, res['health_monitor']) def test_health_monitor_get(self): health_monitor_id = _uuid() return_value = {'type': 'HTTP', 'admin_state_up': False, 'tenant_id': _uuid(), 'status': "ACTIVE", 'id': health_monitor_id} instance = self.plugin.return_value instance.get_health_monitor.return_value = return_value res = self.api.get(_get_path('lb/health_monitors', id=health_monitor_id, fmt=self.fmt)) instance.get_health_monitor.assert_called_with( mock.ANY, health_monitor_id, fields=mock.ANY) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('health_monitor', res) self.assertEqual(return_value, res['health_monitor']) def test_health_monitor_delete(self): self._test_entity_delete('health_monitor') def test_create_pool_health_monitor(self): health_monitor_id = _uuid() data = {'health_monitor': {'id': health_monitor_id, 'tenant_id': _uuid()}} return_value = copy.copy(data['health_monitor']) instance = self.plugin.return_value instance.create_pool_health_monitor.return_value = return_value res = self.api.post('/lb/pools/id1/health_monitors', self.serialize(data), content_type='application/%s' % self.fmt) instance.create_pool_health_monitor.assert_called_with( mock.ANY, pool_id='id1', health_monitor=data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('health_monitor', res) self.assertEqual(return_value, res['health_monitor']) def test_delete_pool_health_monitor(self): health_monitor_id = _uuid() res = self.api.delete('/lb/pools/id1/health_monitors/%s' % health_monitor_id) instance = self.plugin.return_value instance.delete_pool_health_monitor.assert_called_with( mock.ANY, health_monitor_id, pool_id='id1') self.assertEqual(exc.HTTPNoContent.code, res.status_int) class LoadBalancerExtensionV2TestCase(base.ExtensionTestCase): fmt = 'json' def setUp(self): super(LoadBalancerExtensionV2TestCase, self).setUp() resource_map = 
loadbalancerv2.RESOURCE_ATTRIBUTE_MAP.copy() for k in sharedpools.EXTENDED_ATTRIBUTES_2_0.keys(): resource_map[k].update(sharedpools.EXTENDED_ATTRIBUTES_2_0[k]) self._setUpExtension( 'neutron_lbaas.extensions.loadbalancerv2.LoadBalancerPluginBaseV2', constants.LOADBALANCERV2, resource_map, loadbalancerv2.Loadbalancerv2, 'lbaas', use_quota=True) def test_loadbalancer_create(self): lb_id = _uuid() data = {'loadbalancer': {'name': 'lb1', 'description': 'descr_lb1', 'tenant_id': _uuid(), 'vip_subnet_id': _uuid(), 'admin_state_up': True, 'vip_address': '127.0.0.1'}} return_value = copy.copy(data['loadbalancer']) return_value.update({'id': lb_id}) instance = self.plugin.return_value instance.create_loadbalancer.return_value = return_value res = self.api.post(_get_path('lbaas/loadbalancers', fmt=self.fmt), self.serialize(data), content_type='application/{0}'.format(self.fmt)) data['loadbalancer'].update({'provider': attr.ATTR_NOT_SPECIFIED, 'flavor_id': attr.ATTR_NOT_SPECIFIED}) instance.create_loadbalancer.assert_called_with(mock.ANY, loadbalancer=data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('loadbalancer', res) self.assertEqual(return_value, res['loadbalancer']) def test_loadbalancer_create_invalid_flavor(self): data = {'loadbalancer': {'name': 'lb1', 'description': 'descr_lb1', 'tenant_id': _uuid(), 'vip_subnet_id': _uuid(), 'admin_state_up': True, 'flavor_id': 123, 'vip_address': '127.0.0.1'}} res = self.api.post(_get_path('lbaas/loadbalancers', fmt=self.fmt), self.serialize(data), content_type='application/{0}'.format(self.fmt), expect_errors=True) self.assertEqual(400, res.status_int) def test_loadbalancer_create_valid_flavor(self): data = {'loadbalancer': {'name': 'lb1', 'description': 'descr_lb1', 'tenant_id': _uuid(), 'vip_subnet_id': _uuid(), 'admin_state_up': True, 'flavor_id': _uuid(), 'vip_address': '127.0.0.1'}} res = self.api.post(_get_path('lbaas/loadbalancers', fmt=self.fmt), self.serialize(data), 
content_type='application/{0}'.format(self.fmt), expect_errors=True) self.assertEqual(201, res.status_int) def test_loadbalancer_list(self): lb_id = _uuid() return_value = [{'name': 'lb1', 'admin_state_up': True, 'tenant_id': _uuid(), 'id': lb_id}] instance = self.plugin.return_value instance.get_loadbalancers.return_value = return_value res = self.api.get(_get_path('lbaas/loadbalancers', fmt=self.fmt)) instance.get_loadbalancers.assert_called_with(mock.ANY, fields=mock.ANY, filters=mock.ANY) self.assertEqual(exc.HTTPOk.code, res.status_int) def test_loadbalancer_update(self): lb_id = _uuid() update_data = {'loadbalancer': {'admin_state_up': False}} return_value = {'name': 'lb1', 'admin_state_up': False, 'tenant_id': _uuid(), 'id': lb_id} instance = self.plugin.return_value instance.update_loadbalancer.return_value = return_value res = self.api.put(_get_path('lbaas/loadbalancers', id=lb_id, fmt=self.fmt), self.serialize(update_data)) instance.update_loadbalancer.assert_called_with( mock.ANY, lb_id, loadbalancer=update_data) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('loadbalancer', res) self.assertEqual(return_value, res['loadbalancer']) def test_loadbalancer_get(self): lb_id = _uuid() return_value = {'name': 'lb1', 'admin_state_up': False, 'tenant_id': _uuid(), 'id': lb_id} instance = self.plugin.return_value instance.get_loadbalancer.return_value = return_value res = self.api.get(_get_path('lbaas/loadbalancers', id=lb_id, fmt=self.fmt)) instance.get_loadbalancer.assert_called_with(mock.ANY, lb_id, fields=mock.ANY) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('loadbalancer', res) self.assertEqual(return_value, res['loadbalancer']) def test_loadbalancer_delete(self): self._test_entity_delete('loadbalancer') def test_listener_create(self): listener_id = _uuid() data = {'listener': {'tenant_id': _uuid(), 'name': 'listen-name-1', 'description': 'listen-1-desc', 'protocol': 
'HTTP', 'protocol_port': 80, 'default_pool_id': None, 'default_tls_container_ref': None, 'sni_container_refs': [], 'connection_limit': 100, 'admin_state_up': True, 'loadbalancer_id': _uuid()}} return_value = copy.copy(data['listener']) return_value.update({'id': listener_id}) del return_value['loadbalancer_id'] instance = self.plugin.return_value instance.create_listener.return_value = return_value res = self.api.post(_get_path('lbaas/listeners', fmt=self.fmt), self.serialize(data), content_type='application/{0}'.format(self.fmt)) instance.create_listener.assert_called_with(mock.ANY, listener=data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('listener', res) self.assertEqual(return_value, res['listener']) def test_listener_create_with_tls(self): listener_id = _uuid() tls_ref = 'http://example.ref/uuid' sni_refs = ['http://example.ref/uuid', 'http://example.ref/uuid1'] data = {'listener': {'tenant_id': _uuid(), 'name': 'listen-name-1', 'description': 'listen-1-desc', 'protocol': 'HTTP', 'protocol_port': 80, 'default_pool_id': None, 'default_tls_container_ref': tls_ref, 'sni_container_refs': sni_refs, 'connection_limit': 100, 'admin_state_up': True, 'loadbalancer_id': _uuid()}} return_value = copy.copy(data['listener']) return_value.update({'id': listener_id}) del return_value['loadbalancer_id'] instance = self.plugin.return_value instance.create_listener.return_value = return_value res = self.api.post(_get_path('lbaas/listeners', fmt=self.fmt), self.serialize(data), content_type='application/{0}'.format(self.fmt)) instance.create_listener.assert_called_with(mock.ANY, listener=data) self.assertEqual(res.status_int, exc.HTTPCreated.code) res = self.deserialize(res) self.assertIn('listener', res) self.assertEqual(res['listener'], return_value) def test_listener_create_with_connection_limit_less_than_min_value(self): data = {'listener': {'tenant_id': _uuid(), 'name': 'listen-name-1', 'description': 'listen-1-desc', 
'protocol': 'HTTP', 'protocol_port': 80, 'default_tls_container_ref': None, 'sni_container_refs': [], 'connection_limit': -4, 'admin_state_up': True, 'loadbalancer_id': _uuid()}} res = self.api.post(_get_path('lbaas/listeners', fmt=self.fmt), self.serialize(data), content_type='application/{0}'.format(self.fmt), expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_listener_list(self): listener_id = _uuid() return_value = [{'admin_state_up': True, 'tenant_id': _uuid(), 'id': listener_id}] instance = self.plugin.return_value instance.get_listeners.return_value = return_value res = self.api.get(_get_path('lbaas/listeners', fmt=self.fmt)) instance.get_listeners.assert_called_with(mock.ANY, fields=mock.ANY, filters=mock.ANY) self.assertEqual(exc.HTTPOk.code, res.status_int) def test_listener_update(self): listener_id = _uuid() update_data = {'listener': {'admin_state_up': False}} return_value = {'name': 'listener1', 'admin_state_up': False, 'tenant_id': _uuid(), 'id': listener_id} instance = self.plugin.return_value instance.update_listener.return_value = return_value res = self.api.put(_get_path('lbaas/listeners', id=listener_id, fmt=self.fmt), self.serialize(update_data)) instance.update_listener.assert_called_with( mock.ANY, listener_id, listener=update_data) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('listener', res) self.assertEqual(return_value, res['listener']) def test_listener_update_with_tls(self): listener_id = _uuid() tls_ref = 'http://example.ref/uuid' sni_refs = ['http://example.ref/uuid', 'http://example.ref/uuid1'] update_data = {'listener': {'admin_state_up': False}} return_value = {'name': 'listener1', 'admin_state_up': False, 'tenant_id': _uuid(), 'id': listener_id, 'default_tls_container_ref': tls_ref, 'sni_container_refs': sni_refs} instance = self.plugin.return_value instance.update_listener.return_value = return_value res = self.api.put(_get_path('lbaas/listeners', 
id=listener_id, fmt=self.fmt), self.serialize(update_data)) instance.update_listener.assert_called_with( mock.ANY, listener_id, listener=update_data) self.assertEqual(res.status_int, exc.HTTPOk.code) res = self.deserialize(res) self.assertIn('listener', res) self.assertEqual(res['listener'], return_value) def test_listener_update_with_connection_limit_less_than_min_value(self): listener_id = _uuid() update_data = {'listener': {'connection_limit': -4}} res = self.api.put(_get_path('lbaas/listeners', id=listener_id, fmt=self.fmt), self.serialize(update_data), expect_errors=True) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_listener_get(self): listener_id = _uuid() return_value = {'name': 'listener1', 'admin_state_up': False, 'tenant_id': _uuid(), 'id': listener_id} instance = self.plugin.return_value instance.get_listener.return_value = return_value res = self.api.get(_get_path('lbaas/listeners', id=listener_id, fmt=self.fmt)) instance.get_listener.assert_called_with(mock.ANY, listener_id, fields=mock.ANY) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('listener', res) self.assertEqual(return_value, res['listener']) def test_listener_delete(self): self._test_entity_delete('listener') def test_pool_create(self): pool_id = _uuid() data = {'pool': {'name': 'pool1', 'description': 'descr_pool1', 'protocol': 'HTTP', 'lb_algorithm': 'ROUND_ROBIN', 'admin_state_up': True, 'loadbalancer_id': _uuid(), 'listener_id': None, 'tenant_id': _uuid(), 'session_persistence': {}}} return_value = copy.copy(data['pool']) return_value.update({'id': pool_id}) return_value.pop('listener_id') instance = self.plugin.return_value instance.create_pool.return_value = return_value res = self.api.post(_get_path('lbaas/pools', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_pool.assert_called_with(mock.ANY, pool=data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = 
self.deserialize(res) self.assertIn('pool', res) self.assertEqual(return_value, res['pool']) def test_pool_list(self): pool_id = _uuid() return_value = [{'name': 'pool1', 'admin_state_up': True, 'tenant_id': _uuid(), 'id': pool_id}] instance = self.plugin.return_value instance.get_pools.return_value = return_value res = self.api.get(_get_path('lbaas/pools', fmt=self.fmt)) instance.get_pools.assert_called_with(mock.ANY, fields=mock.ANY, filters=mock.ANY) self.assertEqual(exc.HTTPOk.code, res.status_int) def test_pool_update(self): pool_id = _uuid() update_data = {'pool': {'admin_state_up': False}} return_value = {'name': 'pool1', 'admin_state_up': False, 'tenant_id': _uuid(), 'id': pool_id} instance = self.plugin.return_value instance.update_pool.return_value = return_value res = self.api.put(_get_path('lbaas/pools', id=pool_id, fmt=self.fmt), self.serialize(update_data)) instance.update_pool.assert_called_with(mock.ANY, pool_id, pool=update_data) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('pool', res) self.assertEqual(return_value, res['pool']) def test_pool_get(self): pool_id = _uuid() return_value = {'name': 'pool1', 'admin_state_up': False, 'tenant_id': _uuid(), 'id': pool_id} instance = self.plugin.return_value instance.get_pool.return_value = return_value res = self.api.get(_get_path('lbaas/pools', id=pool_id, fmt=self.fmt)) instance.get_pool.assert_called_with(mock.ANY, pool_id, fields=mock.ANY) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('pool', res) self.assertEqual(return_value, res['pool']) def test_pool_delete(self): self._test_entity_delete('pool') def test_pool_member_create(self): subnet_id = _uuid() member_id = _uuid() data = {'member': {'address': '10.0.0.1', 'protocol_port': 80, 'weight': 1, 'subnet_id': subnet_id, 'admin_state_up': True, 'tenant_id': _uuid(), 'name': 'member1'}} return_value = copy.copy(data['member']) return_value.update({'id': 
member_id}) instance = self.plugin.return_value instance.create_pool_member.return_value = return_value res = self.api.post(_get_path('lbaas/pools/pid1/members', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_pool_member.assert_called_with(mock.ANY, pool_id='pid1', member=data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('member', res) self.assertEqual(return_value, res['member']) def test_pool_member_list(self): member_id = _uuid() return_value = [{'name': 'member1', 'admin_state_up': True, 'tenant_id': _uuid(), 'id': member_id, 'name': 'member1'}] instance = self.plugin.return_value instance.get_pools.return_value = return_value res = self.api.get(_get_path('lbaas/pools/pid1/members', fmt=self.fmt)) instance.get_pool_members.assert_called_with(mock.ANY, fields=mock.ANY, filters=mock.ANY, pool_id='pid1') self.assertEqual(exc.HTTPOk.code, res.status_int) def test_pool_member_update(self): member_id = _uuid() update_data = {'member': {'admin_state_up': False}} return_value = {'admin_state_up': False, 'tenant_id': _uuid(), 'id': member_id, 'name': 'member1'} instance = self.plugin.return_value instance.update_pool_member.return_value = return_value res = self.api.put(_get_path('lbaas/pools/pid1/members', id=member_id, fmt=self.fmt), self.serialize(update_data)) instance.update_pool_member.assert_called_with( mock.ANY, member_id, pool_id='pid1', member=update_data) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('member', res) self.assertEqual(return_value, res['member']) def test_pool_member_get(self): member_id = _uuid() return_value = {'admin_state_up': False, 'tenant_id': _uuid(), 'id': member_id, 'name': 'member1'} instance = self.plugin.return_value instance.get_pool_member.return_value = return_value res = self.api.get(_get_path('lbaas/pools/pid1/members', id=member_id, fmt=self.fmt)) 
instance.get_pool_member.assert_called_with(mock.ANY, member_id, fields=mock.ANY, pool_id='pid1') self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('member', res) self.assertEqual(return_value, res['member']) def test_pool_member_delete(self): entity_id = _uuid() res = self.api.delete( test_base._get_path('lbaas/pools/pid1/members', id=entity_id, fmt=self.fmt)) delete_entity = getattr(self.plugin.return_value, "delete_pool_member") delete_entity.assert_called_with(mock.ANY, entity_id, pool_id='pid1') self.assertEqual(exc.HTTPNoContent.code, res.status_int) def test_health_monitor_create(self): health_monitor_id = _uuid() data = {'healthmonitor': {'type': 'HTTP', 'delay': 2, 'timeout': 1, 'max_retries': 3, 'http_method': 'GET', 'url_path': '/path', 'expected_codes': '200-300', 'admin_state_up': True, 'tenant_id': _uuid(), 'pool_id': _uuid(), 'name': 'monitor1'}} return_value = copy.copy(data['healthmonitor']) return_value.update({'id': health_monitor_id}) del return_value['pool_id'] instance = self.plugin.return_value instance.create_healthmonitor.return_value = return_value res = self.api.post(_get_path('lbaas/healthmonitors', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_healthmonitor.assert_called_with( mock.ANY, healthmonitor=data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('healthmonitor', res) self.assertEqual(return_value, res['healthmonitor']) def test_health_monitor_create_with_timeout_negative(self): data = {'healthmonitor': {'type': 'HTTP', 'delay': 2, 'timeout': -1, 'max_retries': 3, 'http_method': 'GET', 'url_path': '/path', 'expected_codes': '200-300', 'admin_state_up': True, 'tenant_id': _uuid(), 'pool_id': _uuid(), 'name': 'monitor1'}} res = self.api.post(_get_path('lbaas/healthmonitors', fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt, expect_errors=True) self.assertEqual(400, 
res.status_int) def test_health_monitor_list(self): health_monitor_id = _uuid() return_value = [{'type': 'HTTP', 'admin_state_up': True, 'tenant_id': _uuid(), 'id': health_monitor_id, 'name': 'monitor1'}] instance = self.plugin.return_value instance.get_healthmonitors.return_value = return_value res = self.api.get(_get_path('lbaas/healthmonitors', fmt=self.fmt)) instance.get_healthmonitors.assert_called_with( mock.ANY, fields=mock.ANY, filters=mock.ANY) self.assertEqual(exc.HTTPOk.code, res.status_int) def test_health_monitor_update(self): health_monitor_id = _uuid() update_data = {'healthmonitor': {'admin_state_up': False}} return_value = {'type': 'HTTP', 'admin_state_up': False, 'tenant_id': _uuid(), 'id': health_monitor_id, 'name': 'monitor1'} instance = self.plugin.return_value instance.update_healthmonitor.return_value = return_value res = self.api.put(_get_path('lbaas/healthmonitors', id=health_monitor_id, fmt=self.fmt), self.serialize(update_data)) instance.update_healthmonitor.assert_called_with( mock.ANY, health_monitor_id, healthmonitor=update_data) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('healthmonitor', res) self.assertEqual(return_value, res['healthmonitor']) def test_health_monitor_get(self): health_monitor_id = _uuid() return_value = {'type': 'HTTP', 'admin_state_up': False, 'tenant_id': _uuid(), 'id': health_monitor_id, 'name': 'monitor1'} instance = self.plugin.return_value instance.get_healthmonitor.return_value = return_value res = self.api.get(_get_path('lbaas/healthmonitors', id=health_monitor_id, fmt=self.fmt)) instance.get_healthmonitor.assert_called_with( mock.ANY, health_monitor_id, fields=mock.ANY) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('healthmonitor', res) self.assertEqual(return_value, res['healthmonitor']) def test_health_monitor_delete(self): entity_id = _uuid() res = self.api.delete( test_base._get_path('lbaas/healthmonitors', 
id=entity_id, fmt=self.fmt)) delete_entity = getattr(self.plugin.return_value, "delete_healthmonitor") delete_entity.assert_called_with(mock.ANY, entity_id) self.assertEqual(exc.HTTPNoContent.code, res.status_int) def test_load_balancer_stats(self): load_balancer_id = _uuid() stats = {'stats': 'dummy'} instance = self.plugin.return_value instance.stats.return_value = stats path = _get_path('lbaas/loadbalancers', id=load_balancer_id, action="stats", fmt=self.fmt) res = self.api.get(path) instance.stats.assert_called_with(mock.ANY, load_balancer_id) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('stats', res) self.assertEqual(stats['stats'], res['stats']) def test_load_balancer_statuses(self): load_balancer_id = _uuid() statuses = {'statuses': {'loadbalancer': {}}} instance = self.plugin.return_value instance.statuses.return_value = statuses path = _get_path('lbaas/loadbalancers', id=load_balancer_id, action="statuses", fmt=self.fmt) res = self.api.get(path) instance.statuses.assert_called_with(mock.ANY, load_balancer_id) self.assertEqual(exc.HTTPOk.code, res.status_int) res = self.deserialize(res) self.assertIn('statuses', res) self.assertEqual(statuses['statuses'], res['statuses']) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/0000775000567000056710000000000012701410110024050 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/netscaler/0000775000567000056710000000000012701410110026030 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/netscaler/test_netscaler_driver_v2.py0000664000567000056710000003035712701407726033436 0ustar jenkinsjenkins00000000000000# Copyright 2015 Citrix Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lbaas.drivers.netscaler \ import netscaler_driver_v2 from neutron_lbaas.services.loadbalancer import data_models from neutron_lbaas.services.loadbalancer.drivers.netscaler import ncc_client from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer LBAAS_DRIVER_CLASS = ('neutron_lbaas.services.loadbalancer.drivers' '.netscaler.netscaler_driver_v2' '.NetScalerLoadBalancerDriverV2') NCC_CLIENT_CLASS = ('neutron_lbaas.services.loadbalancer.drivers' '.netscaler.ncc_client' '.NSClient') LBAAS_PROVIDER_NAME = 'NetScaler' LBAAS_PROVIDER = ('LOADBALANCERV2:%s:%s:default' % (LBAAS_PROVIDER_NAME, LBAAS_DRIVER_CLASS)) log_path = ('neutron_lbaas.services.loadbalancer.drivers.' 'logging_noop.driver.LOG') class FakeModel(object): def __init__(self, id): self.id = id def attached_to_loadbalancer(self): return True class ManagerTest(object): def __init__(self, parent, manager, model): self.parent = parent self.manager = manager self.model = model self.object_path = None self.async_obj_track_list = (netscaler_driver_v2. 
PROVISIONING_STATUS_TRACKER) self.successful_completion_mock = mock.patch.object( manager, "successful_completion").start() self.failed_completion_mock = mock.patch.object( manager, "failed_completion").start() def start_tests(self): model = self.model self.object_path = "%s/%s" % ( self.resource_path, model.id) self.create_success(model) self.update_success(model, model) self.delete_success(model) self.create_failure(model) self.update_failure(model, model) self.delete_failure(model) def _check_success_completion(self): """Check if success_completion is called""" successful_completion_mock = self.successful_completion_mock successful_completion_mock.assert_called_once_with( mock.ANY, self.model) successful_completion_mock.reset_mock() def _check_success_completion_with_delete(self): """Check if success_compeletion is called with delete""" successful_completion_mock = self.successful_completion_mock successful_completion_mock.assert_called_once_with( mock.ANY, self.model, delete=True) successful_completion_mock.reset_mock() def _check_failure_completion(self): """Check failed_completion is called""" failed_completion_mock = self.failed_completion_mock failed_completion_mock.assert_called_once_with( mock.ANY, self.model) failed_completion_mock.reset_mock() def _set_response_error(self, mock_instance): errorcode = ncc_client.NCCException.RESPONSE_ERROR mock_instance.side_effect = (ncc_client .NCCException(errorcode)) def create(self, model): self.manager.create(self.parent.context, model) create_resource_mock = self.parent.create_resource_mock self.parent.assertTrue(create_resource_mock.called) resource_path = self.resource_path object_name = self.object_name create_payload = mock.ANY create_resource_mock.assert_called_once_with( mock.ANY, resource_path, object_name, create_payload) def update(self, old_model, model): self.manager.update(self.parent.context, old_model, model) update_resource_mock = self.parent.update_resource_mock 
self.parent.assertTrue(update_resource_mock.called) object_path = self.object_path object_name = self.object_name update_payload = mock.ANY update_resource_mock.assert_called_once_with( mock.ANY, object_path, object_name, update_payload) def delete(self, model): self.manager.delete(self.parent.context, model) remove_resource_mock = self.parent.remove_resource_mock self.parent.assertTrue(remove_resource_mock.called) object_path = self.object_path remove_resource_mock.assert_called_once_with( mock.ANY, object_path) def check_op_status(self, model, delete=False): loadbalancer = model.root_loadbalancer if hasattr(self, "async_obj_track_list") and self.async_obj_track_list: self.parent.assertIn( loadbalancer.id, self.async_obj_track_list) else: if delete: self._check_success_completion_with_delete() else: self._check_success_completion() def create_success(self, model): self.create(model) self.check_op_status(model) self.parent.create_resource_mock.reset() def update_success(self, old_model, model): self.update(old_model, model) self.check_op_status(model) self.parent.update_resource_mock.reset_mock() def delete_success(self, model): self.delete(model) self.check_op_status(model, delete=True) self.parent.remove_resource_mock.reset_mock() def create_failure(self, model): create_resource_mock = self.parent.create_resource_mock self._set_response_error(create_resource_mock) try: self.create(model) except Exception: pass self._check_failure_completion() create_resource_mock.reset_mock() create_resource_mock.side_effect = mock_create_resource_func def update_failure(self, old_model, model): update_resource_mock = self.parent.update_resource_mock self._set_response_error(update_resource_mock) try: self.update(old_model, model) except Exception: pass self._check_failure_completion() update_resource_mock.reset_mock() update_resource_mock.side_effect = mock_update_resource_func def delete_failure(self, model): remove_resource_mock = self.parent.remove_resource_mock 
self._set_response_error(remove_resource_mock) try: self.delete(model) except Exception: pass self._check_failure_completion() remove_resource_mock.reset_mock() remove_resource_mock.side_effect = mock_remove_resource_func class LoadBalancerManagerTest(ManagerTest): def __init__(self, parent, manager, model): super(LoadBalancerManagerTest, self).__init__(parent, manager, model) self.object_name = netscaler_driver_v2.LB_RESOURCE self.resource_path = "%s/%s" % ( netscaler_driver_v2.RESOURCE_PREFIX, netscaler_driver_v2.LBS_RESOURCE) self.start_tests() class ListenerManagerTest(ManagerTest): def __init__(self, parent, manager, model): super(ListenerManagerTest, self).__init__(parent, manager, model) self.object_name = netscaler_driver_v2.LISTENER_RESOURCE self.resource_path = "%s/%s" % ( netscaler_driver_v2.RESOURCE_PREFIX, netscaler_driver_v2.LISTENERS_RESOURCE) self.start_tests() class PoolManagerTest(ManagerTest): def __init__(self, parent, manager, model): super(PoolManagerTest, self).__init__(parent, manager, model) self.object_name = netscaler_driver_v2.POOL_RESOURCE self.resource_path = "%s/%s" % ( netscaler_driver_v2.RESOURCE_PREFIX, netscaler_driver_v2.POOLS_RESOURCE) self.start_tests() class MemberManagerTest(ManagerTest): def __init__(self, parent, manager, model): super(MemberManagerTest, self).__init__(parent, manager, model) self.object_name = netscaler_driver_v2.MEMBER_RESOURCE self.resource_path = "%s/%s/%s/%s" % ( netscaler_driver_v2.RESOURCE_PREFIX, netscaler_driver_v2.POOLS_RESOURCE, model.pool.id, netscaler_driver_v2.MEMBERS_RESOURCE) self.start_tests() class MonitorManagerTest(ManagerTest): def __init__(self, parent, manager, model): super(MonitorManagerTest, self).__init__(parent, manager, model) self.object_name = netscaler_driver_v2.MONITOR_RESOURCE self.resource_path = "%s/%s" % ( netscaler_driver_v2.RESOURCE_PREFIX, netscaler_driver_v2.MONITORS_RESOURCE) self.start_tests() class TestNetScalerLoadBalancerDriverV2( 
test_db_loadbalancer.LoadBalancerPluginDbTestCase): def _create_fake_models(self): id = 'name-001' lb = data_models.LoadBalancer(id=id) listener = data_models.Listener(id=id, loadbalancer=lb) pool = data_models.Pool(id=id, listener=listener) member = data_models.Member(id=id, pool=pool) hm = data_models.HealthMonitor(id=id, pool=pool) lb.listeners = [listener] listener.default_pool = pool pool.members = [member] pool.healthmonitor = hm return lb def _get_fake_network_info(self): network_info = {} network_info["network_id"] = "network_id_1" network_info["subnet_id"] = "subnet_id_1" return network_info def setUp(self): super(TestNetScalerLoadBalancerDriverV2, self).setUp() self.context = mock.Mock() self.plugin = mock.Mock() self.lb = self._create_fake_models() mock.patch.object(netscaler_driver_v2, 'LOG').start() network_info_mock = mock.patch.object( netscaler_driver_v2.PayloadPreparer, "get_network_info").start() network_info_mock.return_value = self._get_fake_network_info() mock.patch.object( netscaler_driver_v2.NetScalerLoadBalancerDriverV2, "_init_status_collection").start() """mock the NSClient class (REST client)""" client_mock_cls = mock.patch(NCC_CLIENT_CLASS).start() """mock the REST methods of the NSClient class""" self.client_mock_instance = client_mock_cls.return_value self.create_resource_mock = self.client_mock_instance.create_resource self.create_resource_mock.side_effect = mock_create_resource_func self.update_resource_mock = self.client_mock_instance.update_resource self.update_resource_mock.side_effect = mock_update_resource_func self.retrieve_resource_mock = (self.client_mock_instance .retrieve_resource) self.retrieve_resource_mock.side_effect = mock_retrieve_resource_func self.remove_resource_mock = self.client_mock_instance.remove_resource self.remove_resource_mock.side_effect = mock_remove_resource_func self.driver = netscaler_driver_v2.NetScalerLoadBalancerDriverV2( self.plugin) self.assertTrue(client_mock_cls.called) def 
test_load_balancer_ops(self): LoadBalancerManagerTest(self, self.driver.load_balancer, self.lb) def test_listener_ops(self): ListenerManagerTest(self, self.driver.listener, self.lb.listeners[0]) def test_pool_ops(self): PoolManagerTest(self, self.driver.pool, self.lb.listeners[0].default_pool) def test_member_ops(self): MemberManagerTest(self, self.driver.member, self.lb.listeners[0].default_pool.members[0]) def test_health_monitor_ops(self): MonitorManagerTest(self, self.driver.health_monitor, self.lb.listeners[0].default_pool.healthmonitor) def mock_create_resource_func(*args, **kwargs): return 201, {} def mock_update_resource_func(*args, **kwargs): return 202, {} def mock_retrieve_resource_func(*args, **kwargs): return 200, {} def mock_remove_resource_func(*args, **kwargs): return 200, {} neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/__init__.py0000664000567000056710000000000012701407726026172 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/kemptechnologies/0000775000567000056710000000000012701410110027410 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/kemptechnologies/test_driver_v2.py0000664000567000056710000000674612701407726032763 0ustar jenkinsjenkins00000000000000# Copyright 2015, Shane McGough, KEMPtechnologies # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sys import mock from neutron import context from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancerv2 with mock.patch.dict(sys.modules, {'kemptech_openstack_lbaas': mock.Mock()}): from neutron_lbaas.drivers.kemptechnologies import driver_v2 class FakeModel(object): def __init__(self, id): self.id = id self.address = '1.1.1.1' self.tenant_id = "copying-pre-existing-work-is-easy" class ManagerTest(object): def __init__(self, parent, manager, model, mocked_root): self.parent = parent self.context = parent.context self.driver = parent.driver self.manager = manager self.model = model self.mocked_root = mocked_root self.create(model) self.update(model, model) self.delete(model) def create(self, model): self.manager.create(self.context, model) self.mocked_root.create.assert_called_with(self.context, model) def update(self, old_model, model): self.manager.update(self.context, old_model, model) self.mocked_root.update.assert_called_with(self.context, old_model, model) def delete(self, model): self.manager.delete(self.context, model) self.mocked_root.delete.assert_called_with(self.context, model) def refresh(self): self.manager.refresh(self.context, self.model) self.mocked_root.refresh.assert_called_with(self.context, self.model) def stats(self): self.manager.stats(self.context, self.model) self.mocked_root.stats.assert_called_with(self.context, self.model) class TestKempLoadMasterDriver(test_db_loadbalancerv2.LbaasPluginDbTestCase): def setUp(self): super(TestKempLoadMasterDriver, self).setUp() self.context = context.get_admin_context() self.plugin = mock.Mock() self.driver = driver_v2.KempLoadMasterDriver(self.plugin) self.driver.kemptech = mock.Mock() def test_load_balancer_ops(self): m = ManagerTest(self, self.driver.load_balancer, FakeModel("load_balancer-kemptech"), self.driver.kemptech.load_balancer) m.refresh() m.stats() def test_listener_ops(self): ManagerTest(self, self.driver.listener, FakeModel("listener-kemptech"), 
self.driver.kemptech.listener) def test_pool_ops(self): ManagerTest(self, self.driver.pool, FakeModel("pool-kemptech"), self.driver.kemptech.pool) def test_member_ops(self): ManagerTest(self, self.driver.member, FakeModel("member-kemptech"), self.driver.kemptech.member) def test_health_monitor_ops(self): ManagerTest(self, self.driver.health_monitor, FakeModel("health_monitor-kemptech"), self.driver.kemptech.health_monitor) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/kemptechnologies/__init__.py0000664000567000056710000000000012701407726031532 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/logging_noop/0000775000567000056710000000000012701410110026531 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/logging_noop/test_logging_noop_driver.py0000664000567000056710000001215112701407726034221 0ustar jenkinsjenkins00000000000000# Copyright 2014, Doug Wiegley (dougwig), A10 Networks # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from neutron import context from neutron_lbaas.drivers.logging_noop import driver from neutron_lbaas.services.loadbalancer import data_models from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer log_path = ('neutron_lbaas.drivers.logging_noop.driver.LOG') class FakeModel(object): def __init__(self, id): self.id = id def attached_to_loadbalancer(self): return True def patch_manager(func): @mock.patch(log_path) def wrapper(*args): log_mock = args[-1] manager_test = args[0] model = args[1] parent = manager_test.parent driver = parent.driver driver.plugin.reset_mock() func(*args[:-1]) s = str(log_mock.mock_calls[0]) parent.assertEqual("call.debug(", s[:11]) parent.assertTrue(s.index(model.id) != -1, msg="Model ID not found in log") return wrapper class ManagerTest(object): def __init__(self, parent, manager, model): self.parent = parent self.manager = manager self.create(model) self.update(model, model) self.delete(model) @patch_manager def create(self, model): self.manager.create(self.parent.context, model) @patch_manager def update(self, old_model, model): self.manager.update(self.parent.context, old_model, model) @patch_manager def delete(self, model): self.manager.delete(self.parent.context, model) class ManagerTestWithUpdates(ManagerTest): def __init__(self, parent, manager, model): self.parent = parent self.manager = manager self.create(model) self.update(model, model) self.delete(model) @patch_manager def create(self, model): self.manager.create(self.parent.context, model) @patch_manager def update(self, old_model, model): self.manager.update(self.parent.context, old_model, model) @patch_manager def delete(self, model): self.manager.delete(self.parent.context, model) class LoadBalancerManagerTest(ManagerTestWithUpdates): def __init__(self, parent, manager, model): super(LoadBalancerManagerTest, self).__init__(parent, manager, model) self.create_and_allocate_vip(model) self.refresh(model) self.stats(model) @patch_manager def 
allocates_vip(self): self.manager.allocates_vip() @patch_manager def create_and_allocate_vip(self, model): self.manager.create(self.parent.context, model) @patch_manager def refresh(self, model): self.manager.refresh(self.parent.context, model) @patch_manager def stats(self, model): dummy_stats = { "bytes_in": 0, "bytes_out": 0, "active_connections": 0, "total_connections": 0 } h = self.manager.stats(self.parent.context, model) self.parent.assertEqual(dummy_stats, h) class TestLoggingNoopLoadBalancerDriver( test_db_loadbalancer.LoadBalancerPluginDbTestCase): def _create_fake_models(self): id = 'name-001' lb = data_models.LoadBalancer(id=id) pool = data_models.Pool(id=id, loadbalancer=lb) listener = data_models.Listener(id=id, loadbalancer=lb) member = data_models.Member(id=id, pool=pool) hm = data_models.HealthMonitor(id=id, pool=pool) lb.listeners = [listener] lb.pools = [pool] listener.default_pool = pool pool.members = [member] pool.healthmonitor = hm return lb def setUp(self): super(TestLoggingNoopLoadBalancerDriver, self).setUp() self.context = context.get_admin_context() self.plugin = mock.Mock() self.driver = driver.LoggingNoopLoadBalancerDriver(self.plugin) self.lb = self._create_fake_models() def test_load_balancer_ops(self): LoadBalancerManagerTest(self, self.driver.load_balancer, self.lb) def test_listener_ops(self): ManagerTest(self, self.driver.listener, self.lb.listeners[0]) def test_pool_ops(self): ManagerTestWithUpdates(self, self.driver.pool, self.lb.listeners[0].default_pool) def test_member_ops(self): ManagerTestWithUpdates(self, self.driver.member, self.lb.listeners[0].default_pool.members[0]) def test_health_monitor_ops(self): ManagerTest(self, self.driver.health_monitor, self.lb.listeners[0].default_pool.healthmonitor) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/logging_noop/__init__.py0000664000567000056710000000000012701407726030653 0ustar 
jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/vmware/0000775000567000056710000000000012701410110025351 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/vmware/test_edge_driver_v2.py0000664000567000056710000001057512701407726031703 0ustar jenkinsjenkins00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron import context as ncontext from neutron_lbaas.drivers.vmware import edge_driver_v2 from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer DUMMY_CERT = {'id': 'fake_id'} class FakeModel(object): def __init__(self, id): self.id = id class ManagerTest(object): def __init__(self, context, manager, model, mocked_nsxv): self.context = context self.manager = manager self.model = model self.mocked_nsxv = mocked_nsxv self.create(model) self.update(model, model) self.delete(model) def create(self, model): self.manager.create(self.context, model) if model.id == 'listener': model.default_tls_container_id = 'fake_id' self.mocked_nsxv.create.assert_called_with( self.context, model, certificate=DUMMY_CERT) else: self.mocked_nsxv.create.assert_called_with(self.context, model) def update(self, old_model, model): self.manager.update(self.context, old_model, model) if model.id == 'listener': self.mocked_nsxv.update.assert_called_with( self.context, old_model, model, certificate=DUMMY_CERT) else: 
self.mocked_nsxv.update.assert_called_with(self.context, old_model, model) def delete(self, model): self.manager.delete(self.context, model) self.mocked_nsxv.delete.assert_called_with(self.context, model) def refresh(self): self.manager.refresh(self.context, self.model) self.mocked_nsxv.refresh.assert_called_with(self.context, self.model) def stats(self): self.manager.stats(self.context, self.model) self.mocked_nsxv.stats.assert_called_with(self.context, self.model) class TestVMWareEdgeLoadBalancerDriverV2( test_db_loadbalancer.LoadBalancerPluginDbTestCase): def setUp(self): super(TestVMWareEdgeLoadBalancerDriverV2, self).setUp() self.context = ncontext.get_admin_context() self.driver = edge_driver_v2.EdgeLoadBalancerDriverV2(self.plugin) def _patch_manager(self, mgr): mgr.driver = mock.Mock() mgr.driver.plugin.db = mock.Mock() mgr.driver.plugin.db._core_plugin = mock.Mock() mgr.driver.plugin.db._core_plugin.nsx_v = mock.Mock() return mgr.driver.plugin.db._core_plugin.nsx_v def test_load_balancer_ops(self): mock_nsxv_driver = self._patch_manager(self.driver.load_balancer) m = ManagerTest(self, self.driver.load_balancer, FakeModel("loadbalancer"), mock_nsxv_driver.loadbalancer) m.refresh() m.stats() def test_listener_ops(self): mock_nsxv_driver = self._patch_manager(self.driver.listener) self.driver.listener._get_default_cert = mock.Mock() self.driver.listener._get_default_cert.return_value = DUMMY_CERT listener = FakeModel("listener") listener.default_tls_container_id = None ManagerTest(self, self.driver.listener, listener, mock_nsxv_driver.listener) def test_pool_ops(self): mock_nsxv_driver = self._patch_manager(self.driver.pool) ManagerTest(self, self.driver.pool, FakeModel("pool"), mock_nsxv_driver.pool) def test_member_ops(self): mock_nsxv_driver = self._patch_manager(self.driver.member) ManagerTest(self, self.driver.member, FakeModel("member"), mock_nsxv_driver.member) def test_health_monitor_ops(self): mock_nsxv_driver = 
self._patch_manager(self.driver.health_monitor) ManagerTest(self, self.driver.health_monitor, FakeModel("hm"), mock_nsxv_driver.healthmonitor) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/vmware/__init__.py0000664000567000056710000000000012701407726027473 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/radware/0000775000567000056710000000000012701410110025475 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/radware/__init__.py0000664000567000056710000000000012701407726027617 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/radware/test_v2_plugin_driver.py0000664000567000056710000015377412701407726032432 0ustar jenkinsjenkins00000000000000# Copyright 2015 Radware LTD. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import copy import mock import re from neutron import context from neutron import manager from neutron.plugins.common import constants from oslo_config import cfg from oslo_serialization import jsonutils from six.moves import queue as Queue from neutron_lbaas.common.cert_manager import cert_manager from neutron_lbaas.drivers.radware import exceptions as r_exc from neutron_lbaas.drivers.radware import v2_driver from neutron_lbaas.extensions import loadbalancerv2 from neutron_lbaas.services.loadbalancer import constants as lb_con from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancerv2 GET_200 = ('/api/workflow/', '/api/workflowTemplate') SERVER_DOWN_CODES = (-1, 301, 307) class QueueMock(Queue.Queue): def __init__(self, completion_handler): self.completion_handler = completion_handler super(QueueMock, self).__init__() def put_nowait(self, oper): self.completion_handler(oper) def _recover_function_mock(action, resource, data, headers, binary=False): pass def rest_call_function_mock(action, resource, data, headers, binary=False): if rest_call_function_mock.RESPOND_WITH_ERROR: return 400, 'error_status', 'error_description', None if rest_call_function_mock.RESPOND_WITH_SERVER_DOWN in SERVER_DOWN_CODES: val = rest_call_function_mock.RESPOND_WITH_SERVER_DOWN return val, 'error_status', 'error_description', None if action == 'GET': return _get_handler(resource) elif action == 'DELETE': return _delete_handler(resource) elif action == 'POST': return _post_handler(resource, binary) else: return 0, None, None, None def _get_handler(resource): if resource.startswith(GET_200[1]): return 200, '', '', rest_call_function_mock.WF_TEMPLATES_TO_RETURN if resource.startswith(GET_200[0]): if rest_call_function_mock.WORKFLOW_MISSING: data = jsonutils.loads('{"complete":"True", "success": "True"}') return 404, '', '', data elif resource.endswith('parameters'): return 200, '', '', {'stats': {'bytes_in': 100, 'total_connections': 2, 
'active_connections': 1, 'bytes_out': 200}} else: return 200, '', '', '' if resource.startswith(GET_200): return 200, '', '', '' else: data = jsonutils.loads('{"complete":"True", "success": "True"}') return 202, '', '', data def _delete_handler(resource): return 404, '', '', {'message': 'Not Found'} def _post_handler(resource, binary): if re.search(r'/api/workflow/.+/action/.+', resource): data = jsonutils.loads('{"uri":"some_uri"}') return 202, '', '', data elif re.search(r'/api/service\?name=.+', resource): data = jsonutils.loads('{"links":{"actions":{"provision":"someuri"}}}') return 201, '', '', data elif binary: return 201, '', '', '' else: return 202, '', '', '' RADWARE_PROVIDER = ('LOADBALANCERV2:radwarev2:neutron_lbaas.' 'drivers.radware.v2_driver.' 'RadwareLBaaSV2Driver:default') WF_SRV_PARAMS = { "name": "_REPLACE_", "tenantId": "_REPLACE_", "haPair": False, "sessionMirroringEnabled": False, "islVlan": -1, "primary": { "capacity": { "throughput": 1000, "sslThroughput": 100, "compressionThroughput": 100, "cache": 20}, "network": { "type": "portgroup", "portgroups": "_REPLACE_"}, "adcType": "VA", "acceptableAdc": "Exact"}, "resourcePoolIds": []} WF_CREATE_PARAMS = {'parameters': {"provision_service": True, "configure_l3": True, "configure_l4": True, "twoleg_enabled": False, "ha_network_name": "HA-Network", "ha_ip_pool_name": "default", "allocate_ha_vrrp": True, "allocate_ha_ips": True, "data_port": 1, "data_ip_address": "192.168.200.99", "data_ip_mask": "255.255.255.0", "gateway": "192.168.200.1", "ha_port": 2}} WF_APPLY_EMPTY_LB_PARAMS = {'parameters': { 'loadbalancer': {'listeners': [], 'pools': [], 'admin_state_up': True, 'pip_address': u'10.0.0.2', 'vip_address': u'10.0.0.2'}}} class TestLBaaSDriverBase( test_db_loadbalancerv2.LbaasPluginDbTestCase): def setUp(self): super(TestLBaaSDriverBase, self).setUp( lbaas_provider=RADWARE_PROVIDER) loaded_plugins = manager.NeutronManager().get_service_plugins() self.plugin_instance = 
loaded_plugins[constants.LOADBALANCERV2] self.driver = self.plugin_instance.drivers['radwarev2'] class TestLBaaSDriverRestClient(TestLBaaSDriverBase): def setUp(self): cfg.CONF.set_override('vdirect_address', '1.1.1.1', group='radwarev2') cfg.CONF.set_override('ha_secondary_address', '1.1.1.2', group='radwarev2') super(TestLBaaSDriverRestClient, self).setUp() self.flip_servers_mock = mock.Mock( return_value=None) self.recover_mock = mock.Mock( side_effect=_recover_function_mock) self.orig_recover = self.driver.rest_client._recover self.orig_flip_servers = self.driver.rest_client._flip_servers self.driver.rest_client._flip_servers = self.flip_servers_mock self.driver.rest_client._recover = self.recover_mock def test_recover_was_called(self): """Call REST client which fails and verify _recover is called.""" self.driver.rest_client.call('GET', '/api/workflowTemplate', None, None) self.recover_mock.assert_called_once_with('GET', '/api/workflowTemplate', None, None, False) def test_flip_servers(self): server = self.driver.rest_client.server sec_server = self.driver.rest_client.secondary_server self.driver.rest_client._recover = self.orig_recover self.driver.rest_client._flip_servers = self.orig_flip_servers self.driver.rest_client.call('GET', '/api/workflowTemplate', None, None) self.assertEqual(server, self.driver.rest_client.secondary_server) self.assertEqual(sec_server, self.driver.rest_client.server) class CertMock(cert_manager.Cert): def __init__(self, cert_container): pass def get_certificate(self): return "certificate" def get_intermediates(self): return "intermediates" def get_private_key(self): return "private_key" def get_private_key_passphrase(self): return "private_key_passphrase" class TestLBaaSDriver(TestLBaaSDriverBase): def setUp(self): super(TestLBaaSDriver, self).setUp() templates_to_return = [{'name': self.driver.workflow_template_name}] for t in self.driver.child_workflow_template_names: templates_to_return.append({'name': t}) 
rest_call_function_mock.__dict__.update( {'RESPOND_WITH_ERROR': False, 'WORKFLOW_MISSING': True, 'WORKFLOW_TEMPLATE_MISSING': True, 'RESPOND_WITH_SERVER_DOWN': 200, 'WF_TEMPLATES_TO_RETURN': templates_to_return}) self.operation_completer_start_mock = mock.Mock( return_value=None) self.operation_completer_join_mock = mock.Mock( return_value=None) self.driver_rest_call_mock = mock.Mock( side_effect=rest_call_function_mock) self.flip_servers_mock = mock.Mock( return_value=None) self.recover_mock = mock.Mock( side_effect=_recover_function_mock) self.driver.completion_handler.start = ( self.operation_completer_start_mock) self.driver.completion_handler.join = ( self.operation_completer_join_mock) self.driver.rest_client.call = self.driver_rest_call_mock self.driver.rest_client._call = self.driver_rest_call_mock self.driver.completion_handler.rest_client.call = ( self.driver_rest_call_mock) self.driver.queue = QueueMock( self.driver.completion_handler.handle_operation_completion) self.addCleanup(self.driver.completion_handler.join) def test_verify_workflow_templates(self): templates_to_return = [] for t in self.driver.child_workflow_template_names: templates_to_return.append({'name': t}) rest_call_function_mock.__dict__.update( {'WF_TEMPLATES_TO_RETURN': templates_to_return}) message = r_exc.WorkflowTemplateMissing.message % \ {'workflow_template': self.driver.workflow_template_name} try: self.driver._verify_workflow_templates() except r_exc.WorkflowTemplateMissing as e: self.assertEqual(message, e.msg) templates_to_return.append( {'name': self.driver.workflow_template_name}) rest_call_function_mock.__dict__.update( {'WF_TEMPLATES_TO_RETURN': templates_to_return}) try: self.driver._verify_workflow_templates() self.assertTrue(True) except r_exc.WorkflowTemplateMissing as e: self.assertTrue(False) def test_wf_created_on_first_member_creation(self): with self.subnet(cidr='10.0.0.0/24') as vip_sub: with self.loadbalancer(subnet=vip_sub) as lb: lb_id = 
lb['loadbalancer']['id'] with self.listener( loadbalancer_id=lb_id) as listener: with self.pool( protocol=lb_con.PROTOCOL_HTTP, listener_id=listener['listener']['id']) as pool: self.driver_rest_call_mock.assert_has_calls([]) with self.member(pool_id=pool['pool']['id'], subnet=vip_sub, address='10.0.1.10'): calls = [ mock.call( 'POST', '/api/workflow/LB_' + lb_id + '/action/apply', mock.ANY, v2_driver.TEMPLATE_HEADER) ] self.driver_rest_call_mock.assert_has_calls(calls) def test_wf_deleted_on_lb_deletion(self): with self.subnet(cidr='10.0.0.0/24') as vip_sub: with self.loadbalancer(subnet=vip_sub) as lb: get_calls = [ mock.call('GET', u'/api/workflow/LB_' + lb['loadbalancer']['id'], None, None)] with self.listener( loadbalancer_id=lb['loadbalancer']['id']) as listener: with self.pool( protocol=lb_con.PROTOCOL_HTTP, listener_id=listener['listener']['id']) as pool: with self.member(pool_id=pool['pool']['id'], subnet=vip_sub, address='10.0.1.10'): self.driver_rest_call_mock.reset_mock() rest_call_function_mock.__dict__.update( {'WORKFLOW_MISSING': False}) self.driver_rest_call_mock.assert_has_calls(get_calls) self.driver_rest_call_mock.reset_mock() self.driver_rest_call_mock.assert_has_calls(get_calls) self.driver_rest_call_mock.reset_mock() self.driver_rest_call_mock.assert_has_calls(get_calls) self.driver_rest_call_mock.reset_mock() self.driver_rest_call_mock.assert_any_call( 'DELETE', u'/api/workflow/LB_' + lb['loadbalancer']['id'], None, None) def test_lb_crud(self): with self.subnet(cidr='10.0.0.0/24') as s: with self.loadbalancer(subnet=s, no_delete=True) as lb: lb_id = lb['loadbalancer']['id'] with self.listener(loadbalancer_id=lb_id) as l: with self.pool( protocol=lb_con.PROTOCOL_HTTP, listener_id=l['listener']['id']) as p: self.driver_rest_call_mock.assert_has_calls([]) self.plugin_instance.update_loadbalancer( context.get_admin_context(), lb_id, {'loadbalancer': lb}) self.driver_rest_call_mock.assert_has_calls([]) lb_db = 
self.plugin_instance.db.get_loadbalancer( context.get_admin_context(), lb_id) self.driver.load_balancer.refresh( context.get_admin_context(), lb_db) self.driver_rest_call_mock.assert_has_calls([]) with self.member( no_delete=True, pool_id=p['pool']['id'], subnet=s, address='10.0.1.10') as m: m_data = { "id": m['member']['id'], "address": "10.0.1.10", "protocol_port": 80, "weight": 1, "admin_state_up": True, "subnet": "255.255.255.255", "mask": "255.255.255.255", "gw": "255.255.255.255", "admin_state_up": True} pool_data = { "id": p['pool']['id'], "protocol": lb_con.PROTOCOL_HTTP, "lb_algorithm": "ROUND_ROBIN", "admin_state_up": True, "members": [m_data]} def_pool_data = { "id": p['pool']['id']} wf_apply_params = {'parameters': { 'listeners': [{ "id": l['listener']['id'], "admin_state_up": True, "protocol_port": 80, "protocol": lb_con.PROTOCOL_HTTP, "connection_limit": -1, "admin_state_up": True, "default_pool": def_pool_data, "l7_policies": []}], "pools": [pool_data], "admin_state_up": True, "pip_address": "10.0.0.2", "vip_address": "10.0.0.2"}} calls = [ mock.call( 'POST', '/api/workflowTemplate/' + 'os_lb_v2?name=LB_' + lb_id, mock.ANY, v2_driver.TEMPLATE_HEADER), mock.call( 'POST', '/api/workflow/LB_' + lb_id + '/action/apply', wf_apply_params, v2_driver.TEMPLATE_HEADER) ] self.driver_rest_call_mock.assert_has_calls(calls) self.driver_rest_call_mock.reset_mock() rest_call_function_mock.__dict__.update( {'WORKFLOW_MISSING': False}) calls = [ mock.call( 'POST', '/api/workflow/LB_' + lb_id + '/action/apply', wf_apply_params, v2_driver.TEMPLATE_HEADER) ] self.plugin_instance.update_loadbalancer( context.get_admin_context(), lb_id, {'loadbalancer': lb}) self.driver_rest_call_mock.assert_has_calls(calls) self.driver_rest_call_mock.reset_mock() lb_db = self.plugin_instance.db.get_loadbalancer( context.get_admin_context(), lb_id) self.driver.load_balancer.refresh( context.get_admin_context(), lb_db) self.driver_rest_call_mock.assert_has_calls(calls) 
self.driver_rest_call_mock.reset_mock() self.plugin_instance.delete_loadbalancer( context.get_admin_context(), lb_id) self.driver_rest_call_mock.assert_any_call( 'DELETE', '/api/workflow/LB_' + lb_id, None, None) self.assertRaises(loadbalancerv2.EntityNotFound, self.plugin_instance.get_loadbalancer, context.get_admin_context(), lb_id) def test_lb_stats(self): with self.subnet(cidr='10.0.0.0/24') as s: with self.loadbalancer(subnet=s) as lb: lb_id = lb['loadbalancer']['id'] with self.listener(loadbalancer_id=lb_id) as l: with self.pool( protocol=lb_con.PROTOCOL_HTTP, listener_id=l['listener']['id']) as p: with self.member( no_delete=True, pool_id=p['pool']['id'], subnet=s, address='10.0.1.10'): rest_call_function_mock.__dict__.update( {'WORKFLOW_MISSING': False}) stats = self.plugin_instance.stats( context.get_admin_context(), lb_id,) self.assertEqual({'stats': {'bytes_in': 100, 'total_connections': 2, 'active_connections': 1, 'bytes_out': 200}}, stats) def test_member_crud(self): with self.subnet(cidr='10.0.0.0/24') as s: with self.loadbalancer(subnet=s) as lb: lb_id = lb['loadbalancer']['id'] with self.listener(loadbalancer_id=lb_id) as l: with self.pool( protocol=lb_con.PROTOCOL_HTTP, listener_id=l['listener']['id']) as p: with contextlib.nested( self.member( no_delete=True, pool_id=p['pool']['id'], subnet=s, address='10.0.1.10'), self.member( no_delete=True, pool_id=p['pool']['id'], subnet=s, address='10.0.1.20')) as (m1, m2): m1_data = { "id": m1['member']['id'], "address": "10.0.1.10", "protocol_port": 80, "weight": 1, "admin_state_up": True, "subnet": "255.255.255.255", "mask": "255.255.255.255", "gw": "255.255.255.255", "admin_state_up": True} m2_data = { "id": m2['member']['id'], "address": "10.0.1.20", "protocol_port": 80, "weight": 1, "admin_state_up": True, "subnet": "255.255.255.255", "mask": "255.255.255.255", "gw": "255.255.255.255", "admin_state_up": True} pool_data = { "id": p['pool']['id'], "protocol": lb_con.PROTOCOL_HTTP, "lb_algorithm": 
"ROUND_ROBIN", "admin_state_up": True, "members": [m1_data, m2_data]} def_pool_data = { "id": p['pool']['id']} listener_data = { "id": l['listener']['id'], "admin_state_up": True, "protocol_port": 80, "protocol": lb_con.PROTOCOL_HTTP, "connection_limit": -1, "admin_state_up": True, "default_pool": def_pool_data, "l7_policies": []} wf_apply_params = {'parameters': { 'listeners': [listener_data], 'pools': [pool_data], "admin_state_up": True, "pip_address": "10.0.0.2", "vip_address": "10.0.0.2"}} calls = [ mock.call( 'POST', '/api/workflowTemplate/' + 'os_lb_v2?name=LB_' + lb_id, mock.ANY, v2_driver.TEMPLATE_HEADER), mock.call( 'POST', '/api/workflow/LB_' + lb_id + '/action/apply', wf_apply_params, v2_driver.TEMPLATE_HEADER) ] self.driver_rest_call_mock.assert_has_calls(calls) self.driver_rest_call_mock.reset_mock() member = self.plugin_instance.db.get_pool_member( context.get_admin_context(), m1['member']['id']).to_dict(pool=False) member['weight'] = 2 m1_data['weight'] = 2 self.plugin_instance.update_pool_member( context.get_admin_context(), m1['member']['id'], p['pool']['id'], {'member': member}) calls = [ mock.call( 'POST', '/api/workflow/LB_' + lb_id + '/action/apply', wf_apply_params, v2_driver.TEMPLATE_HEADER) ] self.driver_rest_call_mock.assert_has_calls(calls) self.driver_rest_call_mock.reset_mock() self.plugin_instance.delete_pool_member( context.get_admin_context(), m2['member']['id'], p['pool']['id']) pool_data["members"] = [m1_data] calls = [ mock.call( 'POST', '/api/workflow/LB_' + lb_id + '/action/apply', wf_apply_params, v2_driver.TEMPLATE_HEADER) ] self.driver_rest_call_mock.assert_has_calls(calls) lb = self.plugin_instance.db.get_loadbalancer( context.get_admin_context(), lb_id).to_dict(listener=False) self.assertEqual('ACTIVE', lb['provisioning_status']) def test_build_objects_with_tls(self): with self.subnet(cidr='10.0.0.0/24') as vip_sub: with self.loadbalancer(subnet=vip_sub) as lb: lb_id = lb['loadbalancer']['id'] with contextlib.nested( 
mock.patch('neutron_lbaas.services.loadbalancer.plugin.' 'cert_parser', autospec=True), mock.patch('neutron_lbaas.services.loadbalancer.plugin.' 'CERT_MANAGER_PLUGIN.CertManager', autospec=True) ) as (cert_parser_mock, cert_manager_mock): cert_mock = mock.Mock(spec=cert_manager.Cert) cert_mock.get_certificate.return_value = 'certificate' cert_mock.get_intermediates.return_value = 'intermediates' cert_mock.get_private_key.return_value = 'private_key' cert_mock.get_private_key_passphrase.return_value = \ 'private_key_passphrase' cert_manager_mock().get_cert.return_value = cert_mock cert_parser_mock.validate_cert.return_value = True with self.listener( protocol=lb_con.PROTOCOL_TERMINATED_HTTPS, loadbalancer_id=lb_id, default_tls_container_ref='def1', sni_container_refs=['sni1', 'sni2']) as listener: with self.pool( protocol=lb_con.PROTOCOL_HTTP, listener_id=listener['listener']['id']) as pool: with self.member(pool_id=pool['pool']['id'], subnet=vip_sub, address='10.0.1.10') as m: wf_srv_params = copy.deepcopy(WF_SRV_PARAMS) wf_params = copy.deepcopy(WF_CREATE_PARAMS) wf_srv_params['name'] = 'srv_' + ( vip_sub['subnet']['network_id']) wf_srv_params['tenantId'] = self._tenant_id wf_srv_params['primary']['network'][ 'portgroups'] = [vip_sub['subnet'][ 'network_id']] wf_params['parameters']['service_params'] = ( wf_srv_params) m_data = { "id": m['member']['id'], "address": "10.0.1.10", "protocol_port": 80, "weight": 1, "admin_state_up": True, "subnet": "255.255.255.255", "mask": "255.255.255.255", "gw": "255.255.255.255", 'admin_state_up': True} default_tls_cert_data = { 'id': 'def1', 'certificate': 'certificate', 'intermediates': 'intermediates', 'private_key': 'private_key', 'passphrase': 'private_key_passphrase'} sni1_tls_cert_data = { 'id': 'sni1', 'position': 0, 'certificate': 'certificate', 'intermediates': 'intermediates', 'private_key': 'private_key', 'passphrase': 'private_key_passphrase'} sni2_tls_cert_data = { 'id': 'sni2', 'position': 1, 'certificate': 
'certificate', 'intermediates': 'intermediates', 'private_key': 'private_key', 'passphrase': 'private_key_passphrase'} pool_data = { "id": pool['pool']['id'], "protocol": lb_con.PROTOCOL_HTTP, "lb_algorithm": "ROUND_ROBIN", "admin_state_up": True, "members": [m_data]} def_pool_data = { "id": pool['pool']['id']} wf_apply_one_leg_params = {'parameters': { 'listeners': [{ "id": listener['listener']['id'], "admin_state_up": True, "protocol_port": 80, "protocol": lb_con.PROTOCOL_TERMINATED_HTTPS, "connection_limit": -1, "default_pool": def_pool_data, "default_tls_certificate": default_tls_cert_data, "sni_tls_certificates": [ sni1_tls_cert_data, sni2_tls_cert_data], "l7_policies": []}], "pools": [pool_data], "admin_state_up": True, "pip_address": "10.0.0.2", "vip_address": "10.0.0.2"}} calls = [ mock.call('GET', '/api/workflow/LB_' + lb_id, None, None), mock.call( 'POST', '/api/workflowTemplate/' + 'os_lb_v2?name=LB_' + lb_id, wf_params, v2_driver.TEMPLATE_HEADER), mock.call( 'POST', '/api/workflow/LB_' + lb_id + '/action/apply', wf_apply_one_leg_params, v2_driver.TEMPLATE_HEADER) ] self.driver_rest_call_mock.assert_has_calls( calls, any_order=True) # This test some times fails with same input. 
# mock calls are not found sometimes, will be back after fix def _test_build_objects_with_l7(self): with self.subnet(cidr='10.0.0.0/24') as vip_sub: with self.loadbalancer(subnet=vip_sub) as lb: lb_id = lb['loadbalancer']['id'] with self.listener( protocol=lb_con.PROTOCOL_HTTP, loadbalancer_id=lb_id) as listener: with contextlib.nested( self.pool( protocol=lb_con.PROTOCOL_HTTP, listener_id=listener['listener']['id']), self.pool( protocol=lb_con.PROTOCOL_HTTP, loadbalancer_id=lb_id)) as (def_pool, pol_pool): with self.l7policy( listener['listener']['id'], action=lb_con.L7_POLICY_ACTION_REDIRECT_TO_POOL, redirect_pool_id=pol_pool['pool']['id']) as policy: self.driver_rest_call_mock.reset_mock() with contextlib.nested( self.l7policy_rule( l7policy_id=policy['l7policy']['id'], key='key1', value='val1'), self.l7policy_rule( l7policy_id=policy['l7policy']['id'], key='key2', value='val2'), self.member( pool_id=def_pool['pool']['id'], subnet=vip_sub, address='10.0.1.10'), self.member( pool_id=pol_pool['pool']['id'], subnet=vip_sub, address='10.0.1.20')) as ( rule1, rule2, def_m, pol_m): wf_srv_params = copy.deepcopy(WF_SRV_PARAMS) wf_params = copy.deepcopy(WF_CREATE_PARAMS) wf_srv_params['name'] = 'srv_' + ( vip_sub['subnet']['network_id']) wf_srv_params['tenantId'] = self._tenant_id wf_srv_params['primary']['network'][ 'portgroups'] = [vip_sub['subnet'][ 'network_id']] wf_params['parameters']['service_params'] = ( wf_srv_params) rule1_data = { 'id': rule1['rule']['id'], 'type': lb_con.L7_RULE_TYPE_HOST_NAME, 'compare_type': lb_con.L7_RULE_COMPARE_TYPE_EQUAL_TO, 'admin_state_up': True, 'key': 'key1', 'value': 'val1'} rule2_data = { 'id': rule2['rule']['id'], 'type': lb_con.L7_RULE_TYPE_HOST_NAME, 'compare_type': lb_con.L7_RULE_COMPARE_TYPE_EQUAL_TO, 'admin_state_up': True, 'key': 'key2', 'value': 'val2'} l7_policy_data = { 'redirect_pool_id': pol_pool['pool']['id'], 'rules': [rule1_data, rule2_data], 'redirect_url': None, 'action': lb_con. 
L7_POLICY_ACTION_REDIRECT_TO_POOL, 'position': 1, 'admin_state_up': True, 'id': policy['l7policy']['id']} def_m_data = { 'id': def_m['member']['id'], 'address': "10.0.1.10", 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'subnet': '255.255.255.255', 'mask': '255.255.255.255', 'gw': '255.255.255.255', 'admin_state_up': True} pol_m_data = { 'id': pol_m['member']['id'], 'address': "10.0.1.20", 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'subnet': '255.255.255.255', 'mask': '255.255.255.255', 'gw': '255.255.255.255', 'admin_state_up': True} def_pool_data = { 'id': def_pool['pool']['id']} default_pool_data = { 'id': def_pool['pool']['id'], 'protocol': lb_con.PROTOCOL_HTTP, 'lb_algorithm': 'ROUND_ROBIN', 'admin_state_up': True, 'members': [def_m_data]} pol_pool_data = { 'id': pol_pool['pool']['id'], 'protocol': lb_con.PROTOCOL_HTTP, 'lb_algorithm': 'ROUND_ROBIN', 'admin_state_up': True, 'members': [pol_m_data]} wf_apply_one_leg_params = {'parameters': { 'listeners': [{ 'id': listener['listener']['id'], 'admin_state_up': True, 'protocol_port': 80, 'protocol': lb_con.PROTOCOL_HTTP, 'connection_limit': -1, 'default_pool': def_pool_data, 'l7_policies': [ l7_policy_data]}], 'pools': [default_pool_data, pol_pool_data], 'admin_state_up': True, 'pip_address': '10.0.0.2', 'vip_address': '10.0.0.2'}} calls = [ mock.call( 'POST', '/api/workflow/LB_' + lb_id + '/action/apply', wf_apply_one_leg_params, v2_driver.TEMPLATE_HEADER) ] self.driver_rest_call_mock.assert_has_calls( calls, any_order=True) def test_build_objects_graph_one_leg(self): with self.subnet(cidr='10.0.0.0/24') as vip_sub: with self.loadbalancer(subnet=vip_sub) as lb: lb_id = lb['loadbalancer']['id'] with self.listener(loadbalancer_id=lb_id) as listener: with self.pool( protocol='HTTP', listener_id=listener['listener']['id']) as pool: with contextlib.nested( self.member(pool_id=pool['pool']['id'], subnet=vip_sub, address='10.0.1.10'), self.member(pool_id=pool['pool']['id'], subnet=vip_sub, 
address='10.0.1.20') ) as (member1, member2): wf_srv_params = copy.deepcopy(WF_SRV_PARAMS) wf_params = copy.deepcopy(WF_CREATE_PARAMS) wf_srv_params['name'] = 'srv_' + ( vip_sub['subnet']['network_id']) wf_srv_params['tenantId'] = self._tenant_id wf_srv_params['primary']['network'][ 'portgroups'] = [vip_sub['subnet'][ 'network_id']] wf_params['parameters']['service_params'] = ( wf_srv_params) member1_data = { "id": member1['member']['id'], "address": "10.0.1.10", "protocol_port": 80, "weight": 1, "admin_state_up": True, "subnet": "255.255.255.255", "mask": "255.255.255.255", "gw": "255.255.255.255", 'admin_state_up': True} member2_data = { "id": member2['member']['id'], "address": "10.0.1.20", "protocol_port": 80, "weight": 1, "admin_state_up": True, "subnet": "255.255.255.255", "mask": "255.255.255.255", "gw": "255.255.255.255", "admin_state_up": True} def_pool_data = { "id": pool['pool']['id']} pool_data = { "id": pool['pool']['id'], "protocol": "HTTP", "lb_algorithm": "ROUND_ROBIN", "admin_state_up": True, "members": [ member1_data, member2_data]} wf_apply_one_leg_params = {'parameters': { 'listeners': [{ "id": listener['listener']['id'], "admin_state_up": True, "protocol_port": 80, "protocol": "HTTP", "connection_limit": -1, "default_pool": def_pool_data, "l7_policies": []}], "pools": [pool_data], "admin_state_up": True, "pip_address": "10.0.0.2", "vip_address": "10.0.0.2"}} calls = [ mock.call('GET', '/api/workflow/LB_' + lb_id, None, None), mock.call( 'POST', '/api/workflowTemplate/' + 'os_lb_v2?name=LB_' + lb_id, wf_params, v2_driver.TEMPLATE_HEADER), mock.call( 'POST', '/api/workflow/LB_' + lb_id + '/action/apply', wf_apply_one_leg_params, v2_driver.TEMPLATE_HEADER) ] self.driver_rest_call_mock.assert_has_calls( calls, any_order=True) def test_build_objects_graph_two_legs_full(self): with contextlib.nested( self.subnet(cidr='10.0.0.0/24'), self.subnet(cidr='20.0.0.0/24'), self.subnet(cidr='30.0.0.0/24') ) as (vip_sub, member_sub1, member_sub2): with 
self.loadbalancer(subnet=vip_sub) as lb: lb_id = lb['loadbalancer']['id'] with self.listener(loadbalancer_id=lb_id) as listener: with self.pool( protocol='HTTP', listener_id=listener['listener']['id'], session_persistence={ 'type': "APP_COOKIE", 'cookie_name': 'sessionId'}) as pool: with self.healthmonitor( type='HTTP', pool_id=pool['pool']['id']) as hm: with self.member( pool_id=pool['pool']['id'], subnet=member_sub1, address='20.0.1.10') as member: wf_params = copy.deepcopy(WF_CREATE_PARAMS) wf_srv_params = copy.deepcopy( WF_SRV_PARAMS) wf_srv_params['name'] = ( 'srv_' + vip_sub['subnet'][ 'network_id']) wf_srv_params['tenantId'] = self._tenant_id wf_srv_params['primary']['network'][ 'portgroups'] = [ vip_sub['subnet']['network_id'], member_sub1['subnet']['network_id']] wf_params['parameters'][ 'twoleg_enabled'] = True wf_params['parameters'][ 'service_params'] = (wf_srv_params) hm_data = { "admin_state_up": True, "id": hm['healthmonitor']['id'], "type": "HTTP", "delay": 1, "timeout": 1, "max_retries": 1, "admin_state_up": True, "url_path": "/", "http_method": "GET", "expected_codes": '200'} sp_data = { "type": "APP_COOKIE", "cookie_name": "sessionId"} m_data = { "id": member['member']['id'], "address": "20.0.1.10", "protocol_port": 80, "weight": 1, "admin_state_up": True, "subnet": "20.0.1.10", "mask": "255.255.255.255", "gw": "20.0.0.1", "admin_state_up": True} def_pool_data = { "id": pool['pool']['id'], "sessionpersistence": sp_data} pool_data = { "id": pool['pool']['id'], "protocol": "HTTP", "lb_algorithm": "ROUND_ROBIN", "admin_state_up": True, "healthmonitor": hm_data, "members": [m_data]} wf_apply_full_params = {'parameters': { 'listeners': [{ "id": listener['listener']['id'], "admin_state_up": True, "protocol_port": 80, "protocol": "HTTP", "connection_limit": -1, "admin_state_up": True, "default_pool": def_pool_data, "l7_policies": []}], "pools": [pool_data], "admin_state_up": True, "pip_address": "20.0.0.2", "vip_address": "10.0.0.2"}} calls = [ 
mock.call( 'GET', '/api/workflow/LB_' + lb_id, None, None), mock.call( 'POST', '/api/workflowTemplate/' + 'os_lb_v2?name=LB_' + lb_id, wf_params, v2_driver.TEMPLATE_HEADER), mock.call( 'POST', '/api/workflow/LB_' + lb_id + '/action/apply', wf_apply_full_params, v2_driver.TEMPLATE_HEADER), mock.call('GET', 'some_uri', None, None)] self.driver_rest_call_mock.\ assert_has_calls( calls, any_order=True) class TestLBaaSDriverDebugOptions(TestLBaaSDriverBase): def setUp(self): cfg.CONF.set_override('configure_l3', False, group='radwarev2_debug') cfg.CONF.set_override('configure_l4', False, group='radwarev2_debug') super(TestLBaaSDriverDebugOptions, self).setUp() templates_to_return = [{'name': self.driver.workflow_template_name}] for t in self.driver.child_workflow_template_names: templates_to_return.append({'name': t}) rest_call_function_mock.__dict__.update( {'RESPOND_WITH_ERROR': False, 'WORKFLOW_MISSING': True, 'WORKFLOW_TEMPLATE_MISSING': True, 'RESPOND_WITH_SERVER_DOWN': 200, 'WF_TEMPLATES_TO_RETURN': templates_to_return}) self.operation_completer_start_mock = mock.Mock( return_value=None) self.operation_completer_join_mock = mock.Mock( return_value=None) self.driver_rest_call_mock = mock.Mock( side_effect=rest_call_function_mock) self.flip_servers_mock = mock.Mock( return_value=None) self.recover_mock = mock.Mock( side_effect=_recover_function_mock) self.driver.completion_handler.start = ( self.operation_completer_start_mock) self.driver.completion_handler.join = ( self.operation_completer_join_mock) self.driver.rest_client.call = self.driver_rest_call_mock self.driver.rest_client._call = self.driver_rest_call_mock self.driver.completion_handler.rest_client.call = ( self.driver_rest_call_mock) self.driver.queue = QueueMock( self.driver.completion_handler.handle_operation_completion) def test_debug_options(self): with self.subnet(cidr='10.0.0.0/24') as s: with self.loadbalancer(subnet=s) as lb: lb_id = lb['loadbalancer']['id'] with 
self.listener(loadbalancer_id=lb_id) as l: with self.pool( protocol='HTTP', listener_id=l['listener']['id']) as p: with self.member( pool_id=p['pool']['id'], subnet=s, address='10.0.1.10'): wf_srv_params = copy.deepcopy(WF_SRV_PARAMS) wf_params = copy.deepcopy(WF_CREATE_PARAMS) wf_srv_params['name'] = 'srv_' + ( s['subnet']['network_id']) wf_srv_params['tenantId'] = self._tenant_id wf_srv_params['primary']['network'][ 'portgroups'] = [s['subnet'][ 'network_id']] wf_params['parameters']['service_params'] = ( wf_srv_params) wf_params['parameters']['configure_l3'] = False wf_params['parameters']['configure_l4'] = False calls = [ mock.call('GET', '/api/workflow/LB_' + lb_id, None, None), mock.call( 'POST', '/api/workflowTemplate/' + 'os_lb_v2?name=LB_' + lb_id, wf_params, v2_driver.TEMPLATE_HEADER) ] self.driver_rest_call_mock.assert_has_calls( calls, any_order=True) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/common/0000775000567000056710000000000012701410110025340 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/common/test_agent_driver_base.py0000664000567000056710000007541112701407726032447 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib

import mock
from neutron import context
from neutron.db import servicetype_db as st_db
from neutron import manager
from neutron.plugins.common import constants

from neutron_lbaas.db.loadbalancer import models
from neutron_lbaas.drivers.common import agent_driver_base
from neutron_lbaas.extensions import loadbalancerv2
from neutron_lbaas.tests import base
from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancerv2


class TestLoadBalancerPluginBase(test_db_loadbalancerv2.LbaasPluginDbTestCase):
    """Base fixture: loads the LBaaS v2 plugin with AgentDriverBase as the
    'lbaas' provider and a dummy device driver."""

    def setUp(self):
        # AgentDriverBase.device_driver is class-level state; restore it so
        # later tests start from a clean slate.
        def reset_device_driver():
            agent_driver_base.AgentDriverBase.device_driver = None
        self.addCleanup(reset_device_driver)
        self.mock_importer = mock.patch.object(
            agent_driver_base, 'importutils').start()

        # needed to reload provider configuration
        st_db.ServiceTypeManager._instance = None
        agent_driver_base.AgentDriverBase.device_driver = 'dummy'
        super(TestLoadBalancerPluginBase, self).setUp(
            lbaas_provider=('LOADBALANCERV2:lbaas:neutron_lbaas.drivers.'
                            'common.agent_driver_base.'
'AgentDriverBase:default')) # we need access to loaded plugins to modify models loaded_plugins = manager.NeutronManager().get_service_plugins() self.plugin_instance = loaded_plugins[constants.LOADBALANCERV2] class TestLoadBalancerAgentApi(base.BaseTestCase): def setUp(self): super(TestLoadBalancerAgentApi, self).setUp() self.api = agent_driver_base.LoadBalancerAgentApi('topic') def test_init(self): self.assertEqual('topic', self.api.client.target.topic) def _call_test_helper(self, method_name, method_args): with contextlib.nested( mock.patch.object(self.api.client, 'cast'), mock.patch.object(self.api.client, 'prepare'), ) as ( rpc_mock, prepare_mock ): prepare_mock.return_value = self.api.client getattr(self.api, method_name)(mock.sentinel.context, host='host', **method_args) prepare_args = {'server': 'host'} prepare_mock.assert_called_once_with(**prepare_args) if method_name == 'agent_updated': method_args = {'payload': method_args} rpc_mock.assert_called_once_with(mock.sentinel.context, method_name, **method_args) def test_agent_updated(self): self._call_test_helper('agent_updated', {'admin_state_up': 'test'}) def test_create_pool(self): self._call_test_helper('create_pool', {'pool': 'test'}) def test_update_pool(self): self._call_test_helper('update_pool', {'old_pool': 'test', 'pool': 'test'}) def test_delete_pool(self): self._call_test_helper('delete_pool', {'pool': 'test'}) def test_create_loadbalancer(self): self._call_test_helper('create_loadbalancer', {'loadbalancer': 'test', 'driver_name': 'dummy'}) def test_update_loadbalancer(self): self._call_test_helper('update_loadbalancer', { 'old_loadbalancer': 'test', 'loadbalancer': 'test'}) def test_delete_loadbalancer(self): self._call_test_helper('delete_loadbalancer', {'loadbalancer': 'test'}) def test_create_member(self): self._call_test_helper('create_member', {'member': 'test'}) def test_update_member(self): self._call_test_helper('update_member', {'old_member': 'test', 'member': 'test'}) def 
test_delete_member(self): self._call_test_helper('delete_member', {'member': 'test'}) def test_create_monitor(self): self._call_test_helper('create_healthmonitor', {'healthmonitor': 'test'}) def test_update_monitor(self): self._call_test_helper('update_healthmonitor', {'old_healthmonitor': 'test', 'healthmonitor': 'test'}) def test_delete_monitor(self): self._call_test_helper('delete_healthmonitor', {'healthmonitor': 'test'}) class TestLoadBalancerPluginNotificationWrapper(TestLoadBalancerPluginBase): def setUp(self): self.log = mock.patch.object(agent_driver_base, 'LOG') api_cls = mock.patch.object(agent_driver_base, 'LoadBalancerAgentApi').start() super(TestLoadBalancerPluginNotificationWrapper, self).setUp() self.mock_api = api_cls.return_value self.mock_get_driver = mock.patch.object(self.plugin_instance, '_get_driver') self.mock_get_driver.return_value = ( agent_driver_base.AgentDriverBase(self.plugin_instance)) def _update_status(self, model, status, id): ctx = context.get_admin_context() self.plugin_instance.db.update_status( ctx, model, id, provisioning_status=status ) def test_create_loadbalancer(self): with self.loadbalancer(no_delete=True) as loadbalancer: calls = self.mock_api.create_loadbalancer.call_args_list self.assertEqual(1, len(calls)) _, called_lb, _, device_driver = calls[0][0] self.assertEqual(loadbalancer['loadbalancer']['id'], called_lb.id) self.assertEqual('dummy', device_driver) self.assertEqual(constants.PENDING_CREATE, called_lb.provisioning_status) def test_update_loadbalancer(self): with self.loadbalancer(no_delete=True) as loadbalancer: lb_id = loadbalancer['loadbalancer']['id'] old_lb_name = loadbalancer['loadbalancer']['name'] ctx = context.get_admin_context() self.plugin_instance.db.update_loadbalancer_provisioning_status( ctx, loadbalancer['loadbalancer']['id']) new_lb_name = 'new_lb_name' loadbalancer['loadbalancer']['name'] = new_lb_name self._update_loadbalancer_api( lb_id, {'loadbalancer': {'name': new_lb_name}}) calls = 
self.mock_api.update_loadbalancer.call_args_list self.assertEqual(1, len(calls)) _, called_old_lb, called_new_lb, called_host = calls[0][0] self.assertEqual(lb_id, called_old_lb.id) self.assertEqual(lb_id, called_new_lb.id) self.assertEqual(old_lb_name, called_old_lb.name) self.assertEqual(new_lb_name, called_new_lb.name) self.assertEqual('host', called_host) self.assertEqual(constants.PENDING_UPDATE, called_new_lb.provisioning_status) def test_delete_loadbalancer(self): with self.loadbalancer(no_delete=True) as loadbalancer: lb_id = loadbalancer['loadbalancer']['id'] ctx = context.get_admin_context() self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) self.plugin_instance.delete_loadbalancer(ctx, lb_id) calls = self.mock_api.delete_loadbalancer.call_args_list self.assertEqual(1, len(calls)) _, called_lb, called_host = calls[0][0] self.assertEqual(lb_id, called_lb.id) self.assertEqual('host', called_host) self.assertEqual(constants.PENDING_DELETE, called_lb.provisioning_status) self.assertRaises(loadbalancerv2.EntityNotFound, self.plugin_instance.db.get_loadbalancer, ctx, lb_id) def test_create_listener(self): with self.loadbalancer(no_delete=True) as loadbalancer: lb_id = loadbalancer['loadbalancer']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, loadbalancer['loadbalancer']['id']) with self.listener(loadbalancer_id=lb_id, no_delete=True) as listener: listener_id = listener['listener']['id'] calls = self.mock_api.create_listener.call_args_list _, called_listener, called_host = calls[0][0] self.assertEqual(listener_id, called_listener.id) self.assertEqual('host', called_host) self.assertEqual(constants.PENDING_CREATE, called_listener.provisioning_status) ctx = context.get_admin_context() lb = self.plugin_instance.db.get_loadbalancer(ctx, lb_id) self.assertEqual(constants.PENDING_UPDATE, lb.provisioning_status) def test_update_listener(self): with self.loadbalancer(no_delete=True) as loadbalancer: lb_id = 
loadbalancer['loadbalancer']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, loadbalancer['loadbalancer']['id']) with self.listener(loadbalancer_id=lb_id, no_delete=True) as listener: listener_id = listener['listener']['id'] old_name = listener['listener']['name'] ctx = context.get_admin_context() self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) self.plugin_instance.db.get_listener(ctx, listener_id) new_name = 'new_listener_name' listener['listener']['name'] = new_name self.plugin_instance.update_listener( ctx, listener['listener']['id'], listener) self.plugin_instance.db.get_listener( ctx, listener['listener']['id']) calls = self.mock_api.update_listener.call_args_list (_, old_called_listener, new_called_listener, called_host) = calls[0][0] self.assertEqual(listener_id, new_called_listener.id) self.assertEqual(listener_id, old_called_listener.id) self.assertEqual(old_name, old_called_listener.name) self.assertEqual(new_name, new_called_listener.name) self.assertEqual(constants.PENDING_UPDATE, new_called_listener.provisioning_status) lb = self.plugin_instance.db.get_loadbalancer(ctx, lb_id) self.assertEqual(constants.PENDING_UPDATE, lb.provisioning_status) self.assertEqual('host', called_host) def test_delete_listener(self): with self.loadbalancer(no_delete=True) as loadbalancer: lb_id = loadbalancer['loadbalancer']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.listener(loadbalancer_id=lb_id, no_delete=True) as listener: listener_id = listener['listener']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) ctx = context.get_admin_context() self.plugin_instance.delete_listener( ctx, listener['listener']['id']) calls = self.mock_api.delete_listener.call_args_list _, called_listener, called_host = calls[0][0] self.assertEqual(listener_id, called_listener.id) self.assertEqual('host', called_host) self.assertEqual(constants.PENDING_DELETE, called_listener.provisioning_status) ctx = 
context.get_admin_context() lb = self.plugin_instance.db.get_loadbalancer(ctx, lb_id) self.assertEqual(constants.ACTIVE, lb.provisioning_status) self.assertRaises( loadbalancerv2.EntityNotFound, self.plugin_instance.db.get_listener, ctx, listener_id) def test_create_pool(self): with self.loadbalancer(no_delete=True) as loadbalancer: lb_id = loadbalancer['loadbalancer']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.listener(loadbalancer_id=lb_id, no_delete=True) as listener: listener_id = listener['listener']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.pool(listener_id=listener_id, loadbalancer_id=lb_id, no_delete=True) as pool: pool_id = pool['pool']['id'] calls = self.mock_api.create_pool.call_args_list _, called_pool, called_host = calls[0][0] self.assertEqual(pool_id, called_pool.id) self.assertEqual('host', called_host) self.assertEqual(constants.PENDING_CREATE, called_pool.provisioning_status) ctx = context.get_admin_context() lb = self.plugin_instance.db.get_loadbalancer(ctx, lb_id) self.assertEqual(constants.PENDING_UPDATE, lb.provisioning_status) def test_update_pool(self): ctx = context.get_admin_context() with self.loadbalancer(no_delete=True) as loadbalancer: lb_id = loadbalancer['loadbalancer']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.listener(loadbalancer_id=lb_id, no_delete=True) as listener: listener_id = listener['listener']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.pool(loadbalancer_id=lb_id, listener_id=listener_id, no_delete=True) as pool: pool_id = pool['pool']['id'] old_name = pool['pool']['name'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) new_name = 'new_name' pool['pool']['name'] = new_name self.plugin_instance.update_pool(ctx, pool_id, pool) calls = self.mock_api.update_pool.call_args_list (_, old_called_pool, new_called_pool, called_host) = calls[0][0] 
self.assertEqual(pool_id, new_called_pool.id) self.assertEqual(pool_id, old_called_pool.id) self.assertEqual(old_name, old_called_pool.name) self.assertEqual(new_name, new_called_pool.name) self.assertEqual(constants.PENDING_UPDATE, new_called_pool.provisioning_status) lb = self.plugin_instance.db.get_loadbalancer(ctx, lb_id) self.assertEqual(constants.PENDING_UPDATE, lb.provisioning_status) self.assertEqual('host', called_host) def test_delete_pool(self): with self.loadbalancer(no_delete=True) as loadbalancer: lb_id = loadbalancer['loadbalancer']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.listener(loadbalancer_id=lb_id, no_delete=True) as listener: listener_id = listener['listener']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.pool(listener_id=listener_id, loadbalancer_id=lb_id, no_delete=True) as pool: pool_id = pool['pool']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) ctx = context.get_admin_context() self.plugin_instance.delete_pool(ctx, pool_id) calls = self.mock_api.delete_pool.call_args_list _, called_pool, called_host = calls[0][0] self.assertEqual(pool_id, called_pool.id) self.assertEqual('host', called_host) self.assertEqual(constants.PENDING_DELETE, called_pool.provisioning_status) lb = self.plugin_instance.db.get_loadbalancer(ctx, lb_id) self.assertEqual(constants.ACTIVE, lb.provisioning_status) self.assertRaises( loadbalancerv2.EntityNotFound, self.plugin_instance.db.get_pool, ctx, pool_id) def test_create_member(self): with self.loadbalancer(no_delete=True) as loadbalancer: lb_id = loadbalancer['loadbalancer']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.listener(loadbalancer_id=lb_id, no_delete=True) as listener: listener_id = listener['listener']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.pool(listener_id=listener_id, loadbalancer_id=lb_id, no_delete=True) as pool: pool_id 
= pool['pool']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.subnet(cidr='11.0.0.0/24') as subnet: with self.member(pool_id=pool_id, subnet=subnet, no_delete=True) as member: member_id = member['member']['id'] calls = self.mock_api.create_member.call_args_list _, called_member, called_host = calls[0][0] self.assertEqual(member_id, called_member.id) self.assertEqual('host', called_host) self.assertEqual(constants.PENDING_CREATE, called_member.provisioning_status) ctx = context.get_admin_context() lb = self.plugin_instance.db.get_loadbalancer( ctx, lb_id) self.assertEqual(constants.PENDING_UPDATE, lb.provisioning_status) def test_update_member(self): with self.loadbalancer(no_delete=True) as loadbalancer: lb_id = loadbalancer['loadbalancer']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.listener(loadbalancer_id=lb_id, no_delete=True) as listener: listener_id = listener['listener']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.pool(listener_id=listener_id, loadbalancer_id=lb_id, no_delete=True) as pool: pool_id = pool['pool']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.subnet(cidr='11.0.0.0/24') as subnet: with self.member(pool_id=pool_id, subnet=subnet, no_delete=True) as member: member_id = member['member']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) old_weight = member['member']['weight'] new_weight = 2 member['member']['weight'] = new_weight ctx = context.get_admin_context() self.plugin_instance.update_pool_member( ctx, member_id, pool_id, member) calls = self.mock_api.update_member.call_args_list (_, old_called_member, new_called_member, called_host) = calls[0][0] self.assertEqual(member_id, new_called_member.id) self.assertEqual(member_id, old_called_member.id) self.assertEqual(old_weight, old_called_member.weight) self.assertEqual(new_weight, new_called_member.weight) self.assertEqual( 
constants.PENDING_UPDATE, new_called_member.provisioning_status) lb = self.plugin_instance.db.get_loadbalancer( ctx, lb_id) self.assertEqual(constants.PENDING_UPDATE, lb.provisioning_status) self.assertEqual('host', called_host) def test_delete_member(self): with self.loadbalancer(no_delete=True) as loadbalancer: lb_id = loadbalancer['loadbalancer']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.listener(loadbalancer_id=lb_id, no_delete=True) as listener: listener_id = listener['listener']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.pool(listener_id=listener_id, loadbalancer_id=lb_id, no_delete=True) as pool: pool_id = pool['pool']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.subnet(cidr='11.0.0.0/24') as subnet: with self.member(pool_id=pool_id, subnet=subnet, no_delete=True) as member: member_id = member['member']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) ctx = context.get_admin_context() self.plugin_instance.delete_pool_member( ctx, member_id, pool_id) calls = self.mock_api.delete_member.call_args_list _, called_member, called_host = calls[0][0] self.assertEqual(member_id, called_member.id) self.assertEqual('host', called_host) self.assertEqual(constants.PENDING_DELETE, called_member.provisioning_status) lb = self.plugin_instance.db.get_loadbalancer( ctx, lb_id) self.assertEqual(constants.ACTIVE, lb.provisioning_status) self.assertRaises( loadbalancerv2.EntityNotFound, self.plugin_instance.db.get_pool_member, ctx, member_id) def test_create_health_monitor(self): with self.loadbalancer(no_delete=True) as loadbalancer: lb_id = loadbalancer['loadbalancer']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.listener(loadbalancer_id=lb_id, no_delete=True) as listener: listener_id = listener['listener']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with 
self.pool(listener_id=listener_id, loadbalancer_id=lb_id, no_delete=True) as pool: pool_id = pool['pool']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.healthmonitor(pool_id=pool_id, no_delete=True) as monitor: hm_id = monitor['healthmonitor']['id'] calls = ( self.mock_api.create_healthmonitor.call_args_list) _, called_hm, called_host = calls[0][0] self.assertEqual(hm_id, called_hm.id) self.assertEqual('host', called_host) self.assertEqual(constants.PENDING_CREATE, called_hm.provisioning_status) ctx = context.get_admin_context() lb = self.plugin_instance.db.get_loadbalancer( ctx, lb_id) self.assertEqual(constants.PENDING_UPDATE, lb.provisioning_status) def test_update_health_monitor(self): with self.loadbalancer(no_delete=True) as loadbalancer: lb_id = loadbalancer['loadbalancer']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.listener(loadbalancer_id=lb_id, no_delete=True) as listener: listener_id = listener['listener']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.pool(listener_id=listener_id, loadbalancer_id=lb_id, no_delete=True) as pool: pool_id = pool['pool']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.healthmonitor(pool_id=pool_id, no_delete=True) as monitor: hm_id = monitor['healthmonitor']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) old_to = monitor['healthmonitor']['timeout'] new_to = 2 monitor['healthmonitor']['timeout'] = new_to ctx = context.get_admin_context() self.plugin_instance.update_healthmonitor(ctx, hm_id, monitor) calls = ( self.mock_api.update_healthmonitor.call_args_list) (_, old_called_hm, new_called_hm, called_host) = calls[0][0] self.assertEqual(hm_id, new_called_hm.id) self.assertEqual(hm_id, old_called_hm.id) self.assertEqual(old_to, old_called_hm.timeout) self.assertEqual(new_to, new_called_hm.timeout) self.assertEqual( constants.PENDING_UPDATE, 
new_called_hm.provisioning_status) lb = self.plugin_instance.db.get_loadbalancer( ctx, lb_id) self.assertEqual(constants.PENDING_UPDATE, lb.provisioning_status) self.assertEqual('host', called_host) def test_delete_health_monitor(self): with self.loadbalancer(no_delete=True) as loadbalancer: lb_id = loadbalancer['loadbalancer']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.listener(loadbalancer_id=lb_id, no_delete=True) as listener: listener_id = listener['listener']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.pool(listener_id=listener_id, loadbalancer_id=lb_id, no_delete=True) as pool: pool_id = pool['pool']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) with self.healthmonitor(pool_id=pool_id, no_delete=True) as monitor: hm_id = monitor['healthmonitor']['id'] self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id) ctx = context.get_admin_context() self.plugin_instance.delete_healthmonitor(ctx, hm_id) calls = ( self.mock_api.delete_healthmonitor.call_args_list) _, called_hm, called_host = calls[0][0] self.assertEqual(hm_id, called_hm.id) self.assertEqual('host', called_host) self.assertEqual(constants.PENDING_DELETE, called_hm.provisioning_status) lb = self.plugin_instance.db.get_loadbalancer( ctx, lb_id) self.assertEqual(constants.ACTIVE, lb.provisioning_status) self.assertRaises( loadbalancerv2.EntityNotFound, self.plugin_instance.db.get_healthmonitor, ctx, hm_id) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/common/test_agent_callbacks.py0000664000567000056710000003143312701407726032075 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron import context from neutron.extensions import portbindings from neutron.plugins.common import constants from neutron.tests.unit import testlib_api from oslo_utils import uuidutils import six from six import moves from neutron_lbaas.db.loadbalancer import loadbalancer_dbv2 as ldb from neutron_lbaas.db.loadbalancer import models as db_models from neutron_lbaas.drivers.common import agent_callbacks from neutron_lbaas.extensions import loadbalancerv2 from neutron_lbaas.services.loadbalancer import constants as lb_const from neutron_lbaas.services.loadbalancer import data_models from neutron_lbaas.tests.unit.drivers.common import test_agent_driver_base class TestLoadBalancerCallbacks( test_agent_driver_base.TestLoadBalancerPluginBase): def setUp(self): super(TestLoadBalancerCallbacks, self).setUp() self.callbacks = agent_callbacks.LoadBalancerCallbacks( self.plugin_instance ) get_lbaas_agents_patcher = mock.patch( 'neutron_lbaas.agent_scheduler.LbaasAgentSchedulerDbMixin.' 'get_lbaas_agents') get_lbaas_agents_patcher.start() def test_get_ready_devices(self): with self.loadbalancer() as loadbalancer: lb_id = loadbalancer['loadbalancer']['id'] self.plugin_instance.db.update_loadbalancer_provisioning_status( context.get_admin_context(), loadbalancer['loadbalancer']['id']) with mock.patch( 'neutron_lbaas.agent_scheduler.LbaasAgentSchedulerDbMixin.' 
'list_loadbalancers_on_lbaas_agent') as mock_agent_lbs: mock_agent_lbs.return_value = [ data_models.LoadBalancer(id=lb_id)] ready = self.callbacks.get_ready_devices( context.get_admin_context(), ) self.assertEqual([lb_id], ready) def test_get_ready_devices_multiple_listeners_and_loadbalancers(self): ctx = context.get_admin_context() # add 3 load balancers and 2 listeners directly to DB # to create 2 "ready" devices and one load balancer without listener loadbalancers = [] for i in moves.range(3): loadbalancers.append(ldb.models.LoadBalancer( id=uuidutils.generate_uuid(), vip_subnet_id=self._subnet_id, provisioning_status=constants.ACTIVE, admin_state_up=True, operating_status=constants.ACTIVE)) ctx.session.add(loadbalancers[i]) listener0 = ldb.models.Listener( id=uuidutils.generate_uuid(), protocol="HTTP", loadbalancer_id=loadbalancers[0].id, provisioning_status=constants.ACTIVE, admin_state_up=True, connection_limit=3, protocol_port=80, operating_status=constants.ACTIVE) ctx.session.add(listener0) loadbalancers[0].listener_id = listener0.id listener1 = ldb.models.Listener( id=uuidutils.generate_uuid(), protocol="HTTP", loadbalancer_id=loadbalancers[1].id, provisioning_status=constants.ACTIVE, admin_state_up=True, connection_limit=3, protocol_port=80, operating_status=constants.ACTIVE) ctx.session.add(listener1) loadbalancers[1].listener_id = listener1.id ctx.session.flush() self.assertEqual(3, ctx.session.query(ldb.models.LoadBalancer).count()) self.assertEqual(2, ctx.session.query(ldb.models.Listener).count()) with mock.patch( 'neutron_lbaas.agent_scheduler.LbaasAgentSchedulerDbMixin' '.list_loadbalancers_on_lbaas_agent') as mock_agent_lbs: mock_agent_lbs.return_value = loadbalancers ready = self.callbacks.get_ready_devices(ctx) self.assertEqual(3, len(ready)) self.assertIn(loadbalancers[0].id, ready) self.assertIn(loadbalancers[1].id, ready) self.assertIn(loadbalancers[2].id, ready) # cleanup ctx.session.query(ldb.models.Listener).delete() 
ctx.session.query(ldb.models.LoadBalancer).delete() def test_get_ready_devices_inactive_loadbalancer(self): with self.loadbalancer() as loadbalancer: lb_id = loadbalancer['loadbalancer']['id'] self.plugin_instance.db.update_loadbalancer_provisioning_status( context.get_admin_context(), loadbalancer['loadbalancer']['id']) # set the loadbalancer inactive need to use plugin directly since # status is not tenant mutable self.plugin_instance.db.update_loadbalancer( context.get_admin_context(), loadbalancer['loadbalancer']['id'], {'loadbalancer': {'provisioning_status': constants.INACTIVE}} ) with mock.patch( 'neutron_lbaas.agent_scheduler.LbaasAgentSchedulerDbMixin.' 'list_loadbalancers_on_lbaas_agent') as mock_agent_lbs: mock_agent_lbs.return_value = [ data_models.LoadBalancer(id=lb_id)] ready = self.callbacks.get_ready_devices( context.get_admin_context(), ) self.assertEqual([loadbalancer['loadbalancer']['id']], ready) def test_get_loadbalancer_active(self): with self.loadbalancer() as loadbalancer: ctx = context.get_admin_context() # activate objects self.plugin_instance.db.update_status( ctx, db_models.LoadBalancer, loadbalancer['loadbalancer']['id'], 'ACTIVE') lb = self.plugin_instance.db.get_loadbalancer( ctx, loadbalancer['loadbalancer']['id'] ) load_balancer = self.callbacks.get_loadbalancer( ctx, loadbalancer['loadbalancer']['id'] ) expected_lb = lb.to_dict() expected_lb['provider']['device_driver'] = 'dummy' subnet = self.plugin_instance.db._core_plugin.get_subnet( ctx, expected_lb['vip_subnet_id']) subnet = data_models.Subnet.from_dict(subnet).to_dict() expected_lb['vip_port']['fixed_ips'][0]['subnet'] = subnet del expected_lb['stats'] self.assertEqual(expected_lb, load_balancer) def _update_port_test_helper(self, expected, func, **kwargs): core = self.plugin_instance.db._core_plugin with self.loadbalancer() as loadbalancer: lb_id = loadbalancer['loadbalancer']['id'] if 'device_id' not in expected: expected['device_id'] = lb_id 
self.plugin_instance.db.update_loadbalancer_provisioning_status( context.get_admin_context(), loadbalancer['loadbalancer']['id']) ctx = context.get_admin_context() db_lb = self.plugin_instance.db.get_loadbalancer(ctx, lb_id) func(ctx, port_id=db_lb.vip_port_id, **kwargs) db_port = core.get_port(ctx, db_lb.vip_port_id) for k, v in six.iteritems(expected): self.assertEqual(v, db_port[k]) def test_plug_vip_port(self): exp = { 'device_owner': 'neutron:' + constants.LOADBALANCERV2, 'admin_state_up': True } self._update_port_test_helper( exp, self.callbacks.plug_vip_port, host='host' ) def test_plug_vip_port_mock_with_host(self): exp = { 'device_owner': 'neutron:' + constants.LOADBALANCERV2, 'admin_state_up': True, portbindings.HOST_ID: 'host' } with mock.patch.object( self.plugin.db._core_plugin, 'update_port') as mock_update_port: with self.loadbalancer() as loadbalancer: lb_id = loadbalancer['loadbalancer']['id'] ctx = context.get_admin_context() self.callbacks.update_status(ctx, 'loadbalancer', lb_id, constants.ACTIVE) (self.plugin_instance.db .update_loadbalancer_provisioning_status(ctx, lb_id)) db_lb = self.plugin_instance.db.get_loadbalancer(ctx, lb_id) self.callbacks.plug_vip_port(ctx, port_id=db_lb.vip_port_id, host='host') mock_update_port.assert_called_once_with( ctx, db_lb.vip_port_id, {'port': testlib_api.SubDictMatch(exp)}) def test_unplug_vip_port(self): exp = { 'device_owner': '', 'device_id': '', 'admin_state_up': False } self._update_port_test_helper( exp, self.callbacks.unplug_vip_port, host='host' ) def test_loadbalancer_deployed(self): with self.loadbalancer() as loadbalancer: ctx = context.get_admin_context() l = self.plugin_instance.db.get_loadbalancer( ctx, loadbalancer['loadbalancer']['id']) self.assertEqual('PENDING_CREATE', l.provisioning_status) self.callbacks.loadbalancer_deployed( ctx, loadbalancer['loadbalancer']['id']) l = self.plugin_instance.db.get_loadbalancer( ctx, loadbalancer['loadbalancer']['id']) self.assertEqual('ACTIVE', 
l.provisioning_status) def test_listener_deployed(self): with self.loadbalancer(no_delete=True) as loadbalancer: self.plugin_instance.db.update_loadbalancer_provisioning_status( context.get_admin_context(), loadbalancer['loadbalancer']['id']) with self.listener( loadbalancer_id=loadbalancer[ 'loadbalancer']['id']) as listener: ctx = context.get_admin_context() l = self.plugin_instance.db.get_loadbalancer( ctx, loadbalancer['loadbalancer']['id']) self.assertEqual('PENDING_UPDATE', l.provisioning_status) ll = self.plugin_instance.db.get_listener( ctx, listener['listener']['id']) self.assertEqual('PENDING_CREATE', ll.provisioning_status) self.callbacks.loadbalancer_deployed( ctx, loadbalancer['loadbalancer']['id']) l = self.plugin_instance.db.get_loadbalancer( ctx, loadbalancer['loadbalancer']['id']) self.assertEqual('ACTIVE', l.provisioning_status) ll = self.plugin_instance.db.get_listener( ctx, listener['listener']['id']) self.assertEqual('ACTIVE', ll.provisioning_status) def test_update_status_loadbalancer(self): with self.loadbalancer() as loadbalancer: loadbalancer_id = loadbalancer['loadbalancer']['id'] ctx = context.get_admin_context() l = self.plugin_instance.db.get_loadbalancer(ctx, loadbalancer_id) self.assertEqual('PENDING_CREATE', l.provisioning_status) self.callbacks.update_status(ctx, 'loadbalancer', loadbalancer_id, provisioning_status=constants.ACTIVE, operating_status=lb_const.ONLINE) l = self.plugin_instance.db.get_loadbalancer(ctx, loadbalancer_id) self.assertEqual(constants.ACTIVE, l.provisioning_status) self.assertEqual(lb_const.ONLINE, l.operating_status) def test_update_status_loadbalancer_deleted_already(self): with mock.patch.object(agent_callbacks, 'LOG') as mock_log: loadbalancer_id = 'deleted_lb' ctx = context.get_admin_context() self.assertRaises(loadbalancerv2.EntityNotFound, self.plugin_instance.get_loadbalancer, ctx, loadbalancer_id) self.callbacks.update_status(ctx, 'loadbalancer', loadbalancer_id, provisioning_status=constants.ACTIVE) 
self.assertTrue(mock_log.warning.called) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/common/__init__.py0000664000567000056710000000000012701407726027462 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/brocade/0000775000567000056710000000000012701410110025447 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/brocade/test_driver_v2.py0000664000567000056710000001254112701407726031010 0ustar jenkinsjenkins00000000000000# Copyright 2014 Brocade Communications Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Pattabi Ayyasami (pattabi), Brocade Communication Systems, Inc. 
# import sys import mock from neutron import context with mock.patch.dict(sys.modules, {'brocade_neutron_lbaas': mock.Mock()}): from neutron_lbaas.drivers.brocade import driver_v2 as driver from neutron_lbaas.services.loadbalancer import data_models from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancer class FakeModel(object): def __init__(self, id): self.id = id def attached_to_loadbalancer(self): return True class ManagerTest(object): def __init__(self, parent, manager, model): self.parent = parent self.manager = manager self.model = model self.create(model) self.update(model, model) self.delete(model) def create(self, model): self.manager.create(self.parent.context, model) def update(self, old_model, model): self.manager.update(self.parent.context, old_model, model) def delete(self, model): self.manager.delete(self.parent.context, model) class LoadBalancerManagerTest(ManagerTest): def __init__(self, parent, manager, model): super(LoadBalancerManagerTest, self).__init__(parent, manager, model) self.refresh(model) self.stats(model) def refresh(self, model): self.manager.refresh(self.parent.context, model) self.parent.driver.device_driver.refresh \ .assert_called_once_with(model) def stats(self, model): self.manager.stats(self.parent.context, model) self.parent.driver.device_driver.stats.assert_called_once_with(model) class TestBrocadeLoadBalancerDriver( test_db_loadbalancer.LoadBalancerPluginDbTestCase): def _create_fake_models(self): id = 'name-001' lb = data_models.LoadBalancer(id=id) listener = data_models.Listener(id=id, loadbalancer=lb) pool = data_models.Pool(id=id, loadbalancer=lb) member = data_models.Member(id=id, pool=pool) hm = data_models.HealthMonitor(id=id, pool=pool) lb.listeners = [listener] lb.pools = [pool] listener.default_pool = pool pool.members = [member] pool.healthmonitor = hm return lb def setUp(self): super(TestBrocadeLoadBalancerDriver, self).setUp() self.context = context.get_admin_context() self.plugin = mock.Mock() 
self.driver = driver.BrocadeLoadBalancerDriver(self.plugin) self.lb = self._create_fake_models() def test_load_balancer_ops(self): LoadBalancerManagerTest(self, self.driver.load_balancer, self.lb) self.driver.device_driver.create_loadbalancer \ .assert_called_once_with(self.lb) self.driver.device_driver.update_loadbalancer \ .assert_called_once_with(self.lb, self.lb) self.driver.device_driver.delete_loadbalancer \ .assert_called_once_with(self.lb) def test_listener_ops(self): ManagerTest(self, self.driver.listener, self.lb.listeners[0]) self.driver.device_driver.create_listener \ .assert_called_once_with(self.lb.listeners[0]) self.driver.device_driver.update_listener \ .assert_called_once_with(self.lb.listeners[0], self.lb.listeners[0]) self.driver.device_driver.delete_listener \ .assert_called_once_with(self.lb.listeners[0]) def test_pool_ops(self): pool_fake_model = self.lb.listeners[0].default_pool ManagerTest(self, self.driver.pool, pool_fake_model) self.driver.device_driver.update_pool \ .assert_called_once_with(pool_fake_model, pool_fake_model) self.driver.device_driver.delete_pool \ .assert_called_once_with(pool_fake_model) def test_member_ops(self): member_fake_model = self.lb.listeners[0].default_pool.members[0] ManagerTest(self, self.driver.member, member_fake_model) self.driver.device_driver.create_member \ .assert_called_once_with(member_fake_model) self.driver.device_driver.update_member \ .assert_called_once_with(member_fake_model, member_fake_model) self.driver.device_driver.delete_member \ .assert_called_once_with(member_fake_model) def test_health_monitor_ops(self): hm_fake_model = self.lb.listeners[0].default_pool.healthmonitor ManagerTest(self, self.driver.health_monitor, hm_fake_model) self.driver.device_driver.update_healthmonitor \ .assert_called_once_with(hm_fake_model, hm_fake_model) self.driver.device_driver.delete_healthmonitor \ .assert_called_once_with(hm_fake_model) 
neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/brocade/__init__.py0000664000567000056710000000000012701407726027571 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/a10networks/0000775000567000056710000000000012701410110026226 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/a10networks/test_driver_v2.py0000664000567000056710000000652412701407726031573 0ustar jenkinsjenkins00000000000000# Copyright 2015, Doug Wiegley (dougwig), A10 Networks # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sys import mock from neutron import context from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancerv2 with mock.patch.dict(sys.modules, {'a10_neutron_lbaas': mock.Mock()}): from neutron_lbaas.drivers.a10networks import driver_v2 class FakeModel(object): def __init__(self, id): self.id = id self.address = '1.1.1.1' self.tenant_id = "tennant-was-a-great-doctor" class ManagerTest(object): def __init__(self, parent, manager, model, mocked_root): self.parent = parent self.context = parent.context self.driver = parent.driver self.manager = manager self.model = model self.mocked_root = mocked_root self.create(model) self.update(model, model) self.delete(model) def create(self, model): self.manager.create(self.context, model) self.mocked_root.create.assert_called_with(self.context, model) def update(self, old_model, model): self.manager.update(self.context, old_model, model) self.mocked_root.update.assert_called_with(self.context, old_model, model) def delete(self, model): self.manager.delete(self.context, model) self.mocked_root.delete.assert_called_with(self.context, model) def refresh(self): self.manager.refresh(self.context, self.model) self.mocked_root.refresh.assert_called_with(self.context, self.model) def stats(self): self.manager.stats(self.context, self.model) self.mocked_root.stats.assert_called_with(self.context, self.model) class TestA10ThunderDriver(test_db_loadbalancerv2.LbaasPluginDbTestCase): def setUp(self): super(TestA10ThunderDriver, self).setUp() self.context = context.get_admin_context() self.plugin = mock.Mock() self.driver = driver_v2.ThunderDriver(self.plugin) self.driver.a10 = mock.Mock() def test_load_balancer_ops(self): m = ManagerTest(self, self.driver.load_balancer, FakeModel("loadbalancer-a10"), self.driver.a10.lb) m.refresh() m.stats() def test_listener_ops(self): ManagerTest(self, self.driver.listener, FakeModel("listener-a10"), self.driver.a10.listener) def test_pool_ops(self): ManagerTest(self, self.driver.pool, 
FakeModel("pool-10"), self.driver.a10.pool) def test_member_ops(self): ManagerTest(self, self.driver.member, FakeModel("member-a10"), self.driver.a10.member) def test_health_monitor_ops(self): ManagerTest(self, self.driver.health_monitor, FakeModel("hm-a10"), self.driver.a10.hm) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/a10networks/__init__.py0000664000567000056710000000000012701407726030350 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/octavia/0000775000567000056710000000000012701410110025476 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/octavia/__init__.py0000664000567000056710000000000012701407726027620 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/octavia/test_octavia_messaging_consumer.py0000664000567000056710000001746612701407726034546 0ustar jenkinsjenkins00000000000000# Copyright 2016 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_config import cfg from neutron_lbaas.common import exceptions from neutron_lbaas.db.loadbalancer import models import neutron_lbaas.drivers.octavia.driver as odriver from neutron_lbaas.drivers.octavia.driver import octavia_messaging_consumer from neutron_lbaas.services.loadbalancer import constants from neutron_lbaas.tests.unit.drivers.octavia import test_octavia_driver InfoContainer = octavia_messaging_consumer.InfoContainer class TestOctaviaMessagingConsumer(test_octavia_driver.BaseOctaviaDriverTest): def setUp(self): super(test_octavia_driver.BaseOctaviaDriverTest, self).setUp() self.plugin = mock.Mock() self.driver = odriver.OctaviaDriver(self.plugin) def assert_handle_streamed_event_called(self, model_class, id_param, payload): call_args_list = self.driver.plugin.db.update_status.call_args_list[0] self.assertEqual(len(call_args_list), 2) self.assertEqual(len(call_args_list[0]), 3) self.assertEqual(model_class, call_args_list[0][1]) self.assertEqual(call_args_list[0][2], id_param) self.assertEqual(call_args_list[1], payload) def test_info_container_constructor(self): ID = 'test_id' PAYLOAD = 'test_payload' TYPE = 'test_type' cnt = InfoContainer(TYPE, ID, PAYLOAD) self.assertEqual(cnt.info_type, TYPE) self.assertEqual(cnt.info_id, ID) self.assertEqual(cnt.info_payload, PAYLOAD) self.assertEqual(cnt.to_dict(), {'info_type': TYPE, 'info_id': ID, 'info_payload': PAYLOAD}) def test_info_container_from_dict(self): ID = 'test_id' PAYLOAD = 'test_payload' TYPE = 'test_type' cnt = InfoContainer.from_dict({'info_type': TYPE, 'info_id': ID, 'info_payload': PAYLOAD}) self.assertEqual(cnt.info_type, TYPE) self.assertEqual(cnt.info_id, ID) self.assertEqual(cnt.info_payload, PAYLOAD) def test_set_consumer_topic(self): TOPIC = 'neutron_lbaas_event' self.addCleanup(cfg.CONF.clear_override, 'event_stream_topic', group='oslo_messaging') cfg.CONF.set_override('event_stream_topic', TOPIC, group='oslo_messaging') consumer = 
octavia_messaging_consumer.OctaviaConsumer(self.driver) self.assertIsNotNone(consumer.transport) self.assertEqual(TOPIC, consumer.target.topic) self.assertEqual(cfg.CONF.host, consumer.target.server) @mock.patch.object(octavia_messaging_consumer.messaging, 'get_rpc_server') def test_consumer_start(self, mock_get_rpc_server): mock_server = mock.Mock() mock_get_rpc_server.return_value = mock_server TOPIC = 'neutron_lbaas_event' self.addCleanup(cfg.CONF.clear_override, 'event_stream_topic', group='oslo_messaging') cfg.CONF.set_override('event_stream_topic', TOPIC, group='oslo_messaging') consumer = octavia_messaging_consumer.OctaviaConsumer(self.driver) consumer.start() mock_get_rpc_server.assert_called_once_with( consumer.transport, consumer.target, consumer.endpoints, executor='eventlet' ) mock_server.start.assert_called_once_with() @mock.patch.object(octavia_messaging_consumer.messaging, 'get_rpc_server') def test_consumer_stop(self, mock_get_rpc_server): mock_server = mock.Mock() mock_get_rpc_server.return_value = mock_server consumer = octavia_messaging_consumer.OctaviaConsumer(self.driver) consumer.start() consumer.stop() mock_server.stop.assert_called_once_with() mock_server.wait.assert_not_called() @mock.patch.object(octavia_messaging_consumer.messaging, 'get_rpc_server') def test_consumer_graceful_stop(self, mock_get_rpc_server): mock_server = mock.Mock() mock_get_rpc_server.return_value = mock_server consumer = octavia_messaging_consumer.OctaviaConsumer(self.driver) consumer.start() consumer.stop(graceful=True) mock_server.stop.assert_called_once_with() mock_server.wait.assert_called_once_with() @mock.patch.object(octavia_messaging_consumer.messaging, 'get_rpc_server') def test_consumer_reset(self, mock_get_rpc_server): mock_server = mock.Mock() mock_get_rpc_server.return_value = mock_server consumer = octavia_messaging_consumer.OctaviaConsumer(self.driver) consumer.start() consumer.reset() mock_server.reset.assert_called_once_with() def set_db_mocks(self): 
TOPIC = 'neutron_lbaas_event' self.addCleanup(cfg.CONF.clear_override, 'event_stream_topic', group='oslo_messaging') cfg.CONF.set_override('event_stream_topic', TOPIC, group='oslo_messaging') self.payload = {'operating_status': 'ONLINE'} self.consumer = octavia_messaging_consumer.OctaviaConsumer( self.driver) def test_updatedb_with_raises_exception_with_bad_model_name(self): self.set_db_mocks() cnt = InfoContainer('listener_statsX', 'id', self.payload).to_dict() self.assertRaises(exceptions.ModelMapException, self.consumer.endpoints[0].update_info, {}, cnt) def test_updatedb_ignores_listener_stats(self): self.set_db_mocks() cnt = InfoContainer('listener_stats', 'id', self.payload).to_dict() self.consumer.endpoints[0].update_info({}, cnt) call_len = len(self.driver.plugin.db.update_status.call_args_list) self.assertEqual(call_len, 0) # See didn't do anything def test_updatedb_loadbalancer(self): self.set_db_mocks() cnt = InfoContainer(constants.LOADBALANCER_EVENT, 'lb_id', self.payload).to_dict() self.consumer.endpoints[0].update_info({}, cnt) self.assert_handle_streamed_event_called(models.LoadBalancer, 'lb_id', self.payload) def test_updatedb_listener(self): self.set_db_mocks() cnt = InfoContainer(constants.LISTENER_EVENT, 'listener_id', self.payload).to_dict() self.consumer.endpoints[0].update_info({}, cnt) self.assert_handle_streamed_event_called(models.Listener, 'listener_id', self.payload) def test_updatedb_pool(self): self.set_db_mocks() cnt = InfoContainer(constants.POOL_EVENT, 'pool_id', self.payload).to_dict() self.consumer.endpoints[0].update_info({}, cnt) self.assert_handle_streamed_event_called(models.PoolV2, 'pool_id', self.payload) def test_updatedb_member(self): self.set_db_mocks() cnt = InfoContainer(constants.MEMBER_EVENT, 'pool_id', self.payload).to_dict() self.consumer.endpoints[0].update_info({}, cnt) self.assert_handle_streamed_event_called(models.MemberV2, 'pool_id', self.payload) 
neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/octavia/test_octavia_driver.py0000664000567000056710000004253712701407726032146 0ustar jenkinsjenkins00000000000000# Copyright 2015, Banashankar Veerad, Copyright IBM Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from oslo_config import cfg from neutron import context from neutron_lbaas.drivers.octavia import driver from neutron_lbaas.services.loadbalancer import constants from neutron_lbaas.services.loadbalancer import data_models from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancerv2 class ManagerTest(object): def __init__(self, parent, manager, mocked_req): self.parent = parent self.context = parent.context self.driver = parent.driver self.manager = manager self.mocked_req = mocked_req def create(self, model, url, args): self.manager.create(self.context, model) self.mocked_req.post.assert_called_with(url, args) def update(self, old_model, model, url, args): self.manager.update(self.context, old_model, model) self.mocked_req.put.assert_called_with(url, args) def delete(self, model, url): self.manager.delete(self.context, model) self.mocked_req.delete.assert_called_with(url) def delete_cascade(self, model, url): self.manager.delete_cascade(self.context, model) self.mocked_req.delete.assert_called_with(url) # TODO(Banashankar) : Complete refresh function. Need more info. def refresh(self): pass # TODO(Banashankar): Complete stats function. Need more info. 
def stats(self): pass class BaseOctaviaDriverTest(test_db_loadbalancerv2.LbaasPluginDbTestCase): # Copied it from Brocade's test code :/ def _create_fake_models(self): # This id is used for all the entities. id = 'test_id' lb = data_models.LoadBalancer(id=id) sni_container = data_models.SNI(listener_id=id) listener = data_models.Listener(id=id, loadbalancer=lb, sni_containers=[sni_container]) pool = data_models.Pool(id=id, loadbalancer=lb) member = data_models.Member(id=id, pool=pool) hm = data_models.HealthMonitor(id=id, pool=pool) l7policy = data_models.L7Policy( id=id, listener=listener, redirect_pool_id=pool.id, action=constants.L7_POLICY_ACTION_REDIRECT_TO_POOL) l7rule = data_models.L7Rule( id=id, policy=l7policy, type=constants.L7_RULE_TYPE_PATH, compare_type=constants.L7_RULE_COMPARE_TYPE_STARTS_WITH, value='/api') lb.listeners = [listener] lb.pools = [pool] listener.default_pool = pool listener.l7policies = [l7policy] l7policy.rules = [l7rule] pool.members = [member] pool.healthmonitor = hm return lb def setUp(self): super(BaseOctaviaDriverTest, self).setUp() self.context = context.get_admin_context() self.plugin = mock.Mock() self.driver = driver.OctaviaDriver(self.plugin) # mock of rest call. self.driver.req = mock.Mock() self.lb = self._create_fake_models() class TestOctaviaDriver(BaseOctaviaDriverTest): def test_allocates_vip(self): self.addCleanup(cfg.CONF.clear_override, 'allocates_vip', group='octavia') cfg.CONF.set_override('allocates_vip', True, group='octavia') test_driver = driver.OctaviaDriver(self.plugin) self.assertTrue(test_driver.load_balancer.allocates_vip) def test_load_balancer_ops(self): m = ManagerTest(self, self.driver.load_balancer, self.driver.req) lb = self.lb # urls for assert test. lb_url = '/v1/loadbalancers' lb_url_id = '/v1/loadbalancers/' + lb.id # Create LB test # args for create assert. 
args = { 'id': lb.id, 'name': lb.name, 'description': lb.description, 'enabled': lb.admin_state_up, 'project_id': lb.tenant_id, 'vip': { 'subnet_id': lb.vip_subnet_id, 'ip_address': lb.vip_address, 'port_id': lb.vip_port_id, } } m.create(lb, lb_url, args) # Update LB test # args for update assert. args = args = { 'name': lb.name, 'description': lb.description, 'enabled': lb.admin_state_up, } m.update(lb, lb, lb_url_id, args) # delete LB test m.delete_cascade(lb, lb_url_id + '/delete_cascade') # TODO(Banashankar) : refresh n stats fucntions are not yet done. #m.refresh() #m.stats() def test_listener_ops(self): m = ManagerTest(self, self.driver.listener, self.driver.req) listener = self.lb.listeners[0] # urls for assert test. list_url = '/v1/loadbalancers/%s/listeners' % listener.loadbalancer.id list_url_id = list_url + '/%s' % (listener.id) # Create Listener test. # args for create and update assert. sni_containers = [sni.tls_container_id for sni in listener.sni_containers] args = { 'id': listener.id, 'name': listener.name, 'description': listener.description, 'enabled': listener.admin_state_up, 'protocol': listener.protocol, 'protocol_port': listener.protocol_port, 'connection_limit': listener.connection_limit, 'tls_certificate_id': listener.default_tls_container_id, 'sni_containers': sni_containers, 'default_pool_id': listener.default_pool_id, 'project_id': listener.tenant_id } m.create(listener, list_url, args) # Update listener test. del args['id'] del args['project_id'] m.update(listener, listener, list_url_id, args) # Delete listener. m.delete(listener, list_url_id) def test_pool_ops(self): m = ManagerTest(self, self.driver.pool, self.driver.req) pool = self.lb.listeners[0].default_pool # urls for assert test. pool_url = '/v1/loadbalancers/%s/pools' % ( pool.loadbalancer.id) pool_url_id = pool_url + "/%s" % pool.id # Test create pool. # args for create and update assert. 
args = { 'id': pool.id, 'name': pool.name, 'description': pool.description, 'enabled': pool.admin_state_up, 'protocol': pool.protocol, 'lb_algorithm': pool.lb_algorithm, 'project_id': pool.tenant_id } if pool.session_persistence: args['session_persistence'] = { 'type': pool.session_persistence.type, 'cookie_name': pool.session_persistence.cookie_name, } else: args['session_persistence'] = None m.create(pool, pool_url, args) # Test update pool. del args['id'] del args['project_id'] m.update(pool, pool, pool_url_id, args) # Test pool delete. m.delete(pool, pool_url_id) def test_member_ops(self): m = ManagerTest(self, self.driver.member, self.driver.req) member = self.lb.listeners[0].default_pool.members[0] # urls for assert. mem_url = '/v1/loadbalancers/%s/pools/%s/members' % ( member.pool.loadbalancer.id, member.pool.id) mem_url_id = mem_url + "/%s" % member.id # Test Create member. # args for create assert. args = { 'id': member.id, 'enabled': member.admin_state_up, 'ip_address': member.address, 'protocol_port': member.protocol_port, 'weight': member.weight, 'subnet_id': member.subnet_id, 'project_id': member.tenant_id } m.create(member, mem_url, args) # Test member update. # args for update assert. args = { 'enabled': member.admin_state_up, 'protocol_port': member.protocol_port, 'weight': member.weight, } m.update(member, member, mem_url_id, args) # Test member delete. m.delete(member, mem_url_id) def test_health_monitor_ops(self): m = ManagerTest(self, self.driver.health_monitor, self.driver.req) hm = self.lb.listeners[0].default_pool.healthmonitor # urls for assert. hm_url = '/v1/loadbalancers/%s/pools/%s/healthmonitor' % ( hm.pool.loadbalancer.id, hm.pool.id) # Test HM create. # args for create and update assert. 
args = { 'type': hm.type, 'delay': hm.delay, 'timeout': hm.timeout, 'rise_threshold': hm.max_retries, 'fall_threshold': hm.max_retries, 'http_method': hm.http_method, 'url_path': hm.url_path, 'expected_codes': hm.expected_codes, 'enabled': hm.admin_state_up, 'project_id': hm.tenant_id } m.create(hm, hm_url, args) # Test HM update del args['project_id'] m.update(hm, hm, hm_url, args) # Test HM delete m.delete(hm, hm_url) def test_l7_policy_ops_reject(self): m = ManagerTest(self, self.driver.l7policy, self.driver.req) l7p = copy.deepcopy(self.lb.listeners[0].l7policies[0]) l7p.action = constants.L7_POLICY_ACTION_REJECT # urls for assert. l7p_url = '/v1/loadbalancers/%s/listeners/%s/l7policies' % ( l7p.listener.loadbalancer.id, l7p.listener.id) l7p_url_id = l7p_url + "/%s" % l7p.id # Test L7Policy create. # args for create and update assert. args = { 'id': l7p.id, 'name': l7p.name, 'description': l7p.description, 'action': constants.L7_POLICY_ACTION_REJECT, 'position': l7p.position, 'enabled': l7p.admin_state_up } m.create(l7p, l7p_url, args) # Test L7Policy update del args['id'] m.update(l7p, l7p, l7p_url_id, args) # Test L7Policy delete m.delete(l7p, l7p_url_id) def test_l7_policy_ops_rdr_pool(self): m = ManagerTest(self, self.driver.l7policy, self.driver.req) l7p = copy.deepcopy(self.lb.listeners[0].l7policies[0]) l7p.action = constants.L7_POLICY_ACTION_REDIRECT_TO_POOL # urls for assert. l7p_url = '/v1/loadbalancers/%s/listeners/%s/l7policies' % ( l7p.listener.loadbalancer.id, l7p.listener.id) l7p_url_id = l7p_url + "/%s" % l7p.id # Test L7Policy create. # args for create and update assert. 
args = { 'id': l7p.id, 'name': l7p.name, 'description': l7p.description, 'action': constants.L7_POLICY_ACTION_REDIRECT_TO_POOL, 'redirect_pool_id': l7p.redirect_pool_id, 'position': l7p.position, 'enabled': l7p.admin_state_up } m.create(l7p, l7p_url, args) # Test L7Policy update del args['id'] m.update(l7p, l7p, l7p_url_id, args) # Test L7Policy delete m.delete(l7p, l7p_url_id) def test_l7_policy_ops_rdr_url(self): m = ManagerTest(self, self.driver.l7policy, self.driver.req) l7p = copy.deepcopy(self.lb.listeners[0].l7policies[0]) l7p.action = constants.L7_POLICY_ACTION_REDIRECT_TO_URL # urls for assert. l7p_url = '/v1/loadbalancers/%s/listeners/%s/l7policies' % ( l7p.listener.loadbalancer.id, l7p.listener.id) l7p_url_id = l7p_url + "/%s" % l7p.id # Test L7Policy create. # args for create and update assert. args = { 'id': l7p.id, 'name': l7p.name, 'description': l7p.description, 'action': constants.L7_POLICY_ACTION_REDIRECT_TO_URL, 'redirect_url': l7p.redirect_url, 'position': l7p.position, 'enabled': l7p.admin_state_up } m.create(l7p, l7p_url, args) # Test L7Policy update del args['id'] m.update(l7p, l7p, l7p_url_id, args) # Test L7Policy delete m.delete(l7p, l7p_url_id) def test_l7_rule_ops(self): m = ManagerTest(self, self.driver.l7rule, self.driver.req) l7r = self.lb.listeners[0].l7policies[0].rules[0] # urls for assert. l7r_url = '/v1/loadbalancers/%s/listeners/%s/l7policies/%s/l7rules' % ( l7r.policy.listener.loadbalancer.id, l7r.policy.listener.id, l7r.policy.id) l7r_url_id = l7r_url + "/%s" % l7r.id # Test L7Rule create. # args for create and update assert. 
args = { 'id': l7r.id, 'type': l7r.type, 'compare_type': l7r.compare_type, 'key': l7r.key, 'value': l7r.value, 'invert': l7r.invert } m.create(l7r, l7r_url, args) # Test L7rule update del args['id'] m.update(l7r, l7r, l7r_url_id, args) # Test L7Rule delete m.delete(l7r, l7r_url_id) class TestThreadedDriver(BaseOctaviaDriverTest): def setUp(self): super(TestThreadedDriver, self).setUp() cfg.CONF.set_override('request_poll_interval', 1, group='octavia') cfg.CONF.set_override('request_poll_timeout', 5, group='octavia') self.driver.req.get = mock.MagicMock() self.succ_completion = mock.MagicMock() self.fail_completion = mock.MagicMock() self.context = mock.MagicMock() ctx_patcher = mock.patch('neutron.context.get_admin_context', return_value=self.context) ctx_patcher.start() self.addCleanup(ctx_patcher.stop) self.driver.load_balancer.successful_completion = ( self.succ_completion) self.driver.load_balancer.failed_completion = self.fail_completion def test_thread_op_goes_active(self): self.driver.req.get.side_effect = [ {'provisioning_status': 'PENDING_CREATE'}, {'provisioning_status': 'ACTIVE'} ] driver.thread_op(self.driver.load_balancer, self.lb) self.succ_completion.assert_called_once_with(self.context, self.lb, delete=False) self.assertEqual(0, self.fail_completion.call_count) def test_thread_op_goes_deleted(self): self.driver.req.get.side_effect = [ {'provisioning_status': 'PENDING_DELETE'}, {'provisioning_status': 'DELETED'} ] driver.thread_op(self.driver.load_balancer, self.lb, delete=True) self.succ_completion.assert_called_once_with(self.context, self.lb, delete=True) self.assertEqual(0, self.fail_completion.call_count) def test_thread_op_goes_error(self): self.driver.req.get.side_effect = [ {'provisioning_status': 'PENDING_CREATE'}, {'provisioning_status': 'ERROR'} ] driver.thread_op(self.driver.load_balancer, self.lb) self.fail_completion.assert_called_once_with(self.context, self.lb) self.assertEqual(0, self.succ_completion.call_count) def 
test_thread_op_a_times_out(self): cfg.CONF.set_override('request_poll_timeout', 1, group='octavia') self.driver.req.get.side_effect = [ {'provisioning_status': 'PENDING_CREATE'} ] driver.thread_op(self.driver.load_balancer, self.lb) self.fail_completion.assert_called_once_with(self.context, self.lb) self.assertEqual(0, self.succ_completion.call_count) def test_thread_op_updates_vip_when_vip_delegated(self): cfg.CONF.set_override('allocates_vip', True, group='octavia') expected_vip = '10.1.1.1' self.driver.req.get.side_effect = [ {'provisioning_status': 'PENDING_CREATE', 'vip': {'ip_address': ''}}, {'provisioning_status': 'ACTIVE', 'vip': {'ip_address': expected_vip}} ] driver.thread_op(self.driver.load_balancer, self.lb, lb_create=True) self.succ_completion.assert_called_once_with(self.context, self.lb, delete=False, lb_create=True) self.assertEqual(expected_vip, self.lb.vip_address) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/haproxy/0000775000567000056710000000000012701410110025542 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/haproxy/__init__.py0000664000567000056710000000000012701407726027664 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/drivers/haproxy/test_namespace_driver.py0000664000567000056710000007301312701407726032511 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections import contextlib import socket import mock from neutron.plugins.common import constants from neutron_lib import exceptions from neutron_lbaas.drivers.haproxy import namespace_driver from neutron_lbaas.services.loadbalancer import data_models from neutron_lbaas.tests import base class TestHaproxyNSDriver(base.BaseTestCase): def setUp(self): super(TestHaproxyNSDriver, self).setUp() conf = mock.Mock() conf.haproxy.loadbalancer_state_path = '/the/path' conf.interface_driver = 'intdriver' conf.haproxy.user_group = 'test_group' conf.haproxy.send_gratuitous_arp = 3 self.conf = conf self.rpc_mock = mock.Mock() with mock.patch( 'neutron.common.utils.load_class_by_alias_or_classname'): self.driver = namespace_driver.HaproxyNSDriver( conf, self.rpc_mock ) self.vif_driver = mock.Mock() self.driver.vif_driver = self.vif_driver self._build_mock_data_models() def _build_mock_data_models(self): host_route = data_models.HostRoute(destination='0.0.0.0/0', nexthop='192.0.0.1') subnet = data_models.Subnet(cidr='10.0.0.1/24', gateway_ip='10.0.0.2', host_routes=[host_route]) fixed_ip = data_models.IPAllocation(ip_address='10.0.0.1') setattr(fixed_ip, 'subnet', subnet) port = data_models.Port(id='port1', network_id='network1', mac_address='12-34-56-78-9A-BC', fixed_ips=[fixed_ip]) self.lb = data_models.LoadBalancer(id='lb1', listeners=[], vip_port=port, vip_address='10.0.0.1') def test_get_name(self): self.assertEqual(namespace_driver.DRIVER_NAME, self.driver.get_name()) @mock.patch('neutron.agent.linux.ip_lib.IPWrapper') @mock.patch('os.path.dirname') @mock.patch('os.path.isdir') @mock.patch('shutil.rmtree') def test_undeploy_instance(self, mock_shutil, mock_isdir, mock_dirname, mock_ip_wrap): self.driver._get_state_file_path = mock.Mock(return_value='/path') namespace_driver.kill_pids_in_file = mock.Mock() self.driver._unplug = mock.Mock() mock_dirname.return_value = '/path/' + self.lb.id mock_isdir.return_value = False self.driver.undeploy_instance(self.lb.id) 
namespace_driver.kill_pids_in_file.assert_called_once_with('/path') calls = [mock.call(self.lb.id, 'pid'), mock.call(self.lb.id, '')] self.driver._get_state_file_path.has_calls(calls) self.assertFalse(self.driver._unplug.called) self.assertFalse(mock_ip_wrap.called) mock_isdir.assert_called_once_with('/path/' + self.lb.id) self.assertFalse(mock_shutil.called) self.driver.deployed_loadbalancers[self.lb.id] = self.lb mock_isdir.return_value = True namespace_driver.kill_pids_in_file.reset_mock() mock_isdir.reset_mock() mock_ns = mock_ip_wrap.return_value mock_ns.get_devices.return_value = [collections.namedtuple( 'Device', ['name'])(name='test_device')] self.driver.undeploy_instance(self.lb.id, cleanup_namespace=True, delete_namespace=True) ns = namespace_driver.get_ns_name(self.lb.id) namespace_driver.kill_pids_in_file.assert_called_once_with('/path') calls = [mock.call(self.lb.id, 'pid'), mock.call(self.lb.id, '')] self.driver._get_state_file_path.has_calls(calls) self.driver._unplug.assert_called_once_with(ns, self.lb.vip_port) ip_wrap_calls = [mock.call(namespace=ns), mock.call(namespace=ns)] mock_ip_wrap.has_calls(ip_wrap_calls) mock_ns.get_devices.assert_called_once_with(exclude_loopback=True) self.vif_driver.unplug.assert_called_once_with('test_device', namespace=ns) mock_shutil.assert_called_once_with('/path/' + self.lb.id) mock_ns.garbage_collect_namespace.assert_called_once_with() @mock.patch('os.path.exists') @mock.patch('os.listdir') def test_remove_orphans(self, list_dir, exists): lb_ids = [self.lb.id] exists.return_value = False self.driver.remove_orphans(lb_ids) exists.assert_called_once_with(self.driver.state_path) self.assertFalse(list_dir.called) exists.reset_mock() exists.return_value = True list_dir.return_value = [self.lb.id, 'lb2'] self.driver.exists = mock.Mock() self.driver.undeploy_instance = mock.Mock() self.driver.remove_orphans(lb_ids) exists.assert_called_once_with(self.driver.state_path) 
list_dir.assert_called_once_with(self.driver.state_path) self.driver.exists.assert_called_once_with('lb2') self.driver.undeploy_instance.assert_called_once_with( 'lb2', cleanup_namespace=True) def test_get_stats(self): # Shamelessly stolen from v1 namespace driver tests. raw_stats = ('# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,' 'dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,' 'act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,' 'sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,' 'check_status,check_code,check_duration,hrsp_1xx,' 'hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,' 'req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,\n' '8e271901-69ed-403e-a59b-f53cf77ef208,BACKEND,1,2,3,4,0,' '10,7764,2365,0,0,,0,0,0,0,UP,1,1,0,,0,103780,0,,1,2,0,,0' ',,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,\n\n' 'a557019b-dc07-4688-9af4-f5cf02bb6d4b,' '32a6c2a3-420a-44c3-955d-86bd2fc6871e,0,0,0,1,,7,1120,' '224,,0,,0,0,0,0,UP,1,1,0,0,1,2623,303,,1,2,1,,7,,2,0,,' '1,L7OK,200,98,0,7,0,0,0,0,0,,,,0,0,\n' 'a557019b-dc07-4688-9af4-f5cf02bb6d4b,' 'd9aea044-8867-4e80-9875-16fb808fa0f9,0,0,0,2,,12,0,0,,' '0,,0,0,8,4,DOWN,1,1,0,9,2,308,675,,1,2,2,,4,,2,0,,2,' 'L4CON,,2999,0,0,0,0,0,0,0,,,,0,0,\n') raw_stats_empty = ('# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,' 'bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,' 'status,weight,act,bck,chkfail,chkdown,lastchg,' 'downtime,qlimit,pid,iid,sid,throttle,lbtot,' 'tracked,type,rate,rate_lim,rate_max,check_status,' 'check_code,check_duration,hrsp_1xx,hrsp_2xx,' 'hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,' 'req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,' '\n') with contextlib.nested( mock.patch.object(self.driver, '_get_state_file_path'), mock.patch('socket.socket'), mock.patch('os.path.exists'), ) as (gsp, mocket, path_exists): gsp.side_effect = lambda x, y, z: '/pool/' + y path_exists.return_value = True mocket.return_value = mocket mocket.recv.return_value = raw_stats exp_stats = {'connection_errors': '0', 
'active_connections': '3', 'current_sessions': '3', 'bytes_in': '7764', 'max_connections': '4', 'max_sessions': '4', 'bytes_out': '2365', 'response_errors': '0', 'total_sessions': '10', 'total_connections': '10', 'members': { '32a6c2a3-420a-44c3-955d-86bd2fc6871e': { 'status': 'ACTIVE', 'health': 'L7OK', 'failed_checks': '0' }, 'd9aea044-8867-4e80-9875-16fb808fa0f9': { 'status': 'INACTIVE', 'health': 'L4CON', 'failed_checks': '9' } } } stats = self.driver.get_stats(self.lb.id) self.assertEqual(exp_stats, stats) mocket.recv.return_value = raw_stats_empty self.assertEqual({'members': {}}, self.driver.get_stats(self.lb.id)) path_exists.return_value = False mocket.reset_mock() self.assertEqual({}, self.driver.get_stats(self.lb.id)) self.assertFalse(mocket.called) def test_deploy_instance(self): self.driver.deployable = mock.Mock(return_value=False) self.driver.exists = mock.Mock(return_value=True) self.driver.update = mock.Mock() self.driver.create = mock.Mock() def reset(): self.driver.deployable.reset_mock() self.driver.exists.reset_mock() self.driver.update.reset_mock() self.driver.create.reset_mock() deployed = self.driver.deploy_instance(self.lb) self.assertFalse(deployed) self.assertFalse(self.driver.exists.called) self.assertFalse(self.driver.create.called) self.assertFalse(self.driver.update.called) reset() self.driver.deployable.return_value = True deployed = self.driver.deploy_instance(self.lb) self.assertTrue(deployed) self.driver.exists.assert_called_once_with(self.lb.id) self.driver.update.assert_called_once_with(self.lb) self.assertFalse(self.driver.create.called) reset() self.driver.exists.return_value = False deployed = self.driver.deploy_instance(self.lb) self.assertTrue(deployed) self.driver.exists.assert_called_once_with(self.lb.id) self.driver.create.assert_called_once_with(self.lb) self.assertFalse(self.driver.update.called) def test_update(self): self.driver._get_state_file_path = mock.Mock(return_value='/path') self.driver._spawn = mock.Mock() 
with mock.patch('six.moves.builtins.open') as m_open: file_mock = mock.MagicMock() m_open.return_value = file_mock file_mock.__enter__.return_value = file_mock file_mock.__iter__.return_value = iter(['123']) self.driver.update(self.lb) self.driver._spawn.assert_called_once_with(self.lb, ['-sf', '123']) @mock.patch('socket.socket') @mock.patch('os.path.exists') @mock.patch('neutron.agent.linux.ip_lib.IPWrapper') def test_exists(self, ip_wrap, exists, mocket): socket_path = '/path/haproxy_stats.sock' mock_ns = ip_wrap.return_value mock_socket = mocket.return_value self.driver._get_state_file_path = mock.Mock(return_value=socket_path) mock_ns.netns.exists.return_value = False exists.return_value = False def reset(): ip_wrap.reset_mock() self.driver._get_state_file_path.reset_mock() mock_ns.reset_mock() exists.reset_mock() mocket.reset_mock() mock_socket.reset_mock() ret_exists = self.driver.exists(self.lb.id) ip_wrap.assert_called_once_with() self.driver._get_state_file_path.assert_called_once_with( self.lb.id, 'haproxy_stats.sock', False) mock_ns.netns.exists.assert_called_once_with( namespace_driver.get_ns_name(self.lb.id)) self.assertFalse(exists.called) self.assertFalse(mocket.called) self.assertFalse(mock_socket.connect.called) self.assertFalse(ret_exists) reset() mock_ns.netns.exists.return_value = True exists.return_value = False ret_exists = self.driver.exists(self.lb.id) ip_wrap.assert_called_once_with() self.driver._get_state_file_path.assert_called_once_with( self.lb.id, 'haproxy_stats.sock', False) mock_ns.netns.exists.assert_called_once_with( namespace_driver.get_ns_name(self.lb.id)) exists.assert_called_once_with(socket_path) self.assertFalse(mocket.called) self.assertFalse(mock_socket.connect.called) self.assertFalse(ret_exists) reset() mock_ns.netns.exists.return_value = True exists.return_value = True ret_exists = self.driver.exists(self.lb.id) ip_wrap.assert_called_once_with() self.driver._get_state_file_path.assert_called_once_with( self.lb.id, 
'haproxy_stats.sock', False) mock_ns.netns.exists.assert_called_once_with( namespace_driver.get_ns_name(self.lb.id)) exists.assert_called_once_with(socket_path) mocket.assert_called_once_with(socket.AF_UNIX, socket.SOCK_STREAM) mock_socket.connect.assert_called_once_with(socket_path) self.assertTrue(ret_exists) def test_create(self): self.driver._plug = mock.Mock() self.driver._spawn = mock.Mock() self.driver.create(self.lb) self.driver._plug.assert_called_once_with( namespace_driver.get_ns_name(self.lb.id), self.lb.vip_port, self.lb.vip_address) self.driver._spawn.assert_called_once_with(self.lb) def test_deployable(self): # test None ret_val = self.driver.deployable(None) self.assertFalse(ret_val) # test no listeners ret_val = self.driver.deployable(self.lb) self.assertFalse(ret_val) # test no acceptable listeners listener = data_models.Listener( provisioning_status=constants.PENDING_DELETE, admin_state_up=True) self.lb.listeners.append(listener) ret_val = self.driver.deployable(self.lb) self.assertFalse(ret_val) listener.provisioning_status = constants.PENDING_CREATE listener.admin_state_up = False ret_val = self.driver.deployable(self.lb) self.assertFalse(ret_val) # test bad lb status listener.admin_state_up = True self.lb.provisioning_status = constants.PENDING_DELETE self.lb.admin_state_up = True ret_val = self.driver.deployable(self.lb) self.assertFalse(ret_val) self.lb.provisioning_status = constants.PENDING_UPDATE self.lb.admin_state_up = False ret_val = self.driver.deployable(self.lb) self.assertFalse(ret_val) # test everything good self.lb.admin_state_up = True ret_val = self.driver.deployable(self.lb) self.assertTrue(ret_val) @mock.patch('neutron.common.utils.ensure_dir') def test_get_state_file_path(self, ensure_dir): path = self.driver._get_state_file_path(self.lb.id, 'conf', ensure_state_dir=False) self.assertEqual('/the/path/v2/lb1/conf', path) self.assertFalse(ensure_dir.called) path = self.driver._get_state_file_path(self.lb.id, 'conf') 
self.assertEqual('/the/path/v2/lb1/conf', path) self.assertTrue(ensure_dir.called) @mock.patch('neutron.agent.linux.ip_lib.device_exists') @mock.patch('neutron.agent.linux.ip_lib.IPWrapper') def test_plug(self, ip_wrap, device_exists): device_exists.return_value = True interface_name = 'tap-d4nc3' self.vif_driver.get_device_name.return_value = interface_name self.assertRaises(exceptions.PreexistingDeviceFailure, self.driver._plug, 'ns1', self.lb.vip_port, self.lb.vip_address, reuse_existing=False) device_exists.assert_called_once_with(interface_name, namespace='ns1') self.rpc_mock.plug_vip_port.assert_called_once_with( self.lb.vip_port.id) device_exists.reset_mock() self.rpc_mock.plug_vip_port.reset_mock() mock_ns = ip_wrap.return_value self.driver._plug('ns1', self.lb.vip_port, self.lb.vip_address) self.rpc_mock.plug_vip_port.assert_called_once_with( self.lb.vip_port.id) device_exists.assert_called_once_with(interface_name, namespace='ns1') self.assertFalse(self.vif_driver.plug.called) expected_cidrs = ['10.0.0.1/24'] self.vif_driver.init_l3.assert_called_once_with( interface_name, expected_cidrs, namespace='ns1') calls = [mock.call(['route', 'add', 'default', 'gw', '192.0.0.1'], check_exit_code=False), mock.call(['arping', '-U', '-I', interface_name, '-c', 3, '10.0.0.1'], check_exit_code=False)] mock_ns.netns.execute.has_calls(calls) self.assertEqual(2, mock_ns.netns.execute.call_count) def test_unplug(self): interface_name = 'tap-d4nc3' self.vif_driver.get_device_name.return_value = interface_name self.driver._unplug('ns1', self.lb.vip_port) self.rpc_mock.unplug_vip_port.assert_called_once_with( self.lb.vip_port.id) self.vif_driver.get_device_name.assert_called_once_with( self.lb.vip_port) self.vif_driver.unplug.assert_called_once_with(interface_name, namespace='ns1') @mock.patch('neutron.common.utils.ensure_dir') @mock.patch('neutron_lbaas.services.loadbalancer.drivers.haproxy.' 
'jinja_cfg.save_config') @mock.patch('neutron.agent.linux.ip_lib.IPWrapper') def test_spawn(self, ip_wrap, jinja_save, ensure_dir): mock_ns = ip_wrap.return_value self.driver._spawn(self.lb) conf_dir = self.driver.state_path + '/' + self.lb.id + '/%s' jinja_save.assert_called_once_with( conf_dir % 'haproxy.conf', self.lb, conf_dir % 'haproxy_stats.sock', 'test_group', conf_dir % '') ip_wrap.assert_called_once_with( namespace=namespace_driver.get_ns_name(self.lb.id)) mock_ns.netns.execute.assert_called_once_with( ['haproxy', '-f', conf_dir % 'haproxy.conf', '-p', conf_dir % 'haproxy.pid']) self.assertIn(self.lb.id, self.driver.deployed_loadbalancers) self.assertEqual(self.lb, self.driver.deployed_loadbalancers[self.lb.id]) class BaseTestManager(base.BaseTestCase): def setUp(self): super(BaseTestManager, self).setUp() self.driver = mock.Mock() self.lb_manager = namespace_driver.LoadBalancerManager(self.driver) self.listener_manager = namespace_driver.ListenerManager(self.driver) self.pool_manager = namespace_driver.PoolManager(self.driver) self.member_manager = namespace_driver.MemberManager(self.driver) self.hm_manager = namespace_driver.HealthMonitorManager(self.driver) self.refresh = self.driver.loadbalancer.refresh class BaseTestLoadBalancerManager(BaseTestManager): def setUp(self): super(BaseTestLoadBalancerManager, self).setUp() self.in_lb = data_models.LoadBalancer(id='lb1', listeners=[]) class TestLoadBalancerManager(BaseTestLoadBalancerManager): @mock.patch.object(data_models.LoadBalancer, 'from_dict') def test_refresh(self, lb_from_dict): rpc_return = {'id': self.in_lb.id} self.driver.plugin_rpc.get_loadbalancer.return_value = rpc_return from_dict_return = data_models.LoadBalancer(id=self.in_lb.id) lb_from_dict.return_value = from_dict_return self.driver.deploy_instance.return_value = True self.driver.exists.return_value = True self.lb_manager.refresh(self.in_lb) self.driver.plugin_rpc.get_loadbalancer.assert_called_once_with( self.in_lb.id) 
lb_from_dict.assert_called_once_with(rpc_return) self.driver.deploy_instance.assert_called_once_with(from_dict_return) self.assertFalse(self.driver.exists.called) self.assertFalse(self.driver.undeploy_instance.called) self.driver.reset_mock() lb_from_dict.reset_mock() self.driver.deploy_instance.return_value = False self.driver.exists.return_value = False self.lb_manager.refresh(self.in_lb) self.driver.plugin_rpc.get_loadbalancer.assert_called_once_with( self.in_lb.id) lb_from_dict.assert_called_once_with(rpc_return) self.driver.deploy_instance.assert_called_once_with(from_dict_return) self.driver.exists.assert_called_once_with(self.in_lb.id) self.assertFalse(self.driver.undeploy_instance.called) self.driver.reset_mock() lb_from_dict.reset_mock() self.driver.deploy_instance.return_value = False self.driver.exists.return_value = True self.lb_manager.refresh(self.in_lb) self.driver.plugin_rpc.get_loadbalancer.assert_called_once_with( self.in_lb.id) lb_from_dict.assert_called_once_with(rpc_return) self.driver.deploy_instance.assert_called_once_with(from_dict_return) self.driver.exists.assert_called_once_with(from_dict_return.id) self.driver.undeploy_instance.assert_called_once_with(self.in_lb.id) def test_delete(self): self.driver.exists.return_value = False self.lb_manager.delete(self.in_lb) self.driver.exists.assert_called_once_with(self.in_lb.id) self.assertFalse(self.driver.undeploy_instance.called) self.driver.reset_mock() self.driver.exists.return_value = True self.lb_manager.delete(self.in_lb) self.driver.exists.assert_called_once_with(self.in_lb.id) self.driver.undeploy_instance.assert_called_once_with( self.in_lb.id, delete_namespace=True) def test_create(self): self.lb_manager.refresh = mock.Mock() self.lb_manager.create(self.in_lb) self.assertFalse(self.lb_manager.refresh.called) self.lb_manager.refresh.reset_mock() self.in_lb.listeners.append(data_models.Listener(id='listener1')) self.lb_manager.create(self.in_lb) 
self.lb_manager.refresh.assert_called_once_with(self.in_lb) def test_get_stats(self): self.lb_manager.get_stats(self.in_lb.id) self.driver.get_stats.assert_called_once_with(self.in_lb.id) def test_update(self): old_lb = data_models.LoadBalancer(id='lb0') self.lb_manager.refresh = mock.Mock() self.lb_manager.update(old_lb, self.in_lb) self.lb_manager.refresh.assert_called_once_with(self.in_lb) class BaseTestListenerManager(BaseTestLoadBalancerManager): def setUp(self): super(BaseTestListenerManager, self).setUp() self.in_listener = data_models.Listener(id='listener1') self.listener2 = data_models.Listener(id='listener2') self.in_listener.loadbalancer = self.in_lb self.listener2.loadbalancer = self.in_lb self.in_lb.listeners = [self.in_listener, self.listener2] self.refresh = self.driver.loadbalancer.refresh class TestListenerManager(BaseTestListenerManager): def setUp(self): super(TestListenerManager, self).setUp() self.in_listener = data_models.Listener(id='listener1') self.listener2 = data_models.Listener(id='listener2') self.in_lb.listeners = [self.in_listener, self.listener2] self.in_listener.loadbalancer = self.in_lb self.listener2.loadbalancer = self.in_lb def test_remove_listener(self): self.listener_manager._remove_listener(self.in_lb, self.in_listener.id) self.assertEqual(1, len(self.in_lb.listeners)) self.assertEqual(self.listener2.id, self.in_lb.listeners[0].id) def test_update(self): old_listener = data_models.Listener(id='listener1', name='bleh') self.listener_manager.update(old_listener, self.in_listener) self.refresh.assert_called_once_with(self.in_lb) def test_create(self): self.listener_manager.create(self.in_listener) self.refresh.assert_called_once_with(self.in_lb) def test_delete(self): self.listener_manager.delete(self.in_listener) self.refresh.assert_called_once_with(self.in_lb) self.assertFalse(self.driver.undeploy_instance.called) self.refresh.reset_mock() self.driver.reset_mock() self.listener_manager.delete(self.listener2) 
self.assertFalse(self.refresh.called) self.driver.undeploy_instance.assert_called_once_with(self.in_lb.id) class BaseTestPoolManager(BaseTestListenerManager): def setUp(self): super(BaseTestPoolManager, self).setUp() self.in_pool = data_models.Pool(id='pool1') self.in_listener.default_pool = self.in_pool self.in_pool.loadbalancer = self.in_lb self.in_pool.listeners = [self.in_listener] self.in_lb.pools = [self.in_pool] class TestPoolManager(BaseTestPoolManager): def test_update(self): old_pool = data_models.Pool(id=self.in_pool.id, name='bleh') self.pool_manager.update(old_pool, self.in_pool) self.refresh.assert_called_once_with(self.in_lb) def test_create(self): self.pool_manager.create(self.in_pool) self.refresh.assert_called_once_with(self.in_lb) def test_delete(self): self.pool_manager.delete(self.in_pool) self.assertIsNone(self.in_listener.default_pool) self.refresh.assert_called_once_with(self.in_lb) class BaseTestMemberManager(BaseTestPoolManager): def setUp(self): super(BaseTestMemberManager, self).setUp() self.in_member = data_models.Member(id='member1') self.member2 = data_models.Member(id='member2') self.in_pool.members = [self.in_member, self.member2] self.in_member.pool = self.in_pool self.member2.pool = self.in_pool class TestMemberManager(BaseTestMemberManager): def test_remove_member(self): self.member_manager._remove_member(self.in_pool, self.in_member.id) self.assertEqual(1, len(self.in_pool.members)) self.assertEqual(self.member2.id, self.in_pool.members[0].id) def test_update(self): old_member = data_models.Member(id=self.in_member.id, address='0.0.0.0') self.member_manager.update(old_member, self.in_member) self.refresh.assert_called_once_with(self.in_lb) def test_create(self): self.member_manager.create(self.in_member) self.refresh.assert_called_once_with(self.in_lb) def test_delete(self): self.member_manager.delete(self.in_member) self.refresh.assert_called_once_with(self.in_lb) class BaseTestHealthMonitorManager(BaseTestPoolManager): def 
setUp(self): super(BaseTestHealthMonitorManager, self).setUp() self.in_hm = data_models.HealthMonitor(id='hm1') self.in_pool.healthmonitor = self.in_hm self.in_hm.pool = self.in_pool class TestHealthMonitorManager(BaseTestHealthMonitorManager): def test_update(self): old_hm = data_models.HealthMonitor(id=self.in_hm.id, timeout=2) self.hm_manager.update(old_hm, self.in_hm) self.refresh.assert_called_once_with(self.in_lb) def test_create(self): self.hm_manager.create(self.in_hm) self.refresh.assert_called_once_with(self.in_lb) def test_delete(self): self.hm_manager.delete(self.in_hm) self.assertIsNone(self.in_pool.healthmonitor) self.refresh.assert_called_once_with(self.in_lb) class TestNamespaceDriverModule(base.BaseTestCase): @mock.patch('os.path.exists') @mock.patch('neutron.agent.linux.utils.execute') def test_kill_pids_in_file(self, execute, exists): pid_path = '/var/lib/data' with mock.patch('six.moves.builtins.open') as m_open: exists.return_value = False file_mock = mock.MagicMock() m_open.return_value = file_mock file_mock.__enter__.return_value = file_mock file_mock.__iter__.return_value = iter(['123']) namespace_driver.kill_pids_in_file(pid_path) # sometimes fails # exists.assert_called_once_with(pid_path) self.assertFalse(m_open.called) self.assertFalse(execute.called) exists.return_value = True execute.side_effect = RuntimeError namespace_driver.kill_pids_in_file(pid_path) # sometimes fails # execute.assert_called_once_with(['kill', '-9', '123']) def test_get_ns_name(self): ns_name = namespace_driver.get_ns_name('woohoo') self.assertEqual(namespace_driver.NS_PREFIX + 'woohoo', ns_name) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/test_agent_scheduler.py0000664000567000056710000002774412701407726027200 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from datetime import datetime import mock from neutron.api import extensions from neutron.api.v2 import attributes from neutron import context from neutron.db import agents_db from neutron.extensions import agent from neutron import manager from neutron.plugins.common import constants as plugin_const from neutron.tests.common import helpers from neutron.tests.unit.api import test_extensions from neutron.tests.unit.db import test_agentschedulers_db import neutron.tests.unit.extensions from neutron.tests.unit.extensions import test_agent import six from webob import exc from neutron_lbaas.drivers.haproxy import plugin_driver from neutron_lbaas.extensions import lbaas_agentschedulerv2 from neutron_lbaas.services.loadbalancer import constants as lb_const from neutron_lbaas.tests import base from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancerv2 LBAAS_HOSTA = 'hosta' extensions_path = ':'.join(neutron.tests.unit.extensions.__path__) class AgentSchedulerTestMixIn(test_agentschedulers_db.AgentSchedulerTestMixIn): def _list_loadbalancers_hosted_by_agent( self, agent_id, expected_code=exc.HTTPOk.code, admin_context=True): path = "/agents/%s/%s.%s" % (agent_id, lbaas_agentschedulerv2.LOADBALANCERS, self.fmt) return self._request_list(path, expected_code=expected_code, admin_context=admin_context) def _get_lbaas_agent_hosting_loadbalancer(self, loadbalancer_id, expected_code=exc.HTTPOk.code, admin_context=True): path = "/lbaas/loadbalancers/%s/%s.%s" % (loadbalancer_id, lbaas_agentschedulerv2 .LOADBALANCER_AGENT, self.fmt) return 
self._request_list(path, expected_code=expected_code, admin_context=admin_context) class LBaaSAgentSchedulerTestCase(test_agent.AgentDBTestMixIn, AgentSchedulerTestMixIn, test_db_loadbalancerv2.LbaasTestMixin, base.NeutronDbPluginV2TestCase): fmt = 'json' plugin_str = 'neutron.plugins.ml2.plugin.Ml2Plugin' def _register_agent_states(self, lbaas_agents=False): res = super(LBaaSAgentSchedulerTestCase, self)._register_agent_states( lbaas_agents=lbaas_agents) if lbaas_agents: lbaas_hosta = { 'binary': 'neutron-loadbalancer-agent', 'host': test_agent.LBAAS_HOSTA, 'topic': 'LOADBALANCER_AGENT', 'configurations': {'device_drivers': [ plugin_driver.HaproxyOnHostPluginDriver.device_driver]}, 'agent_type': lb_const.AGENT_TYPE_LOADBALANCERV2} lbaas_hostb = copy.deepcopy(lbaas_hosta) lbaas_hostb['host'] = test_agent.LBAAS_HOSTB callback = agents_db.AgentExtRpcCallback() callback.report_state(self.adminContext, agent_state={'agent_state': lbaas_hosta}, time=datetime.utcnow().isoformat()) callback.report_state(self.adminContext, agent_state={'agent_state': lbaas_hostb}, time=datetime.utcnow().isoformat()) res += [lbaas_hosta, lbaas_hostb] return res def setUp(self): # Save the global RESOURCE_ATTRIBUTE_MAP self.saved_attr_map = {} for res, attrs in six.iteritems(attributes.RESOURCE_ATTRIBUTE_MAP): self.saved_attr_map[res] = attrs.copy() service_plugins = { 'lb_plugin_name': test_db_loadbalancerv2.DB_LB_PLUGIN_CLASS} # default provider should support agent scheduling self.set_override( [('LOADBALANCERV2:lbaas:neutron_lbaas.drivers.haproxy.' 
'plugin_driver.HaproxyOnHostPluginDriver:default')]) super(LBaaSAgentSchedulerTestCase, self).setUp( self.plugin_str, service_plugins=service_plugins) ext_mgr = extensions.PluginAwareExtensionManager.get_instance() self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) self.adminContext = context.get_admin_context() # Add the resources to the global attribute map # This is done here as the setup process won't # initialize the main API router which extends # the global attribute map attributes.RESOURCE_ATTRIBUTE_MAP.update( agent.RESOURCE_ATTRIBUTE_MAP) self.lbaas_plugin = manager.NeutronManager.get_service_plugins()[ plugin_const.LOADBALANCERV2] self.core_plugin = manager.NeutronManager.get_plugin() self.addCleanup(self.restore_attribute_map) def restore_attribute_map(self): # Restore the original RESOURCE_ATTRIBUTE_MAP attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map def test_report_states(self): self._register_agent_states(lbaas_agents=True) agents = self._list_agents() self.assertEqual(8, len(agents['agents'])) def test_loadbalancer_scheduling_on_loadbalancer_creation(self): self._register_agent_states(lbaas_agents=True) with self.loadbalancer() as loadbalancer: lbaas_agent = self._get_lbaas_agent_hosting_loadbalancer( loadbalancer['loadbalancer']['id']) self.assertIsNotNone(lbaas_agent) self.assertEqual(lb_const.AGENT_TYPE_LOADBALANCERV2, lbaas_agent['agent']['agent_type']) loadbalancers = self._list_loadbalancers_hosted_by_agent( lbaas_agent['agent']['id']) self.assertEqual(1, len(loadbalancers['loadbalancers'])) self.assertEqual(loadbalancer['loadbalancer'], loadbalancers['loadbalancers'][0]) self.lbaas_plugin.db.update_loadbalancer_provisioning_status( self.adminContext, loadbalancer['loadbalancer']['id'] ) def test_schedule_loadbalancer_with_disabled_agent(self): lbaas_hosta = { 'binary': 'neutron-loadbalancer-agent', 'host': LBAAS_HOSTA, 'topic': 'LOADBALANCER_AGENT', 'configurations': {'device_drivers': [ 
plugin_driver.HaproxyOnHostPluginDriver.device_driver ]}, 'agent_type': lb_const.AGENT_TYPE_LOADBALANCERV2} helpers._register_agent(lbaas_hosta) with self.loadbalancer() as loadbalancer: lbaas_agent = self._get_lbaas_agent_hosting_loadbalancer( loadbalancer['loadbalancer']['id']) self.assertIsNotNone(lbaas_agent) self.lbaas_plugin.db.update_loadbalancer_provisioning_status( self.adminContext, loadbalancer['loadbalancer']['id'] ) agents = self._list_agents() self._disable_agent(agents['agents'][0]['id']) subnet = self.core_plugin.get_subnets(self.adminContext)[0] lb = { 'loadbalancer': { 'vip_subnet_id': subnet['id'], 'provider': 'lbaas', 'flavor_id': attributes.ATTR_NOT_SPECIFIED, 'vip_address': attributes.ATTR_NOT_SPECIFIED, 'admin_state_up': True, 'tenant_id': self._tenant_id}} self.assertRaises(lbaas_agentschedulerv2.NoEligibleLbaasAgent, self.lbaas_plugin.create_loadbalancer, self.adminContext, lb) def test_schedule_loadbalancer_with_down_agent(self): lbaas_hosta = { 'binary': 'neutron-loadbalancer-agent', 'host': LBAAS_HOSTA, 'topic': 'LOADBALANCER_AGENT', 'configurations': {'device_drivers': [ plugin_driver.HaproxyOnHostPluginDriver.device_driver ]}, 'agent_type': lb_const.AGENT_TYPE_LOADBALANCERV2} helpers._register_agent(lbaas_hosta) is_agent_down_str = 'neutron.db.agents_db.AgentDbMixin.is_agent_down' with mock.patch(is_agent_down_str) as mock_is_agent_down: mock_is_agent_down.return_value = False with self.loadbalancer() as loadbalancer: lbaas_agent = self._get_lbaas_agent_hosting_loadbalancer( loadbalancer['loadbalancer']['id']) self.lbaas_plugin.db.update_loadbalancer_provisioning_status( self.adminContext, loadbalancer['loadbalancer']['id'] ) self.assertIsNotNone(lbaas_agent) with mock.patch(is_agent_down_str) as mock_is_agent_down: mock_is_agent_down.return_value = True subnet = self.core_plugin.get_subnets(self.adminContext)[0] lb = { 'loadbalancer': { 'vip_subnet_id': subnet['id'], 'provider': 'lbaas', 'flavor_id': attributes.ATTR_NOT_SPECIFIED, 
'vip_address': attributes.ATTR_NOT_SPECIFIED, 'admin_state_up': True, 'tenant_id': self._tenant_id}} self.assertRaises(lbaas_agentschedulerv2.NoEligibleLbaasAgent, self.lbaas_plugin.create_loadbalancer, self.adminContext, lb) def test_loadbalancer_unscheduling_on_loadbalancer_deletion(self): self._register_agent_states(lbaas_agents=True) with self.loadbalancer(no_delete=True) as loadbalancer: lb_id = loadbalancer['loadbalancer']['id'] lbaas_agent = self._get_lbaas_agent_hosting_loadbalancer(lb_id) self.assertIsNotNone(lbaas_agent) self.assertEqual(lb_const.AGENT_TYPE_LOADBALANCERV2, lbaas_agent['agent']['agent_type']) loadbalancers = self._list_loadbalancers_hosted_by_agent( lbaas_agent['agent']['id']) self.assertEqual(1, len(loadbalancers['loadbalancers'])) self.assertEqual(loadbalancer['loadbalancer'], loadbalancers['loadbalancers'][0]) self.lbaas_plugin.db.update_loadbalancer_provisioning_status( self.adminContext, lb_id ) req = self.new_delete_request('loadbalancers', lb_id) res = req.get_response(self.ext_api) self.assertEqual(exc.HTTPNoContent.code, res.status_int) loadbalancers = self._list_loadbalancers_hosted_by_agent( lbaas_agent['agent']['id']) self.assertEqual(0, len(loadbalancers['loadbalancers'])) def test_loadbalancer_scheduling_non_admin_access(self): self._register_agent_states(lbaas_agents=True) with self.loadbalancer() as loadbalancer: self._get_lbaas_agent_hosting_loadbalancer( loadbalancer['loadbalancer']['id'], expected_code=exc.HTTPForbidden.code, admin_context=False) self._list_loadbalancers_hosted_by_agent( 'fake_id', expected_code=exc.HTTPForbidden.code, admin_context=False) self.lbaas_plugin.db.update_loadbalancer_provisioning_status( self.adminContext, loadbalancer['loadbalancer']['id'] ) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/agent/0000775000567000056710000000000012701410110023470 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/agent/test_agent_api.py0000664000567000056710000000603512701407726027057 
0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import copy import mock from neutron_lbaas.agent import agent_api as api from neutron_lbaas.tests import base class TestApiCache(base.BaseTestCase): def setUp(self): super(TestApiCache, self).setUp() self.api = api.LbaasAgentApi('topic', mock.sentinel.context, 'host') def test_init(self): self.assertEqual('host', self.api.host) self.assertEqual(mock.sentinel.context, self.api.context) def _test_method(self, method, **kwargs): add_host = ('get_ready_devices', 'plug_vip_port', 'unplug_vip_port') expected_kwargs = copy.copy(kwargs) if method in add_host: expected_kwargs['host'] = self.api.host with contextlib.nested( mock.patch.object(self.api.client, 'call'), mock.patch.object(self.api.client, 'prepare'), ) as ( rpc_mock, prepare_mock ): prepare_mock.return_value = self.api.client rpc_mock.return_value = 'foo' rv = getattr(self.api, method)(**kwargs) self.assertEqual('foo', rv) prepare_args = {} prepare_mock.assert_called_once_with(**prepare_args) rpc_mock.assert_called_once_with(mock.sentinel.context, method, **expected_kwargs) def test_get_ready_devices(self): self._test_method('get_ready_devices') def test_get_loadbalancer(self): self._test_method('get_loadbalancer', loadbalancer_id='loadbalancer_id') def test_loadbalancer_destroyed(self): self._test_method('loadbalancer_destroyed', 
loadbalancer_id='loadbalancer_id') def test_loadbalancer_deployed(self): self._test_method('loadbalancer_deployed', loadbalancer_id='loadbalancer_id') def test_update_status(self): self._test_method('update_status', obj_type='type', obj_id='id', provisioning_status='p_status', operating_status='o_status') def test_plug_vip_port(self): self._test_method('plug_vip_port', port_id='port_id') def test_unplug_vip_port(self): self._test_method('unplug_vip_port', port_id='port_id') def test_update_loadbalancer_stats(self): self._test_method('update_loadbalancer_stats', loadbalancer_id='id', stats='stats') neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/agent/__init__.py0000664000567000056710000000000012701407726025612 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/agent/test_agent.py0000664000567000056710000000332412701407726026224 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import mock from oslo_config import cfg from neutron_lbaas.agent import agent from neutron_lbaas.tests import base class TestLbaasService(base.BaseTestCase): def test_start(self): with mock.patch.object( agent.n_rpc.Service, 'start' ) as mock_start: mgr = mock.Mock() cfg.CONF.periodic_interval = mock.Mock(return_value=10) agent_service = agent.LbaasAgentService('host', 'topic', mgr) agent_service.start() self.assertTrue(mock_start.called) def test_main(self): logging_str = 'neutron.agent.common.config.setup_logging' with contextlib.nested( mock.patch(logging_str), mock.patch.object(agent.service, 'launch'), mock.patch('sys.argv'), mock.patch.object(agent.manager, 'LbaasAgentManager'), mock.patch.object(cfg.CONF, 'register_opts') ) as (mock_logging, mock_launch, sys_argv, mgr_cls, ro): agent.main() mock_launch.assert_called_once_with(mock.ANY, mock.ANY) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/agent/test_agent_manager.py0000664000567000056710000007552112701407726027726 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import mock from neutron.plugins.common import constants from neutron_lbaas.agent import agent_manager as manager from neutron_lbaas.services.loadbalancer import constants as lb_const from neutron_lbaas.services.loadbalancer import data_models from neutron_lbaas.tests import base class TestManager(base.BaseTestCase): def setUp(self): super(TestManager, self).setUp() mock_conf = mock.Mock() mock_conf.device_driver = ['devdriver'] self.mock_importer = mock.patch.object(manager, 'importutils').start() rpc_mock_cls = mock.patch( 'neutron_lbaas.agent.agent_api.LbaasAgentApi' ).start() # disable setting up periodic state reporting mock_conf.AGENT.report_interval = 0 self.mgr = manager.LbaasAgentManager(mock_conf) self.rpc_mock = rpc_mock_cls.return_value self.log = mock.patch.object(manager, 'LOG').start() self.driver_mock = mock.Mock() self.mgr.device_drivers = {'devdriver': self.driver_mock} self.mgr.instance_mapping = {'1': 'devdriver', '2': 'devdriver'} self.mgr.needs_resync = False self.update_statuses_patcher = mock.patch.object( self.mgr, '_update_statuses') self.update_statuses = self.update_statuses_patcher.start() def test_initialize_service_hook(self): with mock.patch.object(self.mgr, 'sync_state') as sync: self.mgr.initialize_service_hook(mock.Mock()) sync.assert_called_once_with() def test_periodic_resync_needs_sync(self): with mock.patch.object(self.mgr, 'sync_state') as sync: self.mgr.needs_resync = True self.mgr.periodic_resync(mock.Mock()) sync.assert_called_once_with() def test_periodic_resync_no_sync(self): with mock.patch.object(self.mgr, 'sync_state') as sync: self.mgr.needs_resync = False self.mgr.periodic_resync(mock.Mock()) self.assertFalse(sync.called) def test_collect_stats(self): self.mgr.collect_stats(mock.Mock()) self.rpc_mock.update_loadbalancer_stats.assert_has_calls([ mock.call('1', mock.ANY), mock.call('2', mock.ANY) ], any_order=True) def test_collect_stats_exception(self): 
self.driver_mock.loadbalancer.get_stats.side_effect = Exception self.mgr.collect_stats(mock.Mock()) self.assertFalse(self.rpc_mock.called) self.assertTrue(self.mgr.needs_resync) self.assertTrue(self.log.exception.called) def _sync_state_helper(self, ready, reloaded, destroyed): with contextlib.nested( mock.patch.object(self.mgr, '_reload_loadbalancer'), mock.patch.object(self.mgr, '_destroy_loadbalancer') ) as (reload, destroy): self.rpc_mock.get_ready_devices.return_value = ready self.mgr.sync_state() self.assertEqual(len(reloaded), len(reload.mock_calls)) self.assertEqual(len(destroyed), len(destroy.mock_calls)) reload.assert_has_calls([mock.call(i) for i in reloaded], any_order=True) destroy.assert_has_calls([mock.call(i) for i in destroyed], any_order=True) self.assertFalse(self.mgr.needs_resync) def test_sync_state_all_known(self): self._sync_state_helper(['1', '2'], ['1', '2'], []) def test_sync_state_all_unknown(self): self.mgr.instance_mapping = {} self._sync_state_helper(['1', '2'], ['1', '2'], []) def test_sync_state_destroy_all(self): self._sync_state_helper([], [], ['1', '2']) def test_sync_state_both(self): self.mgr.instance_mapping = {'1': 'devdriver'} self._sync_state_helper(['2'], ['2'], ['1']) def test_sync_state_exception(self): self.rpc_mock.get_ready_devices.side_effect = Exception self.mgr.sync_state() self.assertTrue(self.log.exception.called) self.assertTrue(self.mgr.needs_resync) def test_reload_loadbalancer(self): lb = data_models.LoadBalancer(id='1').to_dict() lb['provider'] = {'device_driver': 'devdriver'} self.rpc_mock.get_loadbalancer.return_value = lb lb_id = 'new_id' self.assertNotIn(lb_id, self.mgr.instance_mapping) self.mgr._reload_loadbalancer(lb_id) calls = self.driver_mock.deploy_instance.call_args_list self.assertEqual(1, len(calls)) called_lb = calls[0][0][0] self.assertEqual(lb['id'], called_lb.id) self.assertIn(lb['id'], self.mgr.instance_mapping) self.rpc_mock.loadbalancer_deployed.assert_called_once_with(lb_id) def 
test_reload_loadbalancer_driver_not_found(self): lb = data_models.LoadBalancer(id='1').to_dict() lb['provider'] = {'device_driver': 'unknowndriver'} self.rpc_mock.get_loadbalancer.return_value = lb lb_id = 'new_id' self.assertNotIn(lb_id, self.mgr.instance_mapping) self.mgr._reload_loadbalancer(lb_id) self.assertTrue(self.log.error.called) self.assertFalse(self.driver_mock.deploy_instance.called) self.assertNotIn(lb_id, self.mgr.instance_mapping) self.assertFalse(self.rpc_mock.loadbalancer_deployed.called) def test_reload_loadbalancer_exception_on_driver(self): lb = data_models.LoadBalancer(id='3').to_dict() lb['provider'] = {'device_driver': 'devdriver'} self.rpc_mock.get_loadbalancer.return_value = lb self.driver_mock.deploy_instance.side_effect = Exception lb_id = 'new_id' self.assertNotIn(lb_id, self.mgr.instance_mapping) self.mgr._reload_loadbalancer(lb_id) calls = self.driver_mock.deploy_instance.call_args_list self.assertEqual(1, len(calls)) called_lb = calls[0][0][0] self.assertEqual(lb['id'], called_lb.id) self.assertNotIn(lb['id'], self.mgr.instance_mapping) self.assertFalse(self.rpc_mock.loadbalancer_deployed.called) self.assertTrue(self.log.exception.called) self.assertTrue(self.mgr.needs_resync) def test_destroy_loadbalancer(self): lb_id = '1' self.assertIn(lb_id, self.mgr.instance_mapping) self.mgr._destroy_loadbalancer(lb_id) self.driver_mock.undeploy_instance.assert_called_once_with( lb_id, delete_namespace=True) self.assertNotIn(lb_id, self.mgr.instance_mapping) self.rpc_mock.loadbalancer_destroyed.assert_called_once_with(lb_id) self.assertFalse(self.mgr.needs_resync) def test_destroy_loadbalancer_exception_on_driver(self): lb_id = '1' self.assertIn(lb_id, self.mgr.instance_mapping) self.driver_mock.undeploy_instance.side_effect = Exception self.mgr._destroy_loadbalancer(lb_id) self.driver_mock.undeploy_instance.assert_called_once_with( lb_id, delete_namespace=True) self.assertIn(lb_id, self.mgr.instance_mapping) 
self.assertFalse(self.rpc_mock.loadbalancer_destroyed.called) self.assertTrue(self.log.exception.called) self.assertTrue(self.mgr.needs_resync) def test_get_driver_unknown_device(self): self.assertRaises(manager.DeviceNotFoundOnAgent, self.mgr._get_driver, 'unknown') def test_remove_orphans(self): self.mgr.remove_orphans() orphans = {'1': "Fake", '2': "Fake"} self.driver_mock.remove_orphans.assert_called_once_with(orphans.keys()) def test_agent_disabled(self): payload = {'admin_state_up': False} self.mgr.agent_updated(mock.Mock(), payload) self.driver_mock.undeploy_instance.assert_has_calls( [mock.call('1', delete_namespace=True), mock.call('2', delete_namespace=True)], any_order=True ) def test_update_statuses_loadbalancer(self): self.update_statuses_patcher.stop() lb = data_models.LoadBalancer(id='1') self.mgr._update_statuses(lb) self.rpc_mock.update_status.assert_called_once_with( 'loadbalancer', lb.id, provisioning_status=constants.ACTIVE, operating_status=lb_const.ONLINE) self.rpc_mock.update_status.reset_mock() self.mgr._update_statuses(lb, error=True) self.rpc_mock.update_status.assert_called_once_with( 'loadbalancer', lb.id, provisioning_status=constants.ERROR, operating_status=lb_const.OFFLINE) def test_update_statuses_listener(self): self.update_statuses_patcher.stop() listener = data_models.Listener(id='1') lb = data_models.LoadBalancer(id='1', listeners=[listener]) listener.loadbalancer = lb self.mgr._update_statuses(listener) self.assertEqual(2, self.rpc_mock.update_status.call_count) calls = [mock.call('listener', listener.id, provisioning_status=constants.ACTIVE, operating_status=lb_const.ONLINE), mock.call('loadbalancer', lb.id, provisioning_status=constants.ACTIVE, operating_status=None)] self.rpc_mock.update_status.assert_has_calls(calls) self.rpc_mock.update_status.reset_mock() self.mgr._update_statuses(listener, error=True) self.assertEqual(2, self.rpc_mock.update_status.call_count) calls = [mock.call('listener', listener.id, 
provisioning_status=constants.ERROR, operating_status=lb_const.OFFLINE), mock.call('loadbalancer', lb.id, provisioning_status=constants.ACTIVE, operating_status=None)] self.rpc_mock.update_status.assert_has_calls(calls) def test_update_statuses_pool(self): self.update_statuses_patcher.stop() pool = data_models.Pool(id='1') listener = data_models.Listener(id='1', default_pool=pool) lb = data_models.LoadBalancer(id='1', listeners=[listener]) listener.loadbalancer = lb pool.loadbalancer = lb self.mgr._update_statuses(pool) self.assertEqual(2, self.rpc_mock.update_status.call_count) calls = [mock.call('pool', pool.id, provisioning_status=constants.ACTIVE, operating_status=lb_const.ONLINE), mock.call('loadbalancer', lb.id, provisioning_status=constants.ACTIVE, operating_status=None)] self.rpc_mock.update_status.assert_has_calls(calls) self.rpc_mock.update_status.reset_mock() self.mgr._update_statuses(pool, error=True) self.assertEqual(2, self.rpc_mock.update_status.call_count) calls = [mock.call('pool', pool.id, provisioning_status=constants.ERROR, operating_status=lb_const.OFFLINE), mock.call('loadbalancer', lb.id, provisioning_status=constants.ACTIVE, operating_status=None)] self.rpc_mock.update_status.assert_has_calls(calls) def test_update_statuses_member(self): self.update_statuses_patcher.stop() member = data_models.Member(id='1') pool = data_models.Pool(id='1', members=[member]) member.pool = pool listener = data_models.Listener(id='1', default_pool=pool) lb = data_models.LoadBalancer(id='1', listeners=[listener]) listener.loadbalancer = lb pool.loadbalancer = lb self.mgr._update_statuses(member) self.assertEqual(2, self.rpc_mock.update_status.call_count) calls = [mock.call('member', member.id, provisioning_status=constants.ACTIVE, operating_status=lb_const.ONLINE), mock.call('loadbalancer', lb.id, provisioning_status=constants.ACTIVE, operating_status=None)] self.rpc_mock.update_status.assert_has_calls(calls) self.rpc_mock.update_status.reset_mock() 
self.mgr._update_statuses(member, error=True) self.assertEqual(2, self.rpc_mock.update_status.call_count) calls = [mock.call('member', member.id, provisioning_status=constants.ERROR, operating_status=lb_const.OFFLINE), mock.call('loadbalancer', lb.id, provisioning_status=constants.ACTIVE, operating_status=None)] self.rpc_mock.update_status.assert_has_calls(calls) def test_update_statuses_healthmonitor(self): self.update_statuses_patcher.stop() hm = data_models.HealthMonitor(id='1') pool = data_models.Pool(id='1', healthmonitor=hm) hm.pool = pool listener = data_models.Listener(id='1', default_pool=pool) lb = data_models.LoadBalancer(id='1', listeners=[listener]) listener.loadbalancer = lb pool.loadbalancer = lb self.mgr._update_statuses(hm) self.assertEqual(2, self.rpc_mock.update_status.call_count) calls = [mock.call('healthmonitor', hm.id, provisioning_status=constants.ACTIVE, operating_status=None), mock.call('loadbalancer', lb.id, provisioning_status=constants.ACTIVE, operating_status=None)] self.rpc_mock.update_status.assert_has_calls(calls) self.rpc_mock.update_status.reset_mock() self.mgr._update_statuses(hm, error=True) self.assertEqual(2, self.rpc_mock.update_status.call_count) calls = [mock.call('healthmonitor', hm.id, provisioning_status=constants.ERROR, operating_status=None), mock.call('loadbalancer', lb.id, provisioning_status=constants.ACTIVE, operating_status=None)] self.rpc_mock.update_status.assert_has_calls(calls) @mock.patch.object(data_models.LoadBalancer, 'from_dict') def test_create_loadbalancer(self, mlb): loadbalancer = data_models.LoadBalancer(id='1') self.assertIn(loadbalancer.id, self.mgr.instance_mapping) mlb.return_value = loadbalancer self.mgr.create_loadbalancer(mock.Mock(), loadbalancer.to_dict(), 'devdriver') self.driver_mock.loadbalancer.create.assert_called_once_with( loadbalancer) self.update_statuses.assert_called_once_with(loadbalancer) @mock.patch.object(data_models.LoadBalancer, 'from_dict') def 
test_create_loadbalancer_failed(self, mlb): loadbalancer = data_models.LoadBalancer(id='1') self.assertIn(loadbalancer.id, self.mgr.instance_mapping) self.driver_mock.loadbalancer.create.side_effect = Exception mlb.return_value = loadbalancer self.mgr.create_loadbalancer(mock.Mock(), loadbalancer.to_dict(), 'devdriver') self.driver_mock.loadbalancer.create.assert_called_once_with( loadbalancer) self.update_statuses.assert_called_once_with(loadbalancer, error=True) @mock.patch.object(data_models.LoadBalancer, 'from_dict') def test_update_loadbalancer(self, mlb): loadbalancer = data_models.LoadBalancer(id='1', vip_address='10.0.0.1') old_loadbalancer = data_models.LoadBalancer(id='1', vip_address='10.0.0.2') mlb.side_effect = [loadbalancer, old_loadbalancer] self.mgr.update_loadbalancer(mock.Mock(), old_loadbalancer.to_dict(), loadbalancer.to_dict()) self.driver_mock.loadbalancer.update.assert_called_once_with( old_loadbalancer, loadbalancer) self.update_statuses.assert_called_once_with(loadbalancer) @mock.patch.object(data_models.LoadBalancer, 'from_dict') def test_update_loadbalancer_failed(self, mlb): loadbalancer = data_models.LoadBalancer(id='1', vip_address='10.0.0.1') old_loadbalancer = data_models.LoadBalancer(id='1', vip_address='10.0.0.2') mlb.side_effect = [loadbalancer, old_loadbalancer] self.driver_mock.loadbalancer.update.side_effect = Exception self.mgr.update_loadbalancer(mock.Mock(), old_loadbalancer, loadbalancer) self.driver_mock.loadbalancer.update.assert_called_once_with( old_loadbalancer, loadbalancer) self.update_statuses.assert_called_once_with(loadbalancer, error=True) @mock.patch.object(data_models.LoadBalancer, 'from_dict') def test_delete_loadbalancer(self, mlb): loadbalancer = data_models.LoadBalancer(id='1') mlb.return_value = loadbalancer self.assertIn(loadbalancer.id, self.mgr.instance_mapping) self.mgr.delete_loadbalancer(mock.Mock(), loadbalancer.to_dict()) self.driver_mock.loadbalancer.delete.assert_called_once_with( loadbalancer) 
self.assertNotIn(loadbalancer.id, self.mgr.instance_mapping) @mock.patch.object(data_models.Listener, 'from_dict') def test_create_listener(self, mlistener): loadbalancer = data_models.LoadBalancer(id='1') listener = data_models.Listener(id=1, loadbalancer_id='1', loadbalancer=loadbalancer) self.assertIn(loadbalancer.id, self.mgr.instance_mapping) mlistener.return_value = listener self.mgr.create_listener(mock.Mock(), listener.to_dict()) self.driver_mock.listener.create.assert_called_once_with(listener) self.update_statuses.assert_called_once_with(listener) @mock.patch.object(data_models.Listener, 'from_dict') def test_create_listener_failed(self, mlistener): loadbalancer = data_models.LoadBalancer(id='1') listener = data_models.Listener(id=1, loadbalancer_id='1', loadbalancer=loadbalancer) self.assertIn(loadbalancer.id, self.mgr.instance_mapping) self.driver_mock.listener.create.side_effect = Exception mlistener.return_value = listener self.mgr.create_listener(mock.Mock(), listener.to_dict()) self.driver_mock.listener.create.assert_called_once_with(listener) self.update_statuses.assert_called_once_with(listener, error=True) @mock.patch.object(data_models.Listener, 'from_dict') def test_update_listener(self, mlistener): loadbalancer = data_models.LoadBalancer(id='1') old_listener = data_models.Listener(id=1, loadbalancer_id='1', loadbalancer=loadbalancer, protocol_port=80) listener = data_models.Listener(id=1, loadbalancer_id='1', loadbalancer=loadbalancer, protocol_port=81) mlistener.side_effect = [listener, old_listener] self.mgr.update_listener(mock.Mock(), old_listener.to_dict(), listener.to_dict()) self.driver_mock.listener.update.assert_called_once_with( old_listener, listener) self.update_statuses.assert_called_once_with(listener) @mock.patch.object(data_models.Listener, 'from_dict') def test_update_listener_failed(self, mlistener): loadbalancer = data_models.LoadBalancer(id='1') old_listener = data_models.Listener(id=1, loadbalancer_id='1', 
loadbalancer=loadbalancer, protocol_port=80) listener = data_models.Listener(id=1, loadbalancer_id='1', loadbalancer=loadbalancer, protocol_port=81) mlistener.side_effect = [listener, old_listener] self.driver_mock.listener.update.side_effect = Exception self.mgr.update_listener(mock.Mock(), old_listener, listener) self.driver_mock.listener.update.assert_called_once_with(old_listener, listener) self.update_statuses.assert_called_once_with(listener, error=True) @mock.patch.object(data_models.Listener, 'from_dict') def test_delete_listener(self, mlistener): loadbalancer = data_models.LoadBalancer(id='1') listener = data_models.Listener(id=1, loadbalancer_id='1', loadbalancer=loadbalancer, protocol_port=80) mlistener.return_value = listener self.mgr.delete_listener(mock.Mock(), listener.to_dict()) self.driver_mock.listener.delete.assert_called_once_with(listener) @mock.patch.object(data_models.Pool, 'from_dict') def test_create_pool(self, mpool): loadbalancer = data_models.LoadBalancer(id='1') pool = data_models.Pool(id='1', loadbalancer=loadbalancer) mpool.return_value = pool self.mgr.create_pool(mock.Mock(), pool.to_dict()) self.driver_mock.pool.create.assert_called_once_with(pool) self.update_statuses.assert_called_once_with(pool) @mock.patch.object(data_models.Pool, 'from_dict') def test_create_pool_failed(self, mpool): loadbalancer = data_models.LoadBalancer(id='1') pool = data_models.Pool(id='1', loadbalancer=loadbalancer) mpool.return_value = pool self.driver_mock.pool.create.side_effect = Exception self.mgr.create_pool(mock.Mock(), pool) self.driver_mock.pool.create.assert_called_once_with(pool) self.update_statuses.assert_called_once_with(pool, error=True) @mock.patch.object(data_models.Pool, 'from_dict') def test_update_pool(self, mpool): loadbalancer = data_models.LoadBalancer(id='1') pool = data_models.Pool(id='1', loadbalancer=loadbalancer, protocol='HTTPS') old_pool = data_models.Pool(id='1', loadbalancer=loadbalancer, protocol='HTTP') mpool.side_effect 
= [pool, old_pool] self.mgr.update_pool(mock.Mock(), old_pool.to_dict(), pool.to_dict()) self.driver_mock.pool.update.assert_called_once_with(old_pool, pool) self.update_statuses.assert_called_once_with(pool) @mock.patch.object(data_models.Pool, 'from_dict') def test_update_pool_failed(self, mpool): loadbalancer = data_models.LoadBalancer(id='1') pool = data_models.Pool(id='1', loadbalancer=loadbalancer, protocol='HTTPS') old_pool = data_models.Pool(id='1', loadbalancer=loadbalancer, protocol='HTTP') mpool.side_effect = [pool, old_pool] self.driver_mock.pool.update.side_effect = Exception self.mgr.update_pool(mock.Mock(), old_pool.to_dict(), pool.to_dict()) self.driver_mock.pool.update.assert_called_once_with(old_pool, pool) self.update_statuses.assert_called_once_with(pool, error=True) @mock.patch.object(data_models.Pool, 'from_dict') def test_delete_pool(self, mpool): loadbalancer = data_models.LoadBalancer(id='1') pool = data_models.Pool(id='1', loadbalancer=loadbalancer, protocol='HTTPS') mpool.return_value = pool self.mgr.delete_pool(mock.Mock(), pool.to_dict()) self.driver_mock.pool.delete.assert_called_once_with(pool) @mock.patch.object(data_models.Member, 'from_dict') def test_create_member(self, mmember): loadbalancer = data_models.LoadBalancer(id='1') pool = data_models.Pool(id='1', loadbalancer=loadbalancer, protocol='HTTPS') member = data_models.Member(id='1', pool=pool) mmember.return_value = member self.mgr.create_member(mock.Mock(), member.to_dict()) self.driver_mock.member.create.assert_called_once_with(member) self.update_statuses.assert_called_once_with(member) @mock.patch.object(data_models.Member, 'from_dict') def test_create_member_failed(self, mmember): loadbalancer = data_models.LoadBalancer(id='1') pool = data_models.Pool(id='1', loadbalancer=loadbalancer, protocol='HTTPS') member = data_models.Member(id='1', pool=pool) mmember.return_value = member self.driver_mock.member.create.side_effect = Exception self.mgr.create_member(mock.Mock(), 
member.to_dict()) self.driver_mock.member.create.assert_called_once_with(member) self.update_statuses.assert_called_once_with(member, error=True) @mock.patch.object(data_models.Member, 'from_dict') def test_update_member(self, mmember): loadbalancer = data_models.LoadBalancer(id='1') pool = data_models.Pool(id='1', loadbalancer=loadbalancer, protocol='HTTPS') member = data_models.Member(id='1', pool=pool, weight=1) old_member = data_models.Member(id='1', pool=pool, weight=2) mmember.side_effect = [member, old_member] self.mgr.update_member(mock.Mock(), old_member.to_dict(), member.to_dict()) self.driver_mock.member.update.assert_called_once_with(old_member, member) self.update_statuses.assert_called_once_with(member) @mock.patch.object(data_models.Member, 'from_dict') def test_update_member_failed(self, mmember): loadbalancer = data_models.LoadBalancer(id='1') pool = data_models.Pool(id='1', loadbalancer=loadbalancer, protocol='HTTPS') member = data_models.Member(id='1', pool=pool, weight=1) old_member = data_models.Member(id='1', pool=pool, weight=2) mmember.side_effect = [member, old_member] self.driver_mock.member.update.side_effect = Exception self.mgr.update_member(mock.Mock(), old_member.to_dict(), member.to_dict()) self.driver_mock.member.update.assert_called_once_with(old_member, member) self.update_statuses.assert_called_once_with(member, error=True) @mock.patch.object(data_models.Member, 'from_dict') def test_delete_member(self, mmember): loadbalancer = data_models.LoadBalancer(id='1') pool = data_models.Pool(id='1', loadbalancer=loadbalancer, protocol='HTTPS') member = data_models.Member(id='1', pool=pool, weight=1) mmember.return_value = member self.mgr.delete_member(mock.Mock(), member.to_dict()) self.driver_mock.member.delete.assert_called_once_with(member) @mock.patch.object(data_models.HealthMonitor, 'from_dict') def test_create_monitor(self, mmonitor): loadbalancer = data_models.LoadBalancer(id='1') pool = data_models.Pool(id='1', 
loadbalancer=loadbalancer, protocol='HTTPS') monitor = data_models.HealthMonitor(id='1', pool=pool) mmonitor.return_value = monitor self.mgr.create_healthmonitor(mock.Mock(), monitor.to_dict()) self.driver_mock.healthmonitor.create.assert_called_once_with( monitor) self.update_statuses.assert_called_once_with(monitor) @mock.patch.object(data_models.HealthMonitor, 'from_dict') def test_create_monitor_failed(self, mmonitor): loadbalancer = data_models.LoadBalancer(id='1') pool = data_models.Pool(id='1', loadbalancer=loadbalancer, protocol='HTTPS') monitor = data_models.HealthMonitor(id='1', pool=pool) mmonitor.return_value = monitor self.driver_mock.healthmonitor.create.side_effect = Exception self.mgr.create_healthmonitor(mock.Mock(), monitor.to_dict()) self.driver_mock.healthmonitor.create.assert_called_once_with(monitor) self.update_statuses.assert_called_once_with(monitor, error=True) @mock.patch.object(data_models.HealthMonitor, 'from_dict') def test_update_monitor(self, mmonitor): loadbalancer = data_models.LoadBalancer(id='1') pool = data_models.Pool(id='1', loadbalancer=loadbalancer, protocol='HTTPS') monitor = data_models.HealthMonitor(id='1', pool=pool, delay=1) old_monitor = data_models.HealthMonitor(id='1', pool=pool, delay=2) mmonitor.side_effect = [monitor, old_monitor] self.mgr.update_healthmonitor(mock.Mock(), old_monitor.to_dict(), monitor.to_dict()) self.driver_mock.healthmonitor.update.assert_called_once_with( old_monitor, monitor) self.update_statuses.assert_called_once_with(monitor) @mock.patch.object(data_models.HealthMonitor, 'from_dict') def test_update_monitor_failed(self, mmonitor): loadbalancer = data_models.LoadBalancer(id='1') pool = data_models.Pool(id='1', loadbalancer=loadbalancer, protocol='HTTPS') monitor = data_models.HealthMonitor(id='1', pool=pool, delay=1) old_monitor = data_models.HealthMonitor(id='1', pool=pool, delay=2) mmonitor.side_effect = [monitor, old_monitor] self.driver_mock.healthmonitor.update.side_effect = Exception 
self.mgr.update_healthmonitor(mock.Mock(), monitor.to_dict(), monitor.to_dict()) self.driver_mock.healthmonitor.update.assert_called_once_with( old_monitor, monitor) self.update_statuses.assert_called_once_with(monitor, error=True) @mock.patch.object(data_models.HealthMonitor, 'from_dict') def test_delete_monitor(self, mmonitor): loadbalancer = data_models.LoadBalancer(id='1') pool = data_models.Pool(id='1', loadbalancer=loadbalancer, protocol='HTTPS') monitor = data_models.HealthMonitor(id='1', pool=pool) mmonitor.return_value = monitor self.mgr.delete_healthmonitor(mock.Mock(), monitor.to_dict()) self.driver_mock.healthmonitor.delete.assert_called_once_with( monitor) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/common/0000775000567000056710000000000012701410110023662 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/common/__init__.py0000664000567000056710000000000012701407726026004 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/common/cert_manager/0000775000567000056710000000000012701410110026311 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/common/cert_manager/test_local.py0000664000567000056710000001306612701407726031045 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace US, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import mock from oslo_config import cfg from oslo_config import fixture as oslo_fixture from neutron_lbaas.common.cert_manager import cert_manager from neutron_lbaas.common.cert_manager import local_cert_manager from neutron_lbaas.tests import base class TestLocalCert(base.BaseTestCase): def setUp(self): self.certificate = "My Certificate" self.intermediates = "My Intermediates" self.private_key = "My Private Key" self.private_key_passphrase = "My Private Key Passphrase" super(TestLocalCert, self).setUp() def test_local_cert(self): # Create a cert cert = local_cert_manager.Cert( certificate=self.certificate, intermediates=self.intermediates, private_key=self.private_key, private_key_passphrase=self.private_key_passphrase ) # Validate the cert functions self.assertEqual(self.certificate, cert.get_certificate()) self.assertEqual(self.intermediates, cert.get_intermediates()) self.assertEqual(self.private_key, cert.get_private_key()) self.assertEqual(self.private_key_passphrase, cert.get_private_key_passphrase()) class TestLocalManager(base.BaseTestCase): def setUp(self): self.project_id = "12345" self.certificate = "My Certificate" self.intermediates = "My Intermediates" self.private_key = "My Private Key" self.private_key_passphrase = "My Private Key Passphrase" conf = oslo_fixture.Config(cfg.CONF) conf.config(group="certificates", storage_path="/tmp/") self.cert_manager = local_cert_manager.CertManager() super(TestLocalManager, self).setUp() def _store_cert(self): file_mock = mock.mock_open() # Attempt to store the cert with mock.patch('six.moves.builtins.open', file_mock, create=True): cert_id = self.cert_manager.store_cert( project_id=self.project_id, certificate=self.certificate, intermediates=self.intermediates, private_key=self.private_key, private_key_passphrase=self.private_key_passphrase ) # Check that something came back self.assertIsNotNone(cert_id) # Verify the correct files were opened file_mock.assert_has_calls([ 
mock.call(os.path.join('/tmp/{0}.crt'.format(cert_id)), 'w'), mock.call(os.path.join('/tmp/{0}.key'.format(cert_id)), 'w'), mock.call(os.path.join('/tmp/{0}.int'.format(cert_id)), 'w'), mock.call(os.path.join('/tmp/{0}.pass'.format(cert_id)), 'w') ], any_order=True) # Verify the writes were made file_mock().write.assert_has_calls([ mock.call(self.certificate), mock.call(self.intermediates), mock.call(self.private_key), mock.call(self.private_key_passphrase) ], any_order=True) return cert_id def _get_cert(self, cert_id): file_mock = mock.mock_open() # Attempt to retrieve the cert with mock.patch('six.moves.builtins.open', file_mock, create=True): data = self.cert_manager.get_cert( project_id=self.project_id, cert_ref=cert_id, resource_ref=None ) # Verify the correct files were opened file_mock.assert_has_calls([ mock.call(os.path.join('/tmp/{0}.crt'.format(cert_id)), 'r'), mock.call(os.path.join('/tmp/{0}.key'.format(cert_id)), 'r'), mock.call(os.path.join('/tmp/{0}.int'.format(cert_id)), 'r'), mock.call(os.path.join('/tmp/{0}.pass'.format(cert_id)), 'r') ], any_order=True) # The returned data should be a Cert object self.assertIsInstance(data, cert_manager.Cert) return data def _delete_cert(self, cert_id): remove_mock = mock.Mock() # Delete the cert with mock.patch('os.remove', remove_mock): self.cert_manager.delete_cert( project_id=self.project_id, cert_ref=cert_id, resource_ref=None ) # Verify the correct files were removed remove_mock.assert_has_calls([ mock.call(os.path.join('/tmp/{0}.crt'.format(cert_id))), mock.call(os.path.join('/tmp/{0}.key'.format(cert_id))), mock.call(os.path.join('/tmp/{0}.int'.format(cert_id))), mock.call(os.path.join('/tmp/{0}.pass'.format(cert_id))) ], any_order=True) def test_store_cert(self): self._store_cert() def test_get_cert(self): # Store a cert cert_id = self._store_cert() # Get the cert self._get_cert(cert_id) def test_delete_cert(self): # Store a cert cert_id = self._store_cert() # Verify the cert exists 
self._get_cert(cert_id) # Delete the cert self._delete_cert(cert_id) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/common/cert_manager/__init__.py0000664000567000056710000000000012701407726030433 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/common/cert_manager/test_cert_manager.py0000664000567000056710000000425412701407726032401 0ustar jenkinsjenkins00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron_lbaas.common import cert_manager from neutron_lbaas.common.cert_manager import barbican_cert_manager as bcm from neutron_lbaas.common.cert_manager import cert_manager as cmi from neutron_lbaas.common.cert_manager import local_cert_manager as lcm from neutron_lbaas.tests import base class TestCertManager(base.BaseTestCase): def setUp(self): cert_manager._CERT_MANAGER_PLUGIN = None super(TestCertManager, self).setUp() def test_get_service_url(self): # Format: ://// cfg.CONF.set_override('service_name', 'lbaas', 'service_auth', enforce_type=True) cfg.CONF.set_override('region', 'RegionOne', 'service_auth', enforce_type=True) self.assertEqual( 'lbaas://RegionOne/loadbalancer/LB-ID', cmi.CertManager.get_service_url('LB-ID')) def test_barbican_cert_manager(self): cfg.CONF.set_override( 'cert_manager_type', 'barbican', group='certificates') self.assertEqual(cert_manager.get_backend().CertManager, bcm.CertManager) def test_local_cert_manager(self): 
cfg.CONF.set_override( 'cert_manager_type', 'local', group='certificates') self.assertEqual(cert_manager.get_backend().CertManager, lcm.CertManager) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/common/cert_manager/test_barbican.py0000664000567000056710000000501412701407726031506 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from barbicanclient import client as barbican_client import mock import neutron_lbaas.common.cert_manager.barbican_cert_manager as bbq_common import neutron_lbaas.tests.base as base class TestBarbicanCert(base.BaseTestCase): def setUp(self): # Certificate data self.certificate = "My Certificate" self.intermediates = "My Intermediates" self.private_key = "My Private Key" self.private_key_passphrase = "My Private Key Passphrase" self.certificate_secret = barbican_client.secrets.Secret( api=mock.MagicMock(), payload=self.certificate ) self.intermediates_secret = barbican_client.secrets.Secret( api=mock.MagicMock(), payload=self.intermediates ) self.private_key_secret = barbican_client.secrets.Secret( api=mock.MagicMock(), payload=self.private_key ) self.private_key_passphrase_secret = barbican_client.secrets.Secret( api=mock.MagicMock(), payload=self.private_key_passphrase ) super(TestBarbicanCert, self).setUp() def test_barbican_cert(self): container = barbican_client.containers.CertificateContainer( api=mock.MagicMock(), certificate=self.certificate_secret, intermediates=self.intermediates_secret, 
private_key=self.private_key_secret, private_key_passphrase=self.private_key_passphrase_secret ) # Create a cert cert = bbq_common.Cert( cert_container=container ) # Validate the cert functions self.assertEqual(self.certificate, cert.get_certificate()) self.assertEqual(self.intermediates, cert.get_intermediates()) self.assertEqual(self.private_key, cert.get_private_key()) self.assertEqual(self.private_key_passphrase, cert.get_private_key_passphrase()) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/common/cert_manager/barbican_auth/0000775000567000056710000000000012701410110031073 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/common/cert_manager/barbican_auth/test_barbican_acl.py0000664000567000056710000000367212701407726035117 0ustar jenkinsjenkins00000000000000# Copyright 2014-2016 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from barbicanclient import client as barbican_client import mock from neutron_lbaas.common.cert_manager.barbican_auth import barbican_acl from neutron_lbaas.common.cert_manager import barbican_cert_manager from neutron_lbaas.common import keystone import neutron_lbaas.tests.base as base class TestBarbicanACLAuth(base.BaseTestCase): def setUp(self): # Reset the client keystone._SESSION = None super(TestBarbicanACLAuth, self).setUp() def test_get_barbican_client(self): # There should be no existing client self.assertIsNone(keystone._SESSION) # Mock out the keystone session and get the client keystone._SESSION = mock.MagicMock() acl_auth_object = barbican_acl.BarbicanACLAuth() bc1 = acl_auth_object.get_barbican_client() # Our returned client should be an instance of barbican_client.Client self.assertIsInstance( bc1, barbican_client.Client ) # Getting the session again with new class should get the same object acl_auth_object2 = barbican_acl.BarbicanACLAuth() bc2 = acl_auth_object2.get_barbican_client() self.assertIs(bc1, bc2) def test_load_auth_driver(self): bcm = barbican_cert_manager.CertManager() self.assertTrue(isinstance(bcm.auth, barbican_acl.BarbicanACLAuth)) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/common/cert_manager/barbican_auth/__init__.py0000664000567000056710000000000012701407726033215 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/common/tls_utils/0000775000567000056710000000000012701410110025704 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/common/tls_utils/__init__.py0000664000567000056710000000000012701407726030026 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/common/tls_utils/test_cert_parser.py0000664000567000056710000003666512701407726031671 0ustar jenkinsjenkins00000000000000# # Copyright 2014 OpenStack Foundation. 
All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cryptography import x509 import six import neutron_lbaas.common.exceptions as exceptions import neutron_lbaas.common.tls_utils.cert_parser as cert_parser from neutron_lbaas.tests import base ALT_EXT_CRT = """-----BEGIN CERTIFICATE----- MIIGqjCCBZKgAwIBAgIJAIApBg8slSSiMA0GCSqGSIb3DQEBBQUAMIGLMQswCQYD VQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxFDASBgNVBAcMC1NhbiBBbnRvbmlvMR4w HAYDVQQKDBVPcGVuU3RhY2sgRXhwZXJpbWVudHMxFjAUBgNVBAsMDU5ldXRyb24g TGJhYXMxHjAcBgNVBAMMFXd3dy5DTkZyb21TdWJqZWN0Lm9yZzAeFw0xNTA1MjEy MDMzMjNaFw0yNTA1MTgyMDMzMjNaMIGLMQswCQYDVQQGEwJVUzEOMAwGA1UECAwF VGV4YXMxFDASBgNVBAcMC1NhbiBBbnRvbmlvMR4wHAYDVQQKDBVPcGVuU3RhY2sg RXhwZXJpbWVudHMxFjAUBgNVBAsMDU5ldXRyb24gTGJhYXMxHjAcBgNVBAMMFXd3 dy5DTkZyb21TdWJqZWN0Lm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC ggEBALL1nmbDPUDps84i1sM3rhHrc+Dlu0N/wKQWKZFeiWUtF/pot19V3o0yXDps g7W5RkLMTFkZEcnQpyGdpAGjTjzmNXMZw99EzxsmrR3l6hUEISifVbvEuftYZT6j PxM5ML6WAjFNaBEZPWtZi8CgX5xdjdrDNndwyHob49n7Nc/h1kVqqBqMILabTqC6 yEcxS/B+DugVuuYbEdYYYElQUMfM+mUdULrSqIVl2n5AvvSFjWzWzfgPyp4QKn+f 7HVRT62bh/XjQ88n1tMYNAEqixRZTPgqY1LFl9VJVgRp9fdL6ttMurOR3C0STJ5q CdKBL7LrpbY4u8dEragRC6YAyI8CAwEAAaOCAw0wggMJMAkGA1UdEwQCMAAwCwYD VR0PBAQDAgXgMIIC7QYDVR0RBIIC5DCCAuCCGHd3dy5ob3N0RnJvbUROU05hbWUx LmNvbYIYd3d3Lmhvc3RGcm9tRE5TTmFtZTIuY29tghh3d3cuaG9zdEZyb21ETlNO YW1lMy5jb22CGHd3dy5ob3N0RnJvbUROU05hbWU0LmNvbYcECgECA4cQASNFZ4mr ze/3s9WR5qLEgIYWaHR0cDovL3d3dy5leGFtcGxlLmNvbaSBjzCBjDELMAkGA1UE 
BhMCVVMxDjAMBgNVBAgMBVRleGFzMRQwEgYDVQQHDAtTYW4gQW50b25pbzEeMBwG A1UECgwVT3BlblN0YWNrIEV4cGVyaW1lbnRzMRYwFAYDVQQLDA1OZXV0cm9uIExi YWFzMR8wHQYDVQQDDBZ3d3cuY25Gcm9tQWx0TmFtZTEub3JnpIGPMIGMMQswCQYD VQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxFDASBgNVBAcMC1NhbiBBbnRvbmlvMR4w HAYDVQQKDBVPcGVuU3RhY2sgRXhwZXJpbWVudHMxFjAUBgNVBAsMDU5ldXRyb24g TGJhYXMxHzAdBgNVBAMMFnd3dy5jbkZyb21BbHROYW1lMi5vcmekgY8wgYwxCzAJ BgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEUMBIGA1UEBwwLU2FuIEFudG9uaW8x HjAcBgNVBAoMFU9wZW5TdGFjayBFeHBlcmltZW50czEWMBQGA1UECwwNTmV1dHJv biBMYmFhczEfMB0GA1UEAwwWd3d3LmNuRnJvbUFsdE5hbWUzLm9yZ6SBjzCBjDEL MAkGA1UEBhMCVVMxDjAMBgNVBAgMBVRleGFzMRQwEgYDVQQHDAtTYW4gQW50b25p bzEeMBwGA1UECgwVT3BlblN0YWNrIEV4cGVyaW1lbnRzMRYwFAYDVQQLDA1OZXV0 cm9uIExiYWFzMR8wHQYDVQQDDBZ3d3cuY25Gcm9tQWx0TmFtZTQub3JnMA0GCSqG SIb3DQEBBQUAA4IBAQCS6iDn6R3C+qJLZibaqrBSkM9yu5kwRsQ6lQ+DODvVYGWq eGkkh5o2c6WbJlH44yF280+HvnJcuISD7epPHJN0vUM9+WMtXfEli9avFHgu2JxP 3P0ixK2kaJnqKQkSEdnA/v/eWP1Cd2v6rbKCIo9d2gSP0cnpdtlX9Zk3SzEh0V7s RjSdfZoAvz0aAnpDHlTerLcz5T2aiRae2wSt/RLA3qDO1Ji05tWvQBmKuepxS6A1 tL4Drm+OCXJwTrE7ClTMCwcrZnLl4tI+Z+X3DV92WQB8ldST/QFjz1hgs/4zrADA elu2c/X7MR4ObOjhDfaVGQ8kMhYf5hx69qyNDsGi -----END CERTIFICATE----- """ SOME_OTHER_RSA_KEY = """ -----BEGIN RSA PRIVATE KEY----- MIICWwIBAAKBgQDDnJL9dAdDpjoq4tksTJmdM0AjIHa7Y2yc8XwU7YkgrOR0m4Po r7El0NwWf5i/LFudX1cOkfwemMIPwQ+67k0BVu/W3SR+g9ZzVKZtTBJnDoqMZ4RJ jBk4gfwhnQYKPIQvdilDZReH3hFcBvPUkYWSHMn17FBTGmNzp2AnMdLpQQIDAQAB AoGAIlew7tKaG+RpPfJJ0p84MQM4dXJTph6UiRFUiZASjSwNh/Ntu0JtRYhfu4t3 U8kD5KNCc4ppyy1ilMV+b4E6/3ydz6syMeJ7G24/PMU8d44zDgZXdM1pf5Nlosh1 BVv1Fvb0PBW2xs9VRlO6W62IWVtsZCGXYNayrXDiRZ50IGkCQQDkmOVEqffz3GeD A+XWp9YrXeMqOmtPcrOuvMIO9DwrlXb8eNwvG5GxbuHGuZfOp01tiPyQrkxM0JzU y8iD1pjrAkEA2w9topUzYS/NZt45OD9t5ZBVMfP15AwWRVv7V5uTksTqfZ9tFfh6 pN4oWe6xK/kgKAdE9hkjubGKQBjJSC27gwJAGZlRm1XZUXKuGMrX8yjKYALcjH8M Q1JZ8shqhtgs4MiVEYLLTW8t6ou7NtDTwi2UCx8bAWyzWKrH1UCYzMK8TwJAMngU fz+2ra5wuUF7l1ztudUN+8tEHH04aFRvzNhYIJljmPuxCz3LK87PJyEaCpKD+RTr 
q3NRSsf/nRLY1NtMdwJAVKOdUCwZKGpGyOUZPRbZZAPlojIff2CxJ6E2Pr0RbShD 31icKmhIY+e2rP6v5W7hzTGge5PA0hRfCiwyd+zLoQ== -----END RSA PRIVATE KEY----- """ ALT_EXT_CRT_KEY = """ -----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAsvWeZsM9QOmzziLWwzeuEetz4OW7Q3/ApBYpkV6JZS0X+mi3 X1XejTJcOmyDtblGQsxMWRkRydCnIZ2kAaNOPOY1cxnD30TPGyatHeXqFQQhKJ9V u8S5+1hlPqM/EzkwvpYCMU1oERk9a1mLwKBfnF2N2sM2d3DIehvj2fs1z+HWRWqo GowgtptOoLrIRzFL8H4O6BW65hsR1hhgSVBQx8z6ZR1QutKohWXafkC+9IWNbNbN +A/KnhAqf5/sdVFPrZuH9eNDzyfW0xg0ASqLFFlM+CpjUsWX1UlWBGn190vq20y6 s5HcLRJMnmoJ0oEvsuultji7x0StqBELpgDIjwIDAQABAoIBAC3DX6FZtfU+jgtd n1vGhk3wzu4o8S0+ow2S2UhiS3JDCMmxM4s+ky26Phl2nGvBGDWGttNl9MWOBN80 x7bfgudR20M2yH70wp1n04c8vxJmvu/7ZtogYYrjvOg6qKuKyWtDQwZGjCErOiiU eodku25qAhd6Khh7D9kh/q9EbSteYFXsqJiNrY4ul1+cROMZpHx63xY6AzPmkvSU garkgY4rw9E71t7it2laWkRKVsd+kEjayritdEEliNMVFFtrGEgplYkmLxGf0HLi ROFVMCLRW/P12JpXllFPrBb8rlPL4w1c/s+yStohT0K+o4FLXhsf/inxmfc9XnZX dJm0k/ECgYEA47FpV1caMk+TNPfu318VCGRmjwpXdmkNaUiX2Uvs3xIKQ6KJmpo3 sj0YjQEmQVz8s6geStvU1LdPxgsWZfbDt31M6SNwylh82ABQF1bZyrcMRxM8bHhe bhDITM1dAn6aROkS1cBpfR9NJOFD850lmJvBGR9ORVBGyucTKH5uXxkCgYEAyTU0 zQKW2aU3J7mTCC9cp+eSD3fubJpa3ML5XfQ8YNID4PsxWglNKPcOTC4yaSfxVmyk S0WIQUazCstszQsvwy9YyHtpkMq+0lyCPvrYnmRV0zx5zT155V2zcEh/oj64eoee W5kvJSs/x6vT+lEN0TDEJ2gKEaJuBt6JG6P04ecCgYBSNw1CbEEZSYJt7dhi74I4 tYgSvjk2mFgvW/b4j2HIaksqgNYO7QCPa2AiCfg2Qc09UcceYKJI7Kfxaq97wc6J wsSyqglgBvONSw+gXcvmVpIoV9nJkO0H8SdiFAUxkWVC3KXgaMmuVE8WsgBHRsb8 g8EFwTgR7xqgyS8xv/U6gQKBgQCdUr/dSJgAx6EPq5degAHXu0ZGWAUR38MJ+F2Y 6/5FyhCEWoRlHP66+CmywTBjbnrSk5IG1PBL8ebOmu6QiJ2o5R1rbKvHLe/0dabV bbfwaQ1+ZDvskZP9Fr3WHqnFh3shO2dDwcvOKTnuetj9UWEXXyUQltXAohubvWbB OPqhowKBgB3t2oUSFJI8fSNQnQNkcespJTddr0oLEwgsIl4Q7rdFHLr+/c46svjJ kPMtpfxDQvkgK2aWpS4OP0E2vSU/IfMEDmlypfKe2SaTtFehZSUwR4R1/ZhSL3iS iMwJYgm98P27s4TEMdhlPNVJrj1FrD+4VrgpOsoM20EkZnTvel9s -----END RSA PRIVATE KEY----- """ ENCRYPTED_PKCS8_CRT_KEY_PASSPHRASE = "test_passphrase" ENCRYPTED_PKCS8_CRT_KEY = """-----BEGIN ENCRYPTED PRIVATE KEY----- 
MIIE6TAbBgkqhkiG9w0BBQMwDgQIT04zko6pmJICAggABIIEyL/79sqzTQ7BsEjY ao2Uhh3//mpNJfCDhjSZOmWL7s4+161cEqpxrfxo4bHH8fkZ60VZUQP8CjwwQUhP 4iwpv2bYbQwzlttZwTC6s28wh7FRtgVoVPTwvXJa6fl2zAjLtsjwLZ/556ez9xIJ 67hxkIK2EzGQaeEKI1+vVF5EKsgKiPEmgspOBxRPoVWTx49NooiakGnwaBoDyTob 8FMr8mF1EheNQ4kl1bPrl+csD7PPnfbWUdNVvMljEhS3cYamQDPEWyAzvaIr0rHh /6h80L/G2+0fensrTspWJcjX+XDBwQPk+YMic0TJ3KvkC7p2iNJhjNrjhQ+APZWq xYrjfcmdK0RaaoqN+1zeE1P2kWIJx9CQZVMeGhVzzcmPwJPDnJFpkU+8cgTWnUr/ Fh8YtDoDzLiAUcmV1Kk7LYtYPHuU8epuz5PYm49TbWzdS7PX5wqFAFmrVt5jysm4 D/Ox0r4KV1t7D/1gc1WRIu8oUXkIglCHWNpTyMK0kFPctAf/ua+DUFRE4eSx3rsX ZKIymdF9v/WF1Ud0tsNeudQbVeXWS6UCR8m/rqe81W4npQm/uqUNla+6yaYUmHlk tvw/m6pt+jKhn0XIRkMwHrTpIaMVvInMg0xpkRuc7Xj5A7vNnkypZRNZJHgy7WWC 6GpOCWJOltYaNy7tmAkSUHJ6kNjXK5a4fi30HknEaqKjFTQNGvcybulJ3MXUzds0 MJoTpvQfLzYQbMYZ/XRGND4lgeEbs29nWLPae8D5XlDeZQMin8EukPko8u8+YGbU eWGOvDc+4/xrWrsq1i6R0uWq+Cyoql8oh0PNBlM04S7GAbu1pOD/tPcq/GNYcv/Q vJcIz9KA3BNepq7tC8D88ggEvFjTsHKeW/OnuCxKducSna4Mq+GebU52tKjkLjFC eLG4Vx0BY5xPH3gd7iyuAf7S+08BbinNZWjHLpdmR3vKK5YbLPiGSfcYQdClr6BK 9vNWH4TXmZMV+rWtfSeM/cbhCHwxT5Jx6N0OFAxOblQClWnUD79nGkEgn/GoY/Aj FPNj8u2U/mJHgFHH3ClidYL9jJUvhGpTixB8nGgMjJ0wvFcp+5OysG3TsjqYkwR6 RRNBmM+iLEUFTrMZYb+edHvGJsMEMZ0qvjmZDsfDz6ax5M9zH/ORFcGplgIec8kj I106+dqAVVrv1CrBf2N/pxV0OXVhgl6ECe/Ee1xYC2e2CiEgUnQtedu8ekgPgp73 tHcAiWMamLPTwXuL7jFtvWaQfkYBmrBdEx54+eZOfH/NgV3o8gbaWNHSxbfbwlXN MvyJidZGkXU0DJtUUnO5i2S7ftKCdOzrrSA8HDTvxFUhxretYpF3NzPYpYkM7WJX GM7bTMn37AWYqLZmdYYdjh1ZOH/wsM/3uxGBpyEyy4Urrr1ux7X1P0cL0O2P/72h GRd499JLrRMrmmtQ4KrN7GCHdctvujhDP8zvmnaEyGVzg88XmDg50ZF3+8DmOOgX EMZEYHO2Wi2uyFotFtZCuqoOJmGPPeGV8QrsRs82hnL1bcd6REUTWk0KsTt13lvF WwMJugHFk5NQuse3P4Hh9smQrRrv1dvnpt7s4yKStKolXUaFWcXJvXVaDfR5266Y p7cuYY1cAyI7gFfl5A== -----END ENCRYPTED PRIVATE KEY----- """ UNENCRYPTED_PKCS8_CRT_KEY = """-----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCy9Z5mwz1A6bPO ItbDN64R63Pg5btDf8CkFimRXollLRf6aLdfVd6NMlw6bIO1uUZCzExZGRHJ0Kch 
naQBo0485jVzGcPfRM8bJq0d5eoVBCEon1W7xLn7WGU+oz8TOTC+lgIxTWgRGT1r WYvAoF+cXY3awzZ3cMh6G+PZ+zXP4dZFaqgajCC2m06gushHMUvwfg7oFbrmGxHW GGBJUFDHzPplHVC60qiFZdp+QL70hY1s1s34D8qeECp/n+x1UU+tm4f140PPJ9bT GDQBKosUWUz4KmNSxZfVSVYEafX3S+rbTLqzkdwtEkyeagnSgS+y66W2OLvHRK2o EQumAMiPAgMBAAECggEALcNfoVm19T6OC12fW8aGTfDO7ijxLT6jDZLZSGJLckMI ybEziz6TLbo+GXaca8EYNYa202X0xY4E3zTHtt+C51HbQzbIfvTCnWfThzy/Ema+ 7/tm2iBhiuO86Dqoq4rJa0NDBkaMISs6KJR6h2S7bmoCF3oqGHsP2SH+r0RtK15g VeyomI2tji6XX5xE4xmkfHrfFjoDM+aS9JSBquSBjivD0TvW3uK3aVpaREpWx36Q SNrKuK10QSWI0xUUW2sYSCmViSYvEZ/QcuJE4VUwItFb8/XYmleWUU+sFvyuU8vj DVz+z7JK2iFPQr6jgUteGx/+KfGZ9z1edld0mbST8QKBgQDjsWlXVxoyT5M09+7f XxUIZGaPCld2aQ1pSJfZS+zfEgpDoomamjeyPRiNASZBXPyzqB5K29TUt0/GCxZl 9sO3fUzpI3DKWHzYAFAXVtnKtwxHEzxseF5uEMhMzV0CfppE6RLVwGl9H00k4UPz nSWYm8EZH05FUEbK5xMofm5fGQKBgQDJNTTNApbZpTcnuZMIL1yn55IPd+5smlrc wvld9Dxg0gPg+zFaCU0o9w5MLjJpJ/FWbKRLRYhBRrMKy2zNCy/DL1jIe2mQyr7S XII++tieZFXTPHnNPXnlXbNwSH+iPrh6h55bmS8lKz/Hq9P6UQ3RMMQnaAoRom4G 3okbo/Th5wKBgFI3DUJsQRlJgm3t2GLvgji1iBK+OTaYWC9b9viPYchqSyqA1g7t AI9rYCIJ+DZBzT1Rxx5gokjsp/Fqr3vBzonCxLKqCWAG841LD6Bdy+ZWkihX2cmQ 7QfxJ2IUBTGRZULcpeBoya5UTxayAEdGxvyDwQXBOBHvGqDJLzG/9TqBAoGBAJ1S v91ImADHoQ+rl16AAde7RkZYBRHfwwn4XZjr/kXKEIRahGUc/rr4KbLBMGNuetKT kgbU8Evx5s6a7pCInajlHWtsq8ct7/R1ptVtt/BpDX5kO+yRk/0WvdYeqcWHeyE7 Z0PBy84pOe562P1RYRdfJRCW1cCiG5u9ZsE4+qGjAoGAHe3ahRIUkjx9I1CdA2Rx 6yklN12vSgsTCCwiXhDut0Ucuv79zjqy+MmQ8y2l/ENC+SArZpalLg4/QTa9JT8h 8wQOaXKl8p7ZJpO0V6FlJTBHhHX9mFIveJKIzAliCb3w/buzhMQx2GU81UmuPUWs P7hWuCk6ygzbQSRmdO96X2w= -----END PRIVATE KEY----- """ EXPECTED_IMD_SUBJS = ["IMD3", "IMD2", "IMD1"] X509_IMDS = """Junk -----BEGIN CERTIFICATE----- MIIBhDCCAS6gAwIBAgIGAUo7hO/eMA0GCSqGSIb3DQEBCwUAMA8xDTALBgNVBAMT BElNRDIwHhcNMTQxMjExMjI0MjU1WhcNMjUxMTIzMjI0MjU1WjAPMQ0wCwYDVQQD EwRJTUQzMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKHIPXo2pfD5dpnpVDVz4n43 zn3VYsjz/mgOZU0WIWjPA97mvulb7mwb4/LB4ijOMzHj9XfwP75GiOFxYFs8O80C AwEAAaNwMG4wDwYDVR0TAQH/BAUwAwEB/zA8BgNVHSMENTAzgBS6rfnABCO3oHEz 
NUUtov2hfXzfVaETpBEwDzENMAsGA1UEAxMESU1EMYIGAUo7hO/DMB0GA1UdDgQW BBRiLW10LVJiFO/JOLsQFev0ToAcpzANBgkqhkiG9w0BAQsFAANBABtdF+89WuDi TC0FqCocb7PWdTucaItD9Zn55G8KMd93eXrOE/FQDf1ScC+7j0jIHXjhnyu6k3NV 8el/x5gUHlc= -----END CERTIFICATE----- Junk should be ignored by x509 splitter -----BEGIN CERTIFICATE----- MIIBhDCCAS6gAwIBAgIGAUo7hO/DMA0GCSqGSIb3DQEBCwUAMA8xDTALBgNVBAMT BElNRDEwHhcNMTQxMjExMjI0MjU1WhcNMjUxMTIzMjI0MjU1WjAPMQ0wCwYDVQQD EwRJTUQyMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAJYHqnsisVKTlwVaCSa2wdrv CeJJzqpEVV0RVgAAF6FXjX2Tioii+HkXMR9zFgpE1w4yD7iu9JDb8yTdNh+NxysC AwEAAaNwMG4wDwYDVR0TAQH/BAUwAwEB/zA8BgNVHSMENTAzgBQt3KvN8ncGj4/s if1+wdvIMCoiE6ETpBEwDzENMAsGA1UEAxMEcm9vdIIGAUo7hO+mMB0GA1UdDgQW BBS6rfnABCO3oHEzNUUtov2hfXzfVTANBgkqhkiG9w0BAQsFAANBAIlJODvtmpok eoRPOb81MFwPTTGaIqafebVWfBlR0lmW8IwLhsOUdsQqSzoeypS3SJUBpYT1Uu2v zEDOmgdMsBY= -----END CERTIFICATE----- Junk should be thrown out like junk -----BEGIN CERTIFICATE----- MIIBfzCCASmgAwIBAgIGAUo7hO+mMA0GCSqGSIb3DQEBCwUAMA8xDTALBgNVBAMT BHJvb3QwHhcNMTQxMjExMjI0MjU1WhcNMjUxMTIzMjI0MjU1WjAPMQ0wCwYDVQQD EwRJTUQxMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAI+tSJxr60ogwXFmgqbLMW7K 3fkQnh9sZBi7Qo6AzUnfe/AhXoisib651fOxKXCbp57IgzLTv7O9ygq3I+5fQqsC AwEAAaNrMGkwDwYDVR0TAQH/BAUwAwEB/zA3BgNVHSMEMDAugBR73ZKSpjbsz9tZ URkvFwpIO7gB4KETpBEwDzENMAsGA1UEAxMEcm9vdIIBATAdBgNVHQ4EFgQULdyr zfJ3Bo+P7In9fsHbyDAqIhMwDQYJKoZIhvcNAQELBQADQQBenkZ2k7RgZqgj+dxA D7BF8MN1oUAOpyYqAjkGddSEuMyNmwtHKZI1dyQ0gBIQdiU9yAG2oTbUIK4msbBV uJIQ -----END CERTIFICATE-----""" def _get_rsa_numbers(private_key, private_key_passphrase=None): """ Grabs the private and public numbers as a dictionary from an RSA private key used for the dump_private_key test case :param private_key: :param private_key_passphrase: :returns: a dictionary with keys (p,q,e,d,t,e,n) """ kw = {"private_key_passphrase": private_key_passphrase} pk = cert_parser._read_pyca_private_key(private_key, **kw) p = pk.private_numbers().p q = pk.private_numbers().q d = pk.private_numbers().d t = (p - 1) * (q - 1) e = 
pk.private_numbers().public_numbers.e n = pk.private_numbers().public_numbers.n # should be p*q # Force a canonical representation for comparison algos # by swapping p and q if q is bigger if p < q: (p, q) = (q, p) return {"p": p, "q": q, "d": d, "t": t, "e": e, "n": n} class TestTLSParseUtils(base.BaseTestCase): def test_alt_subject_name_parses(self): hosts = cert_parser.get_host_names(ALT_EXT_CRT) self.assertEqual('www.cnfromsubject.org', hosts['cn']) self.assertEqual('www.hostfromdnsname1.com', hosts['dns_names'][0]) self.assertEqual('www.hostfromdnsname2.com', hosts['dns_names'][1]) self.assertEqual('www.hostfromdnsname3.com', hosts['dns_names'][2]) self.assertEqual('www.hostfromdnsname4.com', hosts['dns_names'][3]) def test_x509_parses(self): self.assertRaises(exceptions.UnreadableCert, cert_parser.validate_cert, "BAD CERT") self.assertTrue(cert_parser.validate_cert(six.u(ALT_EXT_CRT))) self.assertTrue(cert_parser.validate_cert(ALT_EXT_CRT)) self.assertTrue(cert_parser.validate_cert(ALT_EXT_CRT, private_key=UNENCRYPTED_PKCS8_CRT_KEY)) def test_x509_parses_intermediates(self): # Should not throw error when parsing with intermediates cert_parser.validate_cert(ALT_EXT_CRT, UNENCRYPTED_PKCS8_CRT_KEY, intermediates=X509_IMDS) def test_read_private_key(self): self.assertRaises(exceptions.NeedsPassphrase, cert_parser._read_privatekey, ENCRYPTED_PKCS8_CRT_KEY) cert_parser._read_privatekey( str(ENCRYPTED_PKCS8_CRT_KEY), passphrase=ENCRYPTED_PKCS8_CRT_KEY_PASSPHRASE) def test_read_private_key_unicode(self): self.assertRaises(exceptions.NeedsPassphrase, cert_parser._read_privatekey, ENCRYPTED_PKCS8_CRT_KEY) cert_parser._read_privatekey( six.u(ENCRYPTED_PKCS8_CRT_KEY), passphrase=ENCRYPTED_PKCS8_CRT_KEY_PASSPHRASE) cert_parser._read_privatekey( ENCRYPTED_PKCS8_CRT_KEY, passphrase=six.u(ENCRYPTED_PKCS8_CRT_KEY_PASSPHRASE)) def test_dump_private_key(self): self.assertRaises(exceptions.NeedsPassphrase, cert_parser.dump_private_key, ENCRYPTED_PKCS8_CRT_KEY) striped_rsa_key = 
_get_rsa_numbers( UNENCRYPTED_PKCS8_CRT_KEY) decrypted_rsa_key = _get_rsa_numbers( cert_parser.dump_private_key(ENCRYPTED_PKCS8_CRT_KEY, ENCRYPTED_PKCS8_CRT_KEY_PASSPHRASE)) self.assertEqual(striped_rsa_key, decrypted_rsa_key) self.assertIsNot(ENCRYPTED_PKCS8_CRT_KEY, cert_parser.dump_private_key( ENCRYPTED_PKCS8_CRT_KEY, ENCRYPTED_PKCS8_CRT_KEY_PASSPHRASE)) def test_validate_cert_and_key_match(self): self.assertTrue(cert_parser.validate_cert(ALT_EXT_CRT, private_key=ALT_EXT_CRT_KEY)) self.assertRaises(exceptions.MisMatchedKey, cert_parser.validate_cert, ALT_EXT_CRT, private_key=SOME_OTHER_RSA_KEY) def test_split_x509s(self): imds = [] for x509Pem in cert_parser._split_x509s(X509_IMDS): imds.append(cert_parser._get_x509_from_pem_bytes(x509Pem)) for i in range(0, len(imds)): self.assertEqual(EXPECTED_IMD_SUBJS[i], imds[i].subject.get_attributes_for_oid( x509.OID_COMMON_NAME)[0].value) neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/db/0000775000567000056710000000000012701410110022757 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/db/__init__.py0000664000567000056710000000000012701407726025101 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/db/loadbalancer/0000775000567000056710000000000012701410110025366 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/db/loadbalancer/test_db_loadbalancerv2.py0000664000567000056710000051711412701407727032360 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. import contextlib import copy import exceptions as ex import mock import six from neutron.api import extensions from neutron.api.v2 import attributes from neutron.common import config from neutron import context import neutron.db.l3_db # noqa from neutron.plugins.common import constants from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron_lib import constants as n_constants from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_utils import uuidutils import testtools import webob.exc from neutron import manager from neutron_lbaas._i18n import _ from neutron_lbaas.common.cert_manager import cert_manager from neutron_lbaas.common import exceptions from neutron_lbaas.db.loadbalancer import models from neutron_lbaas.drivers.logging_noop import driver as noop_driver import neutron_lbaas.extensions from neutron_lbaas.extensions import l7 from neutron_lbaas.extensions import loadbalancerv2 from neutron_lbaas.extensions import sharedpools from neutron_lbaas.services.loadbalancer import constants as lb_const from neutron_lbaas.services.loadbalancer import plugin as loadbalancer_plugin from neutron_lbaas.tests import base DB_CORE_PLUGIN_CLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' DB_LB_PLUGIN_CLASS = ( "neutron_lbaas.services.loadbalancer." "plugin.LoadBalancerPluginv2" ) NOOP_DRIVER_CLASS = ('neutron_lbaas.drivers.logging_noop.driver.' 
'LoggingNoopLoadBalancerDriver') extensions_path = ':'.join(neutron_lbaas.extensions.__path__) _subnet_id = "0c798ed8-33ba-11e2-8b28-000c291c4d14" class LbaasTestMixin(object): resource_keys = loadbalancerv2.RESOURCE_ATTRIBUTE_MAP.keys() resource_keys.extend(l7.RESOURCE_ATTRIBUTE_MAP.keys()) resource_prefix_map = dict( (k, loadbalancerv2.LOADBALANCERV2_PREFIX) for k in resource_keys) def _get_loadbalancer_optional_args(self): return 'description', 'vip_address', 'admin_state_up', 'name' def _create_loadbalancer(self, fmt, subnet_id, expected_res_status=None, **kwargs): data = {'loadbalancer': {'vip_subnet_id': subnet_id, 'tenant_id': self._tenant_id}} args = self._get_loadbalancer_optional_args() for arg in args: if arg in kwargs and kwargs[arg] is not None: data['loadbalancer'][arg] = kwargs[arg] lb_req = self.new_create_request('loadbalancers', data, fmt) lb_res = lb_req.get_response(self.ext_api) if expected_res_status: self.assertEqual(expected_res_status, lb_res.status_int) return lb_res def _get_listener_optional_args(self): return ('name', 'description', 'default_pool_id', 'loadbalancer_id', 'connection_limit', 'admin_state_up', 'default_tls_container_ref', 'sni_container_refs') def _create_listener(self, fmt, protocol, protocol_port, loadbalancer_id=None, default_pool_id=None, expected_res_status=None, **kwargs): data = {'listener': {'protocol': protocol, 'protocol_port': protocol_port, 'tenant_id': self._tenant_id}} if loadbalancer_id: data['listener']['loadbalancer_id'] = loadbalancer_id if default_pool_id: data['listener']['default_pool_id'] = default_pool_id args = self._get_listener_optional_args() for arg in args: if arg in kwargs and kwargs[arg] is not None: data['listener'][arg] = kwargs[arg] listener_req = self.new_create_request('listeners', data, fmt) listener_res = listener_req.get_response(self.ext_api) if expected_res_status: self.assertEqual(expected_res_status, listener_res.status_int) return listener_res def _get_pool_optional_args(self): 
return 'name', 'description', 'admin_state_up', 'session_persistence' def _create_pool(self, fmt, protocol, lb_algorithm, listener_id=None, loadbalancer_id=None, expected_res_status=None, **kwargs): data = {'pool': {'protocol': protocol, 'lb_algorithm': lb_algorithm, 'tenant_id': self._tenant_id}} if listener_id: data['pool']['listener_id'] = listener_id if loadbalancer_id: data['pool']['loadbalancer_id'] = loadbalancer_id args = self._get_pool_optional_args() for arg in args: if arg in kwargs and kwargs[arg] is not None: data['pool'][arg] = kwargs[arg] pool_req = self.new_create_request('pools', data, fmt) pool_res = pool_req.get_response(self.ext_api) if expected_res_status: self.assertEqual(expected_res_status, pool_res.status_int) return pool_res def _get_member_optional_args(self): return 'weight', 'admin_state_up', 'name' def _create_member(self, fmt, pool_id, address, protocol_port, subnet_id, expected_res_status=None, **kwargs): data = {'member': {'address': address, 'protocol_port': protocol_port, 'subnet_id': subnet_id, 'tenant_id': self._tenant_id}} args = self._get_member_optional_args() for arg in args: if arg in kwargs and kwargs[arg] is not None: data['member'][arg] = kwargs[arg] member_req = self.new_create_request('pools', data, fmt=fmt, id=pool_id, subresource='members') member_res = member_req.get_response(self.ext_api) if expected_res_status: self.assertEqual(expected_res_status, member_res.status_int) return member_res def _get_healthmonitor_optional_args(self): return ('weight', 'admin_state_up', 'expected_codes', 'url_path', 'http_method', 'name') def _create_healthmonitor(self, fmt, pool_id, type, delay, timeout, max_retries, expected_res_status=None, **kwargs): data = {'healthmonitor': {'type': type, 'delay': delay, 'timeout': timeout, 'max_retries': max_retries, 'pool_id': pool_id, 'tenant_id': self._tenant_id}} args = self._get_healthmonitor_optional_args() for arg in args: if arg in kwargs and kwargs[arg] is not None: 
data['healthmonitor'][arg] = kwargs[arg] hm_req = self.new_create_request('healthmonitors', data, fmt=fmt) hm_res = hm_req.get_response(self.ext_api) if expected_res_status: self.assertEqual(expected_res_status, hm_res.status_int) return hm_res def _add_optional_args(self, optional_args, data, **kwargs): for arg in optional_args: if arg in kwargs and kwargs[arg] is not None: data[arg] = kwargs[arg] def _get_l7policy_optional_args(self): return ('name', 'description', 'redirect_pool_id', 'redirect_url', 'admin_state_up', 'position') def _create_l7policy(self, fmt, listener_id, action, expected_res_status=None, **kwargs): data = {'l7policy': {'listener_id': listener_id, 'action': action, 'tenant_id': self._tenant_id}} optional_args = self._get_l7policy_optional_args() self._add_optional_args(optional_args, data['l7policy'], **kwargs) l7policy_req = self.new_create_request('l7policies', data, fmt) l7policy_res = l7policy_req.get_response(self.ext_api) if expected_res_status: self.assertEqual(l7policy_res.status_int, expected_res_status) return l7policy_res def _get_l7rule_optional_args(self): return ('invert', 'key', 'admin_state_up') def _create_l7policy_rule(self, fmt, l7policy_id, type, compare_type, value, expected_res_status=None, **kwargs): data = {'rule': {'type': type, 'compare_type': compare_type, 'value': value, 'tenant_id': self._tenant_id}} optional_args = self._get_l7rule_optional_args() self._add_optional_args(optional_args, data['rule'], **kwargs) rule_req = self.new_create_request('l7policies', data, fmt, id=l7policy_id, subresource='rules') rule_res = rule_req.get_response(self.ext_api) if expected_res_status: self.assertEqual(rule_res.status_int, expected_res_status) return rule_res @contextlib.contextmanager def loadbalancer(self, fmt=None, subnet=None, no_delete=False, **kwargs): if not fmt: fmt = self.fmt with test_db_base_plugin_v2.optional_ctx( subnet, self.subnet) as tmp_subnet: res = self._create_loadbalancer(fmt, tmp_subnet['subnet']['id'], 
**kwargs) if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError( explanation=_("Unexpected error code: %s") % res.status_int ) lb = self.deserialize(fmt or self.fmt, res) yield lb if not no_delete: self._delete('loadbalancers', lb['loadbalancer']['id']) @contextlib.contextmanager def listener(self, fmt=None, protocol='HTTP', loadbalancer_id=None, protocol_port=80, default_pool_id=None, no_delete=False, **kwargs): if not fmt: fmt = self.fmt if loadbalancer_id and default_pool_id: res = self._create_listener(fmt, protocol, protocol_port, loadbalancer_id=loadbalancer_id, default_pool_id=default_pool_id, **kwargs) elif loadbalancer_id: res = self._create_listener(fmt, protocol, protocol_port, loadbalancer_id=loadbalancer_id, **kwargs) else: res = self._create_listener(fmt, protocol, protocol_port, default_pool_id=default_pool_id, **kwargs) if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError( explanation=_("Unexpected error code: %s") % res.status_int ) listener = self.deserialize(fmt or self.fmt, res) yield listener if not no_delete: self._delete('listeners', listener['listener']['id']) @contextlib.contextmanager def pool(self, fmt=None, protocol='HTTP', lb_algorithm='ROUND_ROBIN', no_delete=False, listener_id=None, loadbalancer_id=None, **kwargs): if not fmt: fmt = self.fmt if listener_id and loadbalancer_id: res = self._create_pool(fmt, protocol=protocol, lb_algorithm=lb_algorithm, listener_id=listener_id, loadbalancer_id=loadbalancer_id, **kwargs) elif listener_id: res = self._create_pool(fmt, protocol=protocol, lb_algorithm=lb_algorithm, listener_id=listener_id, **kwargs) else: res = self._create_pool(fmt, protocol=protocol, lb_algorithm=lb_algorithm, loadbalancer_id=loadbalancer_id, **kwargs) if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError( explanation=_("Unexpected error code: %s") % res.status_int ) pool = self.deserialize(fmt or self.fmt, res) yield pool if not 
no_delete: self._delete('pools', pool['pool']['id']) @contextlib.contextmanager def member(self, fmt=None, pool_id='pool1id', address='127.0.0.1', protocol_port=80, subnet=None, no_delete=False, **kwargs): if not fmt: fmt = self.fmt subnet = subnet or self.test_subnet with test_db_base_plugin_v2.optional_ctx( subnet, self.subnet) as tmp_subnet: res = self._create_member(fmt, pool_id=pool_id, address=address, protocol_port=protocol_port, subnet_id=tmp_subnet['subnet']['id'], **kwargs) if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError( explanation=_("Unexpected error code: %s") % res.status_int ) member = self.deserialize(fmt or self.fmt, res) yield member if not no_delete: del_req = self.new_delete_request( 'pools', fmt=fmt, id=pool_id, subresource='members', sub_id=member['member']['id']) del_res = del_req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPNoContent.code, del_res.status_int) @contextlib.contextmanager def healthmonitor(self, fmt=None, pool_id='pool1id', type='TCP', delay=1, timeout=1, max_retries=1, no_delete=False, **kwargs): if not fmt: fmt = self.fmt res = self._create_healthmonitor(fmt, pool_id=pool_id, type=type, delay=delay, timeout=timeout, max_retries=max_retries, **kwargs) if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError( explanation=_("Unexpected error code: %s") % res.status_int ) healthmonitor = self.deserialize(fmt or self.fmt, res) yield healthmonitor if not no_delete: del_req = self.new_delete_request( 'healthmonitors', fmt=fmt, id=healthmonitor['healthmonitor']['id']) del_res = del_req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPNoContent.code, del_res.status_int) @contextlib.contextmanager def l7policy(self, listener_id, fmt=None, action=lb_const.L7_POLICY_ACTION_REJECT, no_delete=False, **kwargs): if not fmt: fmt = self.fmt res = self._create_l7policy(fmt, listener_id=listener_id, action=action, **kwargs) if res.status_int >= 
webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError( explanation=_("Unexpected error code: %s") % res.status_int ) l7policy = self.deserialize(fmt or self.fmt, res) yield l7policy if not no_delete: self.plugin.db.update_status(context.get_admin_context(), models.L7Policy, l7policy['l7policy']['id'], constants.ACTIVE) del_req = self.new_delete_request( 'l7policies', fmt=fmt, id=l7policy['l7policy']['id']) del_res = del_req.get_response(self.ext_api) self.assertEqual(del_res.status_int, webob.exc.HTTPNoContent.code) @contextlib.contextmanager def l7policy_rule(self, l7policy_id, fmt=None, value='value1', type=lb_const.L7_RULE_TYPE_HOST_NAME, compare_type=lb_const.L7_RULE_COMPARE_TYPE_EQUAL_TO, no_delete=False, **kwargs): if not fmt: fmt = self.fmt res = self._create_l7policy_rule(fmt, l7policy_id=l7policy_id, type=type, compare_type=compare_type, value=value, **kwargs) if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError( explanation=_("Unexpected error code: %s") % res.status_int ) rule = self.deserialize(fmt or self.fmt, res) yield rule if not no_delete: self.plugin.db.update_status(context.get_admin_context(), models.L7Rule, rule['rule']['id'], constants.ACTIVE) del_req = self.new_delete_request( 'l7policies', fmt=fmt, id=l7policy_id, subresource='rules', sub_id=rule['rule']['id']) del_res = del_req.get_response(self.ext_api) self.assertEqual(del_res.status_int, webob.exc.HTTPNoContent.code) class ExtendedPluginAwareExtensionManager(object): def __init__(self, extension_aliases): self.extension_aliases = extension_aliases def get_resources(self): extensions_list = [] if 'shared_pools' in self.extension_aliases: extensions_list.append(sharedpools) if 'l7' in self.extension_aliases: extensions_list.append(l7) for extension in extensions_list: if 'RESOURCE_ATTRIBUTE_MAP' in extension.__dict__: loadbalancerv2.RESOURCE_ATTRIBUTE_MAP.update( extension.RESOURCE_ATTRIBUTE_MAP) if 'SUB_RESOURCE_ATTRIBUTE_MAP' in 
extension.__dict__: loadbalancerv2.SUB_RESOURCE_ATTRIBUTE_MAP.update( extension.SUB_RESOURCE_ATTRIBUTE_MAP) if 'EXTENDED_ATTRIBUTES_2_0' in extension.__dict__: for key in loadbalancerv2.RESOURCE_ATTRIBUTE_MAP.keys(): loadbalancerv2.RESOURCE_ATTRIBUTE_MAP[key].update( extension.EXTENDED_ATTRIBUTES_2_0.get(key, {})) return loadbalancerv2.Loadbalancerv2.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] class LbaasPluginDbTestCase(LbaasTestMixin, base.NeutronDbPluginV2TestCase): def setUp(self, core_plugin=None, lb_plugin=None, lbaas_provider=None, ext_mgr=None): service_plugins = {'lb_plugin_name': DB_LB_PLUGIN_CLASS} if not lbaas_provider: lbaas_provider = ( constants.LOADBALANCERV2 + ':lbaas:' + NOOP_DRIVER_CLASS + ':default') # override the default service provider self.set_override([lbaas_provider]) # removing service-type because it resides in neutron and tests # dont care LBPlugin = loadbalancer_plugin.LoadBalancerPluginv2 sea_index = None for index, sea in enumerate(LBPlugin.supported_extension_aliases): if sea == 'service-type': sea_index = index if sea_index: del LBPlugin.supported_extension_aliases[sea_index] super(LbaasPluginDbTestCase, self).setUp( ext_mgr=ext_mgr, service_plugins=service_plugins ) if not ext_mgr: self.plugin = loadbalancer_plugin.LoadBalancerPluginv2() # This is necessary because the automatic extension manager # finding algorithm below will find the loadbalancerv2 # extension and fail to initizlize the main API router with # extensions' resources ext_mgr = ExtendedPluginAwareExtensionManager( LBPlugin.supported_extension_aliases) app = config.load_paste_app('extensions_test_app') self.ext_api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr) get_lbaas_agent_patcher = mock.patch( 'neutron_lbaas.agent_scheduler' '.LbaasAgentSchedulerDbMixin.get_agent_hosting_loadbalancer') mock_lbaas_agent = mock.MagicMock() get_lbaas_agent_patcher.start().return_value = mock_lbaas_agent 
mock_lbaas_agent.__getitem__.return_value = {'host': 'host'} self._subnet_id = _subnet_id def _update_loadbalancer_api(self, lb_id, data): req = self.new_update_request('loadbalancers', data, lb_id) resp = req.get_response(self.ext_api) body = self.deserialize(self.fmt, req.get_response(self.ext_api)) return resp, body def _delete_loadbalancer_api(self, lb_id): req = self.new_delete_request('loadbalancers', lb_id) resp = req.get_response(self.ext_api) return resp def _get_loadbalancer_api(self, lb_id): req = self.new_show_request('loadbalancers', lb_id) resp = req.get_response(self.ext_api) body = self.deserialize(self.fmt, resp) return resp, body def _list_loadbalancers_api(self): req = self.new_list_request('loadbalancers') resp = req.get_response(self.ext_api) body = self.deserialize(self.fmt, resp) return resp, body def _get_loadbalancer_stats_api(self, lb_id): req = self.new_show_request('loadbalancers', lb_id, subresource='stats') resp = req.get_response(self.ext_api) body = self.deserialize(self.fmt, resp) return resp, body def _get_loadbalancer_statuses_api(self, lb_id): req = self.new_show_request('loadbalancers', lb_id, subresource='statuses') resp = req.get_response(self.ext_api) body = self.deserialize(self.fmt, resp) return resp, body def _validate_statuses(self, lb_id, listener_id=None, l7policy_id=None, l7rule_id=None, pool_id=None, member_id=None, hm_id=None, member_disabled=False, listener_disabled=False, l7policy_disabled=False, l7rule_disabled=False, loadbalancer_disabled=False): resp, body = self._get_loadbalancer_statuses_api(lb_id) lb_statuses = body['statuses']['loadbalancer'] self.assertEqual(constants.ACTIVE, lb_statuses['provisioning_status']) if loadbalancer_disabled: self.assertEqual(lb_const.DISABLED, lb_statuses['operating_status']) else: self.assertEqual(lb_const.ONLINE, lb_statuses['operating_status']) if listener_id: listener_statuses = None for listener in lb_statuses['listeners']: if listener['id'] == listener_id: 
listener_statuses = listener self.assertIsNotNone(listener_statuses) self.assertEqual(constants.ACTIVE, listener_statuses['provisioning_status']) if listener_disabled: self.assertEqual(lb_const.DISABLED, listener_statuses['operating_status']) else: self.assertEqual(lb_const.ONLINE, listener_statuses['operating_status']) if l7policy_id: policy_statuses = None for policy in listener_statuses['l7policies']: if policy['id'] == l7policy_id: policy_statuses = policy self.assertIsNotNone(policy_statuses) self.assertEqual(constants.ACTIVE, policy_statuses['provisioning_status']) if l7rule_id: rule_statuses = None for rule in policy_statuses['rules']: if rule['id'] == l7rule_id: rule_statuses = rule self.assertIsNotNone(rule_statuses) self.assertEqual(constants.ACTIVE, rule_statuses['provisioning_status']) if pool_id: pool_statuses = None for pool in lb_statuses['pools']: if pool['id'] == pool_id: pool_statuses = pool self.assertIsNotNone(pool_statuses) self.assertEqual(constants.ACTIVE, pool_statuses['provisioning_status']) self.assertEqual(lb_const.ONLINE, pool_statuses['operating_status']) if member_id: member_statuses = None for member in pool_statuses['members']: if member['id'] == member_id: member_statuses = member self.assertIsNotNone(member_statuses) self.assertEqual(constants.ACTIVE, member_statuses['provisioning_status']) if member_disabled: self.assertEqual(lb_const.DISABLED, member_statuses["operating_status"]) else: self.assertEqual(lb_const.ONLINE, member_statuses['operating_status']) if hm_id: hm_status = pool_statuses['healthmonitor'] self.assertEqual(constants.ACTIVE, hm_status['provisioning_status']) class LbaasLoadBalancerTests(LbaasPluginDbTestCase): def test_create_loadbalancer(self, **extras): expected = { 'name': 'vip1', 'description': '', 'admin_state_up': True, 'provisioning_status': constants.ACTIVE, 'operating_status': lb_const.ONLINE, 'tenant_id': self._tenant_id, 'listeners': [], 'pools': [], 'provider': 'lbaas' } expected.update(extras) with 
self.subnet() as subnet: expected['vip_subnet_id'] = subnet['subnet']['id'] name = expected['name'] with self.loadbalancer(name=name, subnet=subnet, **extras) as lb: lb_id = lb['loadbalancer']['id'] for k in ('id', 'vip_address', 'vip_subnet_id'): self.assertTrue(lb['loadbalancer'].get(k, None)) expected['vip_port_id'] = lb['loadbalancer']['vip_port_id'] actual = dict((k, v) for k, v in lb['loadbalancer'].items() if k in expected) self.assertEqual(expected, actual) self._validate_statuses(lb_id) return lb def test_create_loadbalancer_with_vip_address(self): self.test_create_loadbalancer(vip_address='10.0.0.7') def test_create_loadbalancer_with_vip_address_outside_subnet(self): with testtools.ExpectedException(webob.exc.HTTPClientError): self.test_create_loadbalancer(vip_address='9.9.9.9') def test_update_loadbalancer(self): name = 'new_loadbalancer' description = 'a crazy loadbalancer' expected_values = {'name': name, 'description': description, 'admin_state_up': False, 'provisioning_status': constants.ACTIVE, 'operating_status': lb_const.ONLINE, 'listeners': [], 'provider': 'lbaas'} with self.subnet() as subnet: expected_values['vip_subnet_id'] = subnet['subnet']['id'] with self.loadbalancer(subnet=subnet) as loadbalancer: expected_values['vip_port_id'] = ( loadbalancer['loadbalancer']['vip_port_id']) loadbalancer_id = loadbalancer['loadbalancer']['id'] data = {'loadbalancer': {'name': name, 'description': description, 'admin_state_up': False}} resp, res = self._update_loadbalancer_api(loadbalancer_id, data) for k in expected_values: self.assertEqual(expected_values[k], res['loadbalancer'][k]) self._validate_statuses(loadbalancer_id, loadbalancer_disabled=True) def test_delete_loadbalancer(self): with self.subnet() as subnet: with self.loadbalancer(subnet=subnet, no_delete=True) as loadbalancer: loadbalancer_id = loadbalancer['loadbalancer']['id'] resp = self._delete_loadbalancer_api(loadbalancer_id) self.assertEqual(webob.exc.HTTPNoContent.code, resp.status_int) 
    def test_delete_loadbalancer_when_loadbalancer_in_use(self):
        # Deleting a load balancer that still owns a listener must be
        # rejected with EntityInUse, and the LB's status tree must be
        # left intact.
        with self.subnet() as subnet:
            with self.loadbalancer(subnet=subnet) as loadbalancer:
                lb_id = loadbalancer['loadbalancer']['id']
                with self.listener(loadbalancer_id=lb_id):
                    ctx = context.get_admin_context()
                    self.assertRaises(loadbalancerv2.EntityInUse,
                                      self.plugin.delete_loadbalancer,
                                      ctx, lb_id)
                    self._validate_statuses(lb_id)

    def test_show_loadbalancer(self):
        # GET of a single load balancer returns every attribute supplied
        # at create time plus the server-generated ids.
        name = 'lb_show'
        description = 'lb_show description'
        vip_address = '10.0.0.10'
        expected_values = {'name': name,
                           'description': description,
                           'vip_address': '10.0.0.10',
                           'admin_state_up': True,
                           'provisioning_status': constants.ACTIVE,
                           'operating_status': lb_const.ONLINE,
                           'listeners': [],
                           'provider': 'lbaas'}
        with self.subnet() as subnet:
            vip_subnet_id = subnet['subnet']['id']
            expected_values['vip_subnet_id'] = vip_subnet_id
            with self.loadbalancer(subnet=subnet, name=name,
                                   description=description,
                                   vip_address=vip_address) as lb:
                lb_id = lb['loadbalancer']['id']
                expected_values['id'] = lb_id
                expected_values['vip_port_id'] = (
                    lb['loadbalancer']['vip_port_id'])
                resp, body = self._get_loadbalancer_api(lb_id)
                for k in expected_values:
                    self.assertEqual(expected_values[k],
                                     body['loadbalancer'][k])

    def test_list_loadbalancers(self):
        # Listing with a single existing load balancer returns exactly
        # one entry carrying the expected attribute values.
        name = 'lb_show'
        description = 'lb_show description'
        vip_address = '10.0.0.10'
        expected_values = {'name': name,
                           'description': description,
                           'vip_address': '10.0.0.10',
                           'admin_state_up': True,
                           'provisioning_status': constants.ACTIVE,
                           'operating_status': lb_const.ONLINE,
                           'listeners': [],
                           'provider': 'lbaas'}
        with self.subnet() as subnet:
            vip_subnet_id = subnet['subnet']['id']
            expected_values['vip_subnet_id'] = vip_subnet_id
            with self.loadbalancer(subnet=subnet, name=name,
                                   description=description,
                                   vip_address=vip_address) as lb:
                lb_id = lb['loadbalancer']['id']
                expected_values['id'] = lb_id
                expected_values['vip_port_id'] = (
                    lb['loadbalancer']['vip_port_id'])
                resp, body = self._list_loadbalancers_api()
                self.assertEqual(1,
                                 len(body['loadbalancers']))
                for k in expected_values:
                    self.assertEqual(expected_values[k],
                                     body['loadbalancers'][0][k])

    def test_list_loadbalancers_with_sort_emulated(self):
        # Three load balancers listed sorted by name ascending.
        with self.subnet() as subnet:
            with self.loadbalancer(subnet=subnet, name='lb1') as lb1:
                with self.loadbalancer(subnet=subnet, name='lb2') as lb2:
                    with self.loadbalancer(subnet=subnet, name='lb3') as lb3:
                        self._test_list_with_sort(
                            'loadbalancer',
                            (lb1, lb2, lb3),
                            [('name', 'asc')]
                        )

    def test_list_loadbalancers_with_pagination_emulated(self):
        # Paginate three load balancers two per page, name ascending.
        with self.subnet() as subnet:
            with self.loadbalancer(subnet=subnet, name='lb1') as lb1:
                with self.loadbalancer(subnet=subnet, name='lb2') as lb2:
                    with self.loadbalancer(subnet=subnet, name='lb3') as lb3:
                        self._test_list_with_pagination(
                            'loadbalancer',
                            (lb1, lb2, lb3),
                            ('name', 'asc'), 2, 2
                        )

    def test_list_loadbalancers_with_pagination_reverse_emulated(self):
        # Same pagination walk as above, but traversed backwards.
        with self.subnet() as subnet:
            with self.loadbalancer(subnet=subnet, name='lb1') as lb1:
                with self.loadbalancer(subnet=subnet, name='lb2') as lb2:
                    with self.loadbalancer(subnet=subnet, name='lb3') as lb3:
                        self._test_list_with_pagination_reverse(
                            'loadbalancer',
                            (lb1, lb2, lb3),
                            ('name', 'asc'), 2, 2
                        )

    def test_get_loadbalancer_stats(self):
        # A freshly created load balancer reports all-zero statistics.
        expected_values = {'stats': {lb_const.STATS_TOTAL_CONNECTIONS: 0,
                                     lb_const.STATS_ACTIVE_CONNECTIONS: 0,
                                     lb_const.STATS_OUT_BYTES: 0,
                                     lb_const.STATS_IN_BYTES: 0}}
        with self.subnet() as subnet:
            with self.loadbalancer(subnet=subnet) as lb:
                lb_id = lb['loadbalancer']['id']
                resp, body = self._get_loadbalancer_stats_api(lb_id)
                self.assertEqual(expected_values, body)

    def test_show_loadbalancer_with_listeners(self):
        # Listeners attached to the load balancer must appear (by id) in
        # the 'listeners' attribute of the GET response.
        name = 'lb_show'
        description = 'lb_show description'
        vip_address = '10.0.0.10'
        expected_values = {'name': name,
                           'description': description,
                           'vip_address': '10.0.0.10',
                           'admin_state_up': True,
                           'provisioning_status': constants.ACTIVE,
                           'operating_status': lb_const.ONLINE,
                           'listeners': []}
        with self.subnet() as subnet:
            vip_subnet_id = subnet['subnet']['id']
            expected_values['vip_subnet_id'] = vip_subnet_id
            with self.loadbalancer(subnet=subnet, name=name,
                                   description=description,
                                   vip_address=vip_address) as lb:
                lb_id = lb['loadbalancer']['id']
                expected_values['id'] = lb_id
                with self.listener(loadbalancer_id=lb_id,
                                   protocol_port=80) as listener1:
                    listener1_id = listener1['listener']['id']
                    expected_values['listeners'].append(
                        {'id': listener1_id})
                    with self.listener(loadbalancer_id=lb_id,
                                       protocol_port=81) as listener2:
                        listener2_id = listener2['listener']['id']
                        expected_values['listeners'].append(
                            {'id': listener2_id})
                        resp, body = self._get_loadbalancer_api(lb_id)
                        for k in expected_values:
                            self.assertEqual(expected_values[k],
                                             body['loadbalancer'][k])

    def test_port_delete_via_port_api(self):
        # Deleting a port owned by LBaaSv2 through the core port API must
        # be blocked with ServicePortInUse.
        port = {
            'id': 'my_port_id',
            'device_owner': n_constants.DEVICE_OWNER_LOADBALANCERV2
        }
        ctx = context.get_admin_context()
        port['device_owner'] = n_constants.DEVICE_OWNER_LOADBALANCERV2
        myloadbalancers = [{'name': 'lb1'}]
        with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp:
            self.plugin.db.get_loadbalancers = mock.Mock(
                return_value=myloadbalancers)
            plugin = mock.Mock()
            gp.return_value = plugin
            plugin._get_port.return_value = port
            self.assertRaises(n_exc.ServicePortInUse,
                              self.plugin.db.prevent_lbaasv2_port_deletion,
                              ctx,
                              port['id'])


class LoadBalancerDelegateVIPCreation(LbaasPluginDbTestCase):
    # Variant of the load balancer tests where the noop driver reports
    # allocates_vip=True, i.e. VIP port creation is delegated to the
    # driver instead of being performed by the plugin.

    def setUp(self):
        driver_patcher = mock.patch.object(
            noop_driver.LoggingNoopLoadBalancerManager,
            'allocates_vip', new_callable=mock.PropertyMock)
        driver_patcher.start().return_value = True
        super(LoadBalancerDelegateVIPCreation, self).setUp()

    def test_create_loadbalancer(self):
        # When the driver allocates the VIP, the plugin must not set a
        # vip_address on the freshly created load balancer.
        expected = {
            'name': 'vip1',
            'description': '',
            'admin_state_up': True,
            'provisioning_status': constants.ACTIVE,
            'operating_status': lb_const.ONLINE,
            'tenant_id': self._tenant_id,
            'listeners': [],
            'pools': [],
            'provider': 'lbaas'
        }
        with self.subnet() as subnet:
            expected['vip_subnet_id'] = subnet['subnet']['id']
            name = expected['name']
            with self.loadbalancer(name=name, subnet=subnet) as lb:
                lb_id = lb['loadbalancer']['id']
                for k in ('id', 'vip_subnet_id'):
                    self.assertTrue(lb['loadbalancer'].get(k, None))
                self.assertIsNone(lb['loadbalancer'].get('vip_address'))
                expected['vip_port_id'] = lb['loadbalancer']['vip_port_id']
                actual = dict((k, v) for k, v in lb['loadbalancer'].items()
                              if k in expected)
                self.assertEqual(expected, actual)
                self._validate_statuses(lb_id)
            return lb

    def test_delete_loadbalancer(self):
        # Even with delete_vip_port=True the port survives deletion here,
        # because with a delegated VIP the driver (not the plugin) owns
        # the VIP port.
        with self.subnet() as subnet:
            with self.loadbalancer(subnet=subnet, no_delete=True) as lb:
                lb_id = lb['loadbalancer']['id']
                acontext = context.get_admin_context()
                db_port = self.plugin.db._core_plugin.create_port(
                    acontext,
                    {'port': {'network_id': subnet['subnet']['network_id'],
                              'name': '', 'admin_state_up': True,
                              'device_id': lb_id,
                              'device_owner': '',
                              'mac_address': '',
                              'fixed_ips': [],
                              'tenant_id': acontext.tenant_id}})
                port_id = db_port['id']
                self.addCleanup(self.plugin.db._core_plugin.delete_port,
                                acontext, port_id)
                self.plugin.db.update_loadbalancer(
                    acontext, lb_id,
                    {'loadbalancer': {'vip_port_id': port_id}})
                self.plugin.db.delete_loadbalancer(
                    acontext, lb_id, delete_vip_port=True)
                port = self.plugin.db._core_plugin.get_port(acontext,
                                                            port_id)
                self.assertIsNotNone(port)


class ListenerTestBase(LbaasPluginDbTestCase):
    # Shared fixture for listener tests: one subnet and two load
    # balancers (self.lb_id / self.lb_id2) that listeners attach to.

    def setUp(self):
        super(ListenerTestBase, self).setUp()
        network = self._make_network(self.fmt, 'test-net', True)
        self.test_subnet = self._make_subnet(
            self.fmt, network, gateway=attributes.ATTR_NOT_SPECIFIED,
            cidr='10.0.0.0/24')
        self.test_subnet_id = self.test_subnet['subnet']['id']
        lb_res = self._create_loadbalancer(
            self.fmt, subnet_id=self.test_subnet_id)
        lb_res2 = self._create_loadbalancer(
            self.fmt, subnet_id=self.test_subnet_id)
        self.lb = self.deserialize(self.fmt, lb_res)
        self.lb2 = self.deserialize(self.fmt, lb_res2)
        self.lb_id = self.lb['loadbalancer']['id']
        self.lb_id2 = self.lb2['loadbalancer']['id']

    def tearDown(self):
        self._delete_loadbalancer_api(self.lb_id)
super(ListenerTestBase, self).tearDown() def _create_listener_api(self, data): req = self.new_create_request("listeners", data, self.fmt) resp = req.get_response(self.ext_api) body = self.deserialize(self.fmt, resp) return resp, body def _update_listener_api(self, listener_id, data): req = self.new_update_request('listeners', data, listener_id) resp = req.get_response(self.ext_api) body = self.deserialize(self.fmt, req.get_response(self.ext_api)) return resp, body def _delete_listener_api(self, listener_id): req = self.new_delete_request('listeners', listener_id) resp = req.get_response(self.ext_api) return resp def _get_listener_api(self, listener_id): req = self.new_show_request('listeners', listener_id) resp = req.get_response(self.ext_api) body = self.deserialize(self.fmt, resp) return resp, body def _list_listeners_api(self): req = self.new_list_request('listeners') resp = req.get_response(self.ext_api) body = self.deserialize(self.fmt, resp) return resp, body class CertMock(cert_manager.Cert): def __init__(self, cert_container): pass def get_certificate(self): return "mock" def get_intermediates(self): return "mock" def get_private_key(self): return "mock" def get_private_key_passphrase(self): return "mock" class Exceptions(object): def __iter__(self): return self pass class LbaasListenerTests(ListenerTestBase): def test_create_listener(self, **extras): expected = { 'protocol': 'HTTP', 'protocol_port': 80, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'default_pool_id': None, 'loadbalancers': [{'id': self.lb_id}] } expected.update(extras) with self.listener(loadbalancer_id=self.lb_id) as listener: listener_id = listener['listener'].get('id') self.assertTrue(listener_id) actual = {} for k, v in listener['listener'].items(): if k in expected: actual[k] = v self.assertEqual(expected, actual) self._validate_statuses(self.lb_id, listener_id) return listener def test_create_listener_with_default_pool_no_lb(self, **extras): listener_pool_res = 
self._create_pool( self.fmt, lb_const.PROTOCOL_HTTP, lb_const.LB_METHOD_ROUND_ROBIN, loadbalancer_id=self.lb_id) listener_pool = self.deserialize(self.fmt, listener_pool_res) listener_pool_id = listener_pool['pool']['id'] expected = { 'protocol': 'HTTP', 'protocol_port': 80, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'default_pool_id': listener_pool_id } expected.update(extras) with self.listener(default_pool_id=listener_pool_id) as listener: listener_id = listener['listener'].get('id') self.assertTrue(listener_id) actual = {} for k, v in listener['listener'].items(): if k in expected: actual[k] = v self.assertEqual(actual, expected) self._validate_statuses(self.lb_id, listener_id) return listener def test_create_listener_same_port_same_load_balancer(self): with self.listener(loadbalancer_id=self.lb_id, protocol_port=80): self._create_listener(self.fmt, 'HTTP', 80, loadbalancer_id=self.lb_id, expected_res_status=409) def test_create_listener_with_tls_no_default_container(self, **extras): listener_data = { 'protocol': lb_const.PROTOCOL_TERMINATED_HTTPS, 'default_tls_container_ref': None, 'protocol_port': 443, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'loadbalancer_id': self.lb_id, } listener_data.update(extras) self.assertRaises( loadbalancerv2.TLSDefaultContainerNotSpecified, self.plugin.create_listener, context.get_admin_context(), {'listener': listener_data}) def test_create_listener_with_tls_missing_container(self, **extras): default_tls_container_ref = uuidutils.generate_uuid() class ReplaceClass(ex.Exception): def __init__(self, status_code, message): self.status_code = status_code self.message = message pass cfg.CONF.set_override('service_name', 'lbaas', 'service_auth') cfg.CONF.set_override('region', 'RegionOne', 'service_auth') listener_data = { 'protocol': lb_const.PROTOCOL_TERMINATED_HTTPS, 'default_tls_container_ref': default_tls_container_ref, 'sni_container_refs': [], 'protocol_port': 443, 'admin_state_up': True, 'tenant_id': 
self._tenant_id, 'loadbalancer_id': self.lb_id } listener_data.update(extras) with contextlib.nested( mock.patch('neutron_lbaas.services.loadbalancer.plugin.' 'CERT_MANAGER_PLUGIN.CertManager.get_cert'), mock.patch('neutron_lbaas.services.loadbalancer.plugin.' 'CERT_MANAGER_PLUGIN.CertManager.delete_cert') ) as (get_cert_mock, rm_consumer_mock): ex.Exception = ReplaceClass(status_code=404, message='Cert Not Found') get_cert_mock.side_effect = ex.Exception self.assertRaises(loadbalancerv2.TLSContainerNotFound, self.plugin.create_listener, context.get_admin_context(), {'listener': listener_data}) def test_create_listener_with_tls_invalid_service_acct(self, **extras): default_tls_container_ref = uuidutils.generate_uuid() listener_data = { 'protocol': lb_const.PROTOCOL_TERMINATED_HTTPS, 'default_tls_container_ref': default_tls_container_ref, 'sni_container_refs': [], 'protocol_port': 443, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'loadbalancer_id': self.lb_id } listener_data.update(extras) with contextlib.nested( mock.patch('neutron_lbaas.services.loadbalancer.plugin.' 'CERT_MANAGER_PLUGIN.CertManager.get_cert'), mock.patch('neutron_lbaas.services.loadbalancer.plugin.' 
'CERT_MANAGER_PLUGIN.CertManager.delete_cert') ) as (get_cert_mock, rm_consumer_mock): get_cert_mock.side_effect = Exception('RandomFailure') self.assertRaises(loadbalancerv2.CertManagerError, self.plugin.create_listener, context.get_admin_context(), {'listener': listener_data}) def test_create_listener_with_tls_invalid_container(self, **extras): default_tls_container_ref = uuidutils.generate_uuid() cfg.CONF.set_override('service_name', 'lbaas', 'service_auth') cfg.CONF.set_override('region', 'RegionOne', 'service_auth') listener_data = { 'protocol': lb_const.PROTOCOL_TERMINATED_HTTPS, 'default_tls_container_ref': default_tls_container_ref, 'sni_container_refs': [], 'protocol_port': 443, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'loadbalancer_id': self.lb_id } listener_data.update(extras) with contextlib.nested( mock.patch('neutron_lbaas.services.loadbalancer.plugin.' 'cert_parser.validate_cert'), mock.patch('neutron_lbaas.services.loadbalancer.plugin.' 'CERT_MANAGER_PLUGIN.CertManager.get_cert'), mock.patch('neutron_lbaas.services.loadbalancer.plugin.' 
'CERT_MANAGER_PLUGIN.CertManager.delete_cert') ) as (validate_cert_mock, get_cert_mock, rm_consumer_mock): get_cert_mock.start().return_value = CertMock( 'mock_cert') validate_cert_mock.side_effect = exceptions.MisMatchedKey self.assertRaises(loadbalancerv2.TLSContainerInvalid, self.plugin.create_listener, context.get_admin_context(), {'listener': listener_data}) rm_consumer_mock.assert_called_once_with( cert_ref=listener_data['default_tls_container_ref'], project_id=self._tenant_id, resource_ref=cert_manager.CertManager.get_service_url( self.lb_id)) def test_create_listener_with_tls(self, **extras): default_tls_container_ref = uuidutils.generate_uuid() sni_tls_container_ref_1 = uuidutils.generate_uuid() sni_tls_container_ref_2 = uuidutils.generate_uuid() expected = { 'protocol': lb_const.PROTOCOL_TERMINATED_HTTPS, 'default_tls_container_ref': default_tls_container_ref, 'sni_container_refs': [sni_tls_container_ref_1, sni_tls_container_ref_2]} extras['default_tls_container_ref'] = default_tls_container_ref extras['sni_container_refs'] = [sni_tls_container_ref_1, sni_tls_container_ref_2] with contextlib.nested( mock.patch('neutron_lbaas.services.loadbalancer.plugin.' 'cert_parser.validate_cert'), mock.patch('neutron_lbaas.services.loadbalancer.plugin.' 
'CERT_MANAGER_PLUGIN.CertManager.get_cert') ) as (validate_cert_mock, get_cert_mock): get_cert_mock.start().return_value = CertMock( 'mock_cert') validate_cert_mock.start().return_value = True with self.listener(protocol=lb_const.PROTOCOL_TERMINATED_HTTPS, loadbalancer_id=self.lb_id, protocol_port=443, **extras) as listener: self.assertEqual( expected, dict((k, v) for k, v in listener['listener'].items() if k in expected) ) def test_create_listener_loadbalancer_id_does_not_exist(self): self._create_listener(self.fmt, 'HTTP', 80, loadbalancer_id=uuidutils.generate_uuid(), expected_res_status=404) def test_can_create_listener_with_pool_loadbalancer_match(self): with self.subnet() as subnet: with self.loadbalancer(subnet=subnet) as loadbalancer: lb_id = loadbalancer['loadbalancer']['id'] with self.pool(loadbalancer_id=lb_id) as p1: p_id = p1['pool']['id'] with self.listener(default_pool_id=p_id, loadbalancer_id=lb_id): pass def test_cannot_create_listener_with_pool_loadbalancer_mismatch(self): with self.subnet() as subnet: with contextlib.nested(self.loadbalancer(subnet=subnet), self.loadbalancer(subnet=subnet) ) as (lb1, lb2): lb_id1 = lb1['loadbalancer']['id'] lb_id2 = lb2['loadbalancer']['id'] with self.pool(loadbalancer_id=lb_id1) as p1: p_id = p1['pool']['id'] data = {'listener': {'name': '', 'protocol_port': 80, 'protocol': 'HTTP', 'connection_limit': 100, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'default_pool_id': p_id, 'loadbalancer_id': lb_id2}} resp, body = self._create_listener_api(data) self.assertEqual(resp.status_int, webob.exc.HTTPBadRequest.code) def test_update_listener(self): name = 'new_listener' expected_values = {'name': name, 'protocol_port': 80, 'protocol': 'HTTP', 'connection_limit': 100, 'admin_state_up': False, 'tenant_id': self._tenant_id, 'loadbalancers': [{'id': self.lb_id}]} with self.listener(name=name, loadbalancer_id=self.lb_id) as listener: listener_id = listener['listener']['id'] data = {'listener': {'name': name, 
'connection_limit': 100, 'admin_state_up': False}} resp, body = self._update_listener_api(listener_id, data) for k in expected_values: self.assertEqual(expected_values[k], body['listener'][k]) self._validate_statuses(self.lb_id, listener_id, listener_disabled=True) def test_update_listener_with_tls(self): default_tls_container_ref = uuidutils.generate_uuid() sni_tls_container_ref_1 = uuidutils.generate_uuid() sni_tls_container_ref_2 = uuidutils.generate_uuid() sni_tls_container_ref_3 = uuidutils.generate_uuid() sni_tls_container_ref_4 = uuidutils.generate_uuid() sni_tls_container_ref_5 = uuidutils.generate_uuid() listener_data = { 'protocol': lb_const.PROTOCOL_TERMINATED_HTTPS, 'default_tls_container_ref': default_tls_container_ref, 'sni_container_refs': [sni_tls_container_ref_1, sni_tls_container_ref_2], 'protocol_port': 443, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'loadbalancer_id': self.lb_id } with contextlib.nested( mock.patch('neutron_lbaas.services.loadbalancer.plugin.' 'cert_parser.validate_cert'), mock.patch('neutron_lbaas.services.loadbalancer.plugin.' 'CERT_MANAGER_PLUGIN.CertManager.get_cert') ) as (validate_cert_mock, get_cert_mock): get_cert_mock.start().return_value = CertMock( 'mock_cert') validate_cert_mock.start().return_value = True # Default container and two SNI containers # Test order and validation behavior. listener = self.plugin.create_listener(context.get_admin_context(), {'listener': listener_data}) self.assertEqual([sni_tls_container_ref_1, sni_tls_container_ref_2], listener['sni_container_refs']) # Default container and two other SNI containers # Test order and validation behavior. 
listener_data.pop('loadbalancer_id') listener_data.pop('protocol') listener_data.pop('provisioning_status') listener_data.pop('operating_status') listener_data['sni_container_refs'] = [sni_tls_container_ref_3, sni_tls_container_ref_4] listener = self.plugin.update_listener( context.get_admin_context(), listener['id'], {'listener': listener_data} ) self.assertEqual([sni_tls_container_ref_3, sni_tls_container_ref_4], listener['sni_container_refs']) # Default container, two old SNI containers ordered differently # and one new SNI container. # Test order and validation behavior. listener_data.pop('protocol') listener_data['sni_container_refs'] = [sni_tls_container_ref_4, sni_tls_container_ref_3, sni_tls_container_ref_5] listener = self.plugin.update_listener(context.get_admin_context(), listener['id'], {'listener': listener_data}) self.assertEqual([sni_tls_container_ref_4, sni_tls_container_ref_3, sni_tls_container_ref_5], listener['sni_container_refs']) def test_delete_listener(self): with self.listener(no_delete=True, loadbalancer_id=self.lb_id) as listener: listener_id = listener['listener']['id'] resp = self._delete_listener_api(listener_id) self.assertEqual(webob.exc.HTTPNoContent.code, resp.status_int) resp, body = self._get_loadbalancer_api(self.lb_id) self.assertEqual(0, len(body['loadbalancer']['listeners'])) def test_show_listener(self): name = 'show_listener' expected_values = {'name': name, 'protocol_port': 80, 'protocol': 'HTTP', 'connection_limit': -1, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'default_pool_id': None, 'loadbalancers': [{'id': self.lb_id}]} with self.listener(name=name, loadbalancer_id=self.lb_id) as listener: listener_id = listener['listener']['id'] resp, body = self._get_listener_api(listener_id) for k in expected_values: self.assertEqual(expected_values[k], body['listener'][k]) def test_list_listeners(self): name = 'list_listeners' expected_values = {'name': name, 'protocol_port': 80, 'protocol': 'HTTP', 'connection_limit': 
-1, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'loadbalancers': [{'id': self.lb_id}]} with self.listener(name=name, loadbalancer_id=self.lb_id) as listener: listener_id = listener['listener']['id'] expected_values['id'] = listener_id resp, body = self._list_listeners_api() listener_list = body['listeners'] self.assertEqual(1, len(listener_list)) for k in expected_values: self.assertEqual(expected_values[k], listener_list[0][k]) def test_list_listeners_with_sort_emulated(self): with self.listener(name='listener1', protocol_port=81, loadbalancer_id=self.lb_id) as listener1: with self.listener(name='listener2', protocol_port=82, loadbalancer_id=self.lb_id) as listener2: with self.listener(name='listener3', protocol_port=83, loadbalancer_id=self.lb_id) as listener3: self._test_list_with_sort( 'listener', (listener1, listener2, listener3), [('protocol_port', 'asc'), ('name', 'desc')] ) def test_list_listeners_with_pagination_emulated(self): with self.listener(name='listener1', protocol_port=80, loadbalancer_id=self.lb_id) as listener1: with self.listener(name='listener2', protocol_port=81, loadbalancer_id=self.lb_id) as listener2: with self.listener(name='listener3', protocol_port=82, loadbalancer_id=self.lb_id) as listener3: self._test_list_with_pagination( 'listener', (listener1, listener2, listener3), ('name', 'asc'), 2, 2 ) def test_list_listeners_with_pagination_reverse_emulated(self): with self.listener(name='listener1', protocol_port=80, loadbalancer_id=self.lb_id) as listener1: with self.listener(name='listener2', protocol_port=81, loadbalancer_id=self.lb_id) as listener2: with self.listener(name='listener3', protocol_port=82, loadbalancer_id=self.lb_id) as listener3: self._test_list_with_pagination( 'listener', (listener3, listener2, listener1), ('name', 'desc'), 2, 2 ) class LbaasL7Tests(ListenerTestBase): def test_create_l7policy_invalid_listener_id(self, **extras): self._create_l7policy(self.fmt, uuidutils.generate_uuid(), 
lb_const.L7_POLICY_ACTION_REJECT, expected_res_status=webob.exc.HTTPNotFound.code) def test_create_l7policy_redirect_no_pool(self, **extras): l7policy_data = { 'name': '', 'action': lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL, 'description': '', 'position': 1, 'redirect_pool_id': None, 'redirect_url': 'http://radware.com', 'tenant_id': self._tenant_id, 'admin_state_up': True, } l7policy_data.update(extras) with self.listener(loadbalancer_id=self.lb_id) as listener: ctx = context.get_admin_context() l7policy_data['listener_id'] = listener['listener']['id'] l7policy_data['action'] = ( lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL) self.assertRaises( l7.L7PolicyRedirectPoolIdMissing, self.plugin.create_l7policy, ctx, {'l7policy': l7policy_data}) def test_create_l7policy_redirect_invalid_pool(self, **extras): l7policy_data = { 'name': '', 'action': lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL, 'description': '', 'position': 1, 'redirect_pool_id': None, 'tenant_id': self._tenant_id, 'admin_state_up': True, } l7policy_data.update(extras) with self.listener(loadbalancer_id=self.lb_id) as listener: ctx = context.get_admin_context() l7policy_data['listener_id'] = listener['listener']['id'] # Test pool redirect action with invalid pool id specified l7policy_data['redirect_pool_id'] = uuidutils.generate_uuid() self.assertRaises( loadbalancerv2.EntityNotFound, self.plugin.create_l7policy, ctx, {'l7policy': l7policy_data}) def test_create_l7policy_redirect_foreign_pool(self, **extras): l7policy_data = { 'name': '', 'action': lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL, 'description': '', 'position': 1, 'redirect_pool_id': None, 'tenant_id': self._tenant_id, 'admin_state_up': True, } l7policy_data.update(extras) with self.listener(loadbalancer_id=self.lb_id) as listener: ctx = context.get_admin_context() l7policy_data['listener_id'] = listener['listener']['id'] # Test pool redirect action with another loadbalancer pool id with self.pool(loadbalancer_id=self.lb_id2) as p: 
l7policy_data['redirect_pool_id'] = p['pool']['id'] self.assertRaises( sharedpools.ListenerAndPoolMustBeOnSameLoadbalancer, self.plugin.create_l7policy, ctx, {'l7policy': l7policy_data}) def test_create_l7policy_redirect_no_url(self, **extras): l7policy_data = { 'name': '', 'action': lb_const.L7_POLICY_ACTION_REDIRECT_TO_URL, 'description': '', 'position': 1, 'redirect_pool_id': None, 'redirect_url': 'http://radware.com', 'tenant_id': self._tenant_id, 'admin_state_up': True, } l7policy_data.update(extras) with self.listener(loadbalancer_id=self.lb_id) as listener: ctx = context.get_admin_context() l7policy_data['listener_id'] = listener['listener']['id'] # Test url redirect action without url specified del l7policy_data['redirect_url'] l7policy_data['action'] = lb_const.L7_POLICY_ACTION_REDIRECT_TO_URL self.assertRaises( l7.L7PolicyRedirectUrlMissing, self.plugin.create_l7policy, ctx, {'l7policy': l7policy_data}) def test_create_l7policy_redirect_invalid_url(self, **extras): l7policy_data = { 'name': '', 'action': lb_const.L7_POLICY_ACTION_REDIRECT_TO_URL, 'description': '', 'position': 1, 'redirect_pool_id': None, 'redirect_url': 'http://radware.com', 'tenant_id': self._tenant_id, 'admin_state_up': True, } l7policy_data.update(extras) with self.listener(loadbalancer_id=self.lb_id) as listener: l7policy_data['listener_id'] = listener['listener']['id'] # Test url redirect action with invalid url specified try: with contextlib.nested( self.l7policy(listener['listener']['id'], action=lb_const.L7_POLICY_ACTION_REDIRECT_TO_URL, redirect_url='https:/acme.com')): self.assertTrue(False) except webob.exc.HTTPClientError: pass def test_create_l7policy_invalid_position(self, **extras): l7policy_data = { 'name': '', 'action': lb_const.L7_POLICY_ACTION_REDIRECT_TO_URL, 'description': '', 'position': 1, 'redirect_pool_id': None, 'redirect_url': 'http://radware.com', 'tenant_id': self._tenant_id, 'admin_state_up': True, } l7policy_data.update(extras) with 
self.listener(loadbalancer_id=self.lb_id) as listener: l7policy_data['listener_id'] = listener['listener']['id'] # Test invalid zero position for policy try: with contextlib.nested( self.l7policy(listener['listener']['id'], position=0)): self.assertTrue(False) except webob.exc.HTTPClientError: pass def test_create_l7policy(self, **extras): expected = { 'action': lb_const.L7_POLICY_ACTION_REJECT, 'redirect_pool_id': None, 'redirect_url': None, 'tenant_id': self._tenant_id, } expected.update(extras) with self.listener(loadbalancer_id=self.lb_id) as listener: listener_id = listener['listener']['id'] with self.l7policy(listener_id) as p: expected['listener_id'] = listener_id actual = {} for k, v in p['l7policy'].items(): if k in expected: actual[k] = v self.assertEqual(actual, expected) self._validate_statuses(self.lb_id, listener_id, p['l7policy']['id']) def test_create_l7policy_pool_redirect(self, **extras): expected = { 'action': lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL, 'redirect_pool_id': None, 'redirect_url': None, 'tenant_id': self._tenant_id, } expected.update(extras) with self.listener(loadbalancer_id=self.lb_id) as listener: listener_id = listener['listener']['id'] with self.pool(loadbalancer_id=self.lb_id) as pool: pool_id = pool['pool']['id'] with self.l7policy( listener_id, action=lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL, redirect_pool_id=pool_id) as p: expected['listener_id'] = listener_id expected['redirect_pool_id'] = pool_id actual = {} for k, v in p['l7policy'].items(): if k in expected: actual[k] = v self.assertEqual(actual, expected) def test_l7policy_pool_deletion(self, **extras): expected = { 'action': lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL, 'redirect_pool_id': None, 'redirect_url': None, 'tenant_id': self._tenant_id, } expected.update(extras) with contextlib.nested( self.listener(loadbalancer_id=self.lb_id), self.listener(loadbalancer_id=self.lb_id, protocol_port=8080)) as (listener1, listener2): with contextlib.nested( 
self.pool(loadbalancer_id=self.lb_id, no_delete=True), self.pool(loadbalancer_id=self.lb_id)) as (pool1, pool2): with contextlib.nested( self.l7policy( listener1['listener']['id'], action=lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL, redirect_pool_id=pool1['pool']['id']), self.l7policy( listener1['listener']['id'], action=lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL, redirect_pool_id=pool2['pool']['id']), self.l7policy( listener2['listener']['id'], action=lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL, redirect_pool_id=pool1['pool']['id'])) as ( policy1, policy2, policy3): ctx = context.get_admin_context() self.plugin.delete_pool(ctx, pool1['pool']['id']) l7policy1 = self.plugin.get_l7policy( ctx, policy1['l7policy']['id']) self.assertEqual(l7policy1['action'], lb_const.L7_POLICY_ACTION_REJECT) self.assertEqual(l7policy1['redirect_pool_id'], None) l7policy3 = self.plugin.get_l7policy( ctx, policy3['l7policy']['id']) self.assertEqual(l7policy3['action'], lb_const.L7_POLICY_ACTION_REJECT) self.assertEqual(l7policy3['redirect_pool_id'], None) def test_create_l7policies_ordering(self, **extras): with self.listener(loadbalancer_id=self.lb_id) as listener: listener_id = listener['listener']['id'] with contextlib.nested( self.l7policy(listener_id, name="1"), self.l7policy(listener_id, name="2"), self.l7policy(listener_id, name="3"), self.l7policy(listener_id, position=1, name="4"), self.l7policy(listener_id, position=2, name="5"), self.l7policy(listener_id, position=4, name="6"), self.l7policy(listener_id, name="7"), self.l7policy(listener_id, position=8, name="8"), self.l7policy(listener_id, position=1, name="9"), self.l7policy(listener_id, position=1, name="10") ): listener_db = self.plugin.db._get_resource( context.get_admin_context(), models.Listener, listener['listener']['id']) names = ['10', '9', '4', '5', '1', '6', '2', '3', '7', '8'] for pos in range(0, 10): self.assertEqual( listener_db.l7_policies[pos]['position'], pos + 1) self.assertEqual( 
listener_db.l7_policies[pos]['name'], names[pos]) def test_update_l7policy(self, **extras): expected = { 'admin_state_up': False, 'action': lb_const.L7_POLICY_ACTION_REDIRECT_TO_URL, 'redirect_pool_id': None, 'redirect_url': 'redirect_url', 'tenant_id': self._tenant_id, 'position': 1, } expected.update(extras) with self.listener(loadbalancer_id=self.lb_id) as listener: listener_id = listener['listener']['id'] with self.l7policy(listener_id) as p: l7policy_id = p['l7policy']['id'] data = { 'l7policy': { 'action': lb_const.L7_POLICY_ACTION_REDIRECT_TO_URL, 'redirect_url': 'redirect_url', 'admin_state_up': False}} ctx = context.get_admin_context() self.plugin.update_l7policy(ctx, l7policy_id, data) l7policy = self.plugin.get_l7policy(ctx, l7policy_id) actual = {} for k, v in l7policy.items(): if k in expected: actual[k] = v self.assertEqual(actual, expected) self._validate_statuses(self.lb_id, listener_id, p['l7policy']['id'], l7policy_disabled=True) def test_update_l7policies_ordering(self, **extras): expected = { 'action': lb_const.L7_POLICY_ACTION_REJECT, 'redirect_pool_id': None, 'redirect_url': '', 'tenant_id': self._tenant_id, } expected.update(extras) with self.listener(loadbalancer_id=self.lb_id) as listener: listener_id = listener['listener']['id'] with contextlib.nested( self.l7policy(listener_id, name="1"), self.l7policy(listener_id, name="2"), self.l7policy(listener_id, name="3"), self.l7policy(listener_id, name="4"), self.l7policy(listener_id, name="5"), self.l7policy(listener_id, name="6"), self.l7policy(listener_id, name="7"), self.l7policy(listener_id, name="8"), self.l7policy(listener_id, name="9"), self.l7policy(listener_id, name="10"), ) as (p1, p2, p3, p4, p5, p6, p7, p8, p9, p10): c = context.get_admin_context() listener_db = self.plugin.db._get_resource( context.get_admin_context(), models.Listener, listener['listener']['id']) expected['position'] = 1 self.plugin.db.update_status( c, models.L7Policy, p2['l7policy']['id'], lb_const.OFFLINE) 
self.plugin.update_l7policy(c, p2['l7policy']['id'], {'l7policy': expected}) expected['position'] = 3 self.plugin.db.update_status( c, models.L7Policy, p1['l7policy']['id'], lb_const.OFFLINE) self.plugin.update_l7policy(c, p1['l7policy']['id'], {'l7policy': expected}) expected['position'] = 4 self.plugin.db.update_status( c, models.L7Policy, p6['l7policy']['id'], lb_const.OFFLINE) self.plugin.update_l7policy(c, p6['l7policy']['id'], {'l7policy': expected}) expected['position'] = 11 self.plugin.db.update_status( c, models.L7Policy, p2['l7policy']['id'], lb_const.OFFLINE) self.plugin.update_l7policy(c, p2['l7policy']['id'], {'l7policy': expected}) expected['position'] = 1 self.plugin.db.update_status( c, models.L7Policy, p1['l7policy']['id'], lb_const.OFFLINE) self.plugin.update_l7policy(c, p1['l7policy']['id'], {'l7policy': expected}) expected['position'] = 8 self.plugin.db.update_status( c, models.L7Policy, p5['l7policy']['id'], lb_const.OFFLINE) self.plugin.update_l7policy(c, p5['l7policy']['id'], {'l7policy': expected}) expected['position'] = 3 self.plugin.db.update_status( c, models.L7Policy, p10['l7policy']['id'], lb_const.OFFLINE) self.plugin.update_l7policy(c, p10['l7policy']['id'], {'l7policy': expected}) listener_db = self.plugin.db._get_resource( context.get_admin_context(), models.Listener, listener['listener']['id']) names = ['1', '3', '10', '6', '4', '7', '8', '9', '5', '2'] for pos in range(0, 10): self.assertEqual( listener_db.l7_policies[pos]['position'], pos + 1) self.assertEqual( listener_db.l7_policies[pos]['name'], names[pos]) def test_delete_l7policy(self, **extras): expected = { 'position': 1, 'action': lb_const.L7_POLICY_ACTION_REJECT, 'redirect_pool_id': None, 'redirect_url': '', 'tenant_id': self._tenant_id, } expected.update(extras) with self.listener(loadbalancer_id=self.lb_id) as listener: listener_id = listener['listener']['id'] with contextlib.nested( self.l7policy(listener_id, name="0"), self.l7policy(listener_id, name="1"), 
self.l7policy(listener_id, name="2"), self.l7policy(listener_id, name="3", no_delete=True), self.l7policy(listener_id, name="4"), self.l7policy(listener_id, name="5", no_delete=True), self.l7policy(listener_id, name="6") ) as (p0, p1, p2, p3, p4, p5, p6): c = context.get_admin_context() self.plugin.db.update_status( c, models.L7Policy, p3['l7policy']['id'], lb_const.OFFLINE) self.plugin.delete_l7policy(c, p3['l7policy']['id']) self.plugin.db.update_status( c, models.L7Policy, p5['l7policy']['id'], lb_const.OFFLINE) self.plugin.delete_l7policy(c, p5['l7policy']['id']) listener_db = self.plugin.db._get_resource( context.get_admin_context(), models.Listener, listener['listener']['id']) names = ['0', '1', '2', '4', '6'] for pos in range(0, 4): self.assertEqual( listener_db.l7_policies[pos]['position'], pos + 1) self.assertEqual( listener_db.l7_policies[pos]['name'], names[pos]) self.assertRaises( loadbalancerv2.EntityNotFound, self.plugin.get_l7policy, c, p3['l7policy']['id']) self.assertRaises( loadbalancerv2.EntityNotFound, self.plugin.get_l7policy, c, p5['l7policy']['id']) def test_show_l7policy(self, **extras): expected = { 'position': 1, 'action': lb_const.L7_POLICY_ACTION_REJECT, 'redirect_pool_id': None, 'redirect_url': None, 'tenant_id': self._tenant_id, } expected.update(extras) with self.listener(loadbalancer_id=self.lb_id) as listener: listener_id = listener['listener']['id'] with self.l7policy(listener_id, name="0") as p: req = self.new_show_request('l7policies', p['l7policy']['id'], fmt=self.fmt) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) actual = {} for k, v in res['l7policy'].items(): if k in expected: actual[k] = v self.assertEqual(expected, actual) return p def test_list_l7policies_with_sort_emulated(self): with self.listener(loadbalancer_id=self.lb_id) as listener: listener_id = listener['listener']['id'] with contextlib.nested(self.l7policy(listener_id, name="b"), self.l7policy(listener_id, name="c"), self.l7policy(listener_id, 
name="a") ) as (p1, p2, p3): self._test_list_with_sort('l7policy', (p3, p1, p2), [('name', 'asc')], resources='l7policies') def test_list_l7policies_with_pagination_emulated(self): with self.listener(loadbalancer_id=self.lb_id) as listener: listener_id = listener['listener']['id'] with contextlib.nested(self.l7policy(listener_id, name="b"), self.l7policy(listener_id, name="c"), self.l7policy(listener_id, name="e"), self.l7policy(listener_id, name="d"), self.l7policy(listener_id, name="f"), self.l7policy(listener_id, name="g"), self.l7policy(listener_id, name="a") ) as (p1, p2, p3, p4, p5, p6, p7): self._test_list_with_pagination( 'l7policy', (p6, p5, p3, p4, p2, p1, p7), ('name', 'desc'), 2, 4, resources='l7policies') def test_list_l7policies_with_pagination_reverse_emulated(self): with self.listener(loadbalancer_id=self.lb_id) as listener: listener_id = listener['listener']['id'] with contextlib.nested(self.l7policy(listener_id, name="b"), self.l7policy(listener_id, name="c"), self.l7policy(listener_id, name="e"), self.l7policy(listener_id, name="d"), self.l7policy(listener_id, name="f"), self.l7policy(listener_id, name="g"), self.l7policy(listener_id, name="a") ) as (p1, p2, p3, p4, p5, p6, p7): self._test_list_with_pagination_reverse( 'l7policy', (p6, p5, p3, p4, p2, p1, p7), ('name', 'desc'), 2, 4, resources='l7policies') def test_create_l7rule_invalid_policy_id(self, **extras): with self.listener(loadbalancer_id=self.lb_id) as listener: with self.l7policy(listener['listener']['id']): self._create_l7policy_rule( self.fmt, uuidutils.generate_uuid(), lb_const.L7_RULE_TYPE_HOST_NAME, lb_const.L7_RULE_COMPARE_TYPE_REGEX, 'value', expected_res_status=webob.exc.HTTPNotFound.code) def test_create_invalid_l7rule(self, **extras): rule = { 'type': lb_const.L7_RULE_TYPE_HEADER, 'compare_type': lb_const.L7_RULE_COMPARE_TYPE_REGEX, 'value': '*' } with self.listener(loadbalancer_id=self.lb_id) as listener: with self.l7policy(listener['listener']['id']) as policy: policy_id = 
policy['l7policy']['id'] ctx = context.get_admin_context() # test invalid regex self.assertRaises( l7.L7RuleInvalidRegex, self.plugin.db.create_l7policy_rule, ctx, rule, policy_id) # test missing key for HEADER type rule['value'] = '/*/' self.assertRaises( l7.L7RuleKeyMissing, self.plugin.db.create_l7policy_rule, ctx, rule, policy_id) # test missing key for COOKIE type rule['type'] = lb_const.L7_RULE_TYPE_COOKIE self.assertRaises( l7.L7RuleKeyMissing, self.plugin.db.create_l7policy_rule, ctx, rule, policy_id) # test invalid key for HEADER type rule['type'] = lb_const.L7_RULE_TYPE_HEADER rule['key'] = '/' self.assertRaises( l7.L7RuleInvalidKey, self.plugin.db.create_l7policy_rule, ctx, rule, policy_id) # test invalid value for COOKIE type rule['compare_type'] =\ lb_const.L7_RULE_COMPARE_TYPE_CONTAINS rule['type'] = lb_const.L7_RULE_TYPE_COOKIE rule['key'] = 'a' rule['value'] = ';' self.assertRaises( l7.L7RuleInvalidCookieValue, self.plugin.db.create_l7policy_rule, ctx, rule, policy_id) # test invalid value for !COOKIE type rule['type'] = lb_const.L7_RULE_TYPE_PATH rule['value'] = ' ' self.assertRaises( l7.L7RuleInvalidHeaderValue, self.plugin.db.create_l7policy_rule, ctx, rule, policy_id) # test invalid value for !COOKIE type quated rule['value'] = ' ' self.assertRaises( l7.L7RuleInvalidHeaderValue, self.plugin.db.create_l7policy_rule, ctx, rule, policy_id) # test unsupported compare type for FILE type rule['type'] = lb_const.L7_RULE_TYPE_FILE_TYPE self.assertRaises( l7.L7RuleUnsupportedCompareType, self.plugin.db.create_l7policy_rule, ctx, rule, policy_id) def test_create_l7rule(self, **extras): expected = { 'type': lb_const.L7_RULE_TYPE_HOST_NAME, 'compare_type': lb_const.L7_RULE_COMPARE_TYPE_EQUAL_TO, 'key': None, 'value': 'value1' } with self.listener(loadbalancer_id=self.lb_id) as listener: with self.l7policy(listener['listener']['id']) as policy: policy_id = policy['l7policy']['id'] with contextlib.nested( self.l7policy_rule(policy_id), 
self.l7policy_rule(policy_id, key='key1'), self.l7policy_rule(policy_id, value='value2'), self.l7policy_rule(policy_id, type=lb_const.L7_RULE_TYPE_PATH), self.l7policy_rule( policy_id, compare_type=lb_const.L7_RULE_COMPARE_TYPE_REGEX), self.l7policy_rule( policy_id, invert=True) ) as (r_def, r_key, r_value, r_type, r_compare_type, r_invert): ctx = context.get_admin_context() rdb = self.plugin.get_l7policy_rule( ctx, r_def['rule']['id'], policy_id) actual = {} for k, v in rdb.items(): if k in expected: actual[k] = v self.assertEqual(actual, expected) rdb = self.plugin.get_l7policy_rule( ctx, r_key['rule']['id'], policy_id) expected['key'] = 'key1' actual = {} for k, v in rdb.items(): if k in expected: actual[k] = v self.assertEqual(actual, expected) rdb = self.plugin.get_l7policy_rule( ctx, r_value['rule']['id'], policy_id) expected['key'] = None expected['value'] = 'value2' actual = {} for k, v in rdb.items(): if k in expected: actual[k] = v self.assertEqual(actual, expected) rdb = self.plugin.get_l7policy_rule( ctx, r_type['rule']['id'], policy_id) expected['value'] = 'value1' expected['type'] = lb_const.L7_RULE_TYPE_PATH actual = {} for k, v in rdb.items(): if k in expected: actual[k] = v self.assertEqual(actual, expected) rdb = self.plugin.get_l7policy_rule( ctx, r_compare_type['rule']['id'], policy_id) expected['type'] = lb_const.L7_RULE_TYPE_HOST_NAME expected['compare_type'] =\ lb_const.L7_RULE_COMPARE_TYPE_REGEX actual = {} for k, v in rdb.items(): if k in expected: actual[k] = v self.assertEqual(actual, expected) rdb = self.plugin.get_l7policy_rule( ctx, r_invert['rule']['id'], policy_id) expected['invert'] = True expected['compare_type'] =\ lb_const.L7_RULE_COMPARE_TYPE_EQUAL_TO actual = {} for k, v in rdb.items(): if k in expected: actual[k] = v self.assertEqual(actual, expected) def test_invalid_update_l7rule(self, **extras): rule = { 'type': lb_const.L7_RULE_TYPE_HEADER, 'compare_type': lb_const.L7_RULE_COMPARE_TYPE_REGEX, 'value': '*' } with 
self.listener(loadbalancer_id=self.lb_id) as listener: with self.l7policy(listener['listener']['id']) as policy: policy_id = policy['l7policy']['id'] with self.l7policy_rule(policy_id) as r: rule_id = r['rule']['id'] ctx = context.get_admin_context() # test invalid regex self.assertRaises( l7.L7RuleInvalidRegex, self.plugin.db.update_l7policy_rule, ctx, rule_id, rule, policy_id) # test missing key for HEADER type rule['value'] = '/*/' self.assertRaises( l7.L7RuleKeyMissing, self.plugin.db.update_l7policy_rule, ctx, rule_id, rule, policy_id) # test missing key for COOKIE type rule['type'] = lb_const.L7_RULE_TYPE_COOKIE self.assertRaises( l7.L7RuleKeyMissing, self.plugin.db.update_l7policy_rule, ctx, rule_id, rule, policy_id) # test invalid key for HEADER type rule['type'] = lb_const.L7_RULE_TYPE_HEADER rule['key'] = '/' self.assertRaises( l7.L7RuleInvalidKey, self.plugin.db.update_l7policy_rule, ctx, rule_id, rule, policy_id) # test invalid value for COOKIE type rule['compare_type'] =\ lb_const.L7_RULE_COMPARE_TYPE_CONTAINS rule['type'] = lb_const.L7_RULE_TYPE_COOKIE rule['key'] = 'a' rule['value'] = ';' self.assertRaises( l7.L7RuleInvalidCookieValue, self.plugin.db.update_l7policy_rule, ctx, rule_id, rule, policy_id) # test invalid value for !COOKIE type rule['type'] = lb_const.L7_RULE_TYPE_PATH rule['value'] = ' ' self.assertRaises( l7.L7RuleInvalidHeaderValue, self.plugin.db.update_l7policy_rule, ctx, rule_id, rule, policy_id) # test invalid value for !COOKIE type quated rule['value'] = ' ' self.assertRaises( l7.L7RuleInvalidHeaderValue, self.plugin.db.update_l7policy_rule, ctx, rule_id, rule, policy_id) # test unsupported compare type for FILE type rule['type'] = lb_const.L7_RULE_TYPE_FILE_TYPE self.assertRaises( l7.L7RuleUnsupportedCompareType, self.plugin.db.update_l7policy_rule, ctx, rule_id, rule, policy_id) def test_update_l7rule(self, **extras): with self.listener(loadbalancer_id=self.lb_id) as listener: with self.l7policy(listener['listener']['id']) as 
policy: policy_id = policy['l7policy']['id'] with self.l7policy_rule(policy_id) as r: req = self.new_show_request('l7policies', policy_id, fmt=self.fmt) policy_show = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self.assertEqual( len(policy_show['l7policy']['rules']), 1) expected = {} expected['type'] = lb_const.L7_RULE_TYPE_HEADER expected['compare_type'] = ( lb_const.L7_RULE_COMPARE_TYPE_REGEX) expected['value'] = '/.*/' expected['key'] = 'HEADER1' expected['invert'] = True expected['admin_state_up'] = False req = self.new_update_request( 'l7policies', {'rule': expected}, policy_id, subresource='rules', sub_id=r['rule']['id']) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) actual = {} for k, v in res['rule'].items(): if k in expected: actual[k] = v self.assertEqual(actual, expected) self._validate_statuses(self.lb_id, listener['listener']['id'], policy_id, r['rule']['id'], l7rule_disabled=True) def test_delete_l7rule(self): with self.listener(loadbalancer_id=self.lb_id) as listener: with self.l7policy(listener['listener']['id']) as policy: policy_id = policy['l7policy']['id'] with contextlib.nested( self.l7policy_rule(policy_id, no_delete=True), self.l7policy_rule(policy_id, no_delete=True) ) as (r0, r1): req = self.new_show_request('l7policies', policy_id, fmt=self.fmt) policy_update = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self.assertEqual( len(policy_update['l7policy']['rules']), 2) req = self.new_delete_request('l7policies', policy_id, subresource='rules', sub_id=r0['rule']['id']) res = req.get_response(self.ext_api) self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) req = self.new_show_request('l7policies', policy_id, fmt=self.fmt) policy_update = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self.assertEqual( len(policy_update['l7policy']['rules']), 1) def test_list_l7rules_with_sort_emulated(self): with self.listener(loadbalancer_id=self.lb_id) as listener: listener_id = 
listener['listener']['id'] with self.l7policy(listener_id) as policy: policy_id = policy['l7policy']['id'] with contextlib.nested( self.l7policy_rule(policy_id, value="b"), self.l7policy_rule(policy_id, value="c"), self.l7policy_rule(policy_id, value="a") ) as (r1, r2, r3): self._test_list_with_sort('l7policy', (r3, r1, r2), [('value', 'asc')], id=policy_id, resources='l7policies', subresource='rule', subresources='rules') def test_list_l7rules_with_pagination_emulated(self): with self.listener(loadbalancer_id=self.lb_id) as listener: listener_id = listener['listener']['id'] with self.l7policy(listener_id) as policy: policy_id = policy['l7policy']['id'] with contextlib.nested( self.l7policy_rule(policy_id, value="b"), self.l7policy_rule(policy_id, value="c"), self.l7policy_rule(policy_id, value="e"), self.l7policy_rule(policy_id, value="d"), self.l7policy_rule(policy_id, value="f"), self.l7policy_rule(policy_id, value="g"), self.l7policy_rule(policy_id, value="a") ) as (r1, r2, r3, r4, r5, r6, r7): self._test_list_with_pagination( 'l7policy', (r6, r5, r3, r4, r2, r1, r7), ('value', 'desc'), 2, 4, id=policy_id, resources='l7policies', subresource='rule', subresources='rules') def test_list_l7rules_with_pagination_reverse_emulated(self): with self.listener(loadbalancer_id=self.lb_id) as listener: listener_id = listener['listener']['id'] with self.l7policy(listener_id) as p: policy_id = p['l7policy']['id'] with contextlib.nested( self.l7policy_rule(policy_id, value="b"), self.l7policy_rule(policy_id, value="c"), self.l7policy_rule(policy_id, value="e"), self.l7policy_rule(policy_id, value="d"), self.l7policy_rule(policy_id, value="f"), self.l7policy_rule(policy_id, value="g"), self.l7policy_rule(policy_id, value="a") ) as (r1, r2, r3, r4, r5, r6, r7): self._test_list_with_pagination_reverse( 'l7policy', (r6, r5, r3, r4, r2, r1, r7), ('value', 'desc'), 2, 4, id=policy_id, resources='l7policies', subresource='rule', subresources='rules') class 
PoolTestBase(ListenerTestBase): def setUp(self): super(PoolTestBase, self).setUp() listener_res = self._create_listener(self.fmt, lb_const.PROTOCOL_HTTP, 80, self.lb_id) listener_res2 = self._create_listener(self.fmt, lb_const.PROTOCOL_HTTP, 80, self.lb_id2) self.def_listener = self.deserialize(self.fmt, listener_res) self.def_listener2 = self.deserialize(self.fmt, listener_res2) self.listener_id = self.def_listener['listener']['id'] self.listener_id2 = self.def_listener2['listener']['id'] self.loadbalancer_id = self.lb_id self.loadbalancer_id2 = self.lb_id2 def tearDown(self): self._delete_listener_api(self.listener_id) super(PoolTestBase, self).tearDown() def _create_pool_api(self, data): req = self.new_create_request("pools", data, self.fmt) resp = req.get_response(self.ext_api) body = self.deserialize(self.fmt, resp) return resp, body def _update_pool_api(self, pool_id, data): req = self.new_update_request('pools', data, pool_id) resp = req.get_response(self.ext_api) body = self.deserialize(self.fmt, resp) return resp, body def _delete_pool_api(self, pool_id): req = self.new_delete_request('pools', pool_id) resp = req.get_response(self.ext_api) return resp def _get_pool_api(self, pool_id): req = self.new_show_request('pools', pool_id) resp = req.get_response(self.ext_api) body = self.deserialize(self.fmt, resp) return resp, body def _list_pools_api(self): req = self.new_list_request('pools') resp = req.get_response(self.ext_api) body = self.deserialize(self.fmt, resp) return resp, body class LbaasPoolTests(PoolTestBase): def test_create_pool(self, **extras): expected = { 'name': '', 'description': '', 'protocol': 'HTTP', 'lb_algorithm': 'ROUND_ROBIN', 'admin_state_up': True, 'tenant_id': self._tenant_id, 'healthmonitor_id': None, 'members': [] } expected.update(extras) with self.pool(listener_id=self.listener_id, **extras) as pool: pool_id = pool['pool'].get('id') if ('session_persistence' in expected.keys() and expected['session_persistence'] is not None and 
not expected['session_persistence'].get('cookie_name')): expected['session_persistence']['cookie_name'] = None self.assertTrue(pool_id) actual = {} for k, v in pool['pool'].items(): if k in expected: actual[k] = v self.assertEqual(expected, actual) self._validate_statuses(self.lb_id, self.listener_id, pool_id=pool_id) return pool def test_create_pool_with_loadbalancer_no_listener(self, **extras): expected = { 'name': '', 'description': '', 'protocol': 'HTTP', 'lb_algorithm': 'ROUND_ROBIN', 'admin_state_up': True, 'tenant_id': self._tenant_id, 'healthmonitor_id': None, 'members': [] } expected.update(extras) with self.pool(loadbalancer_id=self.loadbalancer_id, **extras) as pool: pool_id = pool['pool'].get('id') if 'session_persistence' in expected: if not expected['session_persistence'].get('cookie_name'): expected['session_persistence']['cookie_name'] = None self.assertTrue(pool_id) actual = {} for k, v in pool['pool'].items(): if k in expected: actual[k] = v self.assertEqual(expected, actual) self._validate_statuses(self.lb_id, None, pool_id=pool_id) return pool def test_show_pool(self, **extras): expected = { 'name': '', 'description': '', 'protocol': 'HTTP', 'lb_algorithm': 'ROUND_ROBIN', 'admin_state_up': True, 'tenant_id': self._tenant_id, 'listeners': [{'id': self.listener_id}], 'healthmonitor_id': None, 'members': [] } expected.update(extras) with self.pool(listener_id=self.listener_id) as pool: pool_id = pool['pool']['id'] resp, body = self._get_pool_api(pool_id) actual = {} for k, v in body['pool'].items(): if k in expected: actual[k] = v self.assertEqual(expected, actual) return pool def test_update_pool(self, **extras): expected = { 'name': '', 'description': '', 'protocol': 'HTTP', 'lb_algorithm': 'LEAST_CONNECTIONS', 'admin_state_up': True, 'tenant_id': self._tenant_id, 'listeners': [{'id': self.listener_id}], 'healthmonitor_id': None, 'members': [] } expected.update(extras) with self.pool(listener_id=self.listener_id) as pool: pool_id = 
pool['pool']['id'] self.assertTrue(pool_id) data = {'pool': {'lb_algorithm': 'LEAST_CONNECTIONS'}} resp, body = self._update_pool_api(pool_id, data) actual = {} for k, v in body['pool'].items(): if k in expected: actual[k] = v self.assertEqual(expected, actual) self._validate_statuses(self.lb_id, self.listener_id, pool_id=pool_id) return pool def test_delete_pool(self): with self.pool(no_delete=True, listener_id=self.listener_id) as pool: pool_id = pool['pool']['id'] ctx = context.get_admin_context() qry = ctx.session.query(models.PoolV2) qry = qry.filter_by(id=pool_id) self.assertIsNotNone(qry.first()) resp = self._delete_pool_api(pool_id) self.assertEqual(webob.exc.HTTPNoContent.code, resp.status_int) qry = ctx.session.query(models.PoolV2) qry = qry.filter_by(id=pool['pool']['id']) self.assertIsNone(qry.first()) def test_delete_pool_and_members(self): with self.pool(listener_id=self.listener_id, no_delete=True) as pool: pool_id = pool['pool']['id'] with self.member(pool_id=pool_id, no_delete=True) as member: member_id = member['member']['id'] ctx = context.get_admin_context() # this will only set status, it requires driver to delete # from db. 
Since the LoggingNoopDriver is being used it # should delete from db self.plugin.delete_pool(ctx, pool_id) # verify member got deleted as well self.assertRaises( loadbalancerv2.EntityNotFound, self.plugin.db.get_pool_member, ctx, member_id) def test_cannot_add_multiple_pools_to_listener(self): with self.pool(listener_id=self.listener_id): data = {'pool': {'name': '', 'description': '', 'protocol': 'HTTP', 'lb_algorithm': 'ROUND_ROBIN', 'admin_state_up': True, 'tenant_id': self._tenant_id, 'listener_id': self.listener_id}} resp, body = self._create_pool_api(data) self.assertEqual(webob.exc.HTTPConflict.code, resp.status_int) def test_create_pool_with_pool_protocol_mismatch(self): with self.listener(protocol=lb_const.PROTOCOL_HTTPS, loadbalancer_id=self.lb_id, protocol_port=443) as listener: listener_id = listener['listener']['id'] data = {'pool': {'listener_id': listener_id, 'protocol': lb_const.PROTOCOL_HTTP, 'lb_algorithm': lb_const.LB_METHOD_ROUND_ROBIN, 'tenant_id': self._tenant_id}} resp, body = self._create_pool_api(data) self.assertEqual(webob.exc.HTTPConflict.code, resp.status_int) def test_create_pool_with_protocol_invalid(self): data = {'pool': { 'name': '', 'description': '', 'protocol': 'BLANK', 'lb_algorithm': 'LEAST_CONNECTIONS', 'admin_state_up': True, 'tenant_id': self._tenant_id }} resp, body = self._create_pool_api(data) self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int) def test_can_create_pool_with_listener_loadbalancer_match(self): with self.subnet() as subnet: with self.loadbalancer(subnet=subnet) as loadbalancer: lb_id = loadbalancer['loadbalancer']['id'] with self.listener(loadbalancer_id=lb_id) as l1: l_id = l1['listener']['id'] with self.pool(listener_id=l_id, loadbalancer_id=lb_id): pass def test_cannot_create_pool_with_listener_loadbalancer_mismatch(self): with self.subnet() as subnet: with contextlib.nested(self.loadbalancer(subnet=subnet), self.loadbalancer(subnet=subnet) ) as (lb1, lb2): lb_id1 = lb1['loadbalancer']['id'] 
lb_id2 = lb2['loadbalancer']['id'] with self.listener(loadbalancer_id=lb_id1) as l1: l_id = l1['listener']['id'] data = {'pool': {'name': '', 'description': '', 'protocol': 'HTTP', 'lb_algorithm': 'ROUND_ROBIN', 'admin_state_up': True, 'tenant_id': self._tenant_id, 'listener_id': l_id, 'loadbalancer_id': lb_id2}} resp, body = self._create_pool_api(data) self.assertEqual(resp.status_int, webob.exc.HTTPBadRequest.code) def test_create_pool_with_session_persistence(self): self.test_create_pool(session_persistence={'type': 'HTTP_COOKIE'}) def test_create_pool_with_session_persistence_none(self): self.test_create_pool(session_persistence=None) def test_create_pool_with_session_persistence_with_app_cookie(self): sp = {'type': 'APP_COOKIE', 'cookie_name': 'sessionId'} self.test_create_pool(session_persistence=sp) def test_create_pool_with_session_persistence_unsupported_type(self): with testtools.ExpectedException(webob.exc.HTTPClientError): self.test_create_pool(session_persistence={'type': 'UNSUPPORTED'}) def test_create_pool_with_unnecessary_cookie_name(self): sp = {'type': "SOURCE_IP", 'cookie_name': 'sessionId'} with testtools.ExpectedException(webob.exc.HTTPClientError): self.test_create_pool(session_persistence=sp) def test_create_pool_with_session_persistence_without_cookie_name(self): sp = {'type': "APP_COOKIE"} with testtools.ExpectedException(webob.exc.HTTPClientError): self.test_create_pool(session_persistence=sp) def test_validate_session_persistence_valid_with_cookie_name(self): sp = {'type': 'APP_COOKIE', 'cookie_name': 'MyCookie'} self.assertIsNone( self.plugin._validate_session_persistence_info(sp_info=sp)) def test_validate_session_persistence_invalid_with_cookie_name(self): sp = {'type': 'HTTP', 'cookie_name': 'MyCookie'} with testtools.ExpectedException( loadbalancerv2.SessionPersistenceConfigurationInvalid): self.plugin._validate_session_persistence_info(sp_info=sp) def test_validate_session_persistence_invalid_without_cookie_name(self): sp = {'type': 
'APP_COOKIE'} with testtools.ExpectedException( loadbalancerv2.SessionPersistenceConfigurationInvalid): self.plugin._validate_session_persistence_info(sp_info=sp) def test_reset_session_persistence(self): name = 'pool4' sp = {'type': "HTTP_COOKIE"} update_info = {'pool': {'session_persistence': None}} with self.pool(name=name, session_persistence=sp, listener_id=self.listener_id) as pool: pool_id = pool['pool']['id'] sp['cookie_name'] = None # Ensure that pool has been created properly self.assertEqual(pool['pool']['session_persistence'], sp) # Try resetting session_persistence resp, body = self._update_pool_api(pool_id, update_info) self.assertIsNone(body['pool'].get('session_persistence')) def test_update_no_change_session_persistence(self): name = 'pool4' sp = {'type': "HTTP_COOKIE"} update_info = {'pool': {'lb_algorithm': 'ROUND_ROBIN'}} with self.pool(name=name, session_persistence=sp, listener_id=self.listener_id) as pool: pool_id = pool['pool']['id'] sp['cookie_name'] = None # Ensure that pool has been created properly self.assertEqual(pool['pool']['session_persistence'], sp) # Try updating something other than session_persistence resp, body = self._update_pool_api(pool_id, update_info) # Make sure session_persistence is unchanged self.assertEqual(pool['pool']['session_persistence'], sp) def test_update_pool_with_protocol(self): with self.pool(listener_id=self.listener_id) as pool: pool_id = pool['pool']['id'] data = {'pool': {'protocol': 'BLANK'}} resp, body = self._update_pool_api(pool_id, data) self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int) def test_list_pools(self): name = 'list_pools' expected_values = {'name': name, 'protocol': 'HTTP', 'description': 'apool', 'lb_algorithm': 'ROUND_ROBIN', 'admin_state_up': True, 'tenant_id': self._tenant_id, 'session_persistence': {'cookie_name': None, 'type': 'HTTP_COOKIE'}, 'loadbalancers': [{'id': self.lb_id}], 'members': []} with self.pool(name=name, listener_id=self.listener_id, 
description='apool', session_persistence={'type': 'HTTP_COOKIE'}, members=[]) as pool: pool_id = pool['pool']['id'] expected_values['id'] = pool_id resp, body = self._list_pools_api() pool_list = body['pools'] self.assertEqual(1, len(pool_list)) for k in expected_values: self.assertEqual(expected_values[k], pool_list[0][k]) def test_list_pools_with_sort_emulated(self): with contextlib.nested(self.listener(loadbalancer_id=self.lb_id, protocol_port=81, protocol=lb_const.PROTOCOL_HTTPS), self.listener(loadbalancer_id=self.lb_id, protocol_port=82, protocol=lb_const.PROTOCOL_TCP), self.listener(loadbalancer_id=self.lb_id, protocol_port=83, protocol=lb_const.PROTOCOL_HTTP) ) as (l1, l2, l3): with contextlib.nested(self.pool(listener_id=l1['listener']['id'], protocol=lb_const.PROTOCOL_HTTPS), self.pool(listener_id=l2['listener']['id'], protocol=lb_const.PROTOCOL_TCP), self.pool(listener_id=l3['listener']['id'], protocol=lb_const.PROTOCOL_HTTP) ) as (p1, p2, p3): self._test_list_with_sort('pool', (p2, p1, p3), [('protocol', 'desc')]) def test_list_pools_with_pagination_emulated(self): with contextlib.nested(self.listener(loadbalancer_id=self.lb_id, protocol_port=81, protocol=lb_const.PROTOCOL_HTTPS), self.listener(loadbalancer_id=self.lb_id, protocol_port=82, protocol=lb_const.PROTOCOL_TCP), self.listener(loadbalancer_id=self.lb_id, protocol_port=83, protocol=lb_const.PROTOCOL_HTTP) ) as (l1, l2, l3): with contextlib.nested(self.pool(listener_id=l1['listener']['id'], protocol=lb_const.PROTOCOL_HTTPS), self.pool(listener_id=l2['listener']['id'], protocol=lb_const.PROTOCOL_TCP), self.pool(listener_id=l3['listener']['id'], protocol=lb_const.PROTOCOL_HTTP) ) as (p1, p2, p3): self._test_list_with_pagination('pool', (p3, p1, p2), ('protocol', 'asc'), 2, 2) def test_list_pools_with_pagination_reverse_emulated(self): with contextlib.nested(self.listener(loadbalancer_id=self.lb_id, protocol_port=81, protocol=lb_const.PROTOCOL_HTTPS), self.listener(loadbalancer_id=self.lb_id, 
protocol_port=82, protocol=lb_const.PROTOCOL_TCP), self.listener(loadbalancer_id=self.lb_id, protocol_port=83, protocol=lb_const.PROTOCOL_HTTP) ) as (l1, l2, l3): with contextlib.nested(self.pool(listener_id=l1['listener']['id'], protocol=lb_const.PROTOCOL_HTTPS), self.pool(listener_id=l2['listener']['id'], protocol=lb_const.PROTOCOL_TCP), self.pool(listener_id=l3['listener']['id'], protocol=lb_const.PROTOCOL_HTTP) ) as (p1, p2, p3): self._test_list_with_pagination_reverse('pool', (p3, p1, p2), ('protocol', 'asc'), 2, 2) def test_get_listener_shows_default_pool(self): with self.pool(listener_id=self.listener_id) as pool: pool_id = pool['pool']['id'] resp, body = self._get_listener_api(self.listener_id) self.assertEqual(pool_id, body['listener']['default_pool_id']) class MemberTestBase(PoolTestBase): def setUp(self): super(MemberTestBase, self).setUp() pool_res = self._create_pool( self.fmt, lb_const.PROTOCOL_HTTP, lb_const.LB_METHOD_ROUND_ROBIN, self.listener_id, self.lb_id, session_persistence={'type': lb_const.SESSION_PERSISTENCE_HTTP_COOKIE}) self.pool = self.deserialize(self.fmt, pool_res) self.pool_id = self.pool['pool']['id'] alt_listener_res = self._create_listener( self.fmt, lb_const.PROTOCOL_HTTP, self.def_listener['listener']['protocol_port'] + 1, self.lb_id ) self.alt_listener = self.deserialize(self.fmt, alt_listener_res) self.alt_listener_id = self.alt_listener['listener']['id'] alt_pool_res = self._create_pool( self.fmt, lb_const.PROTOCOL_HTTP, lb_const.LB_METHOD_ROUND_ROBIN, self.alt_listener_id, session_persistence={'type': lb_const.SESSION_PERSISTENCE_HTTP_COOKIE}) self.alt_pool = self.deserialize(self.fmt, alt_pool_res) self.alt_pool_id = self.alt_pool['pool']['id'] def tearDown(self): self._delete('pools', self.alt_pool_id) self._delete('pools', self.pool_id) super(MemberTestBase, self).tearDown() def _create_member_api(self, pool_id, data): req = self.new_create_request("pools", data, self.fmt, id=pool_id, subresource='members') resp = 
req.get_response(self.ext_api) body = self.deserialize(self.fmt, resp) return resp, body def _update_member_api(self, pool_id, member_id, data): req = self.new_update_request('pools', data, pool_id, subresource='members', sub_id=member_id) resp = req.get_response(self.ext_api) body = self.deserialize(self.fmt, resp) return resp, body def _delete_member_api(self, pool_id, member_id): req = self.new_delete_request('pools', pool_id, subresource='members', sub_id=member_id) resp = req.get_response(self.ext_api) return resp def _get_member_api(self, pool_id, member_id): req = self.new_show_request('pools', pool_id, subresource='members', sub_id=member_id) resp = req.get_response(self.ext_api) body = self.deserialize(self.fmt, resp) return resp, body def _list_members_api(self, pool_id): req = self.new_list_request('pools', id=pool_id, subresource='members') resp = req.get_response(self.ext_api) body = self.deserialize(self.fmt, resp) return resp, body class LbaasMemberTests(MemberTestBase): def test_create_member(self, **extras): expected = { 'address': '127.0.0.1', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'subnet_id': '', 'name': 'member1' } expected.update(extras) expected['subnet_id'] = self.test_subnet_id with self.member(pool_id=self.pool_id, name='member1') as member: member_id = member['member'].get('id') self.assertTrue(member_id) actual = {} for k, v in member['member'].items(): if k in expected: actual[k] = v self.assertEqual(expected, actual) self._validate_statuses(self.lb_id, self.listener_id, pool_id=self.pool_id, member_id=member_id) return member def test_create_member_with_existing_address_port_pool_combination(self): with self.member(pool_id=self.pool_id) as member1: member1 = member1['member'] member_data = { 'address': member1['address'], 'protocol_port': member1['protocol_port'], 'weight': 1, 'subnet_id': member1['subnet_id'], 'admin_state_up': True, 'tenant_id': member1['tenant_id'] } 
self.assertRaises( loadbalancerv2.MemberExists, self.plugin.create_pool_member, context.get_admin_context(), self.pool_id, {'member': member_data}) def test_update_member(self): keys = [('address', "127.0.0.1"), ('tenant_id', self._tenant_id), ('protocol_port', 80), ('weight', 10), ('admin_state_up', False), ('name', 'member2')] with self.member(pool_id=self.pool_id) as member: member_id = member['member']['id'] resp, pool1_update = self._get_pool_api(self.pool_id) self.assertEqual(1, len(pool1_update['pool']['members'])) data = {'member': {'weight': 10, 'admin_state_up': False, 'name': 'member2'}} resp, body = self._update_member_api(self.pool_id, member_id, data) for k, v in keys: self.assertEqual(v, body['member'][k]) resp, pool1_update = self._get_pool_api(self.pool_id) self.assertEqual(1, len(pool1_update['pool']['members'])) self._validate_statuses(self.lb_id, self.listener_id, pool_id=self.pool_id, member_id=member_id, member_disabled=True) def test_delete_member(self): with self.member(pool_id=self.pool_id, no_delete=True) as member: member_id = member['member']['id'] resp = self._delete_member_api(self.pool_id, member_id) self.assertEqual(webob.exc.HTTPNoContent.code, resp.status_int) resp, pool_update = self._get_pool_api(self.pool_id) self.assertEqual(0, len(pool_update['pool']['members'])) def test_show_member(self): keys = [('address', "127.0.0.1"), ('tenant_id', self._tenant_id), ('protocol_port', 80), ('weight', 1), ('admin_state_up', True), ('name', 'member1')] with self.member(pool_id=self.pool_id, name='member1') as member: member_id = member['member']['id'] resp, body = self._get_member_api(self.pool_id, member_id) for k, v in keys: self.assertEqual(v, body['member'][k]) def test_list_members(self): with self.member(pool_id=self.pool_id, name='member1', protocol_port=81): resp, body = self._list_members_api(self.pool_id) self.assertEqual(1, len(body['members'])) def test_list_members_only_for_pool(self): with 
self.member(pool_id=self.alt_pool_id): with self.member(pool_id=self.pool_id, protocol_port=81) as in_member: resp, body = self._list_members_api(self.pool_id) self.assertEqual(len(body['members']), 1) self.assertIn(in_member['member'], body['members']) def test_list_members_with_sort_emulated(self): with self.member(pool_id=self.pool_id, protocol_port=81) as m1: with self.member(pool_id=self.pool_id, protocol_port=82) as m2: with self.member(pool_id=self.pool_id, protocol_port=83) as m3: self._test_list_with_sort( 'pool', (m3, m2, m1), [('protocol_port', 'desc')], id=self.pool_id, subresource='member') def test_list_members_with_pagination_emulated(self): with self.member(pool_id=self.pool_id, protocol_port=81) as m1: with self.member(pool_id=self.pool_id, protocol_port=82) as m2: with self.member(pool_id=self.pool_id, protocol_port=83) as m3: self._test_list_with_pagination( 'pool', (m1, m2, m3), ('protocol_port', 'asc'), 2, 2, id=self.pool_id, subresource='member' ) def test_list_members_with_pagination_reverse_emulated(self): with self.member(pool_id=self.pool_id, protocol_port=81) as m1: with self.member(pool_id=self.pool_id, protocol_port=82) as m2: with self.member(pool_id=self.pool_id, protocol_port=83) as m3: self._test_list_with_pagination_reverse( 'pool', (m1, m2, m3), ('protocol_port', 'asc'), 2, 2, id=self.pool_id, subresource='member' ) def test_list_members_invalid_pool_id(self): resp, body = self._list_members_api('WRONG_POOL_ID') self.assertEqual(webob.exc.HTTPNotFound.code, resp.status_int) resp, body = self._list_members_api(self.pool_id) self.assertEqual(webob.exc.HTTPOk.code, resp.status_int) def test_get_member_invalid_pool_id(self): with self.member(pool_id=self.pool_id) as member: member_id = member['member']['id'] resp, body = self._get_member_api('WRONG_POOL_ID', member_id) self.assertEqual(webob.exc.HTTPNotFound.code, resp.status_int) resp, body = self._get_member_api(self.pool_id, member_id) self.assertEqual(webob.exc.HTTPOk.code, 
resp.status_int) def test_create_member_invalid_pool_id(self): data = {'member': {'address': '127.0.0.1', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'subnet_id': self.test_subnet_id}} resp, body = self._create_member_api('WRONG_POOL_ID', data) self.assertEqual(webob.exc.HTTPNotFound.code, resp.status_int) def test_update_member_invalid_pool_id(self): with self.member(pool_id=self.pool_id) as member: member_id = member['member']['id'] data = {'member': {'weight': 1}} resp, body = self._update_member_api( 'WRONG_POOL_ID', member_id, data) self.assertEqual(webob.exc.HTTPNotFound.code, resp.status_int) def test_create_member_invalid_name(self): data = {'member': {'address': '127.0.0.1', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'subnet_id': self.test_subnet_id, 'name': 123}} resp, body = self._create_member_api('POOL_ID', data) self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int) def test_delete_member_invalid_pool_id(self): with self.member(pool_id=self.pool_id) as member: member_id = member['member']['id'] resp = self._delete_member_api('WRONG_POOL_ID', member_id) self.assertEqual(webob.exc.HTTPNotFound.code, resp.status_int) def test_get_pool_shows_members(self): with self.member(pool_id=self.pool_id, name='member1') as member: expected = {'id': member['member']['id']} resp, body = self._get_pool_api(self.pool_id) self.assertIn(expected, body['pool']['members']) class HealthMonitorTestBase(MemberTestBase): def _create_healthmonitor_api(self, data): req = self.new_create_request("healthmonitors", data, self.fmt) resp = req.get_response(self.ext_api) body = self.deserialize(self.fmt, resp) return resp, body def _update_healthmonitor_api(self, hm_id, data): req = self.new_update_request('healthmonitors', data, hm_id) resp = req.get_response(self.ext_api) body = self.deserialize(self.fmt, resp) return resp, body def _delete_healthmonitor_api(self, hm_id): req = 
self.new_delete_request('healthmonitors', hm_id) resp = req.get_response(self.ext_api) return resp def _get_healthmonitor_api(self, hm_id): req = self.new_show_request('healthmonitors', hm_id) resp = req.get_response(self.ext_api) body = self.deserialize(self.fmt, resp) return resp, body def _list_healthmonitors_api(self): req = self.new_list_request('healthmonitors') resp = req.get_response(self.ext_api) body = self.deserialize(self.fmt, resp) return resp, body class LbaasHealthMonitorTests(HealthMonitorTestBase): def test_create_healthmonitor(self, **extras): expected = { 'type': 'HTTP', 'delay': 1, 'timeout': 1, 'max_retries': 1, 'http_method': 'GET', 'url_path': '/', 'expected_codes': '200', 'admin_state_up': True, 'tenant_id': self._tenant_id, 'pools': [{'id': self.pool_id}], 'name': 'monitor1' } expected.update(extras) with self.healthmonitor(pool_id=self.pool_id, type='HTTP', name='monitor1', **extras) as healthmonitor: hm_id = healthmonitor['healthmonitor'].get('id') self.assertTrue(hm_id) actual = {} for k, v in healthmonitor['healthmonitor'].items(): if k in expected: actual[k] = v self.assertEqual(expected, actual) self._validate_statuses(self.lb_id, self.listener_id, pool_id=self.pool_id, hm_id=hm_id) _, pool = self._get_pool_api(self.pool_id) self.assertEqual( {'type': lb_const.SESSION_PERSISTENCE_HTTP_COOKIE, 'cookie_name': None}, pool['pool'].get('session_persistence')) return healthmonitor def test_show_healthmonitor(self, **extras): expected = { 'type': 'HTTP', 'delay': 1, 'timeout': 1, 'max_retries': 1, 'http_method': 'GET', 'url_path': '/', 'expected_codes': '200', 'admin_state_up': True, 'tenant_id': self._tenant_id, 'pools': [{'id': self.pool_id}], 'name': 'monitor1' } expected.update(extras) with self.healthmonitor(pool_id=self.pool_id, type='HTTP', name='monitor1') as healthmonitor: hm_id = healthmonitor['healthmonitor']['id'] resp, body = self._get_healthmonitor_api(hm_id) actual = {} for k, v in body['healthmonitor'].items(): if k in 
expected: actual[k] = v self.assertEqual(expected, actual) return healthmonitor def test_update_healthmonitor(self, **extras): expected = { 'type': 'HTTP', 'delay': 30, 'timeout': 10, 'max_retries': 4, 'http_method': 'GET', 'url_path': '/index.html', 'expected_codes': '200,404', 'admin_state_up': True, 'tenant_id': self._tenant_id, 'pools': [{'id': self.pool_id}], 'name': 'monitor2' } expected.update(extras) with self.healthmonitor(pool_id=self.pool_id, type='HTTP', name='monitor1') as healthmonitor: hm_id = healthmonitor['healthmonitor']['id'] data = {'healthmonitor': {'delay': 30, 'timeout': 10, 'max_retries': 4, 'expected_codes': '200,404', 'url_path': '/index.html', 'name': 'monitor2'}} resp, body = self._update_healthmonitor_api(hm_id, data) actual = {} for k, v in body['healthmonitor'].items(): if k in expected: actual[k] = v self.assertEqual(expected, actual) self._validate_statuses(self.lb_id, self.listener_id, pool_id=self.pool_id, hm_id=hm_id) return healthmonitor def test_delete_healthmonitor(self): with self.healthmonitor(pool_id=self.pool_id, no_delete=True) as healthmonitor: hm_id = healthmonitor['healthmonitor']['id'] resp = self._delete_healthmonitor_api(hm_id) self.assertEqual(webob.exc.HTTPNoContent.code, resp.status_int) def test_create_healthmonitor_with_type_tcp(self, **extras): expected = { 'type': 'TCP', 'delay': 1, 'timeout': 1, 'max_retries': 1, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'pools': [{'id': self.pool_id}], 'name': 'monitor1' } expected.update(extras) with self.healthmonitor(pool_id=self.pool_id, type='TCP', name='monitor1') as healthmonitor: hm_id = healthmonitor['healthmonitor'].get('id') self.assertTrue(hm_id) actual = {} for k, v in healthmonitor['healthmonitor'].items(): if k in expected: actual[k] = v self.assertEqual(expected, actual) self._validate_statuses(self.lb_id, self.listener_id, pool_id=self.pool_id, hm_id=hm_id) return healthmonitor def test_show_healthmonitor_with_type_tcp(self, **extras): expected 
= { 'type': 'TCP', 'delay': 1, 'timeout': 1, 'max_retries': 1, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'pools': [{'id': self.pool_id}], 'name': 'monitor1' } expected.update(extras) with self.healthmonitor(pool_id=self.pool_id, type='TCP', name='monitor1') as healthmonitor: hm_id = healthmonitor['healthmonitor']['id'] resp, body = self._get_healthmonitor_api(hm_id) actual = {} for k, v in body['healthmonitor'].items(): if k in expected: actual[k] = v self.assertEqual(expected, actual) return healthmonitor def test_update_healthmonitor_with_type_tcp(self, **extras): expected = { 'type': 'TCP', 'delay': 30, 'timeout': 10, 'max_retries': 4, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'pools': [{'id': self.pool_id}], 'name': 'monitor2' } expected.update(extras) with self.healthmonitor(pool_id=self.pool_id, type='TCP', name='monitor1') as healthmonitor: hm_id = healthmonitor['healthmonitor']['id'] data = {'healthmonitor': {'delay': 30, 'timeout': 10, 'max_retries': 4, 'name': 'monitor2'}} resp, body = self._update_healthmonitor_api(hm_id, data) actual = {} for k, v in body['healthmonitor'].items(): if k in expected: actual[k] = v self.assertEqual(expected, actual) self._validate_statuses(self.lb_id, self.listener_id, pool_id=self.pool_id, hm_id=hm_id) return healthmonitor def test_create_health_monitor_with_timeout_invalid(self): data = {'healthmonitor': {'type': 'HTTP', 'delay': 1, 'timeout': -1, 'max_retries': 2, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'pool_id': self.pool_id}} resp, body = self._create_healthmonitor_api(data) self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int) def test_update_health_monitor_with_timeout_invalid(self): with self.healthmonitor(pool_id=self.pool_id) as healthmonitor: hm_id = healthmonitor['healthmonitor']['id'] data = {'healthmonitor': {'delay': 10, 'timeout': -1, 'max_retries': 2, 'admin_state_up': False}} resp, body = self._update_healthmonitor_api(hm_id, data) 
self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int) def test_create_health_monitor_with_delay_invalid(self): data = {'healthmonitor': {'type': 'HTTP', 'delay': -1, 'timeout': 1, 'max_retries': 2, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'pool_id': self.pool_id}} resp, body = self._create_healthmonitor_api(data) self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int) def test_update_health_monitor_with_delay_invalid(self): with self.healthmonitor(pool_id=self.pool_id) as healthmonitor: hm_id = healthmonitor['healthmonitor']['id'] data = {'healthmonitor': {'delay': -1, 'timeout': 1, 'max_retries': 2, 'admin_state_up': False}} resp, body = self._update_healthmonitor_api(hm_id, data) self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int) def test_create_health_monitor_with_max_retries_invalid(self): data = {'healthmonitor': {'type': 'HTTP', 'delay': 1, 'timeout': 1, 'max_retries': 20, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'pool_id': self.pool_id}} resp, body = self._create_healthmonitor_api(data) self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int) def test_update_health_monitor_with_max_retries_invalid(self): with self.healthmonitor(pool_id=self.pool_id) as healthmonitor: hm_id = healthmonitor['healthmonitor']['id'] data = {'healthmonitor': {'delay': 1, 'timeout': 1, 'max_retries': 20, 'admin_state_up': False}} resp, body = self._update_healthmonitor_api(hm_id, data) self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int) def test_create_health_monitor_with_type_invalid(self): data = {'healthmonitor': {'type': 1, 'delay': 1, 'timeout': 1, 'max_retries': 2, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'pool_id': self.pool_id}} resp, body = self._create_healthmonitor_api(data) self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int) def test_update_health_monitor_with_type_invalid(self): with self.healthmonitor(pool_id=self.pool_id) as healthmonitor: hm_id = 
healthmonitor['healthmonitor']['id'] data = {'healthmonitor': {'type': 1, 'delay': 1, 'timeout': 1, 'max_retries': 2, 'admin_state_up': False}} resp, body = self._update_healthmonitor_api(hm_id, data) self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int) def test_create_health_monitor_with_http_method_non_default(self): data = {'healthmonitor': {'type': 'HTTP', 'http_method': 'POST', 'delay': 2, 'timeout': 1, 'max_retries': 2, 'tenant_id': self._tenant_id, 'pool_id': self.pool_id}} resp, body = self._create_healthmonitor_api(data) self.assertEqual(201, resp.status_int) def test_create_health_monitor_with_http_method_invalid(self): data = {'healthmonitor': {'type': 'HTTP', 'http_method': 'FOO', 'delay': 1, 'timeout': 1, 'max_retries': 2, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'pool_id': self.pool_id}} resp, body = self._create_healthmonitor_api(data) self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int) def test_update_health_monitor_with_http_method_invalid(self): with self.healthmonitor(pool_id=self.pool_id) as healthmonitor: hm_id = healthmonitor['healthmonitor']['id'] data = {'healthmonitor': {'type': 'HTTP', 'http_method': 'FOO', 'delay': 1, 'timeout': 1, 'max_retries': 2, 'admin_state_up': False}} resp, body = self._update_healthmonitor_api(hm_id, data) self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int) def test_create_health_monitor_with_url_path_non_default(self): data = {'healthmonitor': {'type': 'HTTP', 'url_path': '/a/b_c-d/e%20f', 'delay': 2, 'timeout': 1, 'max_retries': 2, 'tenant_id': self._tenant_id, 'pool_id': self.pool_id}} resp, body = self._create_healthmonitor_api(data) self.assertEqual(201, resp.status_int) def test_create_health_monitor_with_url_path_invalid(self): data = {'healthmonitor': {'type': 'HTTP', 'url_path': 1, 'delay': 1, 'timeout': 1, 'max_retries': 2, 'admin_state_up': True, 'tenant_id': self._tenant_id, 'pool_id': self.pool_id}} resp, body = self._create_healthmonitor_api(data) 
self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int) def test_update_health_monitor_with_url_path_invalid(self): with self.healthmonitor(pool_id=self.pool_id) as healthmonitor: hm_id = healthmonitor['healthmonitor']['id'] data = {'healthmonitor': {'url_path': 1, 'delay': 1, 'timeout': 1, 'max_retries': 2, 'admin_state_up': False}} resp, body = self._update_healthmonitor_api(hm_id, data) self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int) def test_create_healthmonitor_invalid_pool_id(self): data = {'healthmonitor': {'type': lb_const.HEALTH_MONITOR_TCP, 'delay': 1, 'timeout': 1, 'max_retries': 1, 'tenant_id': self._tenant_id, 'pool_id': uuidutils.generate_uuid()}} resp, body = self._create_healthmonitor_api(data) self.assertEqual(webob.exc.HTTPNotFound.code, resp.status_int) def test_create_healthmonitor_invalid_name(self): data = {'healthmonitor': {'type': lb_const.HEALTH_MONITOR_TCP, 'delay': 1, 'timeout': 1, 'max_retries': 1, 'tenant_id': self._tenant_id, 'pool_id': self.pool_id, 'name': 123}} resp, body = self._create_healthmonitor_api(data) self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int) def test_only_one_healthmonitor_per_pool(self): with self.healthmonitor(pool_id=self.pool_id): data = {'healthmonitor': {'type': lb_const.HEALTH_MONITOR_TCP, 'delay': 1, 'timeout': 1, 'max_retries': 1, 'tenant_id': self._tenant_id, 'pool_id': self.pool_id}} resp, body = self._create_healthmonitor_api(data) self.assertEqual(webob.exc.HTTPConflict.code, resp.status_int) def test_get_healthmonitor(self): expected = { 'type': 'HTTP', 'delay': 1, 'timeout': 1, 'max_retries': 1, 'http_method': 'GET', 'url_path': '/', 'expected_codes': '200', 'admin_state_up': True, 'tenant_id': self._tenant_id, 'pools': [{'id': self.pool_id}], 'name': 'monitor1' } with self.healthmonitor(pool_id=self.pool_id, type='HTTP', name='monitor1') as healthmonitor: hm_id = healthmonitor['healthmonitor']['id'] expected['id'] = hm_id resp, body = 
self._get_healthmonitor_api(hm_id) self.assertEqual(expected, body['healthmonitor']) def test_list_healthmonitors(self): expected = { 'type': 'HTTP', 'delay': 1, 'timeout': 1, 'max_retries': 1, 'http_method': 'GET', 'url_path': '/', 'expected_codes': '200', 'admin_state_up': True, 'tenant_id': self._tenant_id, 'pools': [{'id': self.pool_id}], 'name': '', } with self.healthmonitor(pool_id=self.pool_id, type='HTTP') as healthmonitor: hm_id = healthmonitor['healthmonitor']['id'] expected['id'] = hm_id resp, body = self._list_healthmonitors_api() self.assertEqual([expected], body['healthmonitors']) def test_get_pool_shows_healthmonitor_id(self): with self.healthmonitor(pool_id=self.pool_id) as healthmonitor: hm_id = healthmonitor['healthmonitor']['id'] resp, body = self._get_pool_api(self.pool_id) self.assertEqual(hm_id, body['pool']['healthmonitor_id']) def test_update_healthmonitor_status(self): with self.healthmonitor(pool_id=self.pool_id) as healthmonitor: hm_id = healthmonitor['healthmonitor'].get('id') ctx = context.get_admin_context() self.plugin.db.update_status(ctx, models.HealthMonitorV2, hm_id, provisioning_status=constants.ACTIVE, operating_status=lb_const.DEGRADED) db_hm = self.plugin.db.get_healthmonitor(ctx, hm_id) self.assertEqual(constants.ACTIVE, db_hm.provisioning_status) self.assertFalse(hasattr(db_hm, 'operating_status')) def test_create_healthmonitor_admin_state_down(self): self.test_create_healthmonitor(admin_state_up=False) class LbaasStatusesTest(MemberTestBase): def setUp(self): super(LbaasStatusesTest, self).setUp() self.lbs_to_clean = [] def tearDown(self): for lb_dict in self.lbs_to_clean: self._delete_populated_lb(lb_dict) super(LbaasStatusesTest, self).tearDown() def test_disable_lb(self): ctx = context.get_admin_context() lb_dict = self._create_new_populated_loadbalancer() lb_id = lb_dict['id'] opt = {'admin_state_up': False} self.plugin.db.update_loadbalancer(ctx, lb_id, opt) statuses = self._get_loadbalancer_statuses_api(lb_id)[1] 
n_disabled = self._countDisabledChildren(statuses, 0) self.assertEqual(11, n_disabled) def _countDisabledChildren(self, obj, count): if isinstance(obj, dict): for key, value in six.iteritems(obj): if key == "operating_status": count += 1 continue count = self._countDisabledChildren(value, count) if isinstance(obj, list): for value in obj: count = self._countDisabledChildren(value, count) return count def test_disable_trickles_down(self): lb_dict = self._create_new_populated_loadbalancer() lb_id = lb_dict['id'] self._update_loadbalancer_api(lb_id, {'loadbalancer': { 'admin_state_up': False}}) statuses = self._get_loadbalancer_statuses_api(lb_id)[1] self._assertDisabled(self._traverse_statuses(statuses)) self._assertDisabled(self._traverse_statuses(statuses, listener='listener_HTTP')) self._assertDisabled(self._traverse_statuses( statuses, listener='listener_HTTPS')) self._assertDisabled(self._traverse_statuses(statuses, listener='listener_HTTP', pool='pool_HTTP')) self._assertDisabled(self._traverse_statuses(statuses, listener='listener_HTTPS', pool='pool_HTTPS')) self._assertDisabled(self._traverse_statuses(statuses, listener='listener_HTTP', pool='pool_HTTP', member='127.0.0.1')) self._assertDisabled(self._traverse_statuses(statuses, listener='listener_HTTPS', pool='pool_HTTPS', member='127.0.0.4')) self._assertDisabled(self._traverse_statuses(statuses, listener='listener_HTTP', pool='pool_HTTP', healthmonitor=True)) self._assertDisabled(self._traverse_statuses(statuses, listener='listener_HTTPS', pool='pool_HTTPS', healthmonitor=True)) def test_disable_not_calculated_in_degraded(self): lb_dict = self._create_new_populated_loadbalancer() lb_id = lb_dict['id'] listener_id = lb_dict['listeners'][0]['id'] listener = 'listener_HTTP' self._update_listener_api(listener_id, {'listener': {'admin_state_up': False}}) statuses = self._get_loadbalancer_statuses_api(lb_id)[1] self._assertOnline(self._traverse_statuses(statuses)) self._update_listener_api(listener_id, 
{'listener': {'admin_state_up': True}}) pool_id = lb_dict['listeners'][0]['pools'][0]['id'] pool = 'pool_HTTP' member_id = lb_dict['listeners'][0]['pools'][0]['members'][0]['id'] member = '127.0.0.1' self._update_member_api(pool_id, member_id, {'member': {'admin_state_up': False}}) statuses = self._get_loadbalancer_statuses_api(lb_id)[1] self._assertOnline(self._traverse_statuses(statuses)) self._assertOnline(self._traverse_statuses(statuses, listener=listener)) self._assertOnline(self._traverse_statuses(statuses, listener=listener, pool=pool)) self._assertDisabled(self._traverse_statuses(statuses, listener=listener, pool=pool, member=member)) def test_that_failures_trickle_up_on_prov_errors(self): ctx = context.get_admin_context() ERROR = constants.ERROR lb_dict = self._create_new_populated_loadbalancer() lb_id = lb_dict['id'] statuses = self._get_loadbalancer_statuses_api(lb_id)[1] stat = self._traverse_statuses(statuses, listener="listener_HTTP", pool="pool_HTTP", member='127.0.0.1') member_id = stat['id'] self.plugin.db.update_status(ctx, models.MemberV2, member_id, provisioning_status=ERROR) statuses = self._get_loadbalancer_statuses_api(lb_id)[1] #Assert the parents of the member are degraded self._assertDegraded(self._traverse_statuses(statuses, listener='listener_HTTP', pool='pool_HTTP')) self._assertDegraded(self._traverse_statuses(statuses, listener='listener_HTTP')) self._assertDegraded(self._traverse_statuses(statuses)) #Verify siblings are not degraded self._assertNotDegraded(self._traverse_statuses(statuses, listener='listener_HTTPS', pool='pool_HTTPS')) self._assertNotDegraded(self._traverse_statuses(statuses, listener='listener_HTTPS')) def test_that_failures_trickle_up_on_non_ONLINE_prov_status(self): ctx = context.get_admin_context() lb_dict = self._create_new_populated_loadbalancer() lb_id = lb_dict['id'] statuses = self._get_loadbalancer_statuses_api(lb_id)[1] stat = self._traverse_statuses(statuses, listener="listener_HTTP", pool="pool_HTTP", 
member='127.0.0.1') member_id = stat['id'] self.plugin.db.update_status(ctx, models.MemberV2, member_id, operating_status=lb_const.OFFLINE) statuses = self._get_loadbalancer_statuses_api(lb_id)[1] #Assert the parents of the member are degraded self._assertDegraded(self._traverse_statuses(statuses, listener='listener_HTTP', pool='pool_HTTP')) self._assertDegraded(self._traverse_statuses(statuses, listener='listener_HTTP')) self._assertDegraded(self._traverse_statuses(statuses)) #Verify siblings are not degraded self._assertNotDegraded(self._traverse_statuses(statuses, listener='listener_HTTPS', pool='pool_HTTPS')) self._assertNotDegraded(self._traverse_statuses(statuses, listener='listener_HTTPS')) def _assertOnline(self, obj): OS = "operating_status" if OS in obj: self.assertEqual(lb_const.ONLINE, obj[OS]) def _assertDegraded(self, obj): OS = "operating_status" if OS in obj: self.assertEqual(lb_const.DEGRADED, obj[OS]) def _assertNotDegraded(self, obj): OS = "operating_status" if OS in obj: self.assertNotEqual(lb_const.DEGRADED, obj[OS]) def _assertDisabled(self, obj): OS = "operating_status" if OS in obj: self.assertEqual(lb_const.DISABLED, obj[OS]) def _delete_populated_lb(self, lb_dict): lb_id = lb_dict['id'] for pool in lb_dict['pools']: pool_id = pool['id'] for member in pool['members']: member_id = member['id'] self._delete_member_api(pool_id, member_id) self._delete_pool_api(pool_id) for listener in lb_dict['listeners']: listener_id = listener['id'] self._delete_listener_api(listener_id) self._delete_loadbalancer_api(lb_id) def _traverse_statuses(self, statuses, listener=None, pool=None, member=None, healthmonitor=False): lb = statuses['statuses']['loadbalancer'] if listener is None: return copy.copy(lb) listener_list = lb['listeners'] for listener_obj in listener_list: if listener_obj['name'] == listener: if pool is None: return copy.copy(listener_obj) pool_list = listener_obj['pools'] for pool_obj in pool_list: if pool_obj['name'] == pool: if 
healthmonitor: return copy.copy(pool_obj['healthmonitor']) if member is None: return copy.copy(pool_obj) member_list = pool_obj['members'] for member_obj in member_list: if member_obj['address'] == member: return copy.copy(member_obj) pool_list = lb['pools'] for pool_obj in pool_list: if pool_obj['name'] == pool: if healthmonitor: return copy.copy(pool_obj['healthmonitor']) if member is None: return copy.copy(pool_obj) member_list = pool_obj['members'] for member_obj in member_list: if member_obj['address'] == member: return copy.copy(member_obj) raise KeyError def _create_new_populated_loadbalancer(self): oct4 = 1 subnet_id = self.test_subnet_id HTTP = lb_const.PROTOCOL_HTTP HTTPS = lb_const.PROTOCOL_HTTPS ROUND_ROBIN = lb_const.LB_METHOD_ROUND_ROBIN fmt = self.fmt lb_dict = {} lb_res = self._create_loadbalancer( self.fmt, subnet_id=self.test_subnet_id, name='test_loadbalancer') lb = self.deserialize(fmt, lb_res) lb_id = lb['loadbalancer']['id'] lb_dict['id'] = lb_id lb_dict['listeners'] = [] lb_dict['pools'] = [] for prot, port in [(HTTP, 80), (HTTPS, 443)]: res = self._create_listener(fmt, prot, port, lb_id, name="listener_%s" % prot) listener = self.deserialize(fmt, res) listener_id = listener['listener']['id'] lb_dict['listeners'].append({'id': listener_id, 'pools': []}) res = self._create_pool(fmt, prot, ROUND_ROBIN, listener_id, loadbalancer_id=lb_id, name="pool_%s" % prot) pool = self.deserialize(fmt, res) pool_id = pool['pool']['id'] members = [] lb_dict['listeners'][-1]['pools'].append({'id': pool['pool']['id'], 'members': members}) lb_dict['pools'].append({'id': pool['pool']['id'], 'members': members}) res = self._create_healthmonitor(fmt, pool_id, type=prot, delay=1, timeout=1, max_retries=1) health_monitor = self.deserialize(fmt, res) lb_dict['listeners'][-1]['pools'][-1]['health_monitor'] = { 'id': health_monitor['healthmonitor']['id']} lb_dict['pools'][-1]['health_monitor'] = { 'id': health_monitor['healthmonitor']['id']} for i in six.moves.range(0, 
3): address = "127.0.0.%i" % oct4 oct4 += 1 res = self._create_member(fmt, pool_id, address, port, subnet_id) member = self.deserialize(fmt, res) members.append({'id': member['member']['id']}) self.lbs_to_clean.append(lb_dict) return lb_dict neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/db/loadbalancer/__init__.py0000664000567000056710000000000012701407726027510 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/db/loadbalancer/test_migrations.py0000664000567000056710000000442412701407726031202 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Cisco Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron.db.migration.alembic_migrations import external from neutron.db.migration import cli as migration from neutron.tests.common import base from neutron.tests.functional.db import test_migrations from neutron_lbaas.db.models import head # EXTERNAL_TABLES should contain all names of tables that are not related to # current repo. EXTERNAL_TABLES = set(external.TABLES) - set(external.LBAAS_TABLES) VERSION_TABLE = 'alembic_version_lbaas' class _TestModelsMigrationsLBAAS(test_migrations._TestModelsMigrations): # TODO(yamamoto): Move this test to functional, once neutron-lbaas # gets a functional job. 
def db_sync(self, engine): cfg.CONF.set_override('connection', engine.url, group='database') for conf in migration.get_alembic_configs(): self.alembic_config = conf self.alembic_config.neutron_config = cfg.CONF migration.do_alembic_command(conf, 'upgrade', 'heads') def get_metadata(self): return head.get_metadata() def include_object(self, object_, name, type_, reflected, compare_to): if type_ == 'table' and (name.startswith('alembic') or name == VERSION_TABLE or name in EXTERNAL_TABLES): return False if type_ == 'index' and reflected and name.startswith("idx_autoinc_"): return False return True class TestModelsMigrationsMysql(_TestModelsMigrationsLBAAS, base.MySQLTestCase): pass class TestModelsMigrationsPostgresql(_TestModelsMigrationsLBAAS, base.PostgreSQLTestCase): pass neutron-lbaas-8.0.0/neutron_lbaas/tests/unit/db/loadbalancer/test_db_loadbalancer.py0000664000567000056710000022166412701407726032111 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import contextlib import mock from neutron.api import extensions from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import config from neutron import context from neutron.db import servicetype_db as sdb from neutron import manager from neutron.plugins.common import constants from neutron.services import provider_configuration as pconf from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron_lib import constants as n_constants from neutron_lib import exceptions as n_exc import testtools import webob.exc from neutron_lbaas._i18n import _ from neutron_lbaas.db.loadbalancer import loadbalancer_db as ldb import neutron_lbaas.extensions from neutron_lbaas.extensions import loadbalancer from neutron_lbaas.services.loadbalancer import ( plugin as loadbalancer_plugin ) from neutron_lbaas.services.loadbalancer.drivers import abstract_driver from neutron_lbaas.tests import base DB_CORE_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' DB_LB_PLUGIN_KLASS = ( "neutron_lbaas.services.loadbalancer." "plugin.LoadBalancerPlugin" ) NOOP_DRIVER_KLASS = ('neutron_lbaas.tests.unit.db.loadbalancer.' 
'test_db_loadbalancer.NoopLbaaSDriver') extensions_path = ':'.join(neutron_lbaas.extensions.__path__) _subnet_id = "0c798ed8-33ba-11e2-8b28-000c291c4d14" class NoopLbaaSDriver(abstract_driver.LoadBalancerAbstractDriver): """A dummy lbass driver that that only performs object deletion.""" def __init__(self, plugin): self.plugin = plugin def create_vip(self, context, vip): pass def update_vip(self, context, old_vip, vip): pass def delete_vip(self, context, vip): self.plugin._delete_db_vip(context, vip["id"]) def create_pool(self, context, pool): pass def update_pool(self, context, old_pool, pool): pass def delete_pool(self, context, pool): self.plugin._delete_db_pool(context, pool["id"]) def stats(self, context, pool_id): return {"bytes_in": 0, "bytes_out": 0, "active_connections": 0, "total_connections": 0} def create_member(self, context, member): pass def update_member(self, context, old_member, member): pass def delete_member(self, context, member): self.plugin._delete_db_member(context, member["id"]) def update_pool_health_monitor(self, context, old_health_monitor, health_monitor, pool_association): pass def create_pool_health_monitor(self, context, health_monitor, pool_id): pass def delete_pool_health_monitor(self, context, health_monitor, pool_id): self.plugin._delete_db_pool_health_monitor( context, health_monitor["id"], pool_id ) class LoadBalancerTestMixin(object): resource_prefix_map = dict( (k, loadbalancer.LOADBALANCER_PREFIX) for k in loadbalancer.RESOURCE_ATTRIBUTE_MAP.keys() ) def _get_vip_optional_args(self): return ('description', 'subnet_id', 'address', 'session_persistence', 'connection_limit') def _create_vip(self, fmt, name, pool_id, protocol, protocol_port, admin_state_up, expected_res_status=None, **kwargs): data = {'vip': {'name': name, 'pool_id': pool_id, 'protocol': protocol, 'protocol_port': protocol_port, 'admin_state_up': admin_state_up, 'tenant_id': self._tenant_id}} args = self._get_vip_optional_args() for arg in args: if arg in kwargs 
and kwargs[arg] is not None: data['vip'][arg] = kwargs[arg] vip_req = self.new_create_request('vips', data, fmt) vip_res = vip_req.get_response(self.ext_api) if expected_res_status: self.assertEqual(expected_res_status, vip_res.status_int) return vip_res def _create_pool(self, fmt, name, lb_method, protocol, admin_state_up, subnet_id, expected_res_status=None, **kwargs): data = {'pool': {'name': name, 'subnet_id': subnet_id, 'lb_method': lb_method, 'protocol': protocol, 'admin_state_up': admin_state_up, 'tenant_id': self._tenant_id}} for arg in ('description', 'provider', 'subnet_id'): if arg in kwargs and kwargs[arg] is not None: data['pool'][arg] = kwargs[arg] pool_req = self.new_create_request('pools', data, fmt) pool_res = pool_req.get_response(self.ext_api) if expected_res_status: self.assertEqual(expected_res_status, pool_res.status_int) return pool_res def _create_member(self, fmt, address, protocol_port, admin_state_up, expected_res_status=None, **kwargs): data = {'member': {'address': address, 'protocol_port': protocol_port, 'admin_state_up': admin_state_up, 'tenant_id': self._tenant_id}} for arg in ('weight', 'pool_id'): if arg in kwargs and kwargs[arg] is not None: data['member'][arg] = kwargs[arg] member_req = self.new_create_request('members', data, fmt) member_res = member_req.get_response(self.ext_api) if expected_res_status: self.assertEqual(expected_res_status, member_res.status_int) return member_res def _create_health_monitor(self, fmt, type, delay, timeout, max_retries, admin_state_up, expected_res_status=None, **kwargs): data = {'health_monitor': {'type': type, 'delay': delay, 'timeout': timeout, 'max_retries': max_retries, 'admin_state_up': admin_state_up, 'tenant_id': self._tenant_id}} for arg in ('http_method', 'path', 'expected_code'): if arg in kwargs and kwargs[arg] is not None: data['health_monitor'][arg] = kwargs[arg] req = self.new_create_request('health_monitors', data, fmt) res = req.get_response(self.ext_api) if expected_res_status: 
self.assertEqual(expected_res_status, res.status_int) return res @contextlib.contextmanager def vip(self, fmt=None, name='vip1', pool=None, subnet=None, protocol='HTTP', protocol_port=80, admin_state_up=True, do_delete=True, **kwargs): if not fmt: fmt = self.fmt with test_db_base_plugin_v2.optional_ctx( subnet, self.subnet) as tmp_subnet: with test_db_base_plugin_v2.optional_ctx( pool, self.pool) as tmp_pool: pool_id = tmp_pool['pool']['id'] res = self._create_vip(fmt, name, pool_id, protocol, protocol_port, admin_state_up, subnet_id=tmp_subnet['subnet']['id'], **kwargs) if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError( explanation=_("Unexpected error code: %s") % res.status_int ) vip = self.deserialize(fmt or self.fmt, res) yield vip if do_delete: self._delete('vips', vip['vip']['id']) @contextlib.contextmanager def pool(self, fmt=None, name='pool1', lb_method='ROUND_ROBIN', protocol='HTTP', admin_state_up=True, do_delete=True, subnet_id=None, **kwargs): if not fmt: fmt = self.fmt subnet_id = subnet_id or _subnet_id res = self._create_pool(fmt, name, lb_method, protocol, admin_state_up, subnet_id, **kwargs) if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError( explanation=_("Unexpected error code: %s") % res.status_int ) pool = self.deserialize(fmt or self.fmt, res) yield pool if do_delete: self._delete('pools', pool['pool']['id']) @contextlib.contextmanager def member(self, fmt=None, address='192.168.1.100', protocol_port=80, admin_state_up=True, do_delete=True, **kwargs): if not fmt: fmt = self.fmt res = self._create_member(fmt, address, protocol_port, admin_state_up, **kwargs) if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError( explanation=_("Unexpected error code: %s") % res.status_int ) member = self.deserialize(fmt or self.fmt, res) yield member if do_delete: self._delete('members', member['member']['id']) @contextlib.contextmanager def health_monitor(self, 
fmt=None, type='TCP', delay=30, timeout=10, max_retries=3, admin_state_up=True, do_delete=True, **kwargs): if not fmt: fmt = self.fmt res = self._create_health_monitor(fmt, type, delay, timeout, max_retries, admin_state_up, **kwargs) if res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError( explanation=_("Unexpected error code: %s") % res.status_int ) health_monitor = self.deserialize(fmt or self.fmt, res) the_health_monitor = health_monitor['health_monitor'] # make sure: # 1. When the type is HTTP/S we have HTTP related attributes in # the result # 2. When the type is not HTTP/S we do not have HTTP related # attributes in the result http_related_attributes = ('http_method', 'url_path', 'expected_codes') if type in ['HTTP', 'HTTPS']: for arg in http_related_attributes: self.assertIsNotNone(the_health_monitor.get(arg)) else: for arg in http_related_attributes: self.assertIsNone(the_health_monitor.get(arg)) yield health_monitor if do_delete: self._delete('health_monitors', the_health_monitor['id']) class LoadBalancerPluginDbTestCase(LoadBalancerTestMixin, base.NeutronDbPluginV2TestCase): def setUp(self, core_plugin=None, lb_plugin=None, lbaas_provider=None, ext_mgr=None): service_plugins = {'lb_plugin_name': DB_LB_PLUGIN_KLASS} if not lbaas_provider: lbaas_provider = ( constants.LOADBALANCER + ':lbaas:' + NOOP_DRIVER_KLASS + ':default') # override the default service provider self.set_override([lbaas_provider]) # removing service-type because it resides in neutron and tests # dont care LBPlugin = loadbalancer_plugin.LoadBalancerPlugin sea_index = None for index, sea in enumerate(LBPlugin.supported_extension_aliases): if sea == 'service-type': sea_index = index if sea_index: del LBPlugin.supported_extension_aliases[sea_index] super(LoadBalancerPluginDbTestCase, self).setUp( ext_mgr=ext_mgr, service_plugins=service_plugins ) if not ext_mgr: self.plugin = loadbalancer_plugin.LoadBalancerPlugin() ext_mgr = 
extensions.PluginAwareExtensionManager( extensions_path, {constants.LOADBALANCER: self.plugin} ) app = config.load_paste_app('extensions_test_app') self.ext_api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr) get_lbaas_agent_patcher = mock.patch( 'neutron_lbaas.services.loadbalancer.agent_scheduler' '.LbaasAgentSchedulerDbMixin.get_lbaas_agent_hosting_pool') mock_lbaas_agent = mock.MagicMock() get_lbaas_agent_patcher.start().return_value = mock_lbaas_agent mock_lbaas_agent.__getitem__.return_value = {'host': 'host'} self._subnet_id = _subnet_id class TestLoadBalancer(LoadBalancerPluginDbTestCase): def test_create_vip(self, **extras): expected = { 'name': 'vip1', 'description': '', 'protocol_port': 80, 'protocol': 'HTTP', 'connection_limit': -1, 'admin_state_up': True, 'status': 'PENDING_CREATE', 'tenant_id': self._tenant_id, } expected.update(extras) with self.subnet() as subnet: expected['subnet_id'] = subnet['subnet']['id'] name = expected['name'] with self.vip(name=name, subnet=subnet, **extras) as vip: for k in ('id', 'address', 'port_id', 'pool_id'): self.assertTrue(vip['vip'].get(k, None)) self.assertEqual( expected, dict((k, v) for k, v in vip['vip'].items() if k in expected) ) return vip def test_create_vip_create_port_fails(self): with self.subnet() as subnet: with self.pool() as pool: lb_plugin = (manager.NeutronManager. get_instance(). 
get_service_plugins()[constants.LOADBALANCER]) with mock.patch.object( lb_plugin, '_create_port_for_vip') as cp: #some exception that can show up in port creation cp.side_effect = n_exc.IpAddressGenerationFailure( net_id=subnet['subnet']['network_id']) self._create_vip(self.fmt, "vip", pool['pool']['id'], "HTTP", "80", True, subnet_id=subnet['subnet']['id'], expected_res_status=409) req = self.new_list_request('vips') res = self.deserialize(self.fmt, req.get_response(self.ext_api)) self.assertFalse(res['vips']) def test_create_vip_twice_for_same_pool(self): """Test loadbalancer db plugin via extension and directly.""" with self.subnet() as subnet: with self.pool(name="pool1") as pool: with self.vip(name='vip1', subnet=subnet, pool=pool): vip_data = { 'name': 'vip1', 'pool_id': pool['pool']['id'], 'description': '', 'protocol_port': 80, 'protocol': 'HTTP', 'connection_limit': -1, 'admin_state_up': True, 'status': 'PENDING_CREATE', 'tenant_id': self._tenant_id, 'session_persistence': '' } self.assertRaises(loadbalancer.VipExists, self.plugin.create_vip, context.get_admin_context(), {'vip': vip_data}) def test_update_vip_raises_vip_exists(self): with self.subnet() as subnet: with contextlib.nested( self.pool(name="pool1"), self.pool(name="pool2") ) as (pool1, pool2): with contextlib.nested( self.vip(name='vip1', subnet=subnet, pool=pool1), self.vip(name='vip2', subnet=subnet, pool=pool2) ) as (vip1, vip2): vip_data = { 'id': vip2['vip']['id'], 'name': 'vip1', 'pool_id': pool1['pool']['id'], } self.assertRaises(loadbalancer.VipExists, self.plugin.update_vip, context.get_admin_context(), vip2['vip']['id'], {'vip': vip_data}) def test_update_vip_change_pool(self): with self.subnet() as subnet: with contextlib.nested( self.pool(name="pool1"), self.pool(name="pool2") ) as (pool1, pool2): with self.vip(name='vip1', subnet=subnet, pool=pool1) as vip: # change vip from pool1 to pool2 vip_data = { 'id': vip['vip']['id'], 'name': 'vip1', 'pool_id': pool2['pool']['id'], } ctx = 
context.get_admin_context() self.plugin.update_vip(ctx, vip['vip']['id'], {'vip': vip_data}) db_pool2 = (ctx.session.query(ldb.Pool). filter_by(id=pool2['pool']['id']).one()) db_pool1 = (ctx.session.query(ldb.Pool). filter_by(id=pool1['pool']['id']).one()) # check that pool1.vip became None self.assertIsNone(db_pool1.vip) # and pool2 got vip self.assertEqual(vip['vip']['id'], db_pool2.vip.id) def test_create_vip_with_invalid_values(self): invalid = { 'protocol': 'UNSUPPORTED', 'protocol_port': 'NOT_AN_INT', 'protocol_port': 1000500, 'subnet': {'subnet': {'id': 'invalid-subnet'}} } for param, value in invalid.items(): kwargs = {'name': 'the-vip', param: value} with testtools.ExpectedException(webob.exc.HTTPClientError): with self.vip(**kwargs): pass def test_create_vip_with_address(self): self.test_create_vip(address='10.0.0.7') def test_create_vip_with_address_outside_subnet(self): with testtools.ExpectedException(webob.exc.HTTPClientError): self.test_create_vip(address='9.9.9.9') def test_create_vip_with_session_persistence(self): self.test_create_vip(session_persistence={'type': 'HTTP_COOKIE'}) def test_create_vip_with_session_persistence_with_app_cookie(self): sp = {'type': 'APP_COOKIE', 'cookie_name': 'sessionId'} self.test_create_vip(session_persistence=sp) def test_create_vip_with_session_persistence_unsupported_type(self): with testtools.ExpectedException(webob.exc.HTTPClientError): self.test_create_vip(session_persistence={'type': 'UNSUPPORTED'}) def test_create_vip_with_unnecessary_cookie_name(self): sp = {'type': "SOURCE_IP", 'cookie_name': 'sessionId'} with testtools.ExpectedException(webob.exc.HTTPClientError): self.test_create_vip(session_persistence=sp) def test_create_vip_with_session_persistence_without_cookie_name(self): sp = {'type': "APP_COOKIE"} with testtools.ExpectedException(webob.exc.HTTPClientError): self.test_create_vip(session_persistence=sp) def test_create_vip_with_protocol_mismatch(self): with self.pool(protocol='TCP') as pool: with 
testtools.ExpectedException(webob.exc.HTTPClientError): self.test_create_vip(pool=pool, protocol='HTTP') def test_create_vip_with_gateway_ip(self): with testtools.ExpectedException(webob.exc.HTTPClientError): self.test_create_vip(address='10.0.0.1') def test_update_vip_with_protocol_mismatch(self): with self.pool(protocol='TCP') as pool: with self.vip(protocol='HTTP') as vip: data = {'vip': {'pool_id': pool['pool']['id']}} req = self.new_update_request('vips', data, vip['vip']['id']) res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_reset_session_persistence(self): name = 'vip4' session_persistence = {'type': "HTTP_COOKIE"} update_info = {'vip': {'session_persistence': None}} with self.vip(name=name, session_persistence=session_persistence) as v: # Ensure that vip has been created properly self.assertEqual(session_persistence, v['vip']['session_persistence']) # Try resetting session_persistence req = self.new_update_request('vips', update_info, v['vip']['id']) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) self.assertIsNone(res['vip']['session_persistence']) def test_update_vip(self): name = 'new_vip' keys = [('name', name), ('address', "10.0.0.2"), ('protocol_port', 80), ('connection_limit', 100), ('admin_state_up', False), ('status', 'PENDING_UPDATE')] with self.vip(name=name) as vip: keys.append(('subnet_id', vip['vip']['subnet_id'])) data = {'vip': {'name': name, 'connection_limit': 100, 'session_persistence': {'type': "APP_COOKIE", 'cookie_name': "jesssionId"}, 'admin_state_up': False}} req = self.new_update_request('vips', data, vip['vip']['id']) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) for k, v in keys: self.assertEqual(v, res['vip'][k]) def test_delete_vip(self): with self.pool(): with self.vip(do_delete=False) as vip: req = self.new_delete_request('vips', vip['vip']['id']) res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPNoContent.code, 
res.status_int) def test_show_vip(self): name = "vip_show" keys = [('name', name), ('address', "10.0.0.10"), ('protocol_port', 80), ('protocol', 'HTTP'), ('connection_limit', -1), ('admin_state_up', True), ('status', 'PENDING_CREATE')] with self.vip(name=name, address='10.0.0.10') as vip: req = self.new_show_request('vips', vip['vip']['id']) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) for k, v in keys: self.assertEqual(v, res['vip'][k]) def test_list_vips(self): name = "vips_list" keys = [('name', name), ('address', "10.0.0.2"), ('protocol_port', 80), ('protocol', 'HTTP'), ('connection_limit', -1), ('admin_state_up', True), ('status', 'PENDING_CREATE')] with self.vip(name=name) as vip: keys.append(('subnet_id', vip['vip']['subnet_id'])) req = self.new_list_request('vips') res = self.deserialize(self.fmt, req.get_response(self.ext_api)) self.assertEqual(1, len(res['vips'])) for k, v in keys: self.assertEqual(v, res['vips'][0][k]) def test_list_vips_with_sort_emulated(self): with self.subnet() as subnet: with contextlib.nested( self.vip(name='vip1', subnet=subnet, protocol_port=81), self.vip(name='vip2', subnet=subnet, protocol_port=82), self.vip(name='vip3', subnet=subnet, protocol_port=82) ) as (vip1, vip2, vip3): self._test_list_with_sort( 'vip', (vip1, vip3, vip2), [('protocol_port', 'asc'), ('name', 'desc')] ) def test_list_vips_with_pagination_emulated(self): with self.subnet() as subnet: with contextlib.nested(self.vip(name='vip1', subnet=subnet), self.vip(name='vip2', subnet=subnet), self.vip(name='vip3', subnet=subnet) ) as (vip1, vip2, vip3): self._test_list_with_pagination('vip', (vip1, vip2, vip3), ('name', 'asc'), 2, 2) def test_list_vips_with_pagination_reverse_emulated(self): with self.subnet() as subnet: with contextlib.nested(self.vip(name='vip1', subnet=subnet), self.vip(name='vip2', subnet=subnet), self.vip(name='vip3', subnet=subnet) ) as (vip1, vip2, vip3): self._test_list_with_pagination_reverse('vip', (vip1, vip2, vip3), 
('name', 'asc'), 2, 2) def test_create_pool_with_invalid_values(self): name = 'pool3' pool = self.pool(name=name, protocol='UNSUPPORTED') self.assertRaises(webob.exc.HTTPClientError, pool.__enter__) pool = self.pool(name=name, lb_method='UNSUPPORTED') self.assertRaises(webob.exc.HTTPClientError, pool.__enter__) def _create_pool_directly_via_plugin(self, provider_name): #default provider will be haproxy prov1 = (constants.LOADBALANCER + ':lbaas:' + NOOP_DRIVER_KLASS) prov2 = (constants.LOADBALANCER + ':haproxy:neutron_lbaas.services.loadbalancer.' 'drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver' ':default') # override the default service provider self.set_override([prov1, prov2]) self.plugin = loadbalancer_plugin.LoadBalancerPlugin() with self.subnet() as subnet: ctx = context.get_admin_context() #create pool with another provider - lbaas #which is noop driver pool = {'name': 'pool1', 'subnet_id': subnet['subnet']['id'], 'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP', 'admin_state_up': True, 'tenant_id': self._tenant_id, 'provider': provider_name, 'description': ''} self.plugin.create_pool(ctx, {'pool': pool}) assoc = ctx.session.query(sdb.ProviderResourceAssociation).one() self.assertEqual(pconf.normalize_provider_name(provider_name), assoc.provider_name) def test_create_pool_another_provider(self): self._create_pool_directly_via_plugin('lbaas') def test_create_pool_unnormalized_provider_name(self): self._create_pool_directly_via_plugin('LBAAS') def test_create_pool_unexisting_provider(self): self.assertRaises( pconf.ServiceProviderNotFound, self._create_pool_directly_via_plugin, 'unexisting') def test_create_pool(self): name = "pool1" keys = [('name', name), ('subnet_id', self._subnet_id), ('tenant_id', self._tenant_id), ('protocol', 'HTTP'), ('lb_method', 'ROUND_ROBIN'), ('admin_state_up', True), ('status', 'PENDING_CREATE')] with self.pool(name=name) as pool: for k, v in keys: self.assertEqual(v, pool['pool'][k]) def test_create_pool_with_members(self): 
name = "pool2" with self.pool(name=name) as pool: pool_id = pool['pool']['id'] res1 = self._create_member(self.fmt, '192.168.1.100', '80', True, pool_id=pool_id, weight=1) req = self.new_show_request('pools', pool_id, fmt=self.fmt) pool_updated = self.deserialize( self.fmt, req.get_response(self.ext_api) ) member1 = self.deserialize(self.fmt, res1) self.assertEqual(member1['member']['id'], pool_updated['pool']['members'][0]) self.assertEqual(1, len(pool_updated['pool']['members'])) keys = [('address', '192.168.1.100'), ('protocol_port', 80), ('weight', 1), ('pool_id', pool_id), ('admin_state_up', True), ('status', 'PENDING_CREATE')] for k, v in keys: self.assertEqual(v, member1['member'][k]) self._delete('members', member1['member']['id']) def test_delete_pool(self): with self.pool(do_delete=False) as pool: with self.member(do_delete=False, pool_id=pool['pool']['id']): req = self.new_delete_request('pools', pool['pool']['id']) res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) def test_delete_pool_preserve_state(self): with self.pool(do_delete=False) as pool: with self.vip(pool=pool): req = self.new_delete_request('pools', pool['pool']['id']) res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) req = self.new_show_request('pools', pool['pool']['id'], fmt=self.fmt) res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPOk.code, res.status_int) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) self.assertEqual(constants.PENDING_CREATE, res['pool']['status']) req = self.new_delete_request('pools', pool['pool']['id']) def test_delete_subnet_with_pool(self): registry.subscribe(ldb.is_subnet_in_use_callback, resources.SUBNET, events.BEFORE_DELETE) try: with self.subnet() as subnet: with self.pool(subnet_id=subnet['subnet']['id']): req = self.new_delete_request('subnets', subnet['subnet']['id']) res = req.get_response(self.api) 
self.assertIn('NeutronError', res.json) self.assertEqual('SubnetInUse', res.json['NeutronError']['type']) self.assertEqual(409, res.status_code) finally: registry.unsubscribe(ldb.is_subnet_in_use_callback, resources.SUBNET, events.BEFORE_DELETE) def test_show_pool(self): name = "pool1" keys = [('name', name), ('subnet_id', self._subnet_id), ('tenant_id', self._tenant_id), ('protocol', 'HTTP'), ('lb_method', 'ROUND_ROBIN'), ('admin_state_up', True), ('status', 'PENDING_CREATE')] with self.pool(name=name) as pool: req = self.new_show_request('pools', pool['pool']['id'], fmt=self.fmt) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) for k, v in keys: self.assertEqual(v, res['pool'][k]) def test_list_pools_with_sort_emulated(self): with contextlib.nested(self.pool(name='p1'), self.pool(name='p2'), self.pool(name='p3') ) as (p1, p2, p3): self._test_list_with_sort('pool', (p3, p2, p1), [('name', 'desc')]) def test_list_pools_with_pagination_emulated(self): with contextlib.nested(self.pool(name='p1'), self.pool(name='p2'), self.pool(name='p3') ) as (p1, p2, p3): self._test_list_with_pagination('pool', (p1, p2, p3), ('name', 'asc'), 2, 2) def test_list_pools_with_pagination_reverse_emulated(self): with contextlib.nested(self.pool(name='p1'), self.pool(name='p2'), self.pool(name='p3') ) as (p1, p2, p3): self._test_list_with_pagination_reverse('pool', (p1, p2, p3), ('name', 'asc'), 2, 2) def test_create_member(self): with self.pool() as pool: pool_id = pool['pool']['id'] with self.member(address='192.168.1.100', protocol_port=80, pool_id=pool_id) as member1: with self.member(address='192.168.1.101', protocol_port=80, pool_id=pool_id) as member2: req = self.new_show_request('pools', pool_id, fmt=self.fmt) pool_update = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self.assertIn(member1['member']['id'], pool_update['pool']['members']) self.assertIn(member2['member']['id'], pool_update['pool']['members']) def 
test_create_same_member_in_same_pool_raises_member_exists(self): with self.subnet(): with self.pool(name="pool1") as pool: pool_id = pool['pool']['id'] with self.member(address='192.168.1.100', protocol_port=80, pool_id=pool_id): member_data = { 'address': '192.168.1.100', 'protocol_port': 80, 'weight': 1, 'admin_state_up': True, 'pool_id': pool_id, 'tenant_id': self._tenant_id } self.assertRaises(loadbalancer.MemberExists, self.plugin.create_member, context.get_admin_context(), {'member': member_data}) def test_update_member(self): with self.pool(name="pool1") as pool1: with self.pool(name="pool2") as pool2: keys = [('address', "192.168.1.100"), ('tenant_id', self._tenant_id), ('protocol_port', 80), ('weight', 10), ('pool_id', pool2['pool']['id']), ('admin_state_up', False), ('status', 'PENDING_UPDATE')] with self.member(pool_id=pool1['pool']['id']) as member: req = self.new_show_request('pools', pool1['pool']['id'], fmt=self.fmt) pool1_update = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self.assertEqual(1, len(pool1_update['pool']['members'])) req = self.new_show_request('pools', pool2['pool']['id'], fmt=self.fmt) pool2_update = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self.assertEqual(1, len(pool1_update['pool']['members'])) self.assertEqual(0, len(pool2_update['pool']['members'])) data = {'member': {'pool_id': pool2['pool']['id'], 'weight': 10, 'admin_state_up': False}} req = self.new_update_request('members', data, member['member']['id']) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) for k, v in keys: self.assertEqual(v, res['member'][k]) req = self.new_show_request('pools', pool1['pool']['id'], fmt=self.fmt) pool1_update = self.deserialize( self.fmt, req.get_response(self.ext_api) ) req = self.new_show_request('pools', pool2['pool']['id'], fmt=self.fmt) pool2_update = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self.assertEqual(1, len(pool2_update['pool']['members'])) 
self.assertEqual(0, len(pool1_update['pool']['members'])) def test_delete_member(self): with self.pool() as pool: pool_id = pool['pool']['id'] with self.member(pool_id=pool_id, do_delete=False) as member: req = self.new_delete_request('members', member['member']['id']) res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) req = self.new_show_request('pools', pool_id, fmt=self.fmt) pool_update = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self.assertEqual(0, len(pool_update['pool']['members'])) def test_show_member(self): with self.pool() as pool: keys = [('address', "192.168.1.100"), ('tenant_id', self._tenant_id), ('protocol_port', 80), ('weight', 1), ('pool_id', pool['pool']['id']), ('admin_state_up', True), ('status', 'PENDING_CREATE')] with self.member(pool_id=pool['pool']['id']) as member: req = self.new_show_request('members', member['member']['id'], fmt=self.fmt) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) for k, v in keys: self.assertEqual(v, res['member'][k]) def test_list_members_with_sort_emulated(self): with self.pool() as pool: with contextlib.nested(self.member(pool_id=pool['pool']['id'], protocol_port=81), self.member(pool_id=pool['pool']['id'], protocol_port=82), self.member(pool_id=pool['pool']['id'], protocol_port=83) ) as (m1, m2, m3): self._test_list_with_sort('member', (m3, m2, m1), [('protocol_port', 'desc')]) def test_list_members_with_pagination_emulated(self): with self.pool() as pool: with contextlib.nested(self.member(pool_id=pool['pool']['id'], protocol_port=81), self.member(pool_id=pool['pool']['id'], protocol_port=82), self.member(pool_id=pool['pool']['id'], protocol_port=83) ) as (m1, m2, m3): self._test_list_with_pagination( 'member', (m1, m2, m3), ('protocol_port', 'asc'), 2, 2 ) def test_list_members_with_pagination_reverse_emulated(self): with self.pool() as pool: with contextlib.nested(self.member(pool_id=pool['pool']['id'], protocol_port=81), 
self.member(pool_id=pool['pool']['id'], protocol_port=82), self.member(pool_id=pool['pool']['id'], protocol_port=83) ) as (m1, m2, m3): self._test_list_with_pagination_reverse( 'member', (m1, m2, m3), ('protocol_port', 'asc'), 2, 2 ) def test_create_healthmonitor(self): keys = [('type', "TCP"), ('tenant_id', self._tenant_id), ('delay', 30), ('timeout', 10), ('max_retries', 3), ('admin_state_up', True)] with self.health_monitor() as monitor: for k, v in keys: self.assertEqual(v, monitor['health_monitor'][k]) def test_create_health_monitor_with_timeout_delay_invalid(self): data = {'health_monitor': {'type': type, 'delay': 3, 'timeout': 6, 'max_retries': 2, 'admin_state_up': True, 'tenant_id': self._tenant_id}} req = self.new_create_request('health_monitors', data, self.fmt) res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_update_health_monitor_with_timeout_delay_invalid(self): with self.health_monitor() as monitor: data = {'health_monitor': {'delay': 10, 'timeout': 20, 'max_retries': 2, 'admin_state_up': False}} req = self.new_update_request("health_monitors", data, monitor['health_monitor']['id']) res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_update_healthmonitor(self): keys = [('type', "TCP"), ('tenant_id', self._tenant_id), ('delay', 20), ('timeout', 20), ('max_retries', 2), ('admin_state_up', False)] with self.health_monitor() as monitor: data = {'health_monitor': {'delay': 20, 'timeout': 20, 'max_retries': 2, 'admin_state_up': False}} req = self.new_update_request("health_monitors", data, monitor['health_monitor']['id']) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) for k, v in keys: self.assertEqual(v, res['health_monitor'][k]) def test_delete_healthmonitor(self): with self.health_monitor(do_delete=False) as monitor: ctx = context.get_admin_context() qry = ctx.session.query(ldb.HealthMonitor) qry = 
qry.filter_by(id=monitor['health_monitor']['id']) self.assertIsNotNone(qry.first()) req = self.new_delete_request('health_monitors', monitor['health_monitor']['id']) res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) qry = ctx.session.query(ldb.HealthMonitor) qry = qry.filter_by(id=monitor['health_monitor']['id']) self.assertIsNone(qry.first()) def test_delete_healthmonitor_with_associations_raises(self): with self.health_monitor(type='HTTP') as monitor: with self.pool() as pool: data = { 'health_monitor': { 'id': monitor['health_monitor']['id'], 'tenant_id': self._tenant_id } } req = self.new_create_request( 'pools', data, fmt=self.fmt, id=pool['pool']['id'], subresource='health_monitors') res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) ctx = context.get_admin_context() # check if we actually have corresponding Pool associations qry = ctx.session.query(ldb.PoolMonitorAssociation) qry = qry.filter_by(monitor_id=monitor['health_monitor']['id']) self.assertTrue(qry.all()) # try to delete the HealthMonitor instance req = self.new_delete_request( 'health_monitors', monitor['health_monitor']['id'] ) res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) qry = ctx.session.query(ldb.HealthMonitor) qry = qry.filter_by(id=monitor['health_monitor']['id']) self.assertIsNotNone(qry.first()) # check if all corresponding Pool associations are not deleted qry = ctx.session.query(ldb.PoolMonitorAssociation) qry = qry.filter_by(monitor_id=monitor['health_monitor']['id']) self.assertTrue(qry.all()) def test_show_healthmonitor(self): with self.health_monitor() as monitor: keys = [('type', "TCP"), ('tenant_id', self._tenant_id), ('delay', 30), ('timeout', 10), ('max_retries', 3), ('admin_state_up', True)] req = self.new_show_request('health_monitors', monitor['health_monitor']['id'], fmt=self.fmt) res = self.deserialize(self.fmt, 
req.get_response(self.ext_api)) for k, v in keys: self.assertEqual(v, res['health_monitor'][k]) def test_list_healthmonitors_with_sort_emulated(self): with contextlib.nested(self.health_monitor(delay=30), self.health_monitor(delay=31), self.health_monitor(delay=32) ) as (m1, m2, m3): self._test_list_with_sort('health_monitor', (m3, m2, m1), [('delay', 'desc')]) def test_list_healthmonitors_with_pagination_emulated(self): with contextlib.nested(self.health_monitor(delay=30), self.health_monitor(delay=31), self.health_monitor(delay=32) ) as (m1, m2, m3): self._test_list_with_pagination('health_monitor', (m1, m2, m3), ('delay', 'asc'), 2, 2) def test_list_healthmonitors_with_pagination_reverse_emulated(self): with contextlib.nested(self.health_monitor(delay=30), self.health_monitor(delay=31), self.health_monitor(delay=32) ) as (m1, m2, m3): self._test_list_with_pagination_reverse('health_monitor', (m1, m2, m3), ('delay', 'asc'), 2, 2) def test_update_pool_invalid_lb_method(self): with self.pool() as pool: update_data = {'pool': {'lb_method': 'dummy'}} req = self.new_update_request('pools', update_data, pool['pool']['id'], fmt=self.fmt) res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_update_pool_stats_with_no_stats(self): keys = ["bytes_in", "bytes_out", "active_connections", "total_connections"] with self.pool() as pool: pool_id = pool['pool']['id'] ctx = context.get_admin_context() self.plugin.update_pool_stats(ctx, pool_id) pool_obj = ctx.session.query(ldb.Pool).filter_by(id=pool_id).one() for key in keys: self.assertEqual(0, pool_obj.stats.__dict__[key]) def test_update_pool_stats_with_negative_values(self): stats_data = {"bytes_in": -1, "bytes_out": -2, "active_connections": -3, "total_connections": -4} for k, v in stats_data.items(): self._test_update_pool_stats_with_negative_value(k, v) def _test_update_pool_stats_with_negative_value(self, k, v): with self.pool() as pool: pool_id = pool['pool']['id'] 
ctx = context.get_admin_context() self.assertRaises(ValueError, self.plugin.update_pool_stats, ctx, pool_id, {k: v}) def test_update_pool_stats(self): stats_data = {"bytes_in": 1, "bytes_out": 2, "active_connections": 3, "total_connections": 4} with self.pool() as pool: pool_id = pool['pool']['id'] ctx = context.get_admin_context() self.plugin.update_pool_stats(ctx, pool_id, stats_data) pool_obj = ctx.session.query(ldb.Pool).filter_by(id=pool_id).one() for k, v in stats_data.items(): self.assertEqual(v, pool_obj.stats.__dict__[k]) def test_update_pool_stats_members_statuses(self): with self.pool() as pool: pool_id = pool['pool']['id'] with self.member(pool_id=pool_id) as member: member_id = member['member']['id'] stats_data = {'members': { member_id: { 'status': 'INACTIVE' } }} ctx = context.get_admin_context() member = self.plugin.get_member(ctx, member_id) self.assertEqual('PENDING_CREATE', member['status']) self.plugin.update_pool_stats(ctx, pool_id, stats_data) member = self.plugin.get_member(ctx, member_id) self.assertEqual('INACTIVE', member['status']) def test_get_pool_stats(self): keys = [("bytes_in", 0), ("bytes_out", 0), ("active_connections", 0), ("total_connections", 0)] with self.pool() as pool: req = self.new_show_request("pools", pool['pool']['id'], subresource="stats", fmt=self.fmt) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) for k, v in keys: self.assertEqual(v, res['stats'][k]) def test_create_healthmonitor_of_pool(self): with self.health_monitor(type="TCP") as monitor1: with self.health_monitor(type="HTTP") as monitor2: with self.pool() as pool: data = {"health_monitor": { "id": monitor1['health_monitor']['id'], 'tenant_id': self._tenant_id}} req = self.new_create_request( "pools", data, fmt=self.fmt, id=pool['pool']['id'], subresource="health_monitors") res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) data = {"health_monitor": { "id": monitor2['health_monitor']['id'], 
'tenant_id': self._tenant_id}} req = self.new_create_request( "pools", data, fmt=self.fmt, id=pool['pool']['id'], subresource="health_monitors") res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) req = self.new_show_request( 'pools', pool['pool']['id'], fmt=self.fmt) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self.assertIn(monitor1['health_monitor']['id'], res['pool']['health_monitors']) self.assertIn(monitor2['health_monitor']['id'], res['pool']['health_monitors']) expected = [ {'monitor_id': monitor1['health_monitor']['id'], 'status': 'PENDING_CREATE', 'status_description': None}, {'monitor_id': monitor2['health_monitor']['id'], 'status': 'PENDING_CREATE', 'status_description': None}] self.assertEqual( sorted(expected), sorted(res['pool']['health_monitors_status'])) def test_delete_healthmonitor_of_pool(self): with self.health_monitor(type="TCP") as monitor1: with self.health_monitor(type="HTTP") as monitor2: with self.pool() as pool: # add the monitors to the pool data = {"health_monitor": { "id": monitor1['health_monitor']['id'], 'tenant_id': self._tenant_id}} req = self.new_create_request( "pools", data, fmt=self.fmt, id=pool['pool']['id'], subresource="health_monitors") res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) data = {"health_monitor": { "id": monitor2['health_monitor']['id'], 'tenant_id': self._tenant_id}} req = self.new_create_request( "pools", data, fmt=self.fmt, id=pool['pool']['id'], subresource="health_monitors") res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) # remove one of healthmonitor from the pool req = self.new_delete_request( "pools", fmt=self.fmt, id=pool['pool']['id'], sub_id=monitor1['health_monitor']['id'], subresource="health_monitors") res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) req = self.new_show_request( 
'pools', pool['pool']['id'], fmt=self.fmt) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self.assertNotIn(monitor1['health_monitor']['id'], res['pool']['health_monitors']) self.assertIn(monitor2['health_monitor']['id'], res['pool']['health_monitors']) expected = [ {'monitor_id': monitor2['health_monitor']['id'], 'status': 'PENDING_CREATE', 'status_description': None} ] self.assertEqual(expected, res['pool']['health_monitors_status']) def test_create_loadbalancer(self): vip_name = "vip3" pool_name = "pool3" with self.pool(name=pool_name) as pool: with self.vip(name=vip_name, pool=pool) as vip: pool_id = pool['pool']['id'] vip_id = vip['vip']['id'] # Add two members res1 = self._create_member(self.fmt, '192.168.1.100', '80', True, pool_id=pool_id, weight=1) res2 = self._create_member(self.fmt, '192.168.1.101', '80', True, pool_id=pool_id, weight=2) # Add a health_monitor req = self._create_health_monitor(self.fmt, 'HTTP', '10', '10', '3', True) health_monitor = self.deserialize(self.fmt, req) self.assertEqual(webob.exc.HTTPCreated.code, req.status_int) # Associate the health_monitor to the pool data = {"health_monitor": { "id": health_monitor['health_monitor']['id'], 'tenant_id': self._tenant_id}} req = self.new_create_request("pools", data, fmt=self.fmt, id=pool['pool']['id'], subresource="health_monitors") res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) # Get pool and vip req = self.new_show_request('pools', pool_id, fmt=self.fmt) pool_updated = self.deserialize( self.fmt, req.get_response(self.ext_api) ) member1 = self.deserialize(self.fmt, res1) member2 = self.deserialize(self.fmt, res2) self.assertIn(member1['member']['id'], pool_updated['pool']['members']) self.assertIn(member2['member']['id'], pool_updated['pool']['members']) self.assertIn(health_monitor['health_monitor']['id'], pool_updated['pool']['health_monitors']) expected = [ {'monitor_id': health_monitor['health_monitor']['id'], 
'status': 'PENDING_CREATE', 'status_description': None} ] self.assertEqual( expected, pool_updated['pool']['health_monitors_status']) req = self.new_show_request('vips', vip_id, fmt=self.fmt) vip_updated = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self.assertEqual(pool_updated['pool']['id'], vip_updated['vip']['pool_id']) # clean up # disassociate the health_monitor from the pool first req = self.new_delete_request( "pools", fmt=self.fmt, id=pool['pool']['id'], subresource="health_monitors", sub_id=health_monitor['health_monitor']['id']) res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) self._delete('health_monitors', health_monitor['health_monitor']['id']) self._delete('members', member1['member']['id']) self._delete('members', member2['member']['id']) def test_create_pool_health_monitor(self): with contextlib.nested( self.health_monitor(), self.health_monitor(), self.pool(name="pool") ) as (health_mon1, health_mon2, pool): res = self.plugin.create_pool_health_monitor( context.get_admin_context(), health_mon1, pool['pool']['id'] ) self.assertEqual({'health_monitor': [health_mon1['health_monitor']['id']]}, res) res = self.plugin.create_pool_health_monitor( context.get_admin_context(), health_mon2, pool['pool']['id'] ) self.assertEqual({'health_monitor': [health_mon1['health_monitor']['id'], health_mon2['health_monitor']['id']]}, res) res = self.plugin.get_pool_health_monitor( context.get_admin_context(), health_mon2['health_monitor']['id'], pool['pool']['id']) self.assertEqual(health_mon1['health_monitor']['tenant_id'], res['tenant_id']) def test_driver_call_create_pool_health_monitor(self): with mock.patch.object(self.plugin.drivers['lbaas'], 'create_pool_health_monitor') as driver_call: with contextlib.nested( self.health_monitor(), self.pool() ) as (hm, pool): data = {'health_monitor': { 'id': hm['health_monitor']['id'], 'tenant_id': self._tenant_id}} self.plugin.create_pool_health_monitor( 
context.get_admin_context(), data, pool['pool']['id'] ) hm['health_monitor']['pools'] = [ {'pool_id': pool['pool']['id'], 'status': 'PENDING_CREATE', 'status_description': None}] driver_call.assert_called_once_with( mock.ANY, hm['health_monitor'], pool['pool']['id']) def test_pool_monitor_list_of_pools(self): with contextlib.nested( self.health_monitor(), self.pool(), self.pool() ) as (hm, p1, p2): ctx = context.get_admin_context() data = {'health_monitor': { 'id': hm['health_monitor']['id'], 'tenant_id': self._tenant_id}} self.plugin.create_pool_health_monitor( ctx, data, p1['pool']['id']) self.plugin.create_pool_health_monitor( ctx, data, p2['pool']['id']) healthmon = self.plugin.get_health_monitor( ctx, hm['health_monitor']['id']) pool_data = [{'pool_id': p1['pool']['id'], 'status': 'PENDING_CREATE', 'status_description': None}, {'pool_id': p2['pool']['id'], 'status': 'PENDING_CREATE', 'status_description': None}] self.assertEqual(sorted(pool_data), sorted(healthmon['pools'])) req = self.new_show_request( 'health_monitors', hm['health_monitor']['id'], fmt=self.fmt) hm = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self.assertEqual(sorted(pool_data), sorted(hm['health_monitor']['pools'])) def test_create_pool_health_monitor_already_associated(self): with contextlib.nested( self.health_monitor(), self.pool(name="pool") ) as (hm, pool): res = self.plugin.create_pool_health_monitor( context.get_admin_context(), hm, pool['pool']['id'] ) self.assertEqual({'health_monitor': [hm['health_monitor']['id']]}, res) self.assertRaises(loadbalancer.PoolMonitorAssociationExists, self.plugin.create_pool_health_monitor, context.get_admin_context(), hm, pool['pool']['id']) def test_create_pool_healthmon_invalid_pool_id(self): with self.health_monitor() as healthmon: self.assertRaises(loadbalancer.PoolNotFound, self.plugin.create_pool_health_monitor, context.get_admin_context(), healthmon, "123-456-789" ) def 
test_create_pool_healthmon_invalid_health_monitor_id(self): with self.pool() as pool: healthmon = {'health_monitor': {'id': '123-456-789'}} self.assertRaises(loadbalancer.HealthMonitorNotFound, self.plugin.create_pool_health_monitor, context.get_admin_context(), healthmon, pool['pool']['id'] ) def test_update_status(self): with self.pool() as pool: self.assertEqual('PENDING_CREATE', pool['pool']['status']) self.assertFalse(pool['pool']['status_description']) self.plugin.update_status(context.get_admin_context(), ldb.Pool, pool['pool']['id'], 'ERROR', 'unknown') updated_pool = self.plugin.get_pool(context.get_admin_context(), pool['pool']['id']) self.assertEqual('ERROR', updated_pool['status']) self.assertEqual('unknown', updated_pool['status_description']) # update status to ACTIVE, status_description should be cleared self.plugin.update_status(context.get_admin_context(), ldb.Pool, pool['pool']['id'], 'ACTIVE') updated_pool = self.plugin.get_pool(context.get_admin_context(), pool['pool']['id']) self.assertEqual('ACTIVE', updated_pool['status']) self.assertFalse(updated_pool['status_description']) def test_update_pool_health_monitor(self): with contextlib.nested( self.health_monitor(), self.pool(name="pool") ) as (hm, pool): res = self.plugin.create_pool_health_monitor( context.get_admin_context(), hm, pool['pool']['id']) self.assertEqual({'health_monitor': [hm['health_monitor']['id']]}, res) assoc = self.plugin.get_pool_health_monitor( context.get_admin_context(), hm['health_monitor']['id'], pool['pool']['id']) self.assertEqual('PENDING_CREATE', assoc['status']) self.assertIsNone(assoc['status_description']) self.plugin.update_pool_health_monitor( context.get_admin_context(), hm['health_monitor']['id'], pool['pool']['id'], 'ACTIVE', 'ok') assoc = self.plugin.get_pool_health_monitor( context.get_admin_context(), hm['health_monitor']['id'], pool['pool']['id']) self.assertEqual('ACTIVE', assoc['status']) self.assertEqual('ok', assoc['status_description']) def 
test_check_orphan_pool_associations(self): with contextlib.nested( #creating pools with default noop driver self.pool(), self.pool() ) as (p1, p2): #checking that 3 associations exist ctx = context.get_admin_context() qry = ctx.session.query(sdb.ProviderResourceAssociation) self.assertEqual(2, qry.count()) #removing driver self.set_override([ constants.LOADBALANCER + ':lbaas1:' + NOOP_DRIVER_KLASS + ':default' ]) # calling _remove_orphan... in constructor self.assertRaises( SystemExit, loadbalancer_plugin.LoadBalancerPlugin ) def test_port_delete_via_port_api(self): port = { 'id': 'my_port_id', 'device_owner': n_constants.DEVICE_OWNER_LOADBALANCER } ctx = context.get_admin_context() port['device_owner'] = n_constants.DEVICE_OWNER_LOADBALANCER myvips = [{'name': 'vip1'}] with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp: self.plugin.get_vips = mock.Mock(return_value=myvips) plugin = mock.Mock() gp.return_value = plugin plugin._get_port.return_value = port self.assertRaises(n_exc.ServicePortInUse, self.plugin.prevent_lbaas_port_deletion, ctx, port['id']) neutron-lbaas-8.0.0/neutron_lbaas/tests/tools.py0000664000567000056710000000135112701407726023150 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import random import string # NOTE(ihrachys): this function is copied from neutron tree def get_random_string(n=10): return ''.join(random.choice(string.ascii_lowercase) for _ in range(n)) neutron-lbaas-8.0.0/neutron_lbaas/opts.py0000664000567000056710000000654112701407726021641 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import neutron.agent.common.config import neutron.agent.linux.interface import neutron.services.provider_configuration import neutron_lbaas.agent.agent import neutron_lbaas.common.cert_manager import neutron_lbaas.common.cert_manager.local_cert_manager import neutron_lbaas.common.keystone import neutron_lbaas.drivers.common.agent_driver_base import neutron_lbaas.drivers.octavia.driver import neutron_lbaas.drivers.radware.base_v2_driver import neutron_lbaas.extensions.loadbalancerv2 import neutron_lbaas.services.loadbalancer.agent.agent_manager import neutron_lbaas.services.loadbalancer.drivers.haproxy.jinja_cfg import neutron_lbaas.services.loadbalancer.drivers.haproxy.namespace_driver import neutron_lbaas.services.loadbalancer.drivers.netscaler.netscaler_driver import neutron_lbaas.services.loadbalancer.drivers.radware.driver def list_agent_opts(): return [ ('DEFAULT', itertools.chain( neutron_lbaas.agent.agent.OPTS, neutron_lbaas.services.loadbalancer.agent.agent_manager.OPTS, neutron.agent.linux.interface.OPTS, neutron.agent.common.config.INTERFACE_DRIVER_OPTS) ), ('haproxy', 
neutron_lbaas.services.loadbalancer.drivers.haproxy. namespace_driver.OPTS) ] def list_opts(): return [ ('DEFAULT', neutron_lbaas.drivers.common.agent_driver_base.AGENT_SCHEDULER_OPTS), ('quotas', neutron_lbaas.extensions.loadbalancerv2.lbaasv2_quota_opts), ('service_auth', neutron_lbaas.common.keystone.OPTS), ('service_providers', neutron.services.provider_configuration.serviceprovider_opts), ('certificates', itertools.chain( neutron_lbaas.common.cert_manager.cert_manager_opts, neutron_lbaas.common.cert_manager.local_cert_manager. local_cert_manager_opts) ) ] def list_service_opts(): return [ ('radware', neutron_lbaas.services.loadbalancer.drivers.radware.driver. driver_opts), ('radwarev2', neutron_lbaas.drivers.radware.base_v2_driver.driver_opts), ('radwarev2_debug', neutron_lbaas.drivers.radware.base_v2_driver.driver_debug_opts), ('netscaler_driver', neutron_lbaas.services.loadbalancer.drivers.netscaler. netscaler_driver.NETSCALER_CC_OPTS), ('haproxy', itertools.chain( neutron.agent.common.config.INTERFACE_DRIVER_OPTS, neutron_lbaas.agent.agent.OPTS, neutron_lbaas.services.loadbalancer.drivers.haproxy. namespace_driver.OPTS, neutron_lbaas.services.loadbalancer.drivers.haproxy.jinja_cfg. 
jinja_opts) ), ('octavia', neutron_lbaas.drivers.octavia.driver.OPTS) ] neutron-lbaas-8.0.0/neutron_lbaas/__init__.py0000664000567000056710000000000012701407726022373 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/0000775000567000056710000000000012701410110022074 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/__init__.py0000664000567000056710000000000012701407726024216 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/0000775000567000056710000000000012701410110024503 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/plugin.py0000664000567000056710000017552712701407726026417 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Radware LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import six from neutron.api.v2 import attributes as attrs from neutron import context as ncontext from neutron.db import servicetype_db as st_db from neutron.extensions import flavors from neutron import manager from neutron.plugins.common import constants from neutron.services.flavors import flavors_plugin from neutron.services import provider_configuration as pconf from neutron.services import service_base from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from neutron_lbaas._i18n import _LI, _LE from neutron_lbaas import agent_scheduler as agent_scheduler_v2 import neutron_lbaas.common.cert_manager from neutron_lbaas.common.tls_utils import cert_parser from neutron_lbaas.db.loadbalancer import loadbalancer_db as ldb from neutron_lbaas.db.loadbalancer import loadbalancer_dbv2 as ldbv2 from neutron_lbaas.db.loadbalancer import models from neutron_lbaas.extensions import lbaas_agentschedulerv2 from neutron_lbaas.extensions import loadbalancer as lb_ext from neutron_lbaas.extensions import loadbalancerv2 from neutron_lbaas.extensions import sharedpools from neutron_lbaas.services.loadbalancer import agent_scheduler from neutron_lbaas.services.loadbalancer import constants as lb_const from neutron_lbaas.services.loadbalancer import data_models LOG = logging.getLogger(__name__) CERT_MANAGER_PLUGIN = neutron_lbaas.common.cert_manager.get_backend() def verify_lbaas_mutual_exclusion(): """Verifies lbaas v1 and lbaas v2 cannot be active concurrently.""" plugins = set([LoadBalancerPlugin.__name__, LoadBalancerPluginv2.__name__]) cfg_sps = set([sp.split('.')[-1] for sp in cfg.CONF.service_plugins]) if len(plugins.intersection(cfg_sps)) >= 2: msg = _LE("Cannot have service plugins %(v1)s and %(v2)s active at " "the same time!") % {'v1': LoadBalancerPlugin.__name__, 'v2': LoadBalancerPluginv2.__name__} LOG.error(msg) raise SystemExit(1) def add_provider_configuration(type_manager, 
service_type): type_manager.add_provider_configuration( service_type, pconf.ProviderConfiguration('neutron_lbaas')) class LoadBalancerPlugin(ldb.LoadBalancerPluginDb, agent_scheduler.LbaasAgentSchedulerDbMixin): """Implementation of the Neutron Loadbalancer Service Plugin. This class manages the workflow of LBaaS request/response. Most DB related works are implemented in class loadbalancer_db.LoadBalancerPluginDb. """ supported_extension_aliases = ["lbaas", "lbaas_agent_scheduler", "service-type"] path_prefix = lb_ext.LOADBALANCER_PREFIX # lbaas agent notifiers to handle agent update operations; # can be updated by plugin drivers while loading; # will be extracted by neutron manager when loading service plugins; agent_notifiers = {} def __init__(self): """Initialization for the loadbalancer service plugin.""" self.service_type_manager = st_db.ServiceTypeManager.get_instance() add_provider_configuration( self.service_type_manager, constants.LOADBALANCER) self._load_drivers() super(LoadBalancerPlugin, self).subscribe() def _load_drivers(self): """Loads plugin-drivers specified in configuration.""" self.drivers, self.default_provider = service_base.load_drivers( constants.LOADBALANCER, self) # NOTE(blogan): this method MUST be called after # service_base.load_drivers to correctly verify verify_lbaas_mutual_exclusion() ctx = ncontext.get_admin_context() # stop service in case provider was removed, but resources were not self._check_orphan_pool_associations(ctx, self.drivers.keys()) def _check_orphan_pool_associations(self, context, provider_names): """Checks remaining associations between pools and providers. If admin has not undeployed resources with provider that was deleted from configuration, neutron service is stopped. Admin must delete resources prior to removing providers from configuration. 
""" pools = self.get_pools(context) lost_providers = set([pool['provider'] for pool in pools if pool['provider'] not in provider_names]) # resources are left without provider - stop the service if lost_providers: LOG.exception(_LE("Delete associated loadbalancer pools before " "removing providers %s"), list(lost_providers)) raise SystemExit(1) def _get_driver_for_provider(self, provider): if provider in self.drivers: return self.drivers[provider] # raise if not associated (should never be reached) raise n_exc.Invalid(_LE("Error retrieving driver for provider %s") % provider) def _get_driver_for_pool(self, context, pool_id): pool = self.get_pool(context, pool_id) try: return self.drivers[pool['provider']] except KeyError: raise n_exc.Invalid(_LE("Error retrieving provider for pool %s") % pool_id) def get_plugin_type(self): return constants.LOADBALANCER def get_plugin_description(self): return "Neutron LoadBalancer Service Plugin" def create_vip(self, context, vip): v = super(LoadBalancerPlugin, self).create_vip(context, vip) driver = self._get_driver_for_pool(context, v['pool_id']) driver.create_vip(context, v) return v def update_vip(self, context, id, vip): if 'status' not in vip['vip']: vip['vip']['status'] = constants.PENDING_UPDATE old_vip = self.get_vip(context, id) v = super(LoadBalancerPlugin, self).update_vip(context, id, vip) driver = self._get_driver_for_pool(context, v['pool_id']) driver.update_vip(context, old_vip, v) return v def _delete_db_vip(self, context, id): # proxy the call until plugin inherits from DBPlugin super(LoadBalancerPlugin, self).delete_vip(context, id) def delete_vip(self, context, id): self.update_status(context, ldb.Vip, id, constants.PENDING_DELETE) v = self.get_vip(context, id) driver = self._get_driver_for_pool(context, v['pool_id']) driver.delete_vip(context, v) def _get_provider_name(self, context, pool): if ('provider' in pool and pool['provider'] != attrs.ATTR_NOT_SPECIFIED): provider_name = 
pconf.normalize_provider_name(pool['provider']) self.validate_provider(provider_name) return provider_name else: if not self.default_provider: raise pconf.DefaultServiceProviderNotFound( service_type=constants.LOADBALANCER) return self.default_provider def create_pool(self, context, pool): # This validation is because the new API version also has a resource # called pool and these attributes have to be optional in the old API # so they are not required attributes of the new. Its complicated. if pool['pool']['lb_method'] == attrs.ATTR_NOT_SPECIFIED: raise loadbalancerv2.RequiredAttributeNotSpecified( attr_name='lb_method') if pool['pool']['subnet_id'] == attrs.ATTR_NOT_SPECIFIED: raise loadbalancerv2.RequiredAttributeNotSpecified( attr_name='subnet_id') provider_name = self._get_provider_name(context, pool['pool']) p = super(LoadBalancerPlugin, self).create_pool(context, pool) self.service_type_manager.add_resource_association( context, constants.LOADBALANCER, provider_name, p['id']) # need to add provider name to pool dict, # because provider was not known to db plugin at pool creation p['provider'] = provider_name driver = self.drivers[provider_name] try: driver.create_pool(context, p) except lb_ext.NoEligibleBackend: # that should catch cases when backend of any kind # is not available (agent, appliance, etc) self.update_status(context, ldb.Pool, p['id'], constants.ERROR, "No eligible backend") raise lb_ext.NoEligibleBackend(pool_id=p['id']) return p def update_pool(self, context, id, pool): if 'status' not in pool['pool']: pool['pool']['status'] = constants.PENDING_UPDATE old_pool = self.get_pool(context, id) p = super(LoadBalancerPlugin, self).update_pool(context, id, pool) driver = self._get_driver_for_provider(p['provider']) driver.update_pool(context, old_pool, p) return p def _delete_db_pool(self, context, id): # proxy the call until plugin inherits from DBPlugin # rely on uuid uniqueness: try: with context.session.begin(subtransactions=True): 
self.service_type_manager.del_resource_associations( context, [id]) super(LoadBalancerPlugin, self).delete_pool(context, id) except Exception: # that should not happen # if it's still a case - something goes wrong # log the error and mark the pool as ERROR LOG.error(_LE('Failed to delete pool %s, putting it in ERROR ' 'state'), id) with excutils.save_and_reraise_exception(): self.update_status(context, ldb.Pool, id, constants.ERROR) def delete_pool(self, context, id): # check for delete conditions and update the status # within a transaction to avoid a race with context.session.begin(subtransactions=True): self.update_status(context, ldb.Pool, id, constants.PENDING_DELETE) self._ensure_pool_delete_conditions(context, id) p = self.get_pool(context, id) driver = self._get_driver_for_provider(p['provider']) driver.delete_pool(context, p) def create_member(self, context, member): m = super(LoadBalancerPlugin, self).create_member(context, member) driver = self._get_driver_for_pool(context, m['pool_id']) driver.create_member(context, m) return m def update_member(self, context, id, member): if 'status' not in member['member']: member['member']['status'] = constants.PENDING_UPDATE old_member = self.get_member(context, id) m = super(LoadBalancerPlugin, self).update_member(context, id, member) driver = self._get_driver_for_pool(context, m['pool_id']) driver.update_member(context, old_member, m) return m def _delete_db_member(self, context, id): # proxy the call until plugin inherits from DBPlugin super(LoadBalancerPlugin, self).delete_member(context, id) def delete_member(self, context, id): self.update_status(context, ldb.Member, id, constants.PENDING_DELETE) m = self.get_member(context, id) driver = self._get_driver_for_pool(context, m['pool_id']) driver.delete_member(context, m) def _validate_hm_parameters(self, delay, timeout): if delay < timeout: raise lb_ext.DelayOrTimeoutInvalid() def create_health_monitor(self, context, health_monitor): new_hm = 
health_monitor['health_monitor'] self._validate_hm_parameters(new_hm['delay'], new_hm['timeout']) hm = super(LoadBalancerPlugin, self).create_health_monitor( context, health_monitor ) return hm def update_health_monitor(self, context, id, health_monitor): new_hm = health_monitor['health_monitor'] old_hm = self.get_health_monitor(context, id) delay = new_hm.get('delay', old_hm.get('delay')) timeout = new_hm.get('timeout', old_hm.get('timeout')) self._validate_hm_parameters(delay, timeout) hm = super(LoadBalancerPlugin, self).update_health_monitor( context, id, health_monitor ) with context.session.begin(subtransactions=True): qry = context.session.query( ldb.PoolMonitorAssociation ).filter_by(monitor_id=hm['id']).join(ldb.Pool) for assoc in qry: driver = self._get_driver_for_pool(context, assoc['pool_id']) driver.update_pool_health_monitor(context, old_hm, hm, assoc['pool_id']) return hm def _delete_db_pool_health_monitor(self, context, hm_id, pool_id): super(LoadBalancerPlugin, self).delete_pool_health_monitor(context, hm_id, pool_id) def _delete_db_health_monitor(self, context, id): super(LoadBalancerPlugin, self).delete_health_monitor(context, id) def create_pool_health_monitor(self, context, health_monitor, pool_id): retval = super(LoadBalancerPlugin, self).create_pool_health_monitor( context, health_monitor, pool_id ) monitor_id = health_monitor['health_monitor']['id'] hm = self.get_health_monitor(context, monitor_id) driver = self._get_driver_for_pool(context, pool_id) driver.create_pool_health_monitor(context, hm, pool_id) return retval def delete_pool_health_monitor(self, context, id, pool_id): self.update_pool_health_monitor(context, id, pool_id, constants.PENDING_DELETE) hm = self.get_health_monitor(context, id) driver = self._get_driver_for_pool(context, pool_id) driver.delete_pool_health_monitor(context, hm, pool_id) def stats(self, context, pool_id): driver = self._get_driver_for_pool(context, pool_id) stats_data = driver.stats(context, pool_id) # if we 
get something from the driver - # update the db and return the value from db # else - return what we have in db if stats_data: super(LoadBalancerPlugin, self).update_pool_stats( context, pool_id, stats_data ) return super(LoadBalancerPlugin, self).stats(context, pool_id) def populate_vip_graph(self, context, vip): """Populate the vip with: pool, members, healthmonitors.""" pool = self.get_pool(context, vip['pool_id']) vip['pool'] = pool vip['members'] = [self.get_member(context, member_id) for member_id in pool['members']] vip['health_monitors'] = [self.get_health_monitor(context, hm_id) for hm_id in pool['health_monitors']] return vip def validate_provider(self, provider): if provider not in self.drivers: raise pconf.ServiceProviderNotFound( provider=provider, service_type=constants.LOADBALANCER) class LoadBalancerPluginv2(loadbalancerv2.LoadBalancerPluginBaseV2): """Implementation of the Neutron Loadbalancer Service Plugin. This class manages the workflow of LBaaS request/response. Most DB related works are implemented in class loadbalancer_db.LoadBalancerPluginDb. 
""" supported_extension_aliases = ["lbaasv2", "shared_pools", "l7", "lbaas_agent_schedulerv2", "service-type"] path_prefix = loadbalancerv2.LOADBALANCERV2_PREFIX agent_notifiers = ( agent_scheduler_v2.LbaasAgentSchedulerDbMixin.agent_notifiers) def __init__(self): """Initialization for the loadbalancer service plugin.""" self.db = ldbv2.LoadBalancerPluginDbv2() self.service_type_manager = st_db.ServiceTypeManager.get_instance() add_provider_configuration( self.service_type_manager, constants.LOADBALANCERV2) self._load_drivers() self.start_rpc_listeners() self.db.subscribe() def start_rpc_listeners(self): listeners = [] for driver in self.drivers.values(): if hasattr(driver, 'start_rpc_listeners'): listener = driver.start_rpc_listeners() listeners.append(listener) return listeners def _load_drivers(self): """Loads plugin-drivers specified in configuration.""" self.drivers, self.default_provider = service_base.load_drivers( constants.LOADBALANCERV2, self) # NOTE(blogan): this method MUST be called after # service_base.load_drivers to correctly verify verify_lbaas_mutual_exclusion() ctx = ncontext.get_admin_context() # stop service in case provider was removed, but resources were not self._check_orphan_loadbalancer_associations(ctx, self.drivers.keys()) def _check_orphan_loadbalancer_associations(self, context, provider_names): """Checks remaining associations between loadbalancers and providers. If admin has not undeployed resources with provider that was deleted from configuration, neutron service is stopped. Admin must delete resources prior to removing providers from configuration. 
""" loadbalancers = self.db.get_loadbalancers(context) lost_providers = set( [lb.provider.provider_name for lb in loadbalancers if lb.provider.provider_name not in provider_names]) # resources are left without provider - stop the service if lost_providers: msg = _LE("Delete associated load balancers before " "removing providers %s") % list(lost_providers) LOG.error(msg) raise SystemExit(1) def _get_driver_for_provider(self, provider): try: return self.drivers[provider] except KeyError: # raise if not associated (should never be reached) raise n_exc.Invalid(_LE("Error retrieving driver for provider " "%s") % provider) def _get_driver_for_loadbalancer(self, context, loadbalancer_id): lb = self.db.get_loadbalancer(context, loadbalancer_id) try: return self.drivers[lb.provider.provider_name] except KeyError: raise n_exc.Invalid( _LE("Error retrieving provider for load balancer. Possible " "providers are %s.") % self.drivers.keys() ) def _get_provider_name(self, entity): if ('provider' in entity and entity['provider'] != attrs.ATTR_NOT_SPECIFIED): provider_name = pconf.normalize_provider_name(entity['provider']) del entity['provider'] self.validate_provider(provider_name) return provider_name else: if not self.default_provider: raise pconf.DefaultServiceProviderNotFound( service_type=constants.LOADBALANCER) del entity['provider'] return self.default_provider def _call_driver_operation(self, context, driver_method, db_entity, old_db_entity=None): manager_method = "%s.%s" % (driver_method.__self__.__class__.__name__, driver_method.__name__) LOG.info(_LI("Calling driver operation %s") % manager_method) try: if old_db_entity: driver_method(context, old_db_entity, db_entity) else: driver_method(context, db_entity) # catching and reraising agent issues except (lbaas_agentschedulerv2.NoEligibleLbaasAgent, lbaas_agentschedulerv2.NoActiveLbaasAgent) as no_agent: raise no_agent except Exception: LOG.exception(_LE("There was an error in the driver")) 
            self._handle_driver_error(context, db_entity)
            raise loadbalancerv2.DriverError()

    def _handle_driver_error(self, context, db_entity):
        # Any unexpected driver failure marks the entity's owning load
        # balancer as ERROR so operators can see the broken state.
        lb_id = db_entity.root_loadbalancer.id
        self.db.update_status(context, models.LoadBalancer, lb_id,
                              constants.ERROR)

    def _validate_session_persistence_info(self, sp_info):
        """Performs sanity check on session persistence info.

        :param sp_info: Session persistence info
        :raises SessionPersistenceConfigurationInvalid: if APP_COOKIE
            persistence is requested without a cookie_name, or a
            cookie_name is supplied for any other persistence type.
        """
        if not sp_info:
            # No session persistence requested -- nothing to validate.
            return
        if sp_info['type'] == lb_const.SESSION_PERSISTENCE_APP_COOKIE:
            # APP_COOKIE is the only type that *requires* a cookie name.
            if not sp_info.get('cookie_name'):
                raise loadbalancerv2.SessionPersistenceConfigurationInvalid(
                    msg="'cookie_name' should be specified for %s"
                        " session persistence." % sp_info['type'])
        else:
            # For every other type a cookie name is meaningless and rejected.
            if 'cookie_name' in sp_info:
                raise loadbalancerv2.SessionPersistenceConfigurationInvalid(
                    msg="'cookie_name' is not allowed for %s"
                        " session persistence" % sp_info['type'])

    def get_plugin_type(self):
        # Service-type key this plugin registers under.
        return constants.LOADBALANCERV2

    def get_plugin_description(self):
        return "Neutron LoadBalancer Service Plugin v2"

    def _insert_provider_name_from_flavor(self, context, loadbalancer):
        """Select provider based on flavor."""
        # TODO(jwarendt) Support passing flavor metainfo from the
        # selected flavor profile into the provider, not just selecting
        # the provider, when flavor templating arrives.
        if ('provider' in loadbalancer and
                loadbalancer['provider'] != attrs.ATTR_NOT_SPECIFIED):
            # 'provider' and 'flavor_id' are mutually exclusive inputs.
            raise loadbalancerv2.ProviderFlavorConflict()
        plugin = manager.NeutronManager.get_service_plugins().get(
            constants.FLAVORS)
        if not plugin:
            raise loadbalancerv2.FlavorsPluginNotLoaded()

        # Will raise FlavorNotFound if doesn't exist
        fl_db = flavors_plugin.FlavorsPlugin.get_flavor(
            plugin, context, loadbalancer['flavor_id'])

        if fl_db['service_type'] != constants.LOADBALANCERV2:
            raise flavors.InvalidFlavorServiceType(
                service_type=fl_db['service_type'])

        if not fl_db['enabled']:
            raise flavors.FlavorDisabled()

        providers = flavors_plugin.FlavorsPlugin.get_flavor_next_provider(
            plugin, context, fl_db['id'])

        # The flavors plugin returns an ordered candidate list; the first
        # entry is the provider to use.
        provider = providers[0].get('provider')

        LOG.debug("Selected provider %s" % provider)
        loadbalancer['provider'] = provider

    def create_loadbalancer(self, context, loadbalancer):
        # Unwrap the API envelope {'loadbalancer': {...}}.
        loadbalancer = loadbalancer.get('loadbalancer')
        if loadbalancer['flavor_id'] != attrs.ATTR_NOT_SPECIFIED:
            self._insert_provider_name_from_flavor(context, loadbalancer)
        else:
            del loadbalancer['flavor_id']
        provider_name = self._get_provider_name(loadbalancer)
        driver = self.drivers[provider_name]
        # Some drivers (e.g. Octavia) allocate the VIP themselves; only
        # let the DB layer allocate one when the driver does not.
        lb_db = self.db.create_loadbalancer(
            context, loadbalancer,
            allocate_vip=not driver.load_balancer.allocates_vip)
        self.service_type_manager.add_resource_association(
            context,
            constants.LOADBALANCERV2,
            provider_name, lb_db.id)
        create_method = (driver.load_balancer.create_and_allocate_vip
                         if driver.load_balancer.allocates_vip
                         else driver.load_balancer.create)
        self._call_driver_operation(context, create_method, lb_db)
        # Re-read from the DB so driver-populated fields are reflected.
        return self.db.get_loadbalancer(context, lb_db.id).to_api_dict()

    def update_loadbalancer(self, context, id, loadbalancer):
        loadbalancer = loadbalancer.get('loadbalancer')
        old_lb = self.db.get_loadbalancer(context, id)
        # Atomically flip to PENDING_UPDATE; raises if another operation
        # is already in flight on this load balancer.
        self.db.test_and_set_status(context, models.LoadBalancer, id,
                                    constants.PENDING_UPDATE)
        try:
            updated_lb = self.db.update_loadbalancer(
                context, id, loadbalancer)
        except Exception as exc:
self.db.update_status(context, models.LoadBalancer, id, old_lb.provisioning_status) raise exc driver = self._get_driver_for_provider(old_lb.provider.provider_name) self._call_driver_operation(context, driver.load_balancer.update, updated_lb, old_db_entity=old_lb) return self.db.get_loadbalancer(context, id).to_api_dict() def delete_loadbalancer(self, context, id): old_lb = self.db.get_loadbalancer(context, id) if old_lb.listeners: raise loadbalancerv2.EntityInUse( entity_using=models.Listener.NAME, id=old_lb.listeners[0].id, entity_in_use=models.LoadBalancer.NAME) if old_lb.pools: raise loadbalancerv2.EntityInUse( entity_using=models.PoolV2.NAME, id=old_lb.pools[0].id, entity_in_use=models.LoadBalancer.NAME) self.db.test_and_set_status(context, models.LoadBalancer, id, constants.PENDING_DELETE) driver = self._get_driver_for_provider(old_lb.provider.provider_name) db_lb = self.db.get_loadbalancer(context, id) self._call_driver_operation( context, driver.load_balancer.delete, db_lb) def get_loadbalancer(self, context, id, fields=None): return self.db.get_loadbalancer(context, id).to_api_dict() def get_loadbalancers(self, context, filters=None, fields=None): return [loadbalancer.to_api_dict() for loadbalancer in self.db.get_loadbalancers(context, filters=filters)] def _validate_tls(self, listener, curr_listener=None): def validate_tls_container(container_ref): cert_mgr = CERT_MANAGER_PLUGIN.CertManager() if curr_listener: lb_id = curr_listener['loadbalancer_id'] tenant_id = curr_listener['tenant_id'] else: lb_id = listener.get('loadbalancer_id') tenant_id = listener.get('tenant_id') try: cert_container = cert_mgr.get_cert( project_id=tenant_id, cert_ref=container_ref, resource_ref=cert_mgr.get_service_url(lb_id)) except Exception as e: if hasattr(e, 'status_code') and e.status_code == 404: raise loadbalancerv2.TLSContainerNotFound( container_id=container_ref) else: # Could be a keystone configuration error... 
raise loadbalancerv2.CertManagerError( ref=container_ref, reason=e.message ) try: cert_parser.validate_cert( cert_container.get_certificate(), private_key=cert_container.get_private_key(), private_key_passphrase=( cert_container.get_private_key_passphrase()), intermediates=cert_container.get_intermediates()) except Exception as e: cert_mgr.delete_cert( project_id=tenant_id, cert_ref=container_ref, resource_ref=cert_mgr.get_service_url(lb_id)) raise loadbalancerv2.TLSContainerInvalid( container_id=container_ref, reason=str(e)) def validate_tls_containers(to_validate): for container_ref in to_validate: validate_tls_container(container_ref) to_validate = [] if not listener['default_tls_container_ref']: raise loadbalancerv2.TLSDefaultContainerNotSpecified() if not curr_listener: to_validate.extend([listener['default_tls_container_ref']]) if 'sni_container_refs' in listener: to_validate.extend(listener['sni_container_refs']) elif curr_listener['provisioning_status'] == constants.ERROR: to_validate.extend(curr_listener['default_tls_container_id']) to_validate.extend([ container['tls_container_id'] for container in ( curr_listener['sni_containers'])]) else: if (curr_listener['default_tls_container_id'] != listener['default_tls_container_ref']): to_validate.extend([listener['default_tls_container_ref']]) if ('sni_container_refs' in listener and [container['tls_container_id'] for container in ( curr_listener['sni_containers'])] != listener['sni_container_refs']): to_validate.extend(listener['sni_container_refs']) if len(to_validate) > 0: validate_tls_containers(to_validate) return len(to_validate) > 0 def _check_pool_loadbalancer_match(self, context, pool_id, lb_id): lb = self.db.get_loadbalancer(context, lb_id) pool = self.db.get_pool(context, pool_id) if not lb.id == pool.loadbalancer.id: raise sharedpools.ListenerAndPoolMustBeOnSameLoadbalancer() def create_listener(self, context, listener): listener = listener.get('listener') lb_id = listener.get('loadbalancer_id') 
default_pool_id = listener.get('default_pool_id') if default_pool_id: self._check_pool_exists(context, default_pool_id) # Get the loadbalancer from the default_pool_id if not lb_id: default_pool = self.db.get_pool(context, default_pool_id) lb_id = default_pool.loadbalancer.id listener['loadbalancer_id'] = lb_id elif not lb_id: raise sharedpools.ListenerMustHaveLoadbalancer() if default_pool_id and lb_id: self._check_pool_loadbalancer_match( context, default_pool_id, lb_id) self.db.test_and_set_status(context, models.LoadBalancer, lb_id, constants.PENDING_UPDATE) try: if listener['protocol'] == lb_const.PROTOCOL_TERMINATED_HTTPS: self._validate_tls(listener) listener_db = self.db.create_listener(context, listener) except Exception as exc: self.db.update_loadbalancer_provisioning_status( context, lb_id) raise exc driver = self._get_driver_for_loadbalancer( context, listener_db.loadbalancer_id) self._call_driver_operation( context, driver.listener.create, listener_db) return self.db.get_listener(context, listener_db.id).to_api_dict() def _check_listener_pool_lb_match(self, context, listener_id, pool_id): listener = self.db.get_listener(context, listener_id) pool = self.db.get_pool(context, pool_id) if not listener.loadbalancer.id == pool.loadbalancer.id: raise sharedpools.ListenerAndPoolMustBeOnSameLoadbalancer() def update_listener(self, context, id, listener): listener = listener.get('listener') curr_listener_db = self.db.get_listener(context, id) default_pool_id = listener.get('default_pool_id') if default_pool_id: self._check_listener_pool_lb_match( context, id, default_pool_id) self.db.test_and_set_status(context, models.Listener, id, constants.PENDING_UPDATE) try: curr_listener = curr_listener_db.to_dict() default_tls_container_ref = listener.get( 'default_tls_container_ref') if not default_tls_container_ref: listener['default_tls_container_ref'] = ( # NOTE(blogan): not changing to ref bc this dictionary is # created from a data model 
curr_listener['default_tls_container_id']) if 'sni_container_refs' not in listener: listener['sni_container_ids'] = [ container.tls_container_id for container in ( curr_listener['sni_containers'])] tls_containers_changed = False if curr_listener['protocol'] == lb_const.PROTOCOL_TERMINATED_HTTPS: tls_containers_changed = self._validate_tls( listener, curr_listener=curr_listener) listener_db = self.db.update_listener( context, id, listener, tls_containers_changed=tls_containers_changed) except Exception as exc: self.db.update_status( context, models.LoadBalancer, curr_listener_db.loadbalancer.id, provisioning_status=constants.ACTIVE ) self.db.update_status( context, models.Listener, curr_listener_db.id, provisioning_status=constants.ACTIVE ) raise exc driver = self._get_driver_for_loadbalancer( context, listener_db.loadbalancer_id) self._call_driver_operation( context, driver.listener.update, listener_db, old_db_entity=curr_listener_db) return self.db.get_listener(context, id).to_api_dict() def delete_listener(self, context, id): self.db.test_and_set_status(context, models.Listener, id, constants.PENDING_DELETE) listener_db = self.db.get_listener(context, id) driver = self._get_driver_for_loadbalancer( context, listener_db.loadbalancer_id) self._call_driver_operation( context, driver.listener.delete, listener_db) def get_listener(self, context, id, fields=None): return self.db.get_listener(context, id).to_api_dict() def get_listeners(self, context, filters=None, fields=None): return [listener.to_api_dict() for listener in self.db.get_listeners( context, filters=filters)] def create_pool(self, context, pool): pool = pool.get('pool') listener_id = pool.get('listener_id') listeners = pool.get('listeners', []) if listener_id: listeners.append(listener_id) lb_id = pool.get('loadbalancer_id') db_listeners = [] for l in listeners: db_l = self.db.get_listener(context, l) db_listeners.append(db_l) # Take the pool's loadbalancer_id from the first listener found # if it wasn't 
specified in the API call. if not lb_id: lb_id = db_l.loadbalancer.id # All specified listeners must be on the same loadbalancer if db_l.loadbalancer.id != lb_id: raise sharedpools.ListenerAndPoolMustBeOnSameLoadbalancer() if db_l.default_pool_id: raise sharedpools.ListenerDefaultPoolAlreadySet( listener_id=db_l.id, pool_id=db_l.default_pool_id) if not lb_id: raise sharedpools.PoolMustHaveLoadbalancer() pool['loadbalancer_id'] = lb_id self._validate_session_persistence_info( pool.get('session_persistence')) # SQLAlchemy gets strange ideas about populating the pool if we don't # blank out the listeners at this point. del pool['listener_id'] pool['listeners'] = [] db_pool = self.db.create_pool(context, pool) self.db.test_and_set_status(context, models.LoadBalancer, db_pool.loadbalancer_id, constants.PENDING_UPDATE) for db_l in db_listeners: try: self.db.update_listener(context, db_l.id, {'default_pool_id': db_pool.id}) except Exception as exc: self.db.update_loadbalancer_provisioning_status( context, db_pool.loadbalancer_id) raise exc # Reload the pool from the DB to re-populate pool.listeners # before calling the driver db_pool = self.db.get_pool(context, db_pool.id) driver = self._get_driver_for_loadbalancer( context, db_pool.loadbalancer_id) self._call_driver_operation(context, driver.pool.create, db_pool) return db_pool.to_api_dict() def update_pool(self, context, id, pool): pool = pool.get('pool') self._validate_session_persistence_info( pool.get('session_persistence')) old_pool = self.db.get_pool(context, id) self.db.test_and_set_status(context, models.PoolV2, id, constants.PENDING_UPDATE) try: updated_pool = self.db.update_pool(context, id, pool) except Exception as exc: self.db.update_loadbalancer_provisioning_status( context, old_pool.root_loadbalancer.id) raise exc driver = self._get_driver_for_loadbalancer( context, updated_pool.loadbalancer_id) self._call_driver_operation(context, driver.pool.update, updated_pool, old_db_entity=old_pool) return 
self.db.get_pool(context, id).to_api_dict() def delete_pool(self, context, id): self.db.test_and_set_status(context, models.PoolV2, id, constants.PENDING_DELETE) db_pool = self.db.get_pool(context, id) driver = self._get_driver_for_loadbalancer( context, db_pool.loadbalancer_id) self._call_driver_operation(context, driver.pool.delete, db_pool) def get_pools(self, context, filters=None, fields=None): return [pool.to_api_dict() for pool in self.db.get_pools( context, filters=filters)] def get_pool(self, context, id, fields=None): return self.db.get_pool(context, id).to_api_dict() def _check_pool_exists(self, context, pool_id): if not self.db._resource_exists(context, models.PoolV2, pool_id): raise loadbalancerv2.EntityNotFound(name=models.PoolV2.NAME, id=pool_id) def create_pool_member(self, context, pool_id, member): self._check_pool_exists(context, pool_id) db_pool = self.db.get_pool(context, pool_id) self.db.test_and_set_status(context, models.LoadBalancer, db_pool.root_loadbalancer.id, constants.PENDING_UPDATE) member = member.get('member') try: member_db = self.db.create_pool_member(context, member, pool_id) except Exception as exc: self.db.update_loadbalancer_provisioning_status( context, db_pool.root_loadbalancer.id) raise exc driver = self._get_driver_for_loadbalancer( context, member_db.pool.loadbalancer_id) self._call_driver_operation(context, driver.member.create, member_db) return self.db.get_pool_member(context, member_db.id).to_api_dict() def update_pool_member(self, context, id, pool_id, member): self._check_pool_exists(context, pool_id) member = member.get('member') old_member = self.db.get_pool_member(context, id) self.db.test_and_set_status(context, models.MemberV2, id, constants.PENDING_UPDATE) try: updated_member = self.db.update_pool_member(context, id, member) except Exception as exc: self.db.update_loadbalancer_provisioning_status( context, old_member.pool.loadbalancer.id) raise exc driver = self._get_driver_for_loadbalancer( context, 
updated_member.pool.loadbalancer_id) self._call_driver_operation(context, driver.member.update, updated_member, old_db_entity=old_member) return self.db.get_pool_member(context, id).to_api_dict() def delete_pool_member(self, context, id, pool_id): self._check_pool_exists(context, pool_id) self.db.test_and_set_status(context, models.MemberV2, id, constants.PENDING_DELETE) db_member = self.db.get_pool_member(context, id) driver = self._get_driver_for_loadbalancer( context, db_member.pool.loadbalancer_id) self._call_driver_operation(context, driver.member.delete, db_member) def get_pool_members(self, context, pool_id, filters=None, fields=None): self._check_pool_exists(context, pool_id) if not filters: filters = {} filters['pool_id'] = [pool_id] return [mem.to_api_dict() for mem in self.db.get_pool_members( context, filters=filters)] def get_pool_member(self, context, id, pool_id, fields=None): self._check_pool_exists(context, pool_id) return self.db.get_pool_member(context, id).to_api_dict() def _check_pool_already_has_healthmonitor(self, context, pool_id): pool = self.db.get_pool(context, pool_id) if pool.healthmonitor: raise loadbalancerv2.OneHealthMonitorPerPool( pool_id=pool_id, hm_id=pool.healthmonitor.id) def create_healthmonitor(self, context, healthmonitor): healthmonitor = healthmonitor.get('healthmonitor') pool_id = healthmonitor.pop('pool_id') self._check_pool_exists(context, pool_id) self._check_pool_already_has_healthmonitor(context, pool_id) db_pool = self.db.get_pool(context, pool_id) self.db.test_and_set_status(context, models.LoadBalancer, db_pool.root_loadbalancer.id, constants.PENDING_UPDATE) try: db_hm = self.db.create_healthmonitor_on_pool(context, pool_id, healthmonitor) except Exception as exc: self.db.update_loadbalancer_provisioning_status( context, db_pool.root_loadbalancer.id) raise exc driver = self._get_driver_for_loadbalancer( context, db_hm.pool.loadbalancer_id) self._call_driver_operation(context, driver.health_monitor.create, db_hm) 
return self.db.get_healthmonitor(context, db_hm.id).to_api_dict() def update_healthmonitor(self, context, id, healthmonitor): healthmonitor = healthmonitor.get('healthmonitor') old_hm = self.db.get_healthmonitor(context, id) self.db.test_and_set_status(context, models.HealthMonitorV2, id, constants.PENDING_UPDATE) try: updated_hm = self.db.update_healthmonitor(context, id, healthmonitor) except Exception as exc: self.db.update_loadbalancer_provisioning_status( context, old_hm.root_loadbalancer.id) raise exc driver = self._get_driver_for_loadbalancer( context, updated_hm.pool.loadbalancer_id) self._call_driver_operation(context, driver.health_monitor.update, updated_hm, old_db_entity=old_hm) return self.db.get_healthmonitor(context, updated_hm.id).to_api_dict() def delete_healthmonitor(self, context, id): self.db.test_and_set_status(context, models.HealthMonitorV2, id, constants.PENDING_DELETE) db_hm = self.db.get_healthmonitor(context, id) driver = self._get_driver_for_loadbalancer( context, db_hm.pool.loadbalancer_id) self._call_driver_operation( context, driver.health_monitor.delete, db_hm) def get_healthmonitor(self, context, id, fields=None): return self.db.get_healthmonitor(context, id).to_api_dict() def get_healthmonitors(self, context, filters=None, fields=None): return [hm.to_api_dict() for hm in self.db.get_healthmonitors( context, filters=filters)] def stats(self, context, loadbalancer_id): lb = self.db.get_loadbalancer(context, loadbalancer_id) driver = self._get_driver_for_loadbalancer(context, loadbalancer_id) stats_data = driver.load_balancer.stats(context, lb) # if we get something from the driver - # update the db and return the value from db # else - return what we have in db if stats_data: self.db.update_loadbalancer_stats(context, loadbalancer_id, stats_data) db_stats = self.db.stats(context, loadbalancer_id) return {'stats': db_stats.to_api_dict()} def create_l7policy(self, context, l7policy): l7policy = l7policy.get('l7policy') l7policy_db = 
self.db.create_l7policy(context, l7policy) if l7policy_db.attached_to_loadbalancer(): driver = self._get_driver_for_loadbalancer( context, l7policy_db.listener.loadbalancer_id) self._call_driver_operation(context, driver.l7policy.create, l7policy_db) return l7policy_db.to_dict() def update_l7policy(self, context, id, l7policy): l7policy = l7policy.get('l7policy') old_l7policy = self.db.get_l7policy(context, id) self.db.test_and_set_status(context, models.L7Policy, id, constants.PENDING_UPDATE) try: updated_l7policy = self.db.update_l7policy( context, id, l7policy) except Exception as exc: self.db.update_loadbalancer_provisioning_status( context, old_l7policy.root_loadbalancer.id) raise exc if (updated_l7policy.attached_to_loadbalancer() or old_l7policy.attached_to_loadbalancer()): if updated_l7policy.attached_to_loadbalancer(): driver = self._get_driver_for_loadbalancer( context, updated_l7policy.listener.loadbalancer_id) else: driver = self._get_driver_for_loadbalancer( context, old_l7policy.listener.loadbalancer_id) self._call_driver_operation(context, driver.l7policy.update, updated_l7policy, old_db_entity=old_l7policy) return self.db.get_l7policy(context, updated_l7policy.id).to_api_dict() def delete_l7policy(self, context, id): self.db.test_and_set_status(context, models.L7Policy, id, constants.PENDING_DELETE) l7policy_db = self.db.get_l7policy(context, id) if l7policy_db.attached_to_loadbalancer(): driver = self._get_driver_for_loadbalancer( context, l7policy_db.listener.loadbalancer_id) self._call_driver_operation(context, driver.l7policy.delete, l7policy_db) else: self.db.delete_l7policy(context, id) def get_l7policies(self, context, filters=None, fields=None): return [policy.to_api_dict() for policy in self.db.get_l7policies( context, filters=filters)] def get_l7policy(self, context, id, fields=None): return self.db.get_l7policy(context, id).to_api_dict() def _check_l7policy_exists(self, context, l7policy_id): if not self.db._resource_exists(context, 
models.L7Policy, l7policy_id): raise loadbalancerv2.EntityNotFound(name=models.L7Policy.NAME, id=l7policy_id) def create_l7policy_rule(self, context, rule, l7policy_id): rule = rule.get('rule') rule_db = self.db.create_l7policy_rule(context, rule, l7policy_id) if rule_db.attached_to_loadbalancer(): driver = self._get_driver_for_loadbalancer( context, rule_db.policy.listener.loadbalancer_id) self._call_driver_operation(context, driver.l7rule.create, rule_db) else: self.db.update_status(context, models.L7Rule, rule_db.id, lb_const.DEFERRED) return rule_db.to_dict() def update_l7policy_rule(self, context, id, rule, l7policy_id): rule = rule.get('rule') old_rule_db = self.db.get_l7policy_rule(context, id, l7policy_id) self.db.test_and_set_status(context, models.L7Rule, id, constants.PENDING_UPDATE) try: upd_rule_db = self.db.update_l7policy_rule( context, id, rule, l7policy_id) except Exception as exc: self.db.update_loadbalancer_provisioning_status( context, old_rule_db.root_loadbalancer.id) raise exc if (upd_rule_db.attached_to_loadbalancer() or old_rule_db.attached_to_loadbalancer()): if upd_rule_db.attached_to_loadbalancer(): driver = self._get_driver_for_loadbalancer( context, upd_rule_db.policy.listener.loadbalancer_id) else: driver = self._get_driver_for_loadbalancer( context, old_rule_db.policy.listener.loadbalancer_id) self._call_driver_operation(context, driver.l7rule.update, upd_rule_db, old_db_entity=old_rule_db) else: self.db.update_status(context, models.L7Rule, id, lb_const.DEFERRED) return upd_rule_db.to_dict() def delete_l7policy_rule(self, context, id, l7policy_id): self.db.test_and_set_status(context, models.L7Rule, id, constants.PENDING_DELETE) rule_db = self.db.get_l7policy_rule(context, id, l7policy_id) if rule_db.attached_to_loadbalancer(): driver = self._get_driver_for_loadbalancer( context, rule_db.policy.listener.loadbalancer_id) self._call_driver_operation(context, driver.l7rule.delete, rule_db) else: self.db.delete_l7policy_rule(context, id, 
l7policy_id) def get_l7policy_rules(self, context, l7policy_id, filters=None, fields=None): self._check_l7policy_exists(context, l7policy_id) return [rule.to_api_dict() for rule in self.db.get_l7policy_rules( context, l7policy_id, filters=filters)] def get_l7policy_rule(self, context, id, l7policy_id, fields=None): self._check_l7policy_exists(context, l7policy_id) return self.db.get_l7policy_rule( context, id, l7policy_id).to_api_dict() def validate_provider(self, provider): if provider not in self.drivers: raise pconf.ServiceProviderNotFound( provider=provider, service_type=constants.LOADBALANCERV2) def _default_status(self, obj, exclude=None, **kw): exclude = exclude or [] status = {} status["id"] = obj.id if "provisioning_status" not in exclude: status["provisioning_status"] = obj.provisioning_status if "operating_status" not in exclude: status["operating_status"] = obj.operating_status for key, value in six.iteritems(kw): status[key] = value try: status['name'] = getattr(obj, 'name') except AttributeError: pass return status def _disable_entity_and_children(self, obj): DISABLED = lb_const.DISABLED d = {} if isinstance(obj, data_models.LoadBalancer): d = {'loadbalancer': {'id': obj.id, 'operating_status': DISABLED, 'provisioning_status': obj.provisioning_status, 'name': obj.name, 'listeners': []}} for listener in obj.listeners: listener_dict = self._disable_entity_and_children(listener) d['loadbalancer']['listeners'].append(listener_dict) if isinstance(obj, data_models.Listener): d = {'id': obj.id, 'operating_status': DISABLED, 'provisioning_status': obj.provisioning_status, 'name': obj.name, 'pools': [], 'l7policies': []} if obj.default_pool: pool_dict = self._disable_entity_and_children(obj.default_pool) d['pools'].append(pool_dict) for policy in obj.l7_policies: policy_dict = self._disable_entity_and_children(policy) d['l7policies'].append(policy_dict) if isinstance(obj, data_models.L7Policy): d = {'id': obj.id, 'provisioning_status': obj.provisioning_status, 
'name': obj.name, 'rules': []} for rule in obj.rules: rule_dict = self._disable_entity_and_children(rule) d['rules'].append(rule_dict) if isinstance(obj, data_models.L7Rule): d = {'id': obj.id, 'provisioning_status': obj.provisioning_status, 'type': obj.type} if isinstance(obj, data_models.Pool): d = {'id': obj.id, 'operating_status': DISABLED, 'provisioning_status': obj.provisioning_status, 'name': obj.name, 'members': [], 'healthmonitor': {}} for member in obj.members: member_dict = self._disable_entity_and_children(member) d['members'].append(member_dict) d['healthmonitor'] = self._disable_entity_and_children( obj.healthmonitor) if isinstance(obj, data_models.HealthMonitor): d = {'id': obj.id, 'provisioning_status': obj.provisioning_status, 'type': obj.type} if isinstance(obj, data_models.Member): d = {'id': obj.id, 'operating_status': DISABLED, 'provisioning_status': obj.provisioning_status, 'address': obj.address, 'protocol_port': obj.protocol_port} return d def statuses(self, context, loadbalancer_id): OS = "operating_status" lb = self.db.get_loadbalancer(context, loadbalancer_id) if not lb.admin_state_up: return {"statuses": self._disable_entity_and_children(lb)} lb_status = self._default_status(lb, listeners=[], pools=[]) statuses = {"statuses": {"loadbalancer": lb_status}} if self._is_degraded(lb): self._set_degraded(lb_status) for curr_listener in lb.listeners: if not curr_listener.admin_state_up: lb_status["listeners"].append( self._disable_entity_and_children(curr_listener) ) continue listener_status = self._default_status(curr_listener, pools=[], l7policies=[]) lb_status["listeners"].append(listener_status) if self._is_degraded(curr_listener): self._set_degraded(lb_status) for policy in curr_listener.l7_policies: if not policy.admin_state_up: listener_status["l7policies"].append( self._disable_entity_and_children(policy)) continue policy_opts = {"action": policy.action, "rules": []} policy_status = self._default_status(policy, exclude=[OS], 
**policy_opts) listener_status["l7policies"].append(policy_status) if self._is_degraded(policy, exclude=[OS]): self._set_degraded(policy_status, listener_status, lb_status) for rule in policy.rules: if not rule.admin_state_up: policy_status["rules"].append( self._disable_entity_and_children(rule)) continue rule_opts = {"type": rule.type} rule_status = self._default_status(rule, exclude=[OS], **rule_opts) policy_status["rules"].append(rule_status) if self._is_degraded(rule, exclude=[OS]): self._set_degraded(rule_status, policy_status, listener_status, lb_status) if not curr_listener.default_pool: continue if not curr_listener.default_pool.admin_state_up: listener_status["pools"].append( self._disable_entity_and_children( curr_listener.default_pool)) continue pool_status = self._default_status(curr_listener.default_pool, members=[], healthmonitor={}) listener_status["pools"].append(pool_status) if (pool_status["id"] not in [ps["id"] for ps in lb_status["pools"]]): lb_status["pools"].append(pool_status) if self._is_degraded(curr_listener.default_pool): self._set_degraded(self, listener_status, lb_status) members = curr_listener.default_pool.members for curr_member in members: if not curr_member.admin_state_up: pool_status["members"].append( self._disable_entity_and_children(curr_member)) continue member_opts = {"address": curr_member.address, "protocol_port": curr_member.protocol_port} member_status = self._default_status(curr_member, **member_opts) pool_status["members"].append(member_status) if self._is_degraded(curr_member): self._set_degraded(pool_status, listener_status, lb_status) healthmonitor = curr_listener.default_pool.healthmonitor if healthmonitor: if not healthmonitor.admin_state_up: dhm = self._disable_entity_and_children(healthmonitor) hm_status = dhm else: hm_status = self._default_status(healthmonitor, exclude=[OS], type=healthmonitor.type) if self._is_degraded(healthmonitor, exclude=[OS]): self._set_degraded(pool_status, listener_status, lb_status) 
else: hm_status = {} pool_status["healthmonitor"] = hm_status # Needed for pools not associated with a listener for curr_pool in lb.pools: if curr_pool.id in [ps["id"] for ps in lb_status["pools"]]: continue if not curr_pool.admin_state_up: lb_status["pools"].append( self._disable_entity_and_children(curr_pool)) continue pool_status = self._default_status(curr_pool, members=[], healthmonitor={}) lb_status["pools"].append(pool_status) if self._is_degraded(curr_pool): self._set_degraded(lb_status) members = curr_pool.members for curr_member in members: if not curr_member.admin_state_up: pool_status["members"].append( self._disable_entity_and_children(curr_member)) continue member_opts = {"address": curr_member.address, "protocol_port": curr_member.protocol_port} member_status = self._default_status(curr_member, **member_opts) pool_status["members"].append(member_status) if self._is_degraded(curr_member): self._set_degraded(pool_status, lb_status) healthmonitor = curr_pool.healthmonitor if healthmonitor: if not healthmonitor.admin_state_up: dhm = self._disable_entity_and_children(healthmonitor) pool_status["healthmonitor"] = dhm else: hm_status = self._default_status(healthmonitor, exclude=[OS], type=healthmonitor.type) if self._is_degraded(healthmonitor, exclude=[OS]): self._set_degraded(pool_status, listener_status, lb_status) else: hm_status = {} pool_status["healthmonitor"] = hm_status return statuses def _set_degraded(self, *objects): for obj in objects: obj["operating_status"] = lb_const.DEGRADED def _is_degraded(self, obj, exclude=None): exclude = exclude or [] if "provisioning_status" not in exclude: if obj.provisioning_status == constants.ERROR: return True if "operating_status" not in exclude: if ((obj.operating_status != lb_const.ONLINE) and (obj.operating_status != lb_const.NO_MONITOR)): return True return False # NOTE(brandon-logan): these need to be concrete methods because the # neutron request pipeline calls these methods before the plugin methods # 
are ever called def get_members(self, context, filters=None, fields=None): pass def get_member(self, context, id, fields=None): pass neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/agent_scheduler.py0000664000567000056710000001431712701407727030243 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import random import sys from neutron.db import agents_db from neutron.db import agentschedulers_db from neutron.db import model_base from neutron_lib import constants from oslo_log import log as logging import six import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.orm import joinedload from abc import abstractmethod from neutron_lbaas._i18n import _LW from neutron_lbaas.extensions import lbaas_agentscheduler LOG = logging.getLogger(__name__) class PoolLoadbalancerAgentBinding(model_base.BASEV2): """Represents binding between neutron loadbalancer pools and agents.""" pool_id = sa.Column(sa.String(36), sa.ForeignKey("pools.id", ondelete='CASCADE'), primary_key=True) agent = orm.relation(agents_db.Agent) agent_id = sa.Column(sa.String(36), sa.ForeignKey("agents.id", ondelete='CASCADE'), nullable=False) class LbaasAgentSchedulerDbMixin(agentschedulers_db.AgentSchedulerDbMixin, lbaas_agentscheduler .LbaasAgentSchedulerPluginBase): def get_lbaas_agent_hosting_pool(self, context, pool_id, active=None): query = context.session.query(PoolLoadbalancerAgentBinding) query = 
query.options(joinedload('agent')) binding = query.get(pool_id) if (binding and self.is_eligible_agent( active, binding.agent)): return {'agent': self._make_agent_dict(binding.agent)} def get_lbaas_agents(self, context, active=None, filters=None): query = context.session.query(agents_db.Agent) query = query.filter_by(agent_type=constants.AGENT_TYPE_LOADBALANCER) if active is not None: query = query.filter_by(admin_state_up=active) if filters: for key, value in six.iteritems(filters): column = getattr(agents_db.Agent, key, None) if column: query = query.filter(column.in_(value)) return [agent for agent in query if self.is_eligible_agent(active, agent)] def list_pools_on_lbaas_agent(self, context, id): query = context.session.query(PoolLoadbalancerAgentBinding.pool_id) query = query.filter_by(agent_id=id) pool_ids = [item[0] for item in query] if pool_ids: return {'pools': self.get_pools(context, filters={'id': pool_ids})} else: return {'pools': []} def num_of_pools_on_lbaas_agent(self, context, id): query = context.session.query(PoolLoadbalancerAgentBinding.pool_id) query = query.filter_by(agent_id=id) return query.count() def get_lbaas_agent_candidates(self, device_driver, active_agents): candidates = [] for agent in active_agents: agent_conf = self.get_configuration_dict(agent) if device_driver in agent_conf['device_drivers']: candidates.append(agent) return candidates class SchedulerBase(object): def schedule(self, plugin, context, pool, device_driver): """Schedule the pool to an active loadbalancer agent if there is no enabled agent hosting it. 
""" with context.session.begin(subtransactions=True): lbaas_agent = plugin.get_lbaas_agent_hosting_pool( context, pool['id']) if lbaas_agent: LOG.debug('Pool %(pool_id)s has already been hosted' ' by lbaas agent %(agent_id)s', {'pool_id': pool['id'], 'agent_id': lbaas_agent['id']}) return active_agents = plugin.get_lbaas_agents(context, active=True) if not active_agents: LOG.warning(_LW('No active lbaas agents for pool %s'), pool['id']) return candidates = plugin.get_lbaas_agent_candidates(device_driver, active_agents) if not candidates: LOG.warning(_LW('No lbaas agent supporting device driver %s'), device_driver) return chosen_agent = self._schedule(candidates, plugin, context) binding = PoolLoadbalancerAgentBinding() binding.agent = chosen_agent binding.pool_id = pool['id'] context.session.add(binding) LOG.debug('Pool %(pool_id)s is scheduled to lbaas agent ' '%(agent_id)s', {'pool_id': pool['id'], 'agent_id': chosen_agent['id']}) return chosen_agent @abstractmethod def _schedule(self, candidates, plugin, context): pass class ChanceScheduler(SchedulerBase): def _schedule(self, candidates, plugin, context): """Allocate a loadbalancer agent for a vip in a random way.""" return random.choice(candidates) class LeastPoolAgentScheduler(SchedulerBase): def _schedule(self, candidates, plugin, context): """Pick an agent with least number of pools from candidates""" current_min_pool_num = sys.maxint # SchedulerBase.schedule() already checks for empty candidates for tmp_agent in candidates: tmp_pool_num = plugin.num_of_pools_on_lbaas_agent( context, tmp_agent['id']) if current_min_pool_num > tmp_pool_num: current_min_pool_num = tmp_pool_num chosen_agent = tmp_agent return chosen_agent neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/data_models.py0000664000567000056710000007536612701407727027376 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This module holds the data models for the load balancer service plugin. These are meant simply as replacement data structures for dictionaries and SQLAlchemy models. Using dictionaries as data containers for many components causes readability issues and does not intuitively give the benefits of what classes and OO give. Using SQLAlchemy models as data containers for many components can become an issue if you do not want to give certain components access to the database. These data models do provide methods for instantiation from SQLAlchemy models and also converting to dictionaries. """ from neutron.db import model_base from neutron.db import models_v2 from neutron.db import servicetype_db from sqlalchemy.ext import orderinglist from sqlalchemy.orm import collections from neutron_lbaas.db.loadbalancer import models from neutron_lbaas.services.loadbalancer import constants as l_const class BaseDataModel(object): # NOTE(ihrachys): we could reuse the list to provide a default __init__ # implementation. That would require handling custom default values though. 
fields = [] def to_dict(self, **kwargs): ret = {} for attr in self.__dict__: if attr.startswith('_') or not kwargs.get(attr, True): continue if isinstance(getattr(self, attr), list): ret[attr] = [] for item in self.__dict__[attr]: if isinstance(item, BaseDataModel): ret[attr].append(item.to_dict()) else: ret[attr] = item elif isinstance(getattr(self, attr), BaseDataModel): ret[attr] = self.__dict__[attr].to_dict() elif isinstance(self.__dict__[attr], unicode): ret[attr.encode('utf8')] = self.__dict__[attr].encode('utf8') else: ret[attr] = self.__dict__[attr] return ret def to_api_dict(self, **kwargs): return {} @classmethod def from_dict(cls, model_dict): fields = {k: v for k, v in model_dict.items() if k in cls.fields} return cls(**fields) @classmethod def from_sqlalchemy_model(cls, sa_model, calling_classes=None): calling_classes = calling_classes or [] attr_mapping = vars(cls).get("attr_mapping") instance = cls() for attr_name in cls.fields: if attr_name.startswith('_'): continue if attr_mapping and attr_name in attr_mapping.keys(): attr = getattr(sa_model, attr_mapping[attr_name]) else: attr = getattr(sa_model, attr_name) # Handles M:1 or 1:1 relationships if isinstance(attr, model_base.BASEV2): if hasattr(instance, attr_name): data_class = SA_MODEL_TO_DATA_MODEL_MAP[attr.__class__] # Don't recurse down object classes too far. If we have # seen the same object class more than twice, we are # probably in a loop. if data_class and calling_classes.count(data_class) < 2: setattr(instance, attr_name, data_class.from_sqlalchemy_model( attr, calling_classes=calling_classes + [cls])) # Handles 1:M or N:M relationships elif (isinstance(attr, collections.InstrumentedList) or isinstance(attr, orderinglist.OrderingList)): for item in attr: if hasattr(instance, attr_name): data_class = SA_MODEL_TO_DATA_MODEL_MAP[item.__class__] # Don't recurse down object classes too far. If we have # seen the same object class more than twice, we are # probably in a loop. 
if (data_class and calling_classes.count(data_class) < 2): attr_list = getattr(instance, attr_name) or [] attr_list.append(data_class.from_sqlalchemy_model( item, calling_classes=calling_classes + [cls])) setattr(instance, attr_name, attr_list) # This isn't a relationship so it must be a "primitive" else: setattr(instance, attr_name, attr) return instance @property def root_loadbalancer(self): """Returns the loadbalancer this instance is attached to.""" if isinstance(self, LoadBalancer): lb = self elif isinstance(self, Listener): lb = self.loadbalancer elif isinstance(self, L7Policy): lb = self.listener.loadbalancer elif isinstance(self, L7Rule): lb = self.policy.listener.loadbalancer elif isinstance(self, Pool): lb = self.loadbalancer elif isinstance(self, SNI): lb = self.listener.loadbalancer else: # Pool Member or Health Monitor lb = self.pool.loadbalancer return lb # NOTE(brandon-logan) AllocationPool, HostRoute, Subnet, IPAllocation, Port, # and ProviderResourceAssociation are defined here because there aren't any # data_models defined in core neutron or neutron services. Instead of jumping # through the hoops to create those I've just defined them here. If ever # data_models or similar are defined in those packages, those should be used # instead of these. 
class AllocationPool(BaseDataModel): fields = ['start', 'end'] def __init__(self, start=None, end=None): self.start = start self.end = end class HostRoute(BaseDataModel): fields = ['destination', 'nexthop'] def __init__(self, destination=None, nexthop=None): self.destination = destination self.nexthop = nexthop class Subnet(BaseDataModel): fields = ['id', 'name', 'tenant_id', 'network_id', 'ip_version', 'cidr', 'gateway_ip', 'enable_dhcp', 'ipv6_ra_mode', 'ipv6_address_mode', 'shared', 'dns_nameservers', 'host_routes', 'allocation_pools', 'subnetpool_id'] def __init__(self, id=None, name=None, tenant_id=None, network_id=None, ip_version=None, cidr=None, gateway_ip=None, enable_dhcp=None, ipv6_ra_mode=None, ipv6_address_mode=None, shared=None, dns_nameservers=None, host_routes=None, allocation_pools=None, subnetpool_id=None): self.id = id self.name = name self.tenant_id = tenant_id self.network_id = network_id self.ip_version = ip_version self.cidr = cidr self.gateway_ip = gateway_ip self.enable_dhcp = enable_dhcp self.ipv6_ra_mode = ipv6_ra_mode self.ipv6_address_mode = ipv6_address_mode self.shared = shared self.dns_nameservers = dns_nameservers self.host_routes = host_routes self.allocation_pools = allocation_pools self.subnetpool_id = subnetpool_id @classmethod def from_dict(cls, model_dict): host_routes = model_dict.pop('host_routes', []) allocation_pools = model_dict.pop('allocation_pools', []) model_dict['host_routes'] = [HostRoute.from_dict(route) for route in host_routes] model_dict['allocation_pools'] = [AllocationPool.from_dict(ap) for ap in allocation_pools] return super(Subnet, cls).from_dict(model_dict) class IPAllocation(BaseDataModel): fields = ['port_id', 'ip_address', 'subnet_id', 'network_id'] def __init__(self, port_id=None, ip_address=None, subnet_id=None, network_id=None): self.port_id = port_id self.ip_address = ip_address self.subnet_id = subnet_id self.network_id = network_id @classmethod def from_dict(cls, model_dict): subnet = 
model_dict.pop('subnet', None) # TODO(blogan): add subnet to __init__. Can't do it yet because it # causes issues with converting SA models into data models. instance = super(IPAllocation, cls).from_dict(model_dict) setattr(instance, 'subnet', None) if subnet: setattr(instance, 'subnet', Subnet.from_dict(subnet)) return instance class Port(BaseDataModel): fields = ['id', 'tenant_id', 'name', 'network_id', 'mac_address', 'admin_state_up', 'status', 'device_id', 'device_owner', 'fixed_ips'] def __init__(self, id=None, tenant_id=None, name=None, network_id=None, mac_address=None, admin_state_up=None, status=None, device_id=None, device_owner=None, fixed_ips=None): self.id = id self.tenant_id = tenant_id self.name = name self.network_id = network_id self.mac_address = mac_address self.admin_state_up = admin_state_up self.status = status self.device_id = device_id self.device_owner = device_owner self.fixed_ips = fixed_ips or [] @classmethod def from_dict(cls, model_dict): fixed_ips = model_dict.pop('fixed_ips', []) model_dict['fixed_ips'] = [IPAllocation.from_dict(fixed_ip) for fixed_ip in fixed_ips] return super(Port, cls).from_dict(model_dict) class ProviderResourceAssociation(BaseDataModel): fields = ['provider_name', 'resource_id'] def __init__(self, provider_name=None, resource_id=None): self.provider_name = provider_name self.resource_id = resource_id @classmethod def from_dict(cls, model_dict): device_driver = model_dict.pop('device_driver', None) instance = super(ProviderResourceAssociation, cls).from_dict( model_dict) setattr(instance, 'device_driver', device_driver) return instance class SessionPersistence(BaseDataModel): fields = ['pool_id', 'type', 'cookie_name', 'pool'] def __init__(self, pool_id=None, type=None, cookie_name=None, pool=None): self.pool_id = pool_id self.type = type self.cookie_name = cookie_name self.pool = pool def to_api_dict(self): return super(SessionPersistence, self).to_dict(pool=False, pool_id=False) @classmethod def from_dict(cls, 
model_dict): pool = model_dict.pop('pool', None) if pool: model_dict['pool'] = Pool.from_dict( pool) return super(SessionPersistence, cls).from_dict(model_dict) class LoadBalancerStatistics(BaseDataModel): fields = ['loadbalancer_id', 'bytes_in', 'bytes_out', 'active_connections', 'total_connections', 'loadbalancer'] def __init__(self, loadbalancer_id=None, bytes_in=None, bytes_out=None, active_connections=None, total_connections=None, loadbalancer=None): self.loadbalancer_id = loadbalancer_id self.bytes_in = bytes_in self.bytes_out = bytes_out self.active_connections = active_connections self.total_connections = total_connections self.loadbalancer = loadbalancer def to_api_dict(self): return super(LoadBalancerStatistics, self).to_dict( loadbalancer_id=False, loadbalancer=False) class HealthMonitor(BaseDataModel): fields = ['id', 'tenant_id', 'type', 'delay', 'timeout', 'max_retries', 'http_method', 'url_path', 'expected_codes', 'provisioning_status', 'admin_state_up', 'pool', 'name'] def __init__(self, id=None, tenant_id=None, type=None, delay=None, timeout=None, max_retries=None, http_method=None, url_path=None, expected_codes=None, provisioning_status=None, admin_state_up=None, pool=None, name=None): self.id = id self.tenant_id = tenant_id self.type = type self.delay = delay self.timeout = timeout self.max_retries = max_retries self.http_method = http_method self.url_path = url_path self.expected_codes = expected_codes self.provisioning_status = provisioning_status self.admin_state_up = admin_state_up self.pool = pool self.name = name def attached_to_loadbalancer(self): return bool(self.pool and self.pool.loadbalancer) def to_api_dict(self): ret_dict = super(HealthMonitor, self).to_dict( provisioning_status=False, pool=False) ret_dict['pools'] = [] if self.pool: ret_dict['pools'].append({'id': self.pool.id}) if self.type in [l_const.HEALTH_MONITOR_TCP, l_const.HEALTH_MONITOR_PING]: ret_dict.pop('http_method') ret_dict.pop('url_path') 
ret_dict.pop('expected_codes') return ret_dict @classmethod def from_dict(cls, model_dict): pool = model_dict.pop('pool', None) if pool: model_dict['pool'] = Pool.from_dict( pool) return super(HealthMonitor, cls).from_dict(model_dict) class Pool(BaseDataModel): fields = ['id', 'tenant_id', 'name', 'description', 'healthmonitor_id', 'protocol', 'lb_algorithm', 'admin_state_up', 'operating_status', 'provisioning_status', 'members', 'healthmonitor', 'session_persistence', 'loadbalancer_id', 'loadbalancer', 'listener', 'listeners', 'l7_policies'] # Map deprecated attribute names to new ones. attr_mapping = {'sessionpersistence': 'session_persistence'} def __init__(self, id=None, tenant_id=None, name=None, description=None, healthmonitor_id=None, protocol=None, lb_algorithm=None, admin_state_up=None, operating_status=None, provisioning_status=None, members=None, healthmonitor=None, session_persistence=None, loadbalancer_id=None, loadbalancer=None, listener=None, listeners=None, l7_policies=None): self.id = id self.tenant_id = tenant_id self.name = name self.description = description self.healthmonitor_id = healthmonitor_id self.protocol = protocol self.lb_algorithm = lb_algorithm self.admin_state_up = admin_state_up self.operating_status = operating_status self.provisioning_status = provisioning_status self.members = members or [] self.healthmonitor = healthmonitor self.session_persistence = session_persistence # NOTE(eezhova): Old attribute name is kept for backwards # compatibility with out-of-tree drivers. 
self.sessionpersistence = self.session_persistence self.loadbalancer_id = loadbalancer_id self.loadbalancer = loadbalancer self.listener = listener self.listeners = listeners or [] self.l7_policies = l7_policies or [] def attached_to_loadbalancer(self): return bool(self.loadbalancer) def to_api_dict(self): ret_dict = super(Pool, self).to_dict( provisioning_status=False, operating_status=False, healthmonitor=False, session_persistence=False, loadbalancer_id=False, loadbalancer=False, listener_id=False) ret_dict['loadbalancers'] = [] if self.loadbalancer: ret_dict['loadbalancers'].append({'id': self.loadbalancer.id}) ret_dict['session_persistence'] = None if self.session_persistence: ret_dict['session_persistence'] = ( self.session_persistence.to_api_dict()) ret_dict['members'] = [{'id': member.id} for member in self.members] ret_dict['listeners'] = [{'id': listener.id} for listener in self.listeners] if self.listener: ret_dict['listener_id'] = self.listener.id else: ret_dict['listener_id'] = None ret_dict['l7_policies'] = [{'id': l7_policy.id} for l7_policy in self.l7_policies] return ret_dict @classmethod def from_dict(cls, model_dict): healthmonitor = model_dict.pop('healthmonitor', None) session_persistence = model_dict.pop('session_persistence', None) model_dict.pop('sessionpersistence', None) loadbalancer = model_dict.pop('loadbalancer', None) members = model_dict.pop('members', []) model_dict['members'] = [Member.from_dict(member) for member in members] listeners = model_dict.pop('listeners', []) model_dict['listeners'] = [Listener.from_dict(listener) for listener in listeners] l7_policies = model_dict.pop('l7_policies', []) model_dict['l7_policies'] = [L7Policy.from_dict(policy) for policy in l7_policies] if healthmonitor: model_dict['healthmonitor'] = HealthMonitor.from_dict( healthmonitor) if session_persistence: model_dict['session_persistence'] = SessionPersistence.from_dict( session_persistence) if loadbalancer: model_dict['loadbalancer'] = 
LoadBalancer.from_dict(loadbalancer) return super(Pool, cls).from_dict(model_dict) class Member(BaseDataModel): fields = ['id', 'tenant_id', 'pool_id', 'address', 'protocol_port', 'weight', 'admin_state_up', 'subnet_id', 'operating_status', 'provisioning_status', 'pool', 'name'] def __init__(self, id=None, tenant_id=None, pool_id=None, address=None, protocol_port=None, weight=None, admin_state_up=None, subnet_id=None, operating_status=None, provisioning_status=None, pool=None, name=None): self.id = id self.tenant_id = tenant_id self.pool_id = pool_id self.address = address self.protocol_port = protocol_port self.weight = weight self.admin_state_up = admin_state_up self.subnet_id = subnet_id self.operating_status = operating_status self.provisioning_status = provisioning_status self.pool = pool self.name = name def attached_to_loadbalancer(self): return bool(self.pool and self.pool.loadbalancer) def to_api_dict(self): return super(Member, self).to_dict( provisioning_status=False, operating_status=False, pool=False) @classmethod def from_dict(cls, model_dict): pool = model_dict.pop('pool', None) if pool: model_dict['pool'] = Pool.from_dict( pool) return super(Member, cls).from_dict(model_dict) class SNI(BaseDataModel): fields = ['listener_id', 'tls_container_id', 'position', 'listener'] def __init__(self, listener_id=None, tls_container_id=None, position=None, listener=None): self.listener_id = listener_id self.tls_container_id = tls_container_id self.position = position self.listener = listener def attached_to_loadbalancer(self): return bool(self.listener and self.listener.loadbalancer) def to_api_dict(self): return super(SNI, self).to_dict(listener=False) class TLSContainer(BaseDataModel): fields = ['id', 'certificate', 'private_key', 'passphrase', 'intermediates', 'primary_cn'] def __init__(self, id=None, certificate=None, private_key=None, passphrase=None, intermediates=None, primary_cn=None): self.id = id self.certificate = certificate self.private_key = 
private_key self.passphrase = passphrase self.intermediates = intermediates self.primary_cn = primary_cn class L7Rule(BaseDataModel): fields = ['id', 'tenant_id', 'l7policy_id', 'type', 'compare_type', 'invert', 'key', 'value', 'provisioning_status', 'admin_state_up', 'policy'] def __init__(self, id=None, tenant_id=None, l7policy_id=None, type=None, compare_type=None, invert=None, key=None, value=None, provisioning_status=None, admin_state_up=None, policy=None): self.id = id self.tenant_id = tenant_id self.l7policy_id = l7policy_id self.type = type self.compare_type = compare_type self.invert = invert self.key = key self.value = value self.provisioning_status = provisioning_status self.admin_state_up = admin_state_up self.policy = policy def attached_to_loadbalancer(self): return bool(self.policy.listener.loadbalancer) def to_api_dict(self): ret_dict = super(L7Rule, self).to_dict( provisioning_status=False, policy=False, l7policy_id=False) ret_dict['policies'] = [] if self.policy: ret_dict['policies'].append({'id': self.policy.id}) return ret_dict @classmethod def from_dict(cls, model_dict): policy = model_dict.pop('policy', None) if policy: model_dict['policy'] = L7Policy.from_dict(policy) return super(L7Rule, cls).from_dict(model_dict) class L7Policy(BaseDataModel): fields = ['id', 'tenant_id', 'name', 'description', 'listener_id', 'action', 'redirect_pool_id', 'redirect_url', 'position', 'admin_state_up', 'provisioning_status', 'listener', 'rules', 'redirect_pool'] def __init__(self, id=None, tenant_id=None, name=None, description=None, listener_id=None, action=None, redirect_pool_id=None, redirect_url=None, position=None, admin_state_up=None, provisioning_status=None, listener=None, rules=None, redirect_pool=None): self.id = id self.tenant_id = tenant_id self.name = name self.description = description self.listener_id = listener_id self.action = action self.redirect_pool_id = redirect_pool_id self.redirect_pool = redirect_pool self.redirect_url = redirect_url 
self.position = position self.admin_state_up = admin_state_up self.provisioning_status = provisioning_status self.listener = listener self.rules = rules or [] def attached_to_loadbalancer(self): return bool(self.listener.loadbalancer) def to_api_dict(self): ret_dict = super(L7Policy, self).to_dict( listener=False, listener_id=False, provisioning_status=False, redirect_pool=False) ret_dict['listeners'] = [] if self.listener: ret_dict['listeners'].append({'id': self.listener.id}) ret_dict['rules'] = [{'id': rule.id} for rule in self.rules] return ret_dict @classmethod def from_dict(cls, model_dict): listener = model_dict.pop('listener', None) redirect_pool = model_dict.pop('redirect_pool', None) rules = model_dict.pop('rules', []) if listener: model_dict['listener'] = Listener.from_dict(listener) if redirect_pool: model_dict['redirect_pool'] = Pool.from_dict(redirect_pool) model_dict['rules'] = [L7Rule.from_dict(rule) for rule in rules] return super(L7Policy, cls).from_dict(model_dict) class Listener(BaseDataModel): fields = ['id', 'tenant_id', 'name', 'description', 'default_pool_id', 'loadbalancer_id', 'protocol', 'default_tls_container_id', 'sni_containers', 'protocol_port', 'connection_limit', 'admin_state_up', 'provisioning_status', 'operating_status', 'default_pool', 'loadbalancer', 'l7_policies'] def __init__(self, id=None, tenant_id=None, name=None, description=None, default_pool_id=None, loadbalancer_id=None, protocol=None, default_tls_container_id=None, sni_containers=None, protocol_port=None, connection_limit=None, admin_state_up=None, provisioning_status=None, operating_status=None, default_pool=None, loadbalancer=None, l7_policies=None): self.id = id self.tenant_id = tenant_id self.name = name self.description = description self.default_pool_id = default_pool_id self.loadbalancer_id = loadbalancer_id self.protocol = protocol self.default_tls_container_id = default_tls_container_id self.sni_containers = sni_containers or [] self.protocol_port = 
protocol_port self.connection_limit = connection_limit self.admin_state_up = admin_state_up self.operating_status = operating_status self.provisioning_status = provisioning_status self.default_pool = default_pool self.loadbalancer = loadbalancer self.l7_policies = l7_policies or [] def attached_to_loadbalancer(self): return bool(self.loadbalancer) def to_api_dict(self): ret_dict = super(Listener, self).to_dict( loadbalancer=False, loadbalancer_id=False, default_pool=False, operating_status=False, provisioning_status=False, sni_containers=False) # NOTE(blogan): Returning a list to future proof for M:N objects # that are not yet implemented. ret_dict['loadbalancers'] = [] if self.loadbalancer: ret_dict['loadbalancers'].append({'id': self.loadbalancer.id}) ret_dict['sni_container_refs'] = [container.tls_container_id for container in self.sni_containers] ret_dict['default_tls_container_ref'] = self.default_tls_container_id ret_dict['l7_policies'] = [{'id': l7_policy.id} for l7_policy in self.l7_policies] return ret_dict @classmethod def from_dict(cls, model_dict): default_pool = model_dict.pop('default_pool', None) loadbalancer = model_dict.pop('loadbalancer', None) sni_containers = model_dict.pop('sni_containers', []) model_dict['sni_containers'] = [SNI.from_dict(sni) for sni in sni_containers] l7_policies = model_dict.pop('l7_policies', []) if default_pool: model_dict['default_pool'] = Pool.from_dict(default_pool) if loadbalancer: model_dict['loadbalancer'] = LoadBalancer.from_dict(loadbalancer) model_dict['l7_policies'] = [L7Policy.from_dict(policy) for policy in l7_policies] return super(Listener, cls).from_dict(model_dict) class LoadBalancer(BaseDataModel): fields = ['id', 'tenant_id', 'name', 'description', 'vip_subnet_id', 'vip_port_id', 'vip_address', 'provisioning_status', 'operating_status', 'admin_state_up', 'vip_port', 'stats', 'provider', 'listeners', 'pools', 'flavor_id'] def __init__(self, id=None, tenant_id=None, name=None, description=None, 
vip_subnet_id=None, vip_port_id=None, vip_address=None, provisioning_status=None, operating_status=None, admin_state_up=None, vip_port=None, stats=None, provider=None, listeners=None, pools=None, flavor_id=None): self.id = id self.tenant_id = tenant_id self.name = name self.description = description self.vip_subnet_id = vip_subnet_id self.vip_port_id = vip_port_id self.vip_address = vip_address self.operating_status = operating_status self.provisioning_status = provisioning_status self.admin_state_up = admin_state_up self.vip_port = vip_port self.stats = stats self.provider = provider self.listeners = listeners or [] self.flavor_id = flavor_id self.pools = pools or [] def attached_to_loadbalancer(self): return True def to_api_dict(self): ret_dict = super(LoadBalancer, self).to_dict( vip_port=False, stats=False, listeners=False) ret_dict['listeners'] = [{'id': listener.id} for listener in self.listeners] ret_dict['pools'] = [{'id': pool.id} for pool in self.pools] if self.provider: ret_dict['provider'] = self.provider.provider_name if not self.flavor_id: del ret_dict['flavor_id'] return ret_dict @classmethod def from_dict(cls, model_dict): listeners = model_dict.pop('listeners', []) pools = model_dict.pop('pools', []) vip_port = model_dict.pop('vip_port', None) provider = model_dict.pop('provider', None) model_dict.pop('stats', None) model_dict['listeners'] = [Listener.from_dict(listener) for listener in listeners] model_dict['pools'] = [Pool.from_dict(pool) for pool in pools] if vip_port: model_dict['vip_port'] = Port.from_dict(vip_port) if provider: model_dict['provider'] = ProviderResourceAssociation.from_dict( provider) return super(LoadBalancer, cls).from_dict(model_dict) SA_MODEL_TO_DATA_MODEL_MAP = { models.LoadBalancer: LoadBalancer, models.HealthMonitorV2: HealthMonitor, models.Listener: Listener, models.SNI: SNI, models.L7Rule: L7Rule, models.L7Policy: L7Policy, models.PoolV2: Pool, models.MemberV2: Member, models.LoadBalancerStatistics: 
LoadBalancerStatistics, models.SessionPersistenceV2: SessionPersistence, models_v2.IPAllocation: IPAllocation, models_v2.Port: Port, servicetype_db.ProviderResourceAssociation: ProviderResourceAssociation } DATA_MODEL_TO_SA_MODEL_MAP = { LoadBalancer: models.LoadBalancer, HealthMonitor: models.HealthMonitorV2, Listener: models.Listener, SNI: models.SNI, L7Rule: models.L7Rule, L7Policy: models.L7Policy, Pool: models.PoolV2, Member: models.MemberV2, LoadBalancerStatistics: models.LoadBalancerStatistics, SessionPersistence: models.SessionPersistenceV2, IPAllocation: models_v2.IPAllocation, Port: models_v2.Port, ProviderResourceAssociation: servicetype_db.ProviderResourceAssociation } neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/constants.py0000664000567000056710000001513112701407726027115 0ustar jenkinsjenkins00000000000000# Copyright 2013 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Shared constants for the LBaaS v1/v2 load balancer service.

# FIXME(brandon-logan): change these to LB_ALGORITHM
LB_METHOD_ROUND_ROBIN = 'ROUND_ROBIN'
LB_METHOD_LEAST_CONNECTIONS = 'LEAST_CONNECTIONS'
LB_METHOD_SOURCE_IP = 'SOURCE_IP'
SUPPORTED_LB_ALGORITHMS = (LB_METHOD_LEAST_CONNECTIONS,
                           LB_METHOD_ROUND_ROBIN,
                           LB_METHOD_SOURCE_IP)

PROTOCOL_TCP = 'TCP'
PROTOCOL_HTTP = 'HTTP'
PROTOCOL_HTTPS = 'HTTPS'
PROTOCOL_TERMINATED_HTTPS = 'TERMINATED_HTTPS'
POOL_SUPPORTED_PROTOCOLS = (PROTOCOL_TCP, PROTOCOL_HTTPS, PROTOCOL_HTTP)
LISTENER_SUPPORTED_PROTOCOLS = (PROTOCOL_TCP, PROTOCOL_HTTPS, PROTOCOL_HTTP,
                                PROTOCOL_TERMINATED_HTTPS)

# Pairs of protocols that may be associated between a listener and a pool.
# NOTE(review): element order (listener vs. pool protocol) is not stated
# here — confirm against the validation code that consumes this tuple.
LISTENER_POOL_COMPATIBLE_PROTOCOLS = (
    (PROTOCOL_TCP, PROTOCOL_TCP),
    (PROTOCOL_HTTP, PROTOCOL_HTTP),
    (PROTOCOL_HTTPS, PROTOCOL_HTTPS),
    (PROTOCOL_HTTP, PROTOCOL_TERMINATED_HTTPS))


HEALTH_MONITOR_PING = 'PING'
HEALTH_MONITOR_TCP = 'TCP'
HEALTH_MONITOR_HTTP = 'HTTP'
HEALTH_MONITOR_HTTPS = 'HTTPS'

SUPPORTED_HEALTH_MONITOR_TYPES = (HEALTH_MONITOR_HTTP, HEALTH_MONITOR_HTTPS,
                                  HEALTH_MONITOR_PING, HEALTH_MONITOR_TCP)

HTTP_METHOD_GET = 'GET'
HTTP_METHOD_HEAD = 'HEAD'
HTTP_METHOD_POST = 'POST'
HTTP_METHOD_PUT = 'PUT'
HTTP_METHOD_DELETE = 'DELETE'
HTTP_METHOD_TRACE = 'TRACE'
HTTP_METHOD_OPTIONS = 'OPTIONS'
HTTP_METHOD_CONNECT = 'CONNECT'
HTTP_METHOD_PATCH = 'PATCH'

SUPPORTED_HTTP_METHODS = (HTTP_METHOD_GET, HTTP_METHOD_HEAD, HTTP_METHOD_POST,
                          HTTP_METHOD_PUT, HTTP_METHOD_DELETE,
                          HTTP_METHOD_TRACE, HTTP_METHOD_OPTIONS,
                          HTTP_METHOD_CONNECT, HTTP_METHOD_PATCH)

# URL path regex according to RFC 3986
# Format: path = "/" *( "/" segment )
#         segment       = *pchar
#         pchar         = unreserved / pct-encoded / sub-delims / ":" / "@"
#         unreserved    = ALPHA / DIGIT / "-" / "." / "_" / "~"
#         pct-encoded   = "%" HEXDIG HEXDIG
#         sub-delims    = "!" / "$" / "&" / "'" / "(" / ")"
#                       / "*" / "+" / "," / ";" / "="
SUPPORTED_URL_PATH = (
    "^(/([a-zA-Z0-9-._~!$&\'()*+,;=:@]|(%[a-fA-F0-9]{2}))*)+$")

SESSION_PERSISTENCE_SOURCE_IP = 'SOURCE_IP'
SESSION_PERSISTENCE_HTTP_COOKIE = 'HTTP_COOKIE'
SESSION_PERSISTENCE_APP_COOKIE = 'APP_COOKIE'

SUPPORTED_SP_TYPES = (SESSION_PERSISTENCE_SOURCE_IP,
                      SESSION_PERSISTENCE_HTTP_COOKIE,
                      SESSION_PERSISTENCE_APP_COOKIE)

L7_RULE_TYPE_HOST_NAME = 'HOST_NAME'
L7_RULE_TYPE_PATH = 'PATH'
L7_RULE_TYPE_FILE_TYPE = 'FILE_TYPE'
L7_RULE_TYPE_HEADER = 'HEADER'
L7_RULE_TYPE_COOKIE = 'COOKIE'
SUPPORTED_L7_RULE_TYPES = (L7_RULE_TYPE_HOST_NAME,
                           L7_RULE_TYPE_PATH,
                           L7_RULE_TYPE_FILE_TYPE,
                           L7_RULE_TYPE_HEADER,
                           L7_RULE_TYPE_COOKIE)

L7_RULE_COMPARE_TYPE_REGEX = 'REGEX'
L7_RULE_COMPARE_TYPE_STARTS_WITH = 'STARTS_WITH'
L7_RULE_COMPARE_TYPE_ENDS_WITH = 'ENDS_WITH'
L7_RULE_COMPARE_TYPE_CONTAINS = 'CONTAINS'
L7_RULE_COMPARE_TYPE_EQUAL_TO = 'EQUAL_TO'
SUPPORTED_L7_RULE_COMPARE_TYPES = (L7_RULE_COMPARE_TYPE_REGEX,
                                   L7_RULE_COMPARE_TYPE_STARTS_WITH,
                                   L7_RULE_COMPARE_TYPE_ENDS_WITH,
                                   L7_RULE_COMPARE_TYPE_CONTAINS,
                                   L7_RULE_COMPARE_TYPE_EQUAL_TO)

L7_POLICY_ACTION_REJECT = 'REJECT'
L7_POLICY_ACTION_REDIRECT_TO_POOL = 'REDIRECT_TO_POOL'
L7_POLICY_ACTION_REDIRECT_TO_URL = 'REDIRECT_TO_URL'
SUPPORTED_L7_POLICY_ACTIONS = (L7_POLICY_ACTION_REJECT,
                               L7_POLICY_ACTION_REDIRECT_TO_POOL,
                               L7_POLICY_ACTION_REDIRECT_TO_URL)

URL_REGEX = "http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|\
(?:%[0-9a-fA-F][0-9a-fA-F]))+"

# See RFCs 2616, 2965, 6265, 7230: Should match characters valid in a
# http header or cookie name.
HTTP_HEADER_COOKIE_NAME_REGEX = r'\A[a-zA-Z0-9!#$%&\'*+-.^_`|~]+\Z'

# See RFCs 2616, 2965, 6265: Should match characters valid in a cookie value.
HTTP_COOKIE_VALUE_REGEX = r'\A[a-zA-Z0-9!#$%&\'()*+-./:<=>?@[\]^_`{|}~]+\Z'

# See RFC 7230: Should match characters valid in a header value.
HTTP_HEADER_VALUE_REGEX = (r'\A[a-zA-Z0-9'
                           r'!"#$%&\'()*+,-./:;<=>?@[\]^_`{|}~\\]+\Z')

# Also in RFC 7230: Should match characters valid in a header value
# when quoted with double quotes.
HTTP_QUOTED_HEADER_VALUE_REGEX = (r'\A"[a-zA-Z0-9 \t'
                                  r'!"#$%&\'()*+,-./:;<=>?@[\]^_`{|}~\\]*"\Z')

# Keys used in statistics dicts returned by drivers/agents.
STATS_ACTIVE_CONNECTIONS = 'active_connections'
STATS_MAX_CONNECTIONS = 'max_connections'
STATS_TOTAL_CONNECTIONS = 'total_connections'
STATS_CURRENT_SESSIONS = 'current_sessions'
STATS_MAX_SESSIONS = 'max_sessions'
STATS_TOTAL_SESSIONS = 'total_sessions'
STATS_IN_BYTES = 'bytes_in'
STATS_OUT_BYTES = 'bytes_out'
STATS_CONNECTION_ERRORS = 'connection_errors'
STATS_RESPONSE_ERRORS = 'response_errors'
STATS_STATUS = 'status'
STATS_HEALTH = 'health'
STATS_FAILED_CHECKS = 'failed_checks'

# Constants to extend status strings in neutron.plugins.common.constants
ONLINE = 'ONLINE'
OFFLINE = 'OFFLINE'
DEGRADED = 'DEGRADED'
DISABLED = 'DISABLED'
NO_MONITOR = 'NO_MONITOR'
OPERATING_STATUSES = (ONLINE, OFFLINE, DEGRADED, DISABLED, NO_MONITOR)

NO_CHECK = 'no check'

# LBaaS V2 Agent Constants
LBAAS_AGENT_SCHEDULER_V2_EXT_ALIAS = 'lbaas_agent_schedulerv2'
AGENT_TYPE_LOADBALANCERV2 = 'Loadbalancerv2 agent'
LOADBALANCER_PLUGINV2 = 'n-lbaasv2-plugin'
LOADBALANCER_AGENTV2 = 'n-lbaasv2_agent'

# LBaaS V1 Agent Constants
LOADBALANCER_PLUGIN = 'n-lbaas-plugin'
LOADBALANCER_AGENT = 'n-lbaas_agent'

LOADBALANCER = "LOADBALANCER"
LOADBALANCERV2 = "LOADBALANCERV2"

# Used to check number of connections per second allowed
# for the LBaaS V1 vip and LBaaS V2 listeners. -1 indicates
# no limit, the value cannot be less than -1.
MIN_CONNECT_VALUE = -1 # LBaas V2 Table entities LISTENER_EVENT = 'listener' LISTENER_STATS_EVENT = 'listener_stats' LOADBALANCER_EVENT = 'loadbalancer' MEMBER_EVENT = 'member' OPERATING_STATUS = 'operating_status' POOL_EVENT = 'pool' neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/__init__.py0000664000567000056710000000000012701407726026625 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/0000775000567000056710000000000012701410110026161 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/netscaler/0000775000567000056710000000000012701410110030141 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/netscaler/netscaler_driver.py0000664000567000056710000005137012701407727034100 0ustar jenkinsjenkins00000000000000# Copyright 2014 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.api.v2 import attributes
from neutron.plugins.common import constants
from oslo_config import cfg
from oslo_log import log as logging

from neutron_lbaas._i18n import _, _LI
from neutron_lbaas.db.loadbalancer import loadbalancer_db
from neutron_lbaas.services.loadbalancer.drivers import abstract_driver
from neutron_lbaas.services.loadbalancer.drivers.netscaler import ncc_client

LOG = logging.getLogger(__name__)

NETSCALER_CC_OPTS = [
    cfg.StrOpt(
        'netscaler_ncc_uri',
        help=_('The URL to reach the NetScaler Control Center Server.'),
    ),
    cfg.StrOpt(
        'netscaler_ncc_username',
        help=_('Username to login to the NetScaler Control Center Server.'),
    ),
    cfg.StrOpt(
        'netscaler_ncc_password',
        help=_('Password to login to the NetScaler Control Center Server.'),
    )
]

cfg.CONF.register_opts(NETSCALER_CC_OPTS, 'netscaler_driver')

# REST resource collection/object names on the Control Center API.
VIPS_RESOURCE = 'vips'
VIP_RESOURCE = 'vip'
POOLS_RESOURCE = 'pools'
POOL_RESOURCE = 'pool'
POOLMEMBERS_RESOURCE = 'members'
POOLMEMBER_RESOURCE = 'member'
MONITORS_RESOURCE = 'healthmonitors'
MONITOR_RESOURCE = 'healthmonitor'
POOLSTATS_RESOURCE = 'statistics'
PROV_SEGMT_ID = 'provider:segmentation_id'
PROV_NET_TYPE = 'provider:network_type'
DRIVER_NAME = 'netscaler_driver'


class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):

    """NetScaler LBaaS Plugin driver class."""

    def __init__(self, plugin):
        self.plugin = plugin
        ncc_uri = cfg.CONF.netscaler_driver.netscaler_ncc_uri
        ncc_username = cfg.CONF.netscaler_driver.netscaler_ncc_username
        ncc_password = cfg.CONF.netscaler_driver.netscaler_ncc_password
        self.client = ncc_client.NSClient(ncc_uri,
                                          ncc_username,
                                          ncc_password)

    def create_vip(self, context, vip):
        """Create a vip on a NetScaler device."""
        network_info = self._get_vip_network_info(context, vip)
        ncc_vip = self._prepare_vip_for_creation(vip)
        # Fix: was ``dict(ncc_vip.items() + network_info.items())``, which
        # raises TypeError on Python 3 (dict_items cannot be concatenated).
        # dict.update() preserves the same merge semantics (network_info
        # values win on key collision).
        ncc_vip.update(network_info)
        LOG.debug("NetScaler driver vip creation: %r", ncc_vip)
        status = constants.ACTIVE
        try:
            self.client.create_resource(context.tenant_id, VIPS_RESOURCE,
                                        VIP_RESOURCE, ncc_vip)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_status(context, loadbalancer_db.Vip, vip["id"],
                                  status)

    def update_vip(self, context, old_vip, vip):
        """Update a vip on a NetScaler device."""
        update_vip = self._prepare_vip_for_update(vip)
        resource_path = "%s/%s" % (VIPS_RESOURCE, vip["id"])
        LOG.debug("NetScaler driver vip %(vip_id)s update: %(vip_obj)r",
                  {"vip_id": vip["id"], "vip_obj": vip})
        status = constants.ACTIVE
        try:
            self.client.update_resource(context.tenant_id, resource_path,
                                        VIP_RESOURCE, update_vip)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_status(context, loadbalancer_db.Vip,
                                  old_vip["id"], status)

    def delete_vip(self, context, vip):
        """Delete a vip on a NetScaler device."""
        resource_path = "%s/%s" % (VIPS_RESOURCE, vip["id"])
        LOG.debug("NetScaler driver vip removal: %s", vip["id"])
        try:
            self.client.remove_resource(context.tenant_id, resource_path)
        except ncc_client.NCCException:
            self.plugin.update_status(context, loadbalancer_db.Vip,
                                      vip["id"], constants.ERROR)
        else:
            # Only drop the DB record once the backend delete succeeded.
            self.plugin._delete_db_vip(context, vip['id'])

    def create_pool(self, context, pool):
        """Create a pool on a NetScaler device."""
        network_info = self._get_pool_network_info(context, pool)
        # allocate a snat port/ipaddress on the subnet if one doesn't exist
        self._create_snatport_for_subnet_if_not_exists(context,
                                                       pool['tenant_id'],
                                                       pool['subnet_id'],
                                                       network_info)
        ncc_pool = self._prepare_pool_for_creation(pool)
        # Fix: same Python-3-incompatible items() concatenation as in
        # create_vip(); merged via dict.update() instead.
        ncc_pool.update(network_info)
        LOG.debug("NetScaler driver pool creation: %r", ncc_pool)
        status = constants.ACTIVE
        try:
            self.client.create_resource(context.tenant_id, POOLS_RESOURCE,
                                        POOL_RESOURCE, ncc_pool)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_status(context, loadbalancer_db.Pool,
                                  ncc_pool["id"], status)

    def update_pool(self, context, old_pool, pool):
        """Update a pool on a NetScaler device."""
        ncc_pool = self._prepare_pool_for_update(pool)
        resource_path = "%s/%s" % (POOLS_RESOURCE, old_pool["id"])
        LOG.debug("NetScaler driver pool %(pool_id)s update: %(pool_obj)r",
                  {"pool_id": old_pool["id"], "pool_obj": ncc_pool})
        status = constants.ACTIVE
        try:
            self.client.update_resource(context.tenant_id, resource_path,
                                        POOL_RESOURCE, ncc_pool)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_status(context, loadbalancer_db.Pool,
                                  old_pool["id"], status)

    def delete_pool(self, context, pool):
        """Delete a pool on a NetScaler device."""
        resource_path = "%s/%s" % (POOLS_RESOURCE, pool['id'])
        LOG.debug("NetScaler driver pool removal: %s", pool["id"])
        try:
            self.client.remove_resource(context.tenant_id, resource_path)
        except ncc_client.NCCException:
            self.plugin.update_status(context, loadbalancer_db.Pool,
                                      pool["id"], constants.ERROR)
        else:
            self.plugin._delete_db_pool(context, pool['id'])
            # Release the shared SNAT port if no other pool uses the subnet.
            self._remove_snatport_for_subnet_if_not_used(context,
                                                         pool['tenant_id'],
                                                         pool['subnet_id'])

    def create_member(self, context, member):
        """Create a pool member on a NetScaler device."""
        ncc_member = self._prepare_member_for_creation(member)
        LOG.info(_LI("NetScaler driver poolmember creation: %r"),
                 ncc_member)
        status = constants.ACTIVE
        try:
            self.client.create_resource(context.tenant_id,
                                        POOLMEMBERS_RESOURCE,
                                        POOLMEMBER_RESOURCE,
                                        ncc_member)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_status(context, loadbalancer_db.Member,
                                  member["id"], status)

    def update_member(self, context, old_member, member):
        """Update a pool member on a NetScaler device."""
        ncc_member = self._prepare_member_for_update(member)
        resource_path = "%s/%s" % (POOLMEMBERS_RESOURCE, old_member["id"])
        LOG.debug("NetScaler driver poolmember %(member_id)s update: "
                  "%(member_obj)r",
                  {"member_id": old_member["id"],
                   "member_obj": ncc_member})
        status = constants.ACTIVE
        try:
            self.client.update_resource(context.tenant_id, resource_path,
                                        POOLMEMBER_RESOURCE, ncc_member)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_status(context, loadbalancer_db.Member,
                                  old_member["id"], status)

    def delete_member(self, context, member):
        """Delete a pool member on a NetScaler device."""
        resource_path = "%s/%s" % (POOLMEMBERS_RESOURCE, member['id'])
        LOG.debug("NetScaler driver poolmember removal: %s", member["id"])
        try:
            self.client.remove_resource(context.tenant_id, resource_path)
        except ncc_client.NCCException:
            self.plugin.update_status(context, loadbalancer_db.Member,
                                      member["id"], constants.ERROR)
        else:
            self.plugin._delete_db_member(context, member['id'])

    def create_pool_health_monitor(self, context, health_monitor, pool_id):
        """Create a pool health monitor on a NetScaler device."""
        ncc_hm = self._prepare_healthmonitor_for_creation(health_monitor,
                                                          pool_id)
        resource_path = "%s/%s/%s" % (POOLS_RESOURCE, pool_id,
                                      MONITORS_RESOURCE)
        LOG.debug("NetScaler driver healthmonitor creation for pool "
                  "%(pool_id)s: %(monitor_obj)r",
                  {"pool_id": pool_id, "monitor_obj": ncc_hm})
        status = constants.ACTIVE
        try:
            self.client.create_resource(context.tenant_id, resource_path,
                                        MONITOR_RESOURCE, ncc_hm)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_pool_health_monitor(context,
                                               health_monitor['id'],
                                               pool_id, status, "")

    def update_pool_health_monitor(self, context, old_health_monitor,
                                   health_monitor, pool_id):
        """Update a pool health monitor on a NetScaler device."""
        ncc_hm = self._prepare_healthmonitor_for_update(health_monitor)
        resource_path = "%s/%s" % (MONITORS_RESOURCE,
                                   old_health_monitor["id"])
        LOG.debug("NetScaler driver healthmonitor %(monitor_id)s update: "
                  "%(monitor_obj)r",
                  {"monitor_id": old_health_monitor["id"],
                   "monitor_obj": ncc_hm})
        status = constants.ACTIVE
        try:
            self.client.update_resource(context.tenant_id, resource_path,
                                        MONITOR_RESOURCE, ncc_hm)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_pool_health_monitor(context,
                                               old_health_monitor['id'],
                                               pool_id, status, "")
def delete_pool_health_monitor(self, context, health_monitor, pool_id): """Delete a pool health monitor on a NetScaler device.""" resource_path = "%s/%s/%s/%s" % (POOLS_RESOURCE, pool_id, MONITORS_RESOURCE, health_monitor["id"]) LOG.debug("NetScaler driver healthmonitor %(monitor_id)s" "removal for pool %(pool_id)s", {"monitor_id": health_monitor["id"], "pool_id": pool_id}) try: self.client.remove_resource(context.tenant_id, resource_path) except ncc_client.NCCException: self.plugin.update_pool_health_monitor(context, health_monitor['id'], pool_id, constants.ERROR, "") else: self.plugin._delete_db_pool_health_monitor(context, health_monitor['id'], pool_id) def stats(self, context, pool_id): """Retrieve pool statistics from the NetScaler device.""" resource_path = "%s/%s" % (POOLSTATS_RESOURCE, pool_id) LOG.debug("NetScaler driver pool stats retrieval: %s", pool_id) try: stats = self.client.retrieve_resource(context.tenant_id, resource_path)[1] except ncc_client.NCCException: self.plugin.update_status(context, loadbalancer_db.Pool, pool_id, constants.ERROR) else: return stats def _prepare_vip_for_creation(self, vip): creation_attrs = { 'id': vip['id'], 'tenant_id': vip['tenant_id'], 'protocol': vip['protocol'], 'address': vip['address'], 'protocol_port': vip['protocol_port'], } if 'session_persistence' in vip: creation_attrs['session_persistence'] = vip['session_persistence'] update_attrs = self._prepare_vip_for_update(vip) creation_attrs.update(update_attrs) return creation_attrs def _prepare_vip_for_update(self, vip): return { 'name': vip['name'], 'description': vip['description'], 'pool_id': vip['pool_id'], 'connection_limit': vip['connection_limit'], 'admin_state_up': vip['admin_state_up'] } def _prepare_pool_for_creation(self, pool): creation_attrs = { 'id': pool['id'], 'tenant_id': pool['tenant_id'], 'vip_id': pool['vip_id'], 'protocol': pool['protocol'], 'subnet_id': pool['subnet_id'], } update_attrs = self._prepare_pool_for_update(pool) 
creation_attrs.update(update_attrs) return creation_attrs def _prepare_pool_for_update(self, pool): return { 'name': pool['name'], 'description': pool['description'], 'lb_method': pool['lb_method'], 'admin_state_up': pool['admin_state_up'] } def _prepare_member_for_creation(self, member): creation_attrs = { 'id': member['id'], 'tenant_id': member['tenant_id'], 'address': member['address'], 'protocol_port': member['protocol_port'], } update_attrs = self._prepare_member_for_update(member) creation_attrs.update(update_attrs) return creation_attrs def _prepare_member_for_update(self, member): return { 'pool_id': member['pool_id'], 'weight': member['weight'], 'admin_state_up': member['admin_state_up'] } def _prepare_healthmonitor_for_creation(self, health_monitor, pool_id): creation_attrs = { 'id': health_monitor['id'], 'tenant_id': health_monitor['tenant_id'], 'type': health_monitor['type'], } update_attrs = self._prepare_healthmonitor_for_update(health_monitor) creation_attrs.update(update_attrs) return creation_attrs def _prepare_healthmonitor_for_update(self, health_monitor): ncc_hm = { 'delay': health_monitor['delay'], 'timeout': health_monitor['timeout'], 'max_retries': health_monitor['max_retries'], 'admin_state_up': health_monitor['admin_state_up'] } if health_monitor['type'] in ['HTTP', 'HTTPS']: ncc_hm['http_method'] = health_monitor['http_method'] ncc_hm['url_path'] = health_monitor['url_path'] ncc_hm['expected_codes'] = health_monitor['expected_codes'] return ncc_hm def _get_network_info(self, context, entity): network_info = {} subnet_id = entity['subnet_id'] subnet = self.plugin._core_plugin.get_subnet(context, subnet_id) network_id = subnet['network_id'] network = self.plugin._core_plugin.get_network(context, network_id) network_info['network_id'] = network_id network_info['subnet_id'] = subnet_id if PROV_NET_TYPE in network: network_info['network_type'] = network[PROV_NET_TYPE] if PROV_SEGMT_ID in network: network_info['segmentation_id'] = 
network[PROV_SEGMT_ID] return network_info def _get_vip_network_info(self, context, vip): network_info = self._get_network_info(context, vip) network_info['port_id'] = vip['port_id'] return network_info def _get_pool_network_info(self, context, pool): return self._get_network_info(context, pool) def _get_pools_on_subnet(self, context, tenant_id, subnet_id): filter_dict = {'subnet_id': [subnet_id], 'tenant_id': [tenant_id]} return self.plugin.get_pools(context, filters=filter_dict) def _get_snatport_for_subnet(self, context, tenant_id, subnet_id): device_id = '_lb-snatport-' + subnet_id subnet = self.plugin._core_plugin.get_subnet(context, subnet_id) network_id = subnet['network_id'] LOG.debug("Filtering ports based on network_id=%(network_id)s, " "tenant_id=%(tenant_id)s, device_id=%(device_id)s", {'network_id': network_id, 'tenant_id': tenant_id, 'device_id': device_id}) filter_dict = { 'network_id': [network_id], 'tenant_id': [tenant_id], 'device_id': [device_id], 'device-owner': [DRIVER_NAME] } ports = self.plugin._core_plugin.get_ports(context, filters=filter_dict) if ports: LOG.info(_LI("Found an existing SNAT port for subnet %s"), subnet_id) return ports[0] LOG.info(_LI("Found no SNAT ports for subnet %s"), subnet_id) def _create_snatport_for_subnet(self, context, tenant_id, subnet_id, ip_address): subnet = self.plugin._core_plugin.get_subnet(context, subnet_id) fixed_ip = {'subnet_id': subnet['id']} if ip_address and ip_address != attributes.ATTR_NOT_SPECIFIED: fixed_ip['ip_address'] = ip_address port_data = { 'tenant_id': tenant_id, 'name': '_lb-snatport-' + subnet_id, 'network_id': subnet['network_id'], 'mac_address': attributes.ATTR_NOT_SPECIFIED, 'admin_state_up': False, 'device_id': '_lb-snatport-' + subnet_id, 'device_owner': DRIVER_NAME, 'fixed_ips': [fixed_ip], } port = self.plugin._core_plugin.create_port(context, {'port': port_data}) LOG.info(_LI("Created SNAT port: %r"), port) return port def _remove_snatport_for_subnet(self, context, tenant_id, 
subnet_id): port = self._get_snatport_for_subnet(context, tenant_id, subnet_id) if port: self.plugin._core_plugin.delete_port(context, port['id']) LOG.info(_LI("Removed SNAT port: %r"), port) def _create_snatport_for_subnet_if_not_exists(self, context, tenant_id, subnet_id, network_info): port = self._get_snatport_for_subnet(context, tenant_id, subnet_id) if not port: LOG.info(_LI("No SNAT port found for subnet %s. Creating one..."), subnet_id) port = self._create_snatport_for_subnet(context, tenant_id, subnet_id, ip_address=None) network_info['port_id'] = port['id'] network_info['snat_ip'] = port['fixed_ips'][0]['ip_address'] LOG.info(_LI("SNAT port: %r"), port) def _remove_snatport_for_subnet_if_not_used(self, context, tenant_id, subnet_id): pools = self._get_pools_on_subnet(context, tenant_id, subnet_id) if not pools: #No pools left on the old subnet. #We can remove the SNAT port/ipaddress self._remove_snatport_for_subnet(context, tenant_id, subnet_id) LOG.info(_LI("Removing SNAT port for subnet %s " "as this is the last pool using it..."), subnet_id) neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/netscaler/ncc_client.py0000664000567000056710000002512112701407726032640 0ustar jenkinsjenkins00000000000000 # Copyright 2014 Citrix Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import requests from neutron_lib import exceptions as n_exc from oslo_log import log as logging from oslo_serialization import jsonutils from neutron_lbaas._i18n import _, _LE, _LI LOG = logging.getLogger(__name__) CONTENT_TYPE_HEADER = 'Content-type' ACCEPT_HEADER = 'Accept' AUTH_HEADER = 'Cookie' DRIVER_HEADER = 'X-OpenStack-LBaaS' TENANT_HEADER = 'X-Tenant-ID' JSON_CONTENT_TYPE = 'application/json' DRIVER_HEADER_VALUE = 'netscaler-openstack-lbaas' NITRO_LOGIN_URI = 'nitro/v2/config/login' class NCCException(n_exc.NeutronException): """Represents exceptions thrown by NSClient.""" CONNECTION_ERROR = 1 REQUEST_ERROR = 2 RESPONSE_ERROR = 3 UNKNOWN_ERROR = 4 def __init__(self, error, status=requests.codes.SERVICE_UNAVAILABLE): self.message = _("NCC Error %d") % error super(NCCException, self).__init__() self.error = error self.status = status def is_not_found_exception(self): if int(self.status) == requests.codes.NOT_FOUND: return True class NSClient(object): """Client to operate on REST resources of NetScaler Control Center.""" def __init__(self, service_uri, username, password, ncc_cleanup_mode="False"): if not service_uri: LOG.exception(_LE("No NetScaler Control Center URI specified. 
" "Cannot connect.")) raise NCCException(NCCException.CONNECTION_ERROR) self.service_uri = service_uri.strip('/') self.auth = None self.cleanup_mode = False if username and password: self.username = username self.password = password if ncc_cleanup_mode.lower() == "true": self.cleanup_mode = True def create_resource(self, tenant_id, resource_path, object_name, object_data): """Create a resource of NetScaler Control Center.""" return self._resource_operation('POST', tenant_id, resource_path, object_name=object_name, object_data=object_data) def is_login(self, resource_uri): if 'login' in resource_uri.lower(): return True else: return False def login(self): """Get session based login""" login_obj = {"username": self.username, "password": self.password} msg = "NetScaler driver login:" + repr(login_obj) LOG.info(msg) resp_status, result = self.create_resource("login", NITRO_LOGIN_URI, "login", login_obj) LOG.info(_LI("Response: status : %(status)s %result(result)s"), { "status": resp_status, "result": result['body']}) result_body = jsonutils.loads(result['body']) session_id = None if result_body and "login" in result_body: logins = result_body["login"] if isinstance(logins, list): login = logins[0] else: login = logins if login and "sessionid" in login: session_id = login["sessionid"] if session_id: LOG.info(_LI("Response: %(result)s"), {"result": result['body']}) LOG.info( _LI("Session_id = %(session_id)s") % {"session_id": session_id}) # Update sessin_id in auth self.auth = "SessId=%s" % session_id else: raise NCCException(NCCException.RESPONSE_ERROR) def retrieve_resource(self, tenant_id, resource_path, parse_response=True): """Retrieve a resource of NetScaler Control Center.""" return self._resource_operation('GET', tenant_id, resource_path) def update_resource(self, tenant_id, resource_path, object_name, object_data): """Update a resource of the NetScaler Control Center.""" return self._resource_operation('PUT', tenant_id, resource_path, object_name=object_name, 
object_data=object_data) def remove_resource(self, tenant_id, resource_path, parse_response=True): """Remove a resource of NetScaler Control Center.""" if self.cleanup_mode: return True else: return self._resource_operation('DELETE', tenant_id, resource_path) def _resource_operation(self, method, tenant_id, resource_path, object_name=None, object_data=None): resource_uri = "%s/%s" % (self.service_uri, resource_path) if not self.auth and not self.is_login(resource_uri): # Creating a session for the first time self.login() headers = self._setup_req_headers(tenant_id) request_body = None if object_data: if isinstance(object_data, str): request_body = object_data else: obj_dict = {object_name: object_data} request_body = jsonutils.dumps(obj_dict) try: response_status, resp_dict = (self. _execute_request(method, resource_uri, headers, body=request_body)) except NCCException as e: if e.status == requests.codes.NOT_FOUND and method == 'DELETE': return 200, {} else: raise e return response_status, resp_dict def _is_valid_response(self, response_status): # when status is less than 400, the response is fine return response_status < requests.codes.bad_request def _setup_req_headers(self, tenant_id): headers = {ACCEPT_HEADER: JSON_CONTENT_TYPE, CONTENT_TYPE_HEADER: JSON_CONTENT_TYPE, DRIVER_HEADER: DRIVER_HEADER_VALUE, TENANT_HEADER: tenant_id, AUTH_HEADER: self.auth} return headers def _get_response_dict(self, response): response_dict = {'status': int(response.status_code), 'body': response.text, 'headers': response.headers} if self._is_valid_response(int(response.status_code)): if response.text: response_dict['dict'] = response.json() return response_dict def _execute_request(self, method, resource_uri, headers, body=None): service_uri_dict = {"service_uri": self.service_uri} try: response = requests.request(method, url=resource_uri, headers=headers, data=body) except requests.exceptions.SSLError: LOG.exception(_LE("SSL error occurred while connecting " "to 
%(service_uri)s"), service_uri_dict) raise NCCException(NCCException.CONNECTION_ERROR) except requests.exceptions.ConnectionError: LOG.exception(_LE("Connection error occurred while connecting" "to %(service_uri)s"), service_uri_dict) raise NCCException(NCCException.CONNECTION_ERROR) except requests.exceptions.Timeout: LOG.exception( _LE("Request to %(service_uri)s timed out"), service_uri_dict) raise NCCException(NCCException.CONNECTION_ERROR) except (requests.exceptions.URLRequired, requests.exceptions.InvalidURL, requests.exceptions.MissingSchema, requests.exceptions.InvalidSchema): LOG.exception(_LE("Request did not specify a valid URL")) raise NCCException(NCCException.REQUEST_ERROR) except requests.exceptions.TooManyRedirects: LOG.exception(_LE("Too many redirects occurred for request ")) raise NCCException(NCCException.REQUEST_ERROR) except requests.exceptions.RequestException: LOG.exception( _LE("A request error while connecting to %(service_uri)s"), service_uri_dict) raise NCCException(NCCException.REQUEST_ERROR) except Exception: LOG.exception( _LE("A unknown error occurred during request to" " %(service_uri)s"), service_uri_dict) raise NCCException(NCCException.UNKNOWN_ERROR) resp_dict = self._get_response_dict(response) resp_body = resp_dict['body'] LOG.info(_LI("Response: %(resp_body)s"), {"resp_body": resp_body}) response_status = resp_dict['status'] if response_status == requests.codes.unauthorized: LOG.exception(_LE("Unable to login. Invalid credentials passed." "for: %s"), self.service_uri) if not self.is_login(resource_uri): # Session expired, relogin and retry.... 
self.login() # Retry the operation headers.update({AUTH_HEADER: self.auth}) self._execute_request(method, resource_uri, headers, body) else: raise NCCException(NCCException.RESPONSE_ERROR) if not self._is_valid_response(response_status): response_msg = resp_body response_dict = {"method": method, "url": resource_uri, "response_status": response_status, "response_msg": response_msg} LOG.exception(_LE("Failed %(method)s operation on %(url)s " "status code: %(response_status)s " "message: %(response_msg)s"), response_dict) raise NCCException(NCCException.RESPONSE_ERROR, response_status) return response_status, resp_dict neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/netscaler/__init__.py0000664000567000056710000000000012701407726032263 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/abstract_driver.py0000664000567000056710000001100412701407726031730 0ustar jenkinsjenkins00000000000000# Copyright 2013 Radware LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six # # DEPRECATION WARNING. THIS ABSTRACT DRIVER IS FOR THE LBAAS V1 OBJECT # MODEL AND SHOULD NO LONGER BE USED TO CREATE DRIVERS. # # PLEASE REFER TO driver_base.py and driver_mixins.py for the newest # lbaas driver base classes. # @six.add_metaclass(abc.ABCMeta) class LoadBalancerAbstractDriver(object): """Abstract lbaas driver that expose ~same API as lbaas plugin. 
The configuration elements (Vip,Member,etc) are the dicts that are returned to the tenant. Get operations are not part of the API - it will be handled by the lbaas plugin. """ @abc.abstractmethod def create_vip(self, context, vip): """A real driver would invoke a call to his backend and set the Vip status to ACTIVE/ERROR according to the backend call result self.plugin.update_status(context, Vip, vip["id"], constants.ACTIVE) """ pass @abc.abstractmethod def update_vip(self, context, old_vip, vip): """Driver may call the code below in order to update the status. self.plugin.update_status(context, Vip, id, constants.ACTIVE) """ pass @abc.abstractmethod def delete_vip(self, context, vip): """A real driver would invoke a call to his backend and try to delete the Vip. if the deletion was successful, delete the record from the database. if the deletion has failed, set the Vip status to ERROR. """ pass @abc.abstractmethod def create_pool(self, context, pool): """Driver may call the code below in order to update the status. self.plugin.update_status(context, Pool, pool["id"], constants.ACTIVE) """ pass @abc.abstractmethod def update_pool(self, context, old_pool, pool): """Driver may call the code below in order to update the status. self.plugin.update_status(context, Pool, pool["id"], constants.ACTIVE) """ pass @abc.abstractmethod def delete_pool(self, context, pool): """Driver can call the code below in order to delete the pool. self.plugin._delete_db_pool(context, pool["id"]) or set the status to ERROR if deletion failed """ pass @abc.abstractmethod def stats(self, context, pool_id): pass @abc.abstractmethod def create_member(self, context, member): """Driver may call the code below in order to update the status. self.plugin.update_status(context, Member, member["id"], constants.ACTIVE) """ pass @abc.abstractmethod def update_member(self, context, old_member, member): """Driver may call the code below in order to update the status. 
self.plugin.update_status(context, Member, member["id"], constants.ACTIVE) """ pass @abc.abstractmethod def delete_member(self, context, member): pass @abc.abstractmethod def update_pool_health_monitor(self, context, old_health_monitor, health_monitor, pool_id): pass @abc.abstractmethod def create_pool_health_monitor(self, context, health_monitor, pool_id): """Driver may call the code below in order to update the status. self.plugin.update_pool_health_monitor(context, health_monitor["id"], pool_id, constants.ACTIVE) """ pass @abc.abstractmethod def delete_pool_health_monitor(self, context, health_monitor, pool_id): pass neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/__init__.py0000664000567000056710000000000012701407726030303 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/logging_noop/0000775000567000056710000000000012701410110030642 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/logging_noop/__init__.py0000664000567000056710000000165012701407726033000 0ustar jenkinsjenkins00000000000000# Copyright 2014, Doug Wiegley (dougwig), A10 Networks # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from neutron_lbaas._i18n import _LW from neutron_lbaas.drivers import logging_noop LOG = logging.getLogger(__name__) LOG.warning(_LW("This path has been deprecated. 
" "Use neutron_lbaas.drivers.logging_noop instead.")) __path__ = logging_noop.__path__ neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/vmware/0000775000567000056710000000000012701410110027462 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/vmware/__init__.py0000664000567000056710000000000012701407726031604 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/vmware/edge_driver.py0000664000567000056710000002137212701407726032343 0ustar jenkinsjenkins00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.plugins.common import constants from neutron_lbaas.db.loadbalancer import loadbalancer_db as lb_db from neutron_lbaas.extensions import loadbalancer as lb_ext from neutron_lbaas.services.loadbalancer.drivers import abstract_driver from neutron_lbaas.services.loadbalancer.drivers.vmware import db class EdgeLoadbalancerDriver(abstract_driver.LoadBalancerAbstractDriver): def __init__(self, plugin): self._plugin = plugin @property def _nsxv_driver(self): return self._plugin._core_plugin.nsx_v def create_pool_successful(self, context, pool, edge_id, edge_pool_id): db.add_nsxv_edge_pool_mapping( context, pool['id'], edge_id, edge_pool_id) self.pool_successful(context, pool) def delete_pool_successful(self, context, pool): self._plugin._delete_db_pool(context, pool['id']) db.delete_nsxv_edge_pool_mapping(context, pool['id']) def pool_successful(self, context, pool): self._plugin.update_status( context, lb_db.Pool, pool['id'], constants.ACTIVE) def pool_failed(self, context, pool): self._plugin.update_status( context, lb_db.Pool, pool['id'], constants.ERROR) def create_pool(self, context, pool): super(EdgeLoadbalancerDriver, self).create_pool(context, pool) self._nsxv_driver.create_pool(context, pool) def update_pool(self, context, old_pool, pool): super(EdgeLoadbalancerDriver, self).update_pool( context, old_pool, pool) pool_mapping = db.get_nsxv_edge_pool_mapping(context, old_pool['id']) self._nsxv_driver.update_pool( context, old_pool, pool, pool_mapping) def delete_pool(self, context, pool): vip_id = self._plugin.get_pool(context, pool['id']).get('vip_id', None) if vip_id: raise lb_ext.PoolInUse(pool_id=pool['id']) else: super(EdgeLoadbalancerDriver, self).delete_pool(context, pool) pool_mapping = db.get_nsxv_edge_pool_mapping(context, pool['id']) self._nsxv_driver.delete_pool(context, pool, pool_mapping) def create_vip_successful(self, context, vip, edge_id, app_profile_id, edge_vip_id, edge_fw_rule_id): db.add_nsxv_edge_vip_mapping(context, 
vip['pool_id'], edge_id, app_profile_id, edge_vip_id, edge_fw_rule_id) self.vip_successful(context, vip) def delete_vip_successful(self, context, vip): db.delete_nsxv_edge_vip_mapping(context, vip['pool_id']) self._plugin._delete_db_vip(context, vip['id']) def vip_successful(self, context, vip): self._plugin.update_status( context, lb_db.Vip, vip['id'], constants.ACTIVE) def vip_failed(self, context, vip): self._plugin.update_status( context, lb_db.Vip, vip['id'], constants.ERROR) def create_vip(self, context, vip): super(EdgeLoadbalancerDriver, self).create_vip(context, vip) pool_mapping = db.get_nsxv_edge_pool_mapping(context, vip['pool_id']) self._nsxv_driver.create_vip(context, vip, pool_mapping) def update_vip(self, context, old_vip, vip): super(EdgeLoadbalancerDriver, self).update_vip(context, old_vip, vip) pool_mapping = db.get_nsxv_edge_pool_mapping(context, vip['pool_id']) vip_mapping = db.get_nsxv_edge_vip_mapping(context, vip['pool_id']) self._nsxv_driver.update_vip(context, old_vip, vip, pool_mapping, vip_mapping) def delete_vip(self, context, vip): super(EdgeLoadbalancerDriver, self).delete_vip(context, vip) vip_mapping = db.get_nsxv_edge_vip_mapping(context, vip['pool_id']) self._nsxv_driver.delete_vip(context, vip, vip_mapping) def member_successful(self, context, member): self._plugin.update_status( context, lb_db.Member, member['id'], constants.ACTIVE) def member_failed(self, context, member): self._plugin.update_status( context, lb_db.Member, member['id'], constants.ERROR) def create_member(self, context, member): super(EdgeLoadbalancerDriver, self).create_member(context, member) pool_mapping = db.get_nsxv_edge_pool_mapping( context, member['pool_id']) self._nsxv_driver.create_member( context, member, pool_mapping) def update_member(self, context, old_member, member): super(EdgeLoadbalancerDriver, self).update_member( context, old_member, member) pool_mapping = db.get_nsxv_edge_pool_mapping( context, member['pool_id']) 
self._nsxv_driver.update_member( context, old_member, member, pool_mapping) def delete_member(self, context, member): super(EdgeLoadbalancerDriver, self).delete_member(context, member) pool_mapping = db.get_nsxv_edge_pool_mapping( context, member['pool_id']) self._nsxv_driver.delete_member(context, member, pool_mapping) def create_pool_health_monitor_successful(self, context, health_monitor, pool_id, edge_id, edge_mon_id): db.add_nsxv_edge_monitor_mapping( context, health_monitor['id'], edge_id, edge_mon_id) self.pool_health_monitor_successful(context, health_monitor, pool_id) def delete_pool_health_monitor_successful(self, context, health_monitor, pool_id, mon_mapping): db.delete_nsxv_edge_monitor_mapping( context, health_monitor['id'], mon_mapping['edge_id']) self._plugin._delete_db_pool_health_monitor( context, health_monitor['id'], pool_id) def pool_health_monitor_successful(self, context, health_monitor, pool_id): self._plugin.update_pool_health_monitor( context, health_monitor['id'], pool_id, constants.ACTIVE, '') def pool_health_monitor_failed(self, context, health_monitor, pool_id): self._plugin.update_pool_health_monitor( context, health_monitor['id'], pool_id, constants.ERROR, '') def create_pool_health_monitor(self, context, health_monitor, pool_id): super(EdgeLoadbalancerDriver, self).create_pool_health_monitor( context, health_monitor, pool_id) pool_mapping = db.get_nsxv_edge_pool_mapping(context, pool_id) mon_mapping = db.get_nsxv_edge_monitor_mapping( context, health_monitor['id'], pool_mapping['edge_id']) self._nsxv_driver.create_pool_health_monitor( context, health_monitor, pool_id, pool_mapping, mon_mapping) def update_pool_health_monitor(self, context, old_health_monitor, health_monitor, pool_id): super(EdgeLoadbalancerDriver, self).update_pool_health_monitor( context, old_health_monitor, health_monitor, pool_id) pool_mapping = db.get_nsxv_edge_pool_mapping(context, pool_id) mon_mapping = db.get_nsxv_edge_monitor_mapping( context, 
health_monitor['id'], pool_mapping['edge_id']) self._nsxv_driver.update_pool_health_monitor( context, old_health_monitor, health_monitor, pool_id, mon_mapping) def delete_pool_health_monitor(self, context, health_monitor, pool_id): super(EdgeLoadbalancerDriver, self).delete_pool_health_monitor( context, health_monitor, pool_id) pool_mapping = db.get_nsxv_edge_pool_mapping(context, pool_id) edge_id = pool_mapping['edge_id'] mon_mapping = db.get_nsxv_edge_monitor_mapping( context, health_monitor['id'], edge_id) self._nsxv_driver.delete_pool_health_monitor( context, health_monitor, pool_id, pool_mapping, mon_mapping) def stats(self, context, pool_id): super(EdgeLoadbalancerDriver, self).stats(context, pool_id) pool_mapping = db.get_nsxv_edge_pool_mapping(context, pool_id) return self._nsxv_driver.stats(context, pool_id, pool_mapping) def is_edge_in_use(self, context, edge_id): pool_mappings = db.get_nsxv_edge_pool_mapping_by_edge(context, edge_id) if pool_mappings: return True return False neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/vmware/db.py0000664000567000056710000000670712701407726030456 0ustar jenkinsjenkins00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lbaas.services.loadbalancer.drivers.vmware import models def add_nsxv_edge_pool_mapping(context, pool_id, edge_id, edge_pool_id): session = context.session with session.begin(subtransactions=True): mapping = models.NsxvEdgePoolMapping() mapping.pool_id = pool_id mapping.edge_id = edge_id mapping.edge_pool_id = edge_pool_id session.add(mapping) def get_nsxv_edge_pool_mapping(context, pool_id): return(context.session.query(models.NsxvEdgePoolMapping). filter_by(pool_id=pool_id).first()) def get_nsxv_edge_pool_mapping_by_edge(context, edge_id): return(context.session.query(models.NsxvEdgePoolMapping). filter_by(edge_id=edge_id).all()) def delete_nsxv_edge_pool_mapping(context, pool_id): session = context.session mapping = (session.query(models.NsxvEdgePoolMapping).filter_by( pool_id=pool_id)) for m in mapping: session.delete(m) def add_nsxv_edge_vip_mapping(context, pool_id, edge_id, edge_app_profile_id, edge_vse_id, edge_fw_rule_id): session = context.session with session.begin(subtransactions=True): mapping = models.NsxvEdgeVipMapping() mapping.pool_id = pool_id mapping.edge_id = edge_id mapping.edge_app_profile_id = edge_app_profile_id mapping.edge_vse_id = edge_vse_id mapping.edge_fw_rule_id = edge_fw_rule_id session.add(mapping) def get_nsxv_edge_vip_mapping(context, pool_id): return(context.session.query(models.NsxvEdgeVipMapping). 
filter_by(pool_id=pool_id).first()) def delete_nsxv_edge_vip_mapping(context, pool_id): session = context.session mapping = (session.query(models.NsxvEdgeVipMapping).filter_by( pool_id=pool_id)) for m in mapping: session.delete(m) def add_nsxv_edge_monitor_mapping(context, monitor_id, edge_id, edge_monitor_id): session = context.session with session.begin(subtransactions=True): mapping = models.NsxvEdgeMonitorMapping() mapping.monitor_id = monitor_id mapping.edge_id = edge_id mapping.edge_monitor_id = edge_monitor_id session.add(mapping) def get_nsxv_edge_monitor_mapping(context, monitor_id, edge_id): return(context.session.query(models.NsxvEdgeMonitorMapping). filter_by(monitor_id=monitor_id, edge_id=edge_id).first()) def get_nsxv_edge_monitor_mapping_all(context, monitor_id): return(context.session.query(models.NsxvEdgeMonitorMapping). filter_by(monitor_id=monitor_id).all()) def delete_nsxv_edge_monitor_mapping(context, monitor_id, edge_id): session = context.session mapping = (session.query(models.NsxvEdgeMonitorMapping).filter_by( monitor_id=monitor_id, edge_id=edge_id)) for m in mapping: session.delete(m) neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/vmware/models.py0000664000567000056710000000444412701407726031350 0ustar jenkinsjenkins00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.db import model_base import sqlalchemy as sql class NsxvEdgePoolMapping(model_base.BASEV2): """Represents the connection between Edges and pools.""" __tablename__ = 'nsxv_edge_pool_mappings' pool_id = sql.Column(sql.String(36), sql.ForeignKey('pools.id', ondelete='CASCADE'), primary_key=True) edge_id = sql.Column(sql.String(36), nullable=False) edge_pool_id = sql.Column(sql.String(36), nullable=False) class NsxvEdgeVipMapping(model_base.BASEV2): """Represents the connection between Edges and VIPs.""" __tablename__ = 'nsxv_edge_vip_mappings' pool_id = sql.Column(sql.String(36), sql.ForeignKey('pools.id', ondelete='CASCADE'), primary_key=True) edge_id = sql.Column(sql.String(36), nullable=False) edge_app_profile_id = sql.Column(sql.String(36), nullable=False) edge_vse_id = sql.Column(sql.String(36), nullable=False) edge_fw_rule_id = sql.Column(sql.String(36), nullable=False) class NsxvEdgeMonitorMapping(model_base.BASEV2): """Represents the connection between Edges and pool monitors.""" __tablename__ = 'nsxv_edge_monitor_mappings' __table_args__ = (sql.schema.UniqueConstraint( 'monitor_id', 'edge_id', name='uniq_nsxv_edge_monitor_mappings'),) monitor_id = sql.Column(sql.String(36), sql.ForeignKey('healthmonitors.id', ondelete='CASCADE'), primary_key=True) edge_id = sql.Column(sql.String(36), nullable=False, primary_key=True) edge_monitor_id = sql.Column(sql.String(36), nullable=False) neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/radware/0000775000567000056710000000000012701410110027606 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/radware/__init__.py0000664000567000056710000000000012701407726031730 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/radware/exceptions.py0000664000567000056710000000277412701407726032376 0ustar jenkinsjenkins00000000000000# Copyright 2013 Radware LTD. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions from neutron_lbaas._i18n import _ class RadwareLBaasException(exceptions.NeutronException): message = _('An unknown exception occurred in Radware LBaaS provider.') class AuthenticationMissing(RadwareLBaasException): message = _('vDirect user/password missing. ' 'Specify in configuration file, under [radware] section') class WorkflowMissing(RadwareLBaasException): message = _('Workflow %(workflow)s is missing on vDirect server. ' 'Upload missing workflow') class RESTRequestFailure(RadwareLBaasException): message = _('REST request failed with status %(status)s. ' 'Reason: %(reason)s, Description: %(description)s. ' 'Success status codes are %(success_codes)s') class UnsupportedEntityOperation(RadwareLBaasException): message = _('%(operation)s operation is not supported for %(entity)s.') neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/radware/driver.py0000664000567000056710000013051612701407727031505 0ustar jenkinsjenkins00000000000000# Copyright 2013 Radware LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import copy import httplib import netaddr import threading import time import eventlet eventlet.monkey_patch(thread=True) from neutron.api.v2 import attributes from neutron import context as ncontext from neutron.plugins.common import constants from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import excutils from six.moves import queue as Queue from neutron_lbaas._i18n import _, _LE, _LI, _LW from neutron_lbaas.db.loadbalancer import loadbalancer_db as lb_db from neutron_lbaas.extensions import loadbalancer from neutron_lbaas.services.loadbalancer.drivers import abstract_driver from neutron_lbaas.services.loadbalancer.drivers.radware \ import exceptions as r_exc LOG = logging.getLogger(__name__) RESP_STATUS = 0 RESP_REASON = 1 RESP_STR = 2 RESP_DATA = 3 TEMPLATE_HEADER = {'Content-Type': 'application/vnd.com.radware.vdirect.' 'template-parameters+json'} PROVISION_HEADER = {'Content-Type': 'application/vnd.com.radware.' 'vdirect.status+json'} CREATE_SERVICE_HEADER = {'Content-Type': 'application/vnd.com.radware.' 
'vdirect.adc-service-specification+json'} driver_opts = [ cfg.StrOpt('vdirect_address', help=_('IP address of vDirect server.')), cfg.StrOpt('ha_secondary_address', help=_('IP address of secondary vDirect server.')), cfg.StrOpt('vdirect_user', default='vDirect', help=_('vDirect user name.')), cfg.StrOpt('vdirect_password', default='radware', secret=True, help=_('vDirect user password.')), cfg.StrOpt('service_adc_type', default="VA", help=_('Service ADC type. Default: VA.')), cfg.StrOpt('service_adc_version', default="", help=_('Service ADC version.')), cfg.BoolOpt('service_ha_pair', default=False, help=_('Enables or disables the Service HA pair. ' 'Default: False.')), cfg.IntOpt('service_throughput', default=1000, help=_('Service throughput. Default: 1000.')), cfg.IntOpt('service_ssl_throughput', default=100, help=_('Service SSL throughput. Default: 100.')), cfg.IntOpt('service_compression_throughput', default=100, help=_('Service compression throughput. Default: 100.')), cfg.IntOpt('service_cache', default=20, help=_('Size of service cache. Default: 20.')), cfg.StrOpt('l2_l3_workflow_name', default='openstack_l2_l3', help=_('Name of l2_l3 workflow. Default: ' 'openstack_l2_l3.')), cfg.StrOpt('l4_workflow_name', default='openstack_l4', help=_('Name of l4 workflow. 
Default: openstack_l4.')), cfg.DictOpt('l2_l3_ctor_params', default={"service": "_REPLACE_", "ha_network_name": "HA-Network", "ha_ip_pool_name": "default", "allocate_ha_vrrp": True, "allocate_ha_ips": True, "twoleg_enabled": "_REPLACE_"}, help=_('Parameter for l2_l3 workflow constructor.')), cfg.DictOpt('l2_l3_setup_params', default={"data_port": 1, "data_ip_address": "192.168.200.99", "data_ip_mask": "255.255.255.0", "gateway": "192.168.200.1", "ha_port": 2}, help=_('Parameter for l2_l3 workflow setup.')), cfg.ListOpt('actions_to_skip', default=['setup_l2_l3'], help=_('List of actions that are not pushed to ' 'the completion queue.')), cfg.StrOpt('l4_action_name', default='BaseCreate', help=_('Name of the l4 workflow action. ' 'Default: BaseCreate.')), cfg.ListOpt('service_resource_pool_ids', default=[], help=_('Resource pool IDs.')), cfg.IntOpt('service_isl_vlan', default=-1, help=_('A required VLAN for the interswitch link to use.')), cfg.BoolOpt('service_session_mirroring_enabled', default=False, help=_('Enable or disable Alteon interswitch link for ' 'stateful session failover. 
Default: False.')) ] cfg.CONF.register_opts(driver_opts, "radware") class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver): """Radware lbaas driver.""" def __init__(self, plugin): rad = cfg.CONF.radware self.plugin = plugin self.service = { "haPair": rad.service_ha_pair, "sessionMirroringEnabled": rad.service_session_mirroring_enabled, "primary": { "capacity": { "throughput": rad.service_throughput, "sslThroughput": rad.service_ssl_throughput, "compressionThroughput": rad.service_compression_throughput, "cache": rad.service_cache }, "network": { "type": "portgroup", "portgroups": ['DATA_NETWORK'] }, "adcType": rad.service_adc_type, "acceptableAdc": "Exact" } } if rad.service_resource_pool_ids: ids = rad.service_resource_pool_ids self.service['resourcePoolIds'] = [ {'name': id} for id in ids ] if rad.service_isl_vlan: self.service['islVlan'] = rad.service_isl_vlan self.l2_l3_wf_name = rad.l2_l3_workflow_name self.l4_wf_name = rad.l4_workflow_name self.l2_l3_ctor_params = rad.l2_l3_ctor_params self.l2_l3_setup_params = rad.l2_l3_setup_params self.l4_action_name = rad.l4_action_name self.actions_to_skip = rad.actions_to_skip vdirect_address = rad.vdirect_address sec_server = rad.ha_secondary_address self.rest_client = vDirectRESTClient(server=vdirect_address, secondary_server=sec_server, user=rad.vdirect_user, password=rad.vdirect_password) self.queue = Queue.Queue() self.completion_handler = OperationCompletionHandler(self.queue, self.rest_client, plugin) self.workflow_templates_exists = False self.completion_handler.setDaemon(True) self.completion_handler_started = False def _populate_vip_graph(self, context, vip): ext_vip = self.plugin.populate_vip_graph(context, vip) vip_network_id = self._get_vip_network_id(context, ext_vip) pool_network_id = self._get_pool_network_id(context, ext_vip) # if VIP and PIP are different, we need an IP address for the PIP # so create port on PIP's network and use its IP address if vip_network_id != pool_network_id: 
pip_address = self._get_pip( context, vip['tenant_id'], _make_pip_name_from_vip(vip), pool_network_id, ext_vip['pool']['subnet_id']) ext_vip['pip_address'] = pip_address else: ext_vip['pip_address'] = vip['address'] ext_vip['vip_network_id'] = vip_network_id ext_vip['pool_network_id'] = pool_network_id return ext_vip def create_vip(self, context, vip): log_info = {'vip': vip, 'extended_vip': 'NOT_ASSIGNED', 'service_name': 'NOT_ASSIGNED'} try: ext_vip = self._populate_vip_graph(context, vip) service_name = self._get_service(ext_vip) log_info['extended_vip'] = ext_vip log_info['service_name'] = service_name self._create_workflow( vip['pool_id'], self.l4_wf_name, {"service": service_name}) self._update_workflow( vip['pool_id'], self.l4_action_name, ext_vip, context) finally: LOG.debug('vip: %(vip)s, extended_vip: %(extended_vip)s, ' 'service_name: %(service_name)s, ', log_info) def update_vip(self, context, old_vip, vip): ext_vip = self._populate_vip_graph(context, vip) self._update_workflow( vip['pool_id'], self.l4_action_name, ext_vip, context, False, lb_db.Vip, vip['id']) def delete_vip(self, context, vip): """Delete a Vip First delete it from the device. If deletion ended OK - remove data from DB as well. 
If the deletion failed - mark vip with error status in DB """ ext_vip = self._populate_vip_graph(context, vip) params = _translate_vip_object_graph(ext_vip, self.plugin, context) ids = params.pop('__ids__') try: # get neutron port id associated with the vip (present if vip and # pip are different) and release it after workflow removed port_filter = { 'name': [_make_pip_name_from_vip(vip)], } ports = self.plugin._core_plugin.get_ports(context, filters=port_filter) if ports: LOG.debug('Retrieved pip nport: %(port)r for vip: %(vip)s', {'port': ports[0], 'vip': vip['id']}) delete_pip_nport_function = self._get_delete_pip_nports( context, ports) else: delete_pip_nport_function = None LOG.debug('Found no pip nports associated with vip: %s', vip['id']) # removing the WF will cause deletion of the configuration from the # device self._remove_workflow(ids, context, delete_pip_nport_function) except r_exc.RESTRequestFailure: pool_id = ext_vip['pool_id'] LOG.exception(_LE('Failed to remove workflow %s. 
' 'Going to set vip to ERROR status'), pool_id) self.plugin.update_status(context, lb_db.Vip, ids['vip'], constants.ERROR) def _get_delete_pip_nports(self, context, ports): def _delete_pip_nports(success): if success: for port in ports: try: self.plugin._core_plugin.delete_port( context, port['id']) LOG.debug('pip nport id: %s', port['id']) except Exception as exception: # stop exception propagation, nport may have # been deleted by other means LOG.warning(_LW('pip nport delete failed: %r'), exception) return _delete_pip_nports def create_pool(self, context, pool): # nothing to do pass def update_pool(self, context, old_pool, pool): self._handle_pool(context, pool) def delete_pool(self, context, pool,): self._handle_pool(context, pool, delete=True) def _handle_pool(self, context, pool, delete=False): vip_id = self.plugin.get_pool(context, pool['id']).get('vip_id', None) if vip_id: if delete: raise loadbalancer.PoolInUse(pool_id=pool['id']) else: vip = self.plugin.get_vip(context, vip_id) ext_vip = self._populate_vip_graph(context, vip) self._update_workflow( pool['id'], self.l4_action_name, ext_vip, context, delete, lb_db.Pool, pool['id']) else: if delete: self.plugin._delete_db_pool(context, pool['id']) else: # we keep the pool in PENDING_UPDATE # no point to modify it since it is not connected to vip yet pass def create_member(self, context, member): self._handle_member(context, member) def update_member(self, context, old_member, member): self._handle_member(context, member) def delete_member(self, context, member): self._handle_member(context, member, delete=True) def _handle_member(self, context, member, delete=False): """Navigate the model. If a Vip is found - activate a bulk WF action. 
""" vip_id = self.plugin.get_pool( context, member['pool_id']).get('vip_id') if vip_id: vip = self.plugin.get_vip(context, vip_id) ext_vip = self._populate_vip_graph(context, vip) self._update_workflow( member['pool_id'], self.l4_action_name, ext_vip, context, delete, lb_db.Member, member['id']) # We have to delete this member but it is not connected to a vip yet elif delete: self.plugin._delete_db_member(context, member['id']) def create_health_monitor(self, context, health_monitor): # Anything to do here? the hm is not connected to the graph yet pass def update_pool_health_monitor(self, context, old_health_monitor, health_monitor, pool_id): self._handle_pool_health_monitor(context, health_monitor, pool_id) def create_pool_health_monitor(self, context, health_monitor, pool_id): self._handle_pool_health_monitor(context, health_monitor, pool_id) def delete_pool_health_monitor(self, context, health_monitor, pool_id): self._handle_pool_health_monitor(context, health_monitor, pool_id, True) def _handle_pool_health_monitor(self, context, health_monitor, pool_id, delete=False): """Push a graph to vDirect Navigate the model. Check if a pool is associated to the vip and push the graph to vDirect """ vip_id = self.plugin.get_pool(context, pool_id).get('vip_id', None) debug_params = {"hm_id": health_monitor['id'], "pool_id": pool_id, "delete": delete, "vip_id": vip_id} LOG.debug('_handle_pool_health_monitor. 
health_monitor = %(hm_id)s ' 'pool_id = %(pool_id)s delete = %(delete)s ' 'vip_id = %(vip_id)s', debug_params) if vip_id: vip = self.plugin.get_vip(context, vip_id) ext_vip = self._populate_vip_graph(context, vip) self._update_workflow(pool_id, self.l4_action_name, ext_vip, context, delete, lb_db.PoolMonitorAssociation, health_monitor['id']) elif delete: self.plugin._delete_db_pool_health_monitor(context, health_monitor['id'], pool_id) def stats(self, context, pool_id): # TODO(avishayb) implement return {"bytes_in": 0, "bytes_out": 0, "active_connections": 0, "total_connections": 0} def _get_vip_network_id(self, context, extended_vip): subnet = self.plugin._core_plugin.get_subnet( context, extended_vip['subnet_id']) return subnet['network_id'] def _start_completion_handling_thread(self): if not self.completion_handler_started: LOG.info(_LI('Starting operation completion handling thread')) self.completion_handler.start() self.completion_handler_started = True def _get_pool_network_id(self, context, extended_vip): subnet = self.plugin._core_plugin.get_subnet( context, extended_vip['pool']['subnet_id']) return subnet['network_id'] @log_helpers.log_method_call def _update_workflow(self, wf_name, action, wf_params, context, delete=False, lbaas_entity=None, entity_id=None): """Update the WF state. 
Push the result to a queue for processing.""" if not self.workflow_templates_exists: self._verify_workflow_templates() if action not in self.actions_to_skip: params = _translate_vip_object_graph(wf_params, self.plugin, context) else: params = wf_params resource = '/api/workflow/%s/action/%s' % (wf_name, action) response = _rest_wrapper(self.rest_client.call('POST', resource, {'parameters': params}, TEMPLATE_HEADER)) LOG.debug('_update_workflow response: %s ', response) if action not in self.actions_to_skip: ids = params.pop('__ids__', None) oper = OperationAttributes(response['uri'], ids, lbaas_entity, entity_id, delete=delete) LOG.debug('Pushing operation %s to the queue', oper) self._start_completion_handling_thread() self.queue.put_nowait(oper) def _remove_workflow(self, ids, context, post_remove_function): wf_name = ids['pool'] LOG.debug('Remove the workflow %s' % wf_name) resource = '/api/workflow/%s' % (wf_name) rest_return = self.rest_client.call('DELETE', resource, None, None) response = _rest_wrapper(rest_return, [204, 202, 404]) if rest_return[RESP_STATUS] == 404: if post_remove_function: try: post_remove_function(True) LOG.debug('Post-remove workflow function %r completed', post_remove_function) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Post-remove workflow function ' '%r failed'), post_remove_function) self.plugin._delete_db_vip(context, ids['vip']) else: oper = OperationAttributes( response['uri'], ids, lb_db.Vip, ids['vip'], delete=True, post_op_function=post_remove_function) LOG.debug('Pushing operation %s to the queue', oper) self._start_completion_handling_thread() self.queue.put_nowait(oper) def _remove_service(self, service_name): resource = '/api/service/%s' % (service_name) _rest_wrapper(self.rest_client.call('DELETE', resource, None, None), [202]) def _get_service(self, ext_vip): """Get a service name. if you can't find one, create a service and create l2_l3 WF. 
""" if not self.workflow_templates_exists: self._verify_workflow_templates() if ext_vip['vip_network_id'] != ext_vip['pool_network_id']: networks_name = '%s_%s' % (ext_vip['vip_network_id'], ext_vip['pool_network_id']) self.l2_l3_ctor_params["twoleg_enabled"] = True else: networks_name = ext_vip['vip_network_id'] self.l2_l3_ctor_params["twoleg_enabled"] = False incoming_service_name = 'srv_%s' % (networks_name,) service_name = self._get_available_service(incoming_service_name) if not service_name: LOG.debug( 'Could not find a service named ' + incoming_service_name) service_name = self._create_service(ext_vip['vip_network_id'], ext_vip['pool_network_id'], ext_vip['tenant_id']) self.l2_l3_ctor_params["service"] = incoming_service_name wf_name = 'l2_l3_' + networks_name self._create_workflow( wf_name, self.l2_l3_wf_name, self.l2_l3_ctor_params) self._update_workflow( wf_name, "setup_l2_l3", self.l2_l3_setup_params, None) else: LOG.debug('A service named ' + service_name + ' was found.') return service_name def _create_service(self, vip_network_id, pool_network_id, tenant_id): """create the service and provision it (async).""" # 1) create the service service = copy.deepcopy(self.service) if vip_network_id != pool_network_id: service_name = 'srv_%s_%s' % (vip_network_id, pool_network_id) service['primary']['network']['portgroups'] = [vip_network_id, pool_network_id] else: service_name = 'srv_' + vip_network_id service['primary']['network']['portgroups'] = [vip_network_id] resource = '/api/service?name=%s&tenant=%s' % (service_name, tenant_id) response = _rest_wrapper(self.rest_client.call('POST', resource, service, CREATE_SERVICE_HEADER), [201]) # 2) provision the service provision_uri = response['links']['actions']['provision'] _rest_wrapper(self.rest_client.call('POST', provision_uri, None, PROVISION_HEADER)) return service_name def _get_available_service(self, service_name): """Check if service exists and return its name if it does.""" resource = '/api/service/' + 
service_name try: _rest_wrapper(self.rest_client.call('GET', resource, None, None), [200]) except Exception: return return service_name def _workflow_exists(self, pool_id): """Check if a WF having the name of the pool_id exists.""" resource = '/api/workflow/' + pool_id try: _rest_wrapper(self.rest_client.call('GET', resource, None, None), [200]) except Exception: return False return True def _create_workflow(self, wf_name, wf_template_name, create_workflow_params=None): """Create a WF if it doesn't exists yet.""" if not self.workflow_templates_exists: self._verify_workflow_templates() if not self._workflow_exists(wf_name): if not create_workflow_params: create_workflow_params = {} resource = '/api/workflowTemplate/%s?name=%s' % ( wf_template_name, wf_name) params = {'parameters': create_workflow_params} response = _rest_wrapper(self.rest_client.call('POST', resource, params, TEMPLATE_HEADER)) LOG.debug('create_workflow response: %s', response) def _verify_workflow_templates(self): """Verify the existence of workflows on vDirect server.""" workflows = {self.l2_l3_wf_name: False, self.l4_wf_name: False} resource = '/api/workflowTemplate' response = _rest_wrapper(self.rest_client.call('GET', resource, None, None), [200]) for wf in workflows.keys(): for wf_template in response: if wf == wf_template['name']: workflows[wf] = True break for wf, found in workflows.items(): if not found: raise r_exc.WorkflowMissing(workflow=wf) self.workflow_templates_exists = True def _get_pip(self, context, tenant_id, port_name, network_id, subnet_id): """Get proxy IP Creates or get port on network_id, returns that port's IP on the subnet_id. 
""" port_filter = { 'name': [port_name], } ports = self.plugin._core_plugin.get_ports(context, filters=port_filter) if not ports: # create port, we just want any IP allocated to the port # based on the network id and subnet_id port_data = { 'tenant_id': tenant_id, 'name': port_name, 'network_id': network_id, 'mac_address': attributes.ATTR_NOT_SPECIFIED, 'admin_state_up': False, 'device_id': '', 'device_owner': 'neutron:' + constants.LOADBALANCER, 'fixed_ips': [{'subnet_id': subnet_id}] } port = self.plugin._core_plugin.create_port(context, {'port': port_data}) else: port = ports[0] ips_on_subnet = [ip for ip in port['fixed_ips'] if ip['subnet_id'] == subnet_id] if not ips_on_subnet: raise Exception(_('Could not find or allocate ' 'IP address for subnet id %s'), subnet_id) else: return ips_on_subnet[0]['ip_address'] class vDirectRESTClient(object): """REST server proxy to Radware vDirect.""" def __init__(self, server='localhost', secondary_server=None, user=None, password=None, port=2189, ssl=True, timeout=5000, base_uri=''): self.server = server self.secondary_server = secondary_server self.port = port self.ssl = ssl self.base_uri = base_uri self.timeout = timeout if user and password: self.auth = base64.encodestring('%s:%s' % (user, password)) self.auth = self.auth.replace('\n', '') else: raise r_exc.AuthenticationMissing() debug_params = {'server': self.server, 'sec_server': self.secondary_server, 'port': self.port, 'ssl': self.ssl} LOG.debug('vDirectRESTClient:init server=%(server)s, ' 'secondary server=%(sec_server)s, ' 'port=%(port)d, ssl=%(ssl)r', debug_params) def _flip_servers(self): LOG.warning(_LW('Fliping servers. 
Current is: %(server)s, ' 'switching to %(secondary)s'), {'server': self.server, 'secondary': self.secondary_server}) self.server, self.secondary_server = self.secondary_server, self.server def _recover(self, action, resource, data, headers, binary=False): if self.server and self.secondary_server: self._flip_servers() resp = self._call(action, resource, data, headers, binary) return resp else: LOG.exception(_LE('REST client is not able to recover ' 'since only one vDirect server is ' 'configured.')) return -1, None, None, None def call(self, action, resource, data, headers, binary=False): resp = self._call(action, resource, data, headers, binary) if resp[RESP_STATUS] == -1: LOG.warning(_LW('vDirect server is not responding (%s).'), self.server) return self._recover(action, resource, data, headers, binary) elif resp[RESP_STATUS] in (301, 307): LOG.warning(_LW('vDirect server is not active (%s).'), self.server) return self._recover(action, resource, data, headers, binary) else: return resp @log_helpers.log_method_call def _call(self, action, resource, data, headers, binary=False): if resource.startswith('http'): uri = resource else: uri = self.base_uri + resource if binary: body = data else: body = jsonutils.dumps(data) debug_data = 'binary' if binary else body debug_data = debug_data if debug_data else 'EMPTY' if not headers: headers = {'Authorization': 'Basic %s' % self.auth} else: headers['Authorization'] = 'Basic %s' % self.auth conn = None if self.ssl: conn = httplib.HTTPSConnection( self.server, self.port, timeout=self.timeout) if conn is None: LOG.error(_LE('vdirectRESTClient: Could not establish HTTPS ' 'connection')) return 0, None, None, None else: conn = httplib.HTTPConnection( self.server, self.port, timeout=self.timeout) if conn is None: LOG.error(_LE('vdirectRESTClient: Could not establish HTTP ' 'connection')) return 0, None, None, None try: conn.request(action, uri, body, headers) response = conn.getresponse() respstr = response.read() respdata = 
respstr try: respdata = jsonutils.loads(respstr) except ValueError: # response was not JSON, ignore the exception pass ret = (response.status, response.reason, respstr, respdata) except Exception as e: log_dict = {'action': action, 'e': e} LOG.error(_LE('vdirectRESTClient: %(action)s failure, %(e)r'), log_dict) ret = -1, None, None, None conn.close() return ret class OperationAttributes(object): """Holds operation attributes. The parameter 'post_op_function' (if supplied) is a function that takes one boolean argument, specifying the success of the operation """ def __init__(self, operation_url, object_graph, lbaas_entity=None, entity_id=None, delete=False, post_op_function=None): self.operation_url = operation_url self.object_graph = object_graph self.delete = delete self.lbaas_entity = lbaas_entity self.entity_id = entity_id self.creation_time = time.time() self.post_op_function = post_op_function def __repr__(self): attrs = self.__dict__ items = ("%s = %r" % (k, v) for k, v in attrs.items()) return "<%s: {%s}>" % (self.__class__.__name__, ', '.join(items)) class OperationCompletionHandler(threading.Thread): """Update DB with operation status or delete the entity from DB.""" def __init__(self, queue, rest_client, plugin): threading.Thread.__init__(self) self.queue = queue self.rest_client = rest_client self.plugin = plugin self.stoprequest = threading.Event() self.opers_to_handle_before_rest = 0 def join(self, timeout=None): self.stoprequest.set() super(OperationCompletionHandler, self).join(timeout) def handle_operation_completion(self, oper): result = self.rest_client.call('GET', oper.operation_url, None, None) completed = result[RESP_DATA]['complete'] reason = result[RESP_REASON], description = result[RESP_STR] if completed: # operation is done - update the DB with the status # or delete the entire graph from DB success = result[RESP_DATA]['success'] sec_to_completion = time.time() - oper.creation_time debug_data = {'oper': oper, 'sec_to_completion': 
sec_to_completion, 'success': success} LOG.debug('Operation %(oper)s is completed after ' '%(sec_to_completion)d sec ' 'with success status: %(success)s :', debug_data) db_status = None if not success: # failure - log it and set the return ERROR as DB state if reason or description: msg = 'Reason:%s. Description:%s' % (reason, description) else: msg = "unknown" error_params = {"operation": oper, "msg": msg} LOG.error(_LE('Operation %(operation)s failed. Reason: ' '%(msg)s'), error_params) db_status = constants.ERROR else: if oper.delete: _remove_object_from_db(self.plugin, oper) else: db_status = constants.ACTIVE if db_status: _update_vip_graph_status(self.plugin, oper, db_status) OperationCompletionHandler._run_post_op_function(success, oper) return completed def run(self): while not self.stoprequest.isSet(): try: oper = self.queue.get(timeout=1) # Get the current queue size (N) and set the counter with it. # Handle N operations with no intermission. # Once N operations handles, get the size again and repeat. 
if self.opers_to_handle_before_rest <= 0: self.opers_to_handle_before_rest = self.queue.qsize() + 1 LOG.debug('Operation consumed from the queue: %s', oper) # check the status - if oper is done: update the db , # else push the oper again to the queue if not self.handle_operation_completion(oper): LOG.debug('Operation %s is not completed yet..', oper) # Not completed - push to the queue again self.queue.put_nowait(oper) self.queue.task_done() self.opers_to_handle_before_rest -= 1 # Take one second rest before start handling # new operations or operations handled before if self.opers_to_handle_before_rest <= 0: time.sleep(1) except Queue.Empty: continue except Exception: m = _("Exception was thrown inside OperationCompletionHandler") LOG.exception(m) @staticmethod def _run_post_op_function(success, oper): if oper.post_op_function: log_data = {'func': oper.post_op_function, 'oper': oper} try: oper.post_op_function(success) LOG.debug('Post-operation function %(func)r completed ' 'after operation %(oper)r', log_data) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Post-operation function %(func)r ' 'failed after operation %(oper)r'), log_data) def _rest_wrapper(response, success_codes=None): """Wrap a REST call and make sure a valid status is returned.""" success_codes = success_codes or [202] if not response: raise r_exc.RESTRequestFailure( status=-1, reason="Unknown", description="Unknown", success_codes=success_codes ) elif response[RESP_STATUS] not in success_codes: raise r_exc.RESTRequestFailure( status=response[RESP_STATUS], reason=response[RESP_REASON], description=response[RESP_STR], success_codes=success_codes ) else: return response[RESP_DATA] def _make_pip_name_from_vip(vip): """Standard way of making PIP name based on VIP ID.""" return 'pip_' + vip['id'] def _update_vip_graph_status(plugin, oper, status): """Update the status Of all the Vip object graph or a specific entity in the graph. 
""" ctx = ncontext.get_admin_context() LOG.debug('_update: %s ', oper) if oper.lbaas_entity == lb_db.PoolMonitorAssociation: plugin.update_pool_health_monitor(ctx, oper.entity_id, oper.object_graph['pool'], status) elif oper.entity_id: plugin.update_status(ctx, oper.lbaas_entity, oper.entity_id, status) else: _update_vip_graph_status_cascade(plugin, oper.object_graph, ctx, status) def _update_vip_graph_status_cascade(plugin, ids, ctx, status): plugin.update_status(ctx, lb_db.Vip, ids['vip'], status) plugin.update_status(ctx, lb_db.Pool, ids['pool'], status) for member_id in ids['members']: plugin.update_status(ctx, lb_db.Member, member_id, status) for hm_id in ids['health_monitors']: plugin.update_pool_health_monitor(ctx, hm_id, ids['pool'], status) def _remove_object_from_db(plugin, oper): """Remove a specific entity from db.""" LOG.debug('_remove_object_from_db %s', oper) ctx = ncontext.get_admin_context() if oper.lbaas_entity == lb_db.PoolMonitorAssociation: plugin._delete_db_pool_health_monitor(ctx, oper.entity_id, oper.object_graph['pool']) elif oper.lbaas_entity == lb_db.Member: plugin._delete_db_member(ctx, oper.entity_id) elif oper.lbaas_entity == lb_db.Vip: plugin._delete_db_vip(ctx, oper.entity_id) elif oper.lbaas_entity == lb_db.Pool: plugin._delete_db_pool(ctx, oper.entity_id) else: raise r_exc.UnsupportedEntityOperation( operation='Remove from DB', entity=oper.lbaas_entity ) TRANSLATION_DEFAULTS = {'session_persistence_type': 'none', 'session_persistence_cookie_name': 'none', 'url_path': '/', 'http_method': 'GET', 'expected_codes': '200', 'subnet': '255.255.255.255', 'mask': '255.255.255.255', 'gw': '255.255.255.255', } VIP_PROPERTIES = ['address', 'protocol_port', 'protocol', 'connection_limit', 'admin_state_up', 'session_persistence_type', 'session_persistence_cookie_name'] POOL_PROPERTIES = ['protocol', 'lb_method', 'admin_state_up'] MEMBER_PROPERTIES = ['address', 'protocol_port', 'weight', 'admin_state_up', 'subnet', 'mask', 'gw'] 
HEALTH_MONITOR_PROPERTIES = ['type', 'delay', 'timeout', 'max_retries', 'admin_state_up', 'url_path', 'http_method', 'expected_codes', 'id'] def _translate_vip_object_graph(extended_vip, plugin, context): """Translate the extended vip translate to a structure that can be understood by the workflow. """ def _create_key(prefix, property_name): return prefix + '_' + property_name + '_array' def _trans_prop_name(prop_name): if prop_name == 'id': return 'uuid' else: return prop_name def get_ids(extended_vip): ids = {} ids['vip'] = extended_vip['id'] ids['pool'] = extended_vip['pool']['id'] ids['members'] = [m['id'] for m in extended_vip['members']] ids['health_monitors'] = [ hm['id'] for hm in extended_vip['health_monitors'] ] return ids trans_vip = {} LOG.debug('Vip graph to be translated: %s', extended_vip) for vip_property in VIP_PROPERTIES: trans_vip['vip_' + vip_property] = extended_vip.get( vip_property, TRANSLATION_DEFAULTS.get(vip_property)) for pool_property in POOL_PROPERTIES: trans_vip['pool_' + pool_property] = extended_vip[ 'pool'][pool_property] for member_property in MEMBER_PROPERTIES: trans_vip[_create_key('member', member_property)] = [] two_leg = (extended_vip['pip_address'] != extended_vip['address']) if two_leg: pool_subnet = plugin._core_plugin.get_subnet( context, extended_vip['pool']['subnet_id']) for member in extended_vip['members']: if member['status'] != constants.PENDING_DELETE: if (two_leg and netaddr.IPAddress(member['address']) not in netaddr.IPNetwork(pool_subnet['cidr'])): member_ports = plugin._core_plugin.get_ports( context, filters={'fixed_ips': {'ip_address': [member['address']]}, 'tenant_id': [extended_vip['tenant_id']]}) if len(member_ports) == 1: member_subnet = plugin._core_plugin.get_subnet( context, member_ports[0]['fixed_ips'][0]['subnet_id']) member_network = netaddr.IPNetwork(member_subnet['cidr']) member['subnet'] = str(member_network.network) member['mask'] = str(member_network.netmask) else: member['subnet'] = 
member['address'] member['gw'] = pool_subnet['gateway_ip'] for member_property in MEMBER_PROPERTIES: trans_vip[_create_key('member', member_property)].append( member.get(member_property, TRANSLATION_DEFAULTS.get(member_property))) for hm_property in HEALTH_MONITOR_PROPERTIES: trans_vip[ _create_key('hm', _trans_prop_name(hm_property))] = [] for hm in extended_vip['health_monitors']: hm_pool = plugin.get_pool_health_monitor(context, hm['id'], extended_vip['pool']['id']) if hm_pool['status'] != constants.PENDING_DELETE: for hm_property in HEALTH_MONITOR_PROPERTIES: value = hm.get(hm_property, TRANSLATION_DEFAULTS.get(hm_property)) trans_vip[_create_key('hm', _trans_prop_name(hm_property))].append(value) ids = get_ids(extended_vip) trans_vip['__ids__'] = ids if 'pip_address' in extended_vip: trans_vip['pip_address'] = extended_vip['pip_address'] LOG.debug('Translated Vip graph: %s', trans_vip) return trans_vip neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/common/0000775000567000056710000000000012701410110027451 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/common/__init__.py0000664000567000056710000000000012701407726031573 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/common/agent_driver_base.py0000664000567000056710000004316112701407726033516 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import uuid from neutron.common import rpc as n_rpc from neutron.db import agents_db from neutron.extensions import portbindings from neutron.plugins.common import constants as np_const from neutron.services import provider_configuration as provconf from neutron_lib import constants as q_const from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_utils import importutils from neutron_lbaas._i18n import _, _LW from neutron_lbaas.db.loadbalancer import loadbalancer_db from neutron_lbaas.extensions import lbaas_agentscheduler from neutron_lbaas.services.loadbalancer import constants as l_const from neutron_lbaas.services.loadbalancer.drivers import abstract_driver LOG = logging.getLogger(__name__) POOL_SCHEDULERS = 'pool_schedulers' AGENT_SCHEDULER_OPTS = [ cfg.StrOpt('loadbalancer_pool_scheduler_driver', default='neutron_lbaas.services.loadbalancer.agent_scheduler' '.ChanceScheduler', help=_('Driver to use for scheduling ' 'pool to a default loadbalancer agent')), ] cfg.CONF.register_opts(AGENT_SCHEDULER_OPTS) class DriverNotSpecified(n_exc.NeutronException): message = _("Device driver for agent should be specified " "in plugin driver.") class LoadBalancerCallbacks(object): # history # 1.0 Initial version # 2.0 Generic API for agent based drivers # - get_logical_device() handling changed; # - pool_deployed() and update_status() methods added; target = oslo_messaging.Target(version='2.0') def __init__(self, plugin): super(LoadBalancerCallbacks, self).__init__() self.plugin = plugin def get_ready_devices(self, context, host=None): with context.session.begin(subtransactions=True): agents = self.plugin.get_lbaas_agents(context, filters={'host': [host]}) if not agents: return [] elif len(agents) > 1: LOG.warning(_LW('Multiple lbaas agents found on host %s'), host) pools = 
self.plugin.list_pools_on_lbaas_agent(context, agents[0].id) pool_ids = [pool['id'] for pool in pools['pools']] qry = context.session.query(loadbalancer_db.Pool.id) qry = qry.filter(loadbalancer_db.Pool.id.in_(pool_ids)) qry = qry.filter( loadbalancer_db.Pool.status.in_( np_const.ACTIVE_PENDING_STATUSES)) up = True # makes pep8 and sqlalchemy happy qry = qry.filter(loadbalancer_db.Pool.admin_state_up == up) return [id for id, in qry] def get_logical_device(self, context, pool_id=None): with context.session.begin(subtransactions=True): qry = context.session.query(loadbalancer_db.Pool) qry = qry.filter_by(id=pool_id) pool = qry.one() retval = {} retval['pool'] = self.plugin._make_pool_dict(pool) if pool.vip: retval['vip'] = self.plugin._make_vip_dict(pool.vip) retval['vip']['port'] = ( self.plugin._core_plugin._make_port_dict(pool.vip.port) ) for fixed_ip in retval['vip']['port']['fixed_ips']: fixed_ip['subnet'] = ( self.plugin._core_plugin.get_subnet( context, fixed_ip['subnet_id'] ) ) retval['members'] = [ self.plugin._make_member_dict(m) for m in pool.members if ( m.status in np_const.ACTIVE_PENDING_STATUSES or m.status == np_const.INACTIVE) ] retval['healthmonitors'] = [ self.plugin._make_health_monitor_dict(hm.healthmonitor) for hm in pool.monitors if hm.status in np_const.ACTIVE_PENDING_STATUSES ] retval['driver'] = ( self.plugin.drivers[pool.provider.provider_name].device_driver) return retval def pool_deployed(self, context, pool_id): with context.session.begin(subtransactions=True): qry = context.session.query(loadbalancer_db.Pool) qry = qry.filter_by(id=pool_id) pool = qry.one() # set all resources to active if pool.status in np_const.ACTIVE_PENDING_STATUSES: pool.status = np_const.ACTIVE if (pool.vip and pool.vip.status in np_const.ACTIVE_PENDING_STATUSES): pool.vip.status = np_const.ACTIVE for m in pool.members: if m.status in np_const.ACTIVE_PENDING_STATUSES: m.status = np_const.ACTIVE for hm in pool.monitors: if hm.status in 
np_const.ACTIVE_PENDING_STATUSES: hm.status = np_const.ACTIVE def update_status(self, context, obj_type, obj_id, status): model_mapping = { 'pool': loadbalancer_db.Pool, 'vip': loadbalancer_db.Vip, 'member': loadbalancer_db.Member, 'health_monitor': loadbalancer_db.PoolMonitorAssociation } if obj_type not in model_mapping: raise n_exc.Invalid(_('Unknown object type: %s') % obj_type) try: if obj_type == 'health_monitor': self.plugin.update_pool_health_monitor( context, obj_id['monitor_id'], obj_id['pool_id'], status) else: self.plugin.update_status( context, model_mapping[obj_type], obj_id, status) except n_exc.NotFound: # update_status may come from agent on an object which was # already deleted from db with other request LOG.warning(_LW('Cannot update status: %(obj_type)s %(obj_id)s ' 'not found in the DB, it was probably deleted ' 'concurrently'), {'obj_type': obj_type, 'obj_id': obj_id}) def pool_destroyed(self, context, pool_id=None): """Agent confirmation hook that a pool has been destroyed. This method exists for subclasses to change the deletion behavior. """ pass def plug_vip_port(self, context, port_id=None, host=None): if not port_id: return try: port = self.plugin._core_plugin.get_port( context, port_id ) except n_exc.PortNotFound: LOG.debug('Unable to find port %s to plug.', port_id) return port['admin_state_up'] = True port['device_owner'] = 'neutron:' + np_const.LOADBALANCER port['device_id'] = str(uuid.uuid5(uuid.NAMESPACE_DNS, str(host))) port[portbindings.HOST_ID] = host self.plugin._core_plugin.update_port( context, port_id, {'port': port} ) def unplug_vip_port(self, context, port_id=None, host=None): if not port_id: return try: port = self.plugin._core_plugin.get_port( context, port_id ) except n_exc.PortNotFound: LOG.debug('Unable to find port %s to unplug. 
This can occur when ' 'the Vip has been deleted first.', port_id) return port['admin_state_up'] = False port['device_owner'] = '' port['device_id'] = '' try: self.plugin._core_plugin.update_port( context, port_id, {'port': port} ) except n_exc.PortNotFound: LOG.debug('Unable to find port %s to unplug. This can occur when ' 'the Vip has been deleted first.', port_id) def update_pool_stats(self, context, pool_id=None, stats=None, host=None): self.plugin.update_pool_stats(context, pool_id, data=stats) class LoadBalancerAgentApi(object): """Plugin side of plugin to agent RPC API.""" # history # 1.0 Initial version # 1.1 Support agent_updated call # 2.0 Generic API for agent based drivers # - modify/reload/destroy_pool methods were removed; # - added methods to handle create/update/delete for every lbaas # object individually; def __init__(self, topic): target = oslo_messaging.Target(topic=topic, version='2.0') self.client = n_rpc.get_client(target) def create_vip(self, context, vip, host): cctxt = self.client.prepare(server=host) cctxt.cast(context, 'create_vip', vip=vip) def update_vip(self, context, old_vip, vip, host): cctxt = self.client.prepare(server=host) cctxt.cast(context, 'update_vip', old_vip=old_vip, vip=vip) def delete_vip(self, context, vip, host): cctxt = self.client.prepare(server=host) cctxt.cast(context, 'delete_vip', vip=vip) def create_pool(self, context, pool, host, driver_name): cctxt = self.client.prepare(server=host) cctxt.cast(context, 'create_pool', pool=pool, driver_name=driver_name) def update_pool(self, context, old_pool, pool, host): cctxt = self.client.prepare(server=host) cctxt.cast(context, 'update_pool', old_pool=old_pool, pool=pool) def delete_pool(self, context, pool, host): cctxt = self.client.prepare(server=host) cctxt.cast(context, 'delete_pool', pool=pool) def create_member(self, context, member, host): cctxt = self.client.prepare(server=host) cctxt.cast(context, 'create_member', member=member) def update_member(self, context, 
old_member, member, host): cctxt = self.client.prepare(server=host) cctxt.cast(context, 'update_member', old_member=old_member, member=member) def delete_member(self, context, member, host): cctxt = self.client.prepare(server=host) cctxt.cast(context, 'delete_member', member=member) def create_pool_health_monitor(self, context, health_monitor, pool_id, host): cctxt = self.client.prepare(server=host) cctxt.cast(context, 'create_pool_health_monitor', health_monitor=health_monitor, pool_id=pool_id) def update_pool_health_monitor(self, context, old_health_monitor, health_monitor, pool_id, host): cctxt = self.client.prepare(server=host) cctxt.cast(context, 'update_pool_health_monitor', old_health_monitor=old_health_monitor, health_monitor=health_monitor, pool_id=pool_id) def delete_pool_health_monitor(self, context, health_monitor, pool_id, host): cctxt = self.client.prepare(server=host) cctxt.cast(context, 'delete_pool_health_monitor', health_monitor=health_monitor, pool_id=pool_id) def agent_updated(self, context, admin_state_up, host): cctxt = self.client.prepare(server=host) cctxt.cast(context, 'agent_updated', payload={'admin_state_up': admin_state_up}) class AgentDriverBase(abstract_driver.LoadBalancerAbstractDriver): # name of device driver that should be used by the agent; # vendor specific plugin drivers must override it; device_driver = None def __init__(self, plugin): if not self.device_driver: raise DriverNotSpecified() self.agent_rpc = LoadBalancerAgentApi(l_const.LOADBALANCER_AGENT) self.plugin = plugin self._set_callbacks_on_plugin() self.plugin.agent_notifiers.update( {q_const.AGENT_TYPE_LOADBALANCER: self.agent_rpc}) pool_sched_driver = provconf.get_provider_driver_class( cfg.CONF.loadbalancer_pool_scheduler_driver, POOL_SCHEDULERS) self.pool_scheduler = importutils.import_object(pool_sched_driver) def _set_callbacks_on_plugin(self): # other agent based plugin driver might already set callbacks on plugin if hasattr(self.plugin, 'agent_callbacks'): 
return self.plugin.agent_endpoints = [ LoadBalancerCallbacks(self.plugin), agents_db.AgentExtRpcCallback(self.plugin) ] self.plugin.conn = n_rpc.create_connection() self.plugin.conn.create_consumer( l_const.LOADBALANCER_PLUGIN, self.plugin.agent_endpoints, fanout=False) self.plugin.conn.consume_in_threads() def get_pool_agent(self, context, pool_id): agent = self.plugin.get_lbaas_agent_hosting_pool(context, pool_id) if not agent: raise lbaas_agentscheduler.NoActiveLbaasAgent(pool_id=pool_id) return agent['agent'] def create_vip(self, context, vip): agent = self.get_pool_agent(context, vip['pool_id']) self.agent_rpc.create_vip(context, vip, agent['host']) def update_vip(self, context, old_vip, vip): agent = self.get_pool_agent(context, vip['pool_id']) if vip['status'] in np_const.ACTIVE_PENDING_STATUSES: self.agent_rpc.update_vip(context, old_vip, vip, agent['host']) else: self.agent_rpc.delete_vip(context, vip, agent['host']) def delete_vip(self, context, vip): self.plugin._delete_db_vip(context, vip['id']) agent = self.get_pool_agent(context, vip['pool_id']) self.agent_rpc.delete_vip(context, vip, agent['host']) def create_pool(self, context, pool): agent = self.pool_scheduler.schedule(self.plugin, context, pool, self.device_driver) if not agent: raise lbaas_agentscheduler.NoEligibleLbaasAgent(pool_id=pool['id']) self.agent_rpc.create_pool(context, pool, agent['host'], self.device_driver) def update_pool(self, context, old_pool, pool): agent = self.get_pool_agent(context, pool['id']) if pool['status'] in np_const.ACTIVE_PENDING_STATUSES: self.agent_rpc.update_pool(context, old_pool, pool, agent['host']) else: self.agent_rpc.delete_pool(context, pool, agent['host']) def delete_pool(self, context, pool): # get agent first to know host as binding will be deleted # after pool is deleted from db agent = self.plugin.get_lbaas_agent_hosting_pool(context, pool['id']) self.plugin._delete_db_pool(context, pool['id']) if agent: self.agent_rpc.delete_pool(context, pool, 
agent['agent']['host']) def create_member(self, context, member): agent = self.get_pool_agent(context, member['pool_id']) self.agent_rpc.create_member(context, member, agent['host']) def update_member(self, context, old_member, member): agent = self.get_pool_agent(context, member['pool_id']) # member may change pool id if member['pool_id'] != old_member['pool_id']: old_pool_agent = self.plugin.get_lbaas_agent_hosting_pool( context, old_member['pool_id']) if old_pool_agent: self.agent_rpc.delete_member(context, old_member, old_pool_agent['agent']['host']) self.agent_rpc.create_member(context, member, agent['host']) else: self.agent_rpc.update_member(context, old_member, member, agent['host']) def delete_member(self, context, member): self.plugin._delete_db_member(context, member['id']) agent = self.get_pool_agent(context, member['pool_id']) self.agent_rpc.delete_member(context, member, agent['host']) def create_pool_health_monitor(self, context, healthmon, pool_id): # healthmon is not used here agent = self.get_pool_agent(context, pool_id) self.agent_rpc.create_pool_health_monitor(context, healthmon, pool_id, agent['host']) def update_pool_health_monitor(self, context, old_health_monitor, health_monitor, pool_id): agent = self.get_pool_agent(context, pool_id) self.agent_rpc.update_pool_health_monitor(context, old_health_monitor, health_monitor, pool_id, agent['host']) def delete_pool_health_monitor(self, context, health_monitor, pool_id): self.plugin._delete_db_pool_health_monitor( context, health_monitor['id'], pool_id ) agent = self.get_pool_agent(context, pool_id) self.agent_rpc.delete_pool_health_monitor(context, health_monitor, pool_id, agent['host']) def stats(self, context, pool_id): pass neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/a10networks/0000775000567000056710000000000012701410110030337 5ustar 
jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/a10networks/__init__.py0000664000567000056710000000000012701407726032461 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/a10networks/README.txt0000664000567000056710000000226212701407726032062 0ustar jenkinsjenkins00000000000000A10 Networks LBaaS Driver Installation info: To use this driver, you must: - Install the a10-neutron-lbaas module. (E.g.: 'pip install a10-neutron-lbaas') - Create a driver config file, a sample of which is given below. - Enable it in neutron.conf - Restart neutron-server Third-party CI info: Contact info for any problems is: a10-openstack-ci at a10networks dot com Or contact Doug Wiegley directly (IRC: dougwig) Configuration file: Create a configuration file with a list of A10 appliances, similar to the file below, located at: /etc/neutron/services/loadbalancer/a10networks/config.py Or you can override that directory by setting the environment variable A10_CONFIG_DIR. Example config file: devices = { "ax1": { "name": "ax1", "host": "10.10.100.20", "port": 443, "protocol": "https", "username": "admin", "password": "a10", "status": True, "autosnat": False, "api_version": "2.1", "v_method": "LSI", "max_instance": 5000, "use_float": False, "method": "hash" }, "ax4": { "host": "10.10.100.23", "username": "admin", "password": "a10", }, } neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/a10networks/driver_v1.py0000664000567000056710000001476312701407726032650 0ustar jenkinsjenkins00000000000000# Copyright 2014, Doug Wiegley (dougwig), A10 Networks # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import a10_neutron_lbaas from neutron.db import l3_db from neutron.plugins.common import constants from oslo_log import log as logging from neutron_lbaas.db.loadbalancer import loadbalancer_db as lb_db from neutron_lbaas.services.loadbalancer.drivers import abstract_driver VERSION = "1.0.0" LOG = logging.getLogger(__name__) # Most driver calls below are straight passthroughs to the A10 package # 'a10_neutron_lbaas'. Any function that has not been fully abstracted # into the openstack driver/plugin interface is NOT passed through, to # make it obvious which hidden interfaces/db calls that we rely on. class ThunderDriver(abstract_driver.LoadBalancerAbstractDriver): def __init__(self, plugin): LOG.debug("A10Driver: init version=%s", VERSION) self.plugin = plugin # Map the string types to neutron classes/functions, in order to keep # from reaching into the bowels of Neutron from anywhere but this file. self.neutron_map = { 'member': { 'model': lb_db.Member, 'delete_func': self.plugin._delete_db_member, }, 'pool': { 'model': lb_db.Pool, 'delete_func': self.plugin._delete_db_pool, }, 'vip': { 'model': lb_db.Vip, 'delete_func': self.plugin._delete_db_vip, }, } LOG.debug("A10Driver: initializing, version=%s, lbaas_manager=%s", VERSION, a10_neutron_lbaas.VERSION) self.a10 = a10_neutron_lbaas.A10OpenstackLBV1(self) # The following private helper methods are used by a10_neutron_lbaas, # and reflect the neutron interfaces required by that package. 
def _hm_binding_count(self, context, hm_id): return context.session.query(lb_db.PoolMonitorAssociation).filter_by( monitor_id=hm_id).join(lb_db.Pool).count() def _member_count(self, context, member): return context.session.query(lb_db.Member).filter_by( tenant_id=member['tenant_id'], address=member['address']).count() def _member_get(self, context, member_id): return self.plugin.get_member(context, member_id) def _member_get_ip(self, context, member, use_float=False): ip_address = member['address'] if use_float: fip_qry = context.session.query(l3_db.FloatingIP) if fip_qry.filter_by(fixed_ip_address=ip_address).count() > 0: float_address = fip_qry.filter_by( fixed_ip_address=ip_address).first() ip_address = str(float_address.floating_ip_address) return ip_address def _pool_get_hm(self, context, hm_id): return self.plugin.get_health_monitor(context, hm_id) def _pool_get_tenant_id(self, context, pool_id): pool_qry = context.session.query(lb_db.Pool).filter_by(id=pool_id) z = pool_qry.first() if z: return z.tenant_id else: return '' def _pool_get_vip_id(self, context, pool_id): pool_qry = context.session.query(lb_db.Pool).filter_by(id=pool_id) z = pool_qry.first() if z: return z.vip_id else: return '' def _pool_total(self, context, tenant_id): return context.session.query(lb_db.Pool).filter_by( tenant_id=tenant_id).count() def _vip_get(self, context, vip_id): return self.plugin.get_vip(context, vip_id) def _active(self, context, model_type, model_id): self.plugin.update_status(context, self.neutron_map[model_type]['model'], model_id, constants.ACTIVE) def _failed(self, context, model_type, model_id): self.plugin.update_status(context, self.neutron_map[model_type]['model'], model_id, constants.ERROR) def _db_delete(self, context, model_type, model_id): self.neutron_map[model_type]['delete_func'](context, model_id) def _hm_active(self, context, hm_id, pool_id): self.plugin.update_pool_health_monitor(context, hm_id, pool_id, constants.ACTIVE) def _hm_failed(self, context, 
hm_id, pool_id): self.plugin.update_pool_health_monitor(context, hm_id, pool_id, constants.ERROR) def _hm_db_delete(self, context, hm_id, pool_id): self.plugin._delete_db_pool_health_monitor(context, hm_id, pool_id) # Pass-through driver def create_vip(self, context, vip): self.a10.vip.create(context, vip) def update_vip(self, context, old_vip, vip): self.a10.vip.update(context, old_vip, vip) def delete_vip(self, context, vip): self.a10.vip.delete(context, vip) def create_pool(self, context, pool): self.a10.pool.create(context, pool) def update_pool(self, context, old_pool, pool): self.a10.pool.update(context, old_pool, pool) def delete_pool(self, context, pool): self.a10.pool.delete(context, pool) def stats(self, context, pool_id): return self.a10.pool.stats(context, pool_id) def create_member(self, context, member): self.a10.member.create(context, member) def update_member(self, context, old_member, member): self.a10.member.update(context, old_member, member) def delete_member(self, context, member): self.a10.member.delete(context, member) def update_pool_health_monitor(self, context, old_hm, hm, pool_id): self.a10.hm.update(context, old_hm, hm, pool_id) def create_pool_health_monitor(self, context, hm, pool_id): self.a10.hm.create(context, hm, pool_id) def delete_pool_health_monitor(self, context, hm, pool_id): self.a10.hm.delete(context, hm, pool_id) neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/haproxy/0000775000567000056710000000000012701410110027653 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/haproxy/__init__.py0000664000567000056710000000000012701407726031775 0ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000015000000000000011211 Lustar 
00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/haproxy/synchronous_namespace_driver.pyneutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/haproxy/synchronous_namespace_driver0000664000567000056710000000200312701407726035575 0ustar jenkinsjenkins00000000000000# Copyright 2014-2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from neutron_lbaas._i18n import _LW from neutron_lbaas.drivers.haproxy import synchronous_namespace_driver LOG = logging.getLogger(__name__) LOG.warning(_LW("This path has been deprecated. " "Use neutron_lbaas.drivers.haproxy." "synchronous_namespace_driver instead.")) class HaproxyNSDriver(synchronous_namespace_driver.HaproxyNSDriver): pass neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/haproxy/templates/0000775000567000056710000000000012701410110031651 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/haproxy/templates/haproxy_base.j20000664000567000056710000000176412701407726034625 0ustar jenkinsjenkins00000000000000{# # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # #} # Configuration for {{ loadbalancer_name }} global daemon user nobody group {{ usergroup }} log /dev/log local0 log /dev/log local1 notice stats socket {{ sock_path }} mode 0666 level user defaults log global retries 3 option redispatch timeout connect 5000 timeout client 50000 timeout server 50000 {% block proxies %}{% endblock proxies %} neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/haproxy/templates/haproxy_proxies.j20000664000567000056710000000713012701407726035375 0ustar jenkinsjenkins00000000000000{# # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# #} {% extends 'haproxy_base.j2' %} {% macro bind_macro(constants, listener, lb_vip_address) %} {% if listener.default_tls_path %} {% set def_crt_opt = "ssl crt %s"|format(listener.default_tls_path)|trim() %} {% else %} {% set def_crt_opt = "" %} {% endif %} {% if listener.crt_dir %} {% set crt_dir_opt = "crt %s"|format(listener.crt_dir)|trim() %} {% else %} {% set crt_dir_opt = "" %} {% endif %} bind {{ lb_vip_address }}:{{ listener.protocol_port }} {{ "%s %s"|format(def_crt_opt, crt_dir_opt)|trim() }} {% endmacro %} {% macro use_backend_macro(listener) %} {% if listener.default_pool %} default_backend {{ listener.default_pool.id }} {% endif %} {% endmacro %} {% macro frontend_macro(constants, listener, lb_vip_address) %} frontend {{ listener.id }} option tcplog {% if listener.protocol == constants.PROTOCOL_TERMINATED_HTTPS %} redirect scheme https if !{ ssl_fc } {% endif %} {% if listener.connection_limit is defined %} maxconn {{ listener.connection_limit }} {% endif %} {% if listener.protocol_mode == constants.PROTOCOL_HTTP.lower() %} option forwardfor {% endif %} {{ bind_macro(constants, listener, lb_vip_address)|trim() }} mode {{ listener.protocol_mode }} {% if listener.default_pool %} default_backend {{ listener.default_pool.id }} {% endif %} {% endmacro %} {% macro backend_macro(constants, pool) %} backend {{ pool.id }} mode {{ pool.protocol }} balance {{ pool.lb_algorithm }} {% if pool.session_persistence %} {% if pool.session_persistence.type == constants.SESSION_PERSISTENCE_SOURCE_IP %} stick-table type ip size 10k stick on src {% elif pool.session_persistence.type == constants.SESSION_PERSISTENCE_HTTP_COOKIE %} cookie SRV insert indirect nocache {% elif pool.session_persistence.type == constants.SESSION_PERSISTENCE_APP_COOKIE and pool.session_persistence.cookie_name %} appsession {{ pool.session_persistence.cookie_name }} len 56 timeout 3h {% endif %} {% endif %} {% if pool.health_monitor %} timeout check {{ pool.health_monitor.timeout }} {% if 
pool.health_monitor.type == constants.HEALTH_MONITOR_HTTP or pool.health_monitor.type == constants.HEALTH_MONITOR_HTTPS %} option httpchk {{ pool.health_monitor.http_method }} {{ pool.health_monitor.url_path }} http-check expect rstatus {{ pool.health_monitor.expected_codes }} {% endif %} {% if pool.health_monitor.type == constants.HEALTH_MONITOR_HTTPS %} option ssl-hello-chk {% endif %} {% endif %} {% for member in pool.members %} {% if pool.health_monitor %} {% set hm_opt = " check inter %ds fall %d"|format(pool.health_monitor.delay, pool.health_monitor.max_retries) %} {% else %} {% set hm_opt = "" %} {% endif %} {%if pool.session_persistence.type == constants.SESSION_PERSISTENCE_HTTP_COOKIE %} {% set persistence_opt = " cookie %s"|format(member.id) %} {% else %} {% set persistence_opt = "" %} {% endif %} {{ "server %s %s:%d weight %s%s%s"|e|format(member.id, member.address, member.protocol_port, member.weight, hm_opt, persistence_opt)|trim() }} {% endfor %} {% endmacro %} ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/haproxy/templates/haproxy.loadbalancer.j2neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/haproxy/templates/haproxy.loadbalanc0000664000567000056710000000213012701407726035364 0ustar jenkinsjenkins00000000000000{# # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# #} {% extends 'haproxy_proxies.j2' %} {% set loadbalancer_name = loadbalancer.name %} {% set usergroup = user_group %} {% set sock_path = stats_sock %} {% block proxies %} {% from 'haproxy_proxies.j2' import frontend_macro as frontend_macro, backend_macro%} {% for listener in loadbalancer.listeners %} {{ frontend_macro(constants, listener, loadbalancer.vip_address) }} {% endfor %} {% for pool in loadbalancer.pools %} {{ backend_macro(constants, pool) }} {% endfor %} {% endblock proxies %} neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/haproxy/plugin_driver.py0000664000567000056710000000164612701407726033130 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lbaas.services.loadbalancer.drivers.common \ import agent_driver_base from neutron_lbaas.services.loadbalancer.drivers.haproxy \ import namespace_driver class HaproxyOnHostPluginDriver(agent_driver_base.AgentDriverBase): device_driver = namespace_driver.DRIVER_NAME neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/haproxy/jinja_cfg.py0000664000567000056710000003150612701407726032167 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import jinja2 import six from neutron.common import utils as n_utils from neutron.plugins.common import constants as plugin_constants from oslo_config import cfg from neutron_lbaas._i18n import _ from neutron_lbaas.common import cert_manager from neutron_lbaas.common.tls_utils import cert_parser from neutron_lbaas.services.loadbalancer import constants from neutron_lbaas.services.loadbalancer import data_models CERT_MANAGER_PLUGIN = cert_manager.get_backend() PROTOCOL_MAP = { constants.PROTOCOL_TCP: 'tcp', constants.PROTOCOL_HTTP: 'http', constants.PROTOCOL_HTTPS: 'tcp', constants.PROTOCOL_TERMINATED_HTTPS: 'http' } BALANCE_MAP = { constants.LB_METHOD_ROUND_ROBIN: 'roundrobin', constants.LB_METHOD_LEAST_CONNECTIONS: 'leastconn', constants.LB_METHOD_SOURCE_IP: 'source' } STATS_MAP = { constants.STATS_ACTIVE_CONNECTIONS: 'scur', constants.STATS_MAX_CONNECTIONS: 'smax', constants.STATS_CURRENT_SESSIONS: 'scur', constants.STATS_MAX_SESSIONS: 'smax', constants.STATS_TOTAL_CONNECTIONS: 'stot', constants.STATS_TOTAL_SESSIONS: 'stot', constants.STATS_IN_BYTES: 'bin', constants.STATS_OUT_BYTES: 'bout', constants.STATS_CONNECTION_ERRORS: 'econ', constants.STATS_RESPONSE_ERRORS: 'eresp' } MEMBER_STATUSES = plugin_constants.ACTIVE_PENDING_STATUSES + ( plugin_constants.INACTIVE,) TEMPLATES_DIR = os.path.abspath( os.path.join(os.path.dirname(__file__), 'templates/')) JINJA_ENV = None jinja_opts = [ cfg.StrOpt( 'jinja_config_template', default=os.path.join( TEMPLATES_DIR, 'haproxy.loadbalancer.j2'), help=_('Jinja template file for haproxy configuration')) ] 
cfg.CONF.register_opts(jinja_opts, 'haproxy') def save_config(conf_path, loadbalancer, socket_path, user_group, haproxy_base_dir): """Convert a logical configuration to the HAProxy version. :param conf_path: location of Haproxy configuration :param loadbalancer: the load balancer object :param socket_path: location of haproxy socket data :param user_group: user group :param haproxy_base_dir: location of the instances state data """ config_str = render_loadbalancer_obj(loadbalancer, user_group, socket_path, haproxy_base_dir) n_utils.replace_file(conf_path, config_str) def _get_template(): """Retrieve Jinja template :returns: Jinja template """ global JINJA_ENV if not JINJA_ENV: template_loader = jinja2.FileSystemLoader( searchpath=os.path.dirname(cfg.CONF.haproxy.jinja_config_template)) JINJA_ENV = jinja2.Environment( loader=template_loader, trim_blocks=True, lstrip_blocks=True) return JINJA_ENV.get_template(os.path.basename( cfg.CONF.haproxy.jinja_config_template)) def _store_listener_crt(haproxy_base_dir, listener, cert): """Store TLS certificate :param haproxy_base_dir: location of the instances state data :param listener: the listener object :param cert: the TLS certificate :returns: location of the stored certificate """ cert_path = _retrieve_crt_path(haproxy_base_dir, listener, cert.primary_cn) # build a string that represents the pem file to be saved pem = _build_pem(cert) n_utils.replace_file(cert_path, pem) return cert_path def _retrieve_crt_path(haproxy_base_dir, listener, primary_cn): """Retrieve TLS certificate location :param haproxy_base_dir: location of the instances state data :param listener: the listener object :param primary_cn: primary_cn used for identifying TLS certificate :returns: TLS certificate location """ confs_dir = os.path.abspath(os.path.normpath(haproxy_base_dir)) confs_path = os.path.join(confs_dir, listener.id) if haproxy_base_dir and listener.id: if not os.path.isdir(confs_path): os.makedirs(confs_path, 0o755) return os.path.join( 
confs_path, '{0}.pem'.format(primary_cn)) def _process_tls_certificates(listener): """Processes TLS data from the listener. Converts and uploads PEM data to the Amphora API :param listener: the listener object :returns: TLS_CERT and SNI_CERTS """ cert_mgr = CERT_MANAGER_PLUGIN.CertManager() tls_cert = None sni_certs = [] # Retrieve, map and store default TLS certificate if listener.default_tls_container_id: tls_cert = _map_cert_tls_container( cert_mgr.get_cert( project_id=listener.tenant_id, cert_ref=listener.default_tls_container_id, resource_ref=cert_mgr.get_service_url( listener.loadbalancer_id), check_only=True ) ) if listener.sni_containers: # Retrieve, map and store SNI certificates for sni_cont in listener.sni_containers: cert_container = _map_cert_tls_container( cert_mgr.get_cert( project_id=listener.tenant_id, cert_ref=sni_cont.tls_container_id, resource_ref=cert_mgr.get_service_url( listener.loadbalancer_id), check_only=True ) ) sni_certs.append(cert_container) return {'tls_cert': tls_cert, 'sni_certs': sni_certs} def _get_primary_cn(tls_cert): """Retrieve primary cn for TLS certificate :param tls_cert: the TLS certificate :returns: primary cn of the TLS certificate """ return cert_parser.get_host_names(tls_cert)['cn'] def _map_cert_tls_container(cert): """Map cert data to TLS data model :param cert: TLS certificate :returns: mapped TLSContainer object """ certificate = cert.get_certificate() pkey = cert_parser.dump_private_key(cert.get_private_key(), cert.get_private_key_passphrase()) return data_models.TLSContainer( primary_cn=_get_primary_cn(certificate), private_key=pkey, certificate=certificate, intermediates=cert.get_intermediates()) def _build_pem(tls_cert): """Generate PEM encoded TLS certificate data :param tls_cert: TLS certificate :returns: PEm encoded certificate data """ pem = () if tls_cert.intermediates: for c in tls_cert.intermediates: pem = pem + (c,) if tls_cert.certificate: pem = pem + (tls_cert.certificate,) if tls_cert.private_key: 
pem = pem + (tls_cert.private_key,) return "\n".join(pem) def render_loadbalancer_obj(loadbalancer, user_group, socket_path, haproxy_base_dir): """Renders load balancer object :param loadbalancer: the load balancer object :param user_group: the user group :param socket_path: location of the instances socket data :param haproxy_base_dir: location of the instances state data :returns: rendered load balancer configuration """ loadbalancer = _transform_loadbalancer(loadbalancer, haproxy_base_dir) return _get_template().render({'loadbalancer': loadbalancer, 'user_group': user_group, 'stats_sock': socket_path}, constants=constants) def _transform_loadbalancer(loadbalancer, haproxy_base_dir): """Transforms load balancer object :param loadbalancer: the load balancer object :param haproxy_base_dir: location of the instances state data :returns: dictionary of transformed load balancer values """ listeners = [_transform_listener(x, haproxy_base_dir) for x in loadbalancer.listeners if x.admin_state_up] pools = [_transform_pool(x) for x in loadbalancer.pools] return { 'name': loadbalancer.name, 'vip_address': loadbalancer.vip_address, 'listeners': listeners, 'pools': pools } def _transform_listener(listener, haproxy_base_dir): """Transforms listener object :param listener: the listener object :param haproxy_base_dir: location of the instances state data :returns: dictionary of transformed listener values """ data_dir = os.path.join(haproxy_base_dir, listener.id) ret_value = { 'id': listener.id, 'protocol_port': listener.protocol_port, 'protocol_mode': PROTOCOL_MAP[listener.protocol], 'protocol': listener.protocol } if listener.connection_limit and listener.connection_limit > -1: ret_value['connection_limit'] = listener.connection_limit if listener.default_pool: ret_value['default_pool'] = _transform_pool(listener.default_pool) # Process and store certificates certs = _process_tls_certificates(listener) if listener.default_tls_container_id: ret_value['default_tls_path'] = 
_store_listener_crt( haproxy_base_dir, listener, certs['tls_cert']) if listener.sni_containers: for c in certs['sni_certs']: _store_listener_crt(haproxy_base_dir, listener, c) ret_value['crt_dir'] = data_dir return ret_value def _transform_pool(pool): """Transforms pool object :param pool: the pool object :returns: dictionary of transformed pool values """ ret_value = { 'id': pool.id, 'protocol': PROTOCOL_MAP[pool.protocol], 'lb_algorithm': BALANCE_MAP.get(pool.lb_algorithm, 'roundrobin'), 'members': [], 'health_monitor': '', 'session_persistence': '', 'admin_state_up': pool.admin_state_up, 'provisioning_status': pool.provisioning_status } members = [_transform_member(x) for x in pool.members if _include_member(x)] ret_value['members'] = members if pool.healthmonitor and pool.healthmonitor.admin_state_up: ret_value['health_monitor'] = _transform_health_monitor( pool.healthmonitor) if pool.session_persistence: ret_value['session_persistence'] = _transform_session_persistence( pool.session_persistence) return ret_value def _transform_session_persistence(persistence): """Transforms session persistence object :param persistence: the session persistence object :returns: dictionary of transformed session persistence values """ return { 'type': persistence.type, 'cookie_name': persistence.cookie_name } def _transform_member(member): """Transforms member object :param member: the member object :returns: dictionary of transformed member values """ return { 'id': member.id, 'address': member.address, 'protocol_port': member.protocol_port, 'weight': member.weight, 'admin_state_up': member.admin_state_up, 'subnet_id': member.subnet_id, 'provisioning_status': member.provisioning_status } def _transform_health_monitor(monitor): """Transforms health monitor object :param monitor: the health monitor object :returns: dictionary of transformed health monitor values """ return { 'id': monitor.id, 'type': monitor.type, 'delay': monitor.delay, 'timeout': monitor.timeout, 'max_retries': 
monitor.max_retries, 'http_method': monitor.http_method, 'url_path': monitor.url_path, 'expected_codes': '|'.join( _expand_expected_codes(monitor.expected_codes)), 'admin_state_up': monitor.admin_state_up, } def _include_member(member): """Helper for verifying member statues :param member: the member object :returns: boolean of status check """ return (member.provisioning_status in MEMBER_STATUSES and member.admin_state_up) def _expand_expected_codes(codes): """Expand the expected code string in set of codes :param codes: string of status codes :returns: list of status codes """ retval = set() for code in codes.replace(',', ' ').split(' '): code = code.strip() if not code: continue elif '-' in code: low, hi = code.split('-')[:2] retval.update( str(i) for i in six.moves.range(int(low), int(hi) + 1)) else: retval.add(code) return retval neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/haproxy/namespace_driver.py0000664000567000056710000003700212701407726033561 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import shutil import socket import netaddr from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.common import utils as n_utils from neutron.plugins.common import constants from neutron_lib import exceptions from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from neutron_lbaas._i18n import _, _LE, _LW from neutron_lbaas.services.loadbalancer.agent import agent_device_driver from neutron_lbaas.services.loadbalancer import constants as lb_const from neutron_lbaas.services.loadbalancer.drivers.haproxy import cfg as hacfg LOG = logging.getLogger(__name__) NS_PREFIX = 'qlbaas-' DRIVER_NAME = 'haproxy_ns' STATE_PATH_DEFAULT = '$state_path/lbaas' USER_GROUP_DEFAULT = 'nogroup' OPTS = [ cfg.StrOpt( 'loadbalancer_state_path', default=STATE_PATH_DEFAULT, help=_('Location to store config and state files'), deprecated_opts=[cfg.DeprecatedOpt('loadbalancer_state_path', group='DEFAULT')], ), cfg.StrOpt( 'user_group', default=USER_GROUP_DEFAULT, help=_('The user group'), deprecated_opts=[cfg.DeprecatedOpt('user_group', group='DEFAULT')], ), cfg.IntOpt( 'send_gratuitous_arp', default=3, help=_('When delete and re-add the same vip, send this many ' 'gratuitous ARPs to flush the ARP cache in the Router. 
' 'Set it below or equal to 0 to disable this feature.'), ) ] cfg.CONF.register_opts(OPTS, 'haproxy') class HaproxyNSDriver(agent_device_driver.AgentDeviceDriver): def __init__(self, conf, plugin_rpc): self.conf = conf self.state_path = conf.haproxy.loadbalancer_state_path try: vif_driver_class = n_utils.load_class_by_alias_or_classname( 'neutron.interface_drivers', conf.interface_driver) except ImportError: with excutils.save_and_reraise_exception(): msg = (_('Error importing interface driver: %s') % conf.interface_driver) LOG.error(msg) self.vif_driver = vif_driver_class(conf) self.plugin_rpc = plugin_rpc self.pool_to_port_id = {} @classmethod def get_name(cls): return DRIVER_NAME def create(self, logical_config): pool_id = logical_config['pool']['id'] namespace = get_ns_name(pool_id) self._plug(namespace, logical_config['vip']['port'], logical_config['vip']['address']) self._spawn(logical_config) def update(self, logical_config): pool_id = logical_config['pool']['id'] pid_path = self._get_state_file_path(pool_id, 'pid') extra_args = ['-sf'] extra_args.extend(p.strip() for p in open(pid_path, 'r')) self._spawn(logical_config, extra_args) def _spawn(self, logical_config, extra_cmd_args=()): pool_id = logical_config['pool']['id'] namespace = get_ns_name(pool_id) conf_path = self._get_state_file_path(pool_id, 'conf') pid_path = self._get_state_file_path(pool_id, 'pid') sock_path = self._get_state_file_path(pool_id, 'sock') user_group = self.conf.haproxy.user_group hacfg.save_config(conf_path, logical_config, sock_path, user_group) cmd = ['haproxy', '-f', conf_path, '-p', pid_path] cmd.extend(extra_cmd_args) ns = ip_lib.IPWrapper(namespace=namespace) ns.netns.execute(cmd) # remember the pool<>port mapping self.pool_to_port_id[pool_id] = logical_config['vip']['port']['id'] @n_utils.synchronized('haproxy-driver') def undeploy_instance(self, pool_id, **kwargs): cleanup_namespace = kwargs.get('cleanup_namespace', False) delete_namespace = kwargs.get('delete_namespace', 
False) namespace = get_ns_name(pool_id) pid_path = self._get_state_file_path(pool_id, 'pid') # kill the process kill_pids_in_file(pid_path) # unplug the ports if pool_id in self.pool_to_port_id: self._unplug(namespace, self.pool_to_port_id[pool_id]) # delete all devices from namespace; # used when deleting orphans and port_id is not known for pool_id if cleanup_namespace: ns = ip_lib.IPWrapper(namespace=namespace) for device in ns.get_devices(exclude_loopback=True): self.vif_driver.unplug(device.name, namespace=namespace) # remove the configuration directory conf_dir = os.path.dirname(self._get_state_file_path(pool_id, '')) if os.path.isdir(conf_dir): shutil.rmtree(conf_dir) if delete_namespace: ns = ip_lib.IPWrapper(namespace=namespace) ns.garbage_collect_namespace() def exists(self, pool_id): namespace = get_ns_name(pool_id) root_ns = ip_lib.IPWrapper() socket_path = self._get_state_file_path(pool_id, 'sock', False) if root_ns.netns.exists(namespace) and os.path.exists(socket_path): try: s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) s.connect(socket_path) return True except socket.error: pass return False def get_stats(self, pool_id): socket_path = self._get_state_file_path(pool_id, 'sock', False) TYPE_BACKEND_REQUEST = 2 TYPE_SERVER_REQUEST = 4 if os.path.exists(socket_path): parsed_stats = self._get_stats_from_socket( socket_path, entity_type=TYPE_BACKEND_REQUEST | TYPE_SERVER_REQUEST) pool_stats = self._get_backend_stats(parsed_stats) pool_stats['members'] = self._get_servers_stats(parsed_stats) return pool_stats else: LOG.warning(_LW('Stats socket not found for pool %s'), pool_id) return {} def _get_backend_stats(self, parsed_stats): TYPE_BACKEND_RESPONSE = '1' for stats in parsed_stats: if stats.get('type') == TYPE_BACKEND_RESPONSE: unified_stats = dict((k, stats.get(v, '')) for k, v in hacfg.STATS_MAP.items()) return unified_stats return {} def _get_servers_stats(self, parsed_stats): TYPE_SERVER_RESPONSE = '2' res = {} for stats in parsed_stats: if 
stats.get('type') == TYPE_SERVER_RESPONSE: res[stats['svname']] = { lb_const.STATS_STATUS: (constants.INACTIVE if stats['status'] == 'DOWN' else constants.ACTIVE), lb_const.STATS_HEALTH: stats['check_status'], lb_const.STATS_FAILED_CHECKS: stats['chkfail'] } return res def _get_stats_from_socket(self, socket_path, entity_type): try: s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) s.connect(socket_path) s.send('show stat -1 %s -1\n' % entity_type) raw_stats = '' chunk_size = 1024 while True: chunk = s.recv(chunk_size) raw_stats += chunk if len(chunk) < chunk_size: break return self._parse_stats(raw_stats) except socket.error as e: LOG.warning(_LW('Error while connecting to stats socket: %s'), e) return {} def _parse_stats(self, raw_stats): stat_lines = raw_stats.splitlines() if len(stat_lines) < 2: return [] stat_names = [name.strip('# ') for name in stat_lines[0].split(',')] res_stats = [] for raw_values in stat_lines[1:]: if not raw_values: continue stat_values = [value.strip() for value in raw_values.split(',')] res_stats.append(dict(zip(stat_names, stat_values))) return res_stats def _get_state_file_path(self, pool_id, kind, ensure_state_dir=True): """Returns the file name for a given kind of config file.""" confs_dir = os.path.abspath(os.path.normpath(self.state_path)) conf_dir = os.path.join(confs_dir, pool_id) if ensure_state_dir: if not os.path.isdir(conf_dir): os.makedirs(conf_dir, 0o755) return os.path.join(conf_dir, kind) def _plug(self, namespace, port, vip_address, reuse_existing=True): self.plugin_rpc.plug_vip_port(port['id']) interface_name = self.vif_driver.get_device_name(Wrap(port)) if ip_lib.device_exists(interface_name, namespace=namespace): if not reuse_existing: raise exceptions.PreexistingDeviceFailure( dev_name=interface_name ) else: self.vif_driver.plug( port['network_id'], port['id'], interface_name, port['mac_address'], namespace=namespace ) cidrs = [ '%s/%s' % (ip['ip_address'], netaddr.IPNetwork(ip['subnet']['cidr']).prefixlen) for 
ip in port['fixed_ips'] ] self.vif_driver.init_l3(interface_name, cidrs, namespace=namespace) # Haproxy socket binding to IPv6 VIP address will fail if this address # is not yet ready(i.e tentative address). if netaddr.IPAddress(vip_address).version == 6: device = ip_lib.IPDevice(interface_name, namespace=namespace) device.addr.wait_until_address_ready(vip_address) gw_ip = port['fixed_ips'][0]['subnet'].get('gateway_ip') if not gw_ip: host_routes = port['fixed_ips'][0]['subnet'].get('host_routes', []) for host_route in host_routes: if host_route['destination'] == "0.0.0.0/0": gw_ip = host_route['nexthop'] break if gw_ip: cmd = ['route', 'add', 'default', 'gw', gw_ip] ip_wrapper = ip_lib.IPWrapper(namespace=namespace) ip_wrapper.netns.execute(cmd, check_exit_code=False) # When delete and re-add the same vip, we need to # send gratuitous ARP to flush the ARP cache in the Router. gratuitous_arp = self.conf.haproxy.send_gratuitous_arp if gratuitous_arp > 0: for ip in port['fixed_ips']: cmd_arping = ['arping', '-U', '-I', interface_name, '-c', gratuitous_arp, ip['ip_address']] ip_wrapper.netns.execute(cmd_arping, check_exit_code=False) def _unplug(self, namespace, port_id): port_stub = {'id': port_id} self.plugin_rpc.unplug_vip_port(port_id) interface_name = self.vif_driver.get_device_name(Wrap(port_stub)) self.vif_driver.unplug(interface_name, namespace=namespace) def _is_active(self, logical_config): # haproxy wil be unable to start without any active vip if ('vip' not in logical_config or (logical_config['vip']['status'] not in constants.ACTIVE_PENDING_STATUSES) or not logical_config['vip']['admin_state_up']): return False # not checking pool's admin_state_up to utilize haproxy ability to # turn backend off instead of doing undeploy. 
# in this case "ERROR 503: Service Unavailable" will be returned if (logical_config['pool']['status'] not in constants.ACTIVE_PENDING_STATUSES): return False return True @n_utils.synchronized('haproxy-driver') def deploy_instance(self, logical_config): """Deploys loadbalancer if necessary :returns: True if loadbalancer was deployed, False otherwise """ # do actual deploy only if vip and pool are configured and active if not logical_config or not self._is_active(logical_config): return False if self.exists(logical_config['pool']['id']): self.update(logical_config) else: self.create(logical_config) return True def _refresh_device(self, pool_id): logical_config = self.plugin_rpc.get_logical_device(pool_id) # cleanup if the loadbalancer wasn't deployed (in case nothing to # deploy or any errors) if not self.deploy_instance(logical_config) and self.exists(pool_id): self.undeploy_instance(pool_id) def create_vip(self, vip): self._refresh_device(vip['pool_id']) def update_vip(self, old_vip, vip): self._refresh_device(vip['pool_id']) def delete_vip(self, vip): self.undeploy_instance(vip['pool_id']) def create_pool(self, pool): # nothing to do here because a pool needs a vip to be useful pass def update_pool(self, old_pool, pool): self._refresh_device(pool['id']) def delete_pool(self, pool): if self.exists(pool['id']): self.undeploy_instance(pool['id'], delete_namespace=True) def create_member(self, member): self._refresh_device(member['pool_id']) def update_member(self, old_member, member): self._refresh_device(member['pool_id']) def delete_member(self, member): self._refresh_device(member['pool_id']) def create_pool_health_monitor(self, health_monitor, pool_id): self._refresh_device(pool_id) def update_pool_health_monitor(self, old_health_monitor, health_monitor, pool_id): self._refresh_device(pool_id) def delete_pool_health_monitor(self, health_monitor, pool_id): self._refresh_device(pool_id) def remove_orphans(self, known_pool_ids): if not 
os.path.exists(self.state_path): return orphans = (pool_id for pool_id in os.listdir(self.state_path) if pool_id not in known_pool_ids) for pool_id in orphans: if self.exists(pool_id): self.undeploy_instance(pool_id, cleanup_namespace=True) # NOTE (markmcclain) For compliance with interface.py which expects objects class Wrap(object): """A light attribute wrapper for compatibility with the interface lib.""" def __init__(self, d): self.__dict__.update(d) def __getitem__(self, key): return self.__dict__[key] def get_ns_name(namespace_id): return NS_PREFIX + namespace_id def kill_pids_in_file(pid_path): if os.path.exists(pid_path): with open(pid_path, 'r') as pids: for pid in pids: pid = pid.strip() try: utils.execute(['kill', '-9', pid], run_as_root=True) except RuntimeError: LOG.exception( _LE('Unable to kill haproxy process: %s'), pid ) neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/drivers/haproxy/cfg.py0000664000567000056710000001660012701407726031012 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import itertools from neutron.common import utils as n_utils from neutron.plugins.common import constants as qconstants from six import moves from neutron_lbaas.services.loadbalancer import constants PROTOCOL_MAP = { constants.PROTOCOL_TCP: 'tcp', constants.PROTOCOL_HTTP: 'http', constants.PROTOCOL_HTTPS: 'tcp', } BALANCE_MAP = { constants.LB_METHOD_ROUND_ROBIN: 'roundrobin', constants.LB_METHOD_LEAST_CONNECTIONS: 'leastconn', constants.LB_METHOD_SOURCE_IP: 'source' } STATS_MAP = { constants.STATS_ACTIVE_CONNECTIONS: 'scur', constants.STATS_MAX_CONNECTIONS: 'smax', constants.STATS_CURRENT_SESSIONS: 'scur', constants.STATS_MAX_SESSIONS: 'smax', constants.STATS_TOTAL_CONNECTIONS: 'stot', constants.STATS_TOTAL_SESSIONS: 'stot', constants.STATS_IN_BYTES: 'bin', constants.STATS_OUT_BYTES: 'bout', constants.STATS_CONNECTION_ERRORS: 'econ', constants.STATS_RESPONSE_ERRORS: 'eresp' } ACTIVE_PENDING_STATUSES = qconstants.ACTIVE_PENDING_STATUSES INACTIVE = qconstants.INACTIVE def save_config(conf_path, logical_config, socket_path=None, user_group='nogroup'): """Convert a logical configuration to the HAProxy version.""" data = [] data.extend(_build_global(logical_config, socket_path=socket_path, user_group=user_group)) data.extend(_build_defaults(logical_config)) data.extend(_build_frontend(logical_config)) data.extend(_build_backend(logical_config)) n_utils.replace_file(conf_path, '\n'.join(data)) def _build_global(config, socket_path=None, user_group='nogroup'): opts = [ 'daemon', 'user nobody', 'group %s' % user_group, 'log /dev/log local0', 'log /dev/log local1 notice' ] if socket_path: opts.append('stats socket %s mode 0666 level user' % socket_path) return itertools.chain(['global'], ('\t' + o for o in opts)) def _build_defaults(config): opts = [ 'log global', 'retries 3', 'option redispatch', 'timeout connect 5000', 'timeout client 50000', 'timeout server 50000', ] return itertools.chain(['defaults'], ('\t' + o for o in opts)) def _build_frontend(config): protocol = 
config['vip']['protocol'] opts = [ 'option tcplog', 'bind %s:%d' % ( _get_first_ip_from_port(config['vip']['port']), config['vip']['protocol_port'] ), 'mode %s' % PROTOCOL_MAP[protocol], 'default_backend %s' % config['pool']['id'], ] if config['vip']['connection_limit'] >= 0: opts.append('maxconn %s' % config['vip']['connection_limit']) if protocol == constants.PROTOCOL_HTTP: opts.append('option forwardfor') if not config['vip']['admin_state_up']: opts.append('disabled') return itertools.chain( ['frontend %s' % config['vip']['id']], ('\t' + o for o in opts) ) def _build_backend(config): protocol = config['pool']['protocol'] lb_method = config['pool']['lb_method'] opts = [ 'mode %s' % PROTOCOL_MAP[protocol], 'balance %s' % BALANCE_MAP.get(lb_method, 'roundrobin') ] if protocol == constants.PROTOCOL_HTTP: opts.append('option forwardfor') # add the first health_monitor (if available) server_addon, health_opts = _get_server_health_option(config) opts.extend(health_opts) # add session persistence (if available) persist_opts = _get_session_persistence(config) opts.extend(persist_opts) # add the members for member in config['members']: if ((member['status'] in ACTIVE_PENDING_STATUSES or member['status'] == INACTIVE) and member['admin_state_up']): server = (('server %(id)s %(address)s:%(protocol_port)s ' 'weight %(weight)s') % member) + server_addon if _has_http_cookie_persistence(config): server += ' cookie %s' % member['id'] opts.append(server) if not config['pool']['admin_state_up']: opts.append('disabled') return itertools.chain( ['backend %s' % config['pool']['id']], ('\t' + o for o in opts) ) def _get_first_ip_from_port(port): for fixed_ip in port['fixed_ips']: return fixed_ip['ip_address'] def _get_server_health_option(config): """return the first active health option.""" for m in config['healthmonitors']: # not checking the status of healthmonitor for two reasons: # 1) status field is absent in HealthMonitor model # 2) only active HealthMonitors are fetched with # 
LoadBalancerCallbacks.get_logical_device if m['admin_state_up']: monitor = m break else: return '', [] server_addon = ' check inter %(delay)ds fall %(max_retries)d' % monitor opts = [ 'timeout check %ds' % monitor['timeout'] ] if monitor['type'] in (constants.HEALTH_MONITOR_HTTP, constants.HEALTH_MONITOR_HTTPS): opts.append('option httpchk %(http_method)s %(url_path)s' % monitor) opts.append( 'http-check expect rstatus %s' % '|'.join(_expand_expected_codes(monitor['expected_codes'])) ) if monitor['type'] == constants.HEALTH_MONITOR_HTTPS: opts.append('option ssl-hello-chk') return server_addon, opts def _get_session_persistence(config): persistence = config['vip'].get('session_persistence') if not persistence: return [] opts = [] if persistence['type'] == constants.SESSION_PERSISTENCE_SOURCE_IP: opts.append('stick-table type ip size 10k') opts.append('stick on src') elif (persistence['type'] == constants.SESSION_PERSISTENCE_HTTP_COOKIE and config.get('members')): opts.append('cookie SRV insert indirect nocache') elif (persistence['type'] == constants.SESSION_PERSISTENCE_APP_COOKIE and persistence.get('cookie_name')): opts.append('appsession %s len 56 timeout 3h' % persistence['cookie_name']) return opts def _has_http_cookie_persistence(config): return (config['vip'].get('session_persistence') and config['vip']['session_persistence']['type'] == constants.SESSION_PERSISTENCE_HTTP_COOKIE) def _expand_expected_codes(codes): """Expand the expected code string in set of codes. 
200-204 -> 200, 201, 202, 204 200, 203 -> 200, 203 """ retval = set() for code in codes.replace(',', ' ').split(' '): code = code.strip() if not code: continue elif '-' in code: low, hi = code.split('-')[:2] retval.update(str(i) for i in moves.range(int(low), int(hi) + 1)) else: retval.add(code) return retval neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/agent/0000775000567000056710000000000012701410110025601 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/agent/agent_device_driver.py0000664000567000056710000000477112701407726032177 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import six @six.add_metaclass(abc.ABCMeta) class AgentDeviceDriver(object): """Abstract device driver that defines the API required by LBaaS agent.""" @abc.abstractmethod def get_name(self): """Returns unique name across all LBaaS device drivers.""" pass @abc.abstractmethod def deploy_instance(self, logical_config): """Fully deploys a loadbalancer instance from a given config.""" pass @abc.abstractmethod def undeploy_instance(self, pool_id, **kwargs): """Fully undeploys the loadbalancer instance.""" pass @abc.abstractmethod def get_stats(self, pool_id): pass def remove_orphans(self, known_pool_ids): # Not all drivers will support this raise NotImplementedError() @abc.abstractmethod def create_vip(self, vip): pass @abc.abstractmethod def update_vip(self, old_vip, vip): pass @abc.abstractmethod def delete_vip(self, vip): pass @abc.abstractmethod def create_pool(self, pool): pass @abc.abstractmethod def update_pool(self, old_pool, pool): pass @abc.abstractmethod def delete_pool(self, pool): pass @abc.abstractmethod def create_member(self, member): pass @abc.abstractmethod def update_member(self, old_member, member): pass @abc.abstractmethod def delete_member(self, member): pass @abc.abstractmethod def create_pool_health_monitor(self, health_monitor, pool_id): pass @abc.abstractmethod def update_pool_health_monitor(self, old_health_monitor, health_monitor, pool_id): pass @abc.abstractmethod def delete_pool_health_monitor(self, health_monitor, pool_id): pass neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/agent/agent_api.py0000664000567000056710000000515112701407726030127 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.common import rpc as n_rpc import oslo_messaging class LbaasAgentApi(object): """Agent side of the Agent to Plugin RPC API.""" # history # 1.0 Initial version # 2.0 Generic API for agent based drivers # - get_logical_device() handling changed on plugin side; # - pool_deployed() and update_status() methods added; def __init__(self, topic, context, host): self.context = context self.host = host target = oslo_messaging.Target(topic=topic, version='2.0') self.client = n_rpc.get_client(target) def get_ready_devices(self): cctxt = self.client.prepare() return cctxt.call(self.context, 'get_ready_devices', host=self.host) def pool_destroyed(self, pool_id): cctxt = self.client.prepare() return cctxt.call(self.context, 'pool_destroyed', pool_id=pool_id) def pool_deployed(self, pool_id): cctxt = self.client.prepare() return cctxt.call(self.context, 'pool_deployed', pool_id=pool_id) def get_logical_device(self, pool_id): cctxt = self.client.prepare() return cctxt.call(self.context, 'get_logical_device', pool_id=pool_id) def update_status(self, obj_type, obj_id, status): cctxt = self.client.prepare() return cctxt.call(self.context, 'update_status', obj_type=obj_type, obj_id=obj_id, status=status) def plug_vip_port(self, port_id): cctxt = self.client.prepare() return cctxt.call(self.context, 'plug_vip_port', port_id=port_id, host=self.host) def unplug_vip_port(self, port_id): cctxt = self.client.prepare() return cctxt.call(self.context, 'unplug_vip_port', port_id=port_id, host=self.host) def update_pool_stats(self, pool_id, stats): cctxt = self.client.prepare() 
return cctxt.call(self.context, 'update_pool_stats', pool_id=pool_id, stats=stats, host=self.host) neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/agent/__init__.py0000664000567000056710000000000012701407726027723 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/agent/agent_manager.py0000664000567000056710000003321512701407726030772 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.agent import rpc as agent_rpc from neutron import context as ncontext from neutron.plugins.common import constants as np_const from neutron.services import provider_configuration as provconfig from neutron_lib import constants as n_const from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import loopingcall from oslo_service import periodic_task from oslo_utils import importutils from neutron_lbaas._i18n import _, _LE, _LI from neutron_lbaas.services.loadbalancer.agent import agent_api from neutron_lbaas.services.loadbalancer import constants as l_const LOG = logging.getLogger(__name__) DEVICE_DRIVERS = 'device_drivers' OPTS = [ cfg.MultiStrOpt( 'device_driver', default=['neutron_lbaas.services.loadbalancer.drivers' '.haproxy.namespace_driver.HaproxyNSDriver'], help=_('Drivers used to manage loadbalancing devices'), ), ] class DeviceNotFoundOnAgent(n_exc.NotFound): message = _('Unknown device with pool_id %(pool_id)s') class LbaasAgentManager(periodic_task.PeriodicTasks): # history # 1.0 Initial version # 1.1 Support agent_updated call # 2.0 Generic API for agent based drivers # - modify/reload/destroy_pool methods were removed; # - added methods to handle create/update/delete for every lbaas # object individually; target = oslo_messaging.Target(version='2.0') def __init__(self, conf): super(LbaasAgentManager, self).__init__(conf) self.conf = conf self.context = ncontext.get_admin_context_without_session() self.plugin_rpc = agent_api.LbaasAgentApi( l_const.LOADBALANCER_PLUGIN, self.context, self.conf.host ) self._load_drivers() self.agent_state = { 'binary': 'neutron-lbaas-agent', 'host': conf.host, 'topic': l_const.LOADBALANCER_AGENT, 'configurations': {'device_drivers': self.device_drivers.keys()}, 'agent_type': n_const.AGENT_TYPE_LOADBALANCER, 'start_flag': True} self.admin_state_up = True self._setup_state_rpc() self.needs_resync = False # 
pool_id->device_driver_name mapping used to store known instances self.instance_mapping = {} def _load_drivers(self): self.device_drivers = {} for driver in self.conf.device_driver: driver = provconfig.get_provider_driver_class(driver, DEVICE_DRIVERS) try: driver_inst = importutils.import_object( driver, self.conf, self.plugin_rpc ) except ImportError: msg = _('Error importing loadbalancer device driver: %s') raise SystemExit(msg % driver) driver_name = driver_inst.get_name() if driver_name not in self.device_drivers: self.device_drivers[driver_name] = driver_inst else: msg = _('Multiple device drivers with the same name found: %s') raise SystemExit(msg % driver_name) def _setup_state_rpc(self): self.state_rpc = agent_rpc.PluginReportStateAPI( l_const.LOADBALANCER_PLUGIN) report_interval = self.conf.AGENT.report_interval if report_interval: heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) heartbeat.start(interval=report_interval) def _report_state(self): try: instance_count = len(self.instance_mapping) self.agent_state['configurations']['instances'] = instance_count self.state_rpc.report_state(self.context, self.agent_state) self.agent_state.pop('start_flag', None) except Exception: LOG.exception(_LE("Failed reporting state!")) def initialize_service_hook(self, started_by): self.sync_state() @periodic_task.periodic_task def periodic_resync(self, context): if self.needs_resync: self.needs_resync = False self.sync_state() @periodic_task.periodic_task(spacing=6) def collect_stats(self, context): for pool_id, driver_name in self.instance_mapping.items(): driver = self.device_drivers[driver_name] try: stats = driver.get_stats(pool_id) if stats: self.plugin_rpc.update_pool_stats(pool_id, stats) except Exception: LOG.exception(_LE('Error updating statistics on pool %s'), pool_id) self.needs_resync = True def sync_state(self): known_instances = set(self.instance_mapping.keys()) try: ready_instances = set(self.plugin_rpc.get_ready_devices()) for 
deleted_id in known_instances - ready_instances: self._destroy_pool(deleted_id) for pool_id in ready_instances: self._reload_pool(pool_id) except Exception: LOG.exception(_LE('Unable to retrieve ready devices')) self.needs_resync = True self.remove_orphans() def _get_driver(self, pool_id): if pool_id not in self.instance_mapping: raise DeviceNotFoundOnAgent(pool_id=pool_id) driver_name = self.instance_mapping[pool_id] return self.device_drivers[driver_name] def _reload_pool(self, pool_id): try: logical_config = self.plugin_rpc.get_logical_device(pool_id) driver_name = logical_config['driver'] if driver_name not in self.device_drivers: LOG.error(_LE('No device driver on agent: %s.'), driver_name) self.plugin_rpc.update_status( 'pool', pool_id, np_const.ERROR) return self.device_drivers[driver_name].deploy_instance(logical_config) self.instance_mapping[pool_id] = driver_name self.plugin_rpc.pool_deployed(pool_id) except Exception: LOG.exception(_LE('Unable to deploy instance for pool: %s'), pool_id) self.needs_resync = True def _destroy_pool(self, pool_id): driver = self._get_driver(pool_id) try: driver.undeploy_instance(pool_id, delete_namespace=True) del self.instance_mapping[pool_id] self.plugin_rpc.pool_destroyed(pool_id) except Exception: LOG.exception(_LE('Unable to destroy device for pool: %s'), pool_id) self.needs_resync = True def remove_orphans(self): for driver_name in self.device_drivers: pool_ids = [pool_id for pool_id in self.instance_mapping if self.instance_mapping[pool_id] == driver_name] try: self.device_drivers[driver_name].remove_orphans(pool_ids) except NotImplementedError: pass # Not all drivers will support this def _handle_failed_driver_call(self, operation, obj_type, obj_id, driver): LOG.exception(_LE('%(operation)s %(obj)s %(id)s failed on device ' 'driver %(driver)s'), {'operation': operation.capitalize(), 'obj': obj_type, 'id': obj_id, 'driver': driver}) self.plugin_rpc.update_status(obj_type, obj_id, np_const.ERROR) def 
_update_status(self, obj_type, obj_id, admin_state_up): if admin_state_up: self.plugin_rpc.update_status(obj_type, obj_id, np_const.ACTIVE) else: self.plugin_rpc.update_status(obj_type, obj_id, l_const.DISABLED) def create_vip(self, context, vip): driver = self._get_driver(vip['pool_id']) try: driver.create_vip(vip) except Exception: self._handle_failed_driver_call('create', 'vip', vip['id'], driver.get_name()) else: self._update_status('vip', vip['id'], vip['admin_state_up']) def update_vip(self, context, old_vip, vip): driver = self._get_driver(vip['pool_id']) try: driver.update_vip(old_vip, vip) except Exception: self._handle_failed_driver_call('update', 'vip', vip['id'], driver.get_name()) else: self._update_status('vip', vip['id'], vip['admin_state_up']) def delete_vip(self, context, vip): driver = self._get_driver(vip['pool_id']) driver.delete_vip(vip) def create_pool(self, context, pool, driver_name): if driver_name not in self.device_drivers: LOG.error(_LE('No device driver on agent: %s.'), driver_name) self.plugin_rpc.update_status('pool', pool['id'], np_const.ERROR) return driver = self.device_drivers[driver_name] try: driver.create_pool(pool) except Exception: self._handle_failed_driver_call('create', 'pool', pool['id'], driver.get_name()) else: self.instance_mapping[pool['id']] = driver_name self._update_status('pool', pool['id'], pool['admin_state_up']) def update_pool(self, context, old_pool, pool): driver = self._get_driver(pool['id']) try: driver.update_pool(old_pool, pool) except Exception: self._handle_failed_driver_call('update', 'pool', pool['id'], driver.get_name()) else: self._update_status('pool', pool['id'], pool['admin_state_up']) def delete_pool(self, context, pool): driver = self._get_driver(pool['id']) driver.delete_pool(pool) del self.instance_mapping[pool['id']] def create_member(self, context, member): driver = self._get_driver(member['pool_id']) try: driver.create_member(member) except Exception: 
self._handle_failed_driver_call('create', 'member', member['id'], driver.get_name()) else: self._update_status('member', member['id'], member['admin_state_up']) def update_member(self, context, old_member, member): driver = self._get_driver(member['pool_id']) try: driver.update_member(old_member, member) except Exception: self._handle_failed_driver_call('update', 'member', member['id'], driver.get_name()) else: self._update_status('member', member['id'], member['admin_state_up']) def delete_member(self, context, member): driver = self._get_driver(member['pool_id']) driver.delete_member(member) def create_pool_health_monitor(self, context, health_monitor, pool_id): driver = self._get_driver(pool_id) assoc_id = {'pool_id': pool_id, 'monitor_id': health_monitor['id']} try: driver.create_pool_health_monitor(health_monitor, pool_id) except Exception: self._handle_failed_driver_call( 'create', 'health_monitor', assoc_id, driver.get_name()) else: self._update_status('health_monitor', assoc_id, health_monitor['admin_state_up']) def update_pool_health_monitor(self, context, old_health_monitor, health_monitor, pool_id): driver = self._get_driver(pool_id) assoc_id = {'pool_id': pool_id, 'monitor_id': health_monitor['id']} try: driver.update_pool_health_monitor(old_health_monitor, health_monitor, pool_id) except Exception: self._handle_failed_driver_call( 'update', 'health_monitor', assoc_id, driver.get_name()) else: self._update_status('health_monitor', assoc_id, health_monitor['admin_state_up']) def delete_pool_health_monitor(self, context, health_monitor, pool_id): driver = self._get_driver(pool_id) driver.delete_pool_health_monitor(health_monitor, pool_id) def agent_updated(self, context, payload): """Handle the agent_updated notification event.""" if payload['admin_state_up'] != self.admin_state_up: self.admin_state_up = payload['admin_state_up'] if self.admin_state_up: self.needs_resync = True else: for pool_id in self.instance_mapping.keys(): LOG.info(_LI("Destroying 
pool %s due to agent disabling"), pool_id) self._destroy_pool(pool_id) LOG.info(_LI("Agent_updated by server side %s!"), payload) neutron-lbaas-8.0.0/neutron_lbaas/services/loadbalancer/agent/agent.py0000664000567000056710000000405512701407727027301 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import eventlet eventlet.monkey_patch() from neutron.agent.common import config from neutron.agent.linux import interface from neutron.common import config as common_config from neutron.common import rpc as n_rpc from oslo_config import cfg from oslo_service import service from neutron_lbaas._i18n import _ from neutron_lbaas.services.loadbalancer.agent import agent_manager as manager from neutron_lbaas.services.loadbalancer import constants as l_const OPTS = [ cfg.IntOpt( 'periodic_interval', default=10, help=_('Seconds between periodic task runs') ) ] class LbaasAgentService(n_rpc.Service): def start(self): super(LbaasAgentService, self).start() self.tg.add_timer( cfg.CONF.periodic_interval, self.manager.run_periodic_tasks, None, None ) def main(): cfg.CONF.register_opts(OPTS) cfg.CONF.register_opts(manager.OPTS) # import interface options just in case the driver uses namespaces cfg.CONF.register_opts(interface.OPTS) config.register_interface_driver_opts_helper(cfg.CONF) config.register_agent_state_opts_helper(cfg.CONF) common_config.init(sys.argv[1:]) config.setup_logging() mgr = 
manager.LbaasAgentManager(cfg.CONF) svc = LbaasAgentService( host=cfg.CONF.host, topic=l_const.LOADBALANCER_AGENT, manager=mgr ) service.launch(cfg.CONF, svc).wait() neutron-lbaas-8.0.0/neutron_lbaas/drivers/0000775000567000056710000000000012701410110021727 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/drivers/netscaler/0000775000567000056710000000000012701410110023707 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/drivers/netscaler/netscaler_driver_v2.py0000664000567000056710000007370712701407727030265 0ustar jenkinsjenkins00000000000000 # Copyright 2015 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc

from oslo_config import cfg
from oslo_log import log as logging
from neutron import context as ncontext
from neutron.plugins.common import constants
from oslo_service import service

from neutron_lbaas._i18n import _, _LE
from neutron_lbaas.drivers import driver_base
from neutron_lbaas.drivers.driver_mixins import BaseManagerMixin
from neutron_lbaas.services.loadbalancer.drivers.netscaler import ncc_client

# Defaults are strings because the corresponding options are StrOpts and the
# driver parses/compares them as text (e.g. ``.lower() == "false"``).
DEFAULT_PERIODIC_TASK_INTERVAL = "2"
DEFAULT_STATUS_COLLECTION = "True"
DEFAULT_PAGE_SIZE = "300"
# NOTE: misspelling ("SYNCRONOUS") kept for backward compatibility with any
# external code importing this constant.
DEFAULT_IS_SYNCRONOUS = "True"

# Key used to read provisioning status out of Control Center status payloads.
PROV = "provisioning_status"
NETSCALER = "netscaler"

LOG = logging.getLogger(__name__)

# Configuration options for the NetScaler Control Center (NCC) driver,
# registered below under the [netscaler_driver] group.
NETSCALER_CC_OPTS = [
    cfg.StrOpt(
        'netscaler_ncc_uri',
        help=_('The URL to reach the NetScaler Control Center Server.'),
    ),
    cfg.StrOpt(
        'netscaler_ncc_username',
        help=_('Username to login to the NetScaler Control Center Server.'),
    ),
    cfg.StrOpt(
        'netscaler_ncc_password',
        secret=True,
        help=_('Password to login to the NetScaler Control Center Server.'),
    ),
    cfg.StrOpt(
        'periodic_task_interval',
        default=DEFAULT_PERIODIC_TASK_INTERVAL,
        # Fixed: adjacent string literals were concatenated without a
        # separating space ("fromNetScaler") and had a ".." typo.
        help=_('Setting for periodic task collection interval from '
               'NetScaler Control Center Server.'),
    ),
    cfg.StrOpt(
        'is_synchronous',
        default=DEFAULT_IS_SYNCRONOUS,
        help=_('Setting for option to enable synchronous operations with '
               'NetScaler Control Center Server.'),
    ),
    cfg.StrOpt(
        'netscaler_ncc_cleanup_mode',
        help=_(
            'Setting to enable/disable cleanup mode for NetScaler Control '
            'Center Server'),
    ),
    cfg.StrOpt(
        'netscaler_status_collection',
        # Value is "<enabled>,<page size>", split in _init_status_collection.
        default=DEFAULT_STATUS_COLLECTION + "," + DEFAULT_PAGE_SIZE,
        help=_('Setting for member status collection from '
               'NetScaler Control Center Server.'),
    )
]

# Guard against double registration when the module is imported more than
# once (oslo.config raises DuplicateOptError otherwise).
if not hasattr(cfg.CONF, "netscaler_driver"):
    cfg.CONF.register_opts(NETSCALER_CC_OPTS, 'netscaler_driver')

# REST resource path segments used to build NCC request URLs.
LBS_RESOURCE = 'loadbalancers'
LB_RESOURCE = 'loadbalancer'
LISTENERS_RESOURCE = 'listeners'
LISTENER_RESOURCE = 'listener'
POOLS_RESOURCE = 'pools'
POOL_RESOURCE = 'pool'
MEMBERS_RESOURCE = 'members'
MEMBER_RESOURCE = 'member'
MONITORS_RESOURCE = 'healthmonitors'
MONITOR_RESOURCE = 'healthmonitor'
STATS_RESOURCE = 'stats'
PROV_SEGMT_ID = 'provider:segmentation_id'
PROV_NET_TYPE = 'provider:network_type'
DRIVER_NAME = 'netscaler_driver'
RESOURCE_PREFIX = 'v2.0/lbaas'
STATUS_PREFIX = 'oca/v2'
MEMBER_STATUS = 'memberstatus'
PAGE = 'page'
SIZE = 'size'

# Module-level list of load balancer ids whose provisioning status is still
# being polled from Control Center (shared by driver and managers).
PROVISIONING_STATUS_TRACKER = []


class NetScalerLoadBalancerDriverV2(driver_base.LoadBalancerBaseDriver):
    """LBaaS v2 driver backed by the NetScaler Control Center (NCC) REST API.

    Entity operations are forwarded to NCC; when NCC works asynchronously,
    a periodic task polls NCC for provisioning status and reconciles the
    neutron-lbaas database.
    """

    def __init__(self, plugin):
        super(NetScalerLoadBalancerDriverV2, self).__init__(plugin)
        self.driver_conf = cfg.CONF.netscaler_driver
        self.admin_ctx = ncontext.get_admin_context()
        self._init_client()
        self._init_managers()
        self._init_status_collection()

    def _init_client(self):
        """Build the NCC REST client from [netscaler_driver] options."""
        ncc_uri = self.driver_conf.netscaler_ncc_uri
        ncc_username = self.driver_conf.netscaler_ncc_username
        ncc_password = self.driver_conf.netscaler_ncc_password
        ncc_cleanup_mode = cfg.CONF.netscaler_driver.netscaler_ncc_cleanup_mode
        self.client = ncc_client.NSClient(ncc_uri,
                                          ncc_username,
                                          ncc_password,
                                          ncc_cleanup_mode)

    def _init_managers(self):
        """Instantiate the per-entity managers required by the base driver."""
        self.load_balancer = NetScalerLoadBalancerManager(self)
        self.listener = NetScalerListenerManager(self)
        self.pool = NetScalerPoolManager(self)
        self.member = NetScalerMemberManager(self)
        self.health_monitor = NetScalerHealthMonitorManager(self)

    def _init_status_collection(self):
        """Parse status-collection config and start the polling service."""
        self.status_conf = self.driver_conf.netscaler_status_collection
        self.periodic_task_interval = self.driver_conf.periodic_task_interval
        # Option value has the form "<enabled>,<page size>".
        (is_status_collection,
         pagesize_status_collection) = self.status_conf.split(",")
        self.is_status_collection = is_status_collection.lower() != "false"
        self.pagesize_status_collection = pagesize_status_collection
        self._init_pending_status_tracker()
        NetScalerStatusService(self).start()

    def _init_pending_status_tracker(self):
        # Seed PROVISIONING_STATUS_TRACKER with load balancers that were left
        # in a PENDING_* state (e.g. across a service restart).
        db_lbs = self.plugin.db.get_loadbalancers(self.admin_ctx)
        for db_lb in db_lbs:
            if ((db_lb.id not in PROVISIONING_STATUS_TRACKER) and
                (db_lb.provider.provider_name == NETSCALER) and
                    (db_lb.provisioning_status.startswith("PENDING_"))):
                PROVISIONING_STATUS_TRACKER.append(db_lb.id)

    def collect_provision_status(self):
        # Fixed: the original passed a tuple to LOG.debug; use lazy
        # %-formatting as oslo.log expects.
        LOG.debug("Collecting status, interval: %s",
                  self.periodic_task_interval)
        self._update_loadbalancers_provision_status()

    def _update_loadbalancers_provision_status(self):
        # Iterate over a snapshot: _update_status_tree_in_db may remove ids
        # from PROVISIONING_STATUS_TRACKER, and removing from a list while
        # iterating it silently skips the following element.
        for lb_id in list(PROVISIONING_STATUS_TRACKER):
            lb_statuses = self._get_loadbalancer_statuses(lb_id)
            if lb_statuses:
                self._update_status_tree_in_db(
                    lb_id, lb_statuses["lb_statuses"])

    def _get_loadbalancer_statuses(self, lb_id):
        """Retrieve the status tree of one load balancer from Control Center.

        Returns ``{"lb_statuses": <tree or None>}``, or ``None`` when the
        request failed for a reason other than "not found".
        """
        resource_path = "%s/%s/%s/statuses" % (RESOURCE_PREFIX,
                                               LBS_RESOURCE,
                                               lb_id)
        try:
            statuses = self.client.retrieve_resource(
                "GLOBAL", resource_path)[1]['dict']
        except ncc_client.NCCException as e:
            if e.is_not_found_exception():
                # LB is gone on the backend; caller treats None as "deleted".
                return {"lb_statuses": None}
            else:
                return None
        statuses = statuses["statuses"]
        return {"lb_statuses": statuses}

    def _update_status_tree_in_db(self, lb_id, loadbalancer_statuses):
        """Reconcile DB status of one LB (and children) with the NCC tree."""
        track_loadbalancer = {"track": False}
        db_lb = self.plugin.db.get_loadbalancer(self.admin_ctx, lb_id)

        if (not loadbalancer_statuses and
                db_lb.provisioning_status == constants.PENDING_DELETE):
            # Backend no longer knows the LB and we were deleting it:
            # finish the delete in the neutron-lbaas DB.
            try:
                self.load_balancer.successful_completion(
                    self.admin_ctx, db_lb, delete=True)
            except Exception:
                LOG.error(_LE("error with successful completion"))
            PROVISIONING_STATUS_TRACKER.remove(lb_id)
            return
        else:
            status_lb = loadbalancer_statuses["loadbalancer"]
            status_listeners = status_lb["listeners"]
            for db_listener in db_lb.listeners:
                db_listener.loadbalancer = db_lb
                status_listener = self._update_entity_status_in_db(
                    track_loadbalancer, db_listener, status_listeners,
                    self.listener)
                if not status_listener:
                    continue
                db_pool = db_listener.default_pool
                if not db_pool:
                    continue
                db_pool.listener = db_listener
                status_pools = status_listener['pools']
                status_pool = self._update_entity_status_in_db(
                    track_loadbalancer, db_pool, status_pools, self.pool)
                db_members = db_pool.members
                if not status_pool:
                    continue
                status_members = status_pool['members']
                for db_member in db_members:
                    db_member.pool = db_pool
                    self._update_entity_status_in_db(
                        track_loadbalancer, db_member, status_members,
                        self.member)
                db_hm = db_pool.healthmonitor
                if db_hm:
                    db_hm.pool = db_pool
                    status_hm = status_pool['healthmonitor']
                    self._update_entity_status_in_db(
                        track_loadbalancer, db_hm, [status_hm],
                        self.health_monitor)
            if not track_loadbalancer['track']:
                # All children settled; now settle the LB itself.
                self._update_entity_status_in_db(
                    track_loadbalancer, db_lb, status_lb, self.load_balancer)
        if not track_loadbalancer['track']:
            # Nothing left pending: stop polling this load balancer.
            PROVISIONING_STATUS_TRACKER.remove(lb_id)

    def _update_entity_status_in_db(self, track_loadbalancer, db_entity,
                                    status_entities, entity_manager):
        """Update one entity's DB status; return its NCC status dict or None."""
        if isinstance(status_entities, list):
            entity_status = self._get_entity_status(db_entity.id,
                                                    status_entities)
        else:
            entity_status = status_entities
        self._check_and_update_entity_status_in_db(
            track_loadbalancer, db_entity, entity_status, entity_manager)
        return entity_status

    def _get_entity_status(self, entity_id, entities_status):
        """Find the status dict for entity_id in a list, else None."""
        for entity_status in entities_status:
            if entity_status and entity_status['id'] == entity_id:
                return entity_status
        return None

    def _check_and_update_entity_status_in_db(self, track_loadbalancer,
                                              db_entity, entity_status,
                                              entity_manager):
        if not db_entity.provisioning_status.startswith("PENDING_"):
            # No operation is in flight on this entity; nothing to update.
            return
        if entity_status:
            if entity_status[PROV].startswith("PENDING_"):
                # Entity not finished provisioning: keep tracking this LB.
                track_loadbalancer['track'] = True
                return
            if entity_status[PROV] == constants.ERROR:
                # Backend reported failure: mark failed completion.
                try:
                    entity_manager.failed_completion(
                        self.admin_ctx, db_entity)
                except Exception:
                    LOG.error(_LE("error with failed completion"))
                return
        if db_entity.provisioning_status == constants.PENDING_DELETE:
            # Entity is under deletion; it must be absent from the status
            # tree before the DB delete is finalized.
            if entity_status:
                # Fixed: was LOG.error(msg) on a tuple; use format args.
                LOG.error('Invalid status set for delete of %s in statuses',
                          db_entity.id)
                return
            try:
                entity_manager.successful_completion(
                    self.admin_ctx, db_entity, delete=True)
            except Exception:
                LOG.error(_LE("error with successful completion"))
            return
        # Fixed: guard against entity_status being None here (entity pending
        # CREATE/UPDATE but missing from the tree) — the original code would
        # raise TypeError on entity_status[PROV].
        if not entity_status or entity_status[PROV] != constants.ACTIVE:
            LOG.error('Invalid prov status for %s, should be ACTIVE '
                      'for CREATE and UPDATE', db_entity.id)
            return
        try:
            entity_manager.successful_completion(
                self.admin_ctx, db_entity)
        except Exception:
            LOG.error(_LE("error with successful completion"))
        return


class NetScalerCommonManager(BaseManagerMixin):
    """Shared create/update/delete workflow for all NCC entity managers.

    Subclasses implement the *_entity hooks; this class handles completion
    bookkeeping (synchronous) or status tracking (asynchronous).
    """

    def __init__(self, driver):
        super(NetScalerCommonManager, self).__init__(driver)
        self.payload_preparer = PayloadPreparer()
        self.client = self.driver.client
        # Option is a StrOpt; normalize the string to a bool.
        self.is_synchronous = self.driver.driver_conf.is_synchronous
        if self.is_synchronous.lower() == "false":
            self.is_synchronous = False
        else:
            self.is_synchronous = True

    def create(self, context, obj):
        LOG.debug("%s, create %s", self.__class__.__name__, obj.id)
        try:
            self.create_entity(context, obj)
            if self.is_synchronous:
                self.successful_completion(context, obj)
            else:
                self.track_provision_status(obj)
        except Exception:
            self.failed_completion(context, obj)
            # Bare raise preserves the original traceback ("raise e" did not).
            raise

    def update(self, context, old_obj, obj):
        LOG.debug("%s, update %s", self.__class__.__name__, old_obj.id)
        try:
            self.update_entity(context, old_obj, obj)
            if self.is_synchronous:
                self.successful_completion(context, obj)
            else:
                self.track_provision_status(obj)
        except Exception:
            self.failed_completion(context, obj)
            raise
def delete(self, context, obj): LOG.debug("%s, delete %s", self.__class__.__name__, obj.id) try: self.delete_entity(context, obj) if self.is_synchronous: self.successful_completion(context, obj, delete=True) else: self.track_provision_status(obj) except Exception as e: self.failed_completion(context, obj) raise e def track_provision_status(self, obj): for lb in self._get_loadbalancers(obj): if lb.id not in PROVISIONING_STATUS_TRACKER: PROVISIONING_STATUS_TRACKER.append(lb.id) def _get_loadbalancers(self, obj): lbs = [] lbs.append(obj.root_loadbalancer) return lbs @abc.abstractmethod def create_entity(self, context, obj): pass @abc.abstractmethod def update_entity(self, context, old_obj, obj): pass @abc.abstractmethod def delete_entity(self, context, obj): pass class NetScalerLoadBalancerManager(NetScalerCommonManager, driver_base.BaseLoadBalancerManager): def __init__(self, driver): driver_base.BaseLoadBalancerManager.__init__(self, driver) NetScalerCommonManager.__init__(self, driver) def refresh(self, context, lb_obj): # This is intended to trigger the backend to check and repair # the state of this load balancer and all of its dependent objects LOG.debug("LB refresh %s", lb_obj.id) def stats(self, context, lb_obj): pass def create_entity(self, context, lb_obj): ncc_lb = self.payload_preparer.prepare_lb_for_creation(lb_obj) vip_subnet_id = lb_obj.vip_subnet_id network_info = self.payload_preparer.\ get_network_info(context, self.driver.plugin, vip_subnet_id) ncc_lb = dict(ncc_lb.items() + network_info.items()) msg = _("NetScaler driver lb creation: %s") % repr(ncc_lb) LOG.debug(msg) resource_path = "%s/%s" % (RESOURCE_PREFIX, LBS_RESOURCE) self.client.create_resource(context.tenant_id, resource_path, LB_RESOURCE, ncc_lb) def update_entity(self, context, old_lb_obj, lb_obj): update_lb = self.payload_preparer.prepare_lb_for_update(lb_obj) resource_path = "%s/%s/%s" % (RESOURCE_PREFIX, LBS_RESOURCE, lb_obj.id) msg = (_("NetScaler driver lb_obj %(lb_obj_id)s update: 
%(lb_obj)s") % {"lb_obj_id": old_lb_obj.id, "lb_obj": repr(lb_obj)}) LOG.debug(msg) self.client.update_resource(context.tenant_id, resource_path, LB_RESOURCE, update_lb) def delete_entity(self, context, lb_obj): """Delete a loadbalancer on a NetScaler device.""" resource_path = "%s/%s/%s" % (RESOURCE_PREFIX, LBS_RESOURCE, lb_obj.id) msg = _("NetScaler driver lb_obj removal: %s") % lb_obj.id LOG.debug(msg) self.client.remove_resource(context.tenant_id, resource_path) class NetScalerListenerManager(NetScalerCommonManager, driver_base.BaseListenerManager): def __init__(self, driver): driver_base.BaseListenerManager.__init__(self, driver) NetScalerCommonManager.__init__(self, driver) def stats(self, context, listener): # returning dummy status now LOG.debug( "Tenant id %s , Listener stats %s", context.tenant_id, listener.id) return { "bytes_in": 0, "bytes_out": 0, "active_connections": 0, "total_connections": 0 } def create_entity(self, context, listener): """Listener is created with loadbalancer """ ncc_listener = self.payload_preparer.prepare_listener_for_creation( listener) msg = _("NetScaler driver listener creation: %s") % repr(ncc_listener) LOG.debug(msg) resource_path = "%s/%s" % (RESOURCE_PREFIX, LISTENERS_RESOURCE) self.client.create_resource(context.tenant_id, resource_path, LISTENER_RESOURCE, ncc_listener) def update_entity(self, context, old_listener, listener): update_listener = self.payload_preparer.prepare_listener_for_update( listener) resource_path = "%s/%s/%s" % (RESOURCE_PREFIX, LISTENERS_RESOURCE, listener.id) msg = (_("NetScaler driver listener %(listener_id)s " "update: %(listener_obj)s") % {"listener_id": old_listener.id, "listener_obj": repr(listener)}) LOG.debug(msg) self.client.update_resource(context.tenant_id, resource_path, LISTENER_RESOURCE, update_listener) def delete_entity(self, context, listener): """Delete a listener on a NetScaler device.""" resource_path = "%s/%s/%s" % (RESOURCE_PREFIX, LISTENERS_RESOURCE, listener.id) msg = 
_("NetScaler driver listener removal: %s") % listener.id LOG.debug(msg) self.client.remove_resource(context.tenant_id, resource_path) class NetScalerPoolManager(NetScalerCommonManager, driver_base.BasePoolManager): def __init__(self, driver): driver_base.BasePoolManager.__init__(self, driver) NetScalerCommonManager.__init__(self, driver) def create_entity(self, context, pool): ncc_pool = self.payload_preparer.prepare_pool_for_creation( pool) msg = _("NetScaler driver pool creation: %s") % repr(ncc_pool) LOG.debug(msg) resource_path = "%s/%s" % (RESOURCE_PREFIX, POOLS_RESOURCE) self.client.create_resource(context.tenant_id, resource_path, POOL_RESOURCE, ncc_pool) def update_entity(self, context, old_pool, pool): update_pool = self.payload_preparer.prepare_pool_for_update(pool) resource_path = "%s/%s/%s" % (RESOURCE_PREFIX, POOLS_RESOURCE, pool.id) msg = (_("NetScaler driver pool %(pool_id)s update: %(pool_obj)s") % {"pool_id": old_pool.id, "pool_obj": repr(pool)}) LOG.debug(msg) self.client.update_resource(context.tenant_id, resource_path, POOL_RESOURCE, update_pool) def delete_entity(self, context, pool): """Delete a pool on a NetScaler device.""" resource_path = "%s/%s/%s" % (RESOURCE_PREFIX, POOLS_RESOURCE, pool.id) msg = _("NetScaler driver pool removal: %s") % pool.id LOG.debug(msg) self.client.remove_resource(context.tenant_id, resource_path) class NetScalerMemberManager(NetScalerCommonManager, driver_base.BaseMemberManager): def __init__(self, driver): driver_base.BaseMemberManager.__init__(self, driver) NetScalerCommonManager.__init__(self, driver) def create_entity(self, context, member): ncc_member = self.payload_preparer.prepare_member_for_creation(member) subnet_id = member.subnet_id network_info = (self.payload_preparer. 
get_network_info(context, self.driver.plugin, subnet_id)) ncc_member = dict(ncc_member.items() + network_info.items()) msg = _("NetScaler driver member creation: %s") % repr(ncc_member) LOG.debug(msg) parent_pool_id = member.pool.id resource_path = "%s/%s/%s/%s" % (RESOURCE_PREFIX, POOLS_RESOURCE, parent_pool_id, MEMBERS_RESOURCE) self.client.create_resource(context.tenant_id, resource_path, MEMBER_RESOURCE, ncc_member) def update_entity(self, context, old_member, member): parent_pool_id = member.pool.id update_member = self.payload_preparer.prepare_member_for_update(member) resource_path = "%s/%s/%s/%s/%s" % (RESOURCE_PREFIX, POOLS_RESOURCE, parent_pool_id, MEMBERS_RESOURCE, member.id) msg = (_("NetScaler driver member %(member_id)s " "update: %(member_obj)s") % {"member_id": old_member.id, "member_obj": repr(member)}) LOG.debug(msg) self.client.update_resource(context.tenant_id, resource_path, MEMBER_RESOURCE, update_member) def delete_entity(self, context, member): """Delete a member on a NetScaler device.""" parent_pool_id = member.pool.id resource_path = "%s/%s/%s/%s/%s" % (RESOURCE_PREFIX, POOLS_RESOURCE, parent_pool_id, MEMBERS_RESOURCE, member.id) msg = _("NetScaler driver member removal: %s") % member.id LOG.debug(msg) self.client.remove_resource(context.tenant_id, resource_path) class NetScalerHealthMonitorManager(NetScalerCommonManager, driver_base.BaseHealthMonitorManager): def __init__(self, driver): driver_base.BaseHealthMonitorManager.__init__(self, driver) NetScalerCommonManager.__init__(self, driver) def create_entity(self, context, hm): ncc_hm = self.payload_preparer.prepare_healthmonitor_for_creation(hm) msg = _("NetScaler driver healthmonitor creation: %s") % repr(ncc_hm) LOG.debug(msg) resource_path = "%s/%s" % (RESOURCE_PREFIX, MONITORS_RESOURCE) self.client.create_resource(context.tenant_id, resource_path, MONITOR_RESOURCE, ncc_hm) def update_entity(self, context, old_healthmonitor, hm): update_hm = 
self.payload_preparer.prepare_healthmonitor_for_update(hm) resource_path = "%s/%s/%s" % (RESOURCE_PREFIX, MONITORS_RESOURCE, hm.id) msg = (_("NetScaler driver healthmonitor %(healthmonitor_id)s " "update: %(healthmonitor_obj)s") % {"healthmonitor_id": old_healthmonitor.id, "healthmonitor_obj": repr(hm)}) LOG.debug(msg) self.client.update_resource(context.tenant_id, resource_path, MONITOR_RESOURCE, update_hm) def delete_entity(self, context, hm): """Delete a healthmonitor on a NetScaler device.""" resource_path = "%s/%s/%s" % (RESOURCE_PREFIX, MONITORS_RESOURCE, hm.id) msg = _("NetScaler driver healthmonitor removal: %s") % hm.id LOG.debug(msg) self.client.remove_resource(context.tenant_id, resource_path) class PayloadPreparer(object): def prepare_lb_for_creation(self, lb): creation_attrs = { 'id': lb.id, 'tenant_id': lb.tenant_id, 'vip_address': lb.vip_address, 'vip_subnet_id': lb.vip_subnet_id, } update_attrs = self.prepare_lb_for_update(lb) creation_attrs.update(update_attrs) return creation_attrs def prepare_lb_for_update(self, lb): return { 'name': lb.name, 'description': lb.description, 'admin_state_up': lb.admin_state_up, } def prepare_listener_for_creation(self, listener): creation_attrs = { 'id': listener.id, 'tenant_id': listener.tenant_id, 'protocol': listener.protocol, 'protocol_port': listener.protocol_port, 'loadbalancer_id': listener.loadbalancer_id } update_attrs = self.prepare_listener_for_update(listener) creation_attrs.update(update_attrs) return creation_attrs def prepare_listener_for_update(self, listener): sni_container_ids = self.prepare_sni_container_ids(listener) listener_dict = { 'name': listener.name, 'description': listener.description, 'sni_container_ids': sni_container_ids, 'default_tls_container_id': listener.default_tls_container_id, 'connection_limit': listener.connection_limit, 'admin_state_up': listener.admin_state_up } return listener_dict def prepare_pool_for_creation(self, pool): create_attrs = { 'id': pool.id, 'tenant_id': 
pool.tenant_id, 'listener_id': pool.listener.id, 'protocol': pool.protocol, } update_attrs = self.prepare_pool_for_update(pool) create_attrs.update(update_attrs) return create_attrs def prepare_pool_for_update(self, pool): update_attrs = { 'name': pool.name, 'description': pool.description, 'lb_algorithm': pool.lb_algorithm, 'admin_state_up': pool.admin_state_up } if pool.session_persistence: peristence = pool.session_persistence peristence_payload = self.prepare_sessionpersistence(peristence) update_attrs['session_persistence'] = peristence_payload return update_attrs def prepare_sessionpersistence(self, persistence): return { 'type': persistence.type, 'cookie_name': persistence.cookie_name } def prepare_members_for_pool(self, members): members_attrs = [] for member in members: member_attrs = self.prepare_member_for_creation(member) members_attrs.append(member_attrs) return members_attrs def prepare_member_for_creation(self, member): creation_attrs = { 'id': member.id, 'tenant_id': member.tenant_id, 'address': member.address, 'protocol_port': member.protocol_port, 'subnet_id': member.subnet_id } update_attrs = self.prepare_member_for_update(member) creation_attrs.update(update_attrs) return creation_attrs def prepare_member_for_update(self, member): return { 'weight': member.weight, 'admin_state_up': member.admin_state_up, } def prepare_healthmonitor_for_creation(self, health_monitor): creation_attrs = { 'id': health_monitor.id, 'tenant_id': health_monitor.tenant_id, 'pool_id': health_monitor.pool.id, 'type': health_monitor.type, } update_attrs = self.prepare_healthmonitor_for_update(health_monitor) creation_attrs.update(update_attrs) return creation_attrs def prepare_healthmonitor_for_update(self, health_monitor): ncc_hm = { 'delay': health_monitor.delay, 'timeout': health_monitor.timeout, 'max_retries': health_monitor.max_retries, 'admin_state_up': health_monitor.admin_state_up, } if health_monitor.type in ['HTTP', 'HTTPS']: ncc_hm['http_method'] = 
health_monitor.http_method ncc_hm['url_path'] = health_monitor.url_path ncc_hm['expected_codes'] = health_monitor.expected_codes return ncc_hm def get_network_info(self, context, plugin, subnet_id): network_info = {} subnet = plugin.db._core_plugin.get_subnet(context, subnet_id) network_id = subnet['network_id'] network = plugin.db._core_plugin.get_network(context, network_id) network_info['network_id'] = network_id network_info['subnet_id'] = subnet_id if PROV_NET_TYPE in network: network_info['network_type'] = network[PROV_NET_TYPE] if PROV_SEGMT_ID in network: network_info['segmentation_id'] = network[PROV_SEGMT_ID] return network_info def prepare_sni_container_ids(self, listener): sni_container_ids = [] for sni_container in listener.sni_containers: sni_container_ids.append(sni_container.tls_container_id) return sni_container_ids class NetScalerStatusService(service.Service): def __init__(self, driver): super(NetScalerStatusService, self).__init__() self.driver = driver def start(self): super(NetScalerStatusService, self).start() self.tg.add_timer( int(self.driver.periodic_task_interval), self.driver.collect_provision_status, None ) neutron-lbaas-8.0.0/neutron_lbaas/drivers/netscaler/__init__.py0000664000567000056710000000000012701407726026031 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/drivers/__init__.py0000664000567000056710000000000012701407726024051 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/drivers/driver_mixins.py0000664000567000056710000002010312701407726025202 0ustar jenkinsjenkins00000000000000# Copyright 2014 A10 Networks # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from neutron.plugins.common import constants from oslo_log import log as logging import six from neutron_lbaas.db.loadbalancer import models from neutron_lbaas.services.loadbalancer import constants as lb_const from neutron_lbaas.services.loadbalancer import data_models LOG = logging.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class BaseManagerMixin(object): def __init__(self, driver): self.driver = driver @abc.abstractproperty def db_delete_method(self): pass @abc.abstractmethod def create(self, context, obj): pass @abc.abstractmethod def update(self, context, obj_old, obj): pass @abc.abstractmethod def delete(self, context, obj): pass def successful_completion(self, context, obj, delete=False, lb_create=False): """ Sets the provisioning_status of the load balancer and obj to ACTIVE. Should be called last in the implementor's BaseManagerMixin methods for successful runs. :param context: neutron context :param obj: instance of a neutron_lbaas.services.loadbalancer.data_model :param delete: set True if being called from a delete method. Will most likely result in the obj being deleted from the db. :param lb_create: set True if this is being called after a successful load balancer create. """ LOG.debug("Starting successful_completion method after a successful " "driver action.") obj_sa_cls = data_models.DATA_MODEL_TO_SA_MODEL_MAP[obj.__class__] if delete: # Check if driver is responsible for vip allocation. If the driver # is responsible, then it is also responsible for cleaning it up. 
# At this point, the VIP should already be cleaned up, so we are # just doing neutron lbaas db cleanup. if (obj == obj.root_loadbalancer and self.driver.load_balancer.allocates_vip): # NOTE(blogan): this is quite dumb to do but it is necessary # so that a false negative pep8 error does not get thrown. An # "unexpected-keyword-argument" pep8 error occurs bc # self.db_delete_method is a @property method that returns a # method. kwargs = {'delete_vip_port': False} self.db_delete_method(context, obj.id, **kwargs) else: self.db_delete_method(context, obj.id) if obj == obj.root_loadbalancer and delete: # Load balancer was deleted and no longer exists return lb_op_status = None lb_p_status = constants.ACTIVE if obj == obj.root_loadbalancer: # only set the status to online if this an operation on the # load balancer lb_op_status = lb_const.ONLINE # Update the load balancer's vip address and vip port id if the driver # was responsible for allocating the vip. if (self.driver.load_balancer.allocates_vip and lb_create and isinstance(obj, data_models.LoadBalancer)): self.driver.plugin.db.update_loadbalancer( context, obj.id, {'vip_address': obj.vip_address, 'vip_port_id': obj.vip_port_id}) if delete: # We cannot update the status of obj if it was deleted but if the # obj is not a load balancer, the root load balancer should be # updated if not isinstance(obj, data_models.LoadBalancer): self.driver.plugin.db.update_status( context, models.LoadBalancer, obj.root_loadbalancer.id, provisioning_status=lb_p_status, operating_status=lb_op_status) return obj_op_status = lb_const.ONLINE if isinstance(obj, data_models.HealthMonitor): # Health Monitor does not have an operating status obj_op_status = None LOG.debug("Updating object of type {0} with id of {1} to " "provisioning_status = {2}, operating_status = {3}".format( obj.__class__, obj.id, constants.ACTIVE, obj_op_status)) self.driver.plugin.db.update_status( context, obj_sa_cls, obj.id, provisioning_status=constants.ACTIVE, 
operating_status=obj_op_status) if not isinstance(obj, data_models.LoadBalancer): # Only update the status of the root_loadbalancer if the previous # update was not the root load balancer so we are not updating # it twice. self.driver.plugin.db.update_status( context, models.LoadBalancer, obj.root_loadbalancer.id, provisioning_status=lb_p_status, operating_status=lb_op_status) def failed_completion(self, context, obj): """ Sets the provisioning status of the obj to ERROR. If obj is a loadbalancer it will be set to ERROR, otherwise set to ACTIVE. Should be called whenever something goes wrong (raised exception) in an implementor's BaseManagerMixin methods. :param context: neutron context :param obj: instance of a neutron_lbaas.services.loadbalancer.data_model """ LOG.debug("Starting failed_completion method after a failed driver " "action.") if isinstance(obj, data_models.LoadBalancer): LOG.debug("Updating load balancer {0} to provisioning_status = " "{1}, operating_status = {2}.".format( obj.root_loadbalancer.id, constants.ERROR, lb_const.OFFLINE)) self.driver.plugin.db.update_status( context, models.LoadBalancer, obj.root_loadbalancer.id, provisioning_status=constants.ERROR, operating_status=lb_const.OFFLINE) return obj_sa_cls = data_models.DATA_MODEL_TO_SA_MODEL_MAP[obj.__class__] LOG.debug("Updating object of type {0} with id of {1} to " "provisioning_status = {2}, operating_status = {3}".format( obj.__class__, obj.id, constants.ERROR, lb_const.OFFLINE)) self.driver.plugin.db.update_status( context, obj_sa_cls, obj.id, provisioning_status=constants.ERROR, operating_status=lb_const.OFFLINE) LOG.debug("Updating load balancer {0} to " "provisioning_status = {1}".format(obj.root_loadbalancer.id, constants.ACTIVE)) self.driver.plugin.db.update_status( context, models.LoadBalancer, obj.root_loadbalancer.id, provisioning_status=constants.ACTIVE) def update_vip(self, context, loadbalancer_id, vip_address, vip_port_id=None): lb_update = {'vip_address': vip_address} if 
vip_port_id: lb_update['vip_port_id'] = vip_port_id self.driver.plugin.db.update_loadbalancer(context, loadbalancer_id, lb_update) @six.add_metaclass(abc.ABCMeta) class BaseRefreshMixin(object): @abc.abstractmethod def refresh(self, context, obj): pass @six.add_metaclass(abc.ABCMeta) class BaseStatsMixin(object): @abc.abstractmethod def stats(self, context, obj): pass neutron-lbaas-8.0.0/neutron_lbaas/drivers/kemptechnologies/0000775000567000056710000000000012701410110025267 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/drivers/kemptechnologies/__init__.py0000664000567000056710000000000012701407726027411 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/drivers/kemptechnologies/config.py0000664000567000056710000000306512701407726027135 0ustar jenkinsjenkins00000000000000# Copyright 2015, Shane McGough, KEMPtechnologies # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron_lbaas._i18n import _ KEMP_OPTS = [ cfg.StrOpt('lm_address', default='192.168.0.1', help=_('Management address of the LoadMaster appliance.')), cfg.StrOpt('lm_username', default='bal', help=_('The management user. Default is bal.')), cfg.StrOpt('lm_password', default='1fourall', secret=True, help=_('Password for management user. 
Default is 1fourall.')), cfg.IntOpt('check_interval', default=9, help=_('The interval between real server health checks.')), cfg.IntOpt('connect_timeout', default=4, help=_('The time to wait for a real server to respond to a ' 'health check request.')), cfg.IntOpt('retry_count', default=2, help=_('If a real server fails to respond to a health check ' 'request. The LoadMaster will retry the specified ' 'number of times.')), ] neutron-lbaas-8.0.0/neutron_lbaas/drivers/kemptechnologies/driver_v2.py0000664000567000056710000000710412701407726027570 0ustar jenkinsjenkins00000000000000# Copyright 2015, Shane McGough, KEMPtechnologies # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
class LoadBalancerManager(driver_base.BaseLoadBalancerManager):
    """Loadbalancer manager for the KEMP LoadMaster provider.

    Pure delegation layer: every operation is forwarded to the
    kemptech_openstack_lbaas backend driver held on the parent
    KempLoadMasterDriver as ``self.driver.kemptech``.
    """

    def create(self, context, lb):
        # Ask the KEMP backend to provision the loadbalancer.
        self.driver.kemptech.load_balancer.create(context, lb)

    def update(self, context, old_lb, lb):
        # Backend receives both the previous and the desired state.
        self.driver.kemptech.load_balancer.update(context, old_lb, lb)

    def delete(self, context, lb):
        self.driver.kemptech.load_balancer.delete(context, lb)

    def refresh(self, context, lb):
        # Trigger a backend re-sync of this loadbalancer's configuration.
        self.driver.kemptech.load_balancer.refresh(context, lb)

    def stats(self, context, lb):
        # Returns whatever statistics structure the KEMP backend produces.
        return self.driver.kemptech.load_balancer.stats(context, lb)
class KempLoadMasterDriver(driver_base.LoadBalancerBaseDriver):
    """LBaaS v2 provider driver for KEMP Technologies LoadMaster.

    Installs one manager per LBaaS object type (as required by
    LoadBalancerBaseDriver) and instantiates the external
    kemptech_openstack_lbaas backend, which the managers delegate to.
    """

    def __init__(self, plugin):
        super(KempLoadMasterDriver, self).__init__(plugin)
        # One manager per LBaaS v2 object type.
        self.load_balancer = LoadBalancerManager(self)
        self.listener = ListenerManager(self)
        self.pool = PoolManager(self)
        self.member = MemberManager(self)
        self.health_monitor = HealthMonitorManager(self)
        # Backend implementation, configured from the [kemptechnologies]
        # option group (see config.KEMP_OPTS).
        self.kemptech = kemptech.KempLoadMasterDriver(self, CONF)
class LoggingNoopCommonManager(object):
    """Shared create/update/delete for all no-op object managers.

    Each operation only emits a debug log line; the driver_op decorator
    then marks the neutron object as successfully completed (or failed,
    if an exception escapes).
    """

    @driver_base.driver_op
    def create(self, context, obj):
        LOG.debug("LB %s no-op, create %s", self.__class__.__name__, obj.id)

    @driver_base.driver_op
    def update(self, context, old_obj, obj):
        LOG.debug("LB %s no-op, update %s", self.__class__.__name__, obj.id)

    @driver_base.driver_op
    def delete(self, context, obj):
        LOG.debug("LB %s no-op, delete %s", self.__class__.__name__, obj.id)
class NotImplementedManager(object):
    """Placeholder manager that rejects every operation.

    LoadBalancerBaseDriver initializes all of its object managers to
    this class, so a driver subclass that forgets to install a real
    manager fails loudly on first use instead of silently accepting
    API calls.
    """

    def create(self, context, obj):
        raise NotImplementedError()

    def update(self, context, old_obj, obj):
        raise NotImplementedError()

    def delete(self, context, obj):
        raise NotImplementedError()
def driver_op(func):
    """Decorate a driver manager operation with automatic status updates.

    On normal return, the wrapped manager's successful_completion() is
    invoked (with delete=True when the wrapped method is named 'delete');
    on any exception, failed_completion() is invoked and the original
    exception is re-raised with its traceback intact.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Positional convention: args[0] is the manager, args[1] the
        # context, args[2] the neutron object being operated on.
        # NOTE(review): for update(self, context, old_obj, obj) this makes
        # args[2] the *old* object -- relies on old/new sharing an id;
        # confirm against successful_completion's implementation.
        deleting = func.__name__ == 'delete'
        try:
            result = func(*args, **kwargs)
            args[0].successful_completion(args[1], args[2], delete=deleting)
            return result
        except Exception:
            # Mark the object as failed, then re-raise unchanged.
            with excutils.save_and_reraise_exception():
                args[0].failed_completion(args[1], args[2])
    return wrapper
class EdgeLoadBalancerDriverV2(driver_base.LoadBalancerBaseDriver):
    """LBaaS v2 provider driver backed by VMware NSXv Edge.

    Installs one manager per LBaaS object type; each manager delegates
    to the matching manager on the NSXv driver exposed by the core
    plugin (see EdgeDriverBaseManager.nsxv_driver).
    """

    @call_log
    def __init__(self, plugin):
        self.plugin = plugin
        super(EdgeLoadBalancerDriverV2, self).__init__(plugin)
        # One manager per LBaaS v2 object type.
        self.load_balancer = EdgeLoadBalancerManager(self)
        self.listener = EdgeListenerManager(self)
        self.pool = EdgePoolManager(self)
        self.member = EdgeMemberManager(self)
        self.health_monitor = EdgeHealthMonitorManager(self)
class EdgeListenerManager(driver_base.BaseListenerManager,
                          EdgeDriverBaseManager):
    """Listener manager for the NSXv Edge provider.

    Resolves the listener's default TLS certificate (if any) from the
    configured certificate manager backend and passes it along with the
    listener to the NSXv driver.
    """

    def _get_default_cert(self, listener):
        # Fetch the default TLS certificate from the cert manager backend
        # (e.g. Barbican).  Returns None when the listener has no default
        # TLS container or when no backend is configured.
        if listener.default_tls_container_id:
            cert_backend = cert_manager.get_backend()
            if cert_backend:
                return cert_backend.CertManager().get_cert(
                    project_id=listener.tenant_id,
                    cert_ref=listener.default_tls_container_id,
                    resource_ref=cert_backend.CertManager.get_service_url(
                        listener.loadbalancer_id)
                )

    @call_log
    def create(self, context, listener):
        self.nsxv_driver.listener.create(
            context, listener, certificate=self._get_default_cert(listener))

    @call_log
    def update(self, context, old_listener, new_listener):
        # The certificate is re-resolved from the *new* listener state.
        self.nsxv_driver.listener.update(
            context, old_listener, new_listener,
            certificate=self._get_default_cert(new_listener))

    @call_log
    def delete(self, context, listener):
        self.nsxv_driver.listener.delete(context, listener)
new_hm) @call_log def delete(self, context, hm): self.nsxv_driver.healthmonitor.delete(context, hm) neutron-lbaas-8.0.0/neutron_lbaas/drivers/vmware/__init__.py0000664000567000056710000000000012701407726025352 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/drivers/radware/0000775000567000056710000000000012701410110023354 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/drivers/radware/rest_client.py0000664000567000056710000001265712701407727026300 0ustar jenkinsjenkins00000000000000# Copyright 2015, Radware LTD. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
class vDirectRESTClient(object):
    """REST server proxy to Radware vDirect.

    Wraps an HTTP(S) connection to a vDirect server with optional
    failover to a secondary server.  Responses are 4-tuples indexed by
    the module-level RESP_STATUS / RESP_REASON / RESP_STR / RESP_DATA
    constants.
    """

    @log_helpers.log_method_call
    def __init__(self, server='localhost', secondary_server=None,
                 user=None, password=None, port=2189,
                 ssl=True, timeout=5000, base_uri=''):
        self.server = server
        self.secondary_server = secondary_server
        self.port = port
        self.ssl = ssl
        self.base_uri = base_uri
        self.timeout = timeout
        if user and password:
            # Pre-compute the HTTP Basic auth token.
            # NOTE(review): base64.encodestring is Python 2 only (removed
            # in Python 3.9) and inserts newlines every 76 chars, hence
            # the replace() below.
            self.auth = base64.encodestring('%s:%s' % (user, password))
            self.auth = self.auth.replace('\n', '')
        else:
            # Credentials are mandatory; fail fast at construction time.
            raise r_exc.AuthenticationMissing()
        debug_params = {'server': self.server,
                        'sec_server': self.secondary_server,
                        'port': self.port,
                        'ssl': self.ssl}
        LOG.debug('vDirectRESTClient:init server=%(server)s, '
                  'secondary server=%(sec_server)s, '
                  'port=%(port)d, '
                  'ssl=%(ssl)r', debug_params)

    def _flip_servers(self):
        # Swap primary and secondary servers so subsequent calls target
        # the other vDirect instance.  (Typo "Fliping" is in the shipped
        # message catalog; left untouched.)
        LOG.warning(_LW('Fliping servers. Current is: %(server)s, '
                        'switching to %(secondary)s'),
                    {'server': self.server,
                     'secondary': self.secondary_server})
        self.server, self.secondary_server = self.secondary_server, self.server

    def _recover(self, action, resource, data, headers, binary=False):
        # Retry the request once against the other server, if one is
        # configured; otherwise give up with a -1 status tuple.
        if self.server and self.secondary_server:
            self._flip_servers()
            resp = self._call(action, resource, data, headers, binary)
            return resp
        else:
            LOG.error(_LE('REST client is not able to recover '
                          'since only one vDirect server is '
                          'configured.'))
            return -1, None, None, None

    def call(self, action, resource, data, headers, binary=False):
        """Issue a request, failing over to the secondary server when the
        primary does not respond (-1) or is not active (301/307 redirect).
        """
        resp = self._call(action, resource, data, headers, binary)
        if resp[RESP_STATUS] == -1:
            LOG.warning(_LW('vDirect server is not responding (%s).'),
                        self.server)
            return self._recover(action, resource, data, headers, binary)
        elif resp[RESP_STATUS] in (301, 307):
            # A passive vDirect node redirects to the active one.
            LOG.warning(_LW('vDirect server is not active (%s).'),
                        self.server)
            return self._recover(action, resource, data, headers, binary)
        else:
            return resp

    @log_helpers.log_method_call
    def _call(self, action, resource, data, headers, binary=False):
        # Absolute URLs are used as-is; otherwise prepend the base URI.
        if resource.startswith('http'):
            uri = resource
        else:
            uri = self.base_uri + resource
        # Binary payloads are sent raw; everything else is JSON-encoded.
        if binary:
            body = data
        else:
            body = jsonutils.dumps(data)
        # NOTE(review): debug_data is computed but never used below --
        # presumably leftover from a removed log statement; confirm.
        debug_data = 'binary' if binary else body
        debug_data = debug_data if debug_data else 'EMPTY'
        if not headers:
            headers = {'Authorization': 'Basic %s' % self.auth}
        else:
            headers['Authorization'] = 'Basic %s' % self.auth
        conn = None
        # NOTE(review): httplib is Python 2 only (http.client in py3).
        if self.ssl:
            conn = httplib.HTTPSConnection(
                self.server, self.port, timeout=self.timeout)
            # NOTE(review): this branch returns status 0 while other error
            # paths return -1, and call() only recovers on -1 -- confirm
            # whether connection-setup failures should trigger failover.
            if conn is None:
                LOG.error(_LE('vdirectRESTClient: Could not establish HTTPS '
                              'connection'))
                return 0, None, None, None
        else:
            conn = httplib.HTTPConnection(
                self.server, self.port, timeout=self.timeout)
            if conn is None:
                LOG.error(_LE('vdirectRESTClient: Could not establish HTTP '
                              'connection'))
                return 0, None, None, None
        try:
            conn.request(action, uri, body, headers)
            response = conn.getresponse()
            respstr = response.read()
            respdata = respstr
            try:
                respdata = jsonutils.loads(respstr)
            except ValueError:
                # response was not JSON, ignore the exception
                pass
            ret = (response.status, response.reason, respstr, respdata)
        except Exception as e:
            log_dict = {'action': action, 'e': e}
            LOG.error(_LE('vdirectRESTClient: %(action)s failure, %(e)r'),
                      log_dict)
            ret = -1, None, None, None
        conn.close()
        return ret
' 'Success status codes are %(success_codes)s') class UnsupportedEntityOperation(RadwareLBaasV2Exception): message = _LE('%(operation)s operation is not supported for %(entity)s.') neutron-lbaas-8.0.0/neutron_lbaas/drivers/radware/v2_driver.py0000664000567000056710000007611112701407726025661 0ustar jenkinsjenkins00000000000000# Copyright 2015, Radware LTD. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import netaddr import threading import time from neutron.api.v2 import attributes from neutron import context from neutron.plugins.common import constants from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from six.moves import queue as Queue from neutron_lbaas._i18n import _LE, _LW, _LI import neutron_lbaas.common.cert_manager from neutron_lbaas.drivers.radware import base_v2_driver from neutron_lbaas.drivers.radware import exceptions as r_exc from neutron_lbaas.drivers.radware import rest_client as rest CERT_MANAGER_PLUGIN = neutron_lbaas.common.cert_manager.get_backend() TEMPLATE_HEADER = {'Content-Type': 'application/vnd.com.radware.vdirect.' 'template-parameters+json'} PROVISION_HEADER = {'Content-Type': 'application/vnd.com.radware.' 'vdirect.status+json'} CREATE_SERVICE_HEADER = {'Content-Type': 'application/vnd.com.radware.' 
'vdirect.adc-service-specification+json'} PROPERTY_DEFAULTS = {'type': 'none', 'cookie_name': 'none', 'url_path': '/', 'http_method': 'GET', 'expected_codes': '200', 'subnet': '255.255.255.255', 'mask': '255.255.255.255', 'gw': '255.255.255.255', } LOADBALANCER_PROPERTIES = ['vip_address', 'admin_state_up'] LISTENER_PROPERTIES = ['id', 'protocol_port', 'protocol', 'connection_limit', 'admin_state_up'] DEFAULT_CERT_PROPERTIES = ['id', 'certificate', 'intermediates', 'private_key', 'passphrase'] SNI_CERT_PROPERTIES = DEFAULT_CERT_PROPERTIES + ['position'] L7_RULE_PROPERTIES = ['id', 'type', 'compare_type', 'key', 'value', 'admin_state_up'] L7_POLICY_PROPERTIES = ['id', 'action', 'redirect_pool_id', 'redirect_url', 'position', 'admin_state_up'] DEFAULT_POOL_PROPERTIES = ['id'] POOL_PROPERTIES = ['id', 'protocol', 'lb_algorithm', 'admin_state_up'] MEMBER_PROPERTIES = ['id', 'address', 'protocol_port', 'weight', 'admin_state_up', 'subnet', 'mask', 'gw'] SESSION_PERSISTENCY_PROPERTIES = ['type', 'cookie_name'] HEALTH_MONITOR_PROPERTIES = ['type', 'delay', 'timeout', 'max_retries', 'admin_state_up', 'url_path', 'http_method', 'expected_codes', 'id'] LOG = logging.getLogger(__name__) class RadwareLBaaSV2Driver(base_v2_driver.RadwareLBaaSBaseV2Driver): # # Assumptions: # 1) We have only one worflow that takes care of l2-l4 and service creation # 2) The workflow template exists on the vDirect server # 3) The workflow expose one operaion named 'update' (plus ctor and dtor) # 4) The 'update' operation gets the loadbalancer object graph as input # 5) The object graph is enehanced by our code before it is sent to the # workflow # 6) Async operations are handled by a different thread # def __init__(self, plugin): base_v2_driver.RadwareLBaaSBaseV2Driver.__init__(self, plugin) rad = cfg.CONF.radwarev2 rad_debug = cfg.CONF.radwarev2_debug self.plugin = plugin self.service = { "name": "_REPLACE_", "tenantId": "_REPLACE_", "haPair": rad.service_ha_pair, "sessionMirroringEnabled": 
    def _start_completion_handling_thread(self):
        # Lazily start the single OperationCompletionHandler daemon thread
        # the first time an asynchronous vDirect operation is queued.
        # NOTE(review): the flag check/set is not itself synchronized --
        # presumably callers are serialized by the plugin; confirm.
        if not self.completion_handler_started:
            LOG.info(_LI('Starting operation completion handling thread'))
            self.completion_handler.start()
            self.completion_handler_started = True
    @log_helpers.log_method_call
    def workflow_exists(self, lb):
        """Return True if a vDirect workflow exists for this loadbalancer.

        Probes the workflow resource with a GET; any failure (including a
        404 from _rest_wrapper) is treated as "does not exist".
        """
        wf_name = self._get_wf_name(lb)
        wf_resource = '/api/workflow/%s' % (wf_name)
        try:
            _rest_wrapper(self.rest_client.call(
                'GET', wf_resource, None, None), [200])
        except Exception:
            # Broad on purpose: absence and transport errors both map
            # to False here.
            return False
        return True
response) resource = '/api/workflow/%s/parameters' % (wf_name) response = _rest_wrapper(self.rest_client.call('GET', resource, None, TEMPLATE_HEADER), success_codes=[200]) LOG.debug('stats_values response: %s ', response) return response['stats'] @log_helpers.log_method_call def execute_workflow(self, ctx, manager, data_model, old_data_model=None, delete=False): lb = data_model.root_loadbalancer # Get possible proxy subnet. # Proxy subnet equals to LB subnet if no proxy # is necessary. # Get subnet id of any member located on different than # loadbalancer's network. If returned subnet id is the subnet id # of loadbalancer - all members are accessible from loadbalancer's # network, meaning no second leg or static routes are required. # Otherwise, create proxy port on found member's subnet and get its # address as a proxy address for loadbalancer instance lb_subnet = self.plugin.db._core_plugin.get_subnet( ctx, lb.vip_subnet_id) proxy_subnet = lb_subnet proxy_port_address = lb.vip_address if not self.workflow_exists(lb): # Create proxy port if needed proxy_port_subnet_id = self._get_proxy_port_subnet_id(lb) if proxy_port_subnet_id != lb.vip_subnet_id: proxy_port = self._create_proxy_port( ctx, lb, proxy_port_subnet_id) proxy_subnet = self.plugin.db._core_plugin.get_subnet( ctx, proxy_port['subnet_id']) proxy_port_address = proxy_port['ip_address'] self._create_workflow(lb, lb_subnet['network_id'], proxy_subnet['network_id']) else: # Check if proxy port exists proxy_port = self._get_proxy_port(ctx, lb) if proxy_port: proxy_subnet = self.plugin.db._core_plugin.get_subnet( ctx, proxy_port['subnet_id']) proxy_port_address = proxy_port['ip_address'] # Build objects graph objects_graph = self._build_objects_graph(ctx, lb, data_model, proxy_port_address, proxy_subnet) LOG.debug("Radware vDirect LB object graph is " + str(objects_graph)) wf_name = self._get_wf_name(lb) resource = '/api/workflow/%s/action/%s' % ( wf_name, self.workflow_action_name) response = 
    def _build_objects_graph(self, ctx, lb, data_model, proxy_port_address,
                             proxy_subnet):
        """Iterate over the LB model starting from the root lb entity and
        build its JSON representation for vDirect.

        Objects in PENDING_DELETE state are omitted throughout, so the
        graph describes the desired post-operation configuration.
        Property values fall back to PROPERTY_DEFAULTS when an attribute
        is absent from the model.
        """
        graph = {}
        for prop in LOADBALANCER_PROPERTIES:
            graph[prop] = getattr(lb, prop, PROPERTY_DEFAULTS.get(prop))
        # Proxy ("pip") address equals the VIP address unless a separate
        # proxy port was created (see execute_workflow).
        graph['pip_address'] = proxy_port_address
        graph['listeners'] = []
        # Only listeners that are not being deleted and that have a
        # non-empty default pool are provisioned.
        listeners = [
            listener for listener in lb.listeners
            if listener.provisioning_status != constants.PENDING_DELETE and
            (listener.default_pool and listener.default_pool.members)]
        for listener in listeners:
            listener_dict = {}
            for prop in LISTENER_PROPERTIES:
                listener_dict[prop] = getattr(
                    listener, prop, PROPERTY_DEFAULTS.get(prop))
            cert_mgr = CERT_MANAGER_PLUGIN.CertManager()
            # Default TLS certificate, fetched from the cert backend.
            if listener.default_tls_container_id:
                default_cert = cert_mgr.get_cert(
                    project_id=listener.tenant_id,
                    cert_ref=listener.default_tls_container_id,
                    resource_ref=cert_mgr.get_service_url(
                        listener.loadbalancer_id),
                    service_name='Neutron LBaaS v2 Radware provider')
                def_cert_dict = {
                    'id': listener.default_tls_container_id,
                    'certificate': default_cert.get_certificate(),
                    'intermediates': default_cert.get_intermediates(),
                    'private_key': default_cert.get_private_key(),
                    'passphrase': default_cert.get_private_key_passphrase()}
                listener_dict['default_tls_certificate'] = def_cert_dict
            # SNI certificates keep their relative 'position' ordering.
            if listener.sni_containers:
                listener_dict['sni_tls_certificates'] = []
                for sni_container in listener.sni_containers:
                    sni_cert = cert_mgr.get_cert(
                        project_id=listener.tenant_id,
                        cert_ref=sni_container.tls_container_id,
                        resource_ref=cert_mgr.get_service_url(
                            listener.loadbalancer_id),
                        service_name='Neutron LBaaS v2 Radware provider')
                    listener_dict['sni_tls_certificates'].append(
                        {'id': sni_container.tls_container_id,
                         'position': sni_container.position,
                         'certificate': sni_cert.get_certificate(),
                         'intermediates': sni_cert.get_intermediates(),
                         'private_key': sni_cert.get_private_key(),
                         'passphrase': sni_cert.get_private_key_passphrase()})
            listener_dict['l7_policies'] = []
            policies = [
                policy for policy in listener.l7_policies
                if policy.provisioning_status != constants.PENDING_DELETE]
            for policy in policies:
                policy_dict = {}
                for prop in L7_POLICY_PROPERTIES:
                    policy_dict[prop] = getattr(
                        policy, prop, PROPERTY_DEFAULTS.get(prop))
                policy_dict['rules'] = []
                rules = [
                    rule for rule in policy.rules
                    if rule.provisioning_status != constants.PENDING_DELETE]
                for rule in rules:
                    rule_dict = {}
                    for prop in L7_RULE_PROPERTIES:
                        rule_dict[prop] = getattr(
                            rule, prop, PROPERTY_DEFAULTS.get(prop))
                    policy_dict['rules'].append(rule_dict)
                # A policy without surviving rules is dropped entirely.
                if policy_dict['rules']:
                    listener_dict['l7_policies'].append(policy_dict)
            if (listener.default_pool and
                listener.default_pool.provisioning_status !=
                    constants.PENDING_DELETE):
                def_pool_dict = {'id': listener.default_pool.id}
                if listener.default_pool.session_persistence:
                    sess_pers_dict = {}
                    for prop in SESSION_PERSISTENCY_PROPERTIES:
                        sess_pers_dict[prop] = getattr(
                            listener.default_pool.session_persistence,
                            prop, PROPERTY_DEFAULTS.get(prop))
                    def_pool_dict['sessionpersistence'] = sess_pers_dict
                listener_dict['default_pool'] = def_pool_dict
            graph['listeners'].append(listener_dict)
        graph['pools'] = []
        pools = [
            pool for pool in lb.pools
            if pool.provisioning_status != constants.PENDING_DELETE]
        for pool in pools:
            pool_dict = {}
            for prop in POOL_PROPERTIES:
                pool_dict[prop] = getattr(
                    pool, prop, PROPERTY_DEFAULTS.get(prop))
            if (pool.healthmonitor and
                pool.healthmonitor.provisioning_status !=
                    constants.PENDING_DELETE):
                hm_dict = {}
                for prop in HEALTH_MONITOR_PROPERTIES:
                    hm_dict[prop] = getattr(
                        pool.healthmonitor, prop,
                        PROPERTY_DEFAULTS.get(prop))
                pool_dict['healthmonitor'] = hm_dict
            pool_dict['members'] = []
            members = [
                member for member in pool.members
                if member.provisioning_status != constants.PENDING_DELETE]
            for member in members:
                member_dict = {}
                for prop in MEMBER_PROPERTIES:
                    member_dict[prop] = getattr(
                        member, prop, PROPERTY_DEFAULTS.get(prop))
                # Members outside the proxy subnet need static route data
                # so the LoadMaster can reach them through the proxy leg.
                if (proxy_port_address != lb.vip_address and
                    netaddr.IPAddress(member.address) not in
                        netaddr.IPNetwork(proxy_subnet['cidr'])):
                    self._accomplish_member_static_route_data(
                        ctx, member, member_dict, proxy_subnet['gateway_ip'])
                pool_dict['members'].append(member_dict)
            graph['pools'].append(pool_dict)
        return graph
Otherwise, return loadbalancer's subnet id """ for listener in lb.listeners: if listener.default_pool: for member in listener.default_pool.members: if lb.vip_subnet_id != member.subnet_id: return member.subnet_id return lb.vip_subnet_id def _create_proxy_port(self, ctx, lb, proxy_port_subnet_id): """Check if proxy port was created earlier. If not, create a new port on proxy subnet and return its ip address. Returns port IP address """ proxy_port = self._get_proxy_port(ctx, lb) if proxy_port: LOG.info(_LI('LB %(lb_id)s proxy port exists on subnet \ %(subnet_id)s with ip address %(ip_address)s') % {'lb_id': lb.id, 'subnet_id': proxy_port['subnet_id'], 'ip_address': proxy_port['ip_address']}) return proxy_port proxy_port_name = 'proxy_' + lb.id proxy_port_subnet = self.plugin.db._core_plugin.get_subnet( ctx, proxy_port_subnet_id) proxy_port_data = { 'tenant_id': lb.tenant_id, 'name': proxy_port_name, 'network_id': proxy_port_subnet['network_id'], 'mac_address': attributes.ATTR_NOT_SPECIFIED, 'admin_state_up': False, 'device_id': '', 'device_owner': 'neutron:' + constants.LOADBALANCERV2, 'fixed_ips': [{'subnet_id': proxy_port_subnet_id}] } proxy_port = self.plugin.db._core_plugin.create_port( ctx, {'port': proxy_port_data}) proxy_port_ip_data = proxy_port['fixed_ips'][0] LOG.info(_LI('LB %(lb_id)s proxy port created on subnet %(subnet_id)s \ with ip address %(ip_address)s') % {'lb_id': lb.id, 'subnet_id': proxy_port_ip_data['subnet_id'], 'ip_address': proxy_port_ip_data['ip_address']}) return proxy_port_ip_data def _get_proxy_port(self, ctx, lb): ports = self.plugin.db._core_plugin.get_ports( ctx, filters={'name': ['proxy_' + lb.id], }) if not ports: return None proxy_port = ports[0] return proxy_port['fixed_ips'][0] def _delete_proxy_port(self, ctx, lb): port_filter = { 'name': ['proxy_' + lb.id], } ports = self.plugin.db._core_plugin.get_ports( ctx, filters=port_filter) if ports: proxy_port = ports[0] proxy_port_ip_data = proxy_port['fixed_ips'][0] try: 
LOG.info(_LI('Deleting LB %(lb_id)s proxy port on subnet \ %(subnet_id)s with ip address %(ip_address)s') % {'lb_id': lb.id, 'subnet_id': proxy_port_ip_data['subnet_id'], 'ip_address': proxy_port_ip_data['ip_address']}) self.plugin.db._core_plugin.delete_port( ctx, proxy_port['id']) except Exception as exception: # stop exception propagation, nport may have # been deleted by other means LOG.warning(_LW('Proxy port deletion failed: %r'), exception) def _accomplish_member_static_route_data(self, ctx, member, member_data, proxy_gateway_ip): member_ports = self.plugin.db._core_plugin.get_ports( ctx, filters={'fixed_ips': {'ip_address': [member.address]}, 'tenant_id': [member.tenant_id]}) if len(member_ports) == 1: member_port = member_ports[0] member_port_ip_data = member_port['fixed_ips'][0] LOG.debug('member_port_ip_data:' + repr(member_port_ip_data)) member_subnet = self.plugin.db._core_plugin.get_subnet( ctx, member_port_ip_data['subnet_id']) LOG.debug('member_subnet:' + repr(member_subnet)) member_network = netaddr.IPNetwork(member_subnet['cidr']) member_data['subnet'] = str(member_network.network) member_data['mask'] = str(member_network.netmask) else: member_data['subnet'] = member_data['address'] member_data['gw'] = proxy_gateway_ip class OperationCompletionHandler(threading.Thread): """Update DB with operation status or delete the entity from DB.""" def __init__(self, queue, rest_client, plugin): threading.Thread.__init__(self) self.queue = queue self.rest_client = rest_client self.plugin = plugin self.stoprequest = threading.Event() self.opers_to_handle_before_rest = 0 def join(self, timeout=None): self.stoprequest.set() super(OperationCompletionHandler, self).join(timeout) def handle_operation_completion(self, oper): result = self.rest_client.call('GET', oper.operation_url, None, None) LOG.debug('Operation completion requested %(uri) and got: %(result)', {'uri': oper.operation_url, 'result': result}) completed = result[rest.RESP_DATA]['complete'] reason = 
result[rest.RESP_REASON], description = result[rest.RESP_STR] if completed: # operation is done - update the DB with the status # or delete the entire graph from DB success = result[rest.RESP_DATA]['success'] sec_to_completion = time.time() - oper.creation_time debug_data = {'oper': oper, 'sec_to_completion': sec_to_completion, 'success': success} LOG.debug('Operation %(oper)s is completed after ' '%(sec_to_completion)d sec ' 'with success status: %(success)s :', debug_data) if not success: # failure - log it and set the return ERROR as DB state if reason or description: msg = 'Reason:%s. Description:%s' % (reason, description) else: msg = "unknown" error_params = {"operation": oper, "msg": msg} LOG.error(_LE( 'Operation %(operation)s failed. Reason: %(msg)s'), error_params) oper.status = constants.ERROR OperationCompletionHandler._run_post_failure_function(oper) else: oper.status = constants.ACTIVE OperationCompletionHandler._run_post_success_function(oper) return completed def run(self): while not self.stoprequest.isSet(): try: oper = self.queue.get(timeout=1) # Get the current queue size (N) and set the counter with it. # Handle N operations with no intermission. # Once N operations handles, get the size again and repeat. if self.opers_to_handle_before_rest <= 0: self.opers_to_handle_before_rest = self.queue.qsize() + 1 LOG.debug('Operation consumed from the queue: ' + str(oper)) # check the status - if oper is done: update the db , # else push the oper again to the queue if not self.handle_operation_completion(oper): LOG.debug('Operation %s is not completed yet..' 
% oper) # Not completed - push to the queue again self.queue.put_nowait(oper) self.queue.task_done() self.opers_to_handle_before_rest -= 1 # Take one second rest before start handling # new operations or operations handled before if self.opers_to_handle_before_rest <= 0: time.sleep(1) except Queue.Empty: continue except Exception: LOG.error(_LE( "Exception was thrown inside OperationCompletionHandler")) @staticmethod def _run_post_success_function(oper): try: ctx = context.get_admin_context() if oper.post_operation_function: oper.post_operation_function(ctx, oper.data_model) oper.manager.successful_completion(ctx, oper.data_model, delete=oper.delete) LOG.debug('Post-operation success function completed ' 'for operation %s', repr(oper)) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Post-operation success function failed ' 'for operation %s'), repr(oper)) @staticmethod def _run_post_failure_function(oper): try: ctx = context.get_admin_context() oper.manager.failed_completion(ctx, oper.data_model) LOG.debug('Post-operation failure function completed ' 'for operation %s', repr(oper)) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Post-operation failure function failed ' 'for operation %s'), repr(oper)) class OperationAttributes(object): """Holds operation attributes""" def __init__(self, manager, operation_url, lb, data_model=None, old_data_model=None, delete=False, post_operation_function=None): self.manager = manager self.operation_url = operation_url self.lb = lb self.data_model = data_model self.old_data_model = old_data_model self.delete = delete self.post_operation_function = post_operation_function self.creation_time = time.time() def __repr__(self): attrs = self.__dict__ items = ("%s = %r" % (k, v) for k, v in attrs.items()) return "<%s: {%s}>" % (self.__class__.__name__, ', '.join(items)) def _rest_wrapper(response, success_codes=None): """Wrap a REST call and make sure a valido status is 
returned.""" success_codes = success_codes or [202] if not response: raise r_exc.RESTRequestFailure( status=-1, reason="Unknown", description="Unknown", success_codes=success_codes ) elif response[rest.RESP_STATUS] not in success_codes: raise r_exc.RESTRequestFailure( status=response[rest.RESP_STATUS], reason=response[rest.RESP_REASON], description=response[rest.RESP_STR], success_codes=success_codes ) else: LOG.debug("this is a respone: %s" % (response,)) return response[rest.RESP_DATA] neutron-lbaas-8.0.0/neutron_lbaas/drivers/radware/base_v2_driver.py0000664000567000056710000002714312701407726026654 0ustar jenkinsjenkins00000000000000# Copyright 2015, Radware LTD. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import helpers as log_helpers from neutron_lbaas._i18n import _ from neutron_lbaas.drivers import driver_base VERSION = "K1.0.0" driver_opts = [ cfg.StrOpt('vdirect_address', help=_('IP address of vDirect server.')), cfg.StrOpt('ha_secondary_address', help=_('IP address of secondary vDirect server.')), cfg.StrOpt('vdirect_user', default='vDirect', help=_('vDirect user name.')), cfg.StrOpt('vdirect_password', default='radware', secret=True, help=_('vDirect user password.')), cfg.StrOpt('service_adc_type', default="VA", help=_('Service ADC type. 
Default: VA.')), cfg.StrOpt('service_adc_version', default="", help=_('Service ADC version.')), cfg.BoolOpt('service_ha_pair', default=False, help=_('Enables or disables the Service HA pair. ' 'Default: False.')), cfg.IntOpt('service_throughput', default=1000, help=_('Service throughput. Default: 1000.')), cfg.IntOpt('service_ssl_throughput', default=100, help=_('Service SSL throughput. Default: 100.')), cfg.IntOpt('service_compression_throughput', default=100, help=_('Service compression throughput. Default: 100.')), cfg.IntOpt('service_cache', default=20, help=_('Size of service cache. Default: 20.')), cfg.ListOpt('service_resource_pool_ids', default=[], help=_('Resource pool IDs.')), cfg.IntOpt('service_isl_vlan', default=-1, help=_('A required VLAN for the interswitch link to use.')), cfg.BoolOpt('service_session_mirroring_enabled', default=False, help=_('Enable or disable Alteon interswitch link for ' 'stateful session failover. Default: False.')), cfg.StrOpt('workflow_template_name', default='os_lb_v2', help=_('Name of the workflow template. Default: os_lb_v2.')), cfg.ListOpt('child_workflow_template_names', default=['manage_l3'], help=_('Name of child workflow templates used.' 'Default: manage_l3')), cfg.DictOpt('workflow_params', default={"twoleg_enabled": "_REPLACE_", "ha_network_name": "HA-Network", "ha_ip_pool_name": "default", "allocate_ha_vrrp": True, "allocate_ha_ips": True, "data_port": 1, "data_ip_address": "192.168.200.99", "data_ip_mask": "255.255.255.0", "gateway": "192.168.200.1", "ha_port": 2}, help=_('Parameter for l2_l3 workflow constructor.')), cfg.StrOpt('workflow_action_name', default='apply', help=_('Name of the workflow action. ' 'Default: apply.')), cfg.StrOpt('stats_action_name', default='stats', help=_('Name of the workflow action for statistics. 
' 'Default: stats.')) ] driver_debug_opts = [ cfg.BoolOpt('provision_service', default=True, help=_('Provision ADC service?')), cfg.BoolOpt('configure_l3', default=True, help=_('Configule ADC with L3 parameters?')), cfg.BoolOpt('configure_l4', default=True, help=_('Configule ADC with L4 parameters?')) ] cfg.CONF.register_opts(driver_opts, "radwarev2") cfg.CONF.register_opts(driver_debug_opts, "radwarev2_debug") class RadwareLBaaSBaseV2Driver(driver_base.LoadBalancerBaseDriver): def __init__(self, plugin): super(RadwareLBaaSBaseV2Driver, self).__init__(plugin) self.load_balancer = LoadBalancerManager(self) self.listener = ListenerManager(self) self.l7policy = L7PolicyManager(self) self.l7rule = L7RuleManager(self) self.pool = PoolManager(self) self.member = MemberManager(self) self.health_monitor = HealthMonitorManager(self) class LoadBalancerManager(driver_base.BaseLoadBalancerManager): @log_helpers.log_method_call def create(self, context, lb): self.successful_completion(context, lb) @log_helpers.log_method_call def update(self, context, old_lb, lb): if self.driver.workflow_exists(old_lb): self.driver.execute_workflow( context, self, lb, old_data_model=old_lb) else: self.successful_completion(context, lb) @log_helpers.log_method_call def delete(self, context, lb): if self.driver.workflow_exists(lb): self.driver.remove_workflow( context, self, lb) else: self.successful_completion(context, lb, delete=True) @log_helpers.log_method_call def refresh(self, context, lb): if lb.listeners and any(listener.default_pool and listener.default_pool.members for listener in lb.listeners): self.driver.execute_workflow( context, self, lb) else: self.successful_completion(context, lb) @log_helpers.log_method_call def stats(self, context, lb): if self.driver.workflow_exists(lb): return self.driver.get_stats(context, lb) else: self.successful_completion(context, lb) class ListenerManager(driver_base.BaseListenerManager): @log_helpers.log_method_call def create(self, context, 
listener): if self.driver.workflow_exists(listener.root_loadbalancer): self.driver.execute_workflow( context, self, listener) else: self.successful_completion(context, listener) @log_helpers.log_method_call def update(self, context, old_listener, listener): if self.driver.workflow_exists(old_listener.root_loadbalancer): self.driver.execute_workflow( context, self, listener, old_data_model=old_listener) else: self.successful_completion(context, listener) @log_helpers.log_method_call def delete(self, context, listener): if self.driver.workflow_exists(listener.root_loadbalancer): self.driver.execute_workflow( context, self, listener, delete=True) else: self.successful_completion(context, listener, delete=True) class L7PolicyManager(driver_base.BaseL7PolicyManager): @log_helpers.log_method_call def create(self, context, policy): if self.driver.workflow_exists(policy.root_loadbalancer): self.driver.execute_workflow( context, self, policy) else: self.successful_completion(context, policy) @log_helpers.log_method_call def update(self, context, old_policy, policy): if self.driver.workflow_exists(old_policy.root_loadbalancer): self.driver.execute_workflow( context, self, policy, old_data_model=old_policy) else: self.successful_completion(context, policy) @log_helpers.log_method_call def delete(self, context, policy): if self.driver.workflow_exists(policy.root_loadbalancer): self.driver.execute_workflow( context, self, policy, delete=True) else: self.successful_completion(context, policy, delete=True) class L7RuleManager(driver_base.BaseL7RuleManager): @log_helpers.log_method_call def create(self, context, rule): if self.driver.workflow_exists(rule.root_loadbalancer): self.driver.execute_workflow( context, self, rule) else: self.successful_completion(context, rule) @log_helpers.log_method_call def update(self, context, old_rule, rule): if self.driver.workflow_exists(old_rule.root_loadbalancer): self.driver.execute_workflow( context, self, rule, old_data_model=old_rule) else: 
self.successful_completion(context, rule) @log_helpers.log_method_call def delete(self, context, rule): if self.driver.workflow_exists(rule.root_loadbalancer): self.driver.execute_workflow( context, self, rule, delete=True) else: self.successful_completion(context, rule, delete=True) class PoolManager(driver_base.BasePoolManager): @log_helpers.log_method_call def create(self, context, pool): if self.driver.workflow_exists(pool.root_loadbalancer): self.driver.execute_workflow( context, self, pool) else: self.successful_completion(context, pool) @log_helpers.log_method_call def update(self, context, old_pool, pool): if self.driver.workflow_exists(old_pool.root_loadbalancer): self.driver.execute_workflow( context, self, pool, old_data_model=old_pool) else: self.successful_completion(context, pool) @log_helpers.log_method_call def delete(self, context, pool): if self.driver.workflow_exists(pool.root_loadbalancer): self.driver.execute_workflow( context, self, pool, delete=True) else: self.successful_completion(context, pool, delete=True) class MemberManager(driver_base.BaseMemberManager): @log_helpers.log_method_call def create(self, context, member): self.driver.execute_workflow( context, self, member) @log_helpers.log_method_call def update(self, context, old_member, member): self.driver.execute_workflow( context, self, member, old_data_model=old_member) @log_helpers.log_method_call def delete(self, context, member): self.driver.execute_workflow( context, self, member, delete=True) class HealthMonitorManager(driver_base.BaseHealthMonitorManager): @log_helpers.log_method_call def create(self, context, hm): if self.driver.workflow_exists(hm.root_loadbalancer): self.driver.execute_workflow( context, self, hm) else: self.successful_completion(context, hm) @log_helpers.log_method_call def update(self, context, old_hm, hm): if self.driver.workflow_exists(old_hm.root_loadbalancer): self.driver.execute_workflow( context, self, hm, old_data_model=old_hm) else: 
self.successful_completion(context, hm) @log_helpers.log_method_call def delete(self, context, hm): if self.driver.workflow_exists(hm.root_loadbalancer): self.driver.execute_workflow( context, self, hm, delete=True) else: self.successful_completion(context, hm, delete=True) neutron-lbaas-8.0.0/neutron_lbaas/drivers/common/0000775000567000056710000000000012701410110023217 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/drivers/common/__init__.py0000664000567000056710000000000012701407726025341 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/drivers/common/agent_callbacks.py0000664000567000056710000002062112701407726026712 0ustar jenkinsjenkins00000000000000# Copyright 2015 Rackspace. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.extensions import portbindings from neutron.plugins.common import constants from neutron_lib import exceptions as n_exc from oslo_log import log as logging import oslo_messaging as messaging from neutron_lbaas._i18n import _, _LW from neutron_lbaas.db.loadbalancer import loadbalancer_dbv2 from neutron_lbaas.db.loadbalancer import models as db_models from neutron_lbaas.services.loadbalancer import data_models LOG = logging.getLogger(__name__) class LoadBalancerCallbacks(object): # history # 1.0 Initial version target = messaging.Target(version='1.0') def __init__(self, plugin): super(LoadBalancerCallbacks, self).__init__() self.plugin = plugin def get_ready_devices(self, context, host=None): with context.session.begin(subtransactions=True): agents = self.plugin.db.get_lbaas_agents( context, filters={'host': [host]}) if not agents: return [] elif len(agents) > 1: LOG.warning(_LW('Multiple lbaas agents found on host %s'), host) loadbalancers = self.plugin.db.list_loadbalancers_on_lbaas_agent( context, agents[0].id) loadbalancer_ids = [ l.id for l in loadbalancers] qry = context.session.query( loadbalancer_dbv2.models.LoadBalancer.id) qry = qry.filter( loadbalancer_dbv2.models.LoadBalancer.id.in_( loadbalancer_ids)) qry = qry.filter( loadbalancer_dbv2.models.LoadBalancer.provisioning_status.in_( constants.ACTIVE_PENDING_STATUSES)) up = True # makes pep8 and sqlalchemy happy qry = qry.filter( loadbalancer_dbv2.models.LoadBalancer.admin_state_up == up) return [id for id, in qry] def get_loadbalancer(self, context, loadbalancer_id=None): lb_model = self.plugin.db.get_loadbalancer(context, loadbalancer_id) if lb_model.vip_port and lb_model.vip_port.fixed_ips: for fixed_ip in lb_model.vip_port.fixed_ips: subnet_dict = self.plugin.db._core_plugin.get_subnet( context, fixed_ip.subnet_id ) setattr(fixed_ip, 'subnet', data_models.Subnet.from_dict( subnet_dict)) if lb_model.provider: device_driver = self.plugin.drivers[ lb_model.provider.provider_name].device_driver 
setattr(lb_model.provider, 'device_driver', device_driver) lb_dict = lb_model.to_dict(stats=False) return lb_dict def loadbalancer_deployed(self, context, loadbalancer_id): with context.session.begin(subtransactions=True): qry = context.session.query(db_models.LoadBalancer) qry = qry.filter_by(id=loadbalancer_id) loadbalancer = qry.one() # set all resources to active if (loadbalancer.provisioning_status in constants.ACTIVE_PENDING_STATUSES): loadbalancer.provisioning_status = constants.ACTIVE if loadbalancer.listeners: for l in loadbalancer.listeners: if (l.provisioning_status in constants.ACTIVE_PENDING_STATUSES): l.provisioning_status = constants.ACTIVE if (l.default_pool and l.default_pool.provisioning_status in constants.ACTIVE_PENDING_STATUSES): l.default_pool.provisioning_status = constants.ACTIVE if l.default_pool.members: for m in l.default_pool.members: if (m.provisioning_status in constants.ACTIVE_PENDING_STATUSES): m.provisioning_status = constants.ACTIVE if l.default_pool.healthmonitor: hm = l.default_pool.healthmonitor ps = hm.provisioning_status if ps in constants.ACTIVE_PENDING_STATUSES: (l.default_pool.healthmonitor .provisioning_status) = constants.ACTIVE def update_status(self, context, obj_type, obj_id, provisioning_status=None, operating_status=None): if not provisioning_status and not operating_status: LOG.warning(_LW('update_status for %(obj_type)s %(obj_id)s called ' 'without specifying provisioning_status or ' 'operating_status') % {'obj_type': obj_type, 'obj_id': obj_id}) return model_mapping = { 'loadbalancer': db_models.LoadBalancer, 'pool': db_models.PoolV2, 'listener': db_models.Listener, 'member': db_models.MemberV2, 'healthmonitor': db_models.HealthMonitorV2 } if obj_type not in model_mapping: raise n_exc.Invalid(_('Unknown object type: %s') % obj_type) try: self.plugin.db.update_status( context, model_mapping[obj_type], obj_id, provisioning_status=provisioning_status, operating_status=operating_status) except n_exc.NotFound: # 
update_status may come from agent on an object which was # already deleted from db with other request LOG.warning(_LW('Cannot update status: %(obj_type)s %(obj_id)s ' 'not found in the DB, it was probably deleted ' 'concurrently'), {'obj_type': obj_type, 'obj_id': obj_id}) def loadbalancer_destroyed(self, context, loadbalancer_id=None): """Agent confirmation hook that a load balancer has been destroyed. This method exists for subclasses to change the deletion behavior. """ pass def plug_vip_port(self, context, port_id=None, host=None): if not port_id: return try: port = self.plugin.db._core_plugin.get_port( context, port_id ) except n_exc.PortNotFound: LOG.debug('Unable to find port %s to plug.', port_id) return port['admin_state_up'] = True port[portbindings.HOST_ID] = host self.plugin.db._core_plugin.update_port( context, port_id, {'port': port} ) def unplug_vip_port(self, context, port_id=None, host=None): if not port_id: return try: port = self.plugin.db._core_plugin.get_port( context, port_id ) except n_exc.PortNotFound: LOG.debug('Unable to find port %s to unplug. This can occur when ' 'the Vip has been deleted first.', port_id) return port['admin_state_up'] = False port['device_owner'] = '' port['device_id'] = '' try: self.plugin.db._core_plugin.update_port( context, port_id, {'port': port} ) except n_exc.PortNotFound: LOG.debug('Unable to find port %s to unplug. This can occur when ' 'the Vip has been deleted first.', port_id) def update_loadbalancer_stats(self, context, loadbalancer_id=None, stats=None): self.plugin.db.update_loadbalancer_stats(context, loadbalancer_id, stats) neutron-lbaas-8.0.0/neutron_lbaas/drivers/common/agent_driver_base.py0000664000567000056710000003540012701407726027261 0ustar jenkinsjenkins00000000000000# Copyright 2015 Rackspace. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.common import rpc as n_rpc
from neutron.db import agents_db
from neutron.services import provider_configuration as provconf
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_utils import importutils

from neutron_lbaas._i18n import _
from neutron_lbaas.drivers.common import agent_callbacks
from neutron_lbaas.drivers import driver_base
from neutron_lbaas.extensions import lbaas_agentschedulerv2
from neutron_lbaas.services.loadbalancer import constants as lb_const
from neutron_lbaas.services.loadbalancer import data_models

# Provider-configuration namespace used to look up the scheduler driver.
LB_SCHEDULERS = 'loadbalancer_schedulers'

AGENT_SCHEDULER_OPTS = [
    cfg.StrOpt('loadbalancer_scheduler_driver',
               default='neutron_lbaas.agent_scheduler.ChanceScheduler',
               help=_('Driver to use for scheduling '
                      'to a default loadbalancer agent')),
]

cfg.CONF.register_opts(AGENT_SCHEDULER_OPTS)


class DriverNotSpecified(n_exc.NeutronException):
    # Raised by AgentDriverBase.__init__ when a vendor subclass forgot to
    # set the class attribute ``device_driver``.
    message = _("Device driver for agent should be specified "
                "in plugin driver.")


class DataModelSerializer(object):
    """oslo.messaging serializer that flattens LBaaS data models to dicts.

    Entities that are not BaseDataModel instances pass through unchanged.
    """

    def serialize_entity(self, ctx, entity):
        if isinstance(entity, data_models.BaseDataModel):
            # stats=False: statistics are not needed on the agent side.
            return entity.to_dict(stats=False)
        else:
            return entity


class LoadBalancerAgentApi(object):
    """Plugin side of plugin to agent RPC API.

    Every method is a fire-and-forget ``cast`` to the agent running on
    ``host``; no result is awaited.
    """

    # history
    #   1.0 Initial version
    #

    def __init__(self, topic):
        target = messaging.Target(topic=topic, version='1.0')
        self.client = n_rpc.get_client(target,
                                       serializer=DataModelSerializer())

    def agent_updated(self, context, admin_state_up, host):
        # Notify the agent that its admin state changed.
        cctxt = self.client.prepare(server=host)
        cctxt.cast(context, 'agent_updated',
                   payload={'admin_state_up': admin_state_up})

    def create_loadbalancer(self, context, loadbalancer, host, driver_name):
        cctxt = self.client.prepare(server=host)
        cctxt.cast(context, 'create_loadbalancer',
                   loadbalancer=loadbalancer, driver_name=driver_name)

    def update_loadbalancer(self, context, old_loadbalancer, loadbalancer,
                            host):
        cctxt = self.client.prepare(server=host)
        cctxt.cast(context, 'update_loadbalancer',
                   old_loadbalancer=old_loadbalancer,
                   loadbalancer=loadbalancer)

    def delete_loadbalancer(self, context, loadbalancer, host):
        cctxt = self.client.prepare(server=host)
        cctxt.cast(context, 'delete_loadbalancer', loadbalancer=loadbalancer)

    def create_listener(self, context, listener, host):
        cctxt = self.client.prepare(server=host)
        cctxt.cast(context, 'create_listener', listener=listener)

    def update_listener(self, context, old_listener, listener, host):
        cctxt = self.client.prepare(server=host)
        cctxt.cast(context, 'update_listener', old_listener=old_listener,
                   listener=listener)

    def delete_listener(self, context, listener, host):
        cctxt = self.client.prepare(server=host)
        cctxt.cast(context, 'delete_listener', listener=listener)

    def create_pool(self, context, pool, host):
        cctxt = self.client.prepare(server=host)
        cctxt.cast(context, 'create_pool', pool=pool)

    def update_pool(self, context, old_pool, pool, host):
        cctxt = self.client.prepare(server=host)
        cctxt.cast(context, 'update_pool', old_pool=old_pool, pool=pool)

    def delete_pool(self, context, pool, host):
        cctxt = self.client.prepare(server=host)
        cctxt.cast(context, 'delete_pool', pool=pool)

    def create_member(self, context, member, host):
        cctxt = self.client.prepare(server=host)
        cctxt.cast(context, 'create_member', member=member)

    def update_member(self, context, old_member, member, host):
        cctxt = self.client.prepare(server=host)
        cctxt.cast(context, 'update_member', old_member=old_member,
                   member=member)

    def delete_member(self, context, member, host):
        cctxt = self.client.prepare(server=host)
        cctxt.cast(context, 'delete_member', member=member)

    def create_healthmonitor(self, context, healthmonitor, host):
        cctxt = self.client.prepare(server=host)
        cctxt.cast(context, 'create_healthmonitor',
                   healthmonitor=healthmonitor)

    def update_healthmonitor(self, context, old_healthmonitor,
                             healthmonitor, host):
        cctxt = self.client.prepare(server=host)
        cctxt.cast(context, 'update_healthmonitor',
                   old_healthmonitor=old_healthmonitor,
                   healthmonitor=healthmonitor)

    def delete_healthmonitor(self, context, healthmonitor, host):
        cctxt = self.client.prepare(server=host)
        cctxt.cast(context, 'delete_healthmonitor',
                   healthmonitor=healthmonitor)


class LoadBalancerManager(driver_base.BaseLoadBalancerManager):
    """Dispatches load balancer operations to the scheduled agent."""

    def update(self, context, old_loadbalancer, loadbalancer):
        super(LoadBalancerManager, self).update(context, old_loadbalancer,
                                                loadbalancer)
        agent = self.driver.get_loadbalancer_agent(context, loadbalancer.id)
        self.driver.agent_rpc.update_loadbalancer(
            context, old_loadbalancer, loadbalancer, agent['host'])

    def create(self, context, loadbalancer):
        super(LoadBalancerManager, self).create(context, loadbalancer)
        # Pick an agent; the scheduler may return None when no eligible
        # agent is alive.
        agent = self.driver.loadbalancer_scheduler.schedule(
            self.driver.plugin, context, loadbalancer,
            self.driver.device_driver)
        if not agent:
            raise lbaas_agentschedulerv2.NoEligibleLbaasAgent(
                loadbalancer_id=loadbalancer.id)
        self.driver.agent_rpc.create_loadbalancer(
            context, loadbalancer, agent['host'], self.driver.device_driver)

    def delete(self, context, loadbalancer):
        super(LoadBalancerManager, self).delete(context, loadbalancer)
        agent = self.driver.get_loadbalancer_agent(context, loadbalancer.id)
        # TODO(blogan): Rethink deleting from the database here. May want to
        # wait until the agent actually deletes it. Doing this now to keep
        # what v1 had.
        self.driver.plugin.db.delete_loadbalancer(context, loadbalancer.id)
        if agent:
            self.driver.agent_rpc.delete_loadbalancer(context, loadbalancer,
                                                      agent['host'])

    def stats(self, context, loadbalancer):
        # Stats are not collected through this driver path.
        pass

    def refresh(self, context, loadbalancer):
        # Refresh is a no-op for the agent-based driver.
        pass


class ListenerManager(driver_base.BaseListenerManager):
    """Dispatches listener operations to the agent hosting the LB."""

    def update(self, context, old_listener, listener):
        super(ListenerManager, self).update(
            context, old_listener.to_dict(), listener.to_dict())
        agent = self.driver.get_loadbalancer_agent(
            context, listener.loadbalancer.id)
        self.driver.agent_rpc.update_listener(context, old_listener, listener,
                                              agent['host'])

    def create(self, context, listener):
        super(ListenerManager, self).create(context, listener)
        agent = self.driver.get_loadbalancer_agent(
            context, listener.loadbalancer.id)
        self.driver.agent_rpc.create_listener(context, listener,
                                              agent['host'])

    def delete(self, context, listener):
        super(ListenerManager, self).delete(context, listener)
        agent = self.driver.get_loadbalancer_agent(context,
                                                   listener.loadbalancer.id)
        # TODO(blogan): Rethink deleting from the database and updating the lb
        # status here. May want to wait until the agent actually deletes it.
        # Doing this now to keep what v1 had.
        self.driver.plugin.db.delete_listener(context, listener.id)
        self.driver.plugin.db.update_loadbalancer_provisioning_status(
            context, listener.loadbalancer.id)
        self.driver.agent_rpc.delete_listener(context, listener,
                                              agent['host'])


class PoolManager(driver_base.BasePoolManager):
    """Dispatches pool operations to the agent hosting the LB."""

    def update(self, context, old_pool, pool):
        super(PoolManager, self).update(context, old_pool, pool)
        agent = self.driver.get_loadbalancer_agent(
            context, pool.loadbalancer.id)
        self.driver.agent_rpc.update_pool(context, old_pool, pool,
                                          agent['host'])

    def create(self, context, pool):
        super(PoolManager, self).create(context, pool)
        agent = self.driver.get_loadbalancer_agent(
            context, pool.loadbalancer.id)
        self.driver.agent_rpc.create_pool(context, pool, agent['host'])

    def delete(self, context, pool):
        super(PoolManager, self).delete(context, pool)
        agent = self.driver.get_loadbalancer_agent(
            context, pool.loadbalancer.id)
        # TODO(blogan): Rethink deleting from the database and updating the lb
        # status here. May want to wait until the agent actually deletes it.
        # Doing this now to keep what v1 had.
        self.driver.plugin.db.delete_pool(context, pool.id)
        self.driver.plugin.db.update_loadbalancer_provisioning_status(
            context, pool.loadbalancer.id)
        self.driver.agent_rpc.delete_pool(context, pool, agent['host'])


class MemberManager(driver_base.BaseMemberManager):
    """Dispatches member operations to the agent hosting the LB."""

    def update(self, context, old_member, member):
        super(MemberManager, self).update(context, old_member, member)
        agent = self.driver.get_loadbalancer_agent(
            context, member.pool.loadbalancer.id)
        self.driver.agent_rpc.update_member(context, old_member, member,
                                            agent['host'])

    def create(self, context, member):
        super(MemberManager, self).create(context, member)
        agent = self.driver.get_loadbalancer_agent(
            context, member.pool.loadbalancer.id)
        self.driver.agent_rpc.create_member(context, member, agent['host'])

    def delete(self, context, member):
        super(MemberManager, self).delete(context, member)
        agent = self.driver.get_loadbalancer_agent(
            context, member.pool.loadbalancer.id)
        # TODO(blogan): Rethink deleting from the database and updating the lb
        # status here. May want to wait until the agent actually deletes it.
        # Doing this now to keep what v1 had.
        self.driver.plugin.db.delete_pool_member(context, member.id)
        self.driver.plugin.db.update_loadbalancer_provisioning_status(
            context, member.pool.loadbalancer.id)
        self.driver.agent_rpc.delete_member(context, member, agent['host'])


class HealthMonitorManager(driver_base.BaseHealthMonitorManager):
    """Dispatches health monitor operations to the agent hosting the LB."""

    def update(self, context, old_healthmonitor, healthmonitor):
        super(HealthMonitorManager, self).update(
            context, old_healthmonitor, healthmonitor)
        agent = self.driver.get_loadbalancer_agent(
            context, healthmonitor.pool.loadbalancer.id)
        self.driver.agent_rpc.update_healthmonitor(
            context, old_healthmonitor, healthmonitor, agent['host'])

    def create(self, context, healthmonitor):
        super(HealthMonitorManager, self).create(context, healthmonitor)
        agent = self.driver.get_loadbalancer_agent(
            context, healthmonitor.pool.loadbalancer.id)
        self.driver.agent_rpc.create_healthmonitor(
            context, healthmonitor, agent['host'])

    def delete(self, context, healthmonitor):
        super(HealthMonitorManager, self).delete(context, healthmonitor)
        agent = self.driver.get_loadbalancer_agent(
            context, healthmonitor.pool.loadbalancer.id)
        # TODO(blogan): Rethink deleting from the database and updating the lb
        # status here. May want to wait until the agent actually deletes it.
        # Doing this now to keep what v1 had.
        self.driver.plugin.db.delete_healthmonitor(context, healthmonitor.id)
        self.driver.plugin.db.update_loadbalancer_provisioning_status(
            context, healthmonitor.pool.loadbalancer.id)
        self.driver.agent_rpc.delete_healthmonitor(
            context, healthmonitor, agent['host'])


class AgentDriverBase(driver_base.LoadBalancerBaseDriver):
    """Base class for agent-based LBaaS v2 plugin drivers.

    Vendor drivers subclass this and set ``device_driver`` to the name of
    the device driver the agent should load.
    """

    # name of device driver that should be used by the agent;
    # vendor specific plugin drivers must override it;
    device_driver = None

    def __init__(self, plugin):
        super(AgentDriverBase, self).__init__(plugin)
        if not self.device_driver:
            raise DriverNotSpecified()

        self.load_balancer = LoadBalancerManager(self)
        self.listener = ListenerManager(self)
        self.pool = PoolManager(self)
        self.member = MemberManager(self)
        self.health_monitor = HealthMonitorManager(self)

        self.agent_rpc = LoadBalancerAgentApi(lb_const.LOADBALANCER_AGENTV2)

        self.agent_endpoints = [
            agent_callbacks.LoadBalancerCallbacks(self.plugin),
            agents_db.AgentExtRpcCallback(self.plugin.db)
        ]

        self.conn = None

        # Setting this on the db because the plugin no longer inherits from
        # database classes, the db does.
        self.plugin.db.agent_notifiers.update(
            {lb_const.AGENT_TYPE_LOADBALANCERV2: self.agent_rpc})

        # Resolve the scheduler driver through the provider configuration
        # so an alias can be used instead of a full class path.
        lb_sched_driver = provconf.get_provider_driver_class(
            cfg.CONF.loadbalancer_scheduler_driver, LB_SCHEDULERS)
        self.loadbalancer_scheduler = importutils.import_object(
            lb_sched_driver)

    def start_rpc_listeners(self):
        # other agent based plugin driver might already set callbacks on plugin
        if hasattr(self.plugin, 'agent_callbacks'):
            return

        self.conn = n_rpc.create_connection()
        self.conn.create_consumer(lb_const.LOADBALANCER_PLUGINV2,
                                  self.agent_endpoints,
                                  fanout=False)
        return self.conn.consume_in_threads()

    def get_loadbalancer_agent(self, context, loadbalancer_id):
        # Raises NoActiveLbaasAgent if the LB is not hosted anywhere.
        agent = self.plugin.db.get_agent_hosting_loadbalancer(
            context, loadbalancer_id)
        if not agent:
            raise lbaas_agentschedulerv2.NoActiveLbaasAgent(
                loadbalancer_id=loadbalancer_id)
        return agent['agent']

# --- neutron_lbaas/drivers/brocade/driver_v2.py (header continues below) ---
#
# Copyright 2014 Brocade Communications Systems, Inc.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Pattabi Ayyasami (pattabi), Brocade Communications Systems,Inc.
#

from brocade_neutron_lbaas import adx_device_driver_v2 as device_driver

from neutron_lbaas.drivers import driver_base


class BrocadeLoadBalancerDriver(driver_base.LoadBalancerBaseDriver):
    """LBaaS v2 driver that delegates to the Brocade ADX device driver."""

    def __init__(self, plugin):
        super(BrocadeLoadBalancerDriver, self).__init__(plugin)
        self.load_balancer = BrocadeLoadBalancerManager(self)
        self.listener = BrocadeListenerManager(self)
        self.pool = BrocadePoolManager(self)
        self.member = BrocadeMemberManager(self)
        self.health_monitor = BrocadeHealthMonitorManager(self)
        self.device_driver = device_driver.BrocadeAdxDeviceDriverV2(plugin)


class BrocadeLoadBalancerManager(driver_base.BaseLoadBalancerManager):
    """Load balancer CRUD; marks the entity failed on any device error."""

    def create(self, context, obj):
        try:
            self.driver.device_driver.create_loadbalancer(obj)
            self.successful_completion(context, obj)
        except Exception:
            self.failed_completion(context, obj)

    def update(self, context, old_obj, obj):
        try:
            self.driver.device_driver.update_loadbalancer(obj, old_obj)
            self.successful_completion(context, obj)
        except Exception:
            self.failed_completion(context, obj)

    def delete(self, context, obj):
        try:
            self.driver.device_driver.delete_loadbalancer(obj)
        except Exception:
            # Ignore the exception: delete is best-effort and always
            # completes successfully on the neutron side.
            pass
        self.successful_completion(context, obj, delete=True)

    def refresh(self, context, lb_obj):
        # This is intended to trigger the backend to check and repair
        # the state of this load balancer and all of its dependent objects
        self.driver.device_driver.refresh(lb_obj)

    def stats(self, context, lb_obj):
        return self.driver.device_driver.stats(lb_obj)


class BrocadeListenerManager(driver_base.BaseListenerManager):
    """Listener CRUD delegated to the Brocade device driver."""

    def create(self, context, obj):
        try:
            self.driver.device_driver.create_listener(obj)
            self.successful_completion(context, obj)
        except Exception:
            self.failed_completion(context, obj)

    def update(self, context, old_obj, obj):
        try:
            self.driver.device_driver.update_listener(obj, old_obj)
            self.successful_completion(context, obj)
        except Exception:
            self.failed_completion(context, obj)

    def delete(self, context, obj):
        try:
            self.driver.device_driver.delete_listener(obj)
        except Exception:
            # Ignore the exception
            pass
        self.successful_completion(context, obj, delete=True)


class BrocadePoolManager(driver_base.BasePoolManager):
    """Pool CRUD delegated to the Brocade device driver."""

    def create(self, context, obj):
        try:
            self.driver.device_driver.create_pool(obj)
            self.successful_completion(context, obj)
        except Exception:
            self.failed_completion(context, obj)

    def update(self, context, old_obj, obj):
        try:
            self.driver.device_driver.update_pool(obj, old_obj)
            self.successful_completion(context, obj)
        except Exception:
            self.failed_completion(context, obj)

    def delete(self, context, obj):
        try:
            self.driver.device_driver.delete_pool(obj)
        except Exception:
            # Ignore the exception
            pass
        self.successful_completion(context, obj, delete=True)


class BrocadeMemberManager(driver_base.BaseMemberManager):
    """Member CRUD delegated to the Brocade device driver."""

    def create(self, context, obj):
        try:
            self.driver.device_driver.create_member(obj)
            self.successful_completion(context, obj)
        except Exception:
            self.failed_completion(context, obj)

    def update(self, context, old_obj, obj):
        try:
            self.driver.device_driver.update_member(obj, old_obj)
            self.successful_completion(context, obj)
        except Exception:
            self.failed_completion(context, obj)

    def delete(self, context, obj):
        try:
            self.driver.device_driver.delete_member(obj)
        except Exception:
            # Ignore the exception
            pass
        self.successful_completion(context, obj, delete=True)


class BrocadeHealthMonitorManager(driver_base.BaseHealthMonitorManager):
    """Health monitor CRUD delegated to the Brocade device driver."""

    def create(self, context, obj):
        try:
            self.driver.device_driver.create_healthmonitor(obj)
            self.successful_completion(context, obj)
        except Exception:
            self.failed_completion(context, obj)

    def update(self, context, old_obj, obj):
        try:
            self.driver.device_driver.update_healthmonitor(obj, old_obj)
            self.successful_completion(context, obj)
        except Exception:
            self.failed_completion(context, obj)

    def delete(self, context, obj):
        try:
            self.driver.device_driver.delete_healthmonitor(obj)
        except Exception:
            # Ignore the exception
            pass
        self.successful_completion(context, obj, delete=True)

# --- neutron_lbaas/drivers/a10networks/driver_v2.py (header) ---
#
# Copyright 2015, Doug Wiegley (dougwig), A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import a10_neutron_lbaas
from oslo_log import log as logging

from neutron_lbaas.drivers import driver_base

# Version of this shim driver (the real logic lives in a10_neutron_lbaas).
VERSION = "2.0.0"
LOG = logging.getLogger(__name__)


class ThunderDriver(driver_base.LoadBalancerBaseDriver):
    """LBaaS v2 driver shim for A10 Thunder appliances.

    All operations are forwarded to the external ``a10_neutron_lbaas``
    package via ``self.a10``.
    """

    def __init__(self, plugin):
        super(ThunderDriver, self).__init__(plugin)
        self.load_balancer = LoadBalancerManager(self)
        self.listener = ListenerManager(self)
        self.pool = PoolManager(self)
        self.member = MemberManager(self)
        self.health_monitor = HealthMonitorManager(self)

        LOG.debug("A10Driver: v2 initializing, version=%s, lbaas_manager=%s",
                  VERSION, a10_neutron_lbaas.VERSION)

        self.a10 = a10_neutron_lbaas.A10OpenstackLBV2(self)


class LoadBalancerManager(driver_base.BaseLoadBalancerManager):
    """Forwards load balancer operations to the A10 library."""

    def create(self, context, lb):
        self.driver.a10.lb.create(context, lb)

    def update(self, context, old_lb, lb):
        self.driver.a10.lb.update(context, old_lb, lb)

    def delete(self, context, lb):
        self.driver.a10.lb.delete(context, lb)

    def refresh(self, context, lb):
        self.driver.a10.lb.refresh(context, lb)

    def stats(self, context, lb):
        return self.driver.a10.lb.stats(context, lb)


class ListenerManager(driver_base.BaseListenerManager):
    """Forwards listener operations to the A10 library."""

    def create(self, context, listener):
        self.driver.a10.listener.create(context, listener)

    def update(self, context, old_listener, listener):
        self.driver.a10.listener.update(context, old_listener, listener)

    def delete(self, context, listener):
        self.driver.a10.listener.delete(context, listener)


class PoolManager(driver_base.BasePoolManager):
    """Forwards pool operations to the A10 library."""

    def create(self, context, pool):
        self.driver.a10.pool.create(context, pool)

    def update(self, context, old_pool, pool):
        self.driver.a10.pool.update(context, old_pool, pool)

    def delete(self, context, pool):
        self.driver.a10.pool.delete(context, pool)


class MemberManager(driver_base.BaseMemberManager):
    """Forwards member operations to the A10 library."""

    def create(self, context, member):
        self.driver.a10.member.create(context, member)

    def update(self, context, old_member, member):
        self.driver.a10.member.update(context, old_member, member)

    def delete(self, context, member):
        self.driver.a10.member.delete(context, member)


class HealthMonitorManager(driver_base.BaseHealthMonitorManager):
    """Forwards health monitor operations to the A10 library."""

    def create(self, context, hm):
        self.driver.a10.hm.create(context, hm)

    def update(self, context, old_hm, hm):
        self.driver.a10.hm.update(context, old_hm, hm)

    def delete(self, context, hm):
        self.driver.a10.hm.delete(context, hm)

# --- neutron_lbaas/drivers/octavia/octavia_messaging_consumer.py (header) ---
#
# Copyright 2016 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lbaas._i18n import _LI
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_service import service

oslo_messaging_opts = [
    # NOTE(review): this help string uses ``_`` which is not imported in
    # this chunk — presumably it is provided by neutron_lbaas._i18n
    # elsewhere or by a gettext builtin; verify against the full file.
    cfg.StrOpt('event_stream_topic',
               default='neutron_lbaas_event',
               help=_('topic name for receiving events from a queue'))
]

cfg.CONF.register_opts(oslo_messaging_opts, group='oslo_messaging')

LOG = logging.getLogger(__name__)


class InfoContainer(object):
    """Value object carried on the Octavia event stream.

    Holds an event type, the id of the entity it concerns, and an opaque
    payload; comparable by value and round-trippable through to_dict().
    """

    @staticmethod
    def from_dict(dict_obj):
        # Inverse of to_dict(); expects exactly the three keys below.
        return InfoContainer(dict_obj['info_type'],
                             dict_obj['info_id'],
                             dict_obj['info_payload'])

    def __init__(self, info_type, info_id, info_payload):
        self.info_type = info_type
        self.info_id = info_id
        self.info_payload = info_payload

    def to_dict(self):
        return {'info_type': self.info_type,
                'info_id': self.info_id,
                'info_payload': self.info_payload}

    def __eq__(self, other):
        # Field-by-field value equality; anything that is not an
        # InfoContainer compares unequal.
        if not isinstance(other, InfoContainer):
            return False
        if self.info_type != other.info_type:
            return False
        if self.info_id != other.info_id:
            return False
        if self.info_payload != other.info_payload:
            return False
        return True

    def __ne__(self, other):
        return not self == other


class ConsumerEndPoint(object):
    """RPC endpoint invoked by oslo.messaging for streamed events."""

    target = messaging.Target(namespace="control", version='1.0')

    def __init__(self, driver):
        self.driver = driver

    def update_info(self, ctx, container):
        # ``container`` arrives as a plain dict on the wire; rebuild the
        # value object before handing it to the driver.
        LOG.debug("Received event from stream %s", container)
        container_inst = InfoContainer.from_dict(container)
        self.driver.handle_streamed_event(container_inst)


class OctaviaConsumer(service.Service):
    """oslo.service worker that consumes the Octavia event stream."""

    def __init__(self, driver, **kwargs):
        super(OctaviaConsumer, self).__init__(**kwargs)
        topic = cfg.CONF.oslo_messaging.event_stream_topic
        server = cfg.CONF.host
        self.driver = driver
        self.transport = messaging.get_transport(cfg.CONF)
        self.target = messaging.Target(topic=topic, server=server,
                                       exchange="common", fanout=False)
        self.endpoints = [ConsumerEndPoint(self.driver)]
        # RPC server is created lazily in start().
        self.server = None

    def start(self):
        super(OctaviaConsumer, self).start()
        LOG.info(_LI("Starting octavia consumer..."))
        self.server = messaging.get_rpc_server(self.transport, self.target,
                                               self.endpoints,
                                               executor='eventlet')
        self.server.start()

    def stop(self, graceful=False):
        # Stop the RPC server first; when graceful, drain in-flight
        # messages before letting the base service shut down.
        if self.server:
            LOG.info(_LI('Stopping consumer...'))
            self.server.stop()
            if graceful:
                LOG.info(
                    _LI('Consumer successfully stopped.  Waiting for final '
                        'messages to be processed...'))
                self.server.wait()
        super(OctaviaConsumer, self).stop(graceful=graceful)

    def reset(self):
        if self.server:
            self.server.reset()
        super(OctaviaConsumer, self).reset()

# --- neutron_lbaas/drivers/octavia/driver.py (header) ---
#
# Copyright 2015, A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from functools import wraps
import threading
import time

from neutron import context as ncontext
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_service import service
from oslo_utils import excutils
import requests

from neutron_lbaas._i18n import _
from neutron_lbaas.common import keystone
from neutron_lbaas.drivers import driver_base
from neutron_lbaas.drivers.octavia import octavia_messaging_consumer
from neutron_lbaas.services.loadbalancer import constants

LOG = logging.getLogger(__name__)
VERSION = "1.0.1"

OPTS = [
    cfg.StrOpt(
        'base_url',
        default='http://127.0.0.1:9876',
        help=_('URL of Octavia controller root'),
    ),
    cfg.IntOpt(
        'request_poll_interval',
        default=3,
        help=_('Interval in seconds to poll octavia when an entity is '
               'created, updated, or deleted.')
    ),
    cfg.IntOpt(
        'request_poll_timeout',
        default=100,
        help=_('Time to stop polling octavia when a status of an entity '
               'does not change.')
    ),
    cfg.BoolOpt(
        'allocates_vip',
        default=False,
        help=_('True if Octavia will be responsible for allocating the VIP.'
               ' False if neutron-lbaas will allocate it and pass to '
               'Octavia.')
    )
]

cfg.CONF.register_opts(OPTS, 'octavia')


def thread_op(manager, entity, delete=False, lb_create=False):
    """Poll Octavia until the root load balancer settles, then complete.

    Runs in a daemon thread started by ``async_op``.  Polls the load
    balancer's provisioning_status every ``request_poll_interval`` seconds
    until it reaches ACTIVE/DELETED (success), ERROR (failure), or the
    ``request_poll_timeout`` deadline expires (failure).
    """
    context = ncontext.get_admin_context()
    poll_interval = cfg.CONF.octavia.request_poll_interval
    poll_timeout = cfg.CONF.octavia.request_poll_timeout
    start_dt = datetime.now()
    prov_status = None
    # BUGFIX: use total_seconds() for the elapsed time.  timedelta.seconds
    # is only the seconds *component* (0..86399) and ignores the days
    # field, so the deadline computation would wrap around every 24 hours.
    while (datetime.now() - start_dt).total_seconds() < poll_timeout:
        octavia_lb = manager.driver.load_balancer.get(
            entity.root_loadbalancer)
        prov_status = octavia_lb.get('provisioning_status')
        LOG.debug("Octavia reports load balancer {0} has provisioning status "
                  "of {1}".format(entity.root_loadbalancer.id, prov_status))
        if prov_status == 'ACTIVE' or prov_status == 'DELETED':
            kwargs = {'delete': delete}
            if manager.driver.allocates_vip and lb_create:
                kwargs['lb_create'] = lb_create
                # TODO(blogan): drop fk constraint on vip_port_id to ports
                # table because the port can't be removed unless the load
                # balancer has been deleted.  Until then we won't populate
                # the vip_port_id field.
                # entity.vip_port_id = octavia_lb.get('vip').get('port_id')
                entity.vip_address = octavia_lb.get('vip').get('ip_address')
            manager.successful_completion(context, entity, **kwargs)
            return
        elif prov_status == 'ERROR':
            manager.failed_completion(context, entity)
            return
        time.sleep(poll_interval)

    LOG.debug("Timeout has expired for load balancer {0} to complete an "
              "operation.  The last reported status was "
              "{1}".format(entity.root_loadbalancer.id, prov_status))
    manager.failed_completion(context, entity)


# A decorator for wrapping driver operations, which will automatically
# set the neutron object's status based on whether it sees an exception
def async_op(func):
    @wraps(func)
    def func_wrapper(*args, **kwargs):
        # args[0] is the manager, args[1] the context, args[2] the entity.
        d = (func.__name__ == 'delete' or func.__name__ == 'delete_cascade')
        lb_create = ((func.__name__ == 'create') and
                     isinstance(args[0], LoadBalancerManager))
        try:
            r = func(*args, **kwargs)
            # Complete the operation asynchronously; daemon so it never
            # blocks process shutdown.
            thread = threading.Thread(target=thread_op,
                                      args=(args[0], args[2]),
                                      kwargs={'delete': d,
                                              'lb_create': lb_create})
            thread.setDaemon(True)
            thread.start()
            return r
        except Exception:
            with excutils.save_and_reraise_exception():
                args[0].failed_completion(args[1], args[2])
    return func_wrapper


class OctaviaRequest(object):
    """Thin HTTP client for the Octavia v1 API."""

    def __init__(self, base_url, auth_session):
        self.base_url = base_url
        self.auth_session = auth_session

    def request(self, method, url, args=None, headers=None):
        """Issue an HTTP request; returns parsed JSON except for DELETE."""
        if args:
            # A keystone token is only fetched when a body is sent and no
            # explicit headers were supplied.
            if not headers:
                token = self.auth_session.get_token()
                headers = {
                    'Content-type': 'application/json',
                    'X-Auth-Token': token
                }
            args = jsonutils.dumps(args)
        LOG.debug("url = %s", '%s%s' % (self.base_url, str(url)))
        LOG.debug("args = %s", args)
        r = requests.request(method,
                             '%s%s' % (self.base_url, str(url)),
                             data=args,
                             headers=headers)
        LOG.debug("Octavia Response Code: {0}".format(r.status_code))
        LOG.debug("Octavia Response Body: {0}".format(r.content))
        LOG.debug("Octavia Response Headers: {0}".format(r.headers))
        if method != 'DELETE':
            return r.json()

    def post(self, url, args):
        return self.request('POST', url, args)

    def put(self, url, args):
        return self.request('PUT', url, args)

    def delete(self, url):
        self.request('DELETE', url)

    def get(self, url):
        return self.request('GET', url)


class OctaviaDriver(driver_base.LoadBalancerBaseDriver):
    """LBaaS v2 provider driver that proxies to an Octavia controller."""

    def __init__(self, plugin):
        super(OctaviaDriver, self).__init__(plugin)
        self.req = OctaviaRequest(cfg.CONF.octavia.base_url,
                                  keystone.get_session())

        self.load_balancer = LoadBalancerManager(self)
        self.listener = ListenerManager(self)
        self.pool = PoolManager(self)
        self.member = MemberManager(self)
        self.health_monitor = HealthMonitorManager(self)
        self.l7policy = L7PolicyManager(self)
        self.l7rule = L7RuleManager(self)

        # Consume Octavia's event stream for status updates.
        self.octavia_consumer = octavia_messaging_consumer.OctaviaConsumer(
            self)
        service.launch(cfg.CONF, self.octavia_consumer)

        LOG.debug("OctaviaDriver: initialized, version=%s", VERSION)

    @property
    def allocates_vip(self):
        return self.load_balancer.allocates_vip


class LoadBalancerManager(driver_base.BaseLoadBalancerManager):
    """Load balancer operations against /v1/loadbalancers."""

    @staticmethod
    def _url(lb, id=None):
        s = '/v1/loadbalancers'
        if id:
            s += '/%s' % id
        return s

    @property
    def allocates_vip(self):
        # Whether Octavia (rather than neutron-lbaas) allocates the VIP.
        return cfg.CONF.octavia.allocates_vip

    @property
    def deletes_cascade(self):
        return True

    def create_and_allocate_vip(self, context, lb):
        self.create(context, lb)

    @async_op
    def create(self, context, lb):
        args = {
            'id': lb.id,
            'name': lb.name,
            'description': lb.description,
            'enabled': lb.admin_state_up,
            'project_id': lb.tenant_id,
            'vip': {
                'subnet_id': lb.vip_subnet_id,
                'ip_address': lb.vip_address,
                'port_id': lb.vip_port_id,
            }
        }
        self.driver.req.post(self._url(lb), args)

    @async_op
    def update(self, context, old_lb, lb):
        args = {
            'name': lb.name,
            'description': lb.description,
            'enabled': lb.admin_state_up,
        }
        self.driver.req.put(self._url(lb, lb.id), args)

    @async_op
    def delete(self, context, lb):
        self.driver.req.delete(self._url(lb, lb.id))

    @async_op
    def refresh(self, context, lb):
        pass

    def stats(self, context, lb):
        # TODO: stats are not yet proxied from Octavia.
        return {}

    def get(self, lb):
        return self.driver.req.get(self._url(lb, lb.id))

    @async_op
    def delete_cascade(self, context, lb):
        self.driver.req.delete(self._url(lb, lb.id) + '/delete_cascade')


class ListenerManager(driver_base.BaseListenerManager):
    """Listener operations against /v1/loadbalancers/<lb>/listeners."""

    @staticmethod
    def _url(listener, id=None):
        s = '/v1/loadbalancers/%s/listeners' % listener.loadbalancer.id
        if id:
            s += '/%s' % id
        return s

    @classmethod
    def _write(cls, write_func, url, listener, create=True):
        # Build the request body shared by create (POST) and update (PUT);
        # id/project_id are only valid on create.
        sni_container_ids = [sni.tls_container_id
                             for sni in listener.sni_containers]
        args = {
            'name': listener.name,
            'description': listener.description,
            'enabled': listener.admin_state_up,
            'protocol': listener.protocol,
            'protocol_port': listener.protocol_port,
            'connection_limit': listener.connection_limit,
            'tls_certificate_id': listener.default_tls_container_id,
            'default_pool_id': listener.default_pool_id,
            'sni_containers': sni_container_ids
        }
        if create:
            args['project_id'] = listener.tenant_id
            args['id'] = listener.id
        write_func(url, args)

    @async_op
    def create(self, context, listener):
        self._write(self.driver.req.post, self._url(listener), listener)

    @async_op
    def update(self, context, old_listener, listener):
        self._write(self.driver.req.put, self._url(listener, id=listener.id),
                    listener, create=False)

    @async_op
    def delete(self, context, listener):
        self.driver.req.delete(self._url(listener, id=listener.id))


class PoolManager(driver_base.BasePoolManager):
    """Pool operations against /v1/loadbalancers/<lb>/pools."""

    @staticmethod
    def _url(pool, id=None):
        s = '/v1/loadbalancers/%s/pools' % (
            pool.loadbalancer.id)
        if id:
            s += '/%s' % id
        return s

    @classmethod
    def _write(cls, write_func, url, pool, create=True):
        args = {
            'name': pool.name,
            'description': pool.description,
            'enabled': pool.admin_state_up,
            'protocol': pool.protocol,
            'lb_algorithm': pool.lb_algorithm
        }
        # Explicit null clears session persistence on update.
        if pool.session_persistence:
            args['session_persistence'] = {
                'type': pool.session_persistence.type,
                'cookie_name': pool.session_persistence.cookie_name,
            }
        else:
            args['session_persistence'] = None
        if create:
            args['project_id'] = pool.tenant_id
            args['id'] = pool.id
            if pool.listeners:
                args['listener_id'] = pool.listeners[0].id
        write_func(url, args)

    @async_op
    def create(self, context, pool):
        self._write(self.driver.req.post, self._url(pool), pool)

    @async_op
    def update(self, context, old_pool, pool):
        self._write(self.driver.req.put, self._url(pool, id=pool.id),
                    pool, create=False)

    @async_op
    def delete(self, context, pool):
        self.driver.req.delete(self._url(pool, id=pool.id))


class MemberManager(driver_base.BaseMemberManager):
    """Member operations against .../pools/<pool>/members."""

    @staticmethod
    def _url(member, id=None):
        s = '/v1/loadbalancers/%s/pools/%s/members' % (
            member.pool.loadbalancer.id,
            member.pool.id)
        if id:
            s += '/%s' % id
        return s

    @async_op
    def create(self, context, member):
        args = {
            'id': member.id,
            'enabled': member.admin_state_up,
            'ip_address': member.address,
            'protocol_port': member.protocol_port,
            'weight': member.weight,
            'subnet_id': member.subnet_id,
            'project_id': member.tenant_id
        }
        self.driver.req.post(self._url(member), args)

    @async_op
    def update(self, context, old_member, member):
        # Only mutable member attributes are sent on update.
        args = {
            'enabled': member.admin_state_up,
            'protocol_port': member.protocol_port,
            'weight': member.weight,
        }
        self.driver.req.put(self._url(member, member.id), args)

    @async_op
    def delete(self, context, member):
        self.driver.req.delete(self._url(member, member.id))


class HealthMonitorManager(driver_base.BaseHealthMonitorManager):
    """Health monitor operations against .../pools/<pool>/healthmonitor."""

    @staticmethod
    def _url(hm):
        # Singleton resource per pool: no id segment.
        s = '/v1/loadbalancers/%s/pools/%s/healthmonitor' % (
            hm.pool.loadbalancer.id,
            hm.pool.id)
        return s

    @classmethod
    def _write(cls, write_func, url, hm, create=True):
        args = {
            'type': hm.type,
            'delay': hm.delay,
            'timeout': hm.timeout,
            # neutron-lbaas exposes a single max_retries; Octavia's rise
            # and fall thresholds are both mapped to it.
            'rise_threshold': hm.max_retries,
            'fall_threshold': hm.max_retries,
            'http_method': hm.http_method,
            'url_path': hm.url_path,
            'expected_codes': hm.expected_codes,
            'enabled': hm.admin_state_up
        }
        if create:
            args['project_id'] = hm.tenant_id
        write_func(url, args)

    @async_op
    def create(self, context, hm):
        self._write(self.driver.req.post, self._url(hm), hm)

    @async_op
    def update(self, context, old_hm, hm):
        self._write(self.driver.req.put, self._url(hm), hm, create=False)

    @async_op
    def delete(self, context, hm):
        self.driver.req.delete(self._url(hm))


class L7PolicyManager(driver_base.BaseL7PolicyManager):
    """L7 policy operations against .../listeners/<listener>/l7policies."""

    @staticmethod
    def _url(l7p, id=None):
        s = '/v1/loadbalancers/%s/listeners/%s/l7policies' % (
            l7p.listener.loadbalancer.id,
            l7p.listener.id)
        if id:
            s += '/%s' % id
        return s

    @classmethod
    def _write(cls, write_func, url, l7p, create=True):
        args = {
            'name': l7p.name,
            'description': l7p.description,
            'action': l7p.action,
            'redirect_pool_id': l7p.redirect_pool_id,
            'redirect_url': l7p.redirect_url,
            'position': l7p.position,
            'enabled': l7p.admin_state_up
        }
        # Strip redirect fields that do not apply to the chosen action;
        # Octavia rejects them otherwise.
        if args['action'] == constants.L7_POLICY_ACTION_REJECT:
            del args['redirect_url']
            del args['redirect_pool_id']
        elif args['action'] == constants.L7_POLICY_ACTION_REDIRECT_TO_POOL:
            del args['redirect_url']
        elif args['action'] == constants.L7_POLICY_ACTION_REDIRECT_TO_URL:
            del args['redirect_pool_id']
        if create:
            args['id'] = l7p.id
        write_func(url, args)

    @async_op
    def create(self, context, l7p):
        self._write(self.driver.req.post, self._url(l7p), l7p)

    @async_op
    def update(self, context, old_l7p, l7p):
        self._write(self.driver.req.put, self._url(l7p, id=l7p.id),
                    l7p, create=False)

    @async_op
    def delete(self, context, l7p):
        self.driver.req.delete(self._url(l7p, id=l7p.id))


class L7RuleManager(driver_base.BaseL7RuleManager):
    """L7 rule operations against .../l7policies/<policy>/l7rules."""

    @staticmethod
    def _url(l7r, id=None):
        s = '/v1/loadbalancers/%s/listeners/%s/l7policies/%s/l7rules' % (
            l7r.policy.listener.loadbalancer.id,
            l7r.policy.listener.id,
            l7r.policy.id)
        if id:
            s += '/%s' % id
        return s

    @classmethod
    def _write(cls, write_func, url, l7r, create=True):
        args = {
            'type': l7r.type,
            'compare_type': l7r.compare_type,
            'key': l7r.key,
            'value': l7r.value,
            'invert': l7r.invert
        }
        if create:
            args['id'] = l7r.id
        write_func(url, args)

    @async_op
    def create(self, context, l7r):
        self._write(self.driver.req.post, self._url(l7r), l7r)

    @async_op
    def update(self, context, old_l7r, l7r):
        self._write(self.driver.req.put, self._url(l7r, id=l7r.id),
                    l7r, create=False)

    @async_op
    def delete(self, context, l7r):
        self.driver.req.delete(self._url(l7r, id=l7r.id))
jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/drivers/haproxy/__init__.py0000664000567000056710000000000012701407726025543 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/drivers/haproxy/plugin_driver.py0000664000567000056710000000161612701407726026673 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # Copyright (c) 2015 Rackspace. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lbaas.drivers.common import agent_driver_base from neutron_lbaas.drivers.haproxy import namespace_driver class HaproxyOnHostPluginDriver(agent_driver_base.AgentDriverBase): device_driver = namespace_driver.DRIVER_NAME neutron-lbaas-8.0.0/neutron_lbaas/drivers/haproxy/namespace_driver.py0000664000567000056710000004344012701407726027332 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import shutil import socket import netaddr from neutron.agent.linux import ip_lib from neutron.agent.linux import utils as linux_utils from neutron.common import utils as n_utils from neutron.plugins.common import constants from neutron_lib import exceptions from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from neutron_lbaas._i18n import _, _LI, _LE, _LW from neutron_lbaas.agent import agent_device_driver from neutron_lbaas.services.loadbalancer import constants as lb_const from neutron_lbaas.services.loadbalancer import data_models from neutron_lbaas.services.loadbalancer.drivers.haproxy import jinja_cfg from neutron_lbaas.services.loadbalancer.drivers.haproxy \ import namespace_driver LOG = logging.getLogger(__name__) NS_PREFIX = 'qlbaas-' STATS_TYPE_BACKEND_REQUEST = 2 STATS_TYPE_BACKEND_RESPONSE = '1' STATS_TYPE_SERVER_REQUEST = 4 STATS_TYPE_SERVER_RESPONSE = '2' DRIVER_NAME = 'haproxy_ns' STATE_PATH_V2_APPEND = 'v2' cfg.CONF.register_opts(namespace_driver.OPTS, 'haproxy') def get_ns_name(namespace_id): return NS_PREFIX + namespace_id class HaproxyNSDriver(agent_device_driver.AgentDeviceDriver): def __init__(self, conf, plugin_rpc): super(HaproxyNSDriver, self).__init__(conf, plugin_rpc) self.state_path = conf.haproxy.loadbalancer_state_path self.state_path = os.path.join( self.conf.haproxy.loadbalancer_state_path, STATE_PATH_V2_APPEND) try: vif_driver_class = n_utils.load_class_by_alias_or_classname( 'neutron.interface_drivers', conf.interface_driver) except ImportError: with excutils.save_and_reraise_exception(): msg = (_('Error importing interface driver: %s') % conf.interface_driver) LOG.error(msg) self.vif_driver = vif_driver_class(conf) self.deployed_loadbalancers = {} self._loadbalancer = LoadBalancerManager(self) self._listener = ListenerManager(self) self._pool = PoolManager(self) self._member = MemberManager(self) self._healthmonitor = HealthMonitorManager(self) @property def loadbalancer(self): 
return self._loadbalancer @property def listener(self): return self._listener @property def pool(self): return self._pool @property def member(self): return self._member @property def healthmonitor(self): return self._healthmonitor def get_name(self): return DRIVER_NAME @n_utils.synchronized('haproxy-driver') def undeploy_instance(self, loadbalancer_id, **kwargs): cleanup_namespace = kwargs.get('cleanup_namespace', False) delete_namespace = kwargs.get('delete_namespace', False) namespace = get_ns_name(loadbalancer_id) pid_path = self._get_state_file_path(loadbalancer_id, 'haproxy.pid') # kill the process kill_pids_in_file(pid_path) # unplug the ports if loadbalancer_id in self.deployed_loadbalancers: self._unplug(namespace, self.deployed_loadbalancers[loadbalancer_id].vip_port) # delete all devices from namespace # used when deleting orphans and port is not known for a loadbalancer if cleanup_namespace: ns = ip_lib.IPWrapper(namespace=namespace) for device in ns.get_devices(exclude_loopback=True): self.vif_driver.unplug(device.name, namespace=namespace) # remove the configuration directory conf_dir = os.path.dirname( self._get_state_file_path(loadbalancer_id, '')) if os.path.isdir(conf_dir): shutil.rmtree(conf_dir) if delete_namespace: ns = ip_lib.IPWrapper(namespace=namespace) ns.garbage_collect_namespace() def remove_orphans(self, known_loadbalancer_ids): if not os.path.exists(self.state_path): return orphans = (lb_id for lb_id in os.listdir(self.state_path) if lb_id not in known_loadbalancer_ids) for lb_id in orphans: if self.exists(lb_id): self.undeploy_instance(lb_id, cleanup_namespace=True) def get_stats(self, loadbalancer_id): socket_path = self._get_state_file_path(loadbalancer_id, 'haproxy_stats.sock', False) if os.path.exists(socket_path): parsed_stats = self._get_stats_from_socket( socket_path, entity_type=(STATS_TYPE_BACKEND_REQUEST | STATS_TYPE_SERVER_REQUEST)) lb_stats = self._get_backend_stats(parsed_stats) lb_stats['members'] = 
self._get_servers_stats(parsed_stats) return lb_stats else: LOG.warning(_LW('Stats socket not found for loadbalancer %s'), loadbalancer_id) return {} @n_utils.synchronized('haproxy-driver') def deploy_instance(self, loadbalancer): """Deploys loadbalancer if necessary :returns: True if loadbalancer was deployed, False otherwise """ if not self.deployable(loadbalancer): LOG.info(_LI("Loadbalancer %s is not deployable.") % loadbalancer.id) return False if self.exists(loadbalancer.id): self.update(loadbalancer) else: self.create(loadbalancer) return True def update(self, loadbalancer): pid_path = self._get_state_file_path(loadbalancer.id, 'haproxy.pid') extra_args = ['-sf'] extra_args.extend(p.strip() for p in open(pid_path, 'r')) self._spawn(loadbalancer, extra_args) def exists(self, loadbalancer_id): namespace = get_ns_name(loadbalancer_id) root_ns = ip_lib.IPWrapper() socket_path = self._get_state_file_path( loadbalancer_id, 'haproxy_stats.sock', False) if root_ns.netns.exists(namespace) and os.path.exists(socket_path): try: s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) s.connect(socket_path) return True except socket.error: pass return False def create(self, loadbalancer): namespace = get_ns_name(loadbalancer.id) self._plug(namespace, loadbalancer.vip_port, loadbalancer.vip_address) self._spawn(loadbalancer) def deployable(self, loadbalancer): """Returns True if loadbalancer is active and has active listeners.""" if not loadbalancer: return False acceptable_listeners = [ listener for listener in loadbalancer.listeners if (listener.provisioning_status != constants.PENDING_DELETE and listener.admin_state_up)] return (bool(acceptable_listeners) and loadbalancer.admin_state_up and loadbalancer.provisioning_status != constants.PENDING_DELETE) def _get_stats_from_socket(self, socket_path, entity_type): try: s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) s.connect(socket_path) s.send('show stat -1 %s -1\n' % entity_type) raw_stats = '' chunk_size = 1024 
while True: chunk = s.recv(chunk_size) raw_stats += chunk if len(chunk) < chunk_size: break return self._parse_stats(raw_stats) except socket.error as e: LOG.warning(_LW('Error while connecting to stats socket: %s'), e) return {} def _parse_stats(self, raw_stats): stat_lines = raw_stats.splitlines() if len(stat_lines) < 2: return [] stat_names = [name.strip('# ') for name in stat_lines[0].split(',')] res_stats = [] for raw_values in stat_lines[1:]: if not raw_values: continue stat_values = [value.strip() for value in raw_values.split(',')] res_stats.append(dict(zip(stat_names, stat_values))) return res_stats def _get_backend_stats(self, parsed_stats): for stats in parsed_stats: if stats.get('type') == STATS_TYPE_BACKEND_RESPONSE: unified_stats = dict((k, stats.get(v, '')) for k, v in jinja_cfg.STATS_MAP.items()) return unified_stats return {} def _get_servers_stats(self, parsed_stats): res = {} for stats in parsed_stats: if stats.get('type') == STATS_TYPE_SERVER_RESPONSE: res[stats['svname']] = { lb_const.STATS_STATUS: (constants.INACTIVE if stats['status'] == 'DOWN' else constants.ACTIVE), lb_const.STATS_HEALTH: stats['check_status'], lb_const.STATS_FAILED_CHECKS: stats['chkfail'] } return res def _get_state_file_path(self, loadbalancer_id, kind, ensure_state_dir=True): """Returns the file name for a given kind of config file.""" confs_dir = os.path.abspath(os.path.normpath(self.state_path)) conf_dir = os.path.join(confs_dir, loadbalancer_id) if ensure_state_dir: n_utils.ensure_dir(conf_dir) return os.path.join(conf_dir, kind) def _plug(self, namespace, port, vip_address, reuse_existing=True): self.plugin_rpc.plug_vip_port(port.id) interface_name = self.vif_driver.get_device_name(port) if ip_lib.device_exists(interface_name, namespace=namespace): if not reuse_existing: raise exceptions.PreexistingDeviceFailure( dev_name=interface_name ) else: self.vif_driver.plug( port.network_id, port.id, interface_name, port.mac_address, namespace=namespace ) cidrs = [ '%s/%s' % 
(ip.ip_address, netaddr.IPNetwork(ip.subnet.cidr).prefixlen) for ip in port.fixed_ips ] self.vif_driver.init_l3(interface_name, cidrs, namespace=namespace) # Haproxy socket binding to IPv6 VIP address will fail if this address # is not yet ready(i.e tentative address). if netaddr.IPAddress(vip_address).version == 6: device = ip_lib.IPDevice(interface_name, namespace=namespace) device.addr.wait_until_address_ready(vip_address) gw_ip = port.fixed_ips[0].subnet.gateway_ip if not gw_ip: host_routes = port.fixed_ips[0].subnet.host_routes for host_route in host_routes: if host_route.destination == "0.0.0.0/0": gw_ip = host_route.nexthop break else: cmd = ['route', 'add', 'default', 'gw', gw_ip] ip_wrapper = ip_lib.IPWrapper(namespace=namespace) ip_wrapper.netns.execute(cmd, check_exit_code=False) # When delete and re-add the same vip, we need to # send gratuitous ARP to flush the ARP cache in the Router. gratuitous_arp = self.conf.haproxy.send_gratuitous_arp if gratuitous_arp > 0: for ip in port.fixed_ips: cmd_arping = ['arping', '-U', '-I', interface_name, '-c', gratuitous_arp, ip.ip_address] ip_wrapper.netns.execute(cmd_arping, check_exit_code=False) def _unplug(self, namespace, port): self.plugin_rpc.unplug_vip_port(port.id) interface_name = self.vif_driver.get_device_name(port) self.vif_driver.unplug(interface_name, namespace=namespace) def _spawn(self, loadbalancer, extra_cmd_args=()): namespace = get_ns_name(loadbalancer.id) conf_path = self._get_state_file_path(loadbalancer.id, 'haproxy.conf') pid_path = self._get_state_file_path(loadbalancer.id, 'haproxy.pid') sock_path = self._get_state_file_path(loadbalancer.id, 'haproxy_stats.sock') user_group = self.conf.haproxy.user_group haproxy_base_dir = self._get_state_file_path(loadbalancer.id, '') jinja_cfg.save_config(conf_path, loadbalancer, sock_path, user_group, haproxy_base_dir) cmd = ['haproxy', '-f', conf_path, '-p', pid_path] cmd.extend(extra_cmd_args) ns = ip_lib.IPWrapper(namespace=namespace) 
ns.netns.execute(cmd) # remember deployed loadbalancer id self.deployed_loadbalancers[loadbalancer.id] = loadbalancer class LoadBalancerManager(agent_device_driver.BaseLoadBalancerManager): def refresh(self, loadbalancer): loadbalancer_dict = self.driver.plugin_rpc.get_loadbalancer( loadbalancer.id) loadbalancer = data_models.LoadBalancer.from_dict(loadbalancer_dict) if (not self.driver.deploy_instance(loadbalancer) and self.driver.exists(loadbalancer.id)): self.driver.undeploy_instance(loadbalancer.id) def delete(self, loadbalancer): if self.driver.exists(loadbalancer.id): self.driver.undeploy_instance(loadbalancer.id, delete_namespace=True) def create(self, loadbalancer): # loadbalancer has no listeners then do nothing because haproxy will # not start without a tcp port. Consider this successful anyway. if not loadbalancer.listeners: return self.refresh(loadbalancer) def get_stats(self, loadbalancer_id): return self.driver.get_stats(loadbalancer_id) def update(self, old_loadbalancer, loadbalancer): self.refresh(loadbalancer) class ListenerManager(agent_device_driver.BaseListenerManager): def _remove_listener(self, loadbalancer, listener_id): index_to_remove = None for index, listener in enumerate(loadbalancer.listeners): if listener.id == listener_id: index_to_remove = index loadbalancer.listeners.pop(index_to_remove) def update(self, old_listener, new_listener): self.driver.loadbalancer.refresh(new_listener.loadbalancer) def create(self, listener): self.driver.loadbalancer.refresh(listener.loadbalancer) def delete(self, listener): loadbalancer = listener.loadbalancer self._remove_listener(loadbalancer, listener.id) if len(loadbalancer.listeners) > 0: self.driver.loadbalancer.refresh(loadbalancer) else: # undeploy instance because haproxy will throw error if port is # missing in frontend self.driver.undeploy_instance(loadbalancer.id) class PoolManager(agent_device_driver.BasePoolManager): def update(self, old_pool, new_pool): 
self.driver.loadbalancer.refresh(new_pool.loadbalancer) def create(self, pool): self.driver.loadbalancer.refresh(pool.loadbalancer) def delete(self, pool): loadbalancer = pool.loadbalancer for l in loadbalancer.listeners: if l.default_pool == pool: l.default_pool = None # TODO(sbalukoff): Will need to do this or L7Policies as well # just refresh because haproxy is fine if only frontend is listed self.driver.loadbalancer.refresh(loadbalancer) class MemberManager(agent_device_driver.BaseMemberManager): def _remove_member(self, pool, member_id): index_to_remove = None for index, member in enumerate(pool.members): if member.id == member_id: index_to_remove = index pool.members.pop(index_to_remove) def update(self, old_member, new_member): self.driver.loadbalancer.refresh(new_member.pool.loadbalancer) def create(self, member): self.driver.loadbalancer.refresh(member.pool.loadbalancer) def delete(self, member): self._remove_member(member.pool, member.id) self.driver.loadbalancer.refresh(member.pool.loadbalancer) class HealthMonitorManager(agent_device_driver.BaseHealthMonitorManager): def update(self, old_hm, new_hm): self.driver.loadbalancer.refresh(new_hm.pool.loadbalancer) def create(self, hm): self.driver.loadbalancer.refresh(hm.pool.loadbalancer) def delete(self, hm): hm.pool.healthmonitor = None self.driver.loadbalancer.refresh(hm.pool.loadbalancer) def kill_pids_in_file(pid_path): if os.path.exists(pid_path): with open(pid_path, 'r') as pids: for pid in pids: pid = pid.strip() try: linux_utils.execute(['kill', '-9', pid], run_as_root=True) except RuntimeError: LOG.exception( _LE('Unable to kill haproxy process: %s'), pid ) neutron-lbaas-8.0.0/neutron_lbaas/_i18n.py0000664000567000056710000000252112701407726021564 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_i18n DOMAIN = "neutron_lbaas" _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary # The contextual translation function using the name "_C" _C = _translators.contextual_form # The plural translation function using the name "_P" _P = _translators.plural_form # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) neutron-lbaas-8.0.0/neutron_lbaas/agent/0000775000567000056710000000000012701410110021347 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/agent/agent_device_driver.py0000664000567000056710000000470012701407726025735 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation. All rights reserved # Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import abc import six @six.add_metaclass(abc.ABCMeta) class AgentDeviceDriver(object): """Abstract device driver that defines the API required by LBaaS agent.""" def __init__(self, conf, plugin_rpc): self.conf = conf self.plugin_rpc = plugin_rpc @abc.abstractproperty def loadbalancer(self): pass @abc.abstractproperty def listener(self): pass @abc.abstractproperty def pool(self): pass @abc.abstractproperty def member(self): pass @abc.abstractproperty def healthmonitor(self): pass @abc.abstractmethod def get_name(self): """Returns unique name across all LBaaS device drivers.""" pass @abc.abstractmethod def deploy_instance(self, loadbalancer): """Fully deploys a loadbalancer instance from a given loadbalancer.""" pass @abc.abstractmethod def undeploy_instance(self, loadbalancer_id, **kwargs): """Fully undeploys the loadbalancer instance.""" pass def remove_orphans(self, known_loadbalancer_ids): # Not all drivers will support this raise NotImplementedError() @six.add_metaclass(abc.ABCMeta) class BaseManager(object): def __init__(self, driver): self.driver = driver @abc.abstractmethod def create(self, obj): pass @abc.abstractmethod def update(self, old_obj, obj): pass @abc.abstractmethod def delete(self, obj): pass class BaseLoadBalancerManager(BaseManager): @abc.abstractmethod def get_stats(self, loadbalancer_id): pass class BaseListenerManager(BaseManager): pass class BasePoolManager(BaseManager): pass class BaseMemberManager(BaseManager): pass class BaseHealthMonitorManager(BaseManager): pass neutron-lbaas-8.0.0/neutron_lbaas/agent/agent_api.py0000664000567000056710000000551112701407726023675 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.common import rpc as n_rpc import oslo_messaging class LbaasAgentApi(object): """Agent side of the Agent to Plugin RPC API.""" # history # 1.0 Initial version def __init__(self, topic, context, host): self.context = context self.host = host target = oslo_messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) def get_ready_devices(self): cctxt = self.client.prepare() return cctxt.call(self.context, 'get_ready_devices', host=self.host) def get_loadbalancer(self, loadbalancer_id): cctxt = self.client.prepare() return cctxt.call(self.context, 'get_loadbalancer', loadbalancer_id=loadbalancer_id) def loadbalancer_deployed(self, loadbalancer_id): cctxt = self.client.prepare() return cctxt.call(self.context, 'loadbalancer_deployed', loadbalancer_id=loadbalancer_id) def update_status(self, obj_type, obj_id, provisioning_status=None, operating_status=None): cctxt = self.client.prepare() return cctxt.call(self.context, 'update_status', obj_type=obj_type, obj_id=obj_id, provisioning_status=provisioning_status, operating_status=operating_status) def loadbalancer_destroyed(self, loadbalancer_id): cctxt = self.client.prepare() return cctxt.call(self.context, 'loadbalancer_destroyed', loadbalancer_id=loadbalancer_id) def plug_vip_port(self, port_id): cctxt = self.client.prepare() return cctxt.call(self.context, 'plug_vip_port', port_id=port_id, host=self.host) def unplug_vip_port(self, port_id): cctxt = self.client.prepare() return cctxt.call(self.context, 'unplug_vip_port', port_id=port_id, host=self.host) def 
update_loadbalancer_stats(self, loadbalancer_id, stats): cctxt = self.client.prepare() return cctxt.call(self.context, 'update_loadbalancer_stats', loadbalancer_id=loadbalancer_id, stats=stats) neutron-lbaas-8.0.0/neutron_lbaas/agent/__init__.py0000664000567000056710000000000012701407726023471 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/agent/agent_manager.py0000664000567000056710000004073012701407726024540 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent import rpc as agent_rpc from neutron import context as ncontext from neutron.plugins.common import constants from neutron.services import provider_configuration as provconfig from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import loopingcall from oslo_service import periodic_task from oslo_utils import importutils from neutron_lbaas._i18n import _, _LE, _LI from neutron_lbaas.agent import agent_api from neutron_lbaas.drivers.common import agent_driver_base from neutron_lbaas.services.loadbalancer import constants as lb_const from neutron_lbaas.services.loadbalancer import data_models LOG = logging.getLogger(__name__) DEVICE_DRIVERS = 'device_drivers' OPTS = [ cfg.MultiStrOpt( 'device_driver', default=['neutron_lbaas.drivers.haproxy.' 
'namespace_driver.HaproxyNSDriver'], help=_('Drivers used to manage loadbalancing devices'), ), ] class DeviceNotFoundOnAgent(n_exc.NotFound): message = _('Unknown device with loadbalancer_id %(loadbalancer_id)s') class LbaasAgentManager(periodic_task.PeriodicTasks): # history # 1.0 Initial version target = oslo_messaging.Target(version='1.0') def __init__(self, conf): super(LbaasAgentManager, self).__init__(conf) self.conf = conf self.context = ncontext.get_admin_context_without_session() self.serializer = agent_driver_base.DataModelSerializer() self.plugin_rpc = agent_api.LbaasAgentApi( lb_const.LOADBALANCER_PLUGINV2, self.context, self.conf.host ) self._load_drivers() self.agent_state = { 'binary': 'neutron-lbaasv2-agent', 'host': conf.host, 'topic': lb_const.LOADBALANCER_AGENTV2, 'configurations': {'device_drivers': self.device_drivers.keys()}, 'agent_type': lb_const.AGENT_TYPE_LOADBALANCERV2, 'start_flag': True} self.admin_state_up = True self._setup_state_rpc() self.needs_resync = False # pool_id->device_driver_name mapping used to store known instances self.instance_mapping = {} def _load_drivers(self): self.device_drivers = {} for driver in self.conf.device_driver: driver = provconfig.get_provider_driver_class(driver, DEVICE_DRIVERS) try: driver_inst = importutils.import_object( driver, self.conf, self.plugin_rpc ) except ImportError: msg = _('Error importing loadbalancer device driver: %s') raise SystemExit(msg % driver) driver_name = driver_inst.get_name() if driver_name not in self.device_drivers: self.device_drivers[driver_name] = driver_inst else: msg = _('Multiple device drivers with the same name found: %s') raise SystemExit(msg % driver_name) def _setup_state_rpc(self): self.state_rpc = agent_rpc.PluginReportStateAPI( lb_const.LOADBALANCER_PLUGINV2) report_interval = self.conf.AGENT.report_interval if report_interval: heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) heartbeat.start(interval=report_interval) def 
_report_state(self): try: instance_count = len(self.instance_mapping) self.agent_state['configurations']['instances'] = instance_count self.state_rpc.report_state(self.context, self.agent_state) self.agent_state.pop('start_flag', None) except Exception: LOG.exception(_LE("Failed reporting state!")) def initialize_service_hook(self, started_by): self.sync_state() @periodic_task.periodic_task def periodic_resync(self, context): if self.needs_resync: self.needs_resync = False self.sync_state() @periodic_task.periodic_task(spacing=6) def collect_stats(self, context): for loadbalancer_id, driver_name in self.instance_mapping.items(): driver = self.device_drivers[driver_name] try: stats = driver.loadbalancer.get_stats(loadbalancer_id) if stats: self.plugin_rpc.update_loadbalancer_stats( loadbalancer_id, stats) except Exception: LOG.exception(_LE('Error updating statistics on loadbalancer' ' %s'), loadbalancer_id) self.needs_resync = True def sync_state(self): known_instances = set(self.instance_mapping.keys()) try: ready_instances = set(self.plugin_rpc.get_ready_devices()) for deleted_id in known_instances - ready_instances: self._destroy_loadbalancer(deleted_id) for loadbalancer_id in ready_instances: self._reload_loadbalancer(loadbalancer_id) except Exception: LOG.exception(_LE('Unable to retrieve ready devices')) self.needs_resync = True self.remove_orphans() def _get_driver(self, loadbalancer_id): if loadbalancer_id not in self.instance_mapping: raise DeviceNotFoundOnAgent(loadbalancer_id=loadbalancer_id) driver_name = self.instance_mapping[loadbalancer_id] return self.device_drivers[driver_name] def _reload_loadbalancer(self, loadbalancer_id): try: loadbalancer_dict = self.plugin_rpc.get_loadbalancer( loadbalancer_id) loadbalancer = data_models.LoadBalancer.from_dict( loadbalancer_dict) driver_name = loadbalancer.provider.device_driver if driver_name not in self.device_drivers: LOG.error(_LE('No device driver on agent: %s.'), driver_name) 
self.plugin_rpc.update_status( 'loadbalancer', loadbalancer_id, constants.ERROR) return self.device_drivers[driver_name].deploy_instance(loadbalancer) self.instance_mapping[loadbalancer_id] = driver_name self.plugin_rpc.loadbalancer_deployed(loadbalancer_id) except Exception: LOG.exception(_LE('Unable to deploy instance for ' 'loadbalancer: %s'), loadbalancer_id) self.needs_resync = True def _destroy_loadbalancer(self, lb_id): driver = self._get_driver(lb_id) try: driver.undeploy_instance(lb_id, delete_namespace=True) del self.instance_mapping[lb_id] self.plugin_rpc.loadbalancer_destroyed(lb_id) except Exception: LOG.exception(_LE('Unable to destroy device for loadbalancer: %s'), lb_id) self.needs_resync = True def remove_orphans(self): for driver_name in self.device_drivers: lb_ids = [lb_id for lb_id in self.instance_mapping if self.instance_mapping[lb_id] == driver_name] try: self.device_drivers[driver_name].remove_orphans(lb_ids) except NotImplementedError: pass # Not all drivers will support this def _handle_failed_driver_call(self, operation, obj, driver): obj_type = obj.__class__.__name__.lower() LOG.exception(_LE('%(operation)s %(obj)s %(id)s failed on device ' 'driver %(driver)s'), {'operation': operation.capitalize(), 'obj': obj_type, 'id': obj.id, 'driver': driver}) self._update_statuses(obj, error=True) def agent_updated(self, context, payload): """Handle the agent_updated notification event.""" if payload['admin_state_up'] != self.admin_state_up: self.admin_state_up = payload['admin_state_up'] if self.admin_state_up: self.needs_resync = True else: for loadbalancer_id in self.instance_mapping.keys(): LOG.info(_LI("Destroying loadbalancer %s due to agent " "disabling"), loadbalancer_id) self._destroy_loadbalancer(loadbalancer_id) LOG.info(_LI("Agent_updated by server side %s!"), payload) def _update_statuses(self, obj, error=False): lb_p_status = constants.ACTIVE lb_o_status = None obj_type = obj.__class__.__name__.lower() obj_p_status = constants.ACTIVE 
obj_o_status = lb_const.ONLINE if error: obj_p_status = constants.ERROR obj_o_status = lb_const.OFFLINE if isinstance(obj, data_models.HealthMonitor): obj_o_status = None if isinstance(obj, data_models.LoadBalancer): lb_o_status = lb_const.ONLINE if error: lb_p_status = constants.ERROR lb_o_status = lb_const.OFFLINE lb = obj else: lb = obj.root_loadbalancer self.plugin_rpc.update_status(obj_type, obj.id, provisioning_status=obj_p_status, operating_status=obj_o_status) self.plugin_rpc.update_status('loadbalancer', lb.id, provisioning_status=lb_p_status, operating_status=lb_o_status) def create_loadbalancer(self, context, loadbalancer, driver_name): loadbalancer = data_models.LoadBalancer.from_dict(loadbalancer) if driver_name not in self.device_drivers: LOG.error(_LE('No device driver on agent: %s.'), driver_name) self.plugin_rpc.update_status('loadbalancer', loadbalancer.id, provisioning_status=constants.ERROR) return driver = self.device_drivers[driver_name] try: driver.loadbalancer.create(loadbalancer) except Exception: self._handle_failed_driver_call('create', loadbalancer, driver.get_name()) else: self.instance_mapping[loadbalancer.id] = driver_name self._update_statuses(loadbalancer) def update_loadbalancer(self, context, old_loadbalancer, loadbalancer): loadbalancer = data_models.LoadBalancer.from_dict(loadbalancer) old_loadbalancer = data_models.LoadBalancer.from_dict(old_loadbalancer) driver = self._get_driver(loadbalancer.id) try: driver.loadbalancer.update(old_loadbalancer, loadbalancer) except Exception: self._handle_failed_driver_call('update', loadbalancer, driver.get_name()) else: self._update_statuses(loadbalancer) def delete_loadbalancer(self, context, loadbalancer): loadbalancer = data_models.LoadBalancer.from_dict(loadbalancer) driver = self._get_driver(loadbalancer.id) driver.loadbalancer.delete(loadbalancer) del self.instance_mapping[loadbalancer.id] def create_listener(self, context, listener): listener = 
data_models.Listener.from_dict(listener) driver = self._get_driver(listener.loadbalancer.id) try: driver.listener.create(listener) except Exception: self._handle_failed_driver_call('create', listener, driver.get_name()) else: self._update_statuses(listener) def update_listener(self, context, old_listener, listener): listener = data_models.Listener.from_dict(listener) old_listener = data_models.Listener.from_dict(old_listener) driver = self._get_driver(listener.loadbalancer.id) try: driver.listener.update(old_listener, listener) except Exception: self._handle_failed_driver_call('update', listener, driver.get_name()) else: self._update_statuses(listener) def delete_listener(self, context, listener): listener = data_models.Listener.from_dict(listener) driver = self._get_driver(listener.loadbalancer.id) driver.listener.delete(listener) def create_pool(self, context, pool): pool = data_models.Pool.from_dict(pool) driver = self._get_driver(pool.loadbalancer.id) try: driver.pool.create(pool) except Exception: self._handle_failed_driver_call('create', pool, driver.get_name()) else: self._update_statuses(pool) def update_pool(self, context, old_pool, pool): pool = data_models.Pool.from_dict(pool) old_pool = data_models.Pool.from_dict(old_pool) driver = self._get_driver(pool.loadbalancer.id) try: driver.pool.update(old_pool, pool) except Exception: self._handle_failed_driver_call('create', pool, driver.get_name()) else: self._update_statuses(pool) def delete_pool(self, context, pool): pool = data_models.Pool.from_dict(pool) driver = self._get_driver(pool.loadbalancer.id) driver.pool.delete(pool) def create_member(self, context, member): member = data_models.Member.from_dict(member) driver = self._get_driver(member.pool.loadbalancer.id) try: driver.member.create(member) except Exception: self._handle_failed_driver_call('create', member, driver.get_name()) else: self._update_statuses(member) def update_member(self, context, old_member, member): member = 
data_models.Member.from_dict(member) old_member = data_models.Member.from_dict(old_member) driver = self._get_driver(member.pool.loadbalancer.id) try: driver.member.update(old_member, member) except Exception: self._handle_failed_driver_call('create', member, driver.get_name()) else: self._update_statuses(member) def delete_member(self, context, member): member = data_models.Member.from_dict(member) driver = self._get_driver(member.pool.loadbalancer.id) driver.member.delete(member) def create_healthmonitor(self, context, healthmonitor): healthmonitor = data_models.HealthMonitor.from_dict(healthmonitor) driver = self._get_driver(healthmonitor.pool.loadbalancer.id) try: driver.healthmonitor.create(healthmonitor) except Exception: self._handle_failed_driver_call('create', healthmonitor, driver.get_name()) else: self._update_statuses(healthmonitor) def update_healthmonitor(self, context, old_healthmonitor, healthmonitor): healthmonitor = data_models.HealthMonitor.from_dict(healthmonitor) old_healthmonitor = data_models.HealthMonitor.from_dict( old_healthmonitor) driver = self._get_driver(healthmonitor.pool.loadbalancer.id) try: driver.healthmonitor.update(old_healthmonitor, healthmonitor) except Exception: self._handle_failed_driver_call('create', healthmonitor, driver.get_name()) else: self._update_statuses(healthmonitor) def delete_healthmonitor(self, context, healthmonitor): healthmonitor = data_models.HealthMonitor.from_dict(healthmonitor) driver = self._get_driver(healthmonitor.pool.loadbalancer.id) driver.healthmonitor.delete(healthmonitor) neutron-lbaas-8.0.0/neutron_lbaas/agent/agent.py0000664000567000056710000000412612701407727023046 0ustar jenkinsjenkins00000000000000# Copyright 2013 New Dream Network, LLC (DreamHost) # Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import eventlet eventlet.monkey_patch() from neutron.agent.common import config from neutron.agent.linux import interface from neutron.common import config as common_config from neutron.common import rpc as n_rpc from oslo_config import cfg from oslo_service import service from neutron_lbaas._i18n import _ from neutron_lbaas.agent import agent_manager as manager from neutron_lbaas.services.loadbalancer import constants OPTS = [ cfg.IntOpt( 'periodic_interval', default=10, help=_('Seconds between periodic task runs') ) ] class LbaasAgentService(n_rpc.Service): def start(self): super(LbaasAgentService, self).start() self.tg.add_timer( cfg.CONF.periodic_interval, self.manager.run_periodic_tasks, None, None ) def main(): cfg.CONF.register_opts(OPTS) cfg.CONF.register_opts(manager.OPTS) # import interface options just in case the driver uses namespaces cfg.CONF.register_opts(interface.OPTS) config.register_interface_driver_opts_helper(cfg.CONF) config.register_agent_state_opts_helper(cfg.CONF) config.register_root_helper(cfg.CONF) common_config.init(sys.argv[1:]) config.setup_logging() mgr = manager.LbaasAgentManager(cfg.CONF) svc = LbaasAgentService( host=cfg.CONF.host, topic=constants.LOADBALANCER_AGENTV2, manager=mgr ) service.launch(cfg.CONF, svc).wait() neutron-lbaas-8.0.0/neutron_lbaas/extensions/0000775000567000056710000000000012701410110022450 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/extensions/l7.py0000664000567000056710000002261712701407726023377 0ustar jenkinsjenkins00000000000000# Copyright 2016 Radware LTD # # All 
Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import base from neutron.api.v2 import resource_helper from neutron import manager from neutron.plugins.common import constants from neutron_lib import exceptions as nexception from neutron_lbaas._i18n import _ from neutron_lbaas.extensions import loadbalancerv2 from neutron_lbaas.services.loadbalancer import constants as lb_const LOADBALANCERV2_PREFIX = "/lbaas" class L7PolicyRedirectPoolIdMissing(nexception.Conflict): message = _("Redirect pool id is missing for L7 Policy with" " pool redirect action.") class L7PolicyRedirectUrlMissing(nexception.Conflict): message = _("Redirect URL is missing for L7 Policy with" " URL redirect action.") class RuleNotFoundForL7Policy(nexception.NotFound): message = _("Rule %(rule_id)s could not be found in" " l7 policy %(l7policy_id)s.") class L7RuleKeyMissing(nexception.NotFound): message = _("Rule key is missing." " Key should be specified for rules of " "HEADER and COOKIE types.") class L7RuleInvalidKey(nexception.BadRequest): message = _("Invalid characters in key. See RFCs 2616, 2965, 6265, 7230.") class L7RuleInvalidHeaderValue(nexception.BadRequest): message = _("Invalid characters in value. See RFC 7230.") class L7RuleInvalidCookieValue(nexception.BadRequest): message = _("Invalid characters in value. 
See RFCs 2616, 2965, 6265.") class L7RuleInvalidRegex(nexception.BadRequest): message = _("Unable to parse regular expression: %(e)s.") class L7RuleUnsupportedCompareType(nexception.BadRequest): message = _("Unsupported compare type for rule of %(type)s type.") RESOURCE_ATTRIBUTE_MAP = { 'l7policies': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': attr.NAME_MAX_LEN}, 'required_by_policy': True, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'default': '', 'is_visible': True}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.DESCRIPTION_MAX_LEN}, 'is_visible': True, 'default': ''}, 'listener_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'action': {'allow_post': True, 'allow_put': True, 'validate': { 'type:values': lb_const.SUPPORTED_L7_POLICY_ACTIONS}, 'is_visible': True}, 'redirect_pool_id': {'allow_post': True, 'allow_put': True, 'validate': {'type:uuid_or_none': None}, 'default': attr.ATTR_NOT_SPECIFIED, 'is_visible': True}, 'redirect_url': {'allow_post': True, 'allow_put': True, 'validate': { 'type:regex_or_none': lb_const.URL_REGEX}, 'default': None, 'is_visible': True}, # range max is (2^31 - 1) to get around MySQL quirk 'position': {'allow_post': True, 'allow_put': True, 'convert_to': attr.convert_to_int, 'validate': {'type:range': [1, 2147483647]}, 'default': 2147483647, 'is_visible': True}, 'rules': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'admin_state_up': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': attr.convert_to_boolean, 'is_visible': True}, 'status': {'allow_post': False, 'allow_put': False, 'is_visible': True} } } SUB_RESOURCE_ATTRIBUTE_MAP = { 'rules': { 'parent': {'collection_name': 'l7policies', 
'member_name': 'l7policy'}, 'parameters': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'required_by_policy': True, 'is_visible': True}, 'type': {'allow_post': True, 'allow_put': True, 'validate': { 'type:values': lb_const.SUPPORTED_L7_RULE_TYPES}, 'is_visible': True}, 'compare_type': {'allow_post': True, 'allow_put': True, 'validate': { 'type:values': lb_const.SUPPORTED_L7_RULE_COMPARE_TYPES}, 'is_visible': True}, 'invert': {'allow_post': True, 'allow_put': True, 'default': False, 'convert_to': attr.convert_to_boolean, 'is_visible': True}, 'key': {'allow_post': True, 'allow_put': True, 'validate': {'type:string_or_none': None}, 'default': None, 'is_visible': True}, 'value': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True}, 'admin_state_up': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': attr.convert_to_boolean, 'is_visible': True}, 'status': {'allow_post': False, 'allow_put': False, 'is_visible': True} } } } class L7(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "L7 capabilities for LBaaSv2" @classmethod def get_alias(cls): return "l7" @classmethod def get_description(cls): return "Adding L7 policies and rules support for LBaaSv2" @classmethod def get_namespace(cls): return "http://wiki.openstack.org/neutron/LBaaS/API_2.0" @classmethod def get_updated(cls): return "2016-01-24T10:00:00-00:00" def get_required_extensions(self): return ["lbaasv2"] def get_resources(cls): l7_plurals = {'l7policies': 'l7policy', 'rules': 'rule'} attr.PLURALS.update(l7_plurals) plural_mappings = resource_helper.build_plural_mappings( l7_plurals, RESOURCE_ATTRIBUTE_MAP) resources = resource_helper.build_resource_info( plural_mappings, RESOURCE_ATTRIBUTE_MAP, constants.LOADBALANCERV2, register_quota=True) plugin = 
manager.NeutronManager.get_service_plugins()[ constants.LOADBALANCERV2] for collection_name in SUB_RESOURCE_ATTRIBUTE_MAP: # Special handling needed for sub-resources with 'y' ending # (e.g. proxies -> proxy) resource_name = plural_mappings.get(collection_name, collection_name[:-1]) parent = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get('parent') params = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get( 'parameters') controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, parent=parent, allow_pagination=True, allow_sorting=True) resource = extensions.ResourceExtension( collection_name, controller, parent, path_prefix=LOADBALANCERV2_PREFIX, attr_map=params) resources.append(resource) return resources @classmethod def get_plugin_interface(cls): return loadbalancerv2.LoadBalancerPluginBaseV2 def update_attributes_map(self, attributes, extension_attrs_map=None): super(L7, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} neutron-lbaas-8.0.0/neutron_lbaas/extensions/__init__.py0000664000567000056710000000000012701407726024572 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/extensions/lbaas_agentschedulerv2.py0000664000567000056710000001113112701407726027451 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import abc from neutron.api import extensions from neutron.api.v2 import base from neutron.api.v2 import resource from neutron.extensions import agent from neutron import manager from neutron.plugins.common import constants as plugin_const from neutron import policy from neutron import wsgi from neutron_lbaas._i18n import _ from neutron_lbaas.extensions import loadbalancer from neutron_lbaas.extensions import loadbalancerv2 from neutron_lbaas.services.loadbalancer import constants as lb_const LOADBALANCER = 'agent-loadbalancer' LOADBALANCERS = LOADBALANCER + 's' LOADBALANCER_AGENT = 'loadbalancer-hosting-agent' class LoadBalancerSchedulerController(wsgi.Controller): def index(self, request, **kwargs): lbaas_plugin = manager.NeutronManager.get_service_plugins().get( plugin_const.LOADBALANCERV2) if not lbaas_plugin: return {'load_balancers': []} policy.enforce(request.context, "get_%s" % LOADBALANCERS, {}, plugin=lbaas_plugin) lbs = lbaas_plugin.db.list_loadbalancers_on_lbaas_agent( request.context, kwargs['agent_id']) return {'loadbalancers': [lb.to_api_dict() for lb in lbs]} class LbaasAgentHostingLoadBalancerController(wsgi.Controller): def index(self, request, **kwargs): lbaas_plugin = manager.NeutronManager.get_service_plugins().get( plugin_const.LOADBALANCERV2) if not lbaas_plugin: return policy.enforce(request.context, "get_%s" % LOADBALANCER_AGENT, {}, plugin=lbaas_plugin) return lbaas_plugin.db.get_agent_hosting_loadbalancer( request.context, kwargs['loadbalancer_id']) class Lbaas_agentschedulerv2(extensions.ExtensionDescriptor): """Extension class supporting LBaaS agent scheduler. 
""" @classmethod def get_name(cls): return "Loadbalancer Agent Scheduler V2" @classmethod def get_alias(cls): return lb_const.LBAAS_AGENT_SCHEDULER_V2_EXT_ALIAS @classmethod def get_description(cls): return "Schedule load balancers among lbaas agents" @classmethod def get_namespace(cls): return "http://docs.openstack.org/ext/lbaas_agent_scheduler/api/v1.0" @classmethod def get_updated(cls): return "2013-02-07T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" exts = [] parent = dict(member_name="agent", collection_name="agents") controller = resource.Resource(LoadBalancerSchedulerController(), base.FAULT_MAP) exts.append(extensions.ResourceExtension( LOADBALANCERS, controller, parent)) parent = dict(member_name="loadbalancer", collection_name="loadbalancers") controller = resource.Resource( LbaasAgentHostingLoadBalancerController(), base.FAULT_MAP) exts.append(extensions.ResourceExtension( LOADBALANCER_AGENT, controller, parent, path_prefix=loadbalancerv2.LOADBALANCERV2_PREFIX)) return exts def get_extended_resources(self, version): return {} class NoEligibleLbaasAgent(loadbalancer.NoEligibleBackend): message = _("No eligible agent found " "for loadbalancer %(loadbalancer_id)s.") class NoActiveLbaasAgent(agent.AgentNotFound): message = _("No active agent found " "for loadbalancer %(loadbalancer_id)s.") class LbaasAgentSchedulerPluginBase(object): """REST API to operate the lbaas agent scheduler. All of method must be in an admin context. """ @abc.abstractmethod def list_loadbalancers_on_lbaas_agent(self, context, id): pass @abc.abstractmethod def get_agent_hosting_loadbalancer(self, context, loadbalancer_id, active=None): pass neutron-lbaas-8.0.0/neutron_lbaas/extensions/loadbalancer.py0000664000567000056710000004756012701407726025470 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg import six from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import base from neutron.api.v2 import resource_helper from neutron import manager from neutron.plugins.common import constants from neutron.services import service_base from neutron_lib import exceptions as nexception from neutron_lbaas._i18n import _ from neutron_lbaas.extensions import loadbalancerv2 from neutron_lbaas.services.loadbalancer import constants as lb_const LOADBALANCER_PREFIX = "/lb" # Loadbalancer Exceptions class DelayOrTimeoutInvalid(nexception.BadRequest): message = _("Delay must be greater than or equal to timeout") class NoEligibleBackend(nexception.NotFound): message = _("No eligible backend for pool %(pool_id)s") class VipNotFound(nexception.NotFound): message = _("Vip %(vip_id)s could not be found") class VipExists(nexception.NeutronException): message = _("Another Vip already exists for pool %(pool_id)s") class PoolNotFound(nexception.NotFound): message = _("Pool %(pool_id)s could not be found") class MemberNotFound(nexception.NotFound): message = _("Member %(member_id)s could not be found") class HealthMonitorNotFound(nexception.NotFound): message = _("Health_monitor %(monitor_id)s could not be found") class PoolMonitorAssociationNotFound(nexception.NotFound): message = _("Monitor %(monitor_id)s is not associated " "with Pool %(pool_id)s") class 
PoolMonitorAssociationExists(nexception.Conflict): message = _('health_monitor %(monitor_id)s is already associated ' 'with pool %(pool_id)s') class StateInvalid(nexception.NeutronException): message = _("Invalid state %(state)s of Loadbalancer resource %(id)s") class PoolInUse(nexception.InUse): message = _("Pool %(pool_id)s is still in use") class HealthMonitorInUse(nexception.InUse): message = _("Health monitor %(monitor_id)s still has associations with " "pools") class PoolStatsNotFound(nexception.NotFound): message = _("Statistics of Pool %(pool_id)s could not be found") class ProtocolMismatch(nexception.BadRequest): message = _("Protocol %(vip_proto)s does not match " "pool protocol %(pool_proto)s") class MemberExists(nexception.NeutronException): message = _("Member with address %(address)s and port %(port)s " "already present in pool %(pool)s") attr.validators['type:connection_limit'] = ( loadbalancerv2._validate_connection_limit) RESOURCE_ATTRIBUTE_MAP = { 'vips': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'required_by_policy': True, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'default': '', 'is_visible': True}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'subnet_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'address': {'allow_post': True, 'allow_put': False, 'default': attr.ATTR_NOT_SPECIFIED, 'validate': {'type:ip_address_or_none': None}, 'is_visible': True}, 'port_id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'protocol_port': {'allow_post': True, 'allow_put': False, 'validate': {'type:range': [0, 65535]}, 'convert_to': attr.convert_to_int, 
'is_visible': True}, 'protocol': {'allow_post': True, 'allow_put': False, 'validate': {'type:values': ['TCP', 'HTTP', 'HTTPS']}, 'is_visible': True}, 'pool_id': {'allow_post': True, 'allow_put': True, 'validate': {'type:uuid': None}, 'is_visible': True}, 'session_persistence': {'allow_post': True, 'allow_put': True, 'convert_to': attr.convert_none_to_empty_dict, 'default': {}, 'validate': { 'type:dict_or_empty': { 'type': {'type:values': ['APP_COOKIE', 'HTTP_COOKIE', 'SOURCE_IP'], 'required': True}, 'cookie_name': {'type:string': None, 'required': False}}}, 'is_visible': True}, 'connection_limit': {'allow_post': True, 'allow_put': True, 'validate': {'type:connection_limit': lb_const.MIN_CONNECT_VALUE}, 'default': lb_const.MIN_CONNECT_VALUE, 'convert_to': attr.convert_to_int, 'is_visible': True}, 'admin_state_up': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': attr.convert_to_boolean, 'is_visible': True}, 'status': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'status_description': {'allow_post': False, 'allow_put': False, 'is_visible': True} }, 'pools': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'required_by_policy': True, 'is_visible': True}, 'vip_id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'default': '', 'is_visible': True}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'subnet_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'protocol': {'allow_post': True, 'allow_put': False, 'validate': {'type:values': ['TCP', 'HTTP', 'HTTPS']}, 'is_visible': True}, 'provider': {'allow_post': True, 
'allow_put': False, 'validate': {'type:string': None}, 'is_visible': True, 'default': attr.ATTR_NOT_SPECIFIED}, 'lb_method': {'allow_post': True, 'allow_put': True, 'validate': {'type:values': ['ROUND_ROBIN', 'LEAST_CONNECTIONS', 'SOURCE_IP']}, 'is_visible': True}, 'members': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'health_monitors': {'allow_post': True, 'allow_put': True, 'default': None, 'validate': {'type:uuid_list': None}, 'convert_to': attr.convert_to_list, 'is_visible': True}, 'health_monitors_status': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'admin_state_up': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': attr.convert_to_boolean, 'is_visible': True}, 'status': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'status_description': {'allow_post': False, 'allow_put': False, 'is_visible': True} }, 'members': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'required_by_policy': True, 'is_visible': True}, 'pool_id': {'allow_post': True, 'allow_put': True, 'validate': {'type:uuid': None}, 'is_visible': True}, 'address': {'allow_post': True, 'allow_put': False, 'validate': {'type:ip_address': None}, 'is_visible': True}, 'protocol_port': {'allow_post': True, 'allow_put': False, 'validate': {'type:range': [0, 65535]}, 'convert_to': attr.convert_to_int, 'is_visible': True}, 'weight': {'allow_post': True, 'allow_put': True, 'default': 1, 'validate': {'type:range': [0, 256]}, 'convert_to': attr.convert_to_int, 'is_visible': True}, 'admin_state_up': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': attr.convert_to_boolean, 'is_visible': True}, 'status': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'status_description': {'allow_post': False, 'allow_put': False, 'is_visible': True} }, 
'health_monitors': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'required_by_policy': True, 'is_visible': True}, 'type': {'allow_post': True, 'allow_put': False, 'validate': {'type:values': ['PING', 'TCP', 'HTTP', 'HTTPS']}, 'is_visible': True}, 'delay': {'allow_post': True, 'allow_put': True, 'validate': {'type:non_negative': None}, 'convert_to': attr.convert_to_int, 'is_visible': True}, 'timeout': {'allow_post': True, 'allow_put': True, 'validate': {'type:non_negative': None}, 'convert_to': attr.convert_to_int, 'is_visible': True}, 'max_retries': {'allow_post': True, 'allow_put': True, 'validate': {'type:range': [1, 10]}, 'convert_to': attr.convert_to_int, 'is_visible': True}, 'http_method': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'default': 'GET', 'is_visible': True}, 'url_path': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'default': '/', 'is_visible': True}, 'expected_codes': {'allow_post': True, 'allow_put': True, 'validate': { 'type:regex': r'^(\d{3}(\s*,\s*\d{3})*)$|^(\d{3}-\d{3})$'}, 'default': '200', 'is_visible': True}, 'admin_state_up': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': attr.convert_to_boolean, 'is_visible': True}, 'status': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'status_description': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'pools': {'allow_post': False, 'allow_put': False, 'is_visible': True} } } SUB_RESOURCE_ATTRIBUTE_MAP = { 'health_monitors': { 'parent': {'collection_name': 'pools', 'member_name': 'pool'}, 'parameters': {'id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'required_by_policy': True, 
'is_visible': True}, } } } lbaas_quota_opts = [ cfg.IntOpt('quota_vip', default=10, help=_('Number of vips allowed per tenant. ' 'A negative value means unlimited.')), cfg.IntOpt('quota_pool', default=10, help=_('Number of pools allowed per tenant. ' 'A negative value means unlimited.')), cfg.IntOpt('quota_member', default=-1, help=_('Number of pool members allowed per tenant. ' 'A negative value means unlimited.')), cfg.IntOpt('quota_health_monitor', default=-1, help=_('Number of health monitors allowed per tenant. ' 'A negative value means unlimited.')) ] cfg.CONF.register_opts(lbaas_quota_opts, 'QUOTAS') class Loadbalancer(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "LoadBalancing service" @classmethod def get_alias(cls): return "lbaas" @classmethod def get_description(cls): return "Extension for LoadBalancing service" @classmethod def get_namespace(cls): return "http://wiki.openstack.org/neutron/LBaaS/API_1.0" @classmethod def get_updated(cls): return "2012-10-07T10:00:00-00:00" @classmethod def get_resources(cls): plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) plural_mappings['health_monitors_status'] = 'health_monitor_status' attr.PLURALS.update(plural_mappings) action_map = {'pool': {'stats': 'GET'}} resources = resource_helper.build_resource_info(plural_mappings, RESOURCE_ATTRIBUTE_MAP, constants.LOADBALANCER, action_map=action_map, register_quota=True) plugin = manager.NeutronManager.get_service_plugins()[ constants.LOADBALANCER] for collection_name in SUB_RESOURCE_ATTRIBUTE_MAP: # Special handling needed for sub-resources with 'y' ending # (e.g. 
proxies -> proxy) resource_name = collection_name[:-1] parent = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get('parent') params = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get( 'parameters') controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, parent=parent) resource = extensions.ResourceExtension( collection_name, controller, parent, path_prefix=LOADBALANCER_PREFIX, attr_map=params) resources.append(resource) return resources @classmethod def get_plugin_interface(cls): return LoadBalancerPluginBase def update_attributes_map(self, attributes, extension_attrs_map=None): super(Loadbalancer, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} @six.add_metaclass(abc.ABCMeta) class LoadBalancerPluginBase(service_base.ServicePluginBase): def get_plugin_name(self): return constants.LOADBALANCER def get_plugin_type(self): return constants.LOADBALANCER def get_plugin_description(self): return 'LoadBalancer service plugin' @abc.abstractmethod def get_vips(self, context, filters=None, fields=None): pass @abc.abstractmethod def get_vip(self, context, id, fields=None): pass @abc.abstractmethod def create_vip(self, context, vip): pass @abc.abstractmethod def update_vip(self, context, id, vip): pass @abc.abstractmethod def delete_vip(self, context, id): pass @abc.abstractmethod def get_pools(self, context, filters=None, fields=None): pass @abc.abstractmethod def get_pool(self, context, id, fields=None): pass @abc.abstractmethod def create_pool(self, context, pool): pass @abc.abstractmethod def update_pool(self, context, id, pool): pass @abc.abstractmethod def delete_pool(self, context, id): pass @abc.abstractmethod def stats(self, context, pool_id): pass @abc.abstractmethod def create_pool_health_monitor(self, context, health_monitor, pool_id): pass @abc.abstractmethod def 
get_pool_health_monitor(self, context, id, pool_id, fields=None): pass @abc.abstractmethod def delete_pool_health_monitor(self, context, id, pool_id): pass @abc.abstractmethod def get_members(self, context, filters=None, fields=None): pass @abc.abstractmethod def get_member(self, context, id, fields=None): pass @abc.abstractmethod def create_member(self, context, member): pass @abc.abstractmethod def update_member(self, context, id, member): pass @abc.abstractmethod def delete_member(self, context, id): pass @abc.abstractmethod def get_health_monitors(self, context, filters=None, fields=None): pass @abc.abstractmethod def get_health_monitor(self, context, id, fields=None): pass @abc.abstractmethod def create_health_monitor(self, context, health_monitor): pass @abc.abstractmethod def update_health_monitor(self, context, id, health_monitor): pass @abc.abstractmethod def delete_health_monitor(self, context, id): pass neutron-lbaas-8.0.0/neutron_lbaas/extensions/sharedpools.py0000664000567000056710000000655012701407726025376 0ustar jenkinsjenkins00000000000000# Copyright 2016 Blue Box, an IBM Company # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.api import extensions from neutron_lib import exceptions as nexception from neutron_lbaas._i18n import _ class ListenerPoolLoadbalancerMismatch(nexception.BadRequest): message = _("Pool %(pool_id)s is on loadbalancer %(lb_id)s.") class ListenerDefaultPoolAlreadySet(nexception.InUse): message = _("Listener %(listener_id)s " "is already using default pool %(pool_id)s.") class PoolMustHaveLoadbalancer(nexception.BadRequest): message = _("Pool must be created with a loadbalancer or listener.") class ListenerMustHaveLoadbalancer(nexception.BadRequest): message = _("Listener must be created with a loadbalancer or pool.") class ListenerAndPoolMustBeOnSameLoadbalancer(nexception.BadRequest): message = _("Listener and pool must be on the same loadbalancer.") EXTENDED_ATTRIBUTES_2_0 = { 'loadbalancers': { 'pools': {'allow_post': False, 'allow_put': False, 'is_visible': True}}, 'listeners': { 'loadbalancer_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid_or_none': None}, 'default': None, 'is_visible': False}, 'default_pool_id': {'allow_post': True, 'allow_put': True, 'default': None, 'validate': {'type:uuid_or_none': None}, 'is_visible': True}}, 'pools': { 'listener_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid_or_none': None}, 'default': None, 'is_visible': False}, 'loadbalancer_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid_or_none': None}, 'default': None, 'is_visible': True}, 'loadbalancers': {'allow_post': False, 'allow_put': False, 'is_visible': True}}} class Sharedpools(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Shared pools for LBaaSv2" @classmethod def get_alias(cls): return "shared_pools" @classmethod def get_description(cls): return "Allow pools to be shared among listeners for LBaaSv2" @classmethod def get_namespace(cls): return "http://wiki.openstack.org/neutron/LBaaS/API_2.0" @classmethod def get_updated(cls): return "2016-01-20T10:00:00-00:00" def 
get_required_extensions(self): return ["lbaasv2"] def get_extended_resources(self, version): if version == "2.0": return dict(EXTENDED_ATTRIBUTES_2_0.items()) else: return {} neutron-lbaas-8.0.0/neutron_lbaas/extensions/lbaas_agentscheduler.py0000664000567000056710000001050112701407726027201 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from neutron.api import extensions from neutron.api.v2 import base from neutron.api.v2 import resource from neutron.extensions import agent from neutron import manager from neutron.plugins.common import constants as plugin_const from neutron import policy from neutron import wsgi from neutron_lib import constants from neutron_lbaas._i18n import _ from neutron_lbaas.extensions import loadbalancer LOADBALANCER_POOL = 'loadbalancer-pool' LOADBALANCER_POOLS = LOADBALANCER_POOL + 's' LOADBALANCER_AGENT = 'loadbalancer-agent' class PoolSchedulerController(wsgi.Controller): def index(self, request, **kwargs): lbaas_plugin = manager.NeutronManager.get_service_plugins().get( plugin_const.LOADBALANCER) if not lbaas_plugin: return {'pools': []} policy.enforce(request.context, "get_%s" % LOADBALANCER_POOLS, {}, plugin=lbaas_plugin) return lbaas_plugin.list_pools_on_lbaas_agent( request.context, kwargs['agent_id']) class LbaasAgentHostingPoolController(wsgi.Controller): def index(self, request, **kwargs): lbaas_plugin = 
manager.NeutronManager.get_service_plugins().get( plugin_const.LOADBALANCER) if not lbaas_plugin: return policy.enforce(request.context, "get_%s" % LOADBALANCER_AGENT, {}, plugin=lbaas_plugin) return lbaas_plugin.get_lbaas_agent_hosting_pool( request.context, kwargs['pool_id']) class Lbaas_agentscheduler(extensions.ExtensionDescriptor): """Extension class supporting LBaaS agent scheduler. """ @classmethod def get_name(cls): return "Loadbalancer Agent Scheduler" @classmethod def get_alias(cls): return constants.LBAAS_AGENT_SCHEDULER_EXT_ALIAS @classmethod def get_description(cls): return "Schedule pools among lbaas agents" @classmethod def get_namespace(cls): return "http://docs.openstack.org/ext/lbaas_agent_scheduler/api/v1.0" @classmethod def get_updated(cls): return "2013-02-07T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" exts = [] parent = dict(member_name="agent", collection_name="agents") controller = resource.Resource(PoolSchedulerController(), base.FAULT_MAP) exts.append(extensions.ResourceExtension( LOADBALANCER_POOLS, controller, parent)) parent = dict(member_name="pool", collection_name="pools") controller = resource.Resource(LbaasAgentHostingPoolController(), base.FAULT_MAP) exts.append(extensions.ResourceExtension( LOADBALANCER_AGENT, controller, parent, path_prefix=loadbalancer.LOADBALANCER_PREFIX)) return exts def get_extended_resources(self, version): return {} class NoEligibleLbaasAgent(loadbalancer.NoEligibleBackend): message = _("No eligible loadbalancer agent found " "for pool %(pool_id)s.") class NoActiveLbaasAgent(agent.AgentNotFound): message = _("No active loadbalancer agent found " "for pool %(pool_id)s.") class LbaasAgentSchedulerPluginBase(object): """REST API to operate the lbaas agent scheduler. All of method must be in an admin context. 
""" @abc.abstractmethod def list_pools_on_lbaas_agent(self, context, id): pass @abc.abstractmethod def get_lbaas_agent_hosting_pool(self, context, pool_id, active=None): pass neutron-lbaas-8.0.0/neutron_lbaas/extensions/loadbalancerv2.py0000664000567000056710000006155112701407726025734 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg from oslo_log import log as logging import six from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import base from neutron.api.v2 import resource_helper from neutron import manager from neutron.plugins.common import constants from neutron.services import service_base from neutron_lib import exceptions as nexception from neutron_lbaas._i18n import _ from neutron_lbaas.services.loadbalancer import constants as lb_const LOADBALANCERV2_PREFIX = "/lbaas" LOG = logging.getLogger(__name__) # Loadbalancer Exceptions # This exception is only for a workaround when having v1 and v2 lbaas extension # and plugins enabled class RequiredAttributeNotSpecified(nexception.BadRequest): message = _("Required attribute %(attr_name)s not specified") class EntityNotFound(nexception.NotFound): message = _("%(name)s %(id)s could not be found") class DelayOrTimeoutInvalid(nexception.BadRequest): message = _("Delay must be greater than or equal to timeout") class EntityInUse(nexception.InUse): 
message = _("%(entity_using)s %(id)s is using this %(entity_in_use)s") class OnePoolPerListener(nexception.InUse): message = _("Only one pool per listener allowed. Listener " "%(listener_id)s is already using Pool %(pool_id)s.") class OneHealthMonitorPerPool(nexception.InUse): message = _("Only one health monitor per pool allowed. Pool %(pool_id)s" " is already using Health Monitor %(hm_id)s.") class LoadBalancerListenerProtocolPortExists(nexception.Conflict): message = _("Load Balancer %(lb_id)s already has a listener with " "protocol_port of %(protocol_port)s") class ListenerPoolProtocolMismatch(nexception.Conflict): message = _("Listener protocol %(listener_proto)s and pool protocol " "%(pool_proto)s are not compatible.") class AttributeIDImmutable(nexception.NeutronException): message = _("Cannot change %(attribute)s if one already exists") class StateInvalid(nexception.NeutronException): message = _("Invalid state %(state)s of loadbalancer resource %(id)s") class MemberNotFoundForPool(nexception.NotFound): message = _("Member %(member_id)s could not be found in pool " "%(pool_id)s") class MemberExists(nexception.Conflict): message = _("Member with address %(address)s and protocol_port %(port)s " "already present in pool %(pool)s") class MemberAddressTypeSubnetTypeMismatch(nexception.NeutronException): message = _("Member with address %(address)s and subnet %(subnet_id) " "have mismatched IP versions") class DriverError(nexception.NeutronException): message = _("An error happened in the driver") class SessionPersistenceConfigurationInvalid(nexception.BadRequest): message = _("Session Persistence Invalid: %(msg)s") class TLSDefaultContainerNotSpecified(nexception.BadRequest): message = _("Default TLS container was not specified") class TLSContainerNotFound(nexception.NotFound): message = _("TLS container %(container_id)s could not be found") class TLSContainerInvalid(nexception.NeutronException): message = _("TLS container %(container_id)s is invalid. 
%(reason)s") class CertManagerError(nexception.NeutronException): message = _("Could not process TLS container %(ref)s, %(reason)s") class ProviderFlavorConflict(nexception.Conflict): message = _("Cannot specify both a flavor and a provider") class FlavorsPluginNotLoaded(nexception.NotFound): message = _("Flavors plugin not found") def _validate_connection_limit(data, min_value=lb_const.MIN_CONNECT_VALUE): if int(data) < min_value: msg = (_("'%(data)s' is not a valid value, " "because it cannot be less than %(min_value)s") % {'data': data, 'min_value': min_value}) LOG.debug(msg) return msg attr.validators['type:connection_limit'] = _validate_connection_limit RESOURCE_ATTRIBUTE_MAP = { 'loadbalancers': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.NAME_MAX_LEN}, 'default': '', 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:not_empty_string': attr.TENANT_ID_MAX_LEN}, 'required_by_policy': True, 'is_visible': True}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.DESCRIPTION_MAX_LEN}, 'is_visible': True, 'default': ''}, 'vip_subnet_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'vip_address': {'allow_post': True, 'allow_put': False, 'default': attr.ATTR_NOT_SPECIFIED, 'validate': {'type:ip_address_or_none': None}, 'is_visible': True}, 'vip_port_id': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'provider': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'is_visible': True, 'default': attr.ATTR_NOT_SPECIFIED}, 'listeners': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'admin_state_up': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': attr.convert_to_boolean, 'is_visible': True}, 
'provisioning_status': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'operating_status': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'flavor_id': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'validate': {'type:string': attr.NAME_MAX_LEN}, 'default': attr.ATTR_NOT_SPECIFIED} }, 'listeners': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:not_empty_string': attr.TENANT_ID_MAX_LEN}, 'required_by_policy': True, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.NAME_MAX_LEN}, 'default': '', 'is_visible': True}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.DESCRIPTION_MAX_LEN}, 'is_visible': True, 'default': ''}, 'loadbalancer_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': False}, 'loadbalancers': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'default_pool_id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'default_tls_container_ref': {'allow_post': True, 'allow_put': True, 'default': None, 'validate': {'type:string_or_none': 128}, 'is_visible': True}, 'sni_container_refs': {'allow_post': True, 'allow_put': True, 'default': None, 'convert_to': attr.convert_to_list, 'is_visible': True}, 'connection_limit': {'allow_post': True, 'allow_put': True, 'validate': {'type:connection_limit': lb_const.MIN_CONNECT_VALUE}, 'default': lb_const.MIN_CONNECT_VALUE, 'convert_to': attr.convert_to_int, 'is_visible': True}, 'protocol': {'allow_post': True, 'allow_put': False, 'validate': {'type:values': lb_const.LISTENER_SUPPORTED_PROTOCOLS}, 'is_visible': True}, 'protocol_port': {'allow_post': True, 'allow_put': False, 'validate': {'type:range': [0, 65535]}, 'convert_to': 
attr.convert_to_int, 'is_visible': True}, 'admin_state_up': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': attr.convert_to_boolean, 'is_visible': True} }, 'pools': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:not_empty_string': attr.TENANT_ID_MAX_LEN}, 'required_by_policy': True, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.NAME_MAX_LEN}, 'is_visible': True, 'default': ''}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.DESCRIPTION_MAX_LEN}, 'is_visible': True, 'default': ''}, 'listener_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': False}, 'listeners': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'healthmonitor_id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'protocol': {'allow_post': True, 'allow_put': False, 'validate': {'type:values': lb_const.POOL_SUPPORTED_PROTOCOLS}, 'is_visible': True}, 'lb_algorithm': {'allow_post': True, 'allow_put': True, 'validate': { 'type:values': lb_const.SUPPORTED_LB_ALGORITHMS}, 'is_visible': True}, 'session_persistence': { 'allow_post': True, 'allow_put': True, 'convert_to': attr.convert_none_to_empty_dict, 'default': {}, 'validate': { 'type:dict_or_empty': { 'type': { 'type:values': lb_const.SUPPORTED_SP_TYPES, 'required': True}, 'cookie_name': {'type:string': None, 'required': False}}}, 'is_visible': True}, 'members': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'admin_state_up': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': attr.convert_to_boolean, 'is_visible': True} }, 'healthmonitors': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 
'primary_key': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:not_empty_string': attr.TENANT_ID_MAX_LEN}, 'required_by_policy': True, 'is_visible': True}, 'pool_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': False}, 'pools': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'type': {'allow_post': True, 'allow_put': False, 'validate': { 'type:values': lb_const.SUPPORTED_HEALTH_MONITOR_TYPES}, 'is_visible': True}, 'delay': {'allow_post': True, 'allow_put': True, 'validate': {'type:non_negative': None}, 'convert_to': attr.convert_to_int, 'is_visible': True}, 'timeout': {'allow_post': True, 'allow_put': True, 'validate': {'type:non_negative': None}, 'convert_to': attr.convert_to_int, 'is_visible': True}, 'max_retries': {'allow_post': True, 'allow_put': True, 'validate': {'type:range': [1, 10]}, 'convert_to': attr.convert_to_int, 'is_visible': True}, 'http_method': {'allow_post': True, 'allow_put': True, 'validate': {'type:values': lb_const.SUPPORTED_HTTP_METHODS}, 'default': 'GET', 'is_visible': True}, 'url_path': {'allow_post': True, 'allow_put': True, 'validate': {'type:regex_or_none': lb_const.SUPPORTED_URL_PATH}, 'default': '/', 'is_visible': True}, 'expected_codes': { 'allow_post': True, 'allow_put': True, 'validate': { 'type:regex': r'^(\d{3}(\s*,\s*\d{3})*)$|^(\d{3}-\d{3})$' }, 'default': '200', 'is_visible': True }, 'admin_state_up': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': attr.convert_to_boolean, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.NAME_MAX_LEN}, 'default': '', 'is_visible': True} } } SUB_RESOURCE_ATTRIBUTE_MAP = { 'members': { 'parent': {'collection_name': 'pools', 'member_name': 'pool'}, 'parameters': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'tenant_id': {'allow_post': True, 'allow_put': 
False, 'validate': {'type:not_empty_string': attr.TENANT_ID_MAX_LEN}, 'required_by_policy': True, 'is_visible': True}, 'address': {'allow_post': True, 'allow_put': False, 'validate': {'type:ip_address': None}, 'is_visible': True}, 'protocol_port': {'allow_post': True, 'allow_put': False, 'validate': {'type:range': [0, 65535]}, 'convert_to': attr.convert_to_int, 'is_visible': True}, 'weight': {'allow_post': True, 'allow_put': True, 'default': 1, 'validate': {'type:range': [0, 256]}, 'convert_to': attr.convert_to_int, 'is_visible': True}, 'admin_state_up': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': attr.convert_to_boolean, 'is_visible': True}, 'subnet_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.NAME_MAX_LEN}, 'default': '', 'is_visible': True}, } } } lbaasv2_quota_opts = [ cfg.IntOpt('quota_loadbalancer', default=10, help=_('Number of LoadBalancers allowed per tenant. ' 'A negative value means unlimited.')), cfg.IntOpt('quota_listener', default=-1, help=_('Number of Loadbalancer Listeners allowed per tenant. ' 'A negative value means unlimited.')), cfg.IntOpt('quota_pool', default=10, help=_('Number of pools allowed per tenant. ' 'A negative value means unlimited.')), cfg.IntOpt('quota_member', default=-1, help=_('Number of pool members allowed per tenant. ' 'A negative value means unlimited.')), cfg.IntOpt('quota_healthmonitor', default=-1, help=_('Number of health monitors allowed per tenant. 
' 'A negative value means unlimited.')) ] cfg.CONF.register_opts(lbaasv2_quota_opts, 'QUOTAS') class Loadbalancerv2(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "LoadBalancing service v2" @classmethod def get_alias(cls): return "lbaasv2" @classmethod def get_description(cls): return "Extension for LoadBalancing service v2" @classmethod def get_namespace(cls): return "http://wiki.openstack.org/neutron/LBaaS/API_2.0" @classmethod def get_updated(cls): return "2014-06-18T10:00:00-00:00" @classmethod def get_resources(cls): plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) action_map = {'loadbalancer': {'stats': 'GET', 'statuses': 'GET'}} plural_mappings['members'] = 'member' plural_mappings['sni_container_refs'] = 'sni_container_ref' plural_mappings['sni_container_ids'] = 'sni_container_id' attr.PLURALS.update(plural_mappings) resources = resource_helper.build_resource_info( plural_mappings, RESOURCE_ATTRIBUTE_MAP, constants.LOADBALANCERV2, action_map=action_map, register_quota=True) plugin = manager.NeutronManager.get_service_plugins()[ constants.LOADBALANCERV2] for collection_name in SUB_RESOURCE_ATTRIBUTE_MAP: # Special handling needed for sub-resources with 'y' ending # (e.g. 
proxies -> proxy) resource_name = collection_name[:-1] parent = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get('parent') params = SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get( 'parameters') controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, parent=parent, allow_pagination=True, allow_sorting=True) resource = extensions.ResourceExtension( collection_name, controller, parent, path_prefix=LOADBALANCERV2_PREFIX, attr_map=params) resources.append(resource) return resources @classmethod def get_plugin_interface(cls): return LoadBalancerPluginBaseV2 def update_attributes_map(self, attributes, extension_attrs_map=None): super(Loadbalancerv2, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} @six.add_metaclass(abc.ABCMeta) class LoadBalancerPluginBaseV2(service_base.ServicePluginBase): def get_plugin_name(self): return constants.LOADBALANCERV2 def get_plugin_type(self): return constants.LOADBALANCERV2 def get_plugin_description(self): return 'LoadBalancer service plugin v2' @abc.abstractmethod def get_loadbalancers(self, context, filters=None, fields=None): pass @abc.abstractmethod def get_loadbalancer(self, context, id, fields=None): pass @abc.abstractmethod def create_loadbalancer(self, context, loadbalancer): pass @abc.abstractmethod def update_loadbalancer(self, context, id, loadbalancer): pass @abc.abstractmethod def delete_loadbalancer(self, context, id): pass @abc.abstractmethod def create_listener(self, context, listener): pass @abc.abstractmethod def get_listener(self, context, id, fields=None): pass @abc.abstractmethod def get_listeners(self, context, filters=None, fields=None): pass @abc.abstractmethod def update_listener(self, context, id, listener): pass @abc.abstractmethod def delete_listener(self, context, id): pass @abc.abstractmethod def get_pools(self, context, 
filters=None, fields=None): pass @abc.abstractmethod def get_pool(self, context, id, fields=None): pass @abc.abstractmethod def create_pool(self, context, pool): pass @abc.abstractmethod def update_pool(self, context, id, pool): pass @abc.abstractmethod def delete_pool(self, context, id): pass @abc.abstractmethod def stats(self, context, loadbalancer_id): pass @abc.abstractmethod def get_pool_members(self, context, pool_id, filters=None, fields=None): pass @abc.abstractmethod def get_pool_member(self, context, id, pool_id, fields=None): pass @abc.abstractmethod def create_pool_member(self, context, pool_id, member): pass @abc.abstractmethod def update_pool_member(self, context, id, pool_id, member): pass @abc.abstractmethod def delete_pool_member(self, context, id, pool_id): pass @abc.abstractmethod def get_healthmonitors(self, context, filters=None, fields=None): pass @abc.abstractmethod def get_healthmonitor(self, context, id, fields=None): pass @abc.abstractmethod def create_healthmonitor(self, context, healthmonitor): pass @abc.abstractmethod def update_healthmonitor(self, context, id, healthmonitor): pass @abc.abstractmethod def delete_healthmonitor(self, context, id): pass @abc.abstractmethod def get_members(self, context, filters=None, fields=None): pass @abc.abstractmethod def get_member(self, context, id, fields=None): pass @abc.abstractmethod def statuses(self, context, loadbalancer_id): pass def get_l7policies(self, context, filters=None, fields=None): pass @abc.abstractmethod def get_l7policy(self, context, id, fields=None): pass @abc.abstractmethod def create_l7policy(self, context, l7policy): pass @abc.abstractmethod def update_l7policy(self, context, id, l7policy): pass @abc.abstractmethod def delete_l7policy(self, context, id): pass @abc.abstractmethod def get_l7policy_rules(self, context, l7policy_id, filters=None, fields=None): pass @abc.abstractmethod def get_l7policy_rule(self, context, id, l7policy_id, fields=None): pass @abc.abstractmethod def 
create_l7policy_rule(self, context, rule, l7policy_id): pass @abc.abstractmethod def update_l7policy_rule(self, context, id, rule, l7policy_id): pass @abc.abstractmethod def delete_l7policy_rule(self, context, id, l7policy_id): pass neutron-lbaas-8.0.0/neutron_lbaas/common/0000775000567000056710000000000012701410110021541 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/common/__init__.py0000664000567000056710000000000012701407726023663 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/common/exceptions.py0000664000567000056710000000262012701407726024317 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Neutron Lbaas base exception handling. 
""" from neutron_lib import exceptions from neutron_lbaas._i18n import _LE class ModelMapException(exceptions.NeutronException): message = _LE("Unable to map model class %(target_name)s") class LbaasException(exceptions.NeutronException): pass class TLSException(LbaasException): pass class NeedsPassphrase(TLSException): message = _LE("Passphrase needed to decrypt key but client " "did not provide one.") class UnreadableCert(TLSException): message = _LE("Could not read X509 from PEM") class MisMatchedKey(TLSException): message = _LE("Key and x509 certificate do not match") class CertificateStorageException(TLSException): message = _LE('Could not store certificate: %(msg)s') neutron-lbaas-8.0.0/neutron_lbaas/common/cert_manager/0000775000567000056710000000000012701410110024170 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/common/cert_manager/barbican_cert_manager.py0000664000567000056710000002054512701407726031043 0ustar jenkinsjenkins00000000000000# Copyright 2014, 2015 Rackspace US, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from barbicanclient import client as barbican_client from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from stevedore import driver as stevedore_driver from neutron_lbaas._i18n import _LI, _LW, _LE from neutron_lbaas.common.cert_manager import cert_manager LOG = logging.getLogger(__name__) CONF = cfg.CONF class Cert(cert_manager.Cert): """Representation of a Cert based on the Barbican CertificateContainer.""" def __init__(self, cert_container): if not isinstance(cert_container, barbican_client.containers.CertificateContainer): raise TypeError(_LE( "Retrieved Barbican Container is not of the correct type " "(certificate).")) self._cert_container = cert_container # Container secrets are accessed upon query and can return as None, # don't return the payload if the secret is not available. def get_certificate(self): if self._cert_container.certificate: return self._cert_container.certificate.payload def get_intermediates(self): if self._cert_container.intermediates: return self._cert_container.intermediates.payload def get_private_key(self): if self._cert_container.private_key: return self._cert_container.private_key.payload def get_private_key_passphrase(self): if self._cert_container.private_key_passphrase: return self._cert_container.private_key_passphrase.payload class CertManager(cert_manager.CertManager): """Certificate Manager that wraps the Barbican client API.""" def __init__(self): super(CertManager, self).__init__() self.auth = stevedore_driver.DriverManager( namespace='neutron_lbaas.cert_manager.barbican_auth', name=cfg.CONF.certificates.barbican_auth, invoke_on_load=True, ).driver def store_cert(self, project_id, certificate, private_key, intermediates=None, private_key_passphrase=None, expiration=None, name='LBaaS TLS Cert'): """Stores a certificate in the certificate manager. 
:param certificate: PEM encoded TLS certificate :param private_key: private key for the supplied certificate :param intermediates: ordered and concatenated intermediate certs :param private_key_passphrase: optional passphrase for the supplied key :param expiration: the expiration time of the cert in ISO 8601 format :param name: a friendly name for the cert :returns: the container_ref of the stored cert :raises Exception: if certificate storage fails """ connection = self.auth.get_barbican_client(project_id) LOG.info(_LI( "Storing certificate container '{0}' in Barbican." ).format(name)) certificate_secret = None private_key_secret = None intermediates_secret = None pkp_secret = None try: certificate_secret = connection.secrets.create( payload=certificate, expiration=expiration, name="Certificate" ) private_key_secret = connection.secrets.create( payload=private_key, expiration=expiration, name="Private Key" ) certificate_container = connection.containers.create_certificate( name=name, certificate=certificate_secret, private_key=private_key_secret ) if intermediates: intermediates_secret = connection.secrets.create( payload=intermediates, expiration=expiration, name="Intermediates" ) certificate_container.intermediates = intermediates_secret if private_key_passphrase: pkp_secret = connection.secrets.create( payload=private_key_passphrase, expiration=expiration, name="Private Key Passphrase" ) certificate_container.private_key_passphrase = pkp_secret certificate_container.store() return certificate_container.container_ref # Barbican (because of Keystone-middleware) sometimes masks # exceptions strangely -- this will catch anything that it raises and # reraise the original exception, while also providing useful # feedback in the logs for debugging except Exception: for secret in [certificate_secret, private_key_secret, intermediates_secret, pkp_secret]: if secret and secret.secret_ref: old_ref = secret.secret_ref try: secret.delete() LOG.info(_LI( "Deleted secret {0} 
({1}) during rollback." ).format(secret.name, old_ref)) except Exception: LOG.warning(_LW( "Failed to delete {0} ({1}) during rollback. This " "is probably not a problem." ).format(secret.name, old_ref)) with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error storing certificate data")) def get_cert(self, project_id, cert_ref, resource_ref, check_only=False, service_name='lbaas'): """Retrieves the specified cert and registers as a consumer. :param cert_ref: the UUID of the cert to retrieve :param resource_ref: Full HATEOAS reference to the consuming resource :param check_only: Read Certificate data without registering :param service_name: Friendly name for the consuming service :returns: octavia.certificates.common.Cert representation of the certificate data :raises Exception: if certificate retrieval fails """ connection = self.auth.get_barbican_client(project_id) LOG.info(_LI( "Loading certificate container {0} from Barbican." ).format(cert_ref)) try: if check_only: cert_container = connection.containers.get( container_ref=cert_ref ) else: cert_container = connection.containers.register_consumer( container_ref=cert_ref, name=service_name, url=resource_ref ) return Cert(cert_container) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error getting {0}").format(cert_ref)) def delete_cert(self, project_id, cert_ref, resource_ref, service_name='lbaas'): """Deregister as a consumer for the specified cert. :param cert_ref: the UUID of the cert to retrieve :param service_name: Friendly name for the consuming service :param lb_id: Loadbalancer id for building resource consumer URL :raises Exception: if deregistration fails """ connection = self.auth.get_barbican_client(project_id) LOG.info(_LI( "Deregistering as a consumer of {0} in Barbican." 
).format(cert_ref)) try: connection.containers.remove_consumer( container_ref=cert_ref, name=service_name, url=resource_ref ) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE( "Error deregistering as a consumer of {0}" ).format(cert_ref)) neutron-lbaas-8.0.0/neutron_lbaas/common/cert_manager/__init__.py0000664000567000056710000000267312701407726026334 0ustar jenkinsjenkins00000000000000# Copyright 2015 Rackspace US, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from stevedore import driver CONF = cfg.CONF CERT_MANAGER_DEFAULT = 'barbican' cert_manager_opts = [ cfg.StrOpt('cert_manager_type', default=CERT_MANAGER_DEFAULT, help='Certificate Manager plugin. ' 'Defaults to {0}.'.format(CERT_MANAGER_DEFAULT)), cfg.StrOpt('barbican_auth', default='barbican_acl_auth', help='Name of the Barbican authentication method to use') ] CONF.register_opts(cert_manager_opts, group='certificates') _CERT_MANAGER_PLUGIN = None def get_backend(): global _CERT_MANAGER_PLUGIN if not _CERT_MANAGER_PLUGIN: _CERT_MANAGER_PLUGIN = driver.DriverManager( "neutron_lbaas.cert_manager.backend", cfg.CONF.certificates.cert_manager_type).driver return _CERT_MANAGER_PLUGIN neutron-lbaas-8.0.0/neutron_lbaas/common/cert_manager/cert_manager.py0000664000567000056710000000567212701407726027226 0ustar jenkinsjenkins00000000000000# Copyright 2014, 2015 Rackspace US, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Certificate manager API """ import abc from oslo_config import cfg import six @six.add_metaclass(abc.ABCMeta) class Cert(object): """Base class to represent all certificates.""" @abc.abstractmethod def get_certificate(self): """Returns the certificate.""" pass @abc.abstractmethod def get_intermediates(self): """Returns the intermediate certificates.""" pass @abc.abstractmethod def get_private_key(self): """Returns the private key for the certificate.""" pass @abc.abstractmethod def get_private_key_passphrase(self): """Returns the passphrase for the private key.""" pass @six.add_metaclass(abc.ABCMeta) class CertManager(object): """Base Cert Manager Interface A Cert Manager is responsible for managing certificates for TLS. """ @abc.abstractmethod def store_cert(self, project_id, certificate, private_key, intermediates=None, private_key_passphrase=None, expiration=None, name=None): """Stores (i.e., registers) a cert with the cert manager. This method stores the specified cert and returns its UUID that identifies it within the cert manager. If storage of the certificate data fails, a CertificateStorageException should be raised. """ pass @abc.abstractmethod def get_cert(self, project_id, cert_ref, resource_ref, check_only=False, service_name=None): """Retrieves the specified cert. If check_only is True, don't perform any sort of registration. If the specified cert does not exist, a CertificateStorageException should be raised. 
""" pass @abc.abstractmethod def delete_cert(self, project_id, cert_ref, resource_ref, service_name=None): """Deletes the specified cert. If the specified cert does not exist, a CertificateStorageException should be raised. """ pass @classmethod def get_service_url(cls, loadbalancer_id): # Format: ://// return "{0}://{1}/{2}/{3}".format( cfg.CONF.service_auth.service_name, cfg.CONF.service_auth.region, "loadbalancer", loadbalancer_id ) neutron-lbaas-8.0.0/neutron_lbaas/common/cert_manager/barbican_auth/0000775000567000056710000000000012701410110026752 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/common/cert_manager/barbican_auth/__init__.py0000664000567000056710000000000012701407726031074 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/common/cert_manager/barbican_auth/common.py0000664000567000056710000000175212701407726030644 0ustar jenkinsjenkins00000000000000# Copyright 2014-2016 Rackspace US, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six @six.add_metaclass(abc.ABCMeta) class BarbicanAuth(object): @abc.abstractmethod def get_barbican_client(self, project_id): """Creates a Barbican client object. 
:param project_id: Project ID that the request will be used for :return: a Barbican Client object :raises Exception: if the client cannot be created """ neutron-lbaas-8.0.0/neutron_lbaas/common/cert_manager/barbican_auth/barbican_acl.py0000664000567000056710000000325012701407726031727 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014-2016 Rackspace US, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Barbican ACL auth class for Barbican certificate handling """ from barbicanclient import client as barbican_client from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from neutron_lbaas._i18n import _LE from neutron_lbaas.common.cert_manager.barbican_auth import common from neutron_lbaas.common import keystone LOG = logging.getLogger(__name__) CONF = cfg.CONF class BarbicanACLAuth(common.BarbicanAuth): _barbican_client = None @classmethod def get_barbican_client(cls, project_id=None): if not cls._barbican_client: try: cls._barbican_client = barbican_client.Client( session=keystone.get_session(), region_name=CONF.service_auth.region, interface=CONF.service_auth.endpoint_type ) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error creating Barbican client")) return cls._barbican_client neutron-lbaas-8.0.0/neutron_lbaas/common/cert_manager/local_cert_manager.py0000664000567000056710000001663012701407726030374 0ustar jenkinsjenkins00000000000000# Copyright 2014, 2015 Rackspace 
US, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import uuid from oslo_config import cfg from oslo_log import log as logging from neutron_lbaas._i18n import _LI, _LE from neutron_lbaas.common.cert_manager import cert_manager from neutron_lbaas.common import exceptions LOG = logging.getLogger(__name__) CONF = cfg.CONF TLS_STORAGE_DEFAULT = os.environ.get( 'OS_LBAAS_TLS_STORAGE', '/var/lib/neutron-lbaas/certificates/' ) local_cert_manager_opts = [ cfg.StrOpt('storage_path', default=TLS_STORAGE_DEFAULT, help='Absolute path to the certificate storage directory. 
' 'Defaults to env[OS_LBAAS_TLS_STORAGE].') ] CONF.register_opts(local_cert_manager_opts, group='certificates') class Cert(cert_manager.Cert): """Representation of a Cert for local storage.""" def __init__(self, certificate, private_key, intermediates=None, private_key_passphrase=None): self.certificate = certificate self.intermediates = intermediates self.private_key = private_key self.private_key_passphrase = private_key_passphrase def get_certificate(self): return self.certificate def get_intermediates(self): return self.intermediates def get_private_key(self): return self.private_key def get_private_key_passphrase(self): return self.private_key_passphrase class CertManager(cert_manager.CertManager): """Cert Manager Interface that stores data locally.""" def store_cert(self, project_id, certificate, private_key, intermediates=None, private_key_passphrase=None, **kwargs): """Stores (i.e., registers) a cert with the cert manager. This method stores the specified cert to the filesystem and returns a UUID that can be used to retrieve it. :param project_id: Project ID for the owner of the certificate :param certificate: PEM encoded TLS certificate :param private_key: private key for the supplied certificate :param intermediates: ordered and concatenated intermediate certs :param private_key_passphrase: optional passphrase for the supplied key :returns: the UUID of the stored cert :raises CertificateStorageException: if certificate storage fails """ cert_ref = str(uuid.uuid4()) filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) LOG.info(_LI( "Storing certificate data on the local filesystem." 
)) try: filename_certificate = "{0}.crt".format(filename_base) with open(filename_certificate, 'w') as cert_file: cert_file.write(certificate) filename_private_key = "{0}.key".format(filename_base) with open(filename_private_key, 'w') as key_file: key_file.write(private_key) if intermediates: filename_intermediates = "{0}.int".format(filename_base) with open(filename_intermediates, 'w') as int_file: int_file.write(intermediates) if private_key_passphrase: filename_pkp = "{0}.pass".format(filename_base) with open(filename_pkp, 'w') as pass_file: pass_file.write(private_key_passphrase) except IOError as ioe: LOG.error(_LE("Failed to store certificate.")) raise exceptions.CertificateStorageException(message=ioe.message) return cert_ref def get_cert(self, project_id, cert_ref, resource_ref, **kwargs): """Retrieves the specified cert. :param project_id: Project ID for the owner of the certificate :param cert_ref: the UUID of the cert to retrieve :param resource_ref: Full HATEOAS reference to the consuming resource :returns: neutron_lbaas.common.cert_manager.cert_manager.Cert representation of the certificate data :raises CertificateStorageException: if certificate retrieval fails """ LOG.info(_LI( "Loading certificate {0} from the local filesystem." ).format(cert_ref)) filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) filename_certificate = "{0}.crt".format(filename_base) filename_private_key = "{0}.key".format(filename_base) filename_intermediates = "{0}.int".format(filename_base) filename_pkp = "{0}.pass".format(filename_base) cert_data = dict() try: with open(filename_certificate, 'r') as cert_file: cert_data['certificate'] = cert_file.read() except IOError: LOG.error(_LE( "Failed to read certificate for {0}." ).format(cert_ref)) raise exceptions.CertificateStorageException( msg="Certificate could not be read." 
) try: with open(filename_private_key, 'r') as key_file: cert_data['private_key'] = key_file.read() except IOError: LOG.error(_LE( "Failed to read private key for {0}." ).format(cert_ref)) raise exceptions.CertificateStorageException( msg="Private Key could not be read." ) try: with open(filename_intermediates, 'r') as int_file: cert_data['intermediates'] = int_file.read() except IOError: pass try: with open(filename_pkp, 'r') as pass_file: cert_data['private_key_passphrase'] = pass_file.read() except IOError: pass return Cert(**cert_data) def delete_cert(self, project_id, cert_ref, resource_ref, **kwargs): """Deletes the specified cert. :param project_id: Project ID for the owner of the certificate :param cert_ref: the UUID of the cert to delete :param resource_ref: Full HATEOAS reference to the consuming resource :raises CertificateStorageException: if certificate deletion fails """ LOG.info(_LI( "Deleting certificate {0} from the local filesystem." ).format(cert_ref)) filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) filename_certificate = "{0}.crt".format(filename_base) filename_private_key = "{0}.key".format(filename_base) filename_intermediates = "{0}.int".format(filename_base) filename_pkp = "{0}.pass".format(filename_base) try: os.remove(filename_certificate) os.remove(filename_private_key) os.remove(filename_intermediates) os.remove(filename_pkp) except IOError as ioe: LOG.error(_LE( "Failed to delete certificate {0}." ).format(cert_ref)) raise exceptions.CertificateStorageException(message=ioe.message) neutron-lbaas-8.0.0/neutron_lbaas/common/keystone.py0000664000567000056710000000701712701407726024004 0ustar jenkinsjenkins00000000000000# Copyright 2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1.identity import v2 as v2_client from keystoneauth1.identity import v3 as v3_client from keystoneauth1 import session from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from neutron_lbaas._i18n import _, _LE LOG = logging.getLogger(__name__) _SESSION = None OPTS = [ cfg.StrOpt( 'auth_url', default='http://127.0.0.1:5000/v2.0', help=_('Authentication endpoint'), ), cfg.StrOpt( 'admin_user', default='admin', help=_('The service admin user name'), ), cfg.StrOpt( 'admin_tenant_name', default='admin', help=_('The service admin tenant name'), ), cfg.StrOpt( 'admin_password', secret=True, default='password', help=_('The service admin password'), ), cfg.StrOpt( 'admin_user_domain', default='admin', help=_('The admin user domain name'), ), cfg.StrOpt( 'admin_project_domain', default='admin', help=_('The admin project domain name'), ), cfg.StrOpt( 'region', default='RegionOne', help=_('The deployment region'), ), cfg.StrOpt( 'service_name', default='lbaas', help=_('The name of the service'), ), cfg.StrOpt( 'auth_version', default='2', help=_('The auth version used to authenticate'), ), cfg.StrOpt( 'endpoint_type', default='public', help=_('The endpoint_type to be used') ) ] cfg.CONF.register_opts(OPTS, 'service_auth') def get_session(): """Initializes a Keystone session. 
:returns: a Keystone Session object :raises Exception: if the session cannot be established """ global _SESSION if not _SESSION: auth_url = cfg.CONF.service_auth.auth_url kwargs = {'auth_url': auth_url, 'username': cfg.CONF.service_auth.admin_user, 'password': cfg.CONF.service_auth.admin_password} if cfg.CONF.service_auth.auth_version == '2': client = v2_client kwargs['tenant_name'] = cfg.CONF.service_auth.admin_tenant_name elif cfg.CONF.service_auth.auth_version == '3': client = v3_client kwargs['project_name'] = cfg.CONF.service_auth.admin_tenant_name kwargs['user_domain_name'] = (cfg.CONF.service_auth. admin_user_domain) kwargs['project_domain_name'] = (cfg.CONF.service_auth. admin_project_domain) else: raise Exception('Unknown keystone version!') try: kc = client.Password(**kwargs) _SESSION = session.Session(auth=kc) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error creating Keystone session.")) return _SESSION neutron-lbaas-8.0.0/neutron_lbaas/common/tls_utils/0000775000567000056710000000000012701410110023563 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/common/tls_utils/cert_parser.py0000664000567000056710000001434012701407727026474 0ustar jenkinsjenkins00000000000000# # Copyright 2014 OpenStack Foundation. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from cryptography.hazmat import backends from cryptography.hazmat.primitives import serialization from cryptography import x509 from neutron.i18n import _LE from oslo_log import log as logging import six import neutron_lbaas.common.exceptions as exceptions X509_BEG = "-----BEGIN CERTIFICATE-----" X509_END = "-----END CERTIFICATE-----" LOG = logging.getLogger(__name__) def validate_cert(certificate, private_key=None, private_key_passphrase=None, intermediates=None): """ Validate that the certificate is a valid PEM encoded X509 object Optionally verify that the private key matches the certificate. Optionally verify that the intermediates are valid X509 objects. :param certificate: A PEM encoded certificate :param private_key: The private key for the certificate :param private_key_passphrase: Passphrase for accessing the private key :param intermediates: PEM encoded intermediate certificates :returns: boolean """ cert = _get_x509_from_pem_bytes(certificate) if intermediates: for x509Pem in _split_x509s(intermediates): _get_x509_from_pem_bytes(x509Pem) if private_key: pkey = _read_privatekey(private_key, passphrase=private_key_passphrase) pknum = pkey.public_key().public_numbers() certnum = cert.public_key().public_numbers() if pknum != certnum: raise exceptions.MisMatchedKey return True def _read_privatekey(privatekey_pem, passphrase=None): if passphrase: if six.PY2: passphrase = passphrase.encode("utf-8") elif six.PY3: passphrase = six.b(passphrase) try: pkey = privatekey_pem.encode('ascii') return serialization.load_pem_private_key(pkey, passphrase, backends.default_backend()) except Exception: raise exceptions.NeedsPassphrase def _split_x509s(x509Str): """ Split the input string into individb(ual x509 text blocks :param x509Str: A large multi x509 certificate blcok :returns: A list of strings where each string represents an X509 pem block surrounded by BEGIN CERTIFICATE, END CERTIFICATE block tags """ curr_pem_block = [] inside_x509 = False for line in 
x509Str.replace("\r", "").split("\n"): if inside_x509: curr_pem_block.append(line) if line == X509_END: yield "\n".join(curr_pem_block) curr_pem_block = [] inside_x509 = False continue else: if line == X509_BEG: curr_pem_block.append(line) inside_x509 = True def _read_pyca_private_key(private_key, private_key_passphrase=None): kw = {"password": None, "backend": backends.default_backend()} if private_key_passphrase is not None: kw["password"] = private_key_passphrase.encode("utf-8") else: kw["password"] = None try: pk = serialization.load_pem_private_key(private_key.encode('ascii'), **kw) return pk except TypeError as ex: if len(ex.args) > 0 and ex.args[0].startswith("Password"): raise exceptions.NeedsPassphrase def dump_private_key(private_key, private_key_passphrase=None): """ Parses encrypted key to provide an unencrypted version in PKCS8 :param private_key: private key :param private_key_passphrase: private key passphrase :returns: Unencrypted private key in PKCS8 """ # re encode the key as unencrypted PKCS8 pk = _read_pyca_private_key(private_key, private_key_passphrase=private_key_passphrase) key = pk.private_bytes(encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption()) return key def get_host_names(certificate): """Extract the host names from the Pem encoded X509 certificate :param certificate: A PEM encoded certificate :returns: A dictionary containing the following keys: ['cn', 'dns_names'] where 'cn' is the CN from the SubjectName of the certificate, and 'dns_names' is a list of dNSNames (possibly empty) from the SubjectAltNames of the certificate. 
""" try: certificate = certificate.encode('ascii') cert = _get_x509_from_pem_bytes(certificate) cn = cert.subject.get_attributes_for_oid(x509.OID_COMMON_NAME)[0] host_names = { 'cn': cn.value.lower(), 'dns_names': [] } try: ext = cert.extensions.get_extension_for_oid( x509.OID_SUBJECT_ALTERNATIVE_NAME ) host_names['dns_names'] = ext.value.get_values_for_type( x509.DNSName) except x509.ExtensionNotFound: LOG.debug("%s extension not found", x509.OID_SUBJECT_ALTERNATIVE_NAME) return host_names except Exception: LOG.exception(_LE("Unreadable certificate.")) raise exceptions.UnreadableCert def _get_x509_from_pem_bytes(certificate_pem): """ Parse X509 data from a PEM encoded certificate :param certificate_pem: Certificate in PEM format :returns: crypto high-level x509 data from the PEM string """ try: certificate = certificate_pem.encode('ascii') x509cert = x509.load_pem_x509_certificate(certificate, backends.default_backend()) except Exception: raise exceptions.UnreadableCert return x509cert neutron-lbaas-8.0.0/neutron_lbaas/common/tls_utils/__init__.py0000664000567000056710000000000012701407726025705 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/version.py0000664000567000056710000000126412701407726022336 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import pbr.version version_info = pbr.version.VersionInfo('neutron-lbaas') neutron-lbaas-8.0.0/neutron_lbaas/db/0000775000567000056710000000000012701410110020636 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/__init__.py0000664000567000056710000000000012701407726022760 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/models/0000775000567000056710000000000012701410110022121 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/models/__init__.py0000664000567000056710000000000012701407726024243 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/models/head.py0000664000567000056710000000144112701407726023417 0ustar jenkinsjenkins00000000000000# Copyright 2016 Midokura SARL # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from neutron.db.migration.models import head import neutron_lbaas.db.loadbalancer.loadbalancer_db # noqa import neutron_lbaas.db.loadbalancer.models # noqa def get_metadata(): return head.model_base.BASEV2.metadata neutron-lbaas-8.0.0/neutron_lbaas/db/migration/0000775000567000056710000000000012701410110022627 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/migration/__init__.py0000664000567000056710000000000012701407726024751 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/0000775000567000056710000000000012701410110026457 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/README0000664000567000056710000000004612701407726027362 0ustar jenkinsjenkins00000000000000Generic single-database configuration.neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/env.py0000664000567000056710000000460612701407726027652 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from logging import config as logging_config from alembic import context from neutron.db import model_base from oslo_config import cfg from oslo_db.sqlalchemy import session import sqlalchemy as sa from sqlalchemy import event MYSQL_ENGINE = None LBAAS_VERSION_TABLE = 'alembic_version_lbaas' config = context.config neutron_config = config.neutron_config logging_config.fileConfig(config.config_file_name) target_metadata = model_base.BASEV2.metadata def set_mysql_engine(): try: mysql_engine = neutron_config.command.mysql_engine except cfg.NoSuchOptError: mysql_engine = None global MYSQL_ENGINE MYSQL_ENGINE = (mysql_engine or model_base.BASEV2.__table_args__['mysql_engine']) def run_migrations_offline(): set_mysql_engine() kwargs = dict() if neutron_config.database.connection: kwargs['url'] = neutron_config.database.connection else: kwargs['dialect_name'] = neutron_config.database.engine kwargs['version_table'] = LBAAS_VERSION_TABLE context.configure(**kwargs) with context.begin_transaction(): context.run_migrations() @event.listens_for(sa.Table, 'after_parent_attach') def set_storage_engine(target, parent): if MYSQL_ENGINE: target.kwargs['mysql_engine'] = MYSQL_ENGINE def run_migrations_online(): set_mysql_engine() engine = session.create_engine(neutron_config.database.connection) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata, version_table=LBAAS_VERSION_TABLE ) try: with context.begin_transaction(): context.run_migrations() finally: connection.close() engine.dispose() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/0000775000567000056710000000000012701410110030327 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/kilo_release.py0000664000567000056710000000151412701407726033363 0ustar jenkinsjenkins00000000000000# Licensed under the Apache 
License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """kilo Revision ID: kilo Revises: 4ba00375f715 Create Date: 2015-04-16 00:00:00.000000 """ # revision identifiers, used by Alembic. revision = 'kilo' down_revision = '4ba00375f715' def upgrade(): """A no-op migration for marking the Kilo release.""" pass neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/mitaka/0000775000567000056710000000000012701410110031575 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/mitaka/expand/0000775000567000056710000000000012701410110033054 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/mitaka/expand/3543deab1547_add_l7_tables.pyneutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/mitaka/expand/3543deab1540000664000567000056710000000615312701407726034473 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # """add_l7_tables Revision ID: 3543deab1547 Revises: 6aee0434f911 Create Date: 2015-02-05 10:50:15.606420 """ # revision identifiers, used by Alembic. revision = '3543deab1547' down_revision = '6aee0434f911' from alembic import op import sqlalchemy as sa l7rule_type = sa.Enum("HOST_NAME", "PATH", "FILE_TYPE", "HEADER", "COOKIE", name="l7rule_typesv2") l7rule_compare_type = sa.Enum("REGEX", "STARTS_WITH", "ENDS_WITH", "CONTAINS", "EQUAL_TO", name="l7rule_compare_typesv2") l7policy_action_type = sa.Enum("REJECT", "REDIRECT_TO_URL", "REDIRECT_TO_POOL", name="l7policy_action_typesv2") def upgrade(): op.create_table( u'lbaas_l7policies', sa.Column(u'tenant_id', sa.String(255), nullable=True), sa.Column(u'id', sa.String(36), nullable=False), sa.Column(u'name', sa.String(255), nullable=True), sa.Column(u'description', sa.String(255), nullable=True), sa.Column(u'listener_id', sa.String(36), nullable=False), sa.Column(u'action', l7policy_action_type, nullable=False), sa.Column(u'redirect_pool_id', sa.String(36), nullable=True), sa.Column(u'redirect_url', sa.String(255), nullable=True), sa.Column(u'position', sa.Integer, nullable=False), sa.Column(u'provisioning_status', sa.String(16), nullable=False), sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint(u'id'), sa.ForeignKeyConstraint([u'listener_id'], [u'lbaas_listeners.id']), sa.ForeignKeyConstraint([u'redirect_pool_id'], [u'lbaas_pools.id']) ) op.create_table( u'lbaas_l7rules', sa.Column(u'tenant_id', sa.String(255), nullable=True), sa.Column(u'id', sa.String(36), nullable=False), sa.Column(u'l7policy_id', sa.String(36), nullable=False), sa.Column(u'type', l7rule_type, nullable=False), sa.Column(u'compare_type', l7rule_compare_type, nullable=False), sa.Column(u'invert', sa.Boolean(), nullable=False), sa.Column(u'key', sa.String(255), nullable=True), sa.Column(u'value', sa.String(255), 
nullable=False), sa.Column(u'provisioning_status', sa.String(16), nullable=False), sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint(u'id'), sa.ForeignKeyConstraint([u'l7policy_id'], [u'lbaas_l7policies.id']) ) ././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/mitaka/expand/4a408dd491c2_UpdateName.pyneutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/mitaka/expand/4a408dd491c0000664000567000056710000000212312701407726034471 0ustar jenkinsjenkins00000000000000# Copyright 2015 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Addition of Name column to lbaas_members and lbaas_healthmonitors table Revision ID: 4a408dd491c2 Revises: 3345facd0452 Create Date: 2015-11-16 11:47:43.061649 """ # revision identifiers, used by Alembic. 
revision = '4a408dd491c2' down_revision = '3345facd0452' from alembic import op import sqlalchemy as sa LB_TAB_NAME = ['lbaas_members', 'lbaas_healthmonitors'] def upgrade(): for table in LB_TAB_NAME: op.add_column(table, sa.Column('name', sa.String(255), nullable=True)) ././@LongLink0000000000000000000000000000017300000000000011216 Lustar 00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/mitaka/expand/6aee0434f911_independent_pools.pyneutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/mitaka/expand/6aee0434f910000664000567000056710000000640112701407726034476 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # Copyright 2015 Blue Box, an IBM Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """independent pools Revision ID: 6aee0434f911 Revises: 3426acbc12de Create Date: 2015-08-28 03:15:42.533386 """ # revision identifiers, used by Alembic. revision = '6aee0434f911' down_revision = '3426acbc12de' from alembic import op import sqlalchemy as sa def upgrade(): conn = op.get_bind() # Minimal examples of the tables we need to manipulate listeners = sa.sql.table( 'lbaas_listeners', sa.sql.column('loadbalancer_id', sa.String), sa.sql.column('default_pool_id', sa.String)) pools = sa.sql.table( 'lbaas_pools', sa.sql.column('loadbalancer_id', sa.String), sa.sql.column('id', sa.String)) # This foreign key does not need to be unique anymore. 
To remove the # uniqueness but keep the foreign key we have to do some juggling. # # Also, because different database engines handle unique constraints # in incompatible ways, we can't simply call op.drop_constraint and # expect it to work for all DB engines. This is yet another unfortunate # case where sqlalchemy isn't able to abstract everything away. if op.get_context().dialect.name == 'postgresql': # PostgreSQL path: op.drop_constraint('lbaas_listeners_default_pool_id_key', 'lbaas_listeners', 'unique') else: # MySQL path: op.drop_constraint('lbaas_listeners_ibfk_2', 'lbaas_listeners', type_='foreignkey') op.drop_constraint('default_pool_id', 'lbaas_listeners', type_='unique') op.create_foreign_key('lbaas_listeners_ibfk_2', 'lbaas_listeners', 'lbaas_pools', ['default_pool_id'], ['id']) op.add_column( u'lbaas_pools', sa.Column('loadbalancer_id', sa.String(36), sa.ForeignKey('lbaas_loadbalancers.id'), nullable=True) ) # Populate this new column appropriately select_obj = sa.select([listeners.c.loadbalancer_id, listeners.c.default_pool_id]).where( listeners.c.default_pool_id is not None) result = conn.execute(select_obj) for row in result: stmt = pools.update().values(loadbalancer_id=row[0]).where( pools.c.id == row[1]) op.execute(stmt) # For existing installations, the above ETL should populate the above column # using the following procedure: # # Get the output from this: # # SELECT default_pool_id, loadbalancer_id l_id FROM lbaas_listeners WHERE # default_pool_id IS NOT NULL; # # Then for every row returned run: # # UPDATE lbaas_pools SET loadbalancer_id = l_id WHERE id = default_pool_id; ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/mitaka/expand/3426acbc12de_add_flavor_id.pyneutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/mitaka/expand/3426acbc12d0000664000567000056710000000234612701407726034545 0ustar 
jenkinsjenkins00000000000000# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Add flavor id Revision ID: 3426acbc12de Revises: 4a408dd491c2 Create Date: 2015-12-02 15:24:35.775474 """ # revision identifiers, used by Alembic. revision = '3426acbc12de' down_revision = '4a408dd491c2' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('lbaas_loadbalancers', sa.Column(u'flavor_id', sa.String(36), nullable=True)) op.create_foreign_key(u'fk_lbaas_loadbalancers_flavors_id', u'lbaas_loadbalancers', u'flavors', [u'flavor_id'], [u'id']) ././@LongLink0000000000000000000000000000021300000000000011211 Lustar 00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/mitaka/expand/62deca5010cd_add_tenant_id_index_for_l7_tables.pyneutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/mitaka/expand/62deca5010c0000664000567000056710000000226412701407726034543 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Midokura SARL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Add tenant-id index for L7 tables Revision ID: 62deca5010cd Revises: 3543deab1547 Create Date: 2016-03-02 08:42:37.737281 """ from alembic import op from neutron.db import migration # revision identifiers, used by Alembic. revision = '62deca5010cd' down_revision = '3543deab1547' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.MITAKA] def upgrade(): for table in ['lbaas_l7rules', 'lbaas_l7policies']: op.create_index(op.f('ix_%s_tenant_id' % table), table, ['tenant_id'], unique=False) neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/CONTRACT_HEAD0000664000567000056710000000001412701407726032266 0ustar jenkinsjenkins00000000000000130ebfdef43 neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/start_neutron_lbaas.py0000664000567000056710000000153312701407726034777 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """start neutron-lbaas chain Revision ID: start_neutron_lbaas Revises: None Create Date: 2014-12-09 11:06:18.196062 """ # revision identifiers, used by Alembic. revision = 'start_neutron_lbaas' down_revision = None def upgrade(): pass neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/EXPAND_HEAD0000664000567000056710000000001512701407726032031 0ustar jenkinsjenkins0000000000000062deca5010cd ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/4ba00375f715_edge_driver.pyneutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/4ba00375f715_edge_driver.0000664000567000056710000000457012701407726034370 0ustar jenkinsjenkins00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """edge_driver Revision ID: 4ba00375f715 Revises: lbaasv2_tls Create Date: 2015-02-03 20:35:54.830634 """ # revision identifiers, used by Alembic. 
revision = '4ba00375f715' down_revision = 'lbaasv2_tls' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'nsxv_edge_pool_mappings', sa.Column('pool_id', sa.String(length=36), nullable=False), sa.Column('edge_id', sa.String(length=36), nullable=False), sa.Column('edge_pool_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('pool_id') ) op.create_table( 'nsxv_edge_vip_mappings', sa.Column('pool_id', sa.String(length=36), nullable=False), sa.Column('edge_id', sa.String(length=36), nullable=False), sa.Column('edge_app_profile_id', sa.String(length=36), nullable=False), sa.Column('edge_vse_id', sa.String(length=36), nullable=False), sa.Column('edge_fw_rule_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('pool_id') ) op.create_table( 'nsxv_edge_monitor_mappings', sa.Column('monitor_id', sa.String(length=36), nullable=False), sa.Column('edge_id', sa.String(length=36), nullable=False), sa.Column('edge_monitor_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['monitor_id'], ['healthmonitors.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('monitor_id'), sa.UniqueConstraint('monitor_id', 'edge_id', name='uniq_nsxv_edge_monitor_mappings') ) neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/liberty/0000775000567000056710000000000012701410110032001 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/liberty/contract/0000775000567000056710000000000012701410110033616 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016300000000000011215 Lustar 
00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/liberty/contract/130ebfdef43_initial.pyneutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/liberty/contract/130ebfde0000664000567000056710000000206612701407726035061 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Initial Liberty no-op contract revision. Revision ID: 130ebfdef43 Revises: kilo Create Date: 2015-07-18 14:35:22.242794 """ from neutron.db import migration from neutron.db.migration import cli # revision identifiers, used by Alembic. 
revision = '130ebfdef43' down_revision = 'kilo' branch_labels = (cli.CONTRACT_BRANCH,) # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.LIBERTY, migration.MITAKA] def upgrade(): pass neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/liberty/expand/0000775000567000056710000000000012701410110033260 5ustar jenkinsjenkins00000000000000././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/liberty/expand/3345facd0452_initial.pyneutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/liberty/expand/3345facd040000664000567000056710000000204012701407726034602 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Initial Liberty no-op expand script. Revision ID: 3345facd0452 Revises: kilo Create Date: 2015-07-18 14:35:22.234191 """ from neutron.db import migration from neutron.db.migration import cli # revision identifiers, used by Alembic. 
revision = '3345facd0452' down_revision = 'kilo' branch_labels = (cli.EXPAND_BRANCH,) # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.LIBERTY] def upgrade(): pass ././@LongLink0000000000000000000000000000020300000000000011210 Lustar 00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/4deef6d81931_add_provisioning_and_operating_statuses.pyneutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/4deef6d81931_add_provisio0000664000567000056710000000444212701407726034672 0ustar jenkinsjenkins00000000000000# Copyright 2014-2015 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """add provisioning and operating statuses Revision ID: 4deef6d81931 Revises: lbaasv2 Create Date: 2015-01-27 20:38:20.796401 """ # revision identifiers, used by Alembic. 
revision = '4deef6d81931' down_revision = 'lbaasv2' from alembic import op import sqlalchemy as sa PROVISIONING_STATUS = u'provisioning_status' OPERATING_STATUS = u'operating_status' STATUS = u'status' def upgrade(): op.drop_column(u'lbaas_loadbalancers', STATUS) op.add_column( u'lbaas_loadbalancers', sa.Column(PROVISIONING_STATUS, sa.String(16), nullable=False) ) op.add_column( u'lbaas_loadbalancers', sa.Column(OPERATING_STATUS, sa.String(16), nullable=False) ) op.drop_column(u'lbaas_listeners', STATUS) op.add_column( u'lbaas_listeners', sa.Column(PROVISIONING_STATUS, sa.String(16), nullable=False) ) op.add_column( u'lbaas_listeners', sa.Column(OPERATING_STATUS, sa.String(16), nullable=False) ) op.drop_column(u'lbaas_pools', STATUS) op.add_column( u'lbaas_pools', sa.Column(PROVISIONING_STATUS, sa.String(16), nullable=False) ) op.add_column( u'lbaas_pools', sa.Column(OPERATING_STATUS, sa.String(16), nullable=False) ) op.drop_column(u'lbaas_members', STATUS) op.add_column( u'lbaas_members', sa.Column(PROVISIONING_STATUS, sa.String(16), nullable=False) ) op.add_column( u'lbaas_members', sa.Column(OPERATING_STATUS, sa.String(16), nullable=False) ) op.drop_column(u'lbaas_healthmonitors', STATUS) op.add_column( u'lbaas_healthmonitors', sa.Column(PROVISIONING_STATUS, sa.String(16), nullable=False) ) neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/364f9b6064f0_agentv2.py0000664000567000056710000000260412701407726034037 0ustar jenkinsjenkins00000000000000# Copyright 2015 Rackspace. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """agentv2 Revision ID: 364f9b6064f0 Revises: 4b6d8d5310b8 Create Date: 2015-02-05 10:17:13.229358 """ # revision identifiers, used by Alembic. revision = '364f9b6064f0' down_revision = '4b6d8d5310b8' from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'lbaas_loadbalanceragentbindings', sa.Column('loadbalancer_id', sa.String(length=36), nullable=False), sa.Column('agent_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['loadbalancer_id'], ['lbaas_loadbalancers.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['agent_id'], ['agents.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('loadbalancer_id')) neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/lbaasv2_tls.py0000664000567000056710000000343212701407726033142 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """lbaasv2 TLS Revision ID: lbaasv2_tls Revises: 364f9b6064f0 Create Date: 2015-01-18 10:00:00 """ # revision identifiers, used by Alembic. 
revision = 'lbaasv2_tls' down_revision = '364f9b6064f0' from alembic import op import sqlalchemy as sa from neutron.db import migration old_listener_protocols = sa.Enum("HTTP", "HTTPS", "TCP", name="listener_protocolsv2") new_listener_protocols = sa.Enum("HTTP", "HTTPS", "TCP", "TERMINATED_HTTPS", name="listener_protocolsv2") def upgrade(): migration.alter_enum('lbaas_listeners', 'protocol', new_listener_protocols, nullable=False) op.create_table( u'lbaas_sni', sa.Column(u'listener_id', sa.String(36), nullable=False), sa.Column(u'tls_container_id', sa.String(128), nullable=False), sa.Column(u'position', sa.Integer), sa.ForeignKeyConstraint(['listener_id'], [u'lbaas_listeners.id'], ), sa.PrimaryKeyConstraint(u'listener_id', u'tls_container_id') ) op.add_column('lbaas_listeners', sa.Column(u'default_tls_container_id', sa.String(128), nullable=True)) neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/lbaasv2.py0000664000567000056710000001551712701407726032267 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """lbaas version 2 api Revision ID: lbaasv2 Revises: start_neutron_lbaas Create Date: 2014-06-18 10:50:15.606420 """ # revision identifiers, used by Alembic. 
revision = 'lbaasv2' down_revision = 'start_neutron_lbaas' from alembic import op import sqlalchemy as sa listener_protocols = sa.Enum("HTTP", "HTTPS", "TCP", name="listener_protocolsv2") pool_protocols = sa.Enum("HTTP", "HTTPS", "TCP", name="pool_protocolsv2") sesssionpersistences_type = sa.Enum("SOURCE_IP", "HTTP_COOKIE", "APP_COOKIE", name="sesssionpersistences_typev2") lb_algorithms = sa.Enum("ROUND_ROBIN", "LEAST_CONNECTIONS", "SOURCE_IP", name="lb_algorithmsv2") healthmonitors_type = sa.Enum("PING", "TCP", "HTTP", "HTTPS", name="healthmonitors_typev2") def upgrade(): op.create_table( u'lbaas_healthmonitors', sa.Column(u'tenant_id', sa.String(255), nullable=True), sa.Column(u'id', sa.String(36), nullable=False), sa.Column(u'type', healthmonitors_type, nullable=False), sa.Column(u'delay', sa.Integer(), nullable=False), sa.Column(u'timeout', sa.Integer(), nullable=False), sa.Column(u'max_retries', sa.Integer(), nullable=False), sa.Column(u'http_method', sa.String(16), nullable=True), sa.Column(u'url_path', sa.String(255), nullable=True), sa.Column(u'expected_codes', sa.String(64), nullable=True), sa.Column(u'status', sa.String(16), nullable=False), sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint(u'id') ) op.create_table( u'lbaas_pools', sa.Column(u'tenant_id', sa.String(255), nullable=True), sa.Column(u'id', sa.String(36), nullable=False), sa.Column(u'name', sa.String(255), nullable=True), sa.Column(u'description', sa.String(255), nullable=True), sa.Column(u'protocol', pool_protocols, nullable=False), sa.Column(u'lb_algorithm', lb_algorithms, nullable=False), sa.Column(u'healthmonitor_id', sa.String(36), nullable=True), sa.Column(u'status', sa.String(16), nullable=False), sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint(u'id'), sa.UniqueConstraint(u'healthmonitor_id'), sa.ForeignKeyConstraint([u'healthmonitor_id'], [u'lbaas_healthmonitors.id']) ) op.create_table( u'lbaas_sessionpersistences', 
sa.Column(u'pool_id', sa.String(36), nullable=False), sa.Column(u'type', sesssionpersistences_type, nullable=False), sa.Column(u'cookie_name', sa.String(1024), nullable=True), sa.ForeignKeyConstraint([u'pool_id'], [u'lbaas_pools.id']), sa.PrimaryKeyConstraint(u'pool_id') ) op.create_table( u'lbaas_members', sa.Column(u'tenant_id', sa.String(255), nullable=True), sa.Column(u'id', sa.String(36), nullable=False), sa.Column(u'pool_id', sa.String(36), nullable=False), sa.Column(u'subnet_id', sa.String(36), nullable=True), sa.Column(u'address', sa.String(64), nullable=False), sa.Column(u'protocol_port', sa.Integer(), nullable=False), sa.Column(u'weight', sa.Integer(), nullable=True), sa.Column(u'status', sa.String(16), nullable=False), sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint(u'id'), sa.ForeignKeyConstraint([u'pool_id'], [u'lbaas_pools.id']), sa.UniqueConstraint(u'pool_id', u'address', u'protocol_port', name=u'uniq_pool_address_port_v2') ) op.create_table( u'lbaas_loadbalancers', sa.Column(u'tenant_id', sa.String(255), nullable=True), sa.Column(u'id', sa.String(36), nullable=False), sa.Column(u'name', sa.String(255), nullable=True), sa.Column(u'description', sa.String(255), nullable=True), sa.Column(u'vip_port_id', sa.String(36), nullable=True), sa.Column(u'vip_subnet_id', sa.String(36), nullable=False), sa.Column(u'vip_address', sa.String(36), nullable=True), sa.Column(u'status', sa.String(16), nullable=False), sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint([u'vip_port_id'], [u'ports.id'], name=u'fk_lbaas_loadbalancers_ports_id'), sa.PrimaryKeyConstraint(u'id') ) op.create_table( u'lbaas_listeners', sa.Column(u'tenant_id', sa.String(255), nullable=True), sa.Column(u'id', sa.String(36), nullable=False), sa.Column(u'name', sa.String(255), nullable=True), sa.Column(u'description', sa.String(255), nullable=True), sa.Column(u'protocol', listener_protocols, nullable=False), 
sa.Column(u'protocol_port', sa.Integer(), nullable=False), sa.Column(u'connection_limit', sa.Integer(), nullable=True), sa.Column(u'loadbalancer_id', sa.String(36), nullable=True), sa.Column(u'default_pool_id', sa.String(36), nullable=True), sa.Column(u'status', sa.String(16), nullable=False), sa.Column(u'admin_state_up', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint([u'loadbalancer_id'], [u'lbaas_loadbalancers.id']), sa.ForeignKeyConstraint([u'default_pool_id'], [u'lbaas_pools.id']), sa.UniqueConstraint(u'default_pool_id'), sa.UniqueConstraint(u'loadbalancer_id', u'protocol_port', name=u'uniq_loadbalancer_listener_port'), sa.PrimaryKeyConstraint(u'id') ) op.create_table( u'lbaas_loadbalancer_statistics', sa.Column(u'loadbalancer_id', sa.String(36), nullable=False), sa.Column(u'bytes_in', sa.BigInteger(), nullable=False), sa.Column(u'bytes_out', sa.BigInteger(), nullable=False), sa.Column(u'active_connections', sa.BigInteger(), nullable=False), sa.Column(u'total_connections', sa.BigInteger(), nullable=False), sa.PrimaryKeyConstraint(u'loadbalancer_id'), sa.ForeignKeyConstraint([u'loadbalancer_id'], [u'lbaas_loadbalancers.id']) ) ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/4b6d8d5310b8_add_index_tenant_id.pyneutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/versions/4b6d8d5310b8_add_index_te0000664000567000056710000000222012701407726034516 0ustar jenkinsjenkins00000000000000# Copyright 2015 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # """add_index_tenant_id Revision ID: 4b6d8d5310b8 Revises: 4deef6d81931 Create Date: 2015-02-10 18:28:26.362881 """ # revision identifiers, used by Alembic. revision = '4b6d8d5310b8' down_revision = '4deef6d81931' from alembic import op TABLES = ['lbaas_members', 'lbaas_healthmonitors', 'lbaas_pools', 'lbaas_loadbalancers', 'lbaas_listeners', 'vips', 'members', 'pools', 'healthmonitors'] def upgrade(): for table in TABLES: op.create_index(op.f('ix_%s_tenant_id' % table), table, ['tenant_id'], unique=False) neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/__init__.py0000664000567000056710000000000012701407726030601 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/migration/alembic_migrations/script.py.mako0000664000567000056710000000203512701407726031306 0ustar jenkinsjenkins00000000000000# Copyright ${create_date.year} # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. 
revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} % if branch_labels: branch_labels = ${repr(branch_labels)} %endif from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} neutron-lbaas-8.0.0/neutron_lbaas/db/loadbalancer/0000775000567000056710000000000012701410110023245 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/loadbalancer/loadbalancer_dbv2.py0000664000567000056710000011210212701407726027163 0ustar jenkinsjenkins00000000000000# # Copyright 2014-2015 Rackspace. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re from neutron.api.v2 import attributes from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.db import common_db_mixin as base_db from neutron import manager from neutron.plugins.common import constants from neutron_lib import constants as n_constants from neutron_lib import exceptions as n_exc from oslo_db import exception from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils from sqlalchemy import orm from sqlalchemy.orm import exc from neutron_lbaas._i18n import _ from neutron_lbaas import agent_scheduler from neutron_lbaas.db.loadbalancer import models from neutron_lbaas.extensions import l7 from neutron_lbaas.extensions import loadbalancerv2 from neutron_lbaas.extensions import sharedpools from neutron_lbaas.services.loadbalancer import constants as lb_const from neutron_lbaas.services.loadbalancer import data_models LOG = logging.getLogger(__name__) class LoadBalancerPluginDbv2(base_db.CommonDbMixin, agent_scheduler.LbaasAgentSchedulerDbMixin): """Wraps loadbalancer with SQLAlchemy models. A class that wraps the implementation of the Neutron loadbalancer plugin database access interface using SQLAlchemy models. 
""" @property def _core_plugin(self): return manager.NeutronManager.get_plugin() def _get_resource(self, context, model, id, for_update=False): resource = None try: if for_update: query = self._model_query(context, model).filter( model.id == id).with_lockmode('update') resource = query.one() else: resource = self._get_by_id(context, model, id) except exc.NoResultFound: with excutils.save_and_reraise_exception(reraise=False) as ctx: if issubclass(model, (models.LoadBalancer, models.Listener, models.L7Policy, models.L7Rule, models.PoolV2, models.MemberV2, models.HealthMonitorV2, models.LoadBalancerStatistics, models.SessionPersistenceV2)): raise loadbalancerv2.EntityNotFound(name=model.NAME, id=id) ctx.reraise = True return resource def _resource_exists(self, context, model, id): try: self._get_by_id(context, model, id) except exc.NoResultFound: return False return True def _get_resources(self, context, model, filters=None): query = self._get_collection_query(context, model, filters=filters) return [model_instance for model_instance in query] def _create_port_for_load_balancer(self, context, lb_db, ip_address): # resolve subnet and create port subnet = self._core_plugin.get_subnet(context, lb_db.vip_subnet_id) fixed_ip = {'subnet_id': subnet['id']} if ip_address and ip_address != attributes.ATTR_NOT_SPECIFIED: fixed_ip['ip_address'] = ip_address port_data = { 'tenant_id': lb_db.tenant_id, 'name': 'loadbalancer-' + lb_db.id, 'network_id': subnet['network_id'], 'mac_address': attributes.ATTR_NOT_SPECIFIED, 'admin_state_up': False, 'device_id': lb_db.id, 'device_owner': n_constants.DEVICE_OWNER_LOADBALANCERV2, 'fixed_ips': [fixed_ip] } port = self._core_plugin.create_port(context, {'port': port_data}) lb_db.vip_port_id = port['id'] for fixed_ip in port['fixed_ips']: if fixed_ip['subnet_id'] == lb_db.vip_subnet_id: lb_db.vip_address = fixed_ip['ip_address'] break # explicitly sync session with db context.session.flush() def _create_loadbalancer_stats(self, context, 
loadbalancer_id, data=None): # This is internal method to add load balancer statistics. It won't # be exposed to API data = data or {} stats_db = models.LoadBalancerStatistics( loadbalancer_id=loadbalancer_id, bytes_in=data.get(lb_const.STATS_IN_BYTES, 0), bytes_out=data.get(lb_const.STATS_OUT_BYTES, 0), active_connections=data.get(lb_const.STATS_ACTIVE_CONNECTIONS, 0), total_connections=data.get(lb_const.STATS_TOTAL_CONNECTIONS, 0) ) return stats_db def _delete_loadbalancer_stats(self, context, loadbalancer_id): # This is internal method to delete pool statistics. It won't # be exposed to API with context.session.begin(subtransactions=True): stats_qry = context.session.query(models.LoadBalancerStatistics) try: stats = stats_qry.filter_by( loadbalancer_id=loadbalancer_id).one() except exc.NoResultFound: raise loadbalancerv2.EntityNotFound( name=models.LoadBalancerStatistics.NAME, id=loadbalancer_id) context.session.delete(stats) def _load_id(self, context, model_dict): model_dict['id'] = uuidutils.generate_uuid() def assert_modification_allowed(self, obj): status = getattr(obj, 'provisioning_status', None) if status in [constants.PENDING_DELETE, constants.PENDING_UPDATE, constants.PENDING_CREATE]: id = getattr(obj, 'id', None) raise loadbalancerv2.StateInvalid(id=id, state=status) def test_and_set_status(self, context, model, id, status): with context.session.begin(subtransactions=True): db_lb_child = None if model == models.LoadBalancer: db_lb = self._get_resource(context, model, id, for_update=True) else: db_lb_child = self._get_resource(context, model, id) db_lb = self._get_resource(context, models.LoadBalancer, db_lb_child.root_loadbalancer.id) # This method will raise an exception if modification is not # allowed. 
self.assert_modification_allowed(db_lb) # if the model passed in is not a load balancer then we will # set its root load balancer's provisioning status to # PENDING_UPDATE and the model's status to the status passed in # Otherwise we are just setting the load balancer's provisioning # status to the status passed in if db_lb_child: db_lb.provisioning_status = constants.PENDING_UPDATE db_lb_child.provisioning_status = status else: db_lb.provisioning_status = status def update_loadbalancer_provisioning_status(self, context, lb_id, status=constants.ACTIVE): self.update_status(context, models.LoadBalancer, lb_id, provisioning_status=status) def update_status(self, context, model, id, provisioning_status=None, operating_status=None): with context.session.begin(subtransactions=True): if issubclass(model, models.LoadBalancer): try: model_db = (self._model_query(context, model). filter(model.id == id). options(orm.noload('vip_port')). one()) except exc.NoResultFound: raise loadbalancerv2.EntityNotFound( name=models.LoadBalancer.NAME, id=id) else: model_db = self._get_resource(context, model, id) if provisioning_status and (model_db.provisioning_status != provisioning_status): model_db.provisioning_status = provisioning_status if (operating_status and hasattr(model_db, 'operating_status') and model_db.operating_status != operating_status): model_db.operating_status = operating_status def create_loadbalancer(self, context, loadbalancer, allocate_vip=True): with context.session.begin(subtransactions=True): self._load_id(context, loadbalancer) vip_address = loadbalancer.pop('vip_address') loadbalancer['provisioning_status'] = constants.PENDING_CREATE loadbalancer['operating_status'] = lb_const.OFFLINE lb_db = models.LoadBalancer(**loadbalancer) context.session.add(lb_db) context.session.flush() lb_db.stats = self._create_loadbalancer_stats( context, lb_db.id) context.session.add(lb_db) # create port outside of lb create transaction since it can sometimes # cause lock wait 
timeouts if allocate_vip: LOG.debug("Plugin will allocate the vip as a neutron port.") try: self._create_port_for_load_balancer(context, lb_db, vip_address) except Exception: with excutils.save_and_reraise_exception(): context.session.delete(lb_db) context.session.flush() return data_models.LoadBalancer.from_sqlalchemy_model(lb_db) def update_loadbalancer(self, context, id, loadbalancer): with context.session.begin(subtransactions=True): lb_db = self._get_resource(context, models.LoadBalancer, id) lb_db.update(loadbalancer) return data_models.LoadBalancer.from_sqlalchemy_model(lb_db) def delete_loadbalancer(self, context, id, delete_vip_port=True): with context.session.begin(subtransactions=True): lb_db = self._get_resource(context, models.LoadBalancer, id) context.session.delete(lb_db) if delete_vip_port and lb_db.vip_port: self._core_plugin.delete_port(context, lb_db.vip_port_id) def prevent_lbaasv2_port_deletion(self, context, port_id): try: port_db = self._core_plugin._get_port(context, port_id) except n_exc.PortNotFound: return if port_db['device_owner'] == n_constants.DEVICE_OWNER_LOADBALANCERV2: filters = {'vip_port_id': [port_id]} if len(self.get_loadbalancers(context, filters=filters)) > 0: reason = _('has device owner %s') % port_db['device_owner'] raise n_exc.ServicePortInUse(port_id=port_db['id'], reason=reason) def subscribe(self): registry.subscribe( _prevent_lbaasv2_port_delete_callback, resources.PORT, events.BEFORE_DELETE) def get_loadbalancers(self, context, filters=None): lb_dbs = self._get_resources(context, models.LoadBalancer, filters=filters) return [data_models.LoadBalancer.from_sqlalchemy_model(lb_db) for lb_db in lb_dbs] def get_loadbalancer(self, context, id): lb_db = self._get_resource(context, models.LoadBalancer, id) return data_models.LoadBalancer.from_sqlalchemy_model(lb_db) def _validate_listener_data(self, context, listener): pool_id = listener.get('default_pool_id') lb_id = listener.get('loadbalancer_id') if lb_id: if not 
self._resource_exists(context, models.LoadBalancer, lb_id): raise loadbalancerv2.EntityNotFound( name=models.LoadBalancer.NAME, id=lb_id) if pool_id: if not self._resource_exists(context, models.PoolV2, pool_id): raise loadbalancerv2.EntityNotFound( name=models.PoolV2.NAME, id=pool_id) pool = self._get_resource(context, models.PoolV2, pool_id) if ((pool.protocol, listener.get('protocol')) not in lb_const.LISTENER_POOL_COMPATIBLE_PROTOCOLS): raise loadbalancerv2.ListenerPoolProtocolMismatch( listener_proto=listener['protocol'], pool_proto=pool.protocol) if lb_id and pool_id: pool = self._get_resource(context, models.PoolV2, pool_id) if pool.loadbalancer_id != lb_id: raise sharedpools.ListenerPoolLoadbalancerMismatch( pool_id=pool_id, lb_id=pool.loadbalancer_id) def _validate_l7policy_data(self, context, l7policy): if l7policy['action'] == lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL: if not l7policy['redirect_pool_id']: raise l7.L7PolicyRedirectPoolIdMissing() if not self._resource_exists( context, models.PoolV2, l7policy['redirect_pool_id']): raise loadbalancerv2.EntityNotFound( name=models.PoolV2.NAME, id=l7policy['redirect_pool_id']) pool = self._get_resource( context, models.PoolV2, l7policy['redirect_pool_id']) listener = self._get_resource( context, models.Listener, l7policy['listener_id']) if pool.loadbalancer_id != listener.loadbalancer_id: raise sharedpools.ListenerAndPoolMustBeOnSameLoadbalancer() if (l7policy['action'] == lb_const.L7_POLICY_ACTION_REDIRECT_TO_URL and 'redirect_url' not in l7policy): raise l7.L7PolicyRedirectUrlMissing() def _validate_l7rule_data(self, context, rule): def _validate_regex(regex): try: re.compile(regex) except Exception as e: raise l7.L7RuleInvalidRegex(e=str(e)) def _validate_key(key): p = re.compile(lb_const.HTTP_HEADER_COOKIE_NAME_REGEX) if not p.match(key): raise l7.L7RuleInvalidKey() def _validate_cookie_value(value): p = re.compile(lb_const.HTTP_COOKIE_VALUE_REGEX) if not p.match(value): raise 
l7.L7RuleInvalidCookieValue() def _validate_non_cookie_value(value): p = re.compile(lb_const.HTTP_HEADER_VALUE_REGEX) q = re.compile(lb_const.HTTP_QUOTED_HEADER_VALUE_REGEX) if not p.match(value) and not q.match(value): raise l7.L7RuleInvalidHeaderValue() if rule['compare_type'] == lb_const.L7_RULE_COMPARE_TYPE_REGEX: _validate_regex(rule['value']) if rule['type'] in [lb_const.L7_RULE_TYPE_HEADER, lb_const.L7_RULE_TYPE_COOKIE]: if ('key' not in rule or not rule['key']): raise l7.L7RuleKeyMissing() _validate_key(rule['key']) if rule['compare_type'] != lb_const.L7_RULE_COMPARE_TYPE_REGEX: if rule['type'] == lb_const.L7_RULE_TYPE_COOKIE: _validate_cookie_value(rule['value']) else: if rule['type'] in [lb_const.L7_RULE_TYPE_HEADER, lb_const.L7_RULE_TYPE_HOST_NAME, lb_const.L7_RULE_TYPE_PATH]: _validate_non_cookie_value(rule['value']) elif (rule['compare_type'] == lb_const.L7_RULE_COMPARE_TYPE_EQUAL_TO): _validate_non_cookie_value(rule['value']) else: raise l7.L7RuleUnsupportedCompareType(type=rule['type']) def _convert_api_to_db(self, listener): # NOTE(blogan): Converting the values for db models for now to # limit the scope of this change if 'default_tls_container_ref' in listener: tls_cref = listener.get('default_tls_container_ref') del listener['default_tls_container_ref'] listener['default_tls_container_id'] = tls_cref if 'sni_container_refs' in listener: sni_crefs = listener.get('sni_container_refs') del listener['sni_container_refs'] listener['sni_container_ids'] = sni_crefs def create_listener(self, context, listener): self._convert_api_to_db(listener) try: with context.session.begin(subtransactions=True): self._load_id(context, listener) listener['provisioning_status'] = constants.PENDING_CREATE listener['operating_status'] = lb_const.OFFLINE # Check for unspecified loadbalancer_id and listener_id and # set to None for id in ['loadbalancer_id', 'default_pool_id']: if listener.get(id) == attributes.ATTR_NOT_SPECIFIED: listener[id] = None 
self._validate_listener_data(context, listener) sni_container_ids = [] if 'sni_container_ids' in listener: sni_container_ids = listener.pop('sni_container_ids') listener_db_entry = models.Listener(**listener) for container_id in sni_container_ids: sni = models.SNI(listener_id=listener_db_entry.id, tls_container_id=container_id) listener_db_entry.sni_containers.append(sni) context.session.add(listener_db_entry) except exception.DBDuplicateEntry: raise loadbalancerv2.LoadBalancerListenerProtocolPortExists( lb_id=listener['loadbalancer_id'], protocol_port=listener['protocol_port']) context.session.refresh(listener_db_entry.loadbalancer) return data_models.Listener.from_sqlalchemy_model(listener_db_entry) def update_listener(self, context, id, listener, tls_containers_changed=False): self._convert_api_to_db(listener) with context.session.begin(subtransactions=True): listener_db = self._get_resource(context, models.Listener, id) if not listener.get('protocol'): # User did not intend to change the protocol so we will just # use the same protocol already stored so the validation knows listener['protocol'] = listener_db.protocol self._validate_listener_data(context, listener) if tls_containers_changed: listener_db.sni_containers = [] for container_id in listener['sni_container_ids']: sni = models.SNI(listener_id=id, tls_container_id=container_id) listener_db.sni_containers.append(sni) listener_db.update(listener) context.session.refresh(listener_db) return data_models.Listener.from_sqlalchemy_model(listener_db) def delete_listener(self, context, id): listener_db_entry = self._get_resource(context, models.Listener, id) with context.session.begin(subtransactions=True): context.session.delete(listener_db_entry) def get_listeners(self, context, filters=None): listener_dbs = self._get_resources(context, models.Listener, filters=filters) return [data_models.Listener.from_sqlalchemy_model(listener_db) for listener_db in listener_dbs] def get_listener(self, context, id): 
listener_db = self._get_resource(context, models.Listener, id) return data_models.Listener.from_sqlalchemy_model(listener_db) def _create_session_persistence_db(self, session_info, pool_id): session_info['pool_id'] = pool_id return models.SessionPersistenceV2(**session_info) def _update_pool_session_persistence(self, context, pool_id, info): # removing these keys as it is possible that they are passed in and # their existence will cause issues bc they are not acceptable as # dictionary values info.pop('pool', None) info.pop('pool_id', None) pool = self._get_resource(context, models.PoolV2, pool_id) with context.session.begin(subtransactions=True): # Update sessionPersistence table sess_qry = context.session.query(models.SessionPersistenceV2) sesspersist_db = sess_qry.filter_by(pool_id=pool_id).first() # Insert a None cookie_info if it is not present to overwrite an # existing value in the database. if 'cookie_name' not in info: info['cookie_name'] = None if sesspersist_db: sesspersist_db.update(info) else: info['pool_id'] = pool_id sesspersist_db = models.SessionPersistenceV2(**info) context.session.add(sesspersist_db) # Update pool table pool.session_persistence = sesspersist_db context.session.add(pool) def _delete_session_persistence(self, context, pool_id): with context.session.begin(subtransactions=True): sess_qry = context.session.query(models.SessionPersistenceV2) sess_qry.filter_by(pool_id=pool_id).delete() def create_pool(self, context, pool): with context.session.begin(subtransactions=True): self._load_id(context, pool) pool['provisioning_status'] = constants.PENDING_CREATE pool['operating_status'] = lb_const.OFFLINE session_info = pool.pop('session_persistence') pool_db = models.PoolV2(**pool) if session_info: s_p = self._create_session_persistence_db(session_info, pool_db.id) pool_db.session_persistence = s_p context.session.add(pool_db) return data_models.Pool.from_sqlalchemy_model(pool_db) def update_pool(self, context, id, pool): with 
context.session.begin(subtransactions=True): pool_db = self._get_resource(context, models.PoolV2, id) hm_id = pool.get('healthmonitor_id') if hm_id: if not self._resource_exists(context, models.HealthMonitorV2, hm_id): raise loadbalancerv2.EntityNotFound( name=models.HealthMonitorV2.NAME, id=hm_id) filters = {'healthmonitor_id': [hm_id]} hmpools = self._get_resources(context, models.PoolV2, filters=filters) if hmpools: raise loadbalancerv2.EntityInUse( entity_using=models.PoolV2.NAME, id=hmpools[0].id, entity_in_use=models.HealthMonitorV2.NAME) # Only update or delete session persistence if it was part # of the API request. if 'session_persistence' in pool.keys(): sp = pool.pop('session_persistence') if sp is None or sp == {}: self._delete_session_persistence(context, id) else: self._update_pool_session_persistence(context, id, sp) # sqlalchemy cries if listeners is defined. listeners = pool.get('listeners') if listeners: del pool['listeners'] pool_db.update(pool) context.session.refresh(pool_db) return data_models.Pool.from_sqlalchemy_model(pool_db) def delete_pool(self, context, id): with context.session.begin(subtransactions=True): pool_db = self._get_resource(context, models.PoolV2, id) for l in pool_db.listeners: self.update_listener(context, l.id, {'default_pool_id': None}) for l in pool_db.loadbalancer.listeners: for p in l.l7_policies: if (p.action == lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL and p.redirect_pool_id == id): self.update_l7policy( context, p.id, {'redirect_pool_id': None, 'action': lb_const.L7_POLICY_ACTION_REJECT}) context.session.delete(pool_db) def get_pools(self, context, filters=None): pool_dbs = self._get_resources(context, models.PoolV2, filters=filters) return [data_models.Pool.from_sqlalchemy_model(pool_db) for pool_db in pool_dbs] def get_pool(self, context, id): pool_db = self._get_resource(context, models.PoolV2, id) return data_models.Pool.from_sqlalchemy_model(pool_db) def create_pool_member(self, context, member, pool_id): try: 
with context.session.begin(subtransactions=True): self._load_id(context, member) member['pool_id'] = pool_id member['provisioning_status'] = constants.PENDING_CREATE member['operating_status'] = lb_const.OFFLINE member_db = models.MemberV2(**member) context.session.add(member_db) except exception.DBDuplicateEntry: raise loadbalancerv2.MemberExists(address=member['address'], port=member['protocol_port'], pool=pool_id) context.session.refresh(member_db.pool) return data_models.Member.from_sqlalchemy_model(member_db) def update_pool_member(self, context, id, member): with context.session.begin(subtransactions=True): member_db = self._get_resource(context, models.MemberV2, id) member_db.update(member) context.session.refresh(member_db) return data_models.Member.from_sqlalchemy_model(member_db) def delete_pool_member(self, context, id): with context.session.begin(subtransactions=True): member_db = self._get_resource(context, models.MemberV2, id) context.session.delete(member_db) def get_pool_members(self, context, filters=None): filters = filters or {} member_dbs = self._get_resources(context, models.MemberV2, filters=filters) return [data_models.Member.from_sqlalchemy_model(member_db) for member_db in member_dbs] def get_pool_member(self, context, id): member_db = self._get_resource(context, models.MemberV2, id) return data_models.Member.from_sqlalchemy_model(member_db) def delete_member(self, context, id): with context.session.begin(subtransactions=True): member_db = self._get_resource(context, models.MemberV2, id) context.session.delete(member_db) def create_healthmonitor_on_pool(self, context, pool_id, healthmonitor): with context.session.begin(subtransactions=True): hm_db = self.create_healthmonitor(context, healthmonitor) pool = self.get_pool(context, pool_id) # do not want listener, members, healthmonitor or loadbalancer # in dict pool_dict = pool.to_dict(listeners=False, members=False, healthmonitor=False, loadbalancer=False, listener=False, 
loadbalancer_id=False) pool_dict['healthmonitor_id'] = hm_db.id self.update_pool(context, pool_id, pool_dict) hm_db = self._get_resource(context, models.HealthMonitorV2, hm_db.id) return data_models.HealthMonitor.from_sqlalchemy_model(hm_db) def create_healthmonitor(self, context, healthmonitor): with context.session.begin(subtransactions=True): self._load_id(context, healthmonitor) healthmonitor['provisioning_status'] = constants.PENDING_CREATE hm_db_entry = models.HealthMonitorV2(**healthmonitor) context.session.add(hm_db_entry) return data_models.HealthMonitor.from_sqlalchemy_model(hm_db_entry) def update_healthmonitor(self, context, id, healthmonitor): with context.session.begin(subtransactions=True): hm_db = self._get_resource(context, models.HealthMonitorV2, id) hm_db.update(healthmonitor) context.session.refresh(hm_db) return data_models.HealthMonitor.from_sqlalchemy_model(hm_db) def delete_healthmonitor(self, context, id): with context.session.begin(subtransactions=True): hm_db_entry = self._get_resource(context, models.HealthMonitorV2, id) # TODO(sbalukoff): Clear out pool.healthmonitor_ids referencing # old healthmonitor ID. 
context.session.delete(hm_db_entry) def get_healthmonitor(self, context, id): hm_db = self._get_resource(context, models.HealthMonitorV2, id) return data_models.HealthMonitor.from_sqlalchemy_model(hm_db) def get_healthmonitors(self, context, filters=None): filters = filters or {} hm_dbs = self._get_resources(context, models.HealthMonitorV2, filters=filters) return [data_models.HealthMonitor.from_sqlalchemy_model(hm_db) for hm_db in hm_dbs] def update_loadbalancer_stats(self, context, loadbalancer_id, stats_data): stats_data = stats_data or {} with context.session.begin(subtransactions=True): lb_db = self._get_resource(context, models.LoadBalancer, loadbalancer_id) lb_db.stats = self._create_loadbalancer_stats(context, loadbalancer_id, data=stats_data) def stats(self, context, loadbalancer_id): loadbalancer = self._get_resource(context, models.LoadBalancer, loadbalancer_id) return data_models.LoadBalancerStatistics.from_sqlalchemy_model( loadbalancer.stats) def create_l7policy(self, context, l7policy): if l7policy['redirect_pool_id'] == attributes.ATTR_NOT_SPECIFIED: l7policy['redirect_pool_id'] = None self._validate_l7policy_data(context, l7policy) with context.session.begin(subtransactions=True): listener_id = l7policy.get('listener_id') listener_db = self._get_resource( context, models.Listener, listener_id) if not listener_db: raise loadbalancerv2.EntityNotFound( name=models.Listener.NAME, id=listener_id) self._load_id(context, l7policy) l7policy['provisioning_status'] = constants.PENDING_CREATE l7policy_db = models.L7Policy(**l7policy) # MySQL int fields are by default 32-bit whereas handy system # constants like sys.maxsize are 64-bit on most platforms today. # Hence the reason this is 2147483647 (2^31 - 1) instead of an # elsewhere-defined constant. 
if l7policy['position'] == 2147483647: listener_db.l7_policies.append(l7policy_db) else: listener_db.l7_policies.insert(l7policy['position'] - 1, l7policy_db) listener_db.l7_policies.reorder() return data_models.L7Policy.from_sqlalchemy_model(l7policy_db) def update_l7policy(self, context, id, l7policy): with context.session.begin(subtransactions=True): l7policy_db = self._get_resource(context, models.L7Policy, id) if 'action' in l7policy: l7policy['listener_id'] = l7policy_db.listener_id self._validate_l7policy_data(context, l7policy) if ('position' not in l7policy or l7policy['position'] == 2147483647 or l7policy_db.position == l7policy['position']): l7policy_db.update(l7policy) else: listener_id = l7policy_db.listener_id listener_db = self._get_resource( context, models.Listener, listener_id) l7policy_db = listener_db.l7_policies.pop( l7policy_db.position - 1) l7policy_db.update(l7policy) listener_db.l7_policies.insert(l7policy['position'] - 1, l7policy_db) listener_db.l7_policies.reorder() context.session.refresh(l7policy_db) return data_models.L7Policy.from_sqlalchemy_model(l7policy_db) def delete_l7policy(self, context, id): with context.session.begin(subtransactions=True): l7policy_db = self._get_resource(context, models.L7Policy, id) listener_id = l7policy_db.listener_id listener_db = self._get_resource( context, models.Listener, listener_id) listener_db.l7_policies.remove(l7policy_db) def get_l7policy(self, context, id): l7policy_db = self._get_resource(context, models.L7Policy, id) return data_models.L7Policy.from_sqlalchemy_model(l7policy_db) def get_l7policies(self, context, filters=None): l7policy_dbs = self._get_resources(context, models.L7Policy, filters=filters) return [data_models.L7Policy.from_sqlalchemy_model(l7policy_db) for l7policy_db in l7policy_dbs] def create_l7policy_rule(self, context, rule, l7policy_id): with context.session.begin(subtransactions=True): if not self._resource_exists(context, models.L7Policy, l7policy_id): raise 
loadbalancerv2.EntityNotFound( name=models.L7Policy.NAME, id=l7policy_id) self._validate_l7rule_data(context, rule) self._load_id(context, rule) rule['l7policy_id'] = l7policy_id rule['provisioning_status'] = constants.PENDING_CREATE rule_db = models.L7Rule(**rule) context.session.add(rule_db) return data_models.L7Rule.from_sqlalchemy_model(rule_db) def update_l7policy_rule(self, context, id, rule, l7policy_id): with context.session.begin(subtransactions=True): if not self._resource_exists(context, models.L7Policy, l7policy_id): raise l7.RuleNotFoundForL7Policy( l7policy_id=l7policy_id, rule_id=id) rule_db = self._get_resource(context, models.L7Rule, id) # If user did not intend to change all parameters, # already stored parameters will be used for validations if not rule.get('type'): rule['type'] = rule_db.type if not rule.get('value'): rule['value'] = rule_db.value if not rule.get('compare_type'): rule['compare_type'] = rule_db.compare_type self._validate_l7rule_data(context, rule) rule_db = self._get_resource(context, models.L7Rule, id) rule_db.update(rule) context.session.refresh(rule_db) return data_models.L7Rule.from_sqlalchemy_model(rule_db) def delete_l7policy_rule(self, context, id): with context.session.begin(subtransactions=True): rule_db_entry = self._get_resource(context, models.L7Rule, id) context.session.delete(rule_db_entry) def get_l7policy_rule(self, context, id, l7policy_id): rule_db = self._get_resource(context, models.L7Rule, id) if rule_db.l7policy_id != l7policy_id: raise l7.RuleNotFoundForL7Policy( l7policy_id=l7policy_id, rule_id=id) return data_models.L7Rule.from_sqlalchemy_model(rule_db) def get_l7policy_rules(self, context, l7policy_id, filters=None): if filters: filters.update(filters) else: filters = {'l7policy_id': [l7policy_id]} rule_dbs = self._get_resources(context, models.L7Rule, filters=filters) return [data_models.L7Rule.from_sqlalchemy_model(rule_db) for rule_db in rule_dbs] def _prevent_lbaasv2_port_delete_callback(resource, 
event, trigger, **kwargs): context = kwargs['context'] port_id = kwargs['port_id'] port_check = kwargs['port_check'] lbaasv2plugin = manager.NeutronManager.get_service_plugins().get( constants.LOADBALANCERV2) if lbaasv2plugin and port_check: lbaasv2plugin.db.prevent_lbaasv2_port_deletion(context, port_id) neutron-lbaas-8.0.0/neutron_lbaas/db/loadbalancer/__init__.py0000664000567000056710000000000012701407726025367 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas/db/loadbalancer/loadbalancer_db.py0000664000567000056710000011065312701407726026724 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from neutron.api.v2 import attributes from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.db import common_db_mixin as base_db from neutron.db import model_base from neutron.db import models_v2 from neutron.db import servicetype_db as st_db from neutron import manager from neutron.plugins.common import constants from neutron_lib import constants as n_constants from neutron_lib import exceptions as n_exc from oslo_db import exception from oslo_utils import excutils from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.orm import exc from sqlalchemy.orm import validates from neutron_lbaas._i18n import _, _LE from neutron_lbaas.extensions import loadbalancer from neutron_lbaas.services.loadbalancer import constants as lb_const class SessionPersistence(model_base.BASEV2): vip_id = sa.Column(sa.String(36), sa.ForeignKey("vips.id"), primary_key=True) type = sa.Column(sa.Enum("SOURCE_IP", "HTTP_COOKIE", "APP_COOKIE", name="sesssionpersistences_type"), nullable=False) cookie_name = sa.Column(sa.String(1024)) class PoolStatistics(model_base.BASEV2): """Represents pool statistics.""" pool_id = sa.Column(sa.String(36), sa.ForeignKey("pools.id"), primary_key=True) bytes_in = sa.Column(sa.BigInteger, nullable=False) bytes_out = sa.Column(sa.BigInteger, nullable=False) active_connections = sa.Column(sa.BigInteger, nullable=False) total_connections = sa.Column(sa.BigInteger, nullable=False) @validates('bytes_in', 'bytes_out', 'active_connections', 'total_connections') def validate_non_negative_int(self, key, value): if value < 0: data = {'key': key, 'value': value} raise ValueError(_('The %(key)s field can not have ' 'negative value. 
' 'Current value is %(value)d.') % data) return value class Vip(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant, models_v2.HasStatusDescription): """Represents a v2 neutron loadbalancer vip.""" name = sa.Column(sa.String(255)) description = sa.Column(sa.String(255)) port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id')) protocol_port = sa.Column(sa.Integer, nullable=False) protocol = sa.Column(sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"), nullable=False) pool_id = sa.Column(sa.String(36), nullable=False, unique=True) session_persistence = orm.relationship(SessionPersistence, uselist=False, backref="vips", cascade="all, delete-orphan") admin_state_up = sa.Column(sa.Boolean(), nullable=False) connection_limit = sa.Column(sa.Integer) port = orm.relationship(models_v2.Port) class Member(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant, models_v2.HasStatusDescription): """Represents a v2 neutron loadbalancer member.""" __table_args__ = ( sa.schema.UniqueConstraint('pool_id', 'address', 'protocol_port', name='uniq_member0pool_id0address0port'), ) pool_id = sa.Column(sa.String(36), sa.ForeignKey("pools.id"), nullable=False) address = sa.Column(sa.String(64), nullable=False) protocol_port = sa.Column(sa.Integer, nullable=False) weight = sa.Column(sa.Integer, nullable=False) admin_state_up = sa.Column(sa.Boolean(), nullable=False) class Pool(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant, models_v2.HasStatusDescription): """Represents a v2 neutron loadbalancer pool.""" vip_id = sa.Column(sa.String(36), sa.ForeignKey("vips.id")) name = sa.Column(sa.String(255)) description = sa.Column(sa.String(255)) subnet_id = sa.Column(sa.String(36), nullable=False) protocol = sa.Column(sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"), nullable=False) lb_method = sa.Column(sa.Enum("ROUND_ROBIN", "LEAST_CONNECTIONS", "SOURCE_IP", name="pools_lb_method"), nullable=False) admin_state_up = sa.Column(sa.Boolean(), nullable=False) stats = 
orm.relationship(PoolStatistics, uselist=False, backref="pools", cascade="all, delete-orphan") members = orm.relationship(Member, backref="pools", cascade="all, delete-orphan") monitors = orm.relationship("PoolMonitorAssociation", backref="pools", cascade="all, delete-orphan") vip = orm.relationship(Vip, backref='pool') provider = orm.relationship( st_db.ProviderResourceAssociation, uselist=False, lazy="joined", primaryjoin="Pool.id==ProviderResourceAssociation.resource_id", foreign_keys=[st_db.ProviderResourceAssociation.resource_id] ) class HealthMonitor(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): """Represents a v2 neutron loadbalancer healthmonitor.""" type = sa.Column(sa.Enum("PING", "TCP", "HTTP", "HTTPS", name="healthmontiors_type"), nullable=False) delay = sa.Column(sa.Integer, nullable=False) timeout = sa.Column(sa.Integer, nullable=False) max_retries = sa.Column(sa.Integer, nullable=False) http_method = sa.Column(sa.String(16)) url_path = sa.Column(sa.String(255)) expected_codes = sa.Column(sa.String(64)) admin_state_up = sa.Column(sa.Boolean(), nullable=False) pools = orm.relationship( "PoolMonitorAssociation", backref="healthmonitor", cascade="all", lazy="joined" ) class PoolMonitorAssociation(model_base.BASEV2, models_v2.HasStatusDescription): """Many-to-many association between pool and healthMonitor classes.""" pool_id = sa.Column(sa.String(36), sa.ForeignKey("pools.id"), primary_key=True) monitor_id = sa.Column(sa.String(36), sa.ForeignKey("healthmonitors.id"), primary_key=True) class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase, base_db.CommonDbMixin): """Wraps loadbalancer with SQLAlchemy models. A class that wraps the implementation of the Neutron loadbalancer plugin database access interface using SQLAlchemy models. 
""" @property def _core_plugin(self): return manager.NeutronManager.get_plugin() def update_status(self, context, model, id, status, status_description=None): with context.session.begin(subtransactions=True): if issubclass(model, Vip): try: v_db = (self._model_query(context, model). filter(model.id == id). options(orm.noload('port')). one()) except exc.NoResultFound: raise loadbalancer.VipNotFound(vip_id=id) else: v_db = self._get_resource(context, model, id) if v_db.status != status: v_db.status = status # update status_description in two cases: # - new value is passed # - old value is not None (needs to be updated anyway) if status_description or v_db['status_description']: v_db.status_description = status_description def _get_resource(self, context, model, id): try: r = self._get_by_id(context, model, id) except exc.NoResultFound: with excutils.save_and_reraise_exception(reraise=False) as ctx: if issubclass(model, Vip): raise loadbalancer.VipNotFound(vip_id=id) elif issubclass(model, Pool): raise loadbalancer.PoolNotFound(pool_id=id) elif issubclass(model, Member): raise loadbalancer.MemberNotFound(member_id=id) elif issubclass(model, HealthMonitor): raise loadbalancer.HealthMonitorNotFound(monitor_id=id) ctx.reraise = True return r def assert_modification_allowed(self, obj): status = getattr(obj, 'status', None) if status == constants.PENDING_DELETE: id = getattr(obj, 'id', None) raise loadbalancer.StateInvalid(id=id, state=status) ######################################################## # VIP DB access def _make_vip_dict(self, vip, fields=None): fixed_ip = {} # it's possible that vip doesn't have created port yet if vip.port: fixed_ip = (vip.port.fixed_ips or [{}])[0] res = {'id': vip['id'], 'tenant_id': vip['tenant_id'], 'name': vip['name'], 'description': vip['description'], 'subnet_id': fixed_ip.get('subnet_id'), 'address': fixed_ip.get('ip_address'), 'port_id': vip['port_id'], 'protocol_port': vip['protocol_port'], 'protocol': vip['protocol'], 'pool_id': 
vip['pool_id'], 'session_persistence': None, 'connection_limit': vip['connection_limit'], 'admin_state_up': vip['admin_state_up'], 'status': vip['status'], 'status_description': vip['status_description']} if vip['session_persistence']: s_p = { 'type': vip['session_persistence']['type'] } if vip['session_persistence']['type'] == 'APP_COOKIE': s_p['cookie_name'] = vip['session_persistence']['cookie_name'] res['session_persistence'] = s_p return self._fields(res, fields) def _check_session_persistence_info(self, info): """Performs sanity check on session persistence info. :param info: Session persistence info """ if info['type'] == 'APP_COOKIE': if not info.get('cookie_name'): raise ValueError(_("'cookie_name' should be specified for this" " type of session persistence.")) else: if 'cookie_name' in info: raise ValueError(_("'cookie_name' is not allowed for this type" " of session persistence")) def _create_session_persistence_db(self, session_info, vip_id): self._check_session_persistence_info(session_info) sesspersist_db = SessionPersistence( type=session_info['type'], cookie_name=session_info.get('cookie_name'), vip_id=vip_id) return sesspersist_db def _update_vip_session_persistence(self, context, vip_id, info): self._check_session_persistence_info(info) vip = self._get_resource(context, Vip, vip_id) with context.session.begin(subtransactions=True): # Update sessionPersistence table sess_qry = context.session.query(SessionPersistence) sesspersist_db = sess_qry.filter_by(vip_id=vip_id).first() # Insert a None cookie_info if it is not present to overwrite an # an existing value in the database. 
if 'cookie_name' not in info: info['cookie_name'] = None if sesspersist_db: sesspersist_db.update(info) else: sesspersist_db = SessionPersistence( type=info['type'], cookie_name=info['cookie_name'], vip_id=vip_id) context.session.add(sesspersist_db) # Update vip table vip.session_persistence = sesspersist_db context.session.add(vip) def _delete_session_persistence(self, context, vip_id): with context.session.begin(subtransactions=True): sess_qry = context.session.query(SessionPersistence) sess_qry.filter_by(vip_id=vip_id).delete() def _create_port_for_vip(self, context, vip_db, subnet_id, ip_address): # resolve subnet and create port subnet = self._core_plugin.get_subnet(context, subnet_id) fixed_ip = {'subnet_id': subnet['id']} if ip_address and ip_address != attributes.ATTR_NOT_SPECIFIED: fixed_ip['ip_address'] = ip_address if subnet.get('gateway_ip') == ip_address: raise n_exc.IpAddressInUse(net_id=subnet['network_id'], ip_address=ip_address) port_data = { 'tenant_id': vip_db.tenant_id, 'name': 'vip-' + vip_db.id, 'network_id': subnet['network_id'], 'mac_address': attributes.ATTR_NOT_SPECIFIED, 'admin_state_up': False, 'device_id': '', 'device_owner': n_constants.DEVICE_OWNER_LOADBALANCER, 'fixed_ips': [fixed_ip] } port = self._core_plugin.create_port(context, {'port': port_data}) vip_db.port_id = port['id'] # explicitly sync session with db context.session.flush() def create_vip(self, context, vip): v = vip['vip'] tenant_id = v['tenant_id'] with context.session.begin(subtransactions=True): if v['pool_id']: pool = self._get_resource(context, Pool, v['pool_id']) # validate that the pool has same tenant if pool['tenant_id'] != tenant_id: raise n_exc.NotAuthorized() # validate that the pool has same protocol if pool['protocol'] != v['protocol']: raise loadbalancer.ProtocolMismatch( vip_proto=v['protocol'], pool_proto=pool['protocol']) if pool['status'] == constants.PENDING_DELETE: raise loadbalancer.StateInvalid(state=pool['status'], id=pool['id']) vip_db = 
Vip(id=uuidutils.generate_uuid(), tenant_id=tenant_id, name=v['name'], description=v['description'], port_id=None, protocol_port=v['protocol_port'], protocol=v['protocol'], pool_id=v['pool_id'], connection_limit=v['connection_limit'], admin_state_up=v['admin_state_up'], status=constants.PENDING_CREATE) session_info = v['session_persistence'] if session_info: s_p = self._create_session_persistence_db( session_info, vip_db['id']) vip_db.session_persistence = s_p try: context.session.add(vip_db) context.session.flush() except exception.DBDuplicateEntry: raise loadbalancer.VipExists(pool_id=v['pool_id']) try: # create a port to reserve address for IPAM # do it outside the transaction to avoid rpc calls self._create_port_for_vip( context, vip_db, v['subnet_id'], v.get('address')) except Exception: # catch any kind of exceptions with excutils.save_and_reraise_exception(): context.session.delete(vip_db) context.session.flush() if v['pool_id']: # fetching pool again pool = self._get_resource(context, Pool, v['pool_id']) # (NOTE): we rely on the fact that pool didn't change between # above block and here vip_db['pool_id'] = v['pool_id'] pool['vip_id'] = vip_db['id'] # explicitly flush changes as we're outside any transaction context.session.flush() return self._make_vip_dict(vip_db) def update_vip(self, context, id, vip): v = vip['vip'] sess_persist = v.pop('session_persistence', None) with context.session.begin(subtransactions=True): vip_db = self._get_resource(context, Vip, id) self.assert_modification_allowed(vip_db) if sess_persist: self._update_vip_session_persistence(context, id, sess_persist) else: self._delete_session_persistence(context, id) if v: try: # in case new pool already has a vip # update will raise integrity error at first query old_pool_id = vip_db['pool_id'] vip_db.update(v) # If the pool_id is changed, we need to update # the associated pools if 'pool_id' in v: new_pool = self._get_resource(context, Pool, v['pool_id']) 
self.assert_modification_allowed(new_pool) # check that the pool matches the tenant_id if new_pool['tenant_id'] != vip_db['tenant_id']: raise n_exc.NotAuthorized() # validate that the pool has same protocol if new_pool['protocol'] != vip_db['protocol']: raise loadbalancer.ProtocolMismatch( vip_proto=vip_db['protocol'], pool_proto=new_pool['protocol']) if new_pool['status'] == constants.PENDING_DELETE: raise loadbalancer.StateInvalid( state=new_pool['status'], id=new_pool['id']) if old_pool_id: old_pool = self._get_resource( context, Pool, old_pool_id ) old_pool['vip_id'] = None new_pool['vip_id'] = vip_db['id'] except exception.DBDuplicateEntry: raise loadbalancer.VipExists(pool_id=v['pool_id']) return self._make_vip_dict(vip_db) def delete_vip(self, context, id): with context.session.begin(subtransactions=True): vip = self._get_resource(context, Vip, id) qry = context.session.query(Pool) for pool in qry.filter_by(vip_id=id): pool.update({"vip_id": None}) context.session.delete(vip) if vip.port: # this is a Neutron port self._core_plugin.delete_port(context, vip.port.id) def prevent_lbaas_port_deletion(self, context, port_id): try: port_db = self._core_plugin._get_port(context, port_id) except n_exc.PortNotFound: return # Check only if the owner is loadbalancer. 
if port_db['device_owner'] == n_constants.DEVICE_OWNER_LOADBALANCER: filters = {'port_id': [port_id]} if len(self.get_vips(context, filters=filters)) > 0: reason = _('has device owner %s') % port_db['device_owner'] raise n_exc.ServicePortInUse(port_id=port_db['id'], reason=reason) def subscribe(self): registry.subscribe( _prevent_lbaas_port_delete_callback, resources.PORT, events.BEFORE_DELETE) def get_vip(self, context, id, fields=None): vip = self._get_resource(context, Vip, id) return self._make_vip_dict(vip, fields) def get_vips(self, context, filters=None, fields=None): return self._get_collection(context, Vip, self._make_vip_dict, filters=filters, fields=fields) ######################################################## # Pool DB access def _make_pool_dict(self, pool, fields=None): res = {'id': pool['id'], 'tenant_id': pool['tenant_id'], 'name': pool['name'], 'description': pool['description'], 'subnet_id': pool['subnet_id'], 'protocol': pool['protocol'], 'vip_id': pool['vip_id'], 'lb_method': pool['lb_method'], 'admin_state_up': pool['admin_state_up'], 'status': pool['status'], 'status_description': pool['status_description'], 'provider': '' } if pool.provider: res['provider'] = pool.provider.provider_name # Get the associated members res['members'] = [member['id'] for member in pool['members']] # Get the associated health_monitors res['health_monitors'] = [ monitor['monitor_id'] for monitor in pool['monitors']] res['health_monitors_status'] = [ {'monitor_id': monitor['monitor_id'], 'status': monitor['status'], 'status_description': monitor['status_description']} for monitor in pool['monitors']] return self._fields(res, fields) def update_pool_stats(self, context, pool_id, data=None): """Update a pool with new stats structure.""" data = data or {} with context.session.begin(subtransactions=True): pool_db = self._get_resource(context, Pool, pool_id) self.assert_modification_allowed(pool_db) pool_db.stats = self._create_pool_stats(context, pool_id, data) for 
member, stats in data.get('members', {}).items(): stats_status = stats.get(lb_const.STATS_STATUS) if stats_status: self.update_status(context, Member, member, stats_status) def _create_pool_stats(self, context, pool_id, data=None): # This is internal method to add pool statistics. It won't # be exposed to API if not data: data = {} stats_db = PoolStatistics( pool_id=pool_id, bytes_in=data.get(lb_const.STATS_IN_BYTES, 0), bytes_out=data.get(lb_const.STATS_OUT_BYTES, 0), active_connections=data.get(lb_const.STATS_ACTIVE_CONNECTIONS, 0), total_connections=data.get(lb_const.STATS_TOTAL_CONNECTIONS, 0) ) return stats_db def _delete_pool_stats(self, context, pool_id): # This is internal method to delete pool statistics. It won't # be exposed to API with context.session.begin(subtransactions=True): stats_qry = context.session.query(PoolStatistics) try: stats = stats_qry.filter_by(pool_id=pool_id).one() except exc.NoResultFound: raise loadbalancer.PoolStatsNotFound(pool_id=pool_id) context.session.delete(stats) def create_pool(self, context, pool): v = pool['pool'] with context.session.begin(subtransactions=True): pool_db = Pool(id=uuidutils.generate_uuid(), tenant_id=v['tenant_id'], name=v['name'], description=v['description'], subnet_id=v['subnet_id'], protocol=v['protocol'], lb_method=v['lb_method'], admin_state_up=v['admin_state_up'], status=constants.PENDING_CREATE) pool_db.stats = self._create_pool_stats(context, pool_db['id']) context.session.add(pool_db) return self._make_pool_dict(pool_db) def update_pool(self, context, id, pool): p = pool['pool'] with context.session.begin(subtransactions=True): pool_db = self._get_resource(context, Pool, id) self.assert_modification_allowed(pool_db) if p: pool_db.update(p) return self._make_pool_dict(pool_db) def _ensure_pool_delete_conditions(self, context, pool_id): if context.session.query(Vip).filter_by(pool_id=pool_id).first(): raise loadbalancer.PoolInUse(pool_id=pool_id) def delete_pool(self, context, pool_id): # Check if 
the pool is in use self._ensure_pool_delete_conditions(context, pool_id) with context.session.begin(subtransactions=True): self._delete_pool_stats(context, pool_id) pool_db = self._get_resource(context, Pool, pool_id) context.session.delete(pool_db) def get_pool(self, context, id, fields=None): pool = self._get_resource(context, Pool, id) return self._make_pool_dict(pool, fields) def get_pools(self, context, filters=None, fields=None): collection = self._model_query(context, Pool) collection = self._apply_filters_to_query(collection, Pool, filters) return [self._make_pool_dict(c, fields) for c in collection] def stats(self, context, pool_id): with context.session.begin(subtransactions=True): pool = self._get_resource(context, Pool, pool_id) stats = pool['stats'] res = {lb_const.STATS_IN_BYTES: stats['bytes_in'], lb_const.STATS_OUT_BYTES: stats['bytes_out'], lb_const.STATS_ACTIVE_CONNECTIONS: stats['active_connections'], lb_const.STATS_TOTAL_CONNECTIONS: stats['total_connections']} return {'stats': res} def create_pool_health_monitor(self, context, health_monitor, pool_id): monitor_id = health_monitor['health_monitor']['id'] with context.session.begin(subtransactions=True): # To make sure health_monitor exist. 
self._get_resource(context, HealthMonitor, monitor_id) assoc_qry = context.session.query(PoolMonitorAssociation) assoc = assoc_qry.filter_by(pool_id=pool_id, monitor_id=monitor_id).first() if assoc: raise loadbalancer.PoolMonitorAssociationExists( monitor_id=monitor_id, pool_id=pool_id) pool = self._get_resource(context, Pool, pool_id) assoc = PoolMonitorAssociation(pool_id=pool_id, monitor_id=monitor_id, status=constants.PENDING_CREATE) pool.monitors.append(assoc) monitors = [monitor['monitor_id'] for monitor in pool['monitors']] res = {"health_monitor": monitors} return res def delete_pool_health_monitor(self, context, id, pool_id): with context.session.begin(subtransactions=True): assoc = self._get_pool_health_monitor(context, id, pool_id) pool = self._get_resource(context, Pool, pool_id) pool.monitors.remove(assoc) def _get_pool_health_monitor(self, context, id, pool_id): try: assoc_qry = context.session.query(PoolMonitorAssociation) return assoc_qry.filter_by(monitor_id=id, pool_id=pool_id).one() except exc.NoResultFound: raise loadbalancer.PoolMonitorAssociationNotFound( monitor_id=id, pool_id=pool_id) def get_pool_health_monitor(self, context, id, pool_id, fields=None): pool_hm = self._get_pool_health_monitor(context, id, pool_id) # need to add tenant_id for admin_or_owner policy check to pass hm = self.get_health_monitor(context, id) res = {'pool_id': pool_id, 'monitor_id': id, 'status': pool_hm['status'], 'status_description': pool_hm['status_description'], 'tenant_id': hm['tenant_id']} return self._fields(res, fields) def update_pool_health_monitor(self, context, id, pool_id, status, status_description=None): with context.session.begin(subtransactions=True): assoc = self._get_pool_health_monitor(context, id, pool_id) self.assert_modification_allowed(assoc) assoc.status = status assoc.status_description = status_description ######################################################## # Member DB access def _make_member_dict(self, member, fields=None): res = 
{'id': member['id'], 'tenant_id': member['tenant_id'], 'pool_id': member['pool_id'], 'address': member['address'], 'protocol_port': member['protocol_port'], 'weight': member['weight'], 'admin_state_up': member['admin_state_up'], 'status': member['status'], 'status_description': member['status_description']} return self._fields(res, fields) def create_member(self, context, member): v = member['member'] try: with context.session.begin(subtransactions=True): # ensuring that pool exists self._get_resource(context, Pool, v['pool_id']) member_db = Member(id=uuidutils.generate_uuid(), tenant_id=v['tenant_id'], pool_id=v['pool_id'], address=v['address'], protocol_port=v['protocol_port'], weight=v['weight'], admin_state_up=v['admin_state_up'], status=constants.PENDING_CREATE) context.session.add(member_db) return self._make_member_dict(member_db) except exception.DBDuplicateEntry: raise loadbalancer.MemberExists( address=v['address'], port=v['protocol_port'], pool=v['pool_id']) def update_member(self, context, id, member): v = member['member'] try: with context.session.begin(subtransactions=True): member_db = self._get_resource(context, Member, id) self.assert_modification_allowed(member_db) if v: member_db.update(v) return self._make_member_dict(member_db) except exception.DBDuplicateEntry: raise loadbalancer.MemberExists( address=member_db['address'], port=member_db['protocol_port'], pool=member_db['pool_id']) def delete_member(self, context, id): with context.session.begin(subtransactions=True): member_db = self._get_resource(context, Member, id) context.session.delete(member_db) def get_member(self, context, id, fields=None): member = self._get_resource(context, Member, id) return self._make_member_dict(member, fields) def get_members(self, context, filters=None, fields=None): return self._get_collection(context, Member, self._make_member_dict, filters=filters, fields=fields) ######################################################## # HealthMonitor DB access def 
_make_health_monitor_dict(self, health_monitor, fields=None): res = {'id': health_monitor['id'], 'tenant_id': health_monitor['tenant_id'], 'type': health_monitor['type'], 'delay': health_monitor['delay'], 'timeout': health_monitor['timeout'], 'max_retries': health_monitor['max_retries'], 'admin_state_up': health_monitor['admin_state_up']} # no point to add the values below to # the result if the 'type' is not HTTP/S if res['type'] in ['HTTP', 'HTTPS']: for attr in ['url_path', 'http_method', 'expected_codes']: res[attr] = health_monitor[attr] res['pools'] = [{'pool_id': p['pool_id'], 'status': p['status'], 'status_description': p['status_description']} for p in health_monitor.pools] return self._fields(res, fields) def create_health_monitor(self, context, health_monitor): v = health_monitor['health_monitor'] with context.session.begin(subtransactions=True): # setting ACTIVE status since healthmon is shared DB object monitor_db = HealthMonitor(id=uuidutils.generate_uuid(), tenant_id=v['tenant_id'], type=v['type'], delay=v['delay'], timeout=v['timeout'], max_retries=v['max_retries'], http_method=v['http_method'], url_path=v['url_path'], expected_codes=v['expected_codes'], admin_state_up=v['admin_state_up']) context.session.add(monitor_db) return self._make_health_monitor_dict(monitor_db) def update_health_monitor(self, context, id, health_monitor): v = health_monitor['health_monitor'] with context.session.begin(subtransactions=True): monitor_db = self._get_resource(context, HealthMonitor, id) self.assert_modification_allowed(monitor_db) if v: monitor_db.update(v) return self._make_health_monitor_dict(monitor_db) def delete_health_monitor(self, context, id): """Delete health monitor object from DB Raises an error if the monitor has associations with pools """ query = self._model_query(context, PoolMonitorAssociation) has_associations = query.filter_by(monitor_id=id).first() if has_associations: raise loadbalancer.HealthMonitorInUse(monitor_id=id) with 
context.session.begin(subtransactions=True): monitor_db = self._get_resource(context, HealthMonitor, id) context.session.delete(monitor_db) def get_health_monitor(self, context, id, fields=None): healthmonitor = self._get_resource(context, HealthMonitor, id) return self._make_health_monitor_dict(healthmonitor, fields) def get_health_monitors(self, context, filters=None, fields=None): return self._get_collection(context, HealthMonitor, self._make_health_monitor_dict, filters=filters, fields=fields) def check_subnet_in_use(self, context, subnet_id): query = context.session.query(Pool).filter_by(subnet_id=subnet_id) if query.count(): pool_id = query.one().id raise n_exc.SubnetInUse( reason=_LE("Subnet is used by loadbalancer pool %s") % pool_id) def _prevent_lbaas_port_delete_callback(resource, event, trigger, **kwargs): context = kwargs['context'] port_id = kwargs['port_id'] port_check = kwargs['port_check'] lbaasplugin = manager.NeutronManager.get_service_plugins().get( constants.LOADBALANCER) if lbaasplugin and port_check: lbaasplugin.prevent_lbaas_port_deletion(context, port_id) def is_subnet_in_use_callback(resource, event, trigger, **kwargs): service = manager.NeutronManager.get_service_plugins().get( constants.LOADBALANCER) if service: context = kwargs.get('context') subnet_id = kwargs.get('subnet_id') service.check_subnet_in_use(context, subnet_id) def subscribe(): registry.subscribe(is_subnet_in_use_callback, resources.SUBNET, events.BEFORE_DELETE) subscribe() neutron-lbaas-8.0.0/neutron_lbaas/db/loadbalancer/models.py0000664000567000056710000003357412701407726025141 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.api.v2 import attributes as attr from neutron.db import model_base from neutron.db import models_v2 from neutron.db import servicetype_db as st_db import sqlalchemy as sa from sqlalchemy.ext import orderinglist from sqlalchemy import orm from neutron_lbaas._i18n import _ from neutron_lbaas.services.loadbalancer import constants as lb_const class SessionPersistenceV2(model_base.BASEV2): __tablename__ = "lbaas_sessionpersistences" pool_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_pools.id"), primary_key=True, nullable=False) type = sa.Column(sa.Enum(*lb_const.SUPPORTED_SP_TYPES, name="lbaas_sesssionpersistences_typev2"), nullable=False) cookie_name = sa.Column(sa.String(1024), nullable=True) class LoadBalancerStatistics(model_base.BASEV2): """Represents load balancer statistics.""" NAME = 'loadbalancer_stats' __tablename__ = "lbaas_loadbalancer_statistics" loadbalancer_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_loadbalancers.id"), primary_key=True, nullable=False) bytes_in = sa.Column(sa.BigInteger, nullable=False) bytes_out = sa.Column(sa.BigInteger, nullable=False) active_connections = sa.Column(sa.BigInteger, nullable=False) total_connections = sa.Column(sa.BigInteger, nullable=False) @orm.validates('bytes_in', 'bytes_out', 'active_connections', 'total_connections') def validate_non_negative_int(self, key, value): if value < 0: data = {'key': key, 'value': value} raise ValueError(_('The %(key)s field can not have ' 'negative value. 
' 'Current value is %(value)d.') % data) return value class MemberV2(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): """Represents a v2 neutron load balancer member.""" NAME = 'member' __tablename__ = "lbaas_members" __table_args__ = ( sa.schema.UniqueConstraint('pool_id', 'address', 'protocol_port', name='uniq_pool_address_port_v2'), ) pool_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_pools.id"), nullable=False) address = sa.Column(sa.String(64), nullable=False) protocol_port = sa.Column(sa.Integer, nullable=False) weight = sa.Column(sa.Integer, nullable=True) admin_state_up = sa.Column(sa.Boolean(), nullable=False) subnet_id = sa.Column(sa.String(36), nullable=True) provisioning_status = sa.Column(sa.String(16), nullable=False) operating_status = sa.Column(sa.String(16), nullable=False) name = sa.Column(sa.String(attr.NAME_MAX_LEN), nullable=True) @property def root_loadbalancer(self): return self.pool.loadbalancer class HealthMonitorV2(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): """Represents a v2 neutron load balancer healthmonitor.""" NAME = 'healthmonitor' __tablename__ = "lbaas_healthmonitors" type = sa.Column(sa.Enum(*lb_const.SUPPORTED_HEALTH_MONITOR_TYPES, name="healthmonitors_typev2"), nullable=False) delay = sa.Column(sa.Integer, nullable=False) timeout = sa.Column(sa.Integer, nullable=False) max_retries = sa.Column(sa.Integer, nullable=False) http_method = sa.Column(sa.String(16), nullable=True) url_path = sa.Column(sa.String(255), nullable=True) expected_codes = sa.Column(sa.String(64), nullable=True) provisioning_status = sa.Column(sa.String(16), nullable=False) admin_state_up = sa.Column(sa.Boolean(), nullable=False) name = sa.Column(sa.String(attr.NAME_MAX_LEN), nullable=True) @property def root_loadbalancer(self): return self.pool.loadbalancer class LoadBalancer(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): """Represents a v2 neutron load balancer.""" NAME = 'loadbalancer' __tablename__ = 
"lbaas_loadbalancers" name = sa.Column(sa.String(255)) description = sa.Column(sa.String(255)) vip_subnet_id = sa.Column(sa.String(36), nullable=False) vip_port_id = sa.Column(sa.String(36), sa.ForeignKey( 'ports.id', name='fk_lbaas_loadbalancers_ports_id')) vip_address = sa.Column(sa.String(36)) provisioning_status = sa.Column(sa.String(16), nullable=False) operating_status = sa.Column(sa.String(16), nullable=False) admin_state_up = sa.Column(sa.Boolean(), nullable=False) vip_port = orm.relationship(models_v2.Port) stats = orm.relationship( LoadBalancerStatistics, uselist=False, backref=orm.backref("loadbalancer", uselist=False), cascade="all, delete-orphan", lazy='joined') provider = orm.relationship( st_db.ProviderResourceAssociation, uselist=False, lazy="joined", primaryjoin="LoadBalancer.id==ProviderResourceAssociation.resource_id", foreign_keys=[st_db.ProviderResourceAssociation.resource_id], # this is only for old API backwards compatibility because when a load # balancer is deleted the pool ID should be the same as the load # balancer ID and should not be cleared out in this table viewonly=True ) flavor_id = sa.Column(sa.String(36), sa.ForeignKey( 'flavors.id', name='fk_lbaas_loadbalancers_flavors_id')) @property def root_loadbalancer(self): return self class PoolV2(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): """Represents a v2 neutron load balancer pool.""" NAME = 'pool' __tablename__ = "lbaas_pools" name = sa.Column(sa.String(255), nullable=True) description = sa.Column(sa.String(255), nullable=True) loadbalancer_id = sa.Column(sa.String(36), sa.ForeignKey( "lbaas_loadbalancers.id")) healthmonitor_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_healthmonitors.id"), unique=True, nullable=True) protocol = sa.Column(sa.Enum(*lb_const.POOL_SUPPORTED_PROTOCOLS, name="pool_protocolsv2"), nullable=False) lb_algorithm = sa.Column(sa.Enum(*lb_const.SUPPORTED_LB_ALGORITHMS, name="lb_algorithmsv2"), nullable=False) admin_state_up = 
sa.Column(sa.Boolean(), nullable=False) provisioning_status = sa.Column(sa.String(16), nullable=False) operating_status = sa.Column(sa.String(16), nullable=False) members = orm.relationship(MemberV2, backref=orm.backref("pool", uselist=False), cascade="all, delete-orphan", lazy='joined') healthmonitor = orm.relationship( HealthMonitorV2, backref=orm.backref("pool", uselist=False), lazy='joined') session_persistence = orm.relationship( SessionPersistenceV2, uselist=False, backref=orm.backref("pool", uselist=False), cascade="all, delete-orphan", lazy='joined') loadbalancer = orm.relationship( LoadBalancer, uselist=False, backref=orm.backref("pools", uselist=True), lazy='joined') @property def root_loadbalancer(self): return self.loadbalancer # No real relationship here. But we want to fake a pool having a # 'listener_id' sometimes for API back-ward compatibility purposes. @property def listener(self): if self.listeners: return self.listeners[0] else: return None class SNI(model_base.BASEV2): """Many-to-many association between Listener and TLS container ids Making the SNI certificates list, ordered using the position """ NAME = 'sni' __tablename__ = "lbaas_sni" listener_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_listeners.id"), primary_key=True, nullable=False) tls_container_id = sa.Column(sa.String(128), primary_key=True, nullable=False) position = sa.Column(sa.Integer) @property def root_loadbalancer(self): return self.listener.loadbalancer class L7Rule(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): """Represents L7 Rule.""" NAME = 'l7rule' __tablename__ = "lbaas_l7rules" l7policy_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_l7policies.id"), nullable=False) type = sa.Column(sa.Enum(*lb_const.SUPPORTED_L7_RULE_TYPES, name="l7rule_typesv2"), nullable=False) compare_type = sa.Column(sa.Enum(*lb_const.SUPPORTED_L7_RULE_COMPARE_TYPES, name="l7rule_compare_typev2"), nullable=False) invert = sa.Column(sa.Boolean(), nullable=False) key = 
sa.Column(sa.String(255), nullable=True) value = sa.Column(sa.String(255), nullable=False) provisioning_status = sa.Column(sa.String(16), nullable=False) admin_state_up = sa.Column(sa.Boolean(), nullable=False) @property def root_loadbalancer(self): return self.policy.listener.loadbalancer class L7Policy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): """Represents L7 Policy.""" NAME = 'l7policy' __tablename__ = "lbaas_l7policies" name = sa.Column(sa.String(255), nullable=True) description = sa.Column(sa.String(255), nullable=True) listener_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_listeners.id"), nullable=False) action = sa.Column(sa.Enum(*lb_const.SUPPORTED_L7_POLICY_ACTIONS, name="l7policy_action_typesv2"), nullable=False) redirect_pool_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_pools.id"), nullable=True) redirect_url = sa.Column(sa.String(255), nullable=True) position = sa.Column(sa.Integer, nullable=False) provisioning_status = sa.Column(sa.String(16), nullable=False) admin_state_up = sa.Column(sa.Boolean(), nullable=False) rules = orm.relationship( L7Rule, uselist=True, lazy="joined", primaryjoin="L7Policy.id==L7Rule.l7policy_id", foreign_keys=[L7Rule.l7policy_id], cascade="all, delete-orphan", backref=orm.backref("policy") ) redirect_pool = orm.relationship( PoolV2, backref=orm.backref("l7_policies", uselist=True), lazy='joined') @property def root_loadbalancer(self): return self.listener.loadbalancer class Listener(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): """Represents a v2 neutron listener.""" NAME = 'listener' __tablename__ = "lbaas_listeners" __table_args__ = ( sa.schema.UniqueConstraint('loadbalancer_id', 'protocol_port', name='uniq_loadbalancer_listener_port'), ) name = sa.Column(sa.String(255)) description = sa.Column(sa.String(255)) default_pool_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_pools.id"), nullable=True) loadbalancer_id = sa.Column(sa.String(36), sa.ForeignKey( "lbaas_loadbalancers.id")) 
protocol = sa.Column(sa.Enum(*lb_const.LISTENER_SUPPORTED_PROTOCOLS, name="listener_protocolsv2"), nullable=False) default_tls_container_id = sa.Column(sa.String(128), default=None, nullable=True) sni_containers = orm.relationship( SNI, backref=orm.backref("listener", uselist=False), uselist=True, lazy="joined", primaryjoin="Listener.id==SNI.listener_id", order_by='SNI.position', collection_class=orderinglist.ordering_list( 'position'), foreign_keys=[SNI.listener_id], cascade="all, delete-orphan" ) protocol_port = sa.Column(sa.Integer, nullable=False) connection_limit = sa.Column(sa.Integer) admin_state_up = sa.Column(sa.Boolean(), nullable=False) provisioning_status = sa.Column(sa.String(16), nullable=False) operating_status = sa.Column(sa.String(16), nullable=False) default_pool = orm.relationship( PoolV2, backref=orm.backref("listeners"), lazy='joined') loadbalancer = orm.relationship( LoadBalancer, backref=orm.backref("listeners", uselist=True), lazy='joined') l7_policies = orm.relationship( L7Policy, uselist=True, lazy="joined", primaryjoin="Listener.id==L7Policy.listener_id", order_by="L7Policy.position", collection_class=orderinglist.ordering_list('position', count_from=1), foreign_keys=[L7Policy.listener_id], cascade="all, delete-orphan", backref=orm.backref("listener")) @property def root_loadbalancer(self): return self.loadbalancer neutron-lbaas-8.0.0/AUTHORS0000664000567000056710000002377112701410107016505 0ustar jenkinsjenkins00000000000000Aaron Rosen Aaron Rosen Abhishek Raut Adam Harwell Aishwarya Thangappa Akash Gangil Akihiro MOTOKI Akihiro Motoki Al Miller Aleks Chirko Alessandro Pilotti Alessio Ababilov Alessio Ababilov Alex Barclay Amir Sadoughi Anand Shanmugam Andre Pech Andreas Jaeger Andreas Jaeger Angus Lees Ann Kamyshnikova Arata Notsu Arie Bregman Armando Migliaccio Arvind Somy Arvind Somya Assaf Muller Avishay Balderman Bertrand Lallau Bertrand Lallau Bhuvan Arumugam Bo Chi Bo Wang Bob Kukura Bob Melander Bogdan Tabor Brad Hall Brandon 
Logan Brant Knudson Brian Waldon Carlos D. Garza Cedric Brandily Chang Bo Guo ChangBo Guo(gcb) Christian Berendt Chuck Short Clark Boylan Clint Byrum Craig Tracey Cyril Roelandt Dan Prince Dan Wendlandt Davanum Srinivas Dave Lapsley Deepak N Dirk Mueller Divya ChanneGowda Dongcan Ye Doug Hellmann Doug Hellmann Doug Wiegley Edgar Magana Elena Ezhova Emilien Macchi Eugene Nikanorov Evgeny Fedoruk Franklin Naval Gary Kotton Gary Kotton German Eichberger Gordon Chung Guilherme Salgado Hareesh Puthalath He Jie Xu Hemanth Ravi Henry Gessau Henry Gessau HenryVIII Hirofumi Ichihara Hong Hui Xiao Ignacio Scopetta Ihar Hrachyshka Ionuț Arțăriși Irena Berezovsky Isaku Yamahata Isaku Yamahata Ivar Lazzaro JJ Asghar Jacek Swiderski Jakub Libosvar James Arendt James E. Blair James E. Blair Jason Kölker Jay Pipes Jeremy Stanley Jiajun Liu Jian Wen Joe Gordon Joe Heck John Davidge John Dunning John Schwarz Jordan Tardif Juliano Martinez Julien Danjou Justin Lund Kai Qiang Wu Ken'ichi Ohmichi Kenji Yasui Keshava Bharadwaj Kevin Benton Kevin L. Mitchell Kobi Samoray Kris Lindgren Kun Huang Kyle Mestery Kyle Mestery LipingMao LiuNanke Luke Gorrie Major Hayden Manjeet Singh Bhatia Mark McClain Mark McClain Mark McLoughlin Martin Hickey Maru Newby Maru Newby Mate Lakat Mathieu Rohon Matt Riedemann Matthew Treinish Michael Durrant Michael Johnson Michael Johnson Miguel Angel Ajo Mohammad Banikazemi Monty Taylor Morgan Fainberg Motohiro OTSUKA Nachi Ueno Nachi Ueno Nader Lahouti Nir Magnezi OTSUKA, Yuanying Oleg Bondarev Ondřej Nový Pattabi Ayyasami Paul Michali Paul Michali Pauline Yeung Phillip Toohill Praneet Bachheti Rabi Mishra Rajaram Mallya Ralf Haferkamp Reedip Banerjee Rich Curran Richard Theis Rishabh Das Robert Mizielski Roman Podoliaka Rose Wiegley Rui Zang Russell Bryant Ryota MIBU Salvatore Orlando Salvatore Orlando Sam Betts Samer Deeb Santhosh Santhosh Kumar Santosh Sharma Sascha Peilicke Sascha Peilicke Sascha Peilicke Sean Dague Sean Dague Sean M. 
Collins Sean McCully Sergey Lukjanov Sergey Skripnick Sergey Vilgelm Shane McGough Shang Yong Shiv Haris Shuquan Huang Somik Behera Somik Behera Stephen Balukoff Stephen Balukoff Stephen Gran Sukhdev Sumit Naiksatam Sungjin Yook Susanne Balle Sushil Kumar Sylvain Afchain Takashi NATSUME Terry Wilson Thierry Carrez Thomas Bechtold Tim Miller Tony Breeds Toshiaki Higuchi Trevor Vardeman Trinath Somanchi Tyler Smith Vijay Venkatachalam Vijayendra Bhamidipati Vishal Agarwal Weidong Shao Wu Wenxiang YAMAMOTO Takashi YAMAMOTO Takashi Yaguang Tang Yatin Kumbhare Ying Liu Yong Sheng Gong Yong Sheng Gong Yoshihiro Kaneko Youcef Laribi Yuanchao Sun Zang MingJie Zhenguo Niu Zhenguo Niu ZhiQiang Fan ZhiQiang Fan Zhongyue Luo Zhou Zhihong Zhou Zhihong ajmiller alexpilotti armando-migliaccio armando-migliaccio berlin chenghang fishcried fujioka yuuichi fumihiko kakuma gongysh gongysh gordon chung huangpengtao jasonrad justin Lund lawrancejing linb liu-sheng liuqing liyingjun liyingjun lizheming llg8212 madhusudhan-kandadai madhusudhan-kandadai mark mcclain mathieu-rohon minwang nmagnezi ptoohill ptoohill1 rohitagarwalla ronak rossella sanuptpm shihanzhang sukhdev trinaths venkata anil vinkesh banka zhhuabj neutron-lbaas-8.0.0/requirements.txt0000664000567000056710000000150112701407727020722 0ustar jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
pbr>=1.6 # Apache-2.0 eventlet!=0.18.3,>=0.18.2 # MIT requests!=2.9.0,>=2.8.1 # Apache-2.0 netaddr!=0.7.16,>=0.7.12 # BSD neutron-lib>=0.0.1 # Apache-2.0 SQLAlchemy<1.1.0,>=1.0.10 # MIT alembic>=0.8.0 # MIT six>=1.9.0 # MIT oslo.config>=3.7.0 # Apache-2.0 oslo.db>=4.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 oslo.messaging>=4.0.0 # Apache-2.0 oslo.serialization>=1.10.0 # Apache-2.0 oslo.service>=1.0.0 # Apache-2.0 oslo.utils>=3.5.0 # Apache-2.0 python-barbicanclient>=3.3.0 # Apache-2.0 pyasn1 # BSD pyasn1-modules # BSD pyOpenSSL>=0.14 # Apache-2.0 stevedore>=1.5.0 # Apache-2.0 keystoneauth1>=2.1.0 # Apache-2.0 neutron-lbaas-8.0.0/HACKING.rst0000664000567000056710000000036012701407726017235 0ustar jenkinsjenkins00000000000000Neutron LBaaS Style Commandments ================================ Please see the Neutron HACKING.rst file for style commandments for neutron-lbaas: `Neutron HACKING.rst `_ neutron-lbaas-8.0.0/TESTING.rst0000664000567000056710000000050412701407726017306 0ustar jenkinsjenkins00000000000000Testing Neutron LBaaS ===================== Please see the TESTING.rst file for the Neutron project itself. 
This will have the latest up to date instructions for how to test Neutron, and will be applicable to neutron-lbaas as well: `Neutron TESTING.rst `_ neutron-lbaas-8.0.0/CONTRIBUTING.rst0000664000567000056710000000027212701407726020102 0ustar jenkinsjenkins00000000000000Please see the Neutron CONTRIBUTING.rst file for how to contribute to neutron-lbaas: `Neutron CONTRIBUTING.rst `_ neutron-lbaas-8.0.0/neutron_lbaas.egg-info/0000775000567000056710000000000012701410110021743 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/neutron_lbaas.egg-info/requires.txt0000664000567000056710000000061612701410107024354 0ustar jenkinsjenkins00000000000000pbr>=1.6 eventlet!=0.18.3,>=0.18.2 requests!=2.9.0,>=2.8.1 netaddr!=0.7.16,>=0.7.12 neutron-lib>=0.0.1 SQLAlchemy<1.1.0,>=1.0.10 alembic>=0.8.0 six>=1.9.0 oslo.config>=3.7.0 oslo.db>=4.1.0 oslo.log>=1.14.0 oslo.messaging>=4.0.0 oslo.serialization>=1.10.0 oslo.service>=1.0.0 oslo.utils>=3.5.0 python-barbicanclient>=3.3.0 pyasn1 pyasn1-modules pyOpenSSL>=0.14 stevedore>=1.5.0 keystoneauth1>=2.1.0 neutron-lbaas-8.0.0/neutron_lbaas.egg-info/dependency_links.txt0000664000567000056710000000000112701410107026017 0ustar jenkinsjenkins00000000000000 neutron-lbaas-8.0.0/neutron_lbaas.egg-info/not-zip-safe0000664000567000056710000000000112701410101024171 0ustar jenkinsjenkins00000000000000 neutron-lbaas-8.0.0/neutron_lbaas.egg-info/SOURCES.txt0000664000567000056710000003764412701410110023645 0ustar jenkinsjenkins00000000000000.coveragerc .mailmap .pylintrc .testr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE MANIFEST.in README.rst TESTING.rst babel.cfg requirements.txt setup.cfg setup.py test-requirements.txt tox.ini devstack/README.md devstack/plugin.sh devstack/settings devstack/files/debs/neutron-lbaas devstack/samples/README-Vagrant.md devstack/samples/README.md devstack/samples/Vagrantfile devstack/samples/local.conf devstack/samples/local.sh devstack/samples/webserver.sh doc/source/conf.py doc/source/index.rst 
doc/source/dashboards/check.dashboard.rst doc/source/dashboards/index.rst etc/README.txt etc/neutron/rootwrap.d/lbaas-haproxy.filters etc/oslo-config-generator/lbaas_agent.ini etc/oslo-config-generator/neutron_lbaas.conf etc/oslo-config-generator/services_lbaas.conf neutron_lbaas/__init__.py neutron_lbaas/_i18n.py neutron_lbaas/agent_scheduler.py neutron_lbaas/opts.py neutron_lbaas/version.py neutron_lbaas.egg-info/PKG-INFO neutron_lbaas.egg-info/SOURCES.txt neutron_lbaas.egg-info/dependency_links.txt neutron_lbaas.egg-info/entry_points.txt neutron_lbaas.egg-info/not-zip-safe neutron_lbaas.egg-info/pbr.json neutron_lbaas.egg-info/requires.txt neutron_lbaas.egg-info/top_level.txt neutron_lbaas/agent/__init__.py neutron_lbaas/agent/agent.py neutron_lbaas/agent/agent_api.py neutron_lbaas/agent/agent_device_driver.py neutron_lbaas/agent/agent_manager.py neutron_lbaas/common/__init__.py neutron_lbaas/common/exceptions.py neutron_lbaas/common/keystone.py neutron_lbaas/common/cert_manager/__init__.py neutron_lbaas/common/cert_manager/barbican_cert_manager.py neutron_lbaas/common/cert_manager/cert_manager.py neutron_lbaas/common/cert_manager/local_cert_manager.py neutron_lbaas/common/cert_manager/barbican_auth/__init__.py neutron_lbaas/common/cert_manager/barbican_auth/barbican_acl.py neutron_lbaas/common/cert_manager/barbican_auth/common.py neutron_lbaas/common/tls_utils/__init__.py neutron_lbaas/common/tls_utils/cert_parser.py neutron_lbaas/db/__init__.py neutron_lbaas/db/loadbalancer/__init__.py neutron_lbaas/db/loadbalancer/loadbalancer_db.py neutron_lbaas/db/loadbalancer/loadbalancer_dbv2.py neutron_lbaas/db/loadbalancer/models.py neutron_lbaas/db/migration/__init__.py neutron_lbaas/db/migration/alembic_migrations/README neutron_lbaas/db/migration/alembic_migrations/__init__.py neutron_lbaas/db/migration/alembic_migrations/env.py neutron_lbaas/db/migration/alembic_migrations/script.py.mako neutron_lbaas/db/migration/alembic_migrations/versions/364f9b6064f0_agentv2.py 
neutron_lbaas/db/migration/alembic_migrations/versions/4b6d8d5310b8_add_index_tenant_id.py neutron_lbaas/db/migration/alembic_migrations/versions/4ba00375f715_edge_driver.py neutron_lbaas/db/migration/alembic_migrations/versions/4deef6d81931_add_provisioning_and_operating_statuses.py neutron_lbaas/db/migration/alembic_migrations/versions/CONTRACT_HEAD neutron_lbaas/db/migration/alembic_migrations/versions/EXPAND_HEAD neutron_lbaas/db/migration/alembic_migrations/versions/kilo_release.py neutron_lbaas/db/migration/alembic_migrations/versions/lbaasv2.py neutron_lbaas/db/migration/alembic_migrations/versions/lbaasv2_tls.py neutron_lbaas/db/migration/alembic_migrations/versions/start_neutron_lbaas.py neutron_lbaas/db/migration/alembic_migrations/versions/liberty/contract/130ebfdef43_initial.py neutron_lbaas/db/migration/alembic_migrations/versions/liberty/expand/3345facd0452_initial.py neutron_lbaas/db/migration/alembic_migrations/versions/mitaka/expand/3426acbc12de_add_flavor_id.py neutron_lbaas/db/migration/alembic_migrations/versions/mitaka/expand/3543deab1547_add_l7_tables.py neutron_lbaas/db/migration/alembic_migrations/versions/mitaka/expand/4a408dd491c2_UpdateName.py neutron_lbaas/db/migration/alembic_migrations/versions/mitaka/expand/62deca5010cd_add_tenant_id_index_for_l7_tables.py neutron_lbaas/db/migration/alembic_migrations/versions/mitaka/expand/6aee0434f911_independent_pools.py neutron_lbaas/db/models/__init__.py neutron_lbaas/db/models/head.py neutron_lbaas/drivers/__init__.py neutron_lbaas/drivers/driver_base.py neutron_lbaas/drivers/driver_mixins.py neutron_lbaas/drivers/a10networks/__init__.py neutron_lbaas/drivers/a10networks/driver_v2.py neutron_lbaas/drivers/brocade/README neutron_lbaas/drivers/brocade/__init__.py neutron_lbaas/drivers/brocade/driver_v2.py neutron_lbaas/drivers/common/__init__.py neutron_lbaas/drivers/common/agent_callbacks.py neutron_lbaas/drivers/common/agent_driver_base.py neutron_lbaas/drivers/haproxy/__init__.py 
neutron_lbaas/drivers/haproxy/namespace_driver.py neutron_lbaas/drivers/haproxy/plugin_driver.py neutron_lbaas/drivers/kemptechnologies/__init__.py neutron_lbaas/drivers/kemptechnologies/config.py neutron_lbaas/drivers/kemptechnologies/driver_v2.py neutron_lbaas/drivers/logging_noop/__init__.py neutron_lbaas/drivers/logging_noop/driver.py neutron_lbaas/drivers/netscaler/__init__.py neutron_lbaas/drivers/netscaler/netscaler_driver_v2.py neutron_lbaas/drivers/octavia/__init__.py neutron_lbaas/drivers/octavia/driver.py neutron_lbaas/drivers/octavia/octavia_messaging_consumer.py neutron_lbaas/drivers/radware/__init__.py neutron_lbaas/drivers/radware/base_v2_driver.py neutron_lbaas/drivers/radware/exceptions.py neutron_lbaas/drivers/radware/rest_client.py neutron_lbaas/drivers/radware/v2_driver.py neutron_lbaas/drivers/vmware/__init__.py neutron_lbaas/drivers/vmware/edge_driver_v2.py neutron_lbaas/extensions/__init__.py neutron_lbaas/extensions/l7.py neutron_lbaas/extensions/lbaas_agentscheduler.py neutron_lbaas/extensions/lbaas_agentschedulerv2.py neutron_lbaas/extensions/loadbalancer.py neutron_lbaas/extensions/loadbalancerv2.py neutron_lbaas/extensions/sharedpools.py neutron_lbaas/services/__init__.py neutron_lbaas/services/loadbalancer/__init__.py neutron_lbaas/services/loadbalancer/agent_scheduler.py neutron_lbaas/services/loadbalancer/constants.py neutron_lbaas/services/loadbalancer/data_models.py neutron_lbaas/services/loadbalancer/plugin.py neutron_lbaas/services/loadbalancer/agent/__init__.py neutron_lbaas/services/loadbalancer/agent/agent.py neutron_lbaas/services/loadbalancer/agent/agent_api.py neutron_lbaas/services/loadbalancer/agent/agent_device_driver.py neutron_lbaas/services/loadbalancer/agent/agent_manager.py neutron_lbaas/services/loadbalancer/drivers/__init__.py neutron_lbaas/services/loadbalancer/drivers/abstract_driver.py neutron_lbaas/services/loadbalancer/drivers/a10networks/README.txt 
neutron_lbaas/services/loadbalancer/drivers/a10networks/__init__.py neutron_lbaas/services/loadbalancer/drivers/a10networks/driver_v1.py neutron_lbaas/services/loadbalancer/drivers/common/__init__.py neutron_lbaas/services/loadbalancer/drivers/common/agent_driver_base.py neutron_lbaas/services/loadbalancer/drivers/haproxy/__init__.py neutron_lbaas/services/loadbalancer/drivers/haproxy/cfg.py neutron_lbaas/services/loadbalancer/drivers/haproxy/jinja_cfg.py neutron_lbaas/services/loadbalancer/drivers/haproxy/namespace_driver.py neutron_lbaas/services/loadbalancer/drivers/haproxy/plugin_driver.py neutron_lbaas/services/loadbalancer/drivers/haproxy/synchronous_namespace_driver.py neutron_lbaas/services/loadbalancer/drivers/haproxy/templates/haproxy.loadbalancer.j2 neutron_lbaas/services/loadbalancer/drivers/haproxy/templates/haproxy_base.j2 neutron_lbaas/services/loadbalancer/drivers/haproxy/templates/haproxy_proxies.j2 neutron_lbaas/services/loadbalancer/drivers/logging_noop/__init__.py neutron_lbaas/services/loadbalancer/drivers/netscaler/__init__.py neutron_lbaas/services/loadbalancer/drivers/netscaler/ncc_client.py neutron_lbaas/services/loadbalancer/drivers/netscaler/netscaler_driver.py neutron_lbaas/services/loadbalancer/drivers/radware/__init__.py neutron_lbaas/services/loadbalancer/drivers/radware/driver.py neutron_lbaas/services/loadbalancer/drivers/radware/exceptions.py neutron_lbaas/services/loadbalancer/drivers/vmware/__init__.py neutron_lbaas/services/loadbalancer/drivers/vmware/db.py neutron_lbaas/services/loadbalancer/drivers/vmware/edge_driver.py neutron_lbaas/services/loadbalancer/drivers/vmware/models.py neutron_lbaas/tests/__init__.py neutron_lbaas/tests/base.py neutron_lbaas/tests/tools.py neutron_lbaas/tests/contrib/decode_args.sh neutron_lbaas/tests/contrib/gate_hook.sh neutron_lbaas/tests/contrib/post_test_hook.sh neutron_lbaas/tests/etc/neutron.conf neutron_lbaas/tests/tempest/README.rst neutron_lbaas/tests/tempest/__init__.py 
neutron_lbaas/tests/tempest/etc/__init__.py neutron_lbaas/tests/tempest/lib/__init__.py neutron_lbaas/tests/tempest/lib/services/__init__.py neutron_lbaas/tests/tempest/lib/services/network/__init__.py neutron_lbaas/tests/tempest/lib/services/network/json/__init__.py neutron_lbaas/tests/tempest/lib/services/network/json/network_client.py neutron_lbaas/tests/tempest/v1/__init__.py neutron_lbaas/tests/tempest/v1/api/__init__.py neutron_lbaas/tests/tempest/v1/api/base.py neutron_lbaas/tests/tempest/v1/api/clients.py neutron_lbaas/tests/tempest/v1/api/test_load_balancer.py neutron_lbaas/tests/tempest/v1/api/admin/__init__.py neutron_lbaas/tests/tempest/v1/api/admin/test_lbaas_agent_scheduler.py neutron_lbaas/tests/tempest/v1/api/admin/test_load_balancer_admin_actions.py neutron_lbaas/tests/tempest/v1/api/admin/test_quotas.py neutron_lbaas/tests/tempest/v1/scenario/__init__.py neutron_lbaas/tests/tempest/v2/__init__.py neutron_lbaas/tests/tempest/v2/api/__init__.py neutron_lbaas/tests/tempest/v2/api/base.py neutron_lbaas/tests/tempest/v2/api/test_health_monitor_admin.py neutron_lbaas/tests/tempest/v2/api/test_health_monitors_non_admin.py neutron_lbaas/tests/tempest/v2/api/test_listeners_admin.py neutron_lbaas/tests/tempest/v2/api/test_listeners_non_admin.py neutron_lbaas/tests/tempest/v2/api/test_load_balancers_admin.py neutron_lbaas/tests/tempest/v2/api/test_load_balancers_non_admin.py neutron_lbaas/tests/tempest/v2/api/test_members_admin.py neutron_lbaas/tests/tempest/v2/api/test_members_non_admin.py neutron_lbaas/tests/tempest/v2/api/test_pools_admin.py neutron_lbaas/tests/tempest/v2/api/test_pools_non_admin.py neutron_lbaas/tests/tempest/v2/clients/__init__.py neutron_lbaas/tests/tempest/v2/clients/health_monitors_client.py neutron_lbaas/tests/tempest/v2/clients/listeners_client.py neutron_lbaas/tests/tempest/v2/clients/load_balancers_client.py neutron_lbaas/tests/tempest/v2/clients/members_client.py neutron_lbaas/tests/tempest/v2/clients/pools_client.py 
neutron_lbaas/tests/tempest/v2/ddt/__init__.py neutron_lbaas/tests/tempest/v2/ddt/base_ddt.py neutron_lbaas/tests/tempest/v2/ddt/test_health_monitor_admin_state_up.py neutron_lbaas/tests/tempest/v2/ddt/test_listeners_admin_state.py neutron_lbaas/tests/tempest/v2/ddt/test_members_admin_state_up.py neutron_lbaas/tests/tempest/v2/scenario/__init__.py neutron_lbaas/tests/tempest/v2/scenario/base.py neutron_lbaas/tests/tempest/v2/scenario/test_healthmonitor_basic.py neutron_lbaas/tests/tempest/v2/scenario/test_listener_basic.py neutron_lbaas/tests/tempest/v2/scenario/test_load_balancer_basic.py neutron_lbaas/tests/tempest/v2/scenario/test_session_persistence.py neutron_lbaas/tests/unit/__init__.py neutron_lbaas/tests/unit/test_agent_scheduler.py neutron_lbaas/tests/unit/agent/__init__.py neutron_lbaas/tests/unit/agent/test_agent.py neutron_lbaas/tests/unit/agent/test_agent_api.py neutron_lbaas/tests/unit/agent/test_agent_manager.py neutron_lbaas/tests/unit/common/__init__.py neutron_lbaas/tests/unit/common/cert_manager/__init__.py neutron_lbaas/tests/unit/common/cert_manager/test_barbican.py neutron_lbaas/tests/unit/common/cert_manager/test_cert_manager.py neutron_lbaas/tests/unit/common/cert_manager/test_local.py neutron_lbaas/tests/unit/common/cert_manager/barbican_auth/__init__.py neutron_lbaas/tests/unit/common/cert_manager/barbican_auth/test_barbican_acl.py neutron_lbaas/tests/unit/common/tls_utils/__init__.py neutron_lbaas/tests/unit/common/tls_utils/test_cert_parser.py neutron_lbaas/tests/unit/db/__init__.py neutron_lbaas/tests/unit/db/loadbalancer/__init__.py neutron_lbaas/tests/unit/db/loadbalancer/test_db_loadbalancer.py neutron_lbaas/tests/unit/db/loadbalancer/test_db_loadbalancerv2.py neutron_lbaas/tests/unit/db/loadbalancer/test_migrations.py neutron_lbaas/tests/unit/drivers/__init__.py neutron_lbaas/tests/unit/drivers/a10networks/__init__.py neutron_lbaas/tests/unit/drivers/a10networks/test_driver_v2.py neutron_lbaas/tests/unit/drivers/brocade/__init__.py 
neutron_lbaas/tests/unit/drivers/brocade/test_driver_v2.py neutron_lbaas/tests/unit/drivers/common/__init__.py neutron_lbaas/tests/unit/drivers/common/test_agent_callbacks.py neutron_lbaas/tests/unit/drivers/common/test_agent_driver_base.py neutron_lbaas/tests/unit/drivers/haproxy/__init__.py neutron_lbaas/tests/unit/drivers/haproxy/test_namespace_driver.py neutron_lbaas/tests/unit/drivers/kemptechnologies/__init__.py neutron_lbaas/tests/unit/drivers/kemptechnologies/test_driver_v2.py neutron_lbaas/tests/unit/drivers/logging_noop/__init__.py neutron_lbaas/tests/unit/drivers/logging_noop/test_logging_noop_driver.py neutron_lbaas/tests/unit/drivers/netscaler/test_netscaler_driver_v2.py neutron_lbaas/tests/unit/drivers/octavia/__init__.py neutron_lbaas/tests/unit/drivers/octavia/test_octavia_driver.py neutron_lbaas/tests/unit/drivers/octavia/test_octavia_messaging_consumer.py neutron_lbaas/tests/unit/drivers/radware/__init__.py neutron_lbaas/tests/unit/drivers/radware/test_v2_plugin_driver.py neutron_lbaas/tests/unit/drivers/vmware/__init__.py neutron_lbaas/tests/unit/drivers/vmware/test_edge_driver_v2.py neutron_lbaas/tests/unit/services/__init__.py neutron_lbaas/tests/unit/services/loadbalancer/__init__.py neutron_lbaas/tests/unit/services/loadbalancer/test_agent_scheduler.py neutron_lbaas/tests/unit/services/loadbalancer/test_data_models.py neutron_lbaas/tests/unit/services/loadbalancer/test_loadbalancer_plugin.py neutron_lbaas/tests/unit/services/loadbalancer/test_loadbalancer_quota_ext.py neutron_lbaas/tests/unit/services/loadbalancer/agent/__init__.py neutron_lbaas/tests/unit/services/loadbalancer/agent/test_agent.py neutron_lbaas/tests/unit/services/loadbalancer/agent/test_agent_manager.py neutron_lbaas/tests/unit/services/loadbalancer/agent/test_api.py neutron_lbaas/tests/unit/services/loadbalancer/drivers/__init__.py neutron_lbaas/tests/unit/services/loadbalancer/drivers/test_agent_driver_base.py 
neutron_lbaas/tests/unit/services/loadbalancer/drivers/test_driver_base.py neutron_lbaas/tests/unit/services/loadbalancer/drivers/a10networks/__init__.py neutron_lbaas/tests/unit/services/loadbalancer/drivers/a10networks/test_driver_v1.py neutron_lbaas/tests/unit/services/loadbalancer/drivers/haproxy/__init__.py neutron_lbaas/tests/unit/services/loadbalancer/drivers/haproxy/test_cfg.py neutron_lbaas/tests/unit/services/loadbalancer/drivers/haproxy/test_jinja_cfg.py neutron_lbaas/tests/unit/services/loadbalancer/drivers/haproxy/test_namespace_driver.py neutron_lbaas/tests/unit/services/loadbalancer/drivers/haproxy/sample_configs/__init__.py neutron_lbaas/tests/unit/services/loadbalancer/drivers/haproxy/sample_configs/sample_configs.py neutron_lbaas/tests/unit/services/loadbalancer/drivers/netscaler/__init__.py neutron_lbaas/tests/unit/services/loadbalancer/drivers/netscaler/test_ncc_client.py neutron_lbaas/tests/unit/services/loadbalancer/drivers/netscaler/test_netscaler_driver.py neutron_lbaas/tests/unit/services/loadbalancer/drivers/radware/__init__.py neutron_lbaas/tests/unit/services/loadbalancer/drivers/radware/test_plugin_driver.py neutron_lbaas/tests/unit/services/loadbalancer/drivers/vmware/__init__.py neutron_lbaas/tests/unit/services/loadbalancer/drivers/vmware/test_edge_driver.py releasenotes/notes/.placeholder releasenotes/notes/config-file-generation-cddf85d9d4022e2d.yaml releasenotes/notes/lbaasv2-session-persistence-update-fix-67591cbc90d7ad92.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder tools/check_i18n.py tools/check_i18n_test_case.txt tools/clean.sh tools/generate_config_file_samples.sh tools/i18n_cfg.py tools/install_venv.py tools/install_venv_common.py tools/pretty_tox.sh tools/subunit-trace.py tools/tox_install.sh 
tools/with_venv.shneutron-lbaas-8.0.0/neutron_lbaas.egg-info/PKG-INFO0000664000567000056710000000267712701410107023062 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: neutron-lbaas Version: 8.0.0 Summary: OpenStack Networking Load Balancing as a Service Home-page: http://www.openstack.org/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: Welcome! ======== This package contains the code for the Neutron Load Balancer as a Service (LBaaS) service. This includes third-party drivers. This package requires Neutron to run. External Resources: =================== The homepage for Neutron is: http://launchpad.net/neutron. Use this site for asking for help, and filing bugs. We use a single Launchpad page for all Neutron projects. Code is available on git.openstack.org at: . Please refer to Neutron documentation for more information: `Neutron README.rst `_ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 neutron-lbaas-8.0.0/neutron_lbaas.egg-info/top_level.txt0000664000567000056710000000001612701410107024500 0ustar jenkinsjenkins00000000000000neutron_lbaas neutron-lbaas-8.0.0/neutron_lbaas.egg-info/pbr.json0000664000567000056710000000005612701410107023430 0ustar jenkinsjenkins00000000000000{"is_release": true, "git_version": "19b18f0"}neutron-lbaas-8.0.0/neutron_lbaas.egg-info/entry_points.txt0000664000567000056710000000407512701410107025255 0ustar jenkinsjenkins00000000000000[console_scripts] neutron-lbaas-agent = neutron_lbaas.services.loadbalancer.agent.agent:main neutron-lbaasv2-agent = neutron_lbaas.agent.agent:main [device_drivers] 
neutron.services.loadbalancer.drivers.a10networks.driver_v1.ThunderDriver = neutron_lbaas.services.loadbalancer.drivers.a10networks.driver_v1:ThunderDriver neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver = neutron_lbaas.services.loadbalancer.drivers.haproxy.namespace_driver:HaproxyNSDriver neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver = neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver:HaproxyOnHostPluginDriver neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver = neutron_lbaas.services.loadbalancer.drivers.netscaler.netscaler_driver:NetScalerPluginDriver neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver = neutron_lbaas.services.loadbalancer.drivers.radware.driver:LoadBalancerDriver [loadbalancer_schedulers] neutron_lbaas.agent_scheduler.ChanceScheduler = neutron_lbaas.agent_scheduler:ChanceScheduler [neutron.db.alembic_migrations] neutron-lbaas = neutron_lbaas.db.migration:alembic_migrations [neutron.service_plugins] lbaasv2 = neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPluginv2 [neutron_lbaas.cert_manager.backend] barbican = neutron_lbaas.common.cert_manager.barbican_cert_manager local = neutron_lbaas.common.cert_manager.local_cert_manager [neutron_lbaas.cert_manager.barbican_auth] barbican_acl_auth = neutron_lbaas.common.cert_manager.barbican_auth.barbican_acl:BarbicanACLAuth [oslo.config.opts] neutron.lbaas = neutron_lbaas.opts:list_opts neutron.lbaas.agent = neutron_lbaas.opts:list_agent_opts neutron.lbaas.service = neutron_lbaas.opts:list_service_opts [pool_schedulers] neutron.services.loadbalancer.agent_scheduler.ChanceScheduler = neutron_lbaas.services.loadbalancer.agent_scheduler:ChanceScheduler neutron.services.loadbalancer.agent_scheduler.LeastPoolAgentScheduler = neutron_lbaas.services.loadbalancer.agent_scheduler:LeastPoolAgentScheduler 
neutron-lbaas-8.0.0/releasenotes/0000775000567000056710000000000012701410110020106 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/releasenotes/notes/0000775000567000056710000000000012701410110021236 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/releasenotes/notes/lbaasv2-session-persistence-update-fix-67591cbc90d7ad92.yaml0000664000567000056710000000142312701407726033773 0ustar jenkinsjenkins00000000000000--- issues: - | CLI update does not allow clearing session_persistence. In the process of fixing the session_persistence update bug, we discovered that the CLI for updating LBaaS v2 pools does not allow one to clear the session_persistence for a pool once set. A fix for this is being prepared, but in the mean time, the following work-arounds are possible if a given pool's session_persistence parameter needs to be changed: * The pool can be deleted and recreated without session_persistence. * A tenant can update the session persistence by talking directly to the API using curl. fixes: - | session_persistence on a LBaaSv2 pool is no longer deleted when other pool parameters are updated via the CLI or API. neutron-lbaas-8.0.0/releasenotes/notes/config-file-generation-cddf85d9d4022e2d.yaml0000664000567000056710000000044312701407726031040 0ustar jenkinsjenkins00000000000000--- prelude: > Generation of sample Neutron LBaaS configuration files. features: - Neutron LBaaS no longer includes static example configuration files. Instead, use tools/generate_config_file_samples.sh to generate them. The files are generated with a .sample extension. 
neutron-lbaas-8.0.0/releasenotes/notes/.placeholder0000664000567000056710000000000012701407726023532 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/releasenotes/source/0000775000567000056710000000000012701410110021406 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/releasenotes/source/index.rst0000664000567000056710000000022112701407727023266 0ustar jenkinsjenkins00000000000000============================= Neutron LBaaS Release Notes ============================= .. toctree:: :maxdepth: 1 liberty unreleased neutron-lbaas-8.0.0/releasenotes/source/_templates/0000775000567000056710000000000012701410110023543 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/releasenotes/source/_templates/.placeholder0000664000567000056710000000000012701407726026037 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/releasenotes/source/unreleased.rst0000664000567000056710000000016012701407726024307 0ustar jenkinsjenkins00000000000000============================== Current Series Release Notes ============================== .. release-notes:: neutron-lbaas-8.0.0/releasenotes/source/liberty.rst0000664000567000056710000000022212701407726023631 0ustar jenkinsjenkins00000000000000============================== Liberty Series Release Notes ============================== .. release-notes:: :branch: origin/stable/liberty neutron-lbaas-8.0.0/releasenotes/source/conf.py0000664000567000056710000002177312701407726022742 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. # Neutron LBaaS Release Notes documentation build configuration file, created # by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'oslosphinx', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Neutron LBaaS Release Notes' copyright = u'2015, Neutron LBaaS Developers' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. from neutron_lbaas.version import version_info as neutron_lbaas_version # The full version, including alpha/beta/rc tags. release = neutron_lbaas_version.version_string_with_vcs() # The short X.Y version. 
version = neutron_lbaas_version.canonical_version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. 
# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. 
The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'NeutronLBaaSReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'NeutronLBaaSReleaseNotes.tex', u'Neutron LBaaS Release Notes Documentation', u'Neutron LBaaS Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'neutronlbaasreleasenotes', u'Neutron LBaaS Release Notes ' 'Documentation', [u'Neutron LBaaS Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'NeutronLBaaSReleaseNotes', u'Neutron LBaaS Release Notes ' 'Documentation', u'Neutron LBaaS Developers', 'NeutronLBaaSReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False neutron-lbaas-8.0.0/releasenotes/source/_static/0000775000567000056710000000000012701410110023034 5ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/releasenotes/source/_static/.placeholder0000664000567000056710000000000012701407726025330 0ustar jenkinsjenkins00000000000000neutron-lbaas-8.0.0/README.rst0000664000567000056710000000120412701407726017124 0ustar jenkinsjenkins00000000000000Welcome! ======== This package contains the code for the Neutron Load Balancer as a Service (LBaaS) service. This includes third-party drivers. This package requires Neutron to run. External Resources: =================== The homepage for Neutron is: http://launchpad.net/neutron. Use this site for asking for help, and filing bugs. We use a single Launchpad page for all Neutron projects. Code is available on git.openstack.org at: . Please refer to Neutron documentation for more information: `Neutron README.rst `_ neutron-lbaas-8.0.0/.mailmap0000664000567000056710000000111612701407726017060 0ustar jenkinsjenkins00000000000000# Format is: # # lawrancejing Jiajun Liu Zhongyue Luo Kun Huang Zhenguo Niu Isaku Yamahata Isaku Yamahata Morgan Fainberg neutron-lbaas-8.0.0/setup.py0000664000567000056710000000200412701407726017146 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=1.8'], pbr=True) neutron-lbaas-8.0.0/.pylintrc0000664000567000056710000000537412701407726017316 0ustar jenkinsjenkins00000000000000# The format of this file isn't really documented; just use --generate-rcfile [MASTER] # Add to the black list. It should be a base name, not a # path. You may set this option multiple times. # # Note the 'openstack' below is intended to match only # neutron.openstack.common. If we ever have another 'openstack' # dirname, then we'll need to expand the ignore features in pylint :/ ignore=.git,tests,openstack [MESSAGES CONTROL] # NOTE(gus): This is a long list. 
A number of these are important and # should be re-enabled once the offending code is fixed (or marked # with a local disable) disable= # "F" Fatal errors that prevent further processing import-error, # "E" Error for important programming issues (likely bugs) no-member, too-many-function-args, # "W" Warnings for stylistic problems or minor programming issues abstract-method, broad-except, expression-not-assigned, fixme, global-statement, no-init, protected-access, redefined-builtin, star-args, unused-argument, # "C" Coding convention violations bad-continuation, invalid-name, missing-docstring, # "R" Refactor recommendations abstract-class-little-used, abstract-class-not-used, duplicate-code, interface-not-implemented, no-self-use, too-few-public-methods, too-many-ancestors, too-many-arguments, too-many-branches, too-many-instance-attributes, too-many-lines, too-many-locals, too-many-public-methods, too-many-return-statements, too-many-statements [BASIC] # Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ # Argument names can be 2 to 31 characters long, with lowercase and underscores argument-rgx=[a-z_][a-z0-9_]{1,30}$ # Method names should be at least 3 characters long # and be lowecased with underscores method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$ # Module names matching neutron-* are ok (files in bin/) module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$ # Don't require docstrings on tests. no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [FORMAT] # Maximum number of characters on a single line. max-line-length=79 [VARIABLES] # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. # _ is used by our localization additional-builtins=_ [CLASSES] # List of interface methods to ignore, separated by a comma. 
ignore-iface-methods= [IMPORTS] # Deprecated modules which should not be used, separated by a comma deprecated-modules= # should use openstack.common.jsonutils json [TYPECHECK] # List of module names for which member attributes should not be checked ignored-modules=six.moves,_MovedItems [REPORTS] # Tells whether to display a full report or only the messages reports=no neutron-lbaas-8.0.0/test-requirements.txt0000664000567000056710000000150012701407727021676 0ustar jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. hacking<0.11,>=0.10.0 coverage>=3.6 # Apache-2.0 fixtures>=1.3.1 # Apache-2.0/BSD mock>=1.2 # BSD python-subunit>=0.0.18 # Apache-2.0/BSD requests-mock>=0.7.0 # Apache-2.0 sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 oslo.concurrency>=3.5.0 # Apache-2.0 oslotest>=1.10.0 # Apache-2.0 tempest-lib>=0.14.0 # Apache-2.0 testrepository>=0.0.18 # Apache-2.0/BSD testresources>=0.2.4 # Apache-2.0/BSD testtools>=1.4.0 # MIT testscenarios>=0.4 # Apache-2.0/BSD WebOb>=1.2.3 # MIT WebTest>=2.0 # MIT reno>=0.1.1 # Apache2 # Needed to run DB commands in virtualenvs PyMySQL>=0.6.2 # MIT License