neutron-vpnaas-8.0.0/devstack/settings
# Settings for the VPNaaS devstack plugin
enable_service neutron-vpnaas
AGENT_VPN_BINARY="$NEUTRON_BIN_DIR/neutron-vpn-agent"
# Plugin
VPN_PLUGIN=${VPN_PLUGIN:-"neutron_vpnaas.services.vpn.plugin.VPNDriverPlugin"}
# Service Driver
NEUTRON_VPNAAS_SERVICE_PROVIDER=${NEUTRON_VPNAAS_SERVICE_PROVIDER:-"VPN:openswan:neutron_vpnaas.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default"}
# Device driver
IPSEC_PACKAGE=${IPSEC_PACKAGE:-"openswan"}
NEUTRON_VPNAAS_DEVICE_DRIVER=${NEUTRON_VPNAAS_DEVICE_DRIVER:-"neutron_vpnaas.services.vpn.device_drivers.ipsec.OpenSwanDriver"}
# Config files
NEUTRON_VPNAAS_DIR=$DEST/neutron-vpnaas
Q_VPN_CONF_FILE=$NEUTRON_CONF_DIR/vpn_agent.ini
NEUTRON_VPNAAS_CONF=$NEUTRON_CONF_DIR/neutron_vpnaas.conf
declare -a Q_VPN_EXTRA_CONF_FILES
# Needed because, if both FWaaS and VPNaaS are enabled, the FWaaS config must
# be included when starting the agent (the VPN agent is an L3 agent, which is
# in turn a FWaaS agent).
Q_FWAAS_CONF_FILE=$NEUTRON_CONF_DIR/fwaas_driver.ini
neutron-vpnaas-8.0.0/devstack/plugin.sh
# plugin.sh - DevStack plugin.sh dispatch script template
VPNAAS_XTRACE=$(set +o | grep xtrace)
set -o xtrace
function neutron_vpnaas_install {
setup_develop $NEUTRON_VPNAAS_DIR
if is_service_enabled q-l3; then
neutron_agent_vpnaas_install_agent_packages
fi
}
function neutron_agent_vpnaas_install_agent_packages {
install_package $IPSEC_PACKAGE
if is_ubuntu && [[ "$IPSEC_PACKAGE" == "strongswan" ]]; then
install_package apparmor
sudo ln -sf /etc/apparmor.d/usr.lib.ipsec.charon /etc/apparmor.d/disable/
sudo ln -sf /etc/apparmor.d/usr.lib.ipsec.stroke /etc/apparmor.d/disable/
# NOTE: Due to https://bugs.launchpad.net/ubuntu/+source/apparmor/+bug/1387220
# one must use 'sudo start apparmor ACTION=reload' for Ubuntu 14.10
restart_service apparmor
fi
}
function neutron_vpnaas_configure_common {
cp $NEUTRON_VPNAAS_DIR/etc/neutron_vpnaas.conf.sample $NEUTRON_VPNAAS_CONF
_neutron_service_plugin_class_add $VPN_PLUGIN
_neutron_deploy_rootwrap_filters $NEUTRON_VPNAAS_DIR
inicomment $NEUTRON_VPNAAS_CONF service_providers service_provider
iniadd $NEUTRON_VPNAAS_CONF service_providers service_provider $NEUTRON_VPNAAS_SERVICE_PROVIDER
iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES
}
function neutron_vpnaas_configure_db {
$NEUTRON_BIN_DIR/neutron-db-manage --subproject neutron-vpnaas --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head
}
function neutron_vpnaas_configure_agent {
local conf_file=${1:-$Q_VPN_CONF_FILE}
cp $NEUTRON_VPNAAS_DIR/etc/vpn_agent.ini.sample $conf_file
if [[ "$IPSEC_PACKAGE" == "strongswan" ]]; then
if is_fedora; then
iniset_multiline $conf_file vpnagent vpn_device_driver neutron_vpnaas.services.vpn.device_drivers.fedora_strongswan_ipsec.FedoraStrongSwanDriver
else
iniset_multiline $conf_file vpnagent vpn_device_driver neutron_vpnaas.services.vpn.device_drivers.strongswan_ipsec.StrongSwanDriver
fi
elif [[ "$IPSEC_PACKAGE" == "libreswan" ]]; then
iniset_multiline $conf_file vpnagent vpn_device_driver neutron_vpnaas.services.vpn.device_drivers.libreswan_ipsec.LibreSwanDriver
else
iniset_multiline $conf_file vpnagent vpn_device_driver $NEUTRON_VPNAAS_DEVICE_DRIVER
fi
}
function neutron_vpnaas_start {
local cfg_file
local opts="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE --config-file=$Q_VPN_CONF_FILE"
if is_service_enabled q-fwaas; then
opts+=" --config-file $Q_FWAAS_CONF_FILE"
fi
for cfg_file in ${Q_VPN_EXTRA_CONF_FILES[@]}; do
opts+=" --config-file $cfg_file"
done
run_process neutron-vpnaas "$AGENT_VPN_BINARY $opts"
}
function neutron_vpnaas_stop {
local ipsec_data_dir=$DATA_DIR/neutron/ipsec
local pids
if [ -d $ipsec_data_dir ]; then
pids=$(find $ipsec_data_dir -name 'pluto.pid' -exec cat {} \;)
fi
if [ -n "$pids" ]; then
sudo kill $pids
fi
stop_process neutron-vpnaas
}
function neutron_vpnaas_generate_config_files {
# Uses oslo config generator to generate VPNaaS sample configuration files
(cd $NEUTRON_VPNAAS_DIR && exec sudo ./tools/generate_config_file_samples.sh)
}
# Main plugin processing
# NOP for pre-install step
if [[ "$1" == "stack" && "$2" == "install" ]]; then
echo_summary "Installing neutron-vpnaas"
neutron_vpnaas_install
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
neutron_vpnaas_generate_config_files
neutron_vpnaas_configure_common
if is_service_enabled q-svc; then
echo_summary "Configuring neutron-vpnaas on controller"
neutron_vpnaas_configure_db
fi
if is_service_enabled q-l3; then
echo_summary "Configuring neutron-vpnaas agent"
neutron_vpnaas_configure_agent
fi
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
if is_service_enabled q-l3; then
echo_summary "Initializing neutron-vpnaas"
neutron_vpnaas_start
fi
elif [[ "$1" == "unstack" ]]; then
if is_service_enabled q-l3; then
neutron_vpnaas_stop
fi
# NOP for clean step
fi
$VPNAAS_XTRACE
neutron-vpnaas-8.0.0/devstack/README.md
This directory contains the neutron-vpnaas devstack plugin. Please
see the devref for how to set up VPNaaS with devstack.
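A minimal sketch of enabling the plugin from a DevStack `local.conf` is shown
below; the repository URL and the `stable/mitaka` branch are assumptions that
happen to match this 8.0.0 release, so adjust them for your environment:

```
[[local|localrc]]
enable_plugin neutron-vpnaas https://git.openstack.org/openstack/neutron-vpnaas stable/mitaka
```

The plugin's `settings` file then enables the `neutron-vpnaas` service and picks
the IPsec device driver (`openswan` by default, overridable via `IPSEC_PACKAGE`).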
neutron-vpnaas-8.0.0/etc/neutron/rootwrap.d/vpnaas.filters
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root
ipsec: CommandFilter, ipsec, root
strongswan: CommandFilter, strongswan, root
neutron_netns_wrapper: CommandFilter, neutron-vpn-netns-wrapper, root
neutron_netns_wrapper_local: CommandFilter, /usr/local/bin/neutron-vpn-netns-wrapper, root
chown: RegExpFilter, chown, root, chown, --from=.*, root.root, .*/ipsec.secrets
neutron-vpnaas-8.0.0/etc/oslo-config-generator/vpn_agent.ini
[DEFAULT]
output_file = etc/vpn_agent.ini.sample
wrap_width = 79
namespace = neutron.vpnaas.agent
neutron-vpnaas-8.0.0/etc/oslo-config-generator/neutron_vpnaas.conf
[DEFAULT]
output_file = etc/neutron_vpnaas.conf.sample
wrap_width = 79
namespace = neutron.vpnaas
neutron-vpnaas-8.0.0/etc/README.txt
To generate the sample neutron VPNaaS configuration files, run the following
command from the top level of the neutron VPNaaS directory:
tox -e genconfig
If a 'tox' environment is unavailable, then you can run the following script
instead to generate the configuration files:
./tools/generate_config_file_samples.sh
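For example, either command writes the sample files alongside the generator
configuration (output paths taken from the files under etc/oslo-config-generator):

    # run from the top level of the neutron-vpnaas tree
    tox -e genconfig        # or: ./tools/generate_config_file_samples.sh
    ls etc/neutron_vpnaas.conf.sample etc/vpn_agent.ini.sample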
neutron-vpnaas-8.0.0/babel.cfg
[python: **.py]
neutron-vpnaas-8.0.0/.coveragerc
[run]
branch = True
source = neutron_vpnaas
omit = neutron_vpnaas/tests/*
[report]
ignore_errors = True
neutron-vpnaas-8.0.0/setup.cfg
[metadata]
name = neutron-vpnaas
summary = OpenStack Networking VPN as a Service
description-file =
README.rst
author = OpenStack
author-email = openstack-dev@lists.openstack.org
home-page = http://www.openstack.org/
classifier =
Environment :: OpenStack
Intended Audience :: Information Technology
Intended Audience :: System Administrators
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
[files]
packages =
neutron_vpnaas
data_files =
etc/neutron/rootwrap.d =
etc/neutron/rootwrap.d/vpnaas.filters
[global]
setup-hooks =
pbr.hooks.setup_hook
[entry_points]
console_scripts =
neutron-vpn-netns-wrapper = neutron_vpnaas.services.vpn.common.netns_wrapper:main
neutron-vpn-agent = neutron_vpnaas.cmd.eventlet.agent:main
neutron-vyatta-agent = neutron_vpnaas.cmd.eventlet.vyatta_agent:main
device_drivers =
neutron.services.vpn.device_drivers.ipsec.OpenSwanDriver = neutron_vpnaas.services.vpn.device_drivers.ipsec:OpenSwanDriver
neutron.services.vpn.device_drivers.cisco_ipsec.CiscoCsrIPsecDriver = neutron_vpnaas.services.vpn.device_drivers.cisco_ipsec:CiscoCsrIPsecDriver
neutron.services.vpn.device_drivers.vyatta_ipsec.VyattaIPsecDriver = neutron_vpnaas.services.vpn.device_drivers.vyatta_ipsec:VyattaIPsecDriver
neutron.db.alembic_migrations =
neutron-vpnaas = neutron_vpnaas.db.migration:alembic_migrations
oslo.config.opts =
neutron.vpnaas = neutron_vpnaas.opts:list_opts
neutron.vpnaas.agent = neutron_vpnaas.opts:list_agent_opts
[build_sphinx]
all_files = 1
build-dir = doc/build
source-dir = doc/source
[extract_messages]
keywords = _ gettext ngettext l_ lazy_gettext
mapping_file = babel.cfg
output_file = neutron_vpnaas/locale/neutron_vpnaas.pot
[compile_catalog]
directory = neutron_vpnaas/locale
domain = neutron_vpnaas
[update_catalog]
domain = neutron_vpnaas
output_dir = neutron_vpnaas/locale
input_file = neutron_vpnaas/locale/neutron_vpnaas.pot
[wheel]
universal = 1
[pbr]
warnerrors = true
[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0
neutron-vpnaas-8.0.0/tools/pretty_tox.sh
#! /bin/sh
TESTRARGS=$1
exec 3>&1
status=$(exec 4>&1 >&3; ( lockutils-wrapper python setup.py testr --slowest --testr-args="--subunit $TESTRARGS"; echo $? >&4 ) | $(dirname $0)/subunit-trace.py -f) && exit $status
neutron-vpnaas-8.0.0/tools/check_i18n.py
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import compiler
import imp
import os.path
import sys
def is_localized(node):
"""Check message wrapped by _()"""
if isinstance(node.parent, compiler.ast.CallFunc):
if isinstance(node.parent.node, compiler.ast.Name):
if node.parent.node.name == '_':
return True
return False
class ASTWalker(compiler.visitor.ASTVisitor):
def default(self, node, *args):
for child in node.getChildNodes():
child.parent = node
compiler.visitor.ASTVisitor.default(self, node, *args)
class Visitor(object):
def __init__(self, filename, i18n_msg_predicates,
msg_format_checkers, debug):
self.filename = filename
self.debug = debug
self.error = 0
self.i18n_msg_predicates = i18n_msg_predicates
self.msg_format_checkers = msg_format_checkers
with open(filename) as f:
self.lines = f.readlines()
def visitConst(self, node):
if not isinstance(node.value, str):
return
if is_localized(node):
for (checker, msg) in self.msg_format_checkers:
if checker(node):
print('%s:%d %s: %s Error: %s' %
(self.filename, node.lineno,
self.lines[node.lineno - 1][:-1],
checker.__name__, msg),
file=sys.stderr)
self.error = 1
return
if debug:
print('%s:%d %s: %s' %
(self.filename, node.lineno,
self.lines[node.lineno - 1][:-1],
"Pass"))
else:
for (predicate, action, msg) in self.i18n_msg_predicates:
if predicate(node):
if action == 'skip':
if debug:
print('%s:%d %s: %s' %
(self.filename, node.lineno,
self.lines[node.lineno - 1][:-1],
"Pass"))
return
elif action == 'error':
print('%s:%d %s: %s Error: %s' %
(self.filename, node.lineno,
self.lines[node.lineno - 1][:-1],
predicate.__name__, msg),
file=sys.stderr)
self.error = 1
return
elif action == 'warn':
print('%s:%d %s: %s' %
(self.filename, node.lineno,
self.lines[node.lineno - 1][:-1],
"Warn: %s" % msg))
return
print('Predicate with wrong action!', file=sys.stderr)
def is_file_in_black_list(black_list, f):
for f in black_list:
if os.path.abspath(input_file).startswith(
os.path.abspath(f)):
return True
return False
def check_i18n(input_file, i18n_msg_predicates, msg_format_checkers, debug):
input_mod = compiler.parseFile(input_file)
v = compiler.visitor.walk(input_mod,
Visitor(input_file,
i18n_msg_predicates,
msg_format_checkers,
debug),
ASTWalker())
return v.error
if __name__ == '__main__':
input_path = sys.argv[1]
cfg_path = sys.argv[2]
try:
cfg_mod = imp.load_source('', cfg_path)
except Exception:
print("Load cfg module failed", file=sys.stderr)
sys.exit(1)
i18n_msg_predicates = cfg_mod.i18n_msg_predicates
msg_format_checkers = cfg_mod.msg_format_checkers
black_list = cfg_mod.file_black_list
debug = False
if len(sys.argv) > 3:
if sys.argv[3] == '-d':
debug = True
if os.path.isfile(input_path):
sys.exit(check_i18n(input_path,
i18n_msg_predicates,
msg_format_checkers,
debug))
error = 0
for dirpath, dirs, files in os.walk(input_path):
for f in files:
if not f.endswith('.py'):
continue
input_file = os.path.join(dirpath, f)
if is_file_in_black_list(black_list, input_file):
continue
if check_i18n(input_file,
i18n_msg_predicates,
msg_format_checkers,
debug):
error = 1
sys.exit(error)
neutron-vpnaas-8.0.0/tools/subunit-trace.py
#!/usr/bin/env python
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Copyright 2014 Samsung Electronics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Trace a subunit stream in reasonable detail and high accuracy."""
import argparse
import functools
import os
import re
import sys
import mimeparse
import subunit
import testtools
DAY_SECONDS = 60 * 60 * 24
FAILS = []
RESULTS = {}
class Starts(testtools.StreamResult):
def __init__(self, output):
super(Starts, self).__init__()
self._output = output
def startTestRun(self):
self._neednewline = False
self._emitted = set()
def status(self, test_id=None, test_status=None, test_tags=None,
runnable=True, file_name=None, file_bytes=None, eof=False,
mime_type=None, route_code=None, timestamp=None):
super(Starts, self).status(
test_id, test_status,
test_tags=test_tags, runnable=runnable, file_name=file_name,
file_bytes=file_bytes, eof=eof, mime_type=mime_type,
route_code=route_code, timestamp=timestamp)
if not test_id:
if not file_bytes:
return
if not mime_type or mime_type == 'test/plain;charset=utf8':
mime_type = 'text/plain; charset=utf-8'
primary, sub, parameters = mimeparse.parse_mime_type(mime_type)
content_type = testtools.content_type.ContentType(
primary, sub, parameters)
content = testtools.content.Content(
content_type, lambda: [file_bytes])
text = content.as_text()
if text and text[-1] not in '\r\n':
self._neednewline = True
self._output.write(text)
elif test_status == 'inprogress' and test_id not in self._emitted:
if self._neednewline:
self._neednewline = False
self._output.write('\n')
worker = ''
for tag in test_tags or ():
if tag.startswith('worker-'):
worker = '(' + tag[7:] + ') '
if timestamp:
timestr = timestamp.isoformat()
else:
timestr = ''
self._output.write('%s: %s%s [start]\n' %
(timestr, worker, test_id))
self._emitted.add(test_id)
def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
"""Clean up the test name for display.
By default we strip out the tags in the test because they don't help us
in identifying the test that is run to its result.
Make it possible to strip out the testscenarios information (not to
be confused with tempest scenarios); however, that's often needed to
identify generated negative tests.
"""
if strip_tags:
tags_start = name.find('[')
tags_end = name.find(']')
if tags_start > 0 and tags_end > tags_start:
newname = name[:tags_start]
newname += name[tags_end + 1:]
name = newname
if strip_scenarios:
tags_start = name.find('(')
tags_end = name.find(')')
if tags_start > 0 and tags_end > tags_start:
newname = name[:tags_start]
newname += name[tags_end + 1:]
name = newname
return name
def get_duration(timestamps):
start, end = timestamps
if not start or not end:
duration = ''
else:
delta = end - start
duration = '%d.%06ds' % (
delta.days * DAY_SECONDS + delta.seconds, delta.microseconds)
return duration
def find_worker(test):
for tag in test['tags']:
if tag.startswith('worker-'):
return int(tag[7:])
return 'NaN'
# Print out stdout/stderr if it exists, always
def print_attachments(stream, test, all_channels=False):
"""Print out subunit attachments.
Print out subunit attachments that contain content. This
runs in 2 modes, one for successes where we print out just stdout
and stderr, and an override that dumps all the attachments.
"""
channels = ('stdout', 'stderr')
for name, detail in test['details'].items():
# NOTE(sdague): the subunit names are a little crazy, and actually
# are in the form pythonlogging:'' (with the colon and quotes)
name = name.split(':')[0]
if detail.content_type.type == 'test':
detail.content_type.type = 'text'
if (all_channels or name in channels) and detail.as_text():
title = "Captured %s:" % name
stream.write("\n%s\n%s\n" % (title, ('~' * len(title))))
# indent attachment lines 4 spaces to make them visually
# offset
for line in detail.as_text().split('\n'):
stream.write(" %s\n" % line)
def show_outcome(stream, test, print_failures=False, failonly=False):
global RESULTS
status = test['status']
# TODO(sdague): ask lifeless why on this?
if status == 'exists':
return
worker = find_worker(test)
name = cleanup_test_name(test['id'])
duration = get_duration(test['timestamps'])
if worker not in RESULTS:
RESULTS[worker] = []
RESULTS[worker].append(test)
# don't count the end of the return code as a fail
if name == 'process-returncode':
return
if status == 'fail':
FAILS.append(test)
stream.write('{%s} %s [%s] ... FAILED\n' % (
worker, name, duration))
if not print_failures:
print_attachments(stream, test, all_channels=True)
elif not failonly:
if status == 'success':
stream.write('{%s} %s [%s] ... ok\n' % (
worker, name, duration))
print_attachments(stream, test)
elif status == 'skip':
stream.write('{%s} %s ... SKIPPED: %s\n' % (
worker, name, test['details']['reason'].as_text()))
else:
stream.write('{%s} %s [%s] ... %s\n' % (
worker, name, duration, test['status']))
if not print_failures:
print_attachments(stream, test, all_channels=True)
stream.flush()
def print_fails(stream):
"""Print summary failure report.
Currently unused, however there remains debate on inline vs. at end
reporting, so leave the utility function for later use.
"""
if not FAILS:
return
stream.write("\n==============================\n")
stream.write("Failed %s tests - output below:" % len(FAILS))
stream.write("\n==============================\n")
for f in FAILS:
stream.write("\n%s\n" % f['id'])
stream.write("%s\n" % ('-' * len(f['id'])))
print_attachments(stream, f, all_channels=True)
stream.write('\n')
def count_tests(key, value):
count = 0
for k, v in RESULTS.items():
for item in v:
if key in item:
if re.search(value, item[key]):
count += 1
return count
def run_time():
runtime = 0.0
for k, v in RESULTS.items():
for test in v:
runtime += float(get_duration(test['timestamps']).strip('s'))
return runtime
def worker_stats(worker):
tests = RESULTS[worker]
num_tests = len(tests)
delta = tests[-1]['timestamps'][1] - tests[0]['timestamps'][0]
return num_tests, delta
def print_summary(stream):
stream.write("\n======\nTotals\n======\n")
stream.write("Run: %s in %s sec.\n" % (count_tests('status', '.*'),
run_time()))
stream.write(" - Passed: %s\n" % count_tests('status', 'success'))
stream.write(" - Skipped: %s\n" % count_tests('status', 'skip'))
stream.write(" - Failed: %s\n" % count_tests('status', 'fail'))
# we could have no results, especially as we filter out the process-codes
if RESULTS:
stream.write("\n==============\nWorker Balance\n==============\n")
for w in range(max(RESULTS.keys()) + 1):
if w not in RESULTS:
stream.write(
" - WARNING: missing Worker %s! "
"Race in testr accounting.\n" % w)
else:
num, time = worker_stats(w)
stream.write(" - Worker %s (%s tests) => %ss\n" %
(w, num, time))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--no-failure-debug', '-n', action='store_true',
dest='print_failures', help='Disable printing failure '
'debug information in realtime')
parser.add_argument('--fails', '-f', action='store_true',
dest='post_fails', help='Print failure debug '
'information after the stream is processed')
parser.add_argument('--failonly', action='store_true',
dest='failonly', help="Don't print success items",
default=(
os.environ.get('TRACE_FAILONLY', False)
is not False))
return parser.parse_args()
def main():
args = parse_args()
stream = subunit.ByteStreamToStreamResult(
sys.stdin, non_subunit_name='stdout')
starts = Starts(sys.stdout)
outcomes = testtools.StreamToDict(
functools.partial(show_outcome, sys.stdout,
print_failures=args.print_failures,
failonly=args.failonly
))
summary = testtools.StreamSummary()
result = testtools.CopyStreamResult([starts, outcomes, summary])
result.startTestRun()
try:
stream.run(result)
finally:
result.stopTestRun()
if count_tests('status', '.*') == 0:
print("The test run didn't actually run any tests")
return 1
if args.post_fails:
print_fails(sys.stdout)
print_summary(sys.stdout)
return (0 if summary.wasSuccessful() else 1)
if __name__ == '__main__':
sys.exit(main())
neutron-vpnaas-8.0.0/tools/generate_config_file_samples.sh
#!/bin/sh
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -e
GEN_CMD=oslo-config-generator
if ! type "$GEN_CMD" > /dev/null; then
echo "ERROR: $GEN_CMD not installed on the system."
exit 1
fi
for file in etc/oslo-config-generator/*; do
$GEN_CMD --config-file=$file
done
set -x
neutron-vpnaas-8.0.0/tools/check_i18n_test_case.txt
# test-case for check_i18n.py
# python check_i18n.py check_i18n.txt -d
# message format checking
# capital checking
msg = _("hello world, error")
msg = _("hello world_var, error")
msg = _('file_list xyz, pass')
msg = _("Hello world, pass")
# format specifier checking
msg = _("Hello %s world %d, error")
msg = _("Hello %s world, pass")
msg = _("Hello %(var1)s world %(var2)s, pass")
# message has been localized
# is_localized
msg = _("Hello world, pass")
msg = _("Hello world, pass") % var
LOG.debug(_('Hello world, pass'))
LOG.info(_('Hello world, pass'))
raise x.y.Exception(_('Hello world, pass'))
raise Exception(_('Hello world, pass'))
# message need be localized
# is_log_callfunc
LOG.debug('hello world, error')
LOG.debug('hello world, error' % xyz)
sys.append('hello world, warn')
# is_log_i18n_msg_with_mod
LOG.debug(_('Hello world, error') % xyz)
# default warn
msg = 'hello world, warn'
msg = 'hello world, warn' % var
# message needn't be localized
# skip only one word
msg = ''
msg = "hello,pass"
# skip dict
msg = {'hello world, pass': 1}
# skip list
msg = ["hello world, pass"]
# skip subscript
msg['hello world, pass']
# skip xml marker
msg = ", pass"
# skip sql statement
msg = "SELECT * FROM xyz WHERE hello=1, pass"
msg = "select * from xyz, pass"
# skip add statement
msg = 'hello world' + e + 'world hello, pass'
# skip doc string
"""
Hello world, pass
"""
class Msg:
pass
neutron-vpnaas-8.0.0/tools/install_venv_common.py
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides methods needed by installation script for OpenStack development
virtual environments.
Since this script is used to bootstrap a virtualenv from the system's Python
environment, it should be kept strictly compatible with Python 2.6.
Synced in from openstack-common
"""
from __future__ import print_function
import optparse
import os
import subprocess
import sys
class InstallVenv(object):
def __init__(self, root, venv, requirements,
test_requirements, py_version,
project):
self.root = root
self.venv = venv
self.requirements = requirements
self.test_requirements = test_requirements
self.py_version = py_version
self.project = project
def die(self, message, *args):
print(message % args, file=sys.stderr)
sys.exit(1)
def check_python_version(self):
if sys.version_info < (2, 6):
self.die("Need Python Version >= 2.6")
def run_command_with_code(self, cmd, redirect_output=True,
check_exit_code=True):
"""Runs a command in an out-of-process shell.
Returns the output of that command. Working directory is self.root.
"""
if redirect_output:
stdout = subprocess.PIPE
else:
stdout = None
proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
return (output, proc.returncode)
def run_command(self, cmd, redirect_output=True, check_exit_code=True):
return self.run_command_with_code(cmd, redirect_output,
check_exit_code)[0]
def get_distro(self):
if (os.path.exists('/etc/fedora-release') or
os.path.exists('/etc/redhat-release')):
return Fedora(
self.root, self.venv, self.requirements,
self.test_requirements, self.py_version, self.project)
else:
return Distro(
self.root, self.venv, self.requirements,
self.test_requirements, self.py_version, self.project)
def check_dependencies(self):
self.get_distro().install_virtualenv()
def create_virtualenv(self, no_site_packages=True):
"""Creates the virtual environment and installs PIP.
Creates the virtual environment and installs PIP only into the
virtual environment.
"""
if not os.path.isdir(self.venv):
print('Creating venv...', end=' ')
if no_site_packages:
self.run_command(['virtualenv', '-q', '--no-site-packages',
self.venv])
else:
self.run_command(['virtualenv', '-q', self.venv])
print('done.')
else:
print("venv already exists...")
pass
def pip_install(self, *args):
self.run_command(['tools/with_venv.sh',
'pip', 'install', '--upgrade'] + list(args),
redirect_output=False)
def install_dependencies(self):
print('Installing dependencies with pip (this can take a while)...')
# First things first, make sure our venv has the latest pip and
# setuptools and pbr
self.pip_install('pip>=1.4')
self.pip_install('setuptools')
self.pip_install('pbr')
self.pip_install('-r', self.requirements, '-r', self.test_requirements)
def parse_args(self, argv):
"""Parses command-line arguments."""
parser = optparse.OptionParser()
parser.add_option('-n', '--no-site-packages',
action='store_true',
help="Do not inherit packages from global Python "
"install.")
return parser.parse_args(argv[1:])[0]
class Distro(InstallVenv):
def check_cmd(self, cmd):
return bool(self.run_command(['which', cmd],
check_exit_code=False).strip())
def install_virtualenv(self):
if self.check_cmd('virtualenv'):
return
if self.check_cmd('easy_install'):
print('Installing virtualenv via easy_install...', end=' ')
if self.run_command(['easy_install', 'virtualenv']):
print('Succeeded')
return
else:
print('Failed')
self.die('ERROR: virtualenv not found.\n\n%s development'
' requires virtualenv, please install it using your'
' favorite package management tool' % self.project)
class Fedora(Distro):
"""This covers all Fedora-based distributions.
Includes: Fedora, RHEL, CentOS, Scientific Linux
"""
def check_pkg(self, pkg):
return self.run_command_with_code(['rpm', '-q', pkg],
check_exit_code=False)[1] == 0
def install_virtualenv(self):
if self.check_cmd('virtualenv'):
return
if not self.check_pkg('python-virtualenv'):
self.die("Please install 'python-virtualenv'.")
super(Fedora, self).install_virtualenv()
neutron-vpnaas-8.0.0/tools/configure_for_vpn_func_testing.sh
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -e
IS_GATE=${IS_GATE:-False}
USE_CONSTRAINT_ENV=${USE_CONSTRAINT_ENV:-False}
PROJECT_NAME=${PROJECT_NAME:-neutron-vpnaas}
REPO_BASE=${GATE_DEST:-$(cd $(dirname "$BASH_SOURCE")/../.. && pwd)}
source $REPO_BASE/neutron/tools/configure_for_func_testing.sh
source $REPO_BASE/neutron-vpnaas/devstack/settings
source $NEUTRON_VPNAAS_DIR/devstack/plugin.sh
function _install_vpn_package {
case $VENV in
dsvm-functional-sswan*)
IPSEC_PACKAGE=strongswan
;;
*)
IPSEC_PACKAGE=openswan
;;
esac
echo_summary "Installing $IPSEC_PACKAGE for $VENV"
neutron_agent_vpnaas_install_agent_packages
}
function _configure_vpn_ini_file {
echo_summary "Configuring VPN ini file"
local temp_ini=$(mktemp)
neutron_vpnaas_generate_config_files
neutron_vpnaas_configure_agent $temp_ini
sudo install -d -o $STACK_USER /etc/neutron/
sudo install -m 644 -o $STACK_USER $temp_ini $Q_VPN_CONF_FILE
}
function configure_host_for_vpn_func_testing {
echo_summary "Configuring for VPN functional testing"
if [ "$IS_GATE" == "True" ]; then
configure_host_for_func_testing
fi
# Note(pc_m): Need to ensure this is installed so we have
# oslo-config-generator present (as this script runs before tox.ini).
sudo pip install --force oslo.config
_install_vpn_package
_configure_vpn_ini_file
}
if [ "$IS_GATE" != "True" ]; then
configure_host_for_vpn_func_testing
fi
neutron-vpnaas-8.0.0/tools/install_venv.py
#!/usr/bin/env python
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack Foundation.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Installation script for Neutron's development virtualenv
"""
from __future__ import print_function
import os
import sys
import install_venv_common as install_venv
def print_help():
help = """
Neutron development environment setup is complete.
Neutron development uses virtualenv to track and manage Python dependencies
while in development and testing.
To activate the Neutron virtualenv for the extent of your current shell
session you can run:
$ source .venv/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ tools/with_venv.sh
Also, make test will automatically use the virtualenv.
"""
print(help)
def main(argv):
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
venv = os.path.join(root, '.venv')
pip_requires = os.path.join(root, 'requirements.txt')
test_requires = os.path.join(root, 'test-requirements.txt')
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
project = 'Neutron'
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
py_version, project)
options = install.parse_args(argv)
install.check_python_version()
install.check_dependencies()
install.create_virtualenv(no_site_packages=options.no_site_packages)
install.install_dependencies()
print_help()
if __name__ == '__main__':
main(sys.argv)
neutron-vpnaas-8.0.0/tools/deploy_rootwrap.sh
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -eu
if [ $# -ne 2 ]; then
>&2 echo "Usage: $0 /path/to/repo /path/to/virtual-env
Deploy rootwrap configuration and filters.
Warning: Any existing rootwrap files at the specified etc path will be
removed by this script.
Optional: set OS_SUDO_TESTING=1 to deploy the filters required by
Neutron's functional testing suite."
exit 1
fi
OS_SUDO_TESTING=${OS_SUDO_TESTING:-0}
repo_path=$1
venv_path=$2
src_conf_path=${repo_path}/neutron_vpnaas/tests/contrib
src_conf=${src_conf_path}/functional-test-rootwrap.conf
src_rootwrap_path=${repo_path}/etc/neutron/rootwrap.d
dst_conf_path=${venv_path}/etc/neutron
dst_conf=${dst_conf_path}/rootwrap.conf
dst_rootwrap_path=${dst_conf_path}/rootwrap.d
# Clear any existing filters in virtual env
if [[ -d "$dst_rootwrap_path" ]]; then
rm -rf ${dst_rootwrap_path}
fi
mkdir -p -m 755 ${dst_rootwrap_path}
# Get all needed filters
cp -p ${src_rootwrap_path}/* ${dst_rootwrap_path}/
if [[ "$OS_SUDO_TESTING" = "1" ]]; then
cp -p ${repo_path}/neutron_vpnaas/tests/contrib/functional-testing.filters \
${dst_rootwrap_path}/
fi
# Get config file and modify for this repo
cp -p ${src_conf} ${dst_conf}
sed -i "s:^filters_path=.*$:filters_path=${dst_rootwrap_path}:" ${dst_conf}
sed -i "s:^\(exec_dirs=.*\)$:\1,${venv_path}/bin:" ${dst_conf}
sudo cp ${dst_conf} /etc/neutron/
neutron-vpnaas-8.0.0/tools/with_venv.sh
#!/usr/bin/env bash
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
TOOLS=`dirname $0`
VENV=$TOOLS/../.venv
source $VENV/bin/activate && "$@"
neutron-vpnaas-8.0.0/tools/i18n_cfg.py
import compiler
import re
def is_log_callfunc(n):
"""LOG.xxx('hello %s' % xyz) and LOG('hello')"""
if isinstance(n.parent, compiler.ast.Mod):
n = n.parent
if isinstance(n.parent, compiler.ast.CallFunc):
if isinstance(n.parent.node, compiler.ast.Getattr):
if isinstance(n.parent.node.getChildNodes()[0],
compiler.ast.Name):
if n.parent.node.getChildNodes()[0].name == 'LOG':
return True
return False
def is_log_i18n_msg_with_mod(n):
"""LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)"""
if not isinstance(n.parent.parent, compiler.ast.Mod):
return False
n = n.parent.parent
if isinstance(n.parent, compiler.ast.CallFunc):
if isinstance(n.parent.node, compiler.ast.Getattr):
if isinstance(n.parent.node.getChildNodes()[0],
compiler.ast.Name):
if n.parent.node.getChildNodes()[0].name == 'LOG':
return True
return False
def is_wrong_i18n_format(n):
"""Check _('hello %s' % xyz)"""
if isinstance(n.parent, compiler.ast.Mod):
n = n.parent
if isinstance(n.parent, compiler.ast.CallFunc):
if isinstance(n.parent.node, compiler.ast.Name):
if n.parent.node.name == '_':
return True
return False
"""
Used for check message need be localized or not.
(predicate_func, action, message)
"""
i18n_msg_predicates = [
# Skip ['hello world', 1]
(lambda n: isinstance(n.parent, compiler.ast.List), 'skip', ''),
# Skip {'hello world': 1}
(lambda n: isinstance(n.parent, compiler.ast.Dict), 'skip', ''),
# Skip msg['hello world']
(lambda n: isinstance(n.parent, compiler.ast.Subscript), 'skip', ''),
# Skip doc string
(lambda n: isinstance(n.parent, compiler.ast.Discard), 'skip', ''),
# Skip msg = "hello", in normal, message should more than one word
(lambda n: len(n.value.strip().split(' ')) <= 1, 'skip', ''),
# Skip msg = 'hello world' + vars + 'world hello'
(lambda n: isinstance(n.parent, compiler.ast.Add), 'skip', ''),
# Skip xml markers msg = ""
(lambda n: len(re.compile("").findall(n.value)) > 0, 'skip', ''),
# Skip sql statement
(lambda n: len(
re.compile("^SELECT.*FROM", flags=re.I).findall(n.value)) > 0,
'skip', ''),
# LOG.xxx()
(is_log_callfunc, 'error', 'Message must be localized'),
# _('hello %s' % xyz) should be _('hello %s') % xyz
(is_wrong_i18n_format, 'error',
("Message format was wrong, _('hello %s' % xyz) "
"should be _('hello %s') % xyz")),
# default
(lambda n: True, 'warn', 'Message might need localized')
]
"""
Used for checking message format. (checker_func, message)
"""
msg_format_checkers = [
# If message contain more than on format specifier, it should use
# mapping key
(lambda n: len(re.compile("%[bcdeEfFgGnosxX]").findall(n.value)) > 1,
"The message shouldn't contain more than one format specifier"),
# Check capital
(lambda n: n.value.split(' ')[0].count('_') == 0 and
n.value[0].isalpha() and
n.value[0].islower(),
"First letter must be capital"),
(is_log_i18n_msg_with_mod,
'LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)')
]
file_black_list = ["./neutron/tests/unit",
"./neutron/openstack",
"./neutron/plugins/bigswitch/tests"]
neutron-vpnaas-8.0.0/tools/clean.sh
#!/usr/bin/env bash
rm -rf ./*.deb ./*.tar.gz ./*.dsc ./*.changes
rm -rf */*.deb
rm -rf ./plugins/**/build/ ./plugins/**/dist
rm -rf ./plugins/**/lib/neutron_*_plugin.egg-info ./plugins/neutron-*
neutron-vpnaas-8.0.0/tools/tox_install.sh
#!/usr/bin/env bash
# Many of neutron's repos suffer from the problem of depending on neutron,
# but it not existing on pypi.
# This wrapper for tox's package installer will use the existing package
# if it exists, else use zuul-cloner if that program exists, else grab it
# from neutron master via a hard-coded URL. That last case should only
# happen with devs running unit tests locally.
# From the tox.ini config page:
# install_command=ARGV
# default:
# pip install {opts} {packages}
set -x
ZUUL_CLONER=/usr/zuul-env/bin/zuul-cloner
neutron_installed=$(echo "import neutron" | python 2>/dev/null ; echo $?)
NEUTRON_DIR=$HOME/neutron
BRANCH_NAME=stable/mitaka
set -e
install_cmd="pip install"
if [ "$1" = "constrained" ]; then
install_cmd="$install_cmd $2"
shift
fi
shift
if [ -d "$NEUTRON_DIR" ]; then
echo "FOUND Neutron code at $NEUTRON_DIR - using"
$install_cmd -U -e $NEUTRON_DIR
elif [ $neutron_installed -eq 0 ]; then
location=$(python -c "import neutron; print(neutron.__file__)")
echo "ALREADY INSTALLED at $location"
elif [ -x "$ZUUL_CLONER" ]; then
echo "USING ZUUL CLONER to obtain Neutron code"
cwd=$(/bin/pwd)
cd /tmp
$ZUUL_CLONER --cache-dir \
/opt/git \
--branch $BRANCH_NAME \
git://git.openstack.org \
openstack/neutron
cd openstack/neutron
$install_cmd -e .
cd "$cwd"
else
echo "LOCAL - Obtaining Neutron code from git.openstack.org"
$install_cmd -U -egit+https://git.openstack.org/openstack/neutron@$BRANCH_NAME#egg=neutron
fi
$install_cmd -U $*
exit $?
neutron-vpnaas-8.0.0/tools/check_unit_test_structure.sh
#!/usr/bin/env bash
# This script identifies the unit test modules that do not correspond
# directly with a module in the code tree. See TESTING.rst for the
# intended structure.
neutron_path=$(cd "$(dirname "$0")/.." && pwd)
base_test_path=neutron_vpnaas/tests/unit
test_path=$neutron_path/$base_test_path
test_files=$(find ${test_path} -iname 'test_*.py')
ignore_regexes=(
"^plugins.*$"
)
error_count=0
ignore_count=0
total_count=0
for test_file in ${test_files[@]}; do
relative_path=${test_file#$test_path/}
expected_path=$(dirname $neutron_path/neutron_vpnaas/$relative_path)
test_filename=$(basename "$test_file")
expected_filename=${test_filename#test_}
# Module filename (e.g. foo/bar.py -> foo/test_bar.py)
filename=$expected_path/$expected_filename
# Package dir (e.g. foo/ -> test_foo.py)
package_dir=${filename%.py}
if [ ! -f "$filename" ] && [ ! -d "$package_dir" ]; then
for ignore_regex in ${ignore_regexes[@]}; do
if [[ "$relative_path" =~ $ignore_regex ]]; then
((ignore_count++))
continue 2
fi
done
echo "Unexpected test file: $base_test_path/$relative_path"
((error_count++))
fi
((total_count++))
done
if [ "$ignore_count" -ne 0 ]; then
echo "$ignore_count unmatched test modules were ignored"
fi
if [ "$error_count" -eq 0 ]; then
echo 'Success! All test modules match targets in the code tree.'
exit 0
else
echo "Failure! $error_count of $total_count test modules do not match targets in the code tree."
exit 1
fi
neutron-vpnaas-8.0.0/tools/ostestr_compat_shim.sh
#!/bin/sh
# preserve old behavior of using an arg as a regex when '--' is not present
case $@ in
(*--*) ostestr $@;;
('') ostestr;;
(*) ostestr --regex "$@"
esac
neutron-vpnaas-8.0.0/doc/source/index.rst
..
Copyright 2015 OpenStack Foundation
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Welcome to Neutron VPNaaS developer documentation!
==================================================
This provides Virtual Private Network as a Service (VPNaaS) capabilities to Neutron.
Maintained as a separate repo, this works in conjunction with the Neutron repo to
provide VPN services for OpenStack. The `VPNaaS API`_ is implemented as an
extension to Neutron's networking API:
.. _`VPNaaS API`: http://developer.openstack.org/api-ref-networking-v2-ext.html#vpnaas-v2.0
This documentation is generated by the Sphinx toolkit and lives in the source
tree. Additional documentation on VPNaaS and other components of OpenStack
can be found on the `OpenStack wiki`_ and the `Neutron section of the wiki`_ (see
the VPN related pages).
The `Neutron Development wiki`_ is also a good resource for new contributors.
.. _`OpenStack wiki`: http://wiki.openstack.org
.. _`Neutron section of the wiki`: http://wiki.openstack.org/Neutron
.. _`Neutron Development wiki`: http://wiki.openstack.org/NeutronDevelopment
Enjoy!
Developer Docs
==============
.. toctree::
:maxdepth: 1
devref/index
API Extensions
==============
Go to http://api.openstack.org for information about OpenStack Network API extensions.
neutron-vpnaas-8.0.0/doc/source/conf.py
# -*- coding: utf-8 -*-
# Copyright (c) 2010 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Keystone documentation build configuration file, created by
# sphinx-quickstart on Tue May 18 13:50:15 2010.
#
# This file is execfile()'d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import subprocess
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
sys.path.insert(0, ROOT_DIR)
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.graphviz',
'sphinx.ext.todo',
'oslosphinx']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = []
if os.getenv('HUDSON_PUBLISH_DOCS'):
templates_path = ['_ga', '_templates']
else:
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master doctree document.
master_doc = 'index'
# General information about the project.
project = u'Neutron VPNaaS'
copyright = u'2011-present, OpenStack Foundation.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Version info
from neutron_vpnaas.version import version_info as neutron_vpnaas_version
release = neutron_vpnaas_version.release_string()
# The short X.Y version.
version = neutron_vpnaas_version.version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['neutron_vpnaas.']
# -- Options for man page output --------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
#man_pages = [
# ('man/neutron-server', 'neutron-server', u'Neutron Server',
# [u'OpenStack'], 1)
#]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# " v documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
"-n1"]
html_last_updated_fmt = subprocess.Popen(git_cmd,
stdout=subprocess.PIPE).communicate()[0]
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
#htmlhelp_basename = 'neutrondoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
#latex_documents = [
# ('index', 'Neutron.tex', u'Neutron Documentation',
# u'Neutron development team', 'manual'),
#]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
neutron-vpnaas-8.0.0/doc/source/devref/index.rst
..
Copyright 2015 OpenStack Foundation
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Developer Guide
===============
In the Developer Guide, you will find information on the design and
architecture of the Neutron Virtual Private Network as a Service repo.
This includes things such as information on the reference implementation
flavors, design details on VPNaaS internals, and testing. Developers
will extend this, as needed, in the future to contain more information.
VPNaaS Flavors
-----------------
.. toctree::
:maxdepth: 3
.. todo::
Info on the different Swan flavors, how they are different, and what
Operating Systems support them.
VPNaaS Internals
-----------------
.. toctree::
:maxdepth: 3
multiple-local-subnets
VPNaaS Rally Tests
--------------------
.. toctree::
:maxdepth: 3
vpnaas-rally-test
Testing
-------
.. toctree::
:maxdepth: 3
devstack
.. todo::
Add notes about functional testing, with info on how
different reference drivers are tested.
Module Reference
----------------
.. toctree::
:maxdepth: 3
.. todo::
Add in all the big modules as automodule indexes.
Indices and tables
------------------
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
neutron-vpnaas-8.0.0/doc/source/devref/vpnaas-rally-test.rst
===================
VPNaaS Rally Tests
===================
This contains the Rally test code for the Neutron VPN as a Service (VPNaaS) service. The tests
currently require Rally to be installed, either via devstack or standalone. It is assumed that you
also have Neutron with the Neutron VPNaaS service installed.
These tests can also be run against a multinode OpenStack deployment.
Please see /neutron-vpnaas/devstack/README.md for the required devstack configuration settings
for Neutron-VPNaaS.
Structure:
==========
1. plugins - Directory where you can add Rally plugins. Almost everything in Rally is a plugin.
It contains the base and common methods, as well as the actual scenario tests.
2. rally-configs - Contains input configurations for the scenario tests
How to test:
============
Included in the repo are Rally tests. For information on Rally, please see the Rally README:
https://github.com/openstack/rally/blob/master/README.rst
* Create a rally deployment for your cloud and make sure it is active. A minimal
sketch of the cloud_cred.json deployment file is shown at the end of this section.
rally deployment create --file=cloud_cred.json --name=MyCloud
You can also create a rally deployment from the environment variables.
rally deployment create --fromenv --name=MyCloud
* Create a folder structure as below
sudo mkdir /opt/rally
* Create a symbolic link to the plugins directory
cd /opt/rally
sudo ln -s /opt/stack/neutron-vpnaas/rally-jobs/plugins
* Run the tests. You can run the tests in various combinations.
(a) Single Node with DVR with admin credentials
(b) Single Node with DVR with non admin credentials
(c) Multi Node with DVR with admin credentials
(d) Multi Node with DVR with non admin credentials
(e) Single Node, Non DVR with admin credentials
(f) Multi Node, Non DVR with admin credentials
-> Create an args.json file with the correct credentials, depending on whether it is a
single node or multinode cloud. An args_template.json file is available at
/opt/stack/neutron-vpnaas/rally-jobs/rally-configs/args_template.json for your reference.
-> Update the rally_config_dvr.yaml or rally_config_non_dvr.yaml file to change the
admin/non_admin credentials.
-> Use the appropriate config files to run either dvr or non_dvr tests.
With DVR:
rally task start /opt/stack/neutron-vpnaas/rally-jobs/rally-configs/rally_config_dvr.yaml
--task-args-file /opt/stack/neutron-vpnaas/rally-jobs/rally-configs/args.json
Non DVR:
rally task start /opt/stack/neutron-vpnaas/rally-jobs/rally-configs/rally_config_non_dvr.yaml
--task-args-file /opt/stack/neutron-vpnaas/rally-jobs/rally-configs/args.json
**Note:**
The non DVR scenario can only be run as admin, since admin credentials are needed to create
a non DVR router.
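For reference, the cloud_cred.json file used in the deployment step above is a
standard Rally deployment definition of type "ExistingCloud". The following is
only a minimal sketch, assuming a Keystone v2.0 endpoint; all values are
illustrative and must be adjusted for your cloud::
    # Illustrative only -- adjust the auth URL and credentials for your cloud.
    cat > cloud_cred.json <<EOF
    {
        "type": "ExistingCloud",
        "auth_url": "http://KEYSTONE_HOST:5000/v2.0/",
        "region_name": "RegionOne",
        "admin": {
            "username": "admin",
            "password": "secret",
            "tenant_name": "admin"
        }
    }
    EOF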
External Resources:
===================
For more information on the rally testing framework see:
neutron-vpnaas-8.0.0/doc/source/devref/devstack.rst 0000664 0005670 0005671 00000003073 12701407726 023527 0 ustar jenkins jenkins 0000000 0000000 ===============================
Configuring VPNaaS for DevStack
===============================
-----------------------
Multinode vs All-In-One
-----------------------
Devstack typically runs in single or "All-In-One" (AIO) mode. However, it
can also be deployed to run on multiple nodes. For VPNaaS, running on an
AIO setup is simple, as everything happens on the same node. Deploying to
a multinode setup, however, requires the following things to happen:
#. Each controller node requires database migrations in support of running
VPNaaS.
#. Each network node that would run the L3 agent needs to run the Neutron
VPNaaS agent in its place.
Therefore, the devstack plugin script needs some extra logic.
----------------
How to Configure
----------------
To configure VPNaaS, it is only necessary to enable the neutron-vpnaas
devstack plugin by adding the following line to the [[local|localrc]]
section of devstack's local.conf file::
enable_plugin neutron-vpnaas GITURL [BRANCH]
GITURL is the URL of a neutron-vpnaas repository.
[BRANCH] is an optional git ref (branch/ref/tag). The default is master.
For example::
enable_plugin neutron-vpnaas https://git.openstack.org/openstack/neutron-vpnaas stable/kilo
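Putting this together, a minimal local.conf fragment might look like the
following sketch. The IPSEC_PACKAGE override is optional and shown only as an
illustration (the plugin defaults to openswan); everything else here is an
example, not a required setting::
    [[local|localrc]]
    enable_plugin neutron-vpnaas https://git.openstack.org/openstack/neutron-vpnaas
    # Optional: select the IPsec backend package used by the VPN agent
    IPSEC_PACKAGE=strongswan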
This VPNaaS devstack plugin code will then:
#. Install the common VPNaaS configuration and code,
#. Apply database migrations on nodes that are running the controller (as
determined by enabling the q-svc service),
#. Run the VPNaaS agent on nodes that would normally be running the L3 agent
(as determined by enabling the q-l3 service).
neutron-vpnaas-8.0.0/doc/source/devref/multiple-local-subnets.rst 0000664 0005670 0005671 00000026637 12701407726 026342 0 ustar jenkins jenkins 0000000 0000000 =================================
Multiple Local Subnets for VPNaaS
=================================
As originally implemented, a VPN IPSec connection could have one or more
peer subnets specified, but only **one** local subnet. To support multiple
local subnets, multiple IPSec connections would be needed.
With the multiple local subnet support, three goals are addressed. First,
there can be multiple local and peer endpoints for a single IPSec connection.
Second, validation enforces that the same IP version is used for all
endpoints (to reduce complexity and ease testing).
Third, the "what is connected" is separated from the "how to connect",
so that other flavors of VPN (as they are developed) can use some of this
mechanism.
Design Notes
------------
There were three proposals considered to support multiple local subnets.
Proposal A was to just add the local subnets to the IPSec connection API.
That would be the quickest way, and addresses the first two goals, but
not the third.
Proposal B was to create a new API that specifies the local subnets
and peer CIDRs, and to reference those in the connection API. This would
separate the "what is connected" from the "how to connect", and again
addresses the first two goals (only).
Proposal C, which was the *selected proposal*, creates a new API
that represents the "endpoint groups" for VPN connections, in the same
manner as proposal B. The added flexibility here, though, which meets
goal three, is to also include the endpoint group "type", thus allowing
subnets (local) and CIDRs (peer) to be used for IPSec, but routers,
networks, and VLANs to be used for other VPN types (BGP, L2, direct
connection). Additional types can be added in the future as needed.
Client CLI API
--------------
The originally implemented client CLI APIs (which are still available
for backward compatibility) for an IPsec connection are::
neutron vpn-service-create ROUTER SUBNET
neutron ipsec-site-connection-create
--vpnservice-id VPNSERVICE
--ikepolicy-id IKEPOLICY
--ipsecpolicy-id IPSECPOLICY
--peer-address PEER_ADDRESS
--peer-id PEER_ID
--peer-cidr PEER_CIDRS
--dpd action=ACTION,interval=INTERVAL,timeout=TIMEOUT
--initiator {bi-directional | response-only}
--mtu MTU
--psk PSK
Changes to the API, to support multiple local subnets, are shown in
**bold** text::
neutron vpn-service-create ROUTER
**neutron vpn-endpoint-group-create**
**--name OPTIONAL-NAME**
**--description OPTIONAL-DESCRIPTION**
**--ep-type={subnet,cidr,network,vlan,router}**
**--ep-value=[list-of-endpoints-of-type]**
neutron ipsec-site-connection-create
--vpnservice-id VPNSERVICE
--ikepolicy-id IKEPOLICY
--ipsecpolicy-id IPSECPOLICY
--peer-address PEER_ADDRESS
--peer-id PEER_ID
--dpd action=ACTION,interval=INTERVAL,timeout=TIMEOUT
--initiator {bi-directional | response-only}
--mtu MTU
--psk PSK
**--local-endpoints ENDPOINT-GROUPS-UUID**
**--peer-endpoints ENDPOINT-GROUPS-UUID**
The SUBNET in the original service API is optional, and will be used as an
indicator of whether or not the multiple local subnets feature is active.
See the 'Backwards Compatibility' section, below, for details.
For the endpoint groups, the --ep-type value is a string, so that other
types can be supported in the future.
The endpoint groups API would enforce that the endpoint values are all of
the same type, and match the endpoint type specified.
The connection APIs would then provide additional validation. For example,
with IPSec, the endpoint type must be 'subnet' for local, and 'cidr' for
peer, all the endpoints should be of the same IP version, and for the local
endpoint, all subnets would be on the same router.
For BGP VPN with dynamic routing, only a local endpoint group would be
specified, and the type would be 'network'.
The ROUTER may also be removed, in the future, and instead be determined
when the connections are created.
Note: Using --ep-type, as --endpoint-type is already used elsewhere, and
--type is too generic. Using --ep-value, as --endpoint is already in use,
--end-point could be easily mistyped as --endpoint, and --value is too
generic.
Examples
--------
The original APIs to create one side of an IPSec connection with
only one local and peer subnet::
neutron vpn-ikepolicy-create ikepolicy
neutron vpn-ipsecpolicy-create ipsecpolicy
neutron vpn-service-create --name myvpn router1 privateA
neutron ipsec-site-connection-create
--name vpnconnection1
--vpnservice-id myvpn
--ikepolicy-id ikepolicy
--ipsecpolicy-id ipsecpolicy
--peer-address 172.24.4.13
--peer-id 172.24.4.13
--peer-cidr 10.3.0.0/24
--psk secret
The local CIDR is obtained from the subnet, privateA. In this example,
that would be 10.1.0.0/24 (because that's how privateA was created).
Using the multiple local subnet feature, the APIs are as follows (with
changes shown in **bold** below)::
neutron vpn-ikepolicy-create ikepolicy
neutron vpn-ipsecpolicy-create ipsecpolicy
neutron vpn-service-create --name myvpn router1
**neutron vpn-endpoint-group-create**
**--name local-eps**
**--ep-type=subnet**
**--ep-value=privateA**
**--ep-value=privateB**
**neutron vpn-endpoint-group-create**
**--name peer-eps**
**--ep-type=cidr**
**--ep-value=10.3.0.0/24**
neutron ipsec-site-connection-create
--name vpnconnection1
--vpnservice-id myvpn
--ikepolicy-id ikepolicy
--ipsecpolicy-id ipsecpolicy
--peer-address 172.24.4.13
--psk secret
**--local-endpoints local-eps**
**--peer-endpoints peer-eps**
The subnets privateA and privateB are used for local endpoints and the
10.3.0.0/24 CIDR is used for the peer endpoint.
Database
--------
The vpn_endpoints table contains single endpoint entries and a reference
to the containing endpoint group. The vpn_endpoint_groups table defines
the group, specifying the endpoint type.
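To relate this to the API above, roughly speaking, each vpn-endpoint-group-create
call adds one row to vpn_endpoint_groups (carrying the name, description, and
endpoint type) and one row per --ep-value to vpn_endpoints. Using the example
values from this document (an illustration only, not a schema reference)::
    neutron vpn-endpoint-group-create --name local-eps --ep-type=subnet \
        --ep-value=privateA --ep-value=privateB
    # -> one vpn_endpoint_groups row (type 'subnet') and two vpn_endpoints rows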
Database Migration
------------------
For an older database, the subnet referenced in the subnet entry of the
service table can be placed in an endpoint group that will be used
for the local endpoints of the connection. The CIDRs from the connection
can be placed into another endpoint group for the peer endpoints.
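As a rough before/after illustration, using the example values from the
Examples section (this is a conceptual sketch of the data migration, not the
literal schema)::
    Before migration:
        VPN service 'myvpn' carries subnet privateA
        IPSec connection 'vpnconnection1' carries peer CIDR 10.3.0.0/24
    After migration:
        endpoint group (type 'subnet') containing privateA    <- local endpoints
        endpoint group (type 'cidr') containing 10.3.0.0/24   <- peer endpoints
        'vpnconnection1' references the two groups instead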
Backwards Compatibility
-----------------------
Operators would like to see this new capability provided, with backward
compatibility support. The implication, as I see it, is to provide the
ability for end users to switch to the new API at any time, versus being
forced to use the new API immediately, upon upgrade to the new release
containing this feature. This would apply both to manual API use and to
client apps/scripting-tools that would be used to configure VPNaaS.
There are two attributes involved here. One is the subnet ID
attribute in the VPN service API. The other is the peer CIDR attribute in
the IPSec connection API. Both would be specified by endpoint groups in
the new API, and these groups would be called out in the IPSec connection
API.
A plan to meet the backward compatibility goal of allowing both APIs to
be used at once involves taking the following steps.
For VPN service:
- Make the subnet ID attribute optional.
- If subnet ID is specified for create, consider old API mode.
- If subnet ID specified for create, create endpoint group and store ID.
- For delete, if subnet ID exists, delete corresponding endpoint group.
- For show/list, if subnet ID exists, show the ID in output.
- Subnet ID is not mutable, so no change for update API.
For IPSec site to site connection:
- For create, if old API mode, only allow peer-cidr attribute.
- For create, if not old API mode, require local/peer endpoint group ID attributes.
- For create, if peer-cidr specified, create endpoint group and store ID.
- For create, reject endpoint group ID attributes, if old API mode.
- For create, reject peer-cidr attribute, if not old API mode.
- For create, if old API mode, lookup subnet in service, find containing endpoint group ID and store.
- For delete, if old API mode, delete endpoint group for peer.
- For update of CIDRs (old mode), will delete endpoint group and create new one. (note 1)
- For update of endpoint-group IDs (new mode), will allow different groups to be specified. (note 1,2)
- For show/list, if old API mode, only display the peer CIDR values from peer endpoint group.
- For show/list, if not old API mode, also show local subnets from local endpoint group.
Note 1: Implication is that connection is torn down and re-created (as is
done currently).
Note 2: Users would create a new endpoint group, and then select that group,
when modifying the IPSec connection.
For endpoint groups:
- For delete, if the group is of type subnet, and its (sole) subnet ID is used in a VPN service (old mode), reject the request.
- Updates are not supported, so no action required. (note 3)
Note 3: Allowing updates would require deletion/recreation of the connection
using the endpoint group. That complexity is being avoided.
The thought here is to use endpoint groups under the hood, but if the old
API was being used, treat the endpoint groups as if they never existed.
Deleting connections and services would remove any endpoint groups, unlike
with the new API, where they are independent.
Migration can be used to move any VPNaaS configurations using the old
schema to the new schema. This would look at VPN services and for any
with a subnet ID, an endpoint group would be created and the group ID
stored in any existing IPSec connections for that service. Likewise,
any peer CIDRs in a connection would be copied into a new endpoint group
and the group ID stored in the connection.
The subnet ID field would then be removed from the VPN service table,
and the peer CIDRs table would be removed.
This migration could be done at the time of the new API release, in which
case all tenants with existing VPNaaS configurations would use the new
API to manage them (but could use the old API for new configurations).
Alternatively, the migration could be deferred until the old API is
removed, to ensure all existing configurations conform to the new schema.
Migration tools can then be created to manually migrate individual
tenants, as desired.
Stories
-------
For the endpoint groups, stories can cover:
- CRUD API for the endpoint groups.
- Database support for new tables.
- Migration creation of new tables.
- Validation of endpoints for a group (same type).
- Neutron client support for new API.
- Horizon support for new API.
- API documentation update.
For the multiple local subnets, stories can cover:
- Create IPSec connection with one local subnet, but using the new API.
- Create IPSec connection with multiple local subnets.
- Show IPSec connection to display endpoint group IDs (or endpoints?).
- Ensure previous API still works, but uses new tables.
- Validation to ensure old and new APIs are not mixed.
- Modify CLI client.
- Validate multiple local subnets on same router.
- Validate local and peer endpoints are of same IP version.
- Functional tests with multiple local subnets.
- API and How-To documentation update.
Note: The intent here is to have the initial stories take slices
vertically through the process so that we can demonstrate the
capability early.
Note: Horizon work to support the changes is not expected to be part
of this effort and would be handled by the Horizon team separately,
if support is desired.
neutron-vpnaas-8.0.0/LICENSE 0000664 0005670 0005671 00000023637 12701407726 016666 0 ustar jenkins jenkins 0000000 0000000
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
neutron-vpnaas-8.0.0/ChangeLog 0000664 0005670 0005671 00000122304 12701410102 017400 0 ustar jenkins jenkins 0000000 0000000 CHANGES
=======
8.0.0
-----
* Constraint requirements using mitaka upper-constraints.txt file
8.0.0.0rc2
----------
* Update devstack plugin for dependent packages
* Update .gitreview for stable/mitaka
8.0.0.0rc1
----------
* neutron-vpnaas fix for 'tox -e api' test
* Tag the alembic migration revisions for Mitaka
* VPNaaS Fix bandit Jinja issue
* Move db migration added during Mitaka to proper directory
* Fix tox.ini constraints for post jobs
* Updated from global requirements
* Put py34 first in the env order of tox
* vyatta: added missing agent console script
8.0.0.0b3
---------
* Track alembic heads
* Remove unused pngmath Sphinx extension
* VPNaaS: Cleanup constraints in tox.ini
* Updated from global requirements
* Updated from global requirements
* VPNaaS: make use of neutron_lib exceptions
* Updated from global requirements
* Use constraints on all targets
* Fix configure script for functional tests
* Switch from testr to ostestr
* Single/Multinode VPNaaS Scenario Tests using Rally
* Remove Foreign Key constraint during ALTER
* Add "nonstandard-exception" to .pylintrc
* Add VPNaaS API tests in neutron-vpnaas tree
* Add an explicit BRANCH_NAME to tox_install.sh
* Don't need builtins defined in tox.ini
* Update translation setup
* Updated from global requirements
* Consume _ from local _1i8n
* Updated from global requirements
8.0.0.0b2
---------
* Clean up removed hacking rule from [flake8] ignore lists
* Add multi-node devstack support to VPNaaS
* Convert warnings to errors
* ipsec site connection status is blocked on "DOWN"
* Add constraints targets support for neutron-vpnaas
* Updated from global requirements
* Fix for the deprecated library function
* Updated from global requirements
* LOG.warn -> LOG.warning
* Make VPN endpoint groups aware from API
* Don't use constrained environment for functional tests
* Fix some inconsistency in docstrings
* Updated from global requirements
* tox_install.sh: don't hide output from tox logs
* Avoid duplicating tenant check when creating resources
* Fix a typo from UPPER_CONTRAINTS_FILE to UPPER_CONSTRAINTS_FILE
* Setup for translation
* Remove Neutron VPNaaS static example configuration files
* Deprecated tox -downloadcache option removed
* Automatically generate neutron VPNaaS configuration files
* Updated from global requirements
* Updated from global requirements
* Added constraints tox targets
* Fix db error when running python34 Unit tests
* Add reno for release notes management
* Remove pylint from the default list of tox targets
* Fix pylint/astroid breakage
* Remove version from setup.cfg
8.0.0.0b1
---------
* Switch to internal _i18n pattern, as per oslo_i18n guidelines
* Updated from global requirements
* Fix wrong file name in setup.cfg
* Prepare neutron "use_namespaces" option removal
* Updated from global requirements
* Fix options for pluto per-peer logging
* Updated from global requirements
* Updated from global requirements
* Set ZUUL_BRANCH using BRANCH if not available
* Remove unused logging module import
* Set IPSec site connection Down if peer doesn't respond
* VPNaaS: Remove unneeded test
* Don't assume the order of endpoints on get is the same as on create
* Don't add disabled ipsec connections to pluto
* Support testing of multiple local subnets
* Updated from global requirements
* VPNaaS: Multiple Local Subnets feature
* Remove references to router_delete_namespaces option in tests
* Update list of modules supporting py34 tests
* Switch to using neutron.common.utils:replace_file()
* Updated from global requirements
* Updated from global requirements
* Cleanup .ctl/.pid files for both OpenSwan and LibreSwan
* Updated from global requirements
* Removed new=True argument from create_connection
* Include VPN scenario test for two different tenants
* Include alembic versions directory to the package
7.0.0
-----
* Add testresources used by oslo.db fixture
* Add testresources used by oslo.db fixture
* Updated from global requirements
* Updated from global requirements
* Include scenario test for checking VPN status
* Remove root owned ipsec.secrets for ensure_configs
* Enable configuring LibreSwan in VPNaaS
* Fix argument order for assertEqual to (expected, observed)
* Updated from global requirements
* Include README.rst for rally tests
* VPN Scenario tests using Rally
7.0.0.0rc2
----------
* Tag the alembic migration revisions for Liberty
* Tag the alembic migration revisions for Liberty
* VPNaaS: Provide Endpoint groups capability
* Fix minor comment typos in VPNaaS
* Use stable/liberty branch for neutron dep
* Kill HEADS file
7.0.0.0rc1
----------
* Update defaultbranch in .gitreview to stable/liberty
* Open Mitaka development
* Change ignore-errors to ignore_errors
* Updated from global requirements
* VPNaaS Scenario tests using Rally
* Make chown rootwrap filter ipsec.secrets file specific
* tox.ini: switch to --subproject for check-migration
* Added +x permission to gate hook scripts
* Exclude neutron_vpnaas/tests from coverage report
* [OpenSwan] Enable usage of the MTU value of an IPSec connection
* Use ping assertions from net_utils in test_scenario
* VPNaaS: Restore coverage operation
* Set owner to root for ipsec.secrets for LibreSwan
* Support VPNaaS with L3 HA
* Updated from global requirements
* Support for VPN functional tests on Neutron commits
* Fix AH-ESP transform protocol in IPSec Policy
* Manage cleanup of .ctl/.pid files for LibreSwan
* Implement ModelMigrationSyncTest
* Drop for 'tenant_id' column for 'cisco_csr_identifier_map' table
7.0.0.0b3
---------
* VPNaaS: DevRef for multiple local subnets
* Remove fall-back logic to service provider registration
* Adopt the migration chaining and branch names from neutron
* Explictly set file mode on IPSec pre-shared key files
* Removing unused dependency: discover
* Allow enabling detailed logging for OpenSwan
* VPNaaS: Use new service name for devstack plugin
* Fix UT fallout
* Switch to using os-testr's copy of subunit2html
* VPNaaS: Splitting out models from database class
* Register provider configuration with ServiceTypeManager
* Fix stale module import
* [DevStack] Fix StrongSwan setup on Fedora
* Add cisco_csr_rest_client.py module for py34 support
* Add test_cisco_ipsec.py module for py34 support
* Updated from global requirements
* Revert "Remove default service provider from conf file"
* Rename a test method in test_ipsec.py
* Remove default service provider from conf file
* Killed existing downgrade rules in migration scripts
* script.py.mako: added license header and missing branch_labels
* Add test_netns_wrapper.py module for py34 support
* Use oslo.log library instead of system logging module
* Updated from global requirements
* VPNaaS: Store local side's tunnel IP for later retrieval
* Don't include encryption algorithm in phase2alg for the AH protocol
* py34: Enable initial python34 testing for VPNaaS
* VPNaaS: Correcting method name for functional test
7.0.0.0b2
---------
* py34: Fix usage of gettext.install
* Register alembic_migrations at install time
* Updated from global requirements
* StrongSwanProcess: redefine DIALECT_MAP at class level
* migrations: rearrange the tree to support split migration phases
* Remove quantum untracked files from .gitignore
* adopt neutron.common.utils.ensure_dir
* Revert "VPNaaS: Temporarily disable check_migration"
* VPNaaS: Enable devstack plugin for tests
* VPNaaS DevStack Plugin support
* VPNaaS: Temporarily disable check_migration
* Scenario test for vpnaas: ipsec-site-connection
* Remove dependency on config file for db check_migration
* Set vpn agent's agent_state['binary'] attribute
* VPNaaS: Fix migration head
* VPNaaS: Don't clone neutron automatically for tests
* COMMON_PREFIXES cleanup - patch 4/5
* Updated from global requirements
* VPNaaS: Fix another import due to Neutron change
* Fix breakage due to recent movements of Neutron modules
* Switch to oslo.service
* Updated from global requirements
7.0.0.0b1
---------
* Use DvrEdgeRouter instead of decomposed DvrRouter in test_ipsec
* Switch to oslo_utils.uuidutils
* Trim some unused test requirements
* Update version for Liberty
7.0.0a0
-------
* Updated from global requirements
* Resize cisco_csr_identifier_map.ipsec_site_conn_id
* Updated from global requirements
* Updated from global requirements
* VPNaaS: And devref doc infrastructure
* VPNaaS: Enable pylint duplicate-key check
* Updated from global requirements
* Enable random hash seeds
* Python 3: use six.iteritems instead of dict.items
* Do not assume order of mounts in execute_with_mount
* Set owner of Q_VPN_CONF_FILE file to STACK_USER user
* VPNaaS: Cleanup functional hook scripts
* gate-neutron-vpnaas-pep8 failing for test_cisco_ipsec.py
* Assign external_ip based on ip version of peer_address
* Switch from MySQL-python to PyMySQL
* Updated from global requirements
* VPNaaS: Fix breakage in status reporting
* VPNaaS: Revise functional test hooks
* Add neutron-vpnaas/tests/unit/extensions/__init__
* Remove contextlib.nested from tests
* IPv6 support for OpenSwan, Libreswan and Strongswan
* Updated from global requirements
* Libreswan driver support in VPNaaS
* Updated from global requirements
* Provide Fedora support for StrongSwan
* Fix failures for integration tests
2015.1.0
--------
* VPNaaS: Refactor functional tests to use discover
* update .gitreview for stable/kilo
* Add Kilo release milestone
* Add Kilo release milestone
* Pin neutron to stable/kilo
* VPNService takes names of device drivers from self.conf
* VPNaaS Remove dependency on Neutron for unit test
2015.1.0rc1
-----------
* VPNaaS: Remove check for bash usage
* VPNaaS: Reorganize test tree
* Open Liberty development
* VPNaaS: Refactoring to use callback mechanism
* VPNaaS Fix unit test breakage
* Use BaseSudoTestCase instead of BaseLinuxTestCase
* Set ipsec connection to Error if peer fqdn can't be resolved
* Updated from global requirements
* Introduce Vyatta VPN agent cmd in monkey patched eventlet module
* Add some unit tests for strongswan driver
2015.1.0b3
----------
* tests: stop overwriting neutron BaseTestCase configuration files
* Functional tests of ipsec strongswan vpnaas driver
* IPsec strongSwan driver implemention
* VPNaaS breakage by refactoring commit
* Fix functional test breakage from DevStack change
* VPNaaS: Enable StrongSwan in gate hook
* Remove the reference for non-existent cisco.l3.plugging_drivers
* VPNaaS: device driver and agent refactoring
* Decouple L3 and VPN during DVR router migration
* VPNaaS: Fixing UT breakage
* Updating alembic HEAD file according to the current code
* Updated from global requirements
* VPNaaS: Fix unit test breakage
* Fix up the import path for vyatta.common to use networking_brocade
* Move pylint checks to pep8 testenv
* VPNaaS: Restructure test dir layout
* Decouple L3 and VPN service plugins during router operations
* Migrate to oslo.log
* Add test case for the 'Peer ID gets additional "@"' fix
* VPNaaS refactor service driver to reuse VpnDriver code
* Change L3 agent AdvancedService class to be non-singleton
* Fix the ipsec conn issue when peer addr is fqdn
* Updated from global requirements
* Remove remaining root_helper references
* Implementation of Brocade Vyatta VPNaaS Plugin
* Explicitly monkey patch VPN agent
* Add IPSec encap mode validation to Cisco VPNaas
* VPNaaS Enable coverage testing for functional tests
* Reorder Neutron import statements in file
* Stop storing and passing root_helper
* Fix breakage caused by removing deprecated root_helper config in neutron
* Add index on tenant_id
* Provide service info for RouterInUse exception
* VPNaaS: Remove duplication for exception - part 1
* VPNaaS: Enable coverage tests
* Fixed tests to use neutron_vpnaas extensions and neutrons
* oslo: migrate to namespace-less import paths
2015.1.0b2
----------
* Updated from global requirements
* Move config and extensions to service repo
* Provide hooks for VPNaaS repo functional gate
* Pass root_helper to ip_lib by keyword argument to prep for removal
* Handle common boilerplate arguments to RouterInfo
* Fix the neutron-vpnaas unit test failures
* Updated from global requirements
* Updated from global requirements
* vpn namespace wrapper
* Updated from global requirements
* Migrate to oslo.concurrency
* Updated from global requirements
* Update hacking to 0.10
* Updated from global requirements
* Adapt VPN agent to use new main for L3 Agent
* Updated from global requirements
* VPNaaS: Remove unneeded metaclass decorator
* Fix VPN Service for Distributed Routers
* Backward compatibility for vpnaas
* Moved vpnaas.filters from main neutron repo
* Cleaned up requirements.txt
* Bump from global requirements
* Added __init__.py so migrations can work
* Fix gitignore of egg files properly
* Do not list neutron in requirements.txt
* VPNaas: L3 Agent restructure - observer hierarchy
* VPNaaS: Unit tests using policy.conf
* Update documentation files for VPNaaS
2015.1.0b1
----------
* Do not restart vpn processes for every router update
* VPNaaS: Advanced Services split - unit tests
* Kill oslo-incubator files
* tests: initialize admin context after super().setUp call
* Init separate alembic migration chain
* Remove erroneously commited egg files
* Move classes out of l3_agent.py
* Fix python neutron path for neutron_vpnaas
* After the services split, get neutron-vpnaas Jenkins jobs passing
* Point gitreview at correct repo
* Split vpnaas services code into neutron-vpnaas
* Workflow documentation is now in infra-manual
* tox.ini: Prevent casual addition of bash dependency
* Updated from global requirements
* Convert several uses of RpcCallback
* Get rid of py26 references: OrderedDict, httplib, xml testing
* Updated the README.rst
* pretty_tox.sh: Portablity improvement
* test_dhcp_agent: Fix no-op tests
* Enable undefined-loop-variable pylint check
* Fix incorrect exception order in _execute_request
* Migrate to oslo.i18n
* Migrate to oslo.middleware
* Migrate to oslo.utils
* Remove Python 2.6 classifier
* Remove ryu plugin
* Updated from global requirements
* Drop RpcProxy usage from VPNaaS code
* Show progress output while running unit tests
* enable H401 hacking check
* enable H237 check
* Updated from global requirements
* Drop several uses of RpcCallback
* Updated from global requirements
* Update i18n translation for neutron.agents log msg's
* enable F812 check for flake8
* enable F811 check for flake8
* Support pudb as a different post mortem debugger
* switch to oslo.serialization
* Add rootwrap filters for ofagent
* Cisco VPNaaS and L3 router plugin integration
* Remove openvswitch core plugin entry point
* Updated from global requirements
* Purge use of "PRED and A or B" poor-mans-ternary
* Remove use_namespaces from RouterInfo Property
* Updated from global requirements
* Remove XML support
* enable F402 check for flake8
* enable E713 in pep8 tests
* Hyper-V: Remove useless use of "else" clause on for loop
* Enable no-name-in-module pylint check
* Move disabling of metadata and ipv6_ra to _destroy_router_namespace
* Updated from global requirements
* Remove duplicate import of constants module
* Switch run-time import to using importutils.import_module
* Enable assignment-from-no-return pylint check
* tox.ini: Avoid using bash where unnecessary
* Empty files should not contain copyright or license
* Remove single occurrence of lost-exception warning
* Updated fileutils and its dependencies
* VPNaaS Cisco unit test clean-up
* remove E251 exemption from pep8 check
* Update VPN logging to use new i18n functions
* mock.assert_called_once() is not a valid method
* Check for VPN Objects when deleting interfaces
* Add pylint tox environment and disable all existing warnings
* Updated from global requirements
* Ignore top-level hidden dirs/files by default
* Avoid constructing a RouterInfo object to get namespace name
* Drop sslutils and versionutils modules
* Refactor _process_routers to handle a single router
* Remove all_routers argument from _process_routers
* Removed kombu from requirements
* Updated from global requirements
* Updated from global requirements
* Remove sslutils from openstack.common
* Fix setup of Neutron core plugin in VPNaaS UT
* remove linuxbridge plugin
* Open Kilo development
* Implement ModelsMigrationsSync test from oslo.db
* Do not assume order of report list elements
* Fix entrypoint of OneConvergencePlugin plugin
* Rework and enable VPNaaS UT for Cisco CSR REST
* Set dsvm-functional job to use system packages
* Separate Configuration from Freescale SDN ML2 mechanism Driver
* Remove @author(s) from copyright statements
* Add HA support to the l3 agent
* Updated from global requirements
* Adds ipset support for Security Groups
* UTs: Disable auto deletion of ports/subnets/nets
* Add requests_mock to test-requirements.txt
* Removed kombu from requirements
* Supply missing cisco_cfg_agent.ini file
* Updated from global requirements
* Work toward Python 3.4 support and testing
* Revert "Cisco DFA ML2 Mechanism Driver"
* Big Switch: Separate L3 functions into L3 service
* Remove reference to cisco_cfg_agent.ini from setup.cfg again
* Adds router service plugin for CSR1kv
* Support for extensions in ML2
* Cisco DFA ML2 Mechanism Driver
* Adding mechanism driver in ML2 plugin for Nuage Networks
* Fix state_path in tests
* Remove ovs dependency in embrane plugin
* Use lockutils module for tox functional env
* Cisco VPN with in-band CSR (interim solution)
* Inline "for val in [ref]" statements
* Updated from global requirements
* VPNaaS: Enable UT cases with newer oslo.messaging
* Add specific docs build option to tox
* Fix bigswitch setup.cfg lines
* Remove auto-generation of db schema from models at startup
* Updated from global requirements
* Use jsonutils instead of stdlib json
* VPNaaS: Cisco fix validation for GW IP
* Opencontrail plug-in implementation for core resources
* Do not assume order of new_peers list elements
* Remove redundant topic from rpc calls
* Add a tox test environment for random hashseed testing
* Updated from global requirements
* Move Cisco VPN RESTapi URI strings to constants
* Remove reference to cisco_cfg_agent.ini from setup.cfg
* Exit Firewall Agent if config is invalid
* Fix spelling mistakes
* Removed configobj from test requirements
* Updated from global requirements
* Functional tests work fine with random PYTHONHASHSEED
* Set python hash seed to 0 in tox.ini
* Configuration agent for Cisco devices
* Updated from global requirements
* Define some abstract methods in VpnDriver class
* ML2 mechanism driver for SR-IOV capable NIC based switching, Part 2
* Modify L3 Agent for Distributed Routers
* This patch changes the name of directory from mech_arista to arista
* ML2 mechanism driver for SR-IOV capable NIC based switching, Part 1
* Allow to import _LC, _LE, _LI and _LW functions directly
* Make readme reference git.openstack.org not github
* VPNaaS: Separate validation for Cisco impl
* VPNaaS: separate out validation logic for ref impl
* VPNaaS Cisco REST client enhance CSR create
* Bump hacking to version 0.9.2
* Use auth_token from keystonemiddleware
* Change all occurences of no_delete to do_delete
* Revert "VPNaaS REST Client UT Broken"
* Extract CommonDBMixin to a separate file
* Remove reference to setuptools_git
* Add a gate-specific tox env for functional tests
* Add CONTRIBUTING.rst
* Updated from global requirements
* VPNaaS REST Client UT Broken
* Updated from global requirements
* Updated from global requirements
* Fix example for running individual tests
* Switch to using of oslo.db
* remove unsupported middleware
* Add config for performance gate job
* Synced log module and its dependencies from olso-incubator
* don't ignore rules that are already enforced
* Moved rpc_compat.py code back into rpc.py
* Updated from global requirements
* Updated from global requirements
* ofagent: move main module from ryu repository
* Remove the useless vim modelines
* Removed 'rpc' and 'notifier' incubator modules
* Removed create_rpc_dispatcher methods
* Use openstack.common.lockutils module for locks in tox functional tests
* Renamed consume_in_thread -> consume_in_threads
* Port to oslo.messaging
* Pass 'top' to remove_rule so that rule matching succeeds
* Updated from global requirements
* Ignore emacs checkpoint files
* Added missing core_plugins symbolic names
* Introduced rpc_compat.create_connection()
* Introduce RpcCallback class
* remove pep8 E122 exemption and correct style
* remove E112 hacking exemption and fix errors
* Updated from global requirements
* Added RpcProxy class
* Freescale SDN Mechanism Driver for ML2 Plugin
* Remove run-time version checking for openvswitch features
* Added missing plugin .ini files to setup.cfg
* Updated from global requirements
* Synced jsonutils from oslo-incubator
* Cisco APIC ML2 mechanism driver, part 2
* NSX: get rid of the last Nicira/NVP bits
* Metaclass Python 3.x Compatibility
* Add missing translation support
* Add mailmap entry
* Updated from global requirements
* Remove explicit dependency on amqplib
* Remove duplicate module-rgx line in .pylintrc
* Fix H302 violations
* Fix H302 violations in unit tests
* Cisco VPN device driver - support IPSec connection updates
* Updated from global requirements
* Fix H302 violations in db package and services
* Updated from global requirements
* Support enhancements to Cisco CSR VPN REST APIs
* Exclude .ropeproject from flake8 checks
* Enable flake8 E711 and E712 checking
* Updated from global requirements
* Sync service and systemd modules from oslo-incubator
* Move bash whitelisting to pep8 testenv
* Fix Jenkins translation jobs
* Set ns_name in RouterInfo as attribute
* ignore build directory for pep8
* Enable hacking H301 check
* Updated from global requirements
* Remove last parts of Quantum compatibility shim
* UT: do not hide an original error in test resource ctxtmgr
* Open Juno development
* Start using oslosphinx theme for docs
* Cisco VPN driver correct reporting for admin state chg
* Updated from global requirements
* VPNaaS support for VPN service admin state change and reporting
* add HEAD sentinel file that contains migration revision
* Fix usage of save_and_reraise_exception
* Cisco VPN device driver post-merge cleanup
* Bugfix and refactoring for ovs_lib flow methods
* Removes calls to mock.patch.stopall in unit tests
* VPNaaS Device Driver for Cisco CSR
* Updated from global requirements
* Updated from global requirements
* Updated from global requirements
* One Convergence Neutron Plugin l3 ext support
* One Convergence Neutron Plugin Implementation
* BigSwitch: Add SSL Certificate Validation
* VPNaaS Service Driver for Cisco CSR
* Updated from global requirements
* Add OpenDaylight ML2 MechanismDriver
* Implementaion of Mechanism driver for Brocade VDX cluster of switches
* Implement Mellanox ML2 MechanismDriver
* Support advanced NVP IPsec VPN Service
* Implement OpenFlow Agent mechanism driver
* Finish off rebranding of the Nicira NVP plugin
* BigSwitch: Add agent to support neutron sec groups
* Adds the new IBM SDN-VE plugin
* Updated from global requirements
* Update License Headers to replace Nicira with VMware
* Developer documentation
* tests/service: consolidate setUp/tearDown logic
* options: consolidate options definitions
* Rename Neutron core/service plugins for VMware NSX
* Updated from global requirements
* Fix VPN agent does not handle multiple connections per vpn service
* Sync minimum requirements
* Copy cache package from oslo-incubator
* tests/unit: Initialize core plugin in TestL3GwModeMixin
* Fix typo in service_drivers.ipsec
* Remove dependent module py3kcompat
* Use save_and_reraise_exception when reraise exception
* Add migration support from agent to NSX dhcp/metadata services
* Remove psutil dependency
* LBaaS: move agent based driver files into a separate dir
* mailmap: update .mailmap
* Return request-id in API response
* Prepare for multiple cisco ML2 mech drivers
* Support building wheels (PEP-427)
* Use oslo.rootwrap library instead of local copy
* Enables BigSwitch/Restproxy ML2 VLAN driver
* Add an explicit tox job for functional tests
* Base ML2 bulk support on the loaded drivers
* Enable hacking H233 rule
* Update RPC code from oslo
* Configure plugins by name
* Update lockutils and fixture in openstack.common
* Rename nicira configuration elements to match new naming structure
* Remove unused imports
* Rename check_nvp_config utility tool
* Corrects broken format strings in check_i18n.py
* Updates tox.ini to use new features
* Updated from global requirements
* Sync global requirements to pin sphinx to sphinx>=1.1.2,<1.2
* validate if the router has external gateway interface set
* Add fwaas_driver.ini to setup.cfg
* Add vpnaas and debug filters to setup.cfg
* Fix misspells
* update error msg for invalid state to update vpn resources
* Updates .gitignore
* Update Zhenguo Niu's mailmap
* Replace stubout with fixtures
* Ensure get_pid_to_kill works with rootwrap script
* Apply six for metaclass
* Updated from global requirements
* Cleanup HACKING.rst
* Fix import log_handler error with publish_errors set
* Updated from global requirements
* Updated from global requirements
* Fix incorrect indentations found by Pep 1.4.6+
* Cleanup and make HACKING.rst DRYer
* Add support for managing async processes
* Use L3 api from vpn ipsec driver via service plugin
* Fix access to lifetime dict in update_ipsecpolicy method
* Remove obsolete redhat-eventlet.patch
* Enable Quota DB driver by default
* Open Icehouse development
* Updated from global requirements
* Require oslo.config 1.2.0 final
* Use built-in print() instead of print statement
* Increase size of peer_address attribute in VPNaaS
* Add router ownership check on vpnservice creation
* Fix error code for deletion of router which is in use by vpnservice
* Add l2 population base classes
* Adds support for L3 routing/NAT as a service plugin
* Fix message i18n error
* Install metering_agent.ini and vpn_agent.ini
* fix conversion type missing
* Ensure unit tests do not let looping calls roam freely
* Enclose command args in with_venv.sh
* ML2 Mechanism Driver for Cisco Nexus
* Reference driver implementation (IPsec) for VPNaaS
* Verify MTU is valid for ipsec_site_connection
* Implement ML2 port binding
* Arista ML2 Mechanism driver
* ML2 Mechanism Driver for Tail-f Network Control System (NCS)
* Default to not capturing log output in tests
* Make ipsec_site_connection dpd_timeout == dpd_interval return 400
* Add Neutron l3 metering agent
* Update mailmap
* Fix wrong example in HACKING.rst
* Bumps hacking to 0.7.0
* remove binaries under bin
* Fixes Windows setup dependency bug
* Restore Babel to requirements.txt
* Remove DHCP lease logic
* Remove last vestiges of nose
* Updated from global requirements
* Ignore pbr*.egg directory
* Fix H102, H103 Apache 2.0 license hacking check error
* Remove openstack.common.exception usage
* Adds Babel dependency missing from 555d27c
* Fix the alphabetical order in requirement files
* VPNaaS datamodel IKEPolicy lifetime unit typo
* Remove comments from requirements.txt (workaround pbr bug)
* VPN as a Service (VPNaaS) API and DataModel
* remove netifaces dependency of ryu-agent
* Add gre tunneling support for the ML2 plugin
* Add VXLAN tunneling support for the ML2 plugin
* xenapi - rename quantum to neutron
* Fix issue with pip installing oslo.config-1.2.0
* Initial Modular L2 Mechanism Driver implementation
* Add cover/ to .gitignore
* fix some missing change from quantum to neutron
* git remove old non-working packaging files
* Rename Quantum to Neutron
* Rename quantum to neutron in .gitreview
* Sync install_venv_common from oslo
* Update to use OSLO db
* Require greenlet 0.3.2 (or later)
* Remove single-version-externally-managed in setup.cfg
* Fix single-version-externally-mananged typo in setup.cfg
* Allow use of lowercase section names in conf files
* Require pbr 0.5.16 or newer
* Update to the latest stevedore
* Rename agent_loadbalancer directory to loadbalancer
* Remove unit tests that are no longer run
* Update with latest OSLO code
* Remove explicit distribute depend
* Fix and enable H90x tests
* Remove generic Exception when using assertRaises
* Add *.swo/swp to .gitignore
* python3: Introduce py33 to tox.ini
* Rename README to README.rst
* Rename requires files to standard names
* Initial Modular L2 plugin implementation
* Revert dependency on oslo.config 1.2.0
* Perform a sync with oslo-incubator
* Require oslo.config 1.2.0a2
* update mailmap
* Revert "Fix ./run_tests.sh --pep8"
* Move to pbr
* Docstrings formatted according to pep257
* relax amqplib and kombu version requirements
* Fix ./run_tests.sh --pep8
* blueprint mellanox-quantum-plugin
* Update flake8 pinned versions
* Let the cover venv run individual tests
* Copy the RHEL6 eventlet workaround from Oslo
* Remove locals() from strings substitutions
* Enable automatic validation of many HACKING rules
* Shorten the path of the nicira nvp plugin
* Allow pdb debugging in manually-invoked tests
* Reformat openstack-common.conf
* Switch to flake8 from pep8
* Parallelize quantum unit testing:
* blueprint cisco-single-config
* Add lbaas_agent files to setup.py
* Add VIRTUAL_ENV key to enviroment passed to patch_tox_env
* Pin SQLAlchemy to 0.7.x
* Sync latest Oslo components for updated copyright
* drop rfc.sh
* Replace "OpenStack LLC" with "OpenStack Foundation"
* First havana commit
* remove references to netstack in setup.py
* Switch to final 1.1.0 oslo.config release
* Update to Quantum Client 2.2.0
* Update tox.ini to support RHEL 6.x
* Switch to oslo.config
* Add common test base class to hold common things
* Pin pep8 to 1.3.3
* Add initial testr support
* LBaaS Agent Reference Implementation
* Bump python-quantumclient version to 2.1.2
* Add scheduling feature basing on agent management extension
* Remove compat cfg wrapper
* Unpin PasteDeploy dependency version
* Use testtools instead of unittest or unittest2
* Add midonet to setup.py
* Sync latest install_venv_common.py with olso
* Add check-nvp-config utility
* Add unit test for ryu-agent
* Use oslo-config-2013.1b3
* Adds Brocade Plugin implementation
* Synchronize code from oslo
* PLUMgrid quantum plugin
* Update .coveragerc
* Allow tools/install_venv_common.py to be run from within the source directory
* Updated to latest oslo-version code
* Use install_venv_common.py from oslo
* Cisco plugin cleanup
* Use babel to generate translation file
* Update WebOb version to >=1.2
* Update latest OSLO
* Adding multi switch support to the Cisco Nexus plugin
* Adds support for deploying Quantum on Windows
* Latest OSLO updates
* Port to argparse based cfg
* Add migration support to Quantum
* Undo change to require WebOb 1.2.3, instead, require only >=1.0.8
* .gitignore cleanup
* Upgrade WebOb to 1.2.3
* Logging module cleanup
* Add OVS cleanup utility
* Add tox artifacts to .gitignore
* Add restproxy.ini to config_path in setup.py
* Add script for checking i18n message
* l3 agent rpc
* Add metadata_agent.ini to config_path in setup.py
* Remove __init__.py from bin/ and tools/
* add metadata proxy support for Quantum Networks
* Use auth_token middleware in keystoneclient
* Add QUANTUM_ prefix for env used by quantum-debug
* Make tox.ini run pep8 checks on bin
* Explicitly include versioninfo in tarball
* Import lockutils and fileutils from openstack-common
* Updated openstack-common setup and version code
* Ensure that the anyjson version is correct
* Add eventlet_backdoor and threadgroup from openstack-common
* Add loopingcall from openstack-common
* Added service from openstack-common
* Drop lxml dependency
* Add uuidutils module
* Import order clean-up
* pin sqlalchemy to 0.7
* Correct Intended Audience
* Add OpenStack trove classifier for PyPI
* Improve unit test times
* l3_nat_agent was renamed to l3_agent and this was missed
* Support for several HA RabbitMQ servers
* add missing files from setup.py
* Create .mailmap file
* Lower webob dep from v1.2.0 to v1.0.8
* Implements agent for Quantum Networking testing
* Create utility to clean-up netns
* Update rootwrap; track changes in nova/cinder
* Execute unit tests for Cisco plugin with Quantum tests
* Add lease expiration script support for dnsmasq
* Add nosehtmloutput as a test dependency
* quantum l3 + floating IP support
* Updates pip requirements
* NEC OpenFlow plugin support
* remove old gflags config code
* RPC support for OVS Plugin and Agent
* Initial implemention of MetaPlugin
* RPC support for Linux Bridge Plugin and Agent
* Exempt openstack-common from pep8 check
* fix bug lp:1025526,update iniparser.py to accept empty value
* Introduce files from openstack common
* fix bug lp:1019230,update rpc from openstack-common
* implement dhcp agent for quantum
* Use setuptools git plugin for file inclusion
* Remove paste configuration details to a separate file. blueprint use-common-cfg
* Implements the blueprint use-common-cfg for the quantum service. More specifically uses global CONF for the quantum.conf file
* Add authZ through incorporation of policy checks
* Bug #1013967 - Quantum is breaking on tests with pep 1.3
* Use openstack.common.exception
* API v2: improve validation of post/put, rename a few attributes
* Add API v2 support
* Fix up test running to match jenkins expectation
* Add build_sphinx options
* Quantum should use openstack.common.jsonutils
* Remove hardcoded version for pep8 from tools/test-requires
* Quantum should use openstack.common.importutils
* PEP8 fixes
* Bug #1002605
* Parse linuxbridge plugins using openstack.common.cfg
* Add HACKING.rst to tarball generation bug 1001220
* Include AUTHORS in release package
* Change Resource.__call__() to not leak internal errors
* Removed simplejson from pip-requires
* Remove dependency on python-quantumclient
* Add sphinx to the test build deps
* Add HACKING.rst coding style doc
* bug 963152: add a few missing files to sdist tarball
* Fix path to python-quantumclient
* Split out pip requires and aligned tox file
* Fix missing files in sdist package [bug 954906]
* Downgraded required version of WebOb to 1.0.8
* more files missing in sdist tarball
* make sure pip-requires is included in setup.py sdist
* remove pep8 and strict lxml version from setup.py
* plugin: introduce ryu plugin
* bug 934459: pip no longer supports -E
* blueprint quantum-ovs-tunnel-agent
* Initial commit: nvp plugin
* Cleanup the source distribution
* blueprint quantum-linux-bridge-plugin
* Remove quantum CLI console script
* Bug 925372: remove deprecated webob attributes (and also specify stable webob version in pip-requires)
essex-3
-------
* Make tox config work
* Pin versions to standard versions
* Split out quantum.client and quantum.common
* Quantum was missing depend on lxml
* moving batch config out of quantum-server repo
* Getting ready for the client split
* Removed erroneous print from setup.py
* Base version.py on glance
* Fix lp bug 897882
* Install a good version of pip in the venv
* Rename .quantum-venv to .venv
* Remove plugin pip-requires
essex-2
-------
* Bug #890028
* Fix for bug 900316
* Second round of packaging changes
* Changes to make pip-based tests work with jenkins
* Fix for bug 888811
* Fix for Bug #888820 - pip-requires file support for plugins
essex-1
-------
* blueprint quantum-packaging
* Add .gitreview config file for gerrit
* Add code-coverage support to run_tests.sh (lp860160)
2011.3
------
* Add rfc.sh to help with gerrit workflow
* merge tyler's unit tests for cisco plugin changes lp845140
* merge salv's no-cheetah CLI branch lp 842190
* merge sumit's branch for lp837752
* Merging latest from lp:quantum
* Merging lp:~salvatore-orlando/quantum/quantum-api-auth
* Updating CLI for not using Cheetah anymore. Now using a mechanism based on Python built-in templates
* Merging Sumit's changes including fixes for multinic support, and CLI module for working with extensions
* Merging from Cisco branch
* Merging from lp:quantum
* merge cisco consolidated plugin changes
* Merging lp:~salvatore-orlando/quantum/bug834449
* merge trunk
* Merging from lp:quantum
* merge salvatore's new cli code
* Addressing comments from Dan
* Merging from quantum
* merge cisco extensions branch
* Merging from Sumit's branch, changes to VIF-driver and Scheduler; extension action names have been changed in response to Salvatore's review comments in the extensions branch review
* Syncing with Cisco extensions branch
* Merging from Sumit's branch, import ordering related changes
* Merging the Cisco branch
* Finishing cli work Fixing bug with XML deserialization
* Merging lp:~salvatore-orlando/quantum/quantum-api-alignment
* merge latest quantum branch and resolve conflicts
* Merging lp:~asomya/quantum/lp833163 Fix for Bug #833163: Pep8 violations in recent packaging changes that were merged into trunk (Critical)
* PEP8 fixes for setup.py
* Merging lp:~cisco-openstack/quantum/802dot1qbh-vifdriver-scheduler
* Merging lp:~cisco-openstack/quantum/l2network-plugin-persistence
* Merging lp:quantum
* merging with lp:quantum
* Making Keystone version configurable
* Merging branch: lp:~danwent/quantum/test-refactor
* Syncing with lp:quantum
* Merging fixes and changes batch-config script. Thanks lp:danwent !
* Merging lp:~asomya/quantum/lp824145 Fix for Bug#824145 : Adding a setup script for quantum
* merge trunk pep8 fixes adapting CLI to API v1.0 Fixing wsgi to avoid failure with extensions
* merge trunk
* Pulling in changes from lp:quantum
* Merging Cisco's contribution to Quantum. Thanks to various folks at Cisco Systems, Quantum will have plugins to integrate with Cisco UCS blade servers using 802.1Qbh, Cisco Nexus family of switches and the ability for Quantum plugin to have multiple switches/devices within a single Quantum plugin
* Merging from Sumit's branch pylint fixes and incorporating review comments
* Merging from Cisco branch
* Merging from lp:quantum
* Introducing cheetah Updating list_nets in CLI Writing unit tests for list_nets Stubbing out with FakeConnection now
* Merging quantum extenions framework into trunk. Thanks rajaram vinkesh, deepak & santhosh for the great work!
* lp Bug#824145 : Adding a setup script for quantum
* skeleton for cli unit tests
* merge trunk
* Merged quantum trunk
* - Adding setup script
* force batch_config.py to use json, as XML has issues (see bug: 798262)
* update batch_config.py to use new client lib, hooray for deleting code
* Merging changes addressing Bug # 802772. Thanks lp:danwent !
* Merging bugfix for Bug 822890 - Added License file for Quantum code distribution
* L2 Network Plugin Framework merge
* Adding Apache Version 2.0 license file. This is the official license agreement under which Quantum code is available to the Open Source community
* merge
* merge heckj's pip-requires fixes
* updates to pip-requires for CI
* Merged quantum trunk
* Merging changes from lp:quantum
* Completing API spec alignment Unit tests aligned with changes in the API spec
* Merging the brand new Quantum-client-library feature
* Merging lp:quantum updates
* persistence of l2network & ucs plugins using mysql - db_conn.ini - configuration details of making a connection to the database - db_test_plugin.py - contains abstraction methods for storing database values in a dict and unit test cases for DB testing - l2network_db.py - db methods for l2network models - l2network_models.py - class definitions for the l2 network tables - ucs_db.py - db methods for ucs models - ucs_models.py - class definition for the ucs tables dynamic loading of the 2nd layer plugin db's based on passed arguments Create, Delete, Get, Getall, Update database methods at - Quantum, L2Network and Ucs Unit test cases for create, delete, getall and update operations for L2Network and Ucs plugins pep8 checks done branch based off revision 34 plugin-framework
* Merged from trunk
* merged the latest changes from plugin-framework branch - revision 39 conforming to the new cisco plugin directory structure and moving all db related modules into cisco/db folder updated db_test_plugin.py - added import of cisco constants module - added LOG.getLogger for logging component name - updated import module paths for l2network_models/db and ucs_models/db to use the new directory structure - updated (rearranged) imports section to obey openstack alphabetical placement convention updated db_conn.ini - updated database name from cisco_naas to quantum_l2network unit test cases ran successfully and pep8 checks done again
* merge branch for to fix bug817826
* Merging the latest changes from lp:quantum
* fix bug 817826 and similar error in batch_config.py
* merge Salvatore's api branch with fixes for tests. Tweaking branch to remove unwanted bin/quantum.py as part of merge
* Santhosh/Rajaram|latest merge from quantum and made extensions use options to load plugin
* Apply fix for bug #797419 merging lp:~salvatore-orlando/quantum/bug797419
* Merging branch lp:~netstack/quantum/quantum-unit-tests
* Merged from quantum trunk
* Adapated plugin infrastructure to allow API to pass options to plugins Now using in-memory sqlite db for tests on FakePlugin teardown() now 'resets' the in-memory db Adding unit tests for APIs
* Adding Routes>=1.12.3 to tools/pip-requires
* Merging dan wendlandt's bugfixes for Bug #800466 and improvements that enable Quantum to seamlessly run on KVM!
* more pep8 goodness
* refactor batch_config, allow multiple attaches with the empty string
* merge and pep8 cleanup
* Merging latest changes from parent repo - lp:network-service , Parent repo had approved merge proposal for merging lp:~santhom/network-service/quantum_testing_framework , which has now been merged into lp:network-service
* Merging pep8 and functional test related changes lp:~santhom/network-service/quantum_testing_framework branch
* add example to usage string for batch_config.py
* Bug fixes and clean-up, including supporting libvirt
* Santhosh/Vinkesh | Added the testing framework. Moved the smoketest to tests/functional
* Pushing initial started code based on Glance project and infrstructure work done by the melange team
* Merging in latest changes from lp:quantum
neutron-vpnaas-8.0.0/PKG-INFO 0000664 0005670 0005671 00000002654 12701410103 016731 0 ustar jenkins jenkins 0000000 0000000 Metadata-Version: 1.1
Name: neutron-vpnaas
Version: 8.0.0
Summary: OpenStack Networking VPN as a Service
Home-page: http://www.openstack.org/
Author: OpenStack
Author-email: openstack-dev@lists.openstack.org
License: UNKNOWN
Description: Welcome!
========
This package contains the code for the Neutron VPN as a Service
(VPNaaS) service. This includes third-party drivers. This package
requires Neutron to run.
External Resources:
===================
The homepage for Neutron is: http://launchpad.net/neutron. Use this
site for asking for help, and filing bugs. We use a single Launchpad
page for all Neutron projects.
Code is available on git.openstack.org.
Platform: UNKNOWN
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
neutron-vpnaas-8.0.0/rally-jobs/ 0000775 0005670 0005671 00000000000 12701410103 017703 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/rally-jobs/__init__.py 0000664 0005670 0005671 00000000000 12701407726 022023 0 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/rally-jobs/plugins/ 0000775 0005670 0005671 00000000000 12701410103 021364 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/rally-jobs/plugins/vpn_base.py 0000664 0005670 0005671 00000052301 12701407726 023555 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import concurrent.futures
import exceptions
import re
import threading
import time
from oslo_utils import uuidutils
from rally.common import log as logging
from rally.plugins.openstack import scenario as rally_base
from rally.task import atomic
import vpn_utils
LOG = logging.getLogger(__name__)
LOCK = threading.RLock()
MAX_RESOURCES = 2
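# MAX_RESOURCES = 2 reflects the two VPN endpoints (local and peer) that every
# scenario builds; the RLock guards the shared resource lists that the helper
# methods append to while those endpoints are being set up.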
class VpnBase(rally_base.OpenStackScenario):
def setup(self, **kwargs):
"""Create and initialize data structures to hold various resources"""
with LOCK:
LOG.debug('SETUP RESOURCES')
self.neutron_admin_client = self.admin_clients("neutron")
if kwargs['use_admin_client']:
self.neutron_client = self.neutron_admin_client
self.keystone_client = self.admin_clients("keystone")
self.nova_client = self.admin_clients("nova")
else:
self.neutron_client = self.clients("neutron")
self.nova_client = self.clients("nova")
self.suffixes = [uuidutils.generate_uuid(),
uuidutils.generate_uuid()]
self.remote_key_files = ['rally_keypair_' + x
for x in self.suffixes]
self.local_key_files = ['/tmp/' + x for x in self.remote_key_files]
self.private_key_file = kwargs["private_key"]
self.keypairs = []
self.tenant_ids = []
self.ns_controller_tuples = []
self.qrouterns_compute_tuples = []
self.router_ids = []
self.rally_router_gw_ips = []
self.rally_routers = []
self.rally_networks = []
self.rally_subnets = []
self.rally_cidrs = []
self.ike_policy = None
self.ipsec_policy = None
self.vpn_services = []
self.ipsec_site_connections = []
self.servers = []
self.server_private_ips = []
self.server_fips = []
def create_tenants(self):
"""Create tenants"""
for x in range(MAX_RESOURCES):
tenant_id = vpn_utils.create_tenant(
self.keystone_client, self.suffixes[x])
with LOCK:
self.tenant_ids.append(tenant_id)
def create_networks(self, **kwargs):
"""Create networks to test vpn connectivity"""
for x in range(MAX_RESOURCES):
if self.tenant_ids:
router, network, subnet, cidr = vpn_utils.create_network(
self.neutron_client, self.neutron_admin_client,
self.suffixes[x], tenant_id=self.tenant_ids[x],
DVR_flag=kwargs["DVR_flag"],
ext_net_name=kwargs["ext-net"])
else:
router, network, subnet, cidr = vpn_utils.create_network(
self.neutron_client, self.neutron_admin_client,
self.suffixes[x], DVR_flag=kwargs["DVR_flag"],
ext_net_name=kwargs["ext-net"])
with LOCK:
self.rally_cidrs.append(cidr)
self.rally_subnets.append(subnet)
self.rally_networks.append(network)
self.rally_routers.append(router)
self.router_ids.append(router["router"]['id'])
self.rally_router_gw_ips.append(
router["router"]["external_gateway_info"]
["external_fixed_ips"][0]["ip_address"])
if(kwargs["DVR_flag"]):
ns, controller = vpn_utils.wait_for_namespace_creation(
"snat-", router["router"]['id'],
kwargs['controller_creds'],
self.private_key_file,
kwargs['namespace_creation_timeout'])
else:
ns, controller = vpn_utils.wait_for_namespace_creation(
"qrouter-", router["router"]['id'],
kwargs['controller_creds'],
self.private_key_file,
kwargs['namespace_creation_timeout'])
with LOCK:
self.ns_controller_tuples.append((ns, controller))
def create_servers(self, **kwargs):
"""Create servers"""
for x in range(MAX_RESOURCES):
kwargs.update({
"nics":
[{"net-id": self.rally_networks[x]["network"]["id"]}],
"sec_group_suffix": self.suffixes[x],
"server_suffix": self.suffixes[x]
})
keypair = vpn_utils.create_keypair(
self.nova_client, self.suffixes[x])
server = vpn_utils.create_server(
self.nova_client, keypair, **kwargs)
vpn_utils.assert_server_status(server, **kwargs)
with LOCK:
self.servers.append(server)
self.keypairs.append(keypair)
self.server_private_ips.append(vpn_utils.get_server_ip(
self.nova_client, server.id, self.suffixes[x]))
if(kwargs["DVR_flag"]):
qrouter, compute = vpn_utils.wait_for_namespace_creation(
"qrouter-", self.router_ids[x],
kwargs['compute_creds'],
self.private_key_file,
kwargs['namespace_creation_timeout'])
vpn_utils.write_key_to_compute_node(
keypair, self.local_key_files[x],
self.remote_key_files[x], compute,
self.private_key_file)
with LOCK:
self.qrouterns_compute_tuples.append((qrouter, compute))
else:
vpn_utils.write_key_to_local_path(self.keypairs[x],
self.local_key_files[x])
fip = vpn_utils.add_floating_ip(self.nova_client, server)
with LOCK:
self.server_fips.append(fip)
def check_route(self):
"""Verify route exists between the router gateways"""
LOG.debug("VERIFY ROUTE EXISTS BETWEEN THE ROUTER GATEWAYS")
for tuple in self.ns_controller_tuples:
for ip in self.rally_router_gw_ips:
assert(vpn_utils.ping_router_gateway(
tuple, ip, self.private_key_file)), (
"PING TO IP " + ip + " FAILED")
@atomic.action_timer("_create_ike_policy")
def _create_ike_policy(self, **kwargs):
"""Create IKE policy
:return: IKE policy
"""
LOG.debug('CREATING IKE_POLICY')
ike_policy = self.neutron_client.create_ikepolicy({
"ikepolicy": {
"phase1_negotiation_mode":
kwargs.get("phase1_negotiation_mode", "main"),
"auth_algorithm": kwargs.get("auth_algorithm", "sha1"),
"encryption_algorithm":
kwargs.get("encryption_algorithm", "aes-128"),
"pfs": kwargs.get("pfs", "group5"),
"lifetime": {
"units": "seconds",
"value": kwargs.get("value", 7200)},
"ike_version": kwargs.get("ike_version", "v1"),
"name": "rally_ikepolicy"
}
})
return ike_policy
@atomic.action_timer("_create_ipsec_policy")
def _create_ipsec_policy(self, **kwargs):
"""Create IPSEC policy
:return: IPSEC policy
"""
LOG.debug('CREATING IPSEC_POLICY')
ipsec_policy = self.neutron_client.create_ipsecpolicy({
"ipsecpolicy": {
"name": "rally_ipsecpolicy",
"transform_protocol": kwargs.get("transform_protocol", "esp"),
"auth_algorithm": kwargs.get("auth_algorithm", "sha1"),
"encapsulation_mode":
kwargs.get("encapsulation_mode", "tunnel"),
"encryption_algorithm":
kwargs.get("encryption_algorithm", "aes-128"),
"pfs": kwargs.get("pfs", "group5"),
"lifetime": {
"units": "seconds",
"value": kwargs.get("value", 7200)
}
}
})
return ipsec_policy
@atomic.action_timer("_create_vpn_service")
def _create_vpn_service(self, rally_subnet, rally_router, vpn_suffix=None):
"""Create VPN service endpoints
:param rally_subnet: local subnet
:param rally_router: router endpoint
:param vpn_suffix: suffix name for vpn service
:return: VPN service
"""
LOG.debug('CREATING VPN_SERVICE')
vpn_service = self.neutron_client.create_vpnservice({
"vpnservice": {
"subnet_id": rally_subnet["subnet"]["id"],
"router_id": rally_router["router"]["id"],
"name": "rally_vpn_service_" + vpn_suffix,
"admin_state_up": True
}
})
return vpn_service
def create_vpn_services(self):
"""Create VPN services"""
for x in range(MAX_RESOURCES):
vpn_service = self._create_vpn_service(
self.rally_subnets[x], self.rally_routers[x], self.suffixes[x])
with LOCK:
self.vpn_services.append(vpn_service)
@atomic.action_timer("_create_ipsec_site_connection")
def _create_ipsec_site_connection(self, local_index, peer_index, **kwargs):
"""Create IPSEC site connection
:param local_index: parameter to point to the local end-point
:param peer_index: parameter to point to the peer end-point
:return: IPSEC site connection
"""
LOG.debug('CREATING IPSEC_SITE_CONNECTION')
ipsec_site_conn = self.neutron_client.create_ipsec_site_connection({
"ipsec_site_connection": {
"psk": kwargs.get("secret", "secret"),
"initiator": "bi-directional",
"ipsecpolicy_id": self.ipsec_policy["ipsecpolicy"]["id"],
"admin_state_up": True,
"peer_cidrs": self.rally_cidrs[peer_index],
"mtu": kwargs.get("mtu", "1500"),
"ikepolicy_id": self.ike_policy["ikepolicy"]["id"],
"dpd": {
"action": "disabled",
"interval": 60,
"timeout": 240
},
"vpnservice_id":
self.vpn_services[local_index]["vpnservice"]["id"],
"peer_address": self.rally_router_gw_ips[peer_index],
"peer_id": self.rally_router_gw_ips[peer_index],
"name": "rally_ipsec_site_connection_" +
self.suffixes[local_index]
}
})
return ipsec_site_conn
def create_ipsec_site_connections(self, **kwargs):
"""Create IPSEC site connections"""
a = self._create_ipsec_site_connection(0, 1, **kwargs)
b = self._create_ipsec_site_connection(1, 0, **kwargs)
with LOCK:
self.ipsec_site_connections = [a, b]
def _get_resource(self, resource_tag, resource_id):
"""Get the resource(vpn_service or ipsec_site_connection)
:param resource_tag: "vpnservice" or "ipsec_site_connection"
:param resource_id: id of the resource
:return: resource (vpn_service or ipsec_site_connection)
"""
if resource_tag == "vpnservice":
vpn_service = self.neutron_client.show_vpnservice(resource_id)
if vpn_service:
return vpn_service
elif resource_tag == 'ipsec_site_connection':
ipsec_site_conn = self.neutron_client.show_ipsec_site_connection(
resource_id)
if ipsec_site_conn:
return ipsec_site_conn
def _wait_for_status_change(self, resource, resource_tag, final_status,
wait_timeout=60, check_interval=1):
"""Wait for resource's status change
Wait till the status of the resource changes to final state or till
the time exceeds the wait_timeout value.
:param resource: resource whose status has to be checked
:param final_status: desired final status of the resource
:param resource_tag: to identify the resource as vpnservice or
ipsec_site_connection
:param wait_timeout: timeout value in seconds
:param check_interval: time to sleep before each check for the status
change
:return: resource
"""
LOG.debug('WAIT_FOR_%s_STATUS_CHANGE ', resource[resource_tag]['id'])
start_time = time.time()
while True:
resource = self._get_resource(
resource_tag, resource[resource_tag]['id'])
current_status = resource[resource_tag]['status']
if current_status == final_status:
return resource
time.sleep(check_interval)
if time.time() - start_time > wait_timeout:
raise exceptions.Exception(
"Timeout waiting for resource {} to change to {} status".
format(resource[resource_tag]['name'], final_status))
@atomic.action_timer("wait_time_for_status_change")
def _assert_statuses(self, ipsec_site_conn, vpn_service,
final_status, **kwargs):
"""Assert statuses of vpn_service and ipsec_site_connection
:param ipsec_site_conn: ipsec_site_connection object
:param vpn_service: vpn_service object
:param final_status: status of vpn and ipsec_site_connection object
"""
vpn_service = self._wait_for_status_change(
vpn_service,
resource_tag="vpnservice",
final_status=final_status,
wait_timeout=kwargs.get("vpn_service_creation_timeout"),
check_interval=5)
ipsec_site_conn = self._wait_for_status_change(
ipsec_site_conn,
resource_tag="ipsec_site_connection",
final_status=final_status,
wait_timeout=kwargs.get("ipsec_site_connection_creation_timeout"),
check_interval=5)
LOG.debug("VPN SERVICE STATUS %s", vpn_service['vpnservice']['status'])
LOG.debug("IPSEC_SITE_CONNECTION STATUS %s",
ipsec_site_conn['ipsec_site_connection']['status'])
def assert_statuses(self, final_status, **kwargs):
"""Assert active statuses for VPN services and VPN connections
:param final_status: the final status you expect the resource to be in
"""
LOG.debug("ASSERTING ACTIVE STATUSES FOR VPN-SERVICES AND "
"IPSEC-SITE-CONNECTIONS")
for x in range(MAX_RESOURCES):
self._assert_statuses(
self.ipsec_site_connections[x], self.vpn_services[x],
final_status, **kwargs)
def _get_qg_interface(self, peer_index):
"""Get the qg- interface
:param peer_index: parameter to point to the local end-point
:return: qg-interface
"""
qg = vpn_utils.get_interfaces(
self.ns_controller_tuples[peer_index],
self.private_key_file)
p = re.compile(r"qg-\w+-\w+")
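# Neutron names a router's external gateway port qg-<port-id prefix>; the
# regex picks that interface out of the namespace's 'ip a' output so tcpdump
# can later capture the (hopefully ESP-encapsulated) VPN traffic on it.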
for line in qg:
m = p.search(line)
if m:
return m.group()
return None
@atomic.action_timer("_verify_vpn_connection")
def _verify_vpn_connectivity(self, local_index, peer_index, **kwargs):
"""Verify the vpn connectivity between the endpoints
Get the qg- interface from the snat namespace corresponding to the
peer router and start a tcp dump. Concurrently, SSH into the nova
instance on the local subnet from the qrouter namespace and try
to ping the nova instance on the peer subnet. Inspect the captured
packets to see if they are encrypted.
:param local_index: parameter to point to the local end-point
:param peer_index: parameter to point to the peer end-point
:return: True if vpn connectivity test passes
False if the test fails
"""
qg_interface = self._get_qg_interface(peer_index)
if qg_interface:
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as e:
tcpdump_future = e.submit(vpn_utils.start_tcpdump,
self.ns_controller_tuples[peer_index],
qg_interface, self.private_key_file)
if(kwargs["DVR_flag"]):
ssh_future = e.submit(
vpn_utils.ssh_and_ping_server,
self.server_private_ips[local_index],
self.server_private_ips[peer_index],
self.qrouterns_compute_tuples[local_index],
self.remote_key_files[local_index],
self.private_key_file)
else:
ssh_future = e.submit(
vpn_utils.ssh_and_ping_server_with_fip,
self.server_fips[local_index],
self.server_private_ips[peer_index],
self.local_key_files[local_index],
self.private_key_file)
assert(ssh_future.result()), "SSH/Ping failed"
for line in tcpdump_future.result():
if 'ESP' in line:
return True
return False
def verify_vpn_connectivity(self, **kwargs):
"""Verify VPN connectivity"""
LOG.debug("VERIFY THE VPN CONNECTIVITY")
with LOCK:
assert(self._verify_vpn_connectivity(
0, 1, **kwargs)), "VPN CONNECTION FAILED"
with LOCK:
assert(self._verify_vpn_connectivity(
1, 0, **kwargs)), "VPN CONNECTION FAILED"
def update_router(self, router_id, admin_state_up=False):
"""Update router's admin_state_up field
:param router_id: uuid of the router
:param admin_state_up: True or False
"""
LOG.debug('UPDATE ROUTER')
router_args = {'router': {'admin_state_up': admin_state_up}}
self.neutron_client.update_router(router_id, router_args)
@atomic.action_timer("_delete_ipsec_site_connection")
def _delete_ipsec_site_connections(self):
"""Delete IPSEC site connections"""
for site_conn in self.ipsec_site_connections:
LOG.debug("DELETING IPSEC_SITE_CONNECTION %s",
site_conn['ipsec_site_connection']['id'])
self.neutron_client.delete_ipsec_site_connection(
site_conn['ipsec_site_connection']['id'])
@atomic.action_timer("_delete_vpn_service")
def _delete_vpn_services(self):
"""Delete VPN service endpoints"""
for vpn_service in self.vpn_services:
LOG.debug("DELETING VPN_SERVICE %s",
vpn_service['vpnservice']['id'])
self.neutron_client.delete_vpnservice(
vpn_service['vpnservice']['id'])
@atomic.action_timer("_delete_ipsec_policy")
def _delete_ipsec_policy(self):
"""Delete IPSEC policy"""
LOG.debug("DELETING IPSEC POLICY")
if self.ipsec_policy:
self.neutron_client.delete_ipsecpolicy(
self.ipsec_policy['ipsecpolicy']['id'])
@atomic.action_timer("_delete_ike_policy")
def _delete_ike_policy(self):
"""Delete IKE policy"""
LOG.debug('DELETING IKE POLICY')
if self.ike_policy:
self.neutron_client.delete_ikepolicy(
self.ike_policy['ikepolicy']['id'])
@atomic.action_timer("cleanup")
def cleanup(self):
"""Clean the resources"""
vpn_utils.delete_servers(self.nova_client, self.servers)
if self.server_fips:
vpn_utils.delete_floating_ips(self.nova_client, self.server_fips)
vpn_utils.delete_keypairs(self.nova_client, self.keypairs)
if self.qrouterns_compute_tuples:
vpn_utils.delete_hosts_from_knownhosts_file(
self.server_private_ips, self.qrouterns_compute_tuples,
self.private_key_file)
vpn_utils.delete_keyfiles(
self.local_key_files, self.remote_key_files,
self.qrouterns_compute_tuples, self.private_key_file)
else:
vpn_utils.delete_hosts_from_knownhosts_file(
self.server_private_ips)
vpn_utils.delete_keyfiles(self.local_key_files)
self._delete_ipsec_site_connections()
self._delete_vpn_services()
self._delete_ipsec_policy()
self._delete_ike_policy()
vpn_utils.delete_networks(
self.neutron_client, self.neutron_admin_client, self.rally_routers,
self.rally_networks, self.rally_subnets)
if self.tenant_ids:
vpn_utils.delete_tenants(self.keystone_client, self.tenant_ids)
neutron-vpnaas-8.0.0/rally-jobs/plugins/test_vpn_status.py 0000664 0005670 0005671 00000005727 12701407726 025237 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_vpnaas._i18n import _LI
from rally.common import log as logging
from rally.task import scenario
from rally.task import types as types
import vpn_base
LOG = logging.getLogger(__name__)
class TestVpnStatusScenario(vpn_base.VpnBase):
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@scenario.configure()
def check_vpn_status(self, **kwargs):
"""Test VPN's status correctly after bringing router's status to
DOWN and back to ACTIVE state
1. Create 2 private networks, subnets and routers
2. Create public network, subnets and GW IPs on routers, if not present
3. Execute ip netns command and get the snat and qrouter namespaces
(assuming we use DVR)
4. Verify that there is a route between the router gateways by pinging
each other from their snat namespaces
5. Add security group rules for SSH and ICMP
6. Start a nova instance in each of the private networks
7. Create IKE and IPSEC policies
8. Create VPN service at each of the routers
9. Create IPSEC site connections at both endpoints
10. Bring both the private router's status to DOWN state
11. Verify that vpn-service and ipsec-site-connection is DOWN
12. Bring back the router's status to ACTIVE state
13. Verify the vpn-service and ipsec-site-connection is back to ACTIVE
14. Perform resource cleanup
"""
try:
self.setup(**kwargs)
self.create_networks(**kwargs)
self.check_route()
self.ike_policy = self._create_ike_policy(**kwargs)
self.ipsec_policy = self._create_ipsec_policy(**kwargs)
self.create_vpn_services()
self.create_ipsec_site_connections(**kwargs)
self.assert_statuses(final_status='ACTIVE', **kwargs)
self.update_router(self.router_ids[0], admin_state_up=False)
self.update_router(self.router_ids[1], admin_state_up=False)
self.assert_statuses(final_status='DOWN', **kwargs)
self.update_router(self.router_ids[0], admin_state_up=True)
self.update_router(self.router_ids[1], admin_state_up=True)
self.assert_statuses(final_status='ACTIVE', **kwargs)
LOG.info(_LI("VPN STATUS TEST PASSED!"))
finally:
self.cleanup()
neutron-vpnaas-8.0.0/rally-jobs/plugins/vpn_utils.py 0000664 0005670 0005671 00000061224 12701407726 024007 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import exceptions
import os
import paramiko
import socket
import stat
import time
from rally.common import log as logging
from rally.plugins.openstack.wrappers import network as network_wrapper
from rally.task import utils as task_utils
LOG = logging.getLogger(__name__)
SUBNET_IP_VERSION = 4
START_CIDR = "10.2.0.0/24"
EXT_NET_CIDR = "172.16.1.0/24"
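# Defaults used when building the test topology: tenant subnets are carved
# out of START_CIDR, and EXT_NET_CIDR is only used when a public network has
# to be created because none exists yet.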
def execute_cmd_over_ssh(host, cmd, private_key):
"""Run the given command over ssh
Using paramiko package, it creates a connection to the given host;
executes the required command on it and returns the output.
:param host: Dictionary of ip, username and password
:param cmd: Command to be run over ssh
:param private_key: path to private key file
:return: Output of the executed command
"""
LOG.debug('EXECUTE COMMAND <%s> OVER SSH', cmd)
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
k = paramiko.RSAKey.from_private_key_file(private_key)
try:
client.connect(host["ip"], username=host["username"], pkey=k)
except paramiko.BadHostKeyException as e:
raise exceptions.Exception(
"BADHOSTKEY EXCEPTION WHEN CONNECTING TO %s", host["ip"], e)
except paramiko.AuthenticationException as e:
raise exceptions.Exception(
"AUTHENTICATION EXCEPTION WHEN CONNECTING TO %s",
host["ip"], e)
except paramiko.SSHException as e:
raise exceptions.Exception(
"SSH EXCEPTION WHEN CONNECTING TO %s", host["ip"], e)
except socket.error as e:
raise exceptions.Exception(
"SOCKET ERROR WHEN CONNECTING TO %s", host["ip"], e)
LOG.debug("CONNECTED TO HOST <%s>", host["ip"])
try:
stdin, stdout, stderr = client.exec_command(cmd)
return stdout.read().splitlines()
except paramiko.SSHException as e:
raise exceptions.Exception(
"SSHEXCEPTION WHEN CONNECTING TO %s", host["ip"], e)
finally:
client.close()
def create_tenant(keystone_client, tenant_suffix):
"""Creates keystone tenant with a random name.
:param keystone_client: keystone client
:param tenant_suffix: suffix name for the tenant
:returns: uuid of the new tenant
"""
tenant_name = "rally_tenant_" + tenant_suffix
LOG.debug("CREATING NEW TENANT %s", tenant_name)
return keystone_client.tenants.create(tenant_name).id
def create_network(neutron_client, neutron_admin_client, network_suffix,
tenant_id=None, DVR_flag=True, ext_net_name=None):
"""Create neutron network, subnet, router
:param neutron_client: neutron client
:param neutron_admin_client: neutron client with admin credentials
:param network_suffix: str, suffix name of the new network
:param tenant_id: uuid of the tenant
:param DVR_flag: True - creates a DVR router
False - creates a non DVR router
:param ext_net_name: external network that is to be used
:return: router, subnet, network, subnet_cidr
"""
subnet_cidr = network_wrapper.generate_cidr(start_cidr=START_CIDR)
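# generate_cidr() is expected to hand out a different subnet derived from
# START_CIDR on each call, so the two endpoints get non-overlapping CIDRs
# (which the IPsec peer_cidrs setting relies on).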
def _create_network(neutron_client, network_suffix, is_external=False):
"""Creates neutron network"""
network_name = "rally_network_" + network_suffix
network_args = {"name": network_name,
"router:external": is_external
}
if tenant_id:
network_args["tenant_id"] = tenant_id
LOG.debug("ADDING NEW NETWORK %s", network_name)
return neutron_client.create_network({"network": network_args})
def _create_subnet(neutron_client, rally_network, network_suffix, cidr):
"""Create neutron subnet"""
network_id = rally_network["network"]["id"]
subnet_name = "rally_subnet_" + network_suffix
subnet_args = {"name": subnet_name,
"cidr": cidr,
"network_id": network_id,
"ip_version": SUBNET_IP_VERSION
}
if tenant_id:
subnet_args["tenant_id"] = tenant_id
LOG.debug("ADDING SUBNET %s", subnet_name)
return neutron_client.create_subnet({"subnet": subnet_args})
def _create_router(neutron_client, ext_network_id, rally_subnet, dvr_flag):
"""Create router, set the external gateway and add router interface
:param neutron_client: neutron_client
:param ext_network_id: uuid of the external network
:param rally_subnet: subnet to add router interface
:param dvr_flag: True - creates a DVR router
False - creates a non DVR router
:return: router
"""
router_name = "rally_router_" + network_suffix
gw_info = {"network_id": ext_network_id}
router_args = {"name": router_name,
"external_gateway_info": gw_info
}
if not dvr_flag:
router_args["distributed"] = dvr_flag
if tenant_id:
router_args["tenant_id"] = 'tenant_id'
LOG.debug("ADDING ROUTER %s", router_name)
rally_router = neutron_client.create_router({"router": router_args})
LOG.debug("[%s]: ADDING ROUTER INTERFACE")
neutron_client.add_interface_router(
rally_router['router']["id"],
{"subnet_id": rally_subnet["subnet"]["id"]})
return rally_router
def _get_external_network_id(ext_net_name):
"""Fetch the network id for the given external network, if it exists.
Else fetch the first external network present.
"""
ext_nets = neutron_client.list_networks(
**{'router:external': True})['networks']
ext_nets_searched = [n for n in ext_nets if n['name'] == ext_net_name]
if ext_nets_searched:
return ext_nets_searched[0]['id']
elif ext_nets:
return ext_nets[0]['id']
else:
return None
def _create_external_network():
"""Creat external network and subnet"""
ext_net = _create_network(neutron_admin_client, "public", True)
_create_subnet(neutron_admin_client, ext_net, "public", EXT_NET_CIDR)
return ext_net['network']['id']
ext_network_id = _get_external_network_id(ext_net_name)
if not ext_network_id:
ext_network_id = _create_external_network()
rally_network = _create_network(neutron_client, network_suffix)
rally_subnet = _create_subnet(neutron_client, rally_network,
network_suffix, subnet_cidr)
rally_router = _create_router(neutron_client, ext_network_id,
rally_subnet, DVR_flag)
return rally_router, rally_network, rally_subnet, subnet_cidr
def create_keypair(nova_client, keypair_suffix):
"""Create keypair
:param nova_client: nova_client
:param keypair_suffix: suffix name for the keypair
:return: keypair
"""
keypair_name = "rally_keypair_" + keypair_suffix
LOG.debug("CREATING A KEYPAIR %s", keypair_name)
keypair = nova_client.keypairs.create(keypair_name)
return keypair
def write_key_to_local_path(keypair, local_key_file):
"""Write the private key of the nova instance to a temp file
:param keypair: nova keypair
:param local_key_file: path to private key file
:return:
"""
with open(local_key_file, 'w') as f:
os.chmod(local_key_file, stat.S_IREAD | stat.S_IWRITE)
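# stat.S_IREAD | stat.S_IWRITE is 0600: restrict the key file to its owner so
# the ssh client does not reject it as too permissive.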
f.write(keypair.private_key)
def write_key_to_compute_node(keypair, local_path, remote_path, host,
private_key):
"""Write the private key of the nova instance to the compute node
First fetches the private key from the keypair and writes it to a
temporary file in the local machine. It then sftp's the file
to the compute host.
:param keypair: nova keypair
:param local_path: path to private key file of the nova instance in the
local machine
:param remote_path: path where the private key file has to be placed
in the remote machine
:param host: compute host credentials
:param private_key: path to your private key file
:return:
"""
LOG.debug("WRITING PRIVATE KEY TO COMPUTE NODE")
k = paramiko.RSAKey.from_private_key_file(private_key)
write_key_to_local_path(keypair, local_path)
try:
transport = paramiko.Transport(host['ip'], host['port'])
except paramiko.SSHException as e:
raise exceptions.Exception(
"PARAMIKO TRANSPORT FAILED. CHECK IF THE HOST IP %s AND PORT %s "
"ARE CORRECT %s", host['ip'], host['port'], e)
try:
transport.connect(
username=host['username'], pkey=k)
except paramiko.BadHostKeyException as e:
transport.close()
raise exceptions.Exception(
"BADHOSTKEY EXCEPTION WHEN CONNECTING TO %s", host["ip"], e)
except paramiko.AuthenticationException as e:
transport.close()
raise exceptions.Exception(
"AUTHENTICATION EXCEPTION WHEN CONNECTING TO %s",
host["ip"], e)
except paramiko.SSHException as e:
transport.close()
raise exceptions.Exception(
"SSH EXCEPTION WHEN CONNECTING TO %s", host["ip"], e)
LOG.debug("CONNECTED TO HOST <%s>", host["ip"])
try:
sftp_client = paramiko.SFTPClient.from_transport(transport)
sftp_client.put(local_path, remote_path)
except IOError as e:
raise exceptions.Exception("FILE PATH DOESN'T EXIST", e)
finally:
transport.close()
def create_server(nova_client, keypair, **kwargs):
"""Create nova instance
:param nova_client: nova client
:param keypair: key-pair to allow ssh
:return: new nova instance
"""
# add sec-group
sec_group_name = "rally_secgroup_" + kwargs["sec_group_suffix"]
LOG.debug("ADDING NEW SECURITY GROUP %s", sec_group_name)
secgroup = nova_client.security_groups.create(sec_group_name,
sec_group_name)
# add security rules for SSH and ICMP
nova_client.security_group_rules.create(secgroup.id, from_port=22,
to_port=22, ip_protocol="tcp", cidr="0.0.0.0/0")
nova_client.security_group_rules.create(secgroup.id, from_port=-1,
to_port=-1, ip_protocol="icmp", cidr="0.0.0.0/0")
# boot new nova instance
server_name = "rally_server_" + (kwargs["server_suffix"])
LOG.debug("BOOTING NEW INSTANCE: %s", server_name)
LOG.debug("%s", kwargs["image"])
server = nova_client.servers.create(server_name,
image=kwargs["image"],
flavor=kwargs["flavor"],
key_name=keypair.name,
security_groups=[secgroup.id],
nics=kwargs["nics"])
return server
def assert_server_status(server, **kwargs):
"""Assert server status
:param server: nova server
"""
LOG.debug('WAITING FOR SERVER TO GO ACTIVE')
server = task_utils.wait_for(
server,
is_ready=task_utils.resource_is("ACTIVE"),
update_resource=task_utils.get_from_manager(),
timeout=kwargs["nova_server_boot_timeout"],
check_interval=5)
LOG.debug("SERVER STATUS: %s", server.status)
assert('ACTIVE' == server.status), ("THE INSTANCE IS NOT IN ACTIVE STATE")
def get_server_ip(nova_client, server_id, network_suffix):
"""Get the ip associated with the nova instance
:param nova_client: nova client
:param server_id: uuid of the nova instance whose ip is required
:param network_suffix: suffix name of the network
:return: ip address of the instance
"""
network_name = "rally_network_" + network_suffix
server_details = nova_client.servers.get(server_id)
server_ip = server_details.addresses[network_name][0]["addr"]
return server_ip
def add_floating_ip(nova_client, server):
"""Associates floating-ip to a server
:param nova_client: nova client
:param server: nova instance
:return: associated floating ip
"""
fip_list = nova_client.floating_ips.list()
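# Reuse an unassociated floating IP if one is available; the for/else below
# only allocates a new one when none was found.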
for fip in fip_list:
if fip.instance_id is None:
floating_ip = fip
break
else:
LOG.debug("CREATING NEW FLOATING IP")
floating_ip = nova_client.floating_ips.create()
LOG.debug("ASSOCIATING FLOATING IP %s", floating_ip.ip)
nova_client.servers.add_floating_ip(server.id, floating_ip.ip)
return floating_ip
def get_namespace(host, private_key):
"""SSH into the host and get the namespaces
:param host : dictionary of controller/compute node credentials
{ip:x.x.x.x, username:xxx, password:xxx}
:param private_key: path to private key file
:return: namespaces
"""
LOG.debug("GET NAMESPACES")
cmd = "sudo ip netns"
namespaces = execute_cmd_over_ssh(host, cmd, private_key)
LOG.debug("NAMESPACES %s", namespaces)
return namespaces
def wait_for_namespace_creation(namespace_tag, router_id, hosts, private_key,
timeout=60):
"""Wait for the namespace creation
Get into each of the controllers/compute nodes and check which one contains
the snat/qrouter namespace corresponding to the rally router. Sleep for a
second and repeat until either the namespace is found or the timeout is
exceeded.
:param namespace_tag: namespace prefix to look for ("snat-" or "qrouter-")
:param router_id: uuid of the rally_router
:param hosts: controllers or compute hosts
:param private_key: path to private key file
:param timeout: namespace creation time
:return:
"""
start_time = time.time()
while True:
for host in hosts:
namespaces = get_namespace(host, private_key)
for line in namespaces:
if line == (namespace_tag + router_id):
namespace_tag = line
return namespace_tag, host
time.sleep(1)
if time.time() - start_time > timeout:
raise exceptions.Exception("TIMEOUT WHILE WAITING FOR"
" NAMESPACES TO BE CREATED")
def ping(host, cmd, private_key):
"""Execute ping command over ssh"""
ping_result = execute_cmd_over_ssh(host, cmd, private_key)
if ping_result:
LOG.debug("PING RESULT %s", ping_result)
return True
else:
return False
def ping_router_gateway(namespace_controller_tuple, router_gw_ip, private_key):
"""Ping the ip address from network namespace
Get into controller's snat-namespaces and ping the peer router gateway ip.
:param namespace_controller_tuple: namespace, controller tuple. (It's the
controller that contains the namespace )
:param router_gw_ip: ip address to be pinged
:param private_key: path to private key file
:return: True if ping succeeds
False if ping fails
"""
namespace, controller = namespace_controller_tuple
LOG.debug("PING %s FROM THE NAMESPACE %s", router_gw_ip, namespace)
count = 4
cmd = "sudo ip netns exec {} ping -w {} -c {} {}".format(
namespace, 2 * count, count, router_gw_ip)
return ping(controller, cmd, private_key)
def get_interfaces(namespace_controller_tuple, private_key):
"""Get the interfaces
Get into the controller's snat namespace and list the interfaces.
:param namespace_controller_tuple: namespace, controller tuple(the
controller that contains the namespace).
:param private_key: path to private key file
:return: interfaces
"""
namespace, controller = namespace_controller_tuple
LOG.debug("GET THE INTERFACES BY USING 'ip a' FROM THE NAMESPACE %s",
namespace)
cmd = "sudo ip netns exec {} ip a".format(namespace)
interfaces = execute_cmd_over_ssh(controller, cmd, private_key)
LOG.debug("INTERFACES %s", interfaces)
return interfaces
def start_tcpdump(namespace_controller_tuple, interface, private_key):
"""Start the tcpdump at the given interface
Get into the controller's snat namespace and start a tcp dump at the
qg-interface.
:param namespace_controller_tuple: namespace, controller tuple. (It's the
controller that contains the namespace )
:param interface: interface in which tcpdump has to be run
:param private_key: path to private key file
:return: tcpdump output
"""
namespace, controller = namespace_controller_tuple
LOG.debug("START THE TCPDUMP USING 'tcpdump -i %s FROM THE NAMESPACE"
" %s", interface, namespace)
cmd = ("sudo ip netns exec {} timeout 15 tcpdump -n -i {}"
.format(namespace, interface))
tcpdump = execute_cmd_over_ssh(controller, cmd, private_key)
LOG.debug("TCPDUMP %s", tcpdump)
return tcpdump
def ssh_and_ping_server(local_server, peer_server, ns_compute_tuple, keyfile,
private_key):
"""SSH and ping the nova instance from the namespace
Get into the compute node's qrouter namespace and then ssh into the local
nova instance & ping the peer nova instance.
:param local_server: private ip of the server to ssh into
:param peer_server: private ip of the server to ping to
:param ns_compute_tuple: namespace, compute tuple. (It's the
compute node that contains the namespace )
:param keyfile: path to private key file of the nova instance
:param private_key: path to private key file
:return: True if ping succeeds
False if ping fails
"""
namespace, compute_host = ns_compute_tuple
LOG.debug("SSH INTO SERVER %s AND PING THE PEER SERVER %s FROM THE"
" NAMESPACE %s", local_server, peer_server, namespace)
host = "cirros@" + local_server
count = 20
cmd = ("sudo ip netns exec {} ssh -v -o StrictHostKeyChecking=no -o"
"HashKnownHosts=no -i {} {} ping -w {} -c {} {}"
.format(namespace, keyfile, host, 2 * count, count, peer_server))
return ping(compute_host, cmd, private_key)
def ssh_and_ping_server_with_fip(local_server, peer_server, keyfile,
private_key):
"""SSH into the local nova instance and ping the peer instance using fips
:param local_server: fip of the server to ssh into
:param peer_server: private ip of the server to ping to
:param keyfile: path to private key file of the nova instance
:param private_key: path to private key file
:return: True if ping succeeds
False if ping fails
"""
LOG.debug("SSH INTO LOCAL SERVER %s AND PING THE PEER SERVER %s",
local_server.ip, peer_server)
count = 20
local_host = {"ip": "127.0.0.1", "username": None}
host = "cirros@" + local_server.ip
cmd = ("ssh -v -o StrictHostKeyChecking=no -o"
"HashKnownHosts=no -i {} {} ping -w {} -c {} {}"
.format(keyfile, host, 2 * count, count, peer_server))
return ping(local_host, cmd, private_key)
def delete_servers(nova_client, servers):
"""Delete nova servers
It deletes the nova servers and their associated security groups.
:param nova_client: nova client
:param servers: nova instances to be deleted
:return:
"""
for server in servers:
LOG.debug("DELETING NOVA INSTANCE: %s", server.id)
sec_group_id = server.security_groups[0]['name']
nova_client.servers.delete(server.id)
LOG.debug("WAITING FOR INSTANCE TO GET DELETED")
task_utils.wait_for_delete(
server, update_resource=task_utils.get_from_manager())
for secgroup in nova_client.security_groups.list():
if secgroup.id == sec_group_id:
LOG.debug("DELETING SEC_GROUP: %s", sec_group_id)
nova_client.security_groups.delete(secgroup.id)
def delete_floating_ips(nova_client, fips):
"""Delete floating ips
:param nova_client: nova client
:param fips: list of floating ips
:return:
"""
for fip in fips:
nova_client.floating_ips.delete(fip.id)
def delete_keypairs(nova_client, keypairs):
"""Delete key pairs
:param nova_client: nova client
:param keypairs: list of keypairs
:return
"""
for key_pair in keypairs:
LOG.debug("DELETING KEY_PAIR %s", key_pair.name)
nova_client.keypairs.delete(key_pair.id)
def delete_networks(neutron_client, neutron_admin_client,
routers, networks, subnets):
"""Delete neutron network, subnets amd routers
:param neutron_client: neutron client
:param neutron_admin_client: neutron_admin_client
:param routers: list of routers to be deleted
:param networks: list of networks to be deleted
:param subnets: list of subnets to be deleted
:return
"""
LOG.debug("DELETING RALLY ROUTER INTERFACES & GATEWAYS")
for router in routers:
neutron_client.remove_gateway_router(router['router']['id'])
router_name = router['router']['name']
subnet_name = ("rally_subnet_" + router_name[13:len(router_name)])
for subnet in subnets:
if subnet_name == subnet['subnet']['name']:
neutron_client.remove_interface_router(
router['router']['id'],
{"subnet_id": subnet['subnet']['id']})
LOG.debug("DELETING RALLY ROUTERS")
for router in routers:
neutron_client.delete_router(router['router']['id'])
LOG.debug("DELETING RALLY NETWORKS")
for network in networks:
if (network['network']['router:external'] and
network['network']['name'] == "rally_network_public"):
external_network = network
neutron_admin_client.delete_network(
external_network['network']["id"])
elif network['network']['router:external']:
pass
else:
neutron_client.delete_network(network['network']['id'])
def delete_tenants(keystone_client, tenant_ids):
"""Delete keystone tenant
:param keystone_client: keystone client
:param tenant_ids: list of tenants' uuids
:return:
"""
LOG.debug('DELETE TENANTS')
for id in tenant_ids:
keystone_client.tenants.delete(id)
def delete_keyfiles(local_key_files, remote_key_files=None,
ns_compute_tuples=None, private_key=None):
"""Delete the SSH keyfiles from the compute and the local nodes
:param local_key_files: paths to ssh key files in local node
:param remote_key_files: paths to ssh key files in compute nodes
:param ns_compute_tuples: namespace, compute tuple. (It's the
compute node that contains the namespace )
:param private_key: path to private key file
:return:
"""
LOG.debug("DELETING RALLY KEY FILES FROM LOCAL MACHINE")
for key in local_key_files:
if os.path.exists(key):
os.remove(key)
if ns_compute_tuples:
LOG.debug("DELETING RALLY KEY FILES FROM COMPUTE HOSTS")
for key, ns_comp in zip(remote_key_files, ns_compute_tuples):
cmd = "sudo rm -f {}".format(key)
host = ns_comp[1]
execute_cmd_over_ssh(host, cmd, private_key)
def delete_hosts_from_knownhosts_file(hosts, ns_compute_tuples=None,
private_key=None):
"""Remove the hosts from the knownhosts file
:param hosts: host ips to be removed from /root/.ssh/knownhosts
:param ns_compute_tuples: namespace, compute tuple. (It's the
compute node that contains the namespace )
:param private_key: path to private key file
:return:
"""
if ns_compute_tuples:
LOG.debug("DELETES HOSTS FROM THE KNOWNHOSTS FILE")
for host, ns_comp in zip(hosts, ns_compute_tuples):
compute_host = ns_comp[1]
cmd = ("sudo ssh-keygen -f /root/.ssh/known_hosts -R"
" {}".format(host))
execute_cmd_over_ssh(compute_host, cmd, private_key)
else:
for host in hosts:
os.system("sudo ssh-keygen -f /root/.ssh/known_hosts -R"
" {}".format(host))
neutron-vpnaas-8.0.0/rally-jobs/plugins/__init__.py 0000664 0005670 0005671 00000000000 12701407726 023504 0 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/rally-jobs/plugins/test_vpn_tenant_scenario.py 0000664 0005670 0005671 00000004672 12701407726 027066 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_vpnaas._i18n import _LI
from rally.common import log as logging
from rally.task import scenario
from rally.task import types as types
import vpn_base
LOG = logging.getLogger(__name__)
class TestVpnTenantScenario(vpn_base.VpnBase):
"""Rally scenarios for VPNaaS"""
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@scenario.configure()
def multitenants_vpn_test(self, **kwargs):
"""Test VPN connectivity under two different tenants.
1. Create 2 private networks with 2 different tenants, subnets, routers
2. Create public network, subnets and GW IPs on routers, if not present
3. Execute ip netns command and get the snat and qrouter namespaces
(assuming we use DVR)
4. Verify that there is a route between the router gateways by pinging
each other from their snat namespaces
5. Add security group rules for SSH and ICMP
6. Start a nova instance in each of the private networks
7. Create IKE and IPSEC policies
8. Create VPN service at each of the routers
9. Create IPSEC site connections at both endpoints
10. Verify that the vpn-service and ipsec-site-connection are ACTIVE
11. Cleanup the resources that are setup for this test
"""
try:
self.setup(**kwargs)
self.create_tenants()
self.create_networks(**kwargs)
self.check_route()
self.ike_policy = self._create_ike_policy(**kwargs)
self.ipsec_policy = self._create_ipsec_policy(**kwargs)
self.create_vpn_services()
self.create_ipsec_site_connections(**kwargs)
self.assert_statuses(final_status='ACTIVE', **kwargs)
LOG.info(_LI("VPN TENANT TEST PASSED!"))
finally:
self.cleanup()
neutron-vpnaas-8.0.0/rally-jobs/plugins/test_vpn_connectivity.py 0000664 0005670 0005671 00000005653 12701407726 026430 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_vpnaas._i18n import _LI
from rally.common import log as logging
from rally.task import scenario
from rally.task import types as types
import vpn_base
LOG = logging.getLogger(__name__)
class TestVpnBasicScenario(vpn_base.VpnBase):
"""Rally scenarios for VPNaaS"""
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@scenario.configure()
def create_and_delete_vpn_connection(self, **kwargs):
"""Basic VPN connectivity scenario.
1. Create 2 private networks, subnets and routers
2. Create public network, subnets and GW IPs on routers, if not present
3. Execute ip netns command and get the snat and qrouter namespaces
(assuming we use DVR)
4. Verify that there is a route between the router gateways by pinging
each other from their snat namespaces
5. Add security group rules for SSH and ICMP
6. Start a nova instance in each of the private networks
7. Create IKE and IPSEC policies
8. Create VPN service at each of the routers
9. Create IPSEC site connections at both endpoints
10. Verify that the ipsec-site-connection is ACTIVE (takes up to 30 secs)
11. To verify the vpn connectivity, get into the peer router's snat
namespace and start a tcpdump at the qg-xxxx interface
12. SSH into the nova instance from the local qrouter namespace
and try to ping the nova instance on the peer network.
13. Verify that the captured packets are encapsulated and encrypted.
14. Verify the connectivity in the reverse direction following
steps 11 through 13
15. Submit a request to delete all the resources
"""
try:
self.setup(**kwargs)
self.create_networks(**kwargs)
self.create_servers(**kwargs)
self.check_route()
self.ike_policy = self._create_ike_policy(**kwargs)
self.ipsec_policy = self._create_ipsec_policy(**kwargs)
self.create_vpn_services()
self.create_ipsec_site_connections(**kwargs)
self.assert_statuses(final_status='ACTIVE', **kwargs)
self.verify_vpn_connectivity(**kwargs)
LOG.info(_LI("VPN CONNECTIVITY TEST PASSED!"))
finally:
self.cleanup()
neutron-vpnaas-8.0.0/rally-jobs/rally-configs/ 0000775 0005670 0005671 00000000000 12701410103 022454 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/rally-jobs/rally-configs/rally_config_non_dvr.yaml 0000664 0005670 0005671 00000004773 12701407726 027571 0 ustar jenkins jenkins 0000000 0000000 ---
TestVpnBasicScenario.create_and_delete_vpn_connection:
-
args:
flavor:
name: "m1.tiny"
image:
name: {{image_name}}
phase1_negotiation_mode: "main"
auth_algorithm: "sha1"
encryption_algorithm: "aes-128"
pfs: "group5"
value: 7200
ike_version: "v1"
transform_protocol: "esp"
encapsulation_mode: "tunnel"
mtu: 1500
secret: "secret"
nova_server_boot_timeout: 60 * 6
vpn_service_creation_timeout: 100
ipsec_site_connection_creation_timeout: 180
namespace_creation_timeout: 60
private_key: {{private_key}}
controller_creds: {{controller_creds}}
compute_creds: {{compute_creds}}
DVR_flag: False
use_admin_client: True
ext-net: "ext-net"
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
sla:
failure_rate:
max: 0
TestVpnStatusScenario.check_vpn_status:
-
args:
flavor:
name: "m1.tiny"
image:
name: {{image_name}}
nova_server_boot_timeout: 60 * 6
vpn_service_creation_timeout: 100
ipsec_site_connection_creation_timeout: 400
namespace_creation_timeout: 60
private_key: {{private_key}}
controller_creds: {{controller_creds}}
compute_creds: {{compute_creds}}
DVR_flag: False
use_admin_client: True
ext-net: "ext-net"
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
sla:
failure_rate:
max: 0
TestVpnTenantScenario.multitenants_vpn_test:
-
args:
flavor:
name: "m1.tiny"
image:
name: {{image_name}}
nova_server_boot_timeout: 60 * 6
vpn_service_creation_timeout: 100
ipsec_site_connection_creation_timeout: 180
namespace_creation_timeout: 60
private_key: {{private_key}}
controller_creds: {{controller_creds}}
compute_creds: {{compute_creds}}
DVR_flag: False
use_admin_client: True
ext-net: "ext-net"
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
sla:
failure_rate:
max: 0
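# Illustrative usage only (assumes Rally is installed and a deployment is
# registered; exact CLI syntax may vary between Rally releases):
#   rally task start rally_config_non_dvr.yaml --task-args-file args_template.json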
neutron-vpnaas-8.0.0/rally-jobs/rally-configs/rally_config_dvr.yaml 0000664 0005670 0005671 00000004772 12701407726 026716 0 ustar jenkins jenkins 0000000 0000000 ---
TestVpnBasicScenario.create_and_delete_vpn_connection:
-
args:
flavor:
name: "m1.tiny"
image:
name: {{image_name}}
phase1_negotiation_mode: "main"
auth_algorithm: "sha1"
encryption_algorithm: "aes-128"
pfs: "group5"
value: 7200
ike_version: "v1"
transform_protocol: "esp"
encapsulation_mode: "tunnel"
mtu: 1500
secret: "secret"
nova_server_boot_timeout: 60 * 6
vpn_service_creation_timeout: 100
ipsec_site_connection_creation_timeout: 180
namespace_creation_timeout: 60
private_key: {{private_key}}
controller_creds: {{controller_creds}}
compute_creds: {{compute_creds}}
DVR_flag: True
use_admin_client: False
ext-net: "ext-net"
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
sla:
failure_rate:
max: 0
TestVpnStatusScenario.check_vpn_status:
-
args:
flavor:
name: "m1.tiny"
image:
name: {{image_name}}
nova_server_boot_timeout: 60 * 6
vpn_service_creation_timeout: 100
ipsec_site_connection_creation_timeout: 400
namespace_creation_timeout: 60
private_key: {{private_key}}
controller_creds: {{controller_creds}}
compute_creds: {{compute_creds}}
DVR_flag: True
use_admin_client: False
ext-net: "ext-net"
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
sla:
failure_rate:
max: 0
TestVpnTenantScenario.multitenants_vpn_test:
-
args:
flavor:
name: "m1.tiny"
image:
name: {{image_name}}
nova_server_boot_timeout: 60 * 6
vpn_service_creation_timeout: 100
ipsec_site_connection_creation_timeout: 180
namespace_creation_timeout: 60
private_key: {{private_key}}
controller_creds: {{controller_creds}}
compute_creds: {{compute_creds}}
DVR_flag: True
use_admin_client: True
ext-net: "ext-net"
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
sla:
failure_rate:
max: 0 neutron-vpnaas-8.0.0/rally-jobs/rally-configs/args_template.json 0000664 0005670 0005671 00000000462 12701407726 026221 0 ustar jenkins jenkins 0000000 0000000 {
"image_name": "^cirros.*-disk$",
"private_key": "",
"controller_creds": [
{
"ip": "x.x.x.x",
"username": "xxx"
}
],
"compute_creds": [
{
"ip": "x.x.x.x",
"username": "xxx",
"port": 22
}
]
}
neutron-vpnaas-8.0.0/MANIFEST.in 0000664 0005670 0005671 00000000424 12701407726 017404 0 ustar jenkins jenkins 0000000 0000000 include AUTHORS
include README.rst
include ChangeLog
include LICENSE
include neutron_vpnaas/db/migration/alembic_migrations/script.py.mako
recursive-include neutron_vpnaas/db/migration/alembic_migrations/versions *
exclude .gitignore
exclude .gitreview
global-exclude *.pyc
neutron-vpnaas-8.0.0/tox.ini 0000664 0005670 0005671 00000007672 12701407726 017175 0 ustar jenkins jenkins 0000000 0000000 [tox]
envlist = py34,py27,pep8
minversion = 1.6
skipsdist = True
[testenv]
setenv = VIRTUAL_ENV={envdir}
usedevelop = True
install_command = {toxinidir}/tools/tox_install.sh constrained -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/mitaka} {opts} {packages}
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
whitelist_externals = sh
commands =
{toxinidir}/tools/ostestr_compat_shim.sh {posargs}
# There is also secret magic in ostestr which lets you run in a fail-only
# mode. To do this, define the TRACE_FAILONLY environment variable.
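# For example (assuming a POSIX shell):
#   TRACE_FAILONLY=1 tox -e py27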
[testenv:functional]
deps =
{[testenv]deps}
-r{toxinidir}/neutron_vpnaas/tests/functional/requirements.txt
setenv =
OS_SUDO_TESTING=1
OS_ROOTWRAP_CMD=sudo {envdir}/bin/neutron-rootwrap {envdir}/etc/neutron/rootwrap.conf
OS_ROOTWRAP_DAEMON_CMD=sudo {envdir}/bin/neutron-rootwrap-daemon {envdir}/etc/neutron/rootwrap.conf
OS_FAIL_ON_MISSING_DEPS=1
whitelist_externals =
sh
cp
sudo
[testenv:dsvm-functional]
setenv =
OS_TEST_PATH=./neutron_vpnaas/tests/functional/openswan
{[testenv:functional]setenv}
deps = {[testenv:functional]deps}
sitepackages=True
whitelist_externals = {[testenv:functional]whitelist_externals}
commands =
{toxinidir}/tools/deploy_rootwrap.sh {toxinidir} {envdir}
sh tools/pretty_tox.sh '{posargs}'
[testenv:api]
sitepackages=True
setenv =
OS_TEST_PATH=./neutron_vpnaas/tests/api
OS_TESTR_CONCURRENCY=1
TEMPEST_CONFIG_DIR={env:TEMPEST_CONFIG_DIR:/opt/stack/tempest/etc}
[testenv:dsvm-functional-sswan]
setenv =
OS_TEST_PATH=./neutron_vpnaas/tests/functional/strongswan
{[testenv:functional]setenv}
deps = {[testenv:functional]deps}
sitepackages=True
whitelist_externals = {[testenv:functional]whitelist_externals}
commands =
{toxinidir}/tools/deploy_rootwrap.sh {toxinidir} {envdir}
sh tools/pretty_tox.sh '{posargs}'
[testenv:releasenotes]
# TODO(pc_m): Remove install_command, once infra supports constraints for
# this target.
install_command = {toxinidir}/tools/tox_install.sh unconstrained {opts} {packages}
commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
[testenv:pep8]
commands =
flake8
pylint --rcfile=.pylintrc --output-format=colorized {posargs:neutron_vpnaas}
{toxinidir}/tools/check_unit_test_structure.sh
neutron-db-manage --subproject neutron-vpnaas --database-connection sqlite:// check_migration
{[testenv:genconfig]commands}
whitelist_externals = sh
[testenv:i18n]
commands = python ./tools/check_i18n.py ./neutron-vpnaas ./tools/i18n_cfg.py
[testenv:cover]
# TODO(pc_m): Remove install_command, once infra supports constraints for
# this target.
install_command = {toxinidir}/tools/tox_install.sh unconstrained {opts} {packages}
commands =
python setup.py test --coverage --coverage-package-name=neutron_vpnaas --testr-args='{posargs}'
[testenv:venv]
# TODO(pc_m): Remove install_command, once infra supports constraints for
# this target.
install_command = {toxinidir}/tools/tox_install.sh unconstrained {opts} {packages}
commands = {posargs}
[testenv:docs]
commands = sphinx-build -W -b html doc/source doc/build
[flake8]
# E125 continuation line does not distinguish itself from next logical line
# E126 continuation line over-indented for hanging indent
# E128 continuation line under-indented for visual indent
# E129 visually indented line with same indent as next logical line
# E265 block comment should start with '# '
# H405 multi line docstring summary not separated with an empty line
# TODO(marun) H404 multi line docstring should start with a summary
ignore = E125,E126,E128,E129,E265,H404,H405
show-source = true
exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,tools,.ropeproject,rally-scenarios
[hacking]
import_exceptions = neutron_vpnaas._i18n
local-check-factory = neutron.hacking.checks.factory
[testenv:genconfig]
commands = {toxinidir}/tools/generate_config_file_samples.sh
neutron-vpnaas-8.0.0/.testr.conf 0000664 0005670 0005671 00000000373 12701407726 017737 0 ustar jenkins jenkins 0000000 0000000 [DEFAULT]
test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./neutron_vpnaas/tests/unit} $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
neutron-vpnaas-8.0.0/AUTHORS 0000664 0005670 0005671 00000020671 12701410102 016702 0 ustar jenkins jenkins 0000000 0000000 Aaron Rosen
Abhishek Raut
Adam Harwell
Aishwarya Thangappa
Akihiro MOTOKI
Akihiro Motoki
Akihiro Motoki
Al Miller
Aleks Chirko
Alessandro Pilotti
Alessio Ababilov
Alessio Ababilov
Amir Sadoughi
Andre Pech
Andreas Jaeger
Andreas Jaeger
Andrew Boik
Angus Lees
Ann Kamyshnikova
Arvind Somy
Arvind Somya
Assaf Muller
Bertrand Lallau
Bertrand Lallau
Bharath M
Bhuvan Arumugam
Bo Chi
Bob Kukura
Bob Melander
Bogdan Tabor
Brad Hall
Brandon Logan
Brandon Logan
Brant Knudson
Brent Eagles
Brian Haley
Brian Waldon
Carl Baldwin
Cedric Brandily
Chang Bo Guo
Christian Berendt
Chuck Short
Clark Boylan
Clint Byrum
Cyril Roelandt
Dan Prince
Dan Wendlandt
Davanum Srinivas
Dave Lapsley
Deepak N
Dirk Mueller
Dongcan Ye
Doug Hellmann
Doug Hellmann
Doug Wiegley
Doug Wiegley
Edgar Magana
Elena Ezhova
Emilien Macchi
Eugene Nikanorov
Gary Kotton
Gary Kotton
German Eichberger
Gordon Chung
Guilherme Salgado
Hareesh Puthalath
He Jie Xu
Hemanth Ravi
Henry Gessau
Henry Gessau
HenryVIII
Hirofumi Ichihara
Ignacio Scopetta
Ihar Hrachyshka
Ionuț Arțăriși
Irena Berezovsky
Isaku Yamahata
Isaku Yamahata
JJ Asghar
Jacek Swiderski
Jakub Libosvar
James Arendt
James E. Blair
James E. Blair
Jason Kölker
Jay Pipes
Jeremy Stanley
Jiajun Liu
Joe Gordon
Joe Heck
John Davidge
John Dunning
Jordan Tardif
Juliano Martinez
Julien Danjou
Justin Lund
Keshava Bharadwaj
Kevin Benton
Kevin L. Mitchell
Kris Lindgren
Kun Huang
Kyle Mestery
Kyle Mestery
Li Ma
LiuNanke
Luke Gorrie
Ly Loi
Major Hayden
Mark McClain
Mark McClain
Mark McLoughlin
Martin Hickey
Maru Newby
Maru Newby
Mate Lakat
Mathieu Rohon
Matt Riedemann
Matthew Kassawara
Matthew Treinish
Michael Johnson
Michael Smith
Miguel Angel Ajo
Mohammad Banikazemi
Monty Taylor
Morgan Fainberg
Motohiro OTSUKA
Nachi Ueno
Nachi Ueno
Nader Lahouti
Nick
Numan Siddique
Oleg Bondarev
Ondřej Nový
Paul Michali
Paul Michali
Peng Zhi Xiong
Praneet Bachheti
Rajaram Mallya
Ralf Haferkamp
Reedip Banerjee
Rich Curran
Roman Podoliaka
Rui Zang
Russell Bryant
Ryan Moats
Ryota MIBU
Salvatore Orlando
Salvatore Orlando
Samer Deeb
Santhosh
Santhosh Kumar
Sascha Peilicke
Sascha Peilicke
Sascha Peilicke
Sean Dague
Sean Dague
Sean M. Collins
Sergey Lukjanov
Sergey Skripnick
Sergey Vilgelm
Shiv Haris
Somik Behera
Somik Behera
Sridhar Ramaswamy
Sridhar Ramaswamy
Steven Gonzales
Sukhdev
Sumit Naiksatam
Sun Zhengnan
Sushil Kumar
Swaminathan Vasudevan
Sylvain Afchain
Takashi NATSUME
Terry Wilson
Thierry Carrez
Thomas Bechtold
Tim Miller
Tomoko Inoue
Tony Breeds
Trinath Somanchi
Tyler Smith
Vladislav Belogrudov
Wei Hu
Weidong Shao
Wu Wenxiang
YAMAMOTO Takashi
Yaguang Tang
Yanping Qu
Yatin Kumbhare
Ying Liu
Yong Sheng Gong
Yong Sheng Gong
Yoshihiro Kaneko
Zang MingJie
Zhang Hua
Zhenguo Niu
ZhiQiang Fan
ZhiQiang Fan
Zhongyue Luo
ajmiller
alexpilotti
armando-migliaccio
armando-migliaccio
berlin
changzhi
fujioka yuuichi
fumihiko kakuma
gongysh
gongysh
gordon chung
johndavidge
justin Lund
lawrancejing
leejian0612
liu-sheng
liuqing
llg8212
madhusudhan-kandadai
mark mcclain
mathieu-rohon
nfedotov
rohitagarwalla
ronak
rossella
sanuptpm
shihanzhang
sridhargaddam
sukhdev
trinaths
venkata anil
vikas
vinkesh banka
zhhuabj
neutron-vpnaas-8.0.0/requirements.txt 0000664 0005670 0005671 00000001642 12701407726 021135 0 ustar jenkins jenkins 0000000 0000000 # The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr>=1.6 # Apache-2.0
requests!=2.9.0,>=2.8.1 # Apache-2.0
Jinja2>=2.8 # BSD License (3 clause)
netaddr!=0.7.16,>=0.7.12 # BSD
SQLAlchemy<1.1.0,>=1.0.10 # MIT
alembic>=0.8.0 # MIT
six>=1.9.0 # MIT
neutron-lib>=0.0.1 # Apache-2.0
oslo.concurrency>=3.5.0 # Apache-2.0
oslo.config>=3.7.0 # Apache-2.0
oslo.db>=4.1.0 # Apache-2.0
oslo.log>=1.14.0 # Apache-2.0
oslo.messaging>=4.0.0 # Apache-2.0
oslo.serialization>=1.10.0 # Apache-2.0
oslo.service>=1.0.0 # Apache-2.0
oslo.utils>=3.5.0 # Apache-2.0
# This project does depend on neutron as a library, but the
# openstack tooling does not play nicely with projects that
# are not publicly available in pypi.
# -e git+https://git.openstack.org/openstack/neutron#egg=neutron
neutron-vpnaas-8.0.0/HACKING.rst 0000664 0005670 0005671 00000000363 12701407726 017446 0 ustar jenkins jenkins 0000000 0000000 Neutron VPNaaS Style Commandments
=================================
Please see the Neutron HACKING.rst file for style commandments for
neutron-vpnaas:
`Neutron HACKING.rst `_
neutron-vpnaas-8.0.0/TESTING.rst 0000664 0005670 0005671 00000000507 12701407726 017517 0 ustar jenkins jenkins 0000000 0000000 Testing Neutron VPNaaS
======================
Please see the TESTING.rst file for the Neutron project itself. This will have
the latest up to date instructions for how to test Neutron, and will
be applicable to neutron-vpnaas as well:
`Neutron TESTING.rst `_
neutron-vpnaas-8.0.0/CONTRIBUTING.rst 0000664 0005670 0005671 00000000273 12701407726 020311 0 ustar jenkins jenkins 0000000 0000000 Please see the Neutron CONTRIBUTING.rst file for how to contribute to
neutron-vpnaas:
`Neutron CONTRIBUTING.rst `_
neutron-vpnaas-8.0.0/neutron_vpnaas.egg-info/ 0000775 0005670 0005671 00000000000 12701410103 022361 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas.egg-info/requires.txt 0000664 0005670 0005671 00000000457 12701410102 024766 0 ustar jenkins jenkins 0000000 0000000 pbr>=1.6
requests!=2.9.0,>=2.8.1
Jinja2>=2.8
netaddr!=0.7.16,>=0.7.12
SQLAlchemy<1.1.0,>=1.0.10
alembic>=0.8.0
six>=1.9.0
neutron-lib>=0.0.1
oslo.concurrency>=3.5.0
oslo.config>=3.7.0
oslo.db>=4.1.0
oslo.log>=1.14.0
oslo.messaging>=4.0.0
oslo.serialization>=1.10.0
oslo.service>=1.0.0
oslo.utils>=3.5.0
neutron-vpnaas-8.0.0/neutron_vpnaas.egg-info/dependency_links.txt 0000664 0005670 0005671 00000000001 12701410102 026426 0 ustar jenkins jenkins 0000000 0000000
neutron-vpnaas-8.0.0/neutron_vpnaas.egg-info/not-zip-safe 0000664 0005670 0005671 00000000001 12701410075 024617 0 ustar jenkins jenkins 0000000 0000000
neutron-vpnaas-8.0.0/neutron_vpnaas.egg-info/SOURCES.txt 0000664 0005670 0005671 00000020720 12701410103 024246 0 ustar jenkins jenkins 0000000 0000000 .coveragerc
.mailmap
.pylintrc
.testr.conf
AUTHORS
CONTRIBUTING.rst
ChangeLog
HACKING.rst
LICENSE
MANIFEST.in
README.rst
TESTING.rst
babel.cfg
requirements.txt
setup.cfg
setup.py
test-requirements.txt
tox.ini
devstack/README.md
devstack/plugin.sh
devstack/settings
doc/source/conf.py
doc/source/index.rst
doc/source/devref/devstack.rst
doc/source/devref/index.rst
doc/source/devref/multiple-local-subnets.rst
doc/source/devref/vpnaas-rally-test.rst
etc/README.txt
etc/neutron/rootwrap.d/vpnaas.filters
etc/oslo-config-generator/neutron_vpnaas.conf
etc/oslo-config-generator/vpn_agent.ini
neutron_vpnaas/__init__.py
neutron_vpnaas/_i18n.py
neutron_vpnaas/opts.py
neutron_vpnaas/version.py
neutron_vpnaas.egg-info/PKG-INFO
neutron_vpnaas.egg-info/SOURCES.txt
neutron_vpnaas.egg-info/dependency_links.txt
neutron_vpnaas.egg-info/entry_points.txt
neutron_vpnaas.egg-info/not-zip-safe
neutron_vpnaas.egg-info/pbr.json
neutron_vpnaas.egg-info/requires.txt
neutron_vpnaas.egg-info/top_level.txt
neutron_vpnaas/cmd/__init__.py
neutron_vpnaas/cmd/eventlet/__init__.py
neutron_vpnaas/cmd/eventlet/agent.py
neutron_vpnaas/cmd/eventlet/vyatta_agent.py
neutron_vpnaas/db/__init__.py
neutron_vpnaas/db/migration/__init__.py
neutron_vpnaas/db/migration/alembic_migrations/README
neutron_vpnaas/db/migration/alembic_migrations/__init__.py
neutron_vpnaas/db/migration/alembic_migrations/env.py
neutron_vpnaas/db/migration/alembic_migrations/script.py.mako
neutron_vpnaas/db/migration/alembic_migrations/versions/3ea02b2a773e_add_index_tenant_id.py
neutron_vpnaas/db/migration/alembic_migrations/versions/CONTRACT_HEAD
neutron_vpnaas/db/migration/alembic_migrations/versions/EXPAND_HEAD
neutron_vpnaas/db/migration/alembic_migrations/versions/kilo_release.py
neutron_vpnaas/db/migration/alembic_migrations/versions/start_neutron_vpnaas.py
neutron_vpnaas/db/migration/alembic_migrations/versions/liberty/contract/2c82e782d734_drop_tenant_id_in_cisco_csr_identifier_.py
neutron_vpnaas/db/migration/alembic_migrations/versions/liberty/contract/333dfd6afaa2_populate_vpn_service_table_fields.py
neutron_vpnaas/db/migration/alembic_migrations/versions/liberty/contract/56893333aa52_fix_identifier_map_fk.py
neutron_vpnaas/db/migration/alembic_migrations/versions/liberty/expand/24f28869838b_add_fields_to_vpn_service_table.py
neutron_vpnaas/db/migration/alembic_migrations/versions/liberty/expand/30018084ed99_initial.py
neutron_vpnaas/db/migration/alembic_migrations/versions/mitaka/contract/2cb4ee992b41_multiple_local_subnets.py
neutron_vpnaas/db/migration/alembic_migrations/versions/mitaka/expand/28ee739a7e4b_multiple_local_subnets.py
neutron_vpnaas/db/migration/alembic_migrations/versions/mitaka/expand/41b509d10b5e_vpnaas_endpoint_groups.py
neutron_vpnaas/db/models/__init__.py
neutron_vpnaas/db/models/head.py
neutron_vpnaas/db/vpn/__init__.py
neutron_vpnaas/db/vpn/vpn_db.py
neutron_vpnaas/db/vpn/vpn_models.py
neutron_vpnaas/db/vpn/vpn_validator.py
neutron_vpnaas/extensions/__init__.py
neutron_vpnaas/extensions/vpn_endpoint_groups.py
neutron_vpnaas/extensions/vpnaas.py
neutron_vpnaas/services/__init__.py
neutron_vpnaas/services/vpn/__init__.py
neutron_vpnaas/services/vpn/agent.py
neutron_vpnaas/services/vpn/plugin.py
neutron_vpnaas/services/vpn/vpn_service.py
neutron_vpnaas/services/vpn/vyatta_agent.py
neutron_vpnaas/services/vpn/vyatta_vpn_service.py
neutron_vpnaas/services/vpn/common/__init__.py
neutron_vpnaas/services/vpn/common/constants.py
neutron_vpnaas/services/vpn/common/netns_wrapper.py
neutron_vpnaas/services/vpn/common/topics.py
neutron_vpnaas/services/vpn/device_drivers/__init__.py
neutron_vpnaas/services/vpn/device_drivers/cisco_csr_rest_client.py
neutron_vpnaas/services/vpn/device_drivers/cisco_ipsec.py
neutron_vpnaas/services/vpn/device_drivers/fedora_strongswan_ipsec.py
neutron_vpnaas/services/vpn/device_drivers/ipsec.py
neutron_vpnaas/services/vpn/device_drivers/libreswan_ipsec.py
neutron_vpnaas/services/vpn/device_drivers/strongswan_ipsec.py
neutron_vpnaas/services/vpn/device_drivers/vyatta_ipsec.py
neutron_vpnaas/services/vpn/device_drivers/template/openswan/ipsec.conf.template
neutron_vpnaas/services/vpn/device_drivers/template/openswan/ipsec.secret.template
neutron_vpnaas/services/vpn/device_drivers/template/strongswan/ipsec.conf.template
neutron_vpnaas/services/vpn/device_drivers/template/strongswan/ipsec.secret.template
neutron_vpnaas/services/vpn/device_drivers/template/strongswan/strongswan.conf.template
neutron_vpnaas/services/vpn/service_drivers/__init__.py
neutron_vpnaas/services/vpn/service_drivers/base_ipsec.py
neutron_vpnaas/services/vpn/service_drivers/cisco_csr_db.py
neutron_vpnaas/services/vpn/service_drivers/cisco_ipsec.py
neutron_vpnaas/services/vpn/service_drivers/cisco_validator.py
neutron_vpnaas/services/vpn/service_drivers/ipsec.py
neutron_vpnaas/services/vpn/service_drivers/ipsec_validator.py
neutron_vpnaas/services/vpn/service_drivers/vyatta_ipsec.py
neutron_vpnaas/tests/__init__.py
neutron_vpnaas/tests/base.py
neutron_vpnaas/tests/api/__init__.py
neutron_vpnaas/tests/api/base.py
neutron_vpnaas/tests/api/clients.py
neutron_vpnaas/tests/api/test_vpnaas.py
neutron_vpnaas/tests/contrib/README
neutron_vpnaas/tests/contrib/filters.template
neutron_vpnaas/tests/contrib/functional-test-rootwrap.conf
neutron_vpnaas/tests/contrib/functional-testing.filters
neutron_vpnaas/tests/contrib/gate_hook.sh
neutron_vpnaas/tests/contrib/post_test_hook.sh
neutron_vpnaas/tests/functional/__init__.py
neutron_vpnaas/tests/functional/requirements.txt
neutron_vpnaas/tests/functional/common/README
neutron_vpnaas/tests/functional/common/__init__.py
neutron_vpnaas/tests/functional/common/test_migrations_sync.py
neutron_vpnaas/tests/functional/common/test_scenario.py
neutron_vpnaas/tests/functional/openswan/README
neutron_vpnaas/tests/functional/openswan/__init__.py
neutron_vpnaas/tests/functional/openswan/test_openswan_driver.py
neutron_vpnaas/tests/functional/strongswan/README
neutron_vpnaas/tests/functional/strongswan/__init__.py
neutron_vpnaas/tests/functional/strongswan/test_netns_wrapper.py
neutron_vpnaas/tests/functional/strongswan/test_strongswan_driver.py
neutron_vpnaas/tests/unit/__init__.py
neutron_vpnaas/tests/unit/db/__init__.py
neutron_vpnaas/tests/unit/db/vpn/__init__.py
neutron_vpnaas/tests/unit/db/vpn/test_vpn_db.py
neutron_vpnaas/tests/unit/db/vpn/test_vpn_validator.py
neutron_vpnaas/tests/unit/extensions/__init__.py
neutron_vpnaas/tests/unit/extensions/test_vpn_endpoint_groups.py
neutron_vpnaas/tests/unit/extensions/test_vpnaas.py
neutron_vpnaas/tests/unit/services/__init__.py
neutron_vpnaas/tests/unit/services/vpn/__init__.py
neutron_vpnaas/tests/unit/services/vpn/test_plugin.py
neutron_vpnaas/tests/unit/services/vpn/test_vpn_service.py
neutron_vpnaas/tests/unit/services/vpn/test_vyatta_vpn_service.py
neutron_vpnaas/tests/unit/services/vpn/common/__init__.py
neutron_vpnaas/tests/unit/services/vpn/common/test_netns_wrapper.py
neutron_vpnaas/tests/unit/services/vpn/device_drivers/__init__.py
neutron_vpnaas/tests/unit/services/vpn/device_drivers/test_cisco_csr_rest_client.py
neutron_vpnaas/tests/unit/services/vpn/device_drivers/test_cisco_ipsec.py
neutron_vpnaas/tests/unit/services/vpn/device_drivers/test_ipsec.py
neutron_vpnaas/tests/unit/services/vpn/device_drivers/test_vyatta_ipsec.py
neutron_vpnaas/tests/unit/services/vpn/service_drivers/__init__.py
neutron_vpnaas/tests/unit/services/vpn/service_drivers/test_cisco_ipsec.py
neutron_vpnaas/tests/unit/services/vpn/service_drivers/test_ipsec.py
neutron_vpnaas/tests/unit/services/vpn/service_drivers/test_vyatta_ipsec.py
rally-jobs/__init__.py
rally-jobs/plugins/__init__.py
rally-jobs/plugins/test_vpn_connectivity.py
rally-jobs/plugins/test_vpn_status.py
rally-jobs/plugins/test_vpn_tenant_scenario.py
rally-jobs/plugins/vpn_base.py
rally-jobs/plugins/vpn_utils.py
rally-jobs/rally-configs/args_template.json
rally-jobs/rally-configs/rally_config_dvr.yaml
rally-jobs/rally-configs/rally_config_non_dvr.yaml
releasenotes/notes/.placeholder
releasenotes/notes/config-file-generation-0dcf19f5d8baaf5d.yaml
releasenotes/source/conf.py
releasenotes/source/index.rst
releasenotes/source/liberty.rst
releasenotes/source/unreleased.rst
releasenotes/source/_static/.placeholder
releasenotes/source/_templates/.placeholder
tools/check_i18n.py
tools/check_i18n_test_case.txt
tools/check_unit_test_structure.sh
tools/clean.sh
tools/configure_for_vpn_func_testing.sh
tools/deploy_rootwrap.sh
tools/generate_config_file_samples.sh
tools/i18n_cfg.py
tools/install_venv.py
tools/install_venv_common.py
tools/ostestr_compat_shim.sh
tools/pretty_tox.sh
tools/subunit-trace.py
tools/tox_install.sh
tools/with_venv.sh neutron-vpnaas-8.0.0/neutron_vpnaas.egg-info/PKG-INFO 0000664 0005670 0005671 00000002654 12701410102 023464 0 ustar jenkins jenkins 0000000 0000000 Metadata-Version: 1.1
Name: neutron-vpnaas
Version: 8.0.0
Summary: OpenStack Networking VPN as a Service
Home-page: http://www.openstack.org/
Author: OpenStack
Author-email: openstack-dev@lists.openstack.org
License: UNKNOWN
Description: Welcome!
========
This package contains the code for the Neutron VPN as a Service
(VPNaaS) service. This includes third-party drivers. This package
requires Neutron to run.
External Resources:
===================
The homepage for Neutron is: http://launchpad.net/neutron. Use this
site for asking for help, and filing bugs. We use a single Launchpad
page for all Neutron projects.
Code is available on git.openstack.org at:
`_
Platform: UNKNOWN
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
neutron-vpnaas-8.0.0/neutron_vpnaas.egg-info/top_level.txt 0000664 0005670 0005671 00000000017 12701410102 025110 0 ustar jenkins jenkins 0000000 0000000 neutron_vpnaas
neutron-vpnaas-8.0.0/neutron_vpnaas.egg-info/pbr.json 0000664 0005670 0005671 00000000056 12701410102 024037 0 ustar jenkins jenkins 0000000 0000000 {"is_release": true, "git_version": "a1b1260"} neutron-vpnaas-8.0.0/neutron_vpnaas.egg-info/entry_points.txt 0000664 0005670 0005671 00000001561 12701410102 025661 0 ustar jenkins jenkins 0000000 0000000 [console_scripts]
neutron-vpn-agent = neutron_vpnaas.cmd.eventlet.agent:main
neutron-vpn-netns-wrapper = neutron_vpnaas.services.vpn.common.netns_wrapper:main
neutron-vyatta-agent = neutron_vpnaas.cmd.eventlet.vyatta_agent:main
[device_drivers]
neutron.services.vpn.device_drivers.cisco_ipsec.CiscoCsrIPsecDriver = neutron_vpnaas.services.vpn.device_drivers.cisco_ipsec:CiscoCsrIPsecDriver
neutron.services.vpn.device_drivers.ipsec.OpenSwanDriver = neutron_vpnaas.services.vpn.device_drivers.ipsec:OpenSwanDriver
neutron.services.vpn.device_drivers.vyatta_ipsec.VyattaIPsecDriver = neutron_vpnaas.services.vpn.device_drivers.vyatta_ipsec:VyattaIPsecDriver
[neutron.db.alembic_migrations]
neutron-vpnaas = neutron_vpnaas.db.migration:alembic_migrations
[oslo.config.opts]
neutron.vpnaas = neutron_vpnaas.opts:list_opts
neutron.vpnaas.agent = neutron_vpnaas.opts:list_agent_opts
neutron-vpnaas-8.0.0/releasenotes/ 0000775 0005670 0005671 00000000000 12701410103 020316 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/releasenotes/notes/ 0000775 0005670 0005671 00000000000 12701410103 021446 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/releasenotes/notes/.placeholder 0000664 0005670 0005671 00000000000 12701407726 023740 0 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/releasenotes/notes/config-file-generation-0dcf19f5d8baaf5d.yaml 0000664 0005670 0005671 00000000437 12701407726 031410 0 ustar jenkins jenkins 0000000 0000000 ---
prelude: >
Generation of sample Neutron VPNaaS configuration files.
features:
- Neutron VPNaaS no longer includes static example configuration files.
Instead, use tools/generate_config_file_samples.sh to generate them.
The files are generated with a .sample extension.
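# For example (the genconfig target is defined in this repository's tox.ini):
#   tox -e genconfig    # runs tools/generate_config_file_samples.sh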
neutron-vpnaas-8.0.0/releasenotes/source/ 0000775 0005670 0005671 00000000000 12701410103 021616 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/releasenotes/source/index.rst 0000664 0005670 0005671 00000000224 12701407726 023476 0 ustar jenkins jenkins 0000000 0000000 ==============================
Neutron VPNaaS Release Notes
==============================
.. toctree::
:maxdepth: 1
liberty
unreleased
neutron-vpnaas-8.0.0/releasenotes/source/_templates/ 0000775 0005670 0005671 00000000000 12701410103 023753 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/releasenotes/source/_templates/.placeholder 0000664 0005670 0005671 00000000000 12701407726 026245 0 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/releasenotes/source/unreleased.rst 0000664 0005670 0005671 00000000160 12701407726 024515 0 ustar jenkins jenkins 0000000 0000000 ==============================
Current Series Release Notes
==============================
.. release-notes::
neutron-vpnaas-8.0.0/releasenotes/source/liberty.rst 0000664 0005670 0005671 00000000222 12701407726 024037 0 ustar jenkins jenkins 0000000 0000000 ==============================
Liberty Series Release Notes
==============================
.. release-notes::
:branch: origin/stable/liberty
neutron-vpnaas-8.0.0/releasenotes/source/conf.py 0000664 0005670 0005671 00000022022 12701407726 023134 0 ustar jenkins jenkins 0000000 0000000 # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Neutron VPNaaS Release Notes documentation build configuration file, created
# by # sphinx-quickstart on Tue Nov 3 17:40:50 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'oslosphinx',
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Neutron VPNaaS Release Notes'
copyright = u'2015, Neutron VPNaaS Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from neutron_vpnaas.version import version_info as neutron_vpnaas_version
# The full version, including alpha/beta/rc tags.
release = neutron_vpnaas_version.version_string_with_vcs()
# The short X.Y version.
version = neutron_vpnaas_version.canonical_version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# " v documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'NeutronVPNaaSReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'NeutronVPNaaSReleaseNotes.tex',
u'Neutron VPNaaS Release Notes Documentation',
u'Neutron VPNaaS Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'neutronvpnaasreleasenotes', u'Neutron VPNaaS Release Notes '
'Documentation',
[u'Neutron VPNaaS Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'NeutronVPNaaSReleaseNotes', u'Neutron VPNaaS Release Notes '
'Documentation',
u'Neutron VPNaaS Developers', 'NeutronVPNaaSReleaseNotes',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
neutron-vpnaas-8.0.0/releasenotes/source/_static/ 0000775 0005670 0005671 00000000000 12701410103 023244 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/releasenotes/source/_static/.placeholder 0000664 0005670 0005671 00000000000 12701407726 025536 0 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/README.rst 0000664 0005670 0005671 00000001173 12701407726 017337 0 ustar jenkins jenkins 0000000 0000000 Welcome!
========
This package contains the code for the Neutron VPN as a Service
(VPNaaS) service. This includes third-party drivers. This package
requires Neutron to run.
External Resources:
===================
The homepage for Neutron is: http://launchpad.net/neutron. Use this
site for asking for help, and filing bugs. We use a single Launchpad
page for all Neutron projects.
Code is available on git.openstack.org at:
`_
neutron-vpnaas-8.0.0/neutron_vpnaas/ 0000775 0005670 0005671 00000000000 12701410103 020667 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/ 0000775 0005670 0005671 00000000000 12701410103 022031 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/__init__.py 0000664 0005670 0005671 00000000000 12701407726 024151 0 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/contrib/ 0000775 0005670 0005671 00000000000 12701410103 023471 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/contrib/README 0000664 0005670 0005671 00000000227 12701407726 024373 0 ustar jenkins jenkins 0000000 0000000 The files in this directory are intended for use by the
infra jobs that run the various functional test
suite in the gate for the neutron-vpnaas repo.
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/contrib/functional-testing.filters 0000664 0005670 0005671 00000004072 12701407726 030724 0 ustar jenkins jenkins 0000000 0000000 # neutron-rootwrap command filters to support functional testing. It
# is NOT intended to be used outside of a test environment.
#
# This file should be owned by (and only-writeable by) the root user
[Filters]
# enable ping from namespace
ping_filter: CommandFilter, ping, root
ping6_filter: CommandFilter, ping6, root
# enable curl from namespace
curl_filter: CommandFilter, curl, root
tee_filter: CommandFilter, tee, root
tee_kill: KillFilter, root, tee, -9
nc_filter: CommandFilter, nc, root
# netcat has different binaries depending on linux distribution
nc_kill: KillFilter, root, nc, -9
ncbsd_kill: KillFilter, root, nc.openbsd, -9
ncat_kill: KillFilter, root, ncat, -9
ss_filter: CommandFilter, ss, root
# arping
arping: CommandFilter, arping, root
# l3_agent
sysctl: CommandFilter, sysctl, root
route: CommandFilter, route, root
radvd: CommandFilter, radvd, root
# metadata proxy
metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
# RHEL invocation of the metadata proxy will report /usr/bin/python
kill_metadata: KillFilter, root, python, -9
kill_metadata7: KillFilter, root, python2.7, -9
kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -9, -HUP
kill_radvd: KillFilter, root, /sbin/radvd, -9, -HUP
# ip_lib
ip: IpFilter, ip, root
find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
ip_exec: IpNetnsExecFilter, ip, root
# For ip monitor
kill_ip_monitor: KillFilter, root, ip, -9
# ovs_lib (if OVSInterfaceDriver is used)
ovs-vsctl: CommandFilter, ovs-vsctl, root
# iptables_manager
iptables-save: CommandFilter, iptables-save, root
iptables-restore: CommandFilter, iptables-restore, root
ip6tables-save: CommandFilter, ip6tables-save, root
ip6tables-restore: CommandFilter, ip6tables-restore, root
# Keepalived
keepalived: CommandFilter, keepalived, root
kill_keepalived: KillFilter, root, /usr/sbin/keepalived, -HUP, -15, -9
# l3 agent to delete floatingip's conntrack state
conntrack: CommandFilter, conntrack, root
# keepalived state change monitor
keepalived_state_change: CommandFilter, neutron-keepalived-state-change, root
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/contrib/functional-test-rootwrap.conf 0000664 0005670 0005671 00000002233 12701407726 031353 0 ustar jenkins jenkins 0000000 0000000 # Configuration for neutron-rootwrap
# This file should be owned by (and only-writeable by) the root user
[DEFAULT]
# List of directories to load filter definitions from (separated by ',').
# These directories MUST all be only writeable by root !
filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap
# List of directories to search executables in, in case filters do not
# explicitely specify a full path (separated by ',')
# If not specified, defaults to system PATH environment variable.
# These directories MUST all be only writeable by root !
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin
# Enable logging to syslog
# Default value is False
use_syslog=False
# Which syslog facility to use.
# Valid values include auth, authpriv, syslog, local0, local1...
# Default value is 'syslog'
syslog_log_facility=syslog
# Which messages to log.
# INFO means log all usage
# ERROR means only log unsuccessful attempts
syslog_log_level=ERROR
[xenapi]
# XenAPI configuration is only required by the L2 agent if it is to
# target a XenServer/XCP compute host's dom0.
xenapi_connection_url=
xenapi_connection_username=root
xenapi_connection_password=
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/contrib/filters.template 0000664 0005670 0005671 00000001374 12701407726 026724 0 ustar jenkins jenkins 0000000 0000000 # neutron-rootwrap command filters to support functional testing. It
# is NOT intended to be used outside of a test environment.
#
# This file should be owned by (and only-writeable by) the root user
[Filters]
# '$BASE_PATH' is intended to be replaced with the expected tox path
# (e.g. /opt/stack/new/neutron/.tox/dsvm-functional) by the neutron
# functional jenkins job. This ensures that tests can kill the
# processes that they launch with their containing tox environment's
# python.
kill_tox_python: KillFilter, root, $BASE_PATH/bin/python, -9
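# After substitution, the line above would read, for example (path taken from
# the example in the comment above; illustrative only):
#   kill_tox_python: KillFilter, root, /opt/stack/new/neutron/.tox/dsvm-functional/bin/python, -9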
# enable ping from namespace
ping_filter: CommandFilter, ping, root
# enable curl from namespace
curl_filter: CommandFilter, curl, root
tee_filter: CommandFilter, tee, root
tee_kill: KillFilter, root, tee, -9
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/contrib/gate_hook.sh 0000775 0005670 0005671 00000001604 12701407726 026012 0 ustar jenkins jenkins 0000000 0000000 #!/usr/bin/env bash
set -ex
VENV=${1:-"dsvm-functional"}
export DEVSTACK_LOCAL_CONFIG="enable_plugin neutron-vpnaas https://git.openstack.org/openstack/neutron-vpnaas"
case $VENV in
dsvm-functional | dsvm-functional-sswan)
# The following need to be set before sourcing
# configure_for_func_testing.
GATE_DEST=$BASE/new
GATE_STACK_USER=stack
NEUTRON_PATH=$GATE_DEST/neutron
PROJECT_NAME=neutron-vpnaas
NEUTRON_VPN_PATH=$GATE_DEST/$PROJECT_NAME
DEVSTACK_PATH=$GATE_DEST/devstack
IS_GATE=True
USE_CONSTRAINT_ENV=False
source $NEUTRON_VPN_PATH/tools/configure_for_vpn_func_testing.sh
# Make the workspace owned by the stack user
sudo chown -R $STACK_USER:$STACK_USER $BASE
configure_host_for_vpn_func_testing
;;
api) $BASE/new/devstack-gate/devstack-vm-gate.sh ;;
esac
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/contrib/post_test_hook.sh 0000775 0005670 0005671 00000002405 12701407726 027116 0 ustar jenkins jenkins 0000000 0000000 #!/usr/bin/env bash
set -xe
NEUTRON_VPNAAS_DIR="$BASE/new/neutron-vpnaas"
TEMPEST_CONFIG_DIR="$BASE/new/tempest/etc"
SCRIPTS_DIR="/usr/os-testr-env/bin"
VENV=${1:-"dsvm-functional"}
function generate_testr_results {
# Give job user rights to access tox logs
sudo -H -u $owner chmod o+rw .
sudo -H -u $owner chmod o+rw -R .testrepository
if [ -f ".testrepository/0" ] ; then
.tox/$VENV/bin/subunit-1to2 < .testrepository/0 > ./testrepository.subunit
$SCRIPTS_DIR/subunit2html ./testrepository.subunit testr_results.html
gzip -9 ./testrepository.subunit
gzip -9 ./testr_results.html
sudo mv ./*.gz /opt/stack/logs/
fi
}
case $VENV in
dsvm-functional | dsvm-functional-sswan)
owner=stack
sudo_env=
;;
api)
owner=tempest
# Configure the api tests to use the tempest.conf set by devstack.
sudo_env="TEMPEST_CONFIG_DIR=$TEMPEST_DIR/etc"
;;
esac
# Set owner permissions according to job's requirements.
cd $NEUTRON_VPNAAS_DIR
sudo chown -R $owner:stack $NEUTRON_VPNAAS_DIR
echo "Running neutron $VENV test suite"
set +e
sudo -H -u $owner $sudo_env tox -e $VENV
testr_exit_code=$?
set -e
# Collect and parse results
generate_testr_results
exit $testr_exit_code
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/base.py 0000664 0005670 0005671 00000002022 12701407726 023332 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron.tests import base as n_base
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_plugin
from neutron.tests.unit.extensions import base as test_api_v2_extension
class BaseTestCase(n_base.BaseTestCase):
pass
class ExtensionTestCase(test_api_v2_extension.ExtensionTestCase):
pass
class NeutronDbPluginV2TestCase(test_db_plugin.NeutronDbPluginV2TestCase):
pass
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/ 0000775 0005670 0005671 00000000000 12701410103 023010 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/__init__.py 0000664 0005670 0005671 00000001267 12701407726 025150 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
cfg.CONF.use_stderr = False
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/ 0000775 0005670 0005671 00000000000 12701410103 024633 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/vpn/ 0000775 0005670 0005671 00000000000 12701410103 025436 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/vpn/test_vyatta_vpn_service.py 0000664 0005670 0005671 00000003504 12701407726 033005 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2015 Brocade Communications System, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.agent.common import config as agent_config
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron_vpnaas.services.vpn import vyatta_vpn_service
from neutron_vpnaas.tests import base
_uuid = uuidutils.generate_uuid
FAKE_ROUTER_ID = _uuid()
class TestVyattaVPNService(base.BaseTestCase):
def setUp(self):
super(TestVyattaVPNService, self).setUp()
self.conf = cfg.CONF
agent_config.register_root_helper(self.conf)
self.ri_kwargs = {'root_helper': self.conf.AGENT.root_helper,
'agent_conf': self.conf,
'interface_driver': mock.sentinel.interface_driver}
self.agent = mock.Mock()
self.vyatta_service = vyatta_vpn_service.VyattaVPNService(
self.agent)
self.l3_agent = self.vyatta_service.l3_agent
def test_get_router_client(self):
self.vyatta_service.get_router_client(FAKE_ROUTER_ID)
self.l3_agent.get_router_client.assert_called_once_with(FAKE_ROUTER_ID)
def test_get_router(self):
self.vyatta_service.get_router(FAKE_ROUTER_ID)
self.l3_agent.get_router.assert_called_once_with(FAKE_ROUTER_ID)
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/vpn/__init__.py 0000664 0005670 0005671 00000000000 12701407726 027556 0 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/vpn/test_plugin.py 0000664 0005670 0005671 00000017205 12701407726 030373 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from neutron.common import constants
from neutron import context
from neutron import manager
from neutron.plugins.common import constants as p_constants
from neutron.tests.unit.db import test_agentschedulers_db
from neutron.tests.unit.extensions import test_agent as test_agent_ext_plugin
from neutron_vpnaas.db.vpn import vpn_validator
from neutron_vpnaas.services.vpn.service_drivers import ipsec as ipsec_driver
from neutron_vpnaas.tests.unit.db.vpn import test_vpn_db as test_db_vpnaas
FAKE_HOST = test_agent_ext_plugin.L3_HOSTA
VPN_DRIVER_CLASS = 'neutron_vpnaas.services.vpn.plugin.VPNDriverPlugin'
class TestVPNDriverPlugin(test_db_vpnaas.TestVpnaas,
test_agentschedulers_db.AgentSchedulerTestMixIn,
test_agent_ext_plugin.AgentDBTestMixIn):
def setUp(self):
driver_cls_p = mock.patch(
'neutron_vpnaas.services.vpn.'
'service_drivers.ipsec.IPsecVPNDriver')
driver_cls = driver_cls_p.start()
self.driver = mock.Mock()
self.driver.service_type = ipsec_driver.IPSEC
self.driver.validator = vpn_validator.VpnReferenceValidator()
driver_cls.return_value = self.driver
super(TestVPNDriverPlugin, self).setUp(
vpnaas_plugin=VPN_DRIVER_CLASS)
# Note: Context must be created after BaseTestCase.setUp() so that
# config for policy is set.
self.adminContext = context.get_admin_context()
def test_create_ipsec_site_connection(self, **extras):
super(TestVPNDriverPlugin, self).test_create_ipsec_site_connection()
self.driver.create_ipsec_site_connection.assert_called_once_with(
mock.ANY, mock.ANY)
self.driver.delete_ipsec_site_connection.assert_called_once_with(
mock.ANY, mock.ANY)
def test_create_vpnservice(self):
super(TestVPNDriverPlugin, self).test_create_vpnservice()
self.driver.create_vpnservice.assert_called_once_with(
mock.ANY, mock.ANY)
def test_delete_vpnservice(self, **extras):
super(TestVPNDriverPlugin, self).test_delete_vpnservice()
self.driver.delete_vpnservice.assert_called_once_with(
mock.ANY, mock.ANY)
def test_update_vpnservice(self, **extras):
super(TestVPNDriverPlugin, self).test_update_vpnservice()
self.driver.update_vpnservice.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY)
@contextlib.contextmanager
def vpnservice_set(self):
"""Test case to create a ipsec_site_connection."""
vpnservice_name = "vpn1"
ipsec_site_connection_name = "ipsec_site_connection"
ikename = "ikepolicy1"
ipsecname = "ipsecpolicy1"
description = "my-vpn-connection"
keys = {'name': vpnservice_name,
'description': "my-vpn-connection",
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
'initiator': 'bi-directional',
'mtu': 1500,
'dpd_action': 'hold',
'dpd_interval': 40,
'dpd_timeout': 120,
'tenant_id': self._tenant_id,
'psk': 'abcd',
'status': 'PENDING_CREATE',
'admin_state_up': True}
with self.ikepolicy(name=ikename) as ikepolicy:
with self.ipsecpolicy(name=ipsecname) as ipsecpolicy:
with self.subnet() as subnet:
with self.router() as router:
plugin = manager.NeutronManager.get_plugin()
agent = {'host': FAKE_HOST,
'agent_type': constants.AGENT_TYPE_L3,
'binary': 'fake-binary',
'topic': 'fake-topic'}
plugin.create_or_update_agent(self.adminContext, agent)
plugin.schedule_router(
self.adminContext, router['router']['id'])
with self.vpnservice(name=vpnservice_name,
subnet=subnet,
router=router) as vpnservice1:
keys['ikepolicy_id'] = ikepolicy['ikepolicy']['id']
keys['ipsecpolicy_id'] = (
ipsecpolicy['ipsecpolicy']['id']
)
keys['vpnservice_id'] = (
vpnservice1['vpnservice']['id']
)
with self.ipsec_site_connection(
self.fmt,
ipsec_site_connection_name,
keys['peer_address'],
keys['peer_id'],
keys['peer_cidrs'],
keys['mtu'],
keys['psk'],
keys['initiator'],
keys['dpd_action'],
keys['dpd_interval'],
keys['dpd_timeout'],
vpnservice1,
ikepolicy,
ipsecpolicy,
keys['admin_state_up'],
description=description,
):
yield vpnservice1['vpnservice']
def test_get_agent_hosting_vpn_services(self):
with self.vpnservice_set():
service_plugin = manager.NeutronManager.get_service_plugins()[
p_constants.VPN]
vpnservices = service_plugin._get_agent_hosting_vpn_services(
self.adminContext, FAKE_HOST)
vpnservices = vpnservices.all()
self.assertEqual(1, len(vpnservices))
vpnservice_db = vpnservices[0]
self.assertEqual(1, len(vpnservice_db.ipsec_site_connections))
ipsec_site_connection = vpnservice_db.ipsec_site_connections[0]
self.assertIsNotNone(
ipsec_site_connection['ikepolicy'])
self.assertIsNotNone(
ipsec_site_connection['ipsecpolicy'])
def test_update_status(self):
with self.vpnservice_set() as vpnservice:
self._register_agent_states()
service_plugin = manager.NeutronManager.get_service_plugins()[
p_constants.VPN]
service_plugin.update_status_by_agent(
self.adminContext,
[{'status': 'ACTIVE',
'ipsec_site_connections': {},
'updated_pending_status': True,
'id': vpnservice['id']}])
vpnservices = service_plugin._get_agent_hosting_vpn_services(
self.adminContext, FAKE_HOST)
vpnservice_db = vpnservices[0]
self.assertEqual(p_constants.ACTIVE, vpnservice_db['status'])
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/vpn/common/ 0000775 0005670 0005671 00000000000 12701410103 026726 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/vpn/common/test_netns_wrapper.py 0000664 0005670 0005671 00000006043 12701407726 033252 0 ustar jenkins jenkins 0000000 0000000 # Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.tests import base
from neutron_vpnaas.services.vpn.common import netns_wrapper as nswrap
class TestNetnsWrapper(base.BaseTestCase):
def setUp(self):
super(TestNetnsWrapper, self).setUp()
patch_methods = ['filter_command',
'execute',
'setup_conf']
for method in patch_methods:
self.patch_obj(nswrap, method)
patch_classes = ['neutron.common.config.setup_logging',
'os.path.isdir',
'os.path.samefile',
'sys.exit']
for cls in patch_classes:
self.patch_cls(cls)
self.filter_command.return_value = False
self.execute.return_value = 0
self.conf = mock.Mock()
self.conf.cmd = 'ls,-al'
self.conf.mount_paths = {'/foo': '/dir/foo',
'/var': '/dir/var'}
self.setup_conf.return_value = self.conf
self.conf.rootwrap_config = 'conf'
self.isdir.return_value = True
self.samefile.return_value = False
def patch_obj(self, obj, method):
_m = mock.patch.object(obj, method)
_mock = _m.start()
setattr(self, method, _mock)
def patch_cls(self, patch_class):
_m = mock.patch(patch_class)
mock_name = patch_class.split('.')[-1]
_mock = _m.start()
setattr(self, mock_name, _mock)
def test_netns_wrap_fail_without_netns(self):
self.samefile.return_value = True
return_val = nswrap.execute_with_mount()
self.assertTrue(return_val)
def test_netns_wrap(self):
self.conf.cmd = 'ls,-al'
return_val = nswrap.execute_with_mount()
exp_calls = [mock.call(['mount', '--bind', '/dir/foo', '/foo']),
mock.call(['mount', '--bind', '/dir/var', '/var']),
mock.call('ls,-al')]
self.execute.assert_has_calls(exp_calls, any_order=True)
self.assertFalse(return_val)
def test_netns_wrap_fail_without_cmd(self):
self.conf.cmd = None
return_val = nswrap.execute_with_mount()
self.assertFalse(self.execute.called)
self.assertTrue(return_val)
def test_netns_wrap_fail_without_mount_paths(self):
self.conf.mount_paths = None
return_val = nswrap.execute_with_mount()
self.assertFalse(self.execute.called)
self.assertTrue(return_val)
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/vpn/common/__init__.py 0000664 0005670 0005671 00000000000 12701407726 031046 0 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/vpn/service_drivers/ 0000775 0005670 0005671 00000000000 12701410103 030634 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/vpn/service_drivers/__init__.py 0000664 0005670 0005671 00000000000 12701407726 032754 0 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/vpn/service_drivers/test_cisco_ipsec.py 0000664 0005670 0005671 00000051650 12701407726 034560 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from six import moves
from neutron import context as n_ctx
from neutron.db import servicetype_db as st_db
from neutron.plugins.common import constants
from neutron.tests.unit import testlib_api
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron_vpnaas.services.vpn import plugin as vpn_plugin
from neutron_vpnaas.services.vpn.service_drivers import cisco_csr_db as csr_db
from neutron_vpnaas.services.vpn.service_drivers \
import cisco_ipsec as ipsec_driver
from neutron_vpnaas.services.vpn.service_drivers \
import cisco_validator as validator
from neutron_vpnaas.tests import base
_uuid = uuidutils.generate_uuid
FAKE_VPN_CONN_ID = _uuid()
FAKE_SERVICE_ID = _uuid()
FAKE_VPN_CONNECTION = {
'vpnservice_id': FAKE_SERVICE_ID,
'id': FAKE_VPN_CONN_ID,
'ikepolicy_id': _uuid(),
'ipsecpolicy_id': _uuid(),
'tenant_id': _uuid()
}
FAKE_ROUTER_ID = _uuid()
FAKE_VPN_SERVICE = {
'router_id': FAKE_ROUTER_ID
}
FAKE_HOST = 'fake_host'
IPV4 = 4
CISCO_IPSEC_SERVICE_DRIVER = ('neutron_vpnaas.services.vpn.service_drivers.'
'cisco_ipsec.CiscoCsrIPsecVPNDriver')
class TestCiscoValidatorSelection(base.BaseTestCase):
def setUp(self):
super(TestCiscoValidatorSelection, self).setUp()
# TODO(armax): remove this if branch as soon as the ServiceTypeManager
# API for adding provider configurations becomes available
if not hasattr(st_db.ServiceTypeManager, 'add_provider_configuration'):
vpnaas_provider = (constants.VPN +
':vpnaas:' +
CISCO_IPSEC_SERVICE_DRIVER + ':default')
cfg.CONF.set_override(
'service_provider', [vpnaas_provider], 'service_providers')
else:
vpnaas_provider = [{
'service_type': constants.VPN,
'name': 'vpnaas',
'driver': CISCO_IPSEC_SERVICE_DRIVER,
'default': True
}]
# override the default service provider
self.service_providers = (
mock.patch.object(st_db.ServiceTypeManager,
'get_service_providers').start())
self.service_providers.return_value = vpnaas_provider
st_db.ServiceTypeManager._instance = None
mock.patch('neutron.common.rpc.create_connection').start()
self.vpn_plugin = vpn_plugin.VPNDriverPlugin()
def test_reference_driver_used(self):
self.assertIsInstance(self.vpn_plugin._get_validator(),
validator.CiscoCsrVpnValidator)
class TestCiscoIPsecDriverValidation(base.BaseTestCase):
def setUp(self):
super(TestCiscoIPsecDriverValidation, self).setUp()
self.l3_plugin = mock.Mock()
mock.patch(
'neutron.manager.NeutronManager.get_service_plugins',
return_value={constants.L3_ROUTER_NAT: self.l3_plugin}).start()
self.context = n_ctx.Context('some_user', 'some_tenant')
self.vpn_service = {'router_id': '123'}
self.router = mock.Mock()
self.service_plugin = mock.Mock()
self.validator = validator.CiscoCsrVpnValidator(self.service_plugin)
def test_ike_version_unsupported(self):
"""Failure test that Cisco CSR REST API does not support IKE v2."""
policy_info = {'ike_version': 'v2',
'lifetime': {'units': 'seconds', 'value': 60}}
self.assertRaises(validator.CsrValidationFailure,
self.validator.validate_ike_version,
policy_info)
def test_ike_lifetime_not_in_seconds(self):
"""Failure test of unsupported lifetime units for IKE policy."""
policy_info = {'lifetime': {'units': 'kilobytes', 'value': 1000}}
self.assertRaises(validator.CsrValidationFailure,
self.validator.validate_lifetime,
"IKE Policy", policy_info)
def test_ipsec_lifetime_not_in_seconds(self):
"""Failure test of unsupported lifetime units for IPSec policy."""
policy_info = {'lifetime': {'units': 'kilobytes', 'value': 1000}}
self.assertRaises(validator.CsrValidationFailure,
self.validator.validate_lifetime,
"IPSec Policy", policy_info)
def test_ike_lifetime_seconds_values_at_limits(self):
"""Test valid lifetime values for IKE policy."""
policy_info = {'lifetime': {'units': 'seconds', 'value': 60}}
self.validator.validate_lifetime('IKE Policy', policy_info)
policy_info = {'lifetime': {'units': 'seconds', 'value': 86400}}
self.validator.validate_lifetime('IKE Policy', policy_info)
def test_ipsec_lifetime_seconds_values_at_limits(self):
"""Test valid lifetime values for IPSec policy."""
policy_info = {'lifetime': {'units': 'seconds', 'value': 120}}
self.validator.validate_lifetime('IPSec Policy', policy_info)
policy_info = {'lifetime': {'units': 'seconds', 'value': 2592000}}
self.validator.validate_lifetime('IPSec Policy', policy_info)
def test_ike_lifetime_values_invalid(self):
"""Failure test of unsupported lifetime values for IKE policy."""
which = "IKE Policy"
policy_info = {'lifetime': {'units': 'seconds', 'value': 59}}
self.assertRaises(validator.CsrValidationFailure,
self.validator.validate_lifetime,
which, policy_info)
policy_info = {'lifetime': {'units': 'seconds', 'value': 86401}}
self.assertRaises(validator.CsrValidationFailure,
self.validator.validate_lifetime,
which, policy_info)
def test_ipsec_lifetime_values_invalid(self):
"""Failure test of unsupported lifetime values for IPSec policy."""
which = "IPSec Policy"
policy_info = {'lifetime': {'units': 'seconds', 'value': 119}}
self.assertRaises(validator.CsrValidationFailure,
self.validator.validate_lifetime,
which, policy_info)
policy_info = {'lifetime': {'units': 'seconds', 'value': 2592001}}
self.assertRaises(validator.CsrValidationFailure,
self.validator.validate_lifetime,
which, policy_info)
def test_ipsec_connection_with_mtu_at_limits(self):
"""Test IPSec site-to-site connection with MTU at limits."""
conn_info = {'mtu': 1500}
self.validator.validate_mtu(conn_info)
conn_info = {'mtu': 9192}
self.validator.validate_mtu(conn_info)
def test_ipsec_connection_with_invalid_mtu(self):
"""Failure test of IPSec site connection with unsupported MTUs."""
conn_info = {'mtu': 1499}
self.assertRaises(validator.CsrValidationFailure,
self.validator.validate_mtu, conn_info)
conn_info = {'mtu': 9193}
self.assertRaises(validator.CsrValidationFailure,
self.validator.validate_mtu, conn_info)
def simulate_gw_ip_available(self):
"""Helper function indicating that tunnel has a gateway IP."""
def have_one():
return 1
self.router.gw_port.fixed_ips.__len__ = have_one
ip_addr_mock = mock.Mock()
self.router.gw_port.fixed_ips = [ip_addr_mock]
def test_have_public_ip_for_router(self):
"""Ensure that router for IPSec connection has gateway IP."""
self.simulate_gw_ip_available()
try:
self.validator.validate_public_ip_present(self.router)
except Exception:
self.fail("Unexpected exception on validation")
def test_router_with_missing_gateway_ip(self):
"""Failure test of IPSec connection with missing gateway IP."""
self.simulate_gw_ip_available()
self.router.gw_port = None
self.assertRaises(validator.CsrValidationFailure,
self.validator.validate_public_ip_present,
self.router)
def test_peer_id_is_an_ip_address(self):
"""Ensure peer ID is an IP address for IPsec connection create."""
ipsec_sitecon = {'peer_id': '10.10.10.10'}
self.validator.validate_peer_id(ipsec_sitecon)
def test_peer_id_is_not_ip_address(self):
"""Failure test of peer_id that is not an IP address."""
ipsec_sitecon = {'peer_id': 'some-site.com'}
self.assertRaises(validator.CsrValidationFailure,
self.validator.validate_peer_id, ipsec_sitecon)
def test_validation_for_create_ipsec_connection(self):
"""Ensure all validation passes for IPSec site connection create."""
self.simulate_gw_ip_available()
self.service_plugin.get_ikepolicy = mock.Mock(
return_value={'ike_version': 'v1',
'lifetime': {'units': 'seconds', 'value': 60}})
self.service_plugin.get_ipsecpolicy = mock.Mock(
return_value={'lifetime': {'units': 'seconds', 'value': 120},
'encapsulation_mode': 'tunnel'})
self.service_plugin.get_vpnservice = mock.Mock(
return_value=self.vpn_service)
self.l3_plugin._get_router = mock.Mock(return_value=self.router)
# Provide the minimum needed items to validate
ipsec_sitecon = {'id': '1',
'vpnservice_id': FAKE_SERVICE_ID,
'ikepolicy_id': '123',
'ipsecpolicy_id': '2',
'mtu': 1500,
'peer_id': '10.10.10.10'}
# Using defaults for DPD info
expected = {'dpd_action': 'hold',
'dpd_interval': 30,
'dpd_timeout': 120}
expected.update(ipsec_sitecon)
self.validator.assign_sensible_ipsec_sitecon_defaults(ipsec_sitecon)
self.validator.validate_ipsec_site_connection(self.context,
ipsec_sitecon, IPV4)
self.assertEqual(expected, ipsec_sitecon)
def test_ipsec_encap_mode_unsupported(self):
"""Failure test for unsupported encap mode for IPsec policy."""
policy_info = {'encapsulation_mode': 'transport'}
self.assertRaises(validator.CsrValidationFailure,
self.validator.validate_ipsec_encap_mode,
policy_info)
class TestCiscoIPsecDriverMapping(base.BaseTestCase):
def setUp(self):
super(TestCiscoIPsecDriverMapping, self).setUp()
self.context = mock.patch.object(n_ctx, 'Context').start()
self.session = self.context.session
self.query_mock = self.session.query.return_value.order_by
def test_identifying_first_mapping_id(self):
"""Make sure first available ID is obtained for each ID type."""
# Simulate mapping table is empty - get first one
self.query_mock.return_value = []
next_id = csr_db.get_next_available_tunnel_id(self.session)
self.assertEqual(0, next_id)
next_id = csr_db.get_next_available_ike_policy_id(self.session)
self.assertEqual(1, next_id)
next_id = csr_db.get_next_available_ipsec_policy_id(self.session)
self.assertEqual(1, next_id)
def test_last_mapping_id_available(self):
"""Make sure can get the last ID for each of the table types."""
# Simulate query indicates table is full
self.query_mock.return_value = [
(x, ) for x in moves.range(csr_db.MAX_CSR_TUNNELS - 1)]
next_id = csr_db.get_next_available_tunnel_id(self.session)
self.assertEqual(csr_db.MAX_CSR_TUNNELS - 1, next_id)
self.query_mock.return_value = [
(x, ) for x in moves.range(1, csr_db.MAX_CSR_IKE_POLICIES)]
next_id = csr_db.get_next_available_ike_policy_id(self.session)
self.assertEqual(csr_db.MAX_CSR_IKE_POLICIES, next_id)
self.query_mock.return_value = [
(x, ) for x in moves.range(1, csr_db.MAX_CSR_IPSEC_POLICIES)]
next_id = csr_db.get_next_available_ipsec_policy_id(self.session)
self.assertEqual(csr_db.MAX_CSR_IPSEC_POLICIES, next_id)
def test_reusing_first_available_mapping_id(self):
"""Ensure that we reuse the first available ID.
Make sure that the next lowest ID is obtained from the mapping
table when there are "holes" from deletions. Database query sorts
the entries, so will return them in order. Using tunnel ID, as the
logic is the same for each ID type.
"""
self.query_mock.return_value = [(0, ), (1, ), (2, ), (5, ), (6, )]
next_id = csr_db.get_next_available_tunnel_id(self.session)
self.assertEqual(3, next_id)
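# A minimal sketch (an assumption for illustration, not the csr_db
# implementation) of the "first free ID" behaviour the test above asserts:
# walk the expected sequence against the sorted used IDs and return the
# first value that is missing. The helper name is hypothetical.
def _example_first_free_id(used_ids, first_id=0):
    expected = first_id
    for used in sorted(used_ids):
        if used != expected:
            break
        expected += 1
    return expected

# Example: _example_first_free_id([0, 1, 2, 5, 6]) returns 3, matching the
# tunnel-ID case exercised in test_reusing_first_available_mapping_id().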
def test_no_more_mapping_ids_available(self):
"""Failure test of trying to reserve ID, when none available."""
self.query_mock.return_value = [
(x, ) for x in moves.range(csr_db.MAX_CSR_TUNNELS)]
self.assertRaises(IndexError, csr_db.get_next_available_tunnel_id,
self.session)
self.query_mock.return_value = [
(x, ) for x in moves.range(1, csr_db.MAX_CSR_IKE_POLICIES + 1)]
self.assertRaises(IndexError, csr_db.get_next_available_ike_policy_id,
self.session)
self.query_mock.return_value = [
(x, ) for x in moves.range(1, csr_db.MAX_CSR_IPSEC_POLICIES + 1)]
self.assertRaises(IndexError,
csr_db.get_next_available_ipsec_policy_id,
self.session)
def test_create_tunnel_mappings(self):
"""Ensure successfully create new tunnel mappings."""
# Simulate that first IDs are obtained
self.query_mock.return_value = []
map_db_mock = mock.patch.object(csr_db, 'IdentifierMap').start()
conn_info = {'ikepolicy_id': '10',
'ipsecpolicy_id': '50',
'id': '100',
'tenant_id': '1000'}
csr_db.create_tunnel_mapping(self.context, conn_info)
map_db_mock.assert_called_once_with(csr_tunnel_id=0,
csr_ike_policy_id=1,
csr_ipsec_policy_id=1,
ipsec_site_conn_id='100',
tenant_id='1000')
# Create another, with next ID of 2 for all IDs (not mocking each
# ID separately, so will not have different IDs).
self.query_mock.return_value = [(0, ), (1, )]
map_db_mock.reset_mock()
conn_info = {'ikepolicy_id': '20',
'ipsecpolicy_id': '60',
'id': '101',
'tenant_id': '1000'}
csr_db.create_tunnel_mapping(self.context, conn_info)
map_db_mock.assert_called_once_with(csr_tunnel_id=2,
csr_ike_policy_id=2,
csr_ipsec_policy_id=2,
ipsec_site_conn_id='101',
tenant_id='1000')
class TestCiscoIPsecDriver(testlib_api.SqlTestCase):
"""Test that various incoming requests are sent to device driver."""
def setUp(self):
super(TestCiscoIPsecDriver, self).setUp()
mock.patch('neutron.common.rpc.create_connection').start()
self._fake_vpn_router_id = _uuid()
service_plugin = mock.Mock()
service_plugin._get_vpnservice.return_value = {
'router_id': self._fake_vpn_router_id
}
l3_plugin = mock.Mock()
mock.patch(
'neutron.manager.NeutronManager.get_service_plugins',
return_value={constants.L3_ROUTER_NAT: l3_plugin}).start()
l3_plugin.get_host_for_router.return_value = FAKE_HOST
l3_agent = mock.Mock()
l3_agent.host = 'some-host'
l3_plugin.get_l3_agents_hosting_routers.return_value = [l3_agent]
self.driver = ipsec_driver.CiscoCsrIPsecVPNDriver(service_plugin)
mock.patch.object(csr_db, 'create_tunnel_mapping').start()
self.context = n_ctx.Context('some_user', 'some_tenant')
def _test_update(self, func, args, additional_info=None):
with mock.patch.object(self.driver.agent_rpc.client,
'cast') as rpc_mock, \
mock.patch.object(self.driver.agent_rpc.client,
'prepare') as prepare_mock:
prepare_mock.return_value = self.driver.agent_rpc.client
func(self.context, *args)
prepare_args = {'server': 'fake_host', 'version': '1.0'}
prepare_mock.assert_called_once_with(**prepare_args)
rpc_mock.assert_called_once_with(self.context, 'vpnservice_updated',
**additional_info)
def test_create_ipsec_site_connection(self):
self._test_update(self.driver.create_ipsec_site_connection,
[FAKE_VPN_CONNECTION],
{'reason': 'ipsec-conn-create',
'router': {'id': self._fake_vpn_router_id}})
def test_update_ipsec_site_connection(self):
self._test_update(self.driver.update_ipsec_site_connection,
[FAKE_VPN_CONNECTION, FAKE_VPN_CONNECTION],
{'reason': 'ipsec-conn-update',
'router': {'id': self._fake_vpn_router_id}})
def test_delete_ipsec_site_connection(self):
self._test_update(self.driver.delete_ipsec_site_connection,
[FAKE_VPN_CONNECTION],
{'reason': 'ipsec-conn-delete',
'router': {'id': self._fake_vpn_router_id}})
def test_update_vpnservice(self):
self._test_update(self.driver.update_vpnservice,
[FAKE_VPN_SERVICE, FAKE_VPN_SERVICE],
{'reason': 'vpn-service-update',
'router': {'id': FAKE_VPN_SERVICE['router_id']}})
def test_delete_vpnservice(self):
self._test_update(self.driver.delete_vpnservice,
[FAKE_VPN_SERVICE],
{'reason': 'vpn-service-delete',
'router': {'id': FAKE_VPN_SERVICE['router_id']}})
class TestCiscoIPsecDriverRequests(base.BaseTestCase):
"""Test handling device driver requests for service info."""
def setUp(self):
super(TestCiscoIPsecDriverRequests, self).setUp()
mock.patch('neutron.common.rpc.create_connection').start()
service_plugin = mock.Mock()
self.driver = ipsec_driver.CiscoCsrIPsecVPNDriver(service_plugin)
def test_build_router_tunnel_interface_name(self):
"""Check formation of inner/outer interface name for CSR router."""
router_info = {
'_interfaces': [
{'hosting_info': {'segmentation_id': 100,
'hosting_port_name': 't1_p:1'}}
],
'gw_port':
{'hosting_info': {'segmentation_id': 200,
'hosting_port_name': 't2_p:1'}}
}
self.assertEqual(
'GigabitEthernet2.100',
self.driver._create_interface(router_info['_interfaces'][0]))
self.assertEqual(
'GigabitEthernet3.200',
self.driver._create_interface(router_info['gw_port']))
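# A minimal sketch of the naming scheme this test asserts (an assumption
# about the pattern, not necessarily the driver's exact code): the CSR
# interface index appears to be the number after 't' in the hosting port
# name plus one, and the VLAN sub-interface is the segmentation ID, so
# 't1_p:1' with segmentation 100 maps to 'GigabitEthernet2.100'.
def _example_interface_name(port_info):
    hosting = port_info['hosting_info']
    port_index = int(hosting['hosting_port_name'].split('_')[0][1:]) + 1
    return 'GigabitEthernet%d.%d' % (port_index, hosting['segmentation_id'])

# _example_interface_name({'hosting_info': {'segmentation_id': 100,
#                                           'hosting_port_name': 't1_p:1'}})
# returns 'GigabitEthernet2.100', matching the assertion above.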
def test_build_router_info(self):
"""Check creation of CSR info to send to device driver."""
router_info = {
'hosting_device': {
'management_ip_address': '1.1.1.1',
'credentials': {'username': 'me', 'password': 'secret'}
},
'gw_port':
{'hosting_info': {'segmentation_id': 101,
'hosting_port_name': 't2_p:1'}},
'id': u'c607b58e-f150-4289-b83f-45623578d122',
'_interfaces': [
{'hosting_info': {'segmentation_id': 100,
'hosting_port_name': 't1_p:1'}}
]
}
expected = {'rest_mgmt_ip': '1.1.1.1',
'username': 'me',
'password': 'secret',
'inner_if_name': 'GigabitEthernet2.100',
'outer_if_name': 'GigabitEthernet3.101',
'vrf': 'nrouter-c607b5',
'timeout': 30}
self.assertEqual(expected, self.driver._get_router_info(router_info))
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/vpn/service_drivers/test_ipsec.py 0000664 0005670 0005671 00000045732 12701407726 033404 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron import context as n_ctx
from neutron.plugins.common import constants as nconstants
from oslo_utils import uuidutils
from neutron_vpnaas.services.vpn.service_drivers import ipsec as ipsec_driver
from neutron_vpnaas.tests import base
_uuid = uuidutils.generate_uuid
FAKE_SERVICE_ID = _uuid()
FAKE_VPN_CONNECTION = {
'vpnservice_id': FAKE_SERVICE_ID
}
FAKE_ROUTER_ID = _uuid()
FAKE_VPN_SERVICE = {
'router_id': FAKE_ROUTER_ID
}
FAKE_HOST = 'fake_host'
FAKE_CONN_ID = _uuid()
IPSEC_SERVICE_DRIVER = ('neutron_vpnaas.services.vpn.service_drivers.'
'ipsec.IPsecVPNDriver')
class FakeSqlQueryObject(dict):
"""To fake SqlAlchemy query object and access keys as attributes."""
def __init__(self, **entries):
self.__dict__.update(entries)
super(FakeSqlQueryObject, self).__init__(**entries)
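# Brief usage illustration (not part of the original module): values stored
# in a FakeSqlQueryObject are reachable both as dict items and as attributes,
# which is what lets the fakes below stand in for SQLAlchemy result rows.
def _example_fake_query_object_usage():
    row = FakeSqlQueryObject(id='foo-ike', name='ike-name')
    return row.id == row['id'] == 'foo-ike'  # True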
class TestIPsecDriver(base.BaseTestCase):
def setUp(self):
super(TestIPsecDriver, self).setUp()
mock.patch('neutron.common.rpc.create_connection').start()
l3_agent = mock.Mock()
l3_agent.host = FAKE_HOST
plugin = mock.Mock()
plugin.get_l3_agents_hosting_routers.return_value = [l3_agent]
plugin_p = mock.patch('neutron.manager.NeutronManager.get_plugin')
get_plugin = plugin_p.start()
get_plugin.return_value = plugin
service_plugin_p = mock.patch(
'neutron.manager.NeutronManager.get_service_plugins')
get_service_plugin = service_plugin_p.start()
get_service_plugin.return_value = {nconstants.L3_ROUTER_NAT: plugin}
self.svc_plugin = mock.Mock()
self.svc_plugin.get_l3_agents_hosting_routers.return_value = [l3_agent]
self._fake_vpn_router_id = _uuid()
self.svc_plugin._get_vpnservice.return_value = {
'router_id': self._fake_vpn_router_id
}
self.driver = ipsec_driver.IPsecVPNDriver(self.svc_plugin)
def _test_update(self, func, args, additional_info=None):
ctxt = n_ctx.Context('', 'somebody')
with mock.patch.object(self.driver.agent_rpc.client, 'cast'
) as rpc_mock, \
mock.patch.object(self.driver.agent_rpc.client, 'prepare'
) as prepare_mock:
prepare_mock.return_value = self.driver.agent_rpc.client
func(ctxt, *args)
prepare_args = {'server': 'fake_host', 'version': '1.0'}
prepare_mock.assert_called_once_with(**prepare_args)
rpc_mock.assert_called_once_with(ctxt, 'vpnservice_updated',
**additional_info)
def test_create_ipsec_site_connection(self):
self._test_update(self.driver.create_ipsec_site_connection,
[FAKE_VPN_CONNECTION],
{'router': {'id': self._fake_vpn_router_id}})
def test_update_ipsec_site_connection(self):
self._test_update(self.driver.update_ipsec_site_connection,
[FAKE_VPN_CONNECTION, FAKE_VPN_CONNECTION],
{'router': {'id': self._fake_vpn_router_id}})
def test_delete_ipsec_site_connection(self):
self._test_update(self.driver.delete_ipsec_site_connection,
[FAKE_VPN_CONNECTION],
{'router': {'id': self._fake_vpn_router_id}})
def test_update_vpnservice(self):
self._test_update(self.driver.update_vpnservice,
[FAKE_VPN_SERVICE, FAKE_VPN_SERVICE],
{'router': {'id': FAKE_VPN_SERVICE['router_id']}})
def test_delete_vpnservice(self):
self._test_update(self.driver.delete_vpnservice,
[FAKE_VPN_SERVICE],
{'router': {'id': FAKE_VPN_SERVICE['router_id']}})
def prepare_dummy_query_objects(self, info):
"""Create fake query objects to test dict creation for sync oper."""
external_ip = '10.0.0.99'
peer_address = '10.0.0.2'
peer_endpoints = info.get('peer_endpoints', [])
local_endpoints = info.get('local_endpoints', [])
peer_cidrs = info.get('peer_cidrs', ['40.4.0.0/24', '50.5.0.0/24'])
peer_id = info.get('peer_id', '30.30.0.0')
fake_ikepolicy = FakeSqlQueryObject(id='foo-ike', name='ike-name')
fake_ipsecpolicy = FakeSqlQueryObject(id='foo-ipsec')
fake_peer_cidrs_list = [
FakeSqlQueryObject(cidr=cidr, ipsec_site_connection_id='conn-id')
for cidr in peer_cidrs]
peer_epg_id = 'peer-epg-id' if peer_endpoints else None
local_epg_id = 'local-epg-id' if local_endpoints else None
fake_ipsec_conn = FakeSqlQueryObject(id='conn-id',
peer_id=peer_id,
peer_address=peer_address,
ikepolicy=fake_ikepolicy,
ipsecpolicy=fake_ipsecpolicy,
peer_ep_group_id=peer_epg_id,
local_ep_group_id=local_epg_id,
peer_cidrs=fake_peer_cidrs_list)
if peer_endpoints:
fake_peer_ep_group = FakeSqlQueryObject(id=peer_epg_id)
fake_peer_ep_group.endpoints = [
FakeSqlQueryObject(endpoint=ep,
endpoint_group_id=peer_epg_id)
for ep in peer_endpoints]
fake_ipsec_conn.peer_ep_group = fake_peer_ep_group
if local_endpoints:
fake_local_ep_group = FakeSqlQueryObject(id=local_epg_id)
fake_local_ep_group.endpoints = [
FakeSqlQueryObject(endpoint=ep,
endpoint_group_id=local_epg_id)
for ep in local_endpoints]
fake_ipsec_conn.local_ep_group = fake_local_ep_group
subnet_id = None
else:
subnet_id = 'foo-subnet-id'
fake_gw_port = {'fixed_ips': [{'ip_address': external_ip}]}
fake_router = FakeSqlQueryObject(gw_port=fake_gw_port)
fake_vpnservice = FakeSqlQueryObject(id='foo-vpn-id', name='foo-vpn',
description='foo-vpn-service',
admin_state_up=True,
status='active',
external_v4_ip=external_ip,
external_v6_ip=None,
subnet_id=subnet_id,
router_id='foo-router-id')
if local_endpoints:
fake_vpnservice.subnet = None
else:
fake_subnet = FakeSqlQueryObject(id=subnet_id,
name='foo-subnet',
cidr='9.0.0.0/16',
network_id='foo-net-id')
fake_vpnservice.subnet = fake_subnet
fake_vpnservice.router = fake_router
fake_vpnservice.ipsec_site_connections = [fake_ipsec_conn]
return fake_vpnservice
def build_expected_dict(self, info):
"""Create the expected dict used in sync operations.
The default is to use non-endpoint groups, where the peer CIDRs come
from the peer_cidrs arguments, the local CIDRs come from the (sole)
subnet CIDR, and there is subnet info. Tests will customize the peer
ID and peer CIDRs.
"""
external_ip = '10.0.0.99'
peer_id = info.get('peer_id', '30.30.0.0')
peer_cidrs = info.get('peer_cidrs', ['40.4.0.0/24', '50.5.0.0/24'])
return {'name': 'foo-vpn',
'id': 'foo-vpn-id',
'description': 'foo-vpn-service',
'admin_state_up': True,
'status': 'active',
'external_v4_ip': external_ip,
'external_v6_ip': None,
'router_id': 'foo-router-id',
'subnet': {'cidr': '9.0.0.0/16',
'id': 'foo-subnet-id',
'name': 'foo-subnet',
'network_id': 'foo-net-id'},
'subnet_id': 'foo-subnet-id',
'external_ip': external_ip,
'ipsec_site_connections': [
{'id': 'conn-id',
'peer_id': peer_id,
'external_ip': external_ip,
'peer_address': '10.0.0.2',
'ikepolicy': {'id': 'foo-ike',
'name': 'ike-name'},
'ipsecpolicy': {'id': 'foo-ipsec'},
'peer_ep_group_id': None,
'local_ep_group_id': None,
'peer_cidrs': peer_cidrs,
'local_cidrs': ['9.0.0.0/16'],
'local_ip_vers': 4}
]}
def build_expected_dict_for_endpoints(self, info):
"""Create the expected dict used in sync operations for endpoints.
The local and peer CIDRs come from the endpoint groups (with the
local CIDR translated from the corresponding subnets specified).
Tests will customize CIDRs, and the subnet, which is needed for
backward compatibility with agents, during rolling upgrades.
"""
external_ip = '10.0.0.99'
peer_id = '30.30.0.0'
return {'name': 'foo-vpn',
'id': 'foo-vpn-id',
'description': 'foo-vpn-service',
'admin_state_up': True,
'status': 'active',
'external_v4_ip': external_ip,
'external_v6_ip': None,
'router_id': 'foo-router-id',
'subnet': None,
'subnet_id': None,
'external_ip': external_ip,
'ipsec_site_connections': [
{'id': 'conn-id',
'peer_id': peer_id,
'external_ip': external_ip,
'peer_address': '10.0.0.2',
'ikepolicy': {'id': 'foo-ike',
'name': 'ike-name'},
'ipsecpolicy': {'id': 'foo-ipsec'},
'peer_ep_group_id': 'peer-epg-id',
'local_ep_group_id': 'local-epg-id',
'peer_cidrs': info['peers'],
'local_cidrs': info['locals'],
'local_ip_vers': info['vers']}
]}
def test_make_vpnservice_dict_peer_id_is_ipaddr(self):
"""Peer ID as IP should be copied as-is, when creating dict."""
subnet_cidr_map = {}
peer_id_as_ip = {'peer_id': '10.0.0.2'}
fake_service = self.prepare_dummy_query_objects(peer_id_as_ip)
expected_dict = self.build_expected_dict(peer_id_as_ip)
actual_dict = self.driver.make_vpnservice_dict(fake_service,
subnet_cidr_map)
self.assertEqual(expected_dict, actual_dict)
# make sure that ipsec_site_conn peer_id is not updated by
# _make_vpnservice_dict (bug #1423244)
self.assertEqual(peer_id_as_ip['peer_id'],
fake_service.ipsec_site_connections[0].peer_id)
def test_make_vpnservice_dict_peer_id_is_string(self):
"""Peer ID as string should have '@' prepended, when creating dict."""
subnet_cidr_map = {}
peer_id_as_name = {'peer_id': 'foo.peer.id'}
fake_service = self.prepare_dummy_query_objects(peer_id_as_name)
expected_peer_id = {'peer_id': '@foo.peer.id'}
expected_dict = self.build_expected_dict(expected_peer_id)
actual_dict = self.driver.make_vpnservice_dict(fake_service,
subnet_cidr_map)
self.assertEqual(expected_dict, actual_dict)
# make sure that ipsec_site_conn peer_id is not updated by
# _make_vpnservice_dict (bug #1423244)
self.assertEqual(peer_id_as_name['peer_id'],
fake_service.ipsec_site_connections[0].peer_id)
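# A minimal sketch (an assumption for illustration, not the driver's actual
# code) of the peer-ID normalization the two tests above assert: an IP
# address passes through unchanged, anything else (e.g. an FQDN) gets '@'
# prepended. The sketch uses Python 3's stdlib 'ipaddress'; the real driver
# may detect IP addresses differently.
import ipaddress

def _example_normalize_peer_id(peer_id):
    try:
        ipaddress.ip_address(peer_id)
        return peer_id
    except ValueError:
        return '@' + peer_id

# _example_normalize_peer_id('10.0.0.2') -> '10.0.0.2'
# _example_normalize_peer_id('foo.peer.id') -> '@foo.peer.id'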
def test_make_vpnservice_dict_peer_cidrs_from_peer_cidr_table(self):
"""Peer CIDRs list populated from peer_cidr table.
User provides peer CIDRs as parameters to IPSec site-to-site
connection API, and they are stored in the peercidrs table.
"""
subnet_cidr_map = {}
peer_cidrs = {'peer_cidrs': ['80.0.0.0/24', '90.0.0.0/24']}
fake_service = self.prepare_dummy_query_objects(peer_cidrs)
expected_dict = self.build_expected_dict(peer_cidrs)
actual_dict = self.driver.make_vpnservice_dict(fake_service,
subnet_cidr_map)
self.assertEqual(expected_dict, actual_dict)
def test_make_vpnservice_dict_cidrs_from_endpoints(self):
"""CIDRs list populated from local and peer endpoints.
User provides peer and local endpoint group IDs in the IPSec
site-to-site connection API. The endpoint groups contain peer
CIDRs and local subnets (which will be mapped to CIDRs).
"""
# Cannot have peer CIDRs specified, when using endpoint group
subnet_cidr_map = {'local-sn1': '5.0.0.0/16',
'local-sn2': '5.1.0.0/16'}
endpoint_groups = {'peer_cidrs': [],
'peer_endpoints': ['80.0.0.0/24', '90.0.0.0/24'],
'local_endpoints': ['local-sn1', 'local-sn2']}
expected_cidrs = {'peers': ['80.0.0.0/24', '90.0.0.0/24'],
'locals': ['5.0.0.0/16', '5.1.0.0/16'],
'vers': 4}
fake_service = self.prepare_dummy_query_objects(endpoint_groups)
expected_dict = self.build_expected_dict_for_endpoints(expected_cidrs)
expected_dict['subnet'] = {'cidr': '5.0.0.0/16'}
actual_dict = self.driver.make_vpnservice_dict(fake_service,
subnet_cidr_map)
self.assertEqual(expected_dict, actual_dict)
def test_make_vpnservice_dict_v6_cidrs_from_endpoints(self):
"""IPv6 CIDRs list populated from local and peer endpoints."""
# Cannot have peer CIDRs specified, when using endpoint group
subnet_cidr_map = {'local-sn1': '2002:0a00:0000::/48',
'local-sn2': '2002:1400:0000::/48'}
endpoint_groups = {'peer_cidrs': [],
'peer_endpoints': ['2002:5000:0000::/48',
'2002:5a00:0000::/48'],
'local_endpoints': ['local-sn1', 'local-sn2']}
expected_cidrs = {'peers': ['2002:5000:0000::/48',
'2002:5a00:0000::/48'],
'locals': ['2002:0a00:0000::/48',
'2002:1400:0000::/48'],
'vers': 6}
fake_service = self.prepare_dummy_query_objects(endpoint_groups)
expected_dict = self.build_expected_dict_for_endpoints(expected_cidrs)
expected_dict['subnet'] = {'cidr': '2002:0a00:0000::/48'}
actual_dict = self.driver.make_vpnservice_dict(fake_service,
subnet_cidr_map)
self.assertEqual(expected_dict, actual_dict)
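# A minimal sketch (an assumption for illustration, not the driver's code) of
# the endpoint-group handling the two tests above assert: peer CIDRs are
# taken verbatim from the peer endpoint group, while local endpoints are
# subnet IDs that must be translated to CIDRs through the subnet_cidr_map
# passed to make_vpnservice_dict(). The helper name is hypothetical.
def _example_local_cidrs(local_endpoints, subnet_cidr_map):
    return [subnet_cidr_map[subnet_id] for subnet_id in local_endpoints]

# Example: _example_local_cidrs(['local-sn1', 'local-sn2'],
#                               {'local-sn1': '5.0.0.0/16',
#                                'local-sn2': '5.1.0.0/16'})
# returns ['5.0.0.0/16', '5.1.0.0/16'], as expected above.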
def test_get_external_ip_based_on_ipv4_peer(self):
vpnservice = mock.Mock()
vpnservice.external_v4_ip = '10.0.0.99'
vpnservice.external_v6_ip = '2001::1'
ipsec_sitecon = {'id': FAKE_CONN_ID, 'peer_address': '10.0.0.9'}
ip_to_use = self.driver.get_external_ip_based_on_peer(vpnservice,
ipsec_sitecon)
self.assertEqual('10.0.0.99', ip_to_use)
def test_get_external_ip_based_on_ipv6_peer(self):
vpnservice = mock.Mock()
vpnservice.external_v4_ip = '10.0.0.99'
vpnservice.external_v6_ip = '2001::1'
ipsec_sitecon = {'id': FAKE_CONN_ID, 'peer_address': '2001::5'}
ip_to_use = self.driver.get_external_ip_based_on_peer(vpnservice,
ipsec_sitecon)
self.assertEqual('2001::1', ip_to_use)
def test_get_ipv4_gw_ip(self):
vpnservice = mock.Mock()
vpnservice.router.gw_port = {'fixed_ips':
[{'ip_address': '10.0.0.99'}]}
v4_ip, v6_ip = self.driver._get_gateway_ips(vpnservice.router)
self.assertEqual('10.0.0.99', v4_ip)
self.assertIsNone(v6_ip)
def test_get_ipv6_gw_ip(self):
vpnservice = mock.Mock()
vpnservice.router.gw_port = {'fixed_ips': [{'ip_address': '2001::1'}]}
v4_ip, v6_ip = self.driver._get_gateway_ips(vpnservice.router)
self.assertIsNone(v4_ip)
self.assertEqual('2001::1', v6_ip)
def test_get_both_gw_ips(self):
vpnservice = mock.Mock()
vpnservice.router.gw_port = {'fixed_ips': [{'ip_address': '10.0.0.99'},
{'ip_address': '2001::1'}]}
v4_ip, v6_ip = self.driver._get_gateway_ips(vpnservice.router)
self.assertEqual('10.0.0.99', v4_ip)
self.assertEqual('2001::1', v6_ip)
def test_use_first_gw_ips_when_multiples(self):
vpnservice = mock.Mock()
vpnservice.router.gw_port = {'fixed_ips': [{'ip_address': '10.0.0.99'},
{'ip_address': '20.0.0.99'},
{'ip_address': '2001::1'},
{'ip_address': 'fd00::4'}]}
v4_ip, v6_ip = self.driver._get_gateway_ips(vpnservice.router)
self.assertEqual('10.0.0.99', v4_ip)
self.assertEqual('2001::1', v6_ip)
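# A minimal sketch (an assumption for illustration, not the driver's actual
# _get_gateway_ips) of the selection the gateway-IP tests above assert: the
# first IPv4 and first IPv6 address found in the router's gw_port 'fixed_ips'
# win, and later addresses of the same family are ignored. Assumes Python 3's
# stdlib 'ipaddress'.
import ipaddress

def _example_get_gateway_ips(gw_port):
    v4_ip = v6_ip = None
    for fixed_ip in gw_port['fixed_ips']:
        addr = fixed_ip['ip_address']
        version = ipaddress.ip_address(addr).version
        if version == 4 and v4_ip is None:
            v4_ip = addr
        elif version == 6 and v6_ip is None:
            v6_ip = addr
    return v4_ip, v6_ip

# Example: _example_get_gateway_ips({'fixed_ips': [{'ip_address': '10.0.0.99'},
#                                                  {'ip_address': '2001::1'}]})
# returns ('10.0.0.99', '2001::1').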
def test_store_gw_ips_on_service_create(self):
vpnservice = mock.Mock()
self.svc_plugin._get_vpnservice.return_value = vpnservice
vpnservice.router.gw_port = {'fixed_ips': [{'ip_address': '10.0.0.99'},
{'ip_address': '2001::1'}]}
ctxt = n_ctx.Context('', 'somebody')
vpnservice_dict = {'id': FAKE_SERVICE_ID,
'router_id': FAKE_ROUTER_ID}
self.driver.create_vpnservice(ctxt, vpnservice_dict)
self.svc_plugin.set_external_tunnel_ips.assert_called_once_with(
ctxt, FAKE_SERVICE_ID, v4_ip='10.0.0.99', v6_ip='2001::1')
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/vpn/service_drivers/test_vyatta_ipsec.py 0000664 0005670 0005671 00000007777 12701407726 035003 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2015 Brocade Communications System, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from neutron import context as n_ctx
from neutron.plugins.common import constants
from oslo_utils import uuidutils
from neutron_vpnaas.services.vpn.service_drivers import vyatta_ipsec
from neutron_vpnaas.tests import base
_uuid = uuidutils.generate_uuid
FAKE_HOST = 'fake_host'
FAKE_SERVICE_ID = _uuid()
FAKE_VPN_CONNECTION = {
'vpnservice_id': FAKE_SERVICE_ID
}
FAKE_ROUTER_ID = _uuid()
FAKE_VPN_SERVICE = {
'router_id': FAKE_ROUTER_ID
}
class TestVyattaDriver(base.BaseTestCase):
def setUp(self):
super(TestVyattaDriver, self).setUp()
mock.patch('neutron.common.rpc.create_connection').start()
l3_agent = mock.Mock()
l3_agent.host = FAKE_HOST
plugin = mock.Mock()
plugin.get_l3_agents_hosting_routers.return_value = [l3_agent]
plugin_p = mock.patch('neutron.manager.NeutronManager.get_plugin')
get_plugin = plugin_p.start()
get_plugin.return_value = plugin
service_plugin_p = mock.patch(
'neutron.manager.NeutronManager.get_service_plugins')
get_service_plugin = service_plugin_p.start()
get_service_plugin.return_value = {constants.L3_ROUTER_NAT: plugin}
service_plugin = mock.Mock()
service_plugin.get_l3_agents_hosting_routers.return_value = [l3_agent]
self._fake_vpn_router_id = _uuid()
service_plugin._get_vpnservice.return_value = {
'router_id': self._fake_vpn_router_id
}
self.driver = vyatta_ipsec.VyattaIPsecDriver(service_plugin)
def _test_update(self, func, args, additional_info=None):
ctxt = n_ctx.Context('', 'somebody')
with mock.patch.object(self.driver.agent_rpc.client, 'cast'
) as rpc_mock, \
mock.patch.object(self.driver.agent_rpc.client, 'prepare'
) as prepare_mock:
prepare_mock.return_value = self.driver.agent_rpc.client
func(ctxt, *args)
prepare_args = {'server': 'fake_host', 'version': '1.0'}
prepare_mock.assert_called_once_with(**prepare_args)
rpc_mock.assert_called_once_with(ctxt, 'vpnservice_updated',
**additional_info)
def test_create_ipsec_site_connection(self):
self._test_update(self.driver.create_ipsec_site_connection,
[FAKE_VPN_CONNECTION],
{'router': {'id': self._fake_vpn_router_id}})
def test_update_ipsec_site_connection(self):
self._test_update(self.driver.update_ipsec_site_connection,
[FAKE_VPN_CONNECTION, FAKE_VPN_CONNECTION],
{'router': {'id': self._fake_vpn_router_id}})
def test_delete_ipsec_site_connection(self):
self._test_update(self.driver.delete_ipsec_site_connection,
[FAKE_VPN_CONNECTION],
{'router': {'id': self._fake_vpn_router_id}})
def test_update_vpnservice(self):
self._test_update(self.driver.update_vpnservice,
[FAKE_VPN_SERVICE, FAKE_VPN_SERVICE],
{'router': {'id': FAKE_VPN_SERVICE['router_id']}})
def test_delete_vpnservice(self):
self._test_update(self.driver.delete_vpnservice,
[FAKE_VPN_SERVICE],
{'router': {'id': FAKE_VPN_SERVICE['router_id']}})
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/vpn/test_vpn_service.py 0000664 0005670 0005671 00000012120 12701407726 031407 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.agent.l3 import legacy_router
from neutron.callbacks import registry
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron_vpnaas.extensions import vpnaas
from neutron_vpnaas.services.vpn import agent as vpn_agent
from neutron_vpnaas.services.vpn import device_drivers
from neutron_vpnaas.services.vpn import vpn_service
from neutron_vpnaas.tests import base
_uuid = uuidutils.generate_uuid
VPNAAS_NOP_DEVICE = ('neutron_vpnaas.tests.unit.services.'
'vpn.test_vpn_service.NoopDeviceDriver')
VPNAAS_DEFAULT_DEVICE = ('neutron_vpnaas.services.vpn.'
'device_drivers.ipsec.OpenSwanDriver')
FAKE_ROUTER_ID = _uuid()
class NoopDeviceDriver(device_drivers.DeviceDriver):
def sync(self, context, processes):
pass
def create_router(self, router_info):
pass
def destroy_router(self, process_id):
pass
class VPNBaseTestCase(base.BaseTestCase):
def setUp(self):
super(VPNBaseTestCase, self).setUp()
self.conf = cfg.CONF
self.ri_kwargs = {'router': {'id': FAKE_ROUTER_ID, 'ha': False},
'agent_conf': self.conf,
'interface_driver': mock.sentinel.interface_driver}
class TestVirtualPrivateNetworkDeviceDriverLoading(VPNBaseTestCase):
def setUp(self):
super(TestVirtualPrivateNetworkDeviceDriverLoading, self).setUp()
cfg.CONF.register_opts(vpn_agent.vpn_agent_opts, 'vpnagent')
self.agent = mock.Mock()
self.agent.conf = cfg.CONF
mock.patch.object(registry, 'subscribe').start()
self.service = vpn_service.VPNService(self.agent)
def test_loading_vpn_device_drivers(self):
"""Get two device drivers (in a list) for VPNaaS."""
cfg.CONF.set_override('vpn_device_driver',
[VPNAAS_NOP_DEVICE, VPNAAS_NOP_DEVICE],
'vpnagent')
drivers = self.service.load_device_drivers('host')
self.assertEqual(2, len(drivers))
self.assertIn(drivers[0].__class__.__name__, VPNAAS_NOP_DEVICE)
self.assertIn(drivers[1].__class__.__name__, VPNAAS_NOP_DEVICE)
def test_use_default_for_vpn_device_driver(self):
"""When no VPNaaS device drivers specified, we get the default."""
drivers = self.service.load_device_drivers('host')
self.assertEqual(1, len(drivers))
self.assertIn(drivers[0].__class__.__name__, VPNAAS_DEFAULT_DEVICE)
def test_fail_no_such_vpn_device_driver(self):
"""Failure test of import error for VPNaaS device driver."""
cfg.CONF.set_override('vpn_device_driver',
['no.such.class'],
'vpnagent')
self.assertRaises(vpnaas.DeviceDriverImportError,
self.service.load_device_drivers, 'host')
class TestVPNServiceEventHandlers(VPNBaseTestCase):
def setUp(self):
super(TestVPNServiceEventHandlers, self).setUp()
self.l3_agent = mock.Mock()
self.l3_agent.context = mock.sentinel.context
mock.patch.object(registry, 'subscribe').start()
self.service = vpn_service.VPNService(mock.Mock())
self.device_driver = mock.Mock()
self.l3_agent.device_drivers = [self.device_driver]
def test_router_added_actions(self):
ri = legacy_router.LegacyRouter(FAKE_ROUTER_ID, **self.ri_kwargs)
vpn_service.router_added_actions(mock.Mock(), mock.Mock(),
self.l3_agent, router=ri)
self.device_driver.create_router.assert_called_once_with(ri)
self.device_driver.sync.assert_called_once_with(self.l3_agent.context,
[ri.router])
def test_router_removed_actions(self):
ri = legacy_router.LegacyRouter(FAKE_ROUTER_ID, **self.ri_kwargs)
vpn_service.router_removed_actions(mock.Mock(), mock.Mock(),
self.l3_agent, router=ri)
self.device_driver.destroy_router.assert_called_once_with(
FAKE_ROUTER_ID)
def test_router_updated_actions(self):
ri = legacy_router.LegacyRouter(FAKE_ROUTER_ID, **self.ri_kwargs)
vpn_service.router_updated_actions(mock.Mock(), mock.Mock(),
self.l3_agent, router=ri)
self.device_driver.sync.assert_called_once_with(self.l3_agent.context,
[ri.router])
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/vpn/device_drivers/ 0000775 0005670 0005671 00000000000 12701410103 030433 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/vpn/device_drivers/__init__.py 0000664 0005670 0005671 00000000000 12701407726 032553 0 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/vpn/device_drivers/test_cisco_ipsec.py 0000664 0005670 0005671 00000226556 12701407726 034370 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import operator
import mock
from neutron import context
from neutron.plugins.common import constants
from oslo_utils import uuidutils
from neutron_vpnaas.services.vpn.device_drivers import (
cisco_csr_rest_client as csr_client)
from neutron_vpnaas.services.vpn.device_drivers \
import cisco_ipsec as ipsec_driver
from neutron_vpnaas.tests import base
import six
if six.PY3:
from http import client as httplib
else:
import httplib
_uuid = uuidutils.generate_uuid
FAKE_HOST = 'fake_host'
FAKE_ROUTER_ID = _uuid()
FAKE_VPN_SERVICE = {
'id': _uuid(),
'router_id': FAKE_ROUTER_ID,
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'subnet': {'cidr': '10.0.0.0/24'},
'ipsec_site_connections': [
{'peer_cidrs': ['20.0.0.0/24',
'30.0.0.0/24']},
{'peer_cidrs': ['40.0.0.0/24',
'50.0.0.0/24']}]
}
FIND_CFG_FOR_CSRS = ('neutron_vpnaas.services.vpn.device_drivers.cisco_ipsec.'
'find_available_csrs_from_config')
class TestCiscoCsrIPSecConnection(base.BaseTestCase):
def setUp(self):
super(TestCiscoCsrIPSecConnection, self).setUp()
self.conn_info = {
u'id': '123',
u'status': constants.PENDING_CREATE,
u'admin_state_up': True,
'psk': 'secret',
'peer_address': '192.168.1.2',
'peer_cidrs': ['10.1.0.0/24', '10.2.0.0/24'],
'mtu': 1500,
'ike_policy': {'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'pfs': 'Group5',
'ike_version': 'v1',
'lifetime_units': 'seconds',
'lifetime_value': 3600},
'ipsec_policy': {'transform_protocol': 'ah',
'encryption_algorithm': 'aes-128',
'auth_algorithm': 'sha1',
'pfs': 'group5',
'lifetime_units': 'seconds',
'lifetime_value': 3600},
'cisco': {'site_conn_id': 'Tunnel0',
'ike_policy_id': 222,
'ipsec_policy_id': 333}
}
self.csr = mock.Mock(spec=csr_client.CsrRestClient)
self.csr.status = 201 # All calls to CSR REST API succeed
self.ipsec_conn = ipsec_driver.CiscoCsrIPSecConnection(self.conn_info,
self.csr)
def test_create_ipsec_site_connection(self):
"""Ensure all steps are done to create an IPSec site connection.
Verify that each of the driver calls occurs (in order), and
the right information is stored for later deletion.
"""
expected = ['create_pre_shared_key',
'create_ike_policy',
'create_ipsec_policy',
'create_ipsec_connection',
'create_static_route',
'create_static_route']
expected_rollback_steps = [
ipsec_driver.RollbackStep(action='pre_shared_key',
resource_id='123',
title='Pre-Shared Key'),
ipsec_driver.RollbackStep(action='ike_policy',
resource_id=222,
title='IKE Policy'),
ipsec_driver.RollbackStep(action='ipsec_policy',
resource_id=333,
title='IPSec Policy'),
ipsec_driver.RollbackStep(action='ipsec_connection',
resource_id='Tunnel0',
title='IPSec Connection'),
ipsec_driver.RollbackStep(action='static_route',
resource_id='10.1.0.0_24_Tunnel0',
title='Static Route'),
ipsec_driver.RollbackStep(action='static_route',
resource_id='10.2.0.0_24_Tunnel0',
title='Static Route')]
self.ipsec_conn.create_ipsec_site_connection(mock.Mock(),
self.conn_info)
client_calls = [c[0] for c in self.csr.method_calls]
self.assertEqual(expected, client_calls)
self.assertEqual(expected_rollback_steps, self.ipsec_conn.steps)
def test_create_ipsec_site_connection_with_rollback(self):
"""Failure test of IPSec site conn creation that fails and rolls back.
Simulate a failure in the last create step (making routes for the
peer networks), and ensure that the create steps are called in
order (except for create_static_route), and that the delete
steps are called in reverse order. At the end, there should be no
rollback information for the connection.
"""
def fake_route_check_fails(*args):
if args[0] == 'Static Route':
# So that subsequent calls to CSR rest client (for rollback)
# will fake as passing.
self.csr.status = httplib.NO_CONTENT
raise ipsec_driver.CsrResourceCreateFailure(resource=args[0],
which=args[1])
with mock.patch.object(ipsec_driver.CiscoCsrIPSecConnection,
'_check_create',
side_effect=fake_route_check_fails):
expected = ['create_pre_shared_key',
'create_ike_policy',
'create_ipsec_policy',
'create_ipsec_connection',
'create_static_route',
'delete_ipsec_connection',
'delete_ipsec_policy',
'delete_ike_policy',
'delete_pre_shared_key']
self.ipsec_conn.create_ipsec_site_connection(mock.Mock(),
self.conn_info)
client_calls = [c[0] for c in self.csr.method_calls]
self.assertEqual(expected, client_calls)
self.assertEqual([], self.ipsec_conn.steps)
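# A minimal sketch (an assumption for illustration, not the driver's
# implementation) of the create-with-rollback pattern the two tests above
# exercise: each successful create records a step, and a failure replays the
# recorded steps in reverse before clearing them.
def _example_create_with_rollback(create_steps, rollback):
    done = []
    try:
        for name, action in create_steps:
            action()
            done.append(name)
    except Exception:
        for name in reversed(done):
            rollback(name)
        done = []
    return done  # empty if the create was rolled back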
def test_create_verification_with_error(self):
"""Negative test of create check step had failed."""
self.csr.status = httplib.NOT_FOUND
self.assertRaises(ipsec_driver.CsrResourceCreateFailure,
self.ipsec_conn._check_create, 'name', 'id')
def test_failure_with_invalid_create_step(self):
"""Negative test of invalid create step (programming error)."""
self.ipsec_conn.steps = []
try:
self.ipsec_conn.do_create_action('bogus', None, '123', 'Bad Step')
except ipsec_driver.CsrResourceCreateFailure:
pass
else:
self.fail('Expected exception with invalid create step')
def test_failure_with_invalid_delete_step(self):
"""Negative test of invalid delete step (programming error)."""
self.ipsec_conn.steps = [ipsec_driver.RollbackStep(action='bogus',
resource_id='123',
title='Bogus Step')]
try:
self.ipsec_conn.do_rollback()
except ipsec_driver.CsrResourceCreateFailure:
pass
else:
self.fail('Expected exception with invalid delete step')
def test_delete_ipsec_connection(self):
"""Perform delete of IPSec site connection and check steps done."""
# Simulate that a create was done with rollback steps stored
self.ipsec_conn.steps = [
ipsec_driver.RollbackStep(action='pre_shared_key',
resource_id='123',
title='Pre-Shared Key'),
ipsec_driver.RollbackStep(action='ike_policy',
resource_id=222,
title='IKE Policy'),
ipsec_driver.RollbackStep(action='ipsec_policy',
resource_id=333,
title='IPSec Policy'),
ipsec_driver.RollbackStep(action='ipsec_connection',
resource_id='Tunnel0',
title='IPSec Connection'),
ipsec_driver.RollbackStep(action='static_route',
resource_id='10.1.0.0_24_Tunnel0',
title='Static Route'),
ipsec_driver.RollbackStep(action='static_route',
resource_id='10.2.0.0_24_Tunnel0',
title='Static Route')]
expected = ['delete_static_route',
'delete_static_route',
'delete_ipsec_connection',
'delete_ipsec_policy',
'delete_ike_policy',
'delete_pre_shared_key']
self.ipsec_conn.delete_ipsec_site_connection(mock.Mock(), 123)
client_calls = [c[0] for c in self.csr.method_calls]
self.assertEqual(expected, client_calls)
class TestCiscoCsrIPsecConnectionCreateTransforms(base.BaseTestCase):
"""Verifies that config info is prepared/transformed correctly."""
def setUp(self):
super(TestCiscoCsrIPsecConnectionCreateTransforms, self).setUp()
self.conn_info = {
u'id': '123',
u'status': constants.PENDING_CREATE,
u'admin_state_up': True,
'psk': 'secret',
'peer_address': '192.168.1.2',
'peer_cidrs': ['10.1.0.0/24', '10.2.0.0/24'],
'mtu': 1500,
'ike_policy': {'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'pfs': 'Group5',
'ike_version': 'v1',
'lifetime_units': 'seconds',
'lifetime_value': 3600},
'ipsec_policy': {'transform_protocol': 'ah',
'encryption_algorithm': 'aes-128',
'auth_algorithm': 'sha1',
'pfs': 'group5',
'lifetime_units': 'seconds',
'lifetime_value': 3600},
'cisco': {'site_conn_id': 'Tunnel0',
'ike_policy_id': 222,
'ipsec_policy_id': 333}
}
self.csr = mock.Mock(spec=csr_client.CsrRestClient)
self.csr.tunnel_ip = '172.24.4.23'
self.ipsec_conn = ipsec_driver.CiscoCsrIPSecConnection(self.conn_info,
self.csr)
def test_invalid_attribute(self):
"""Negative test of unknown attribute - programming error."""
self.assertRaises(ipsec_driver.CsrDriverMismatchError,
self.ipsec_conn.translate_dialect,
'ike_policy', 'unknown_attr', self.conn_info)
def test_driver_unknown_mapping(self):
"""Negative test of service driver providing unknown value to map."""
self.conn_info['ike_policy']['pfs'] = "unknown_value"
self.assertRaises(ipsec_driver.CsrUnknownMappingError,
self.ipsec_conn.translate_dialect,
'ike_policy', 'pfs', self.conn_info['ike_policy'])
def test_psk_create_info(self):
"""Ensure that pre-shared key info is created correctly."""
expected = {u'keyring-name': '123',
u'pre-shared-key-list': [
{u'key': 'secret',
u'encrypted': False,
u'peer-address': '192.168.1.2'}]}
psk_id = self.conn_info['id']
psk_info = self.ipsec_conn.create_psk_info(psk_id, self.conn_info)
self.assertEqual(expected, psk_info)
def test_create_ike_policy_info(self):
"""Ensure that IKE policy info is mapped/created correctly."""
expected = {u'priority-id': 222,
u'encryption': u'aes',
u'hash': u'sha',
u'dhGroup': 5,
u'version': u'v1',
u'lifetime': 3600}
policy_id = self.conn_info['cisco']['ike_policy_id']
policy_info = self.ipsec_conn.create_ike_policy_info(policy_id,
self.conn_info)
self.assertEqual(expected, policy_info)
def test_create_ike_policy_info_different_encryption(self):
"""Ensure that IKE policy info is mapped/created correctly."""
self.conn_info['ike_policy']['encryption_algorithm'] = 'aes-192'
expected = {u'priority-id': 222,
u'encryption': u'aes192',
u'hash': u'sha',
u'dhGroup': 5,
u'version': u'v1',
u'lifetime': 3600}
policy_id = self.conn_info['cisco']['ike_policy_id']
policy_info = self.ipsec_conn.create_ike_policy_info(policy_id,
self.conn_info)
self.assertEqual(expected, policy_info)
def test_create_ike_policy_info_non_defaults(self):
"""Ensure that IKE policy info with different values."""
self.conn_info['ike_policy'] = {
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-256',
'pfs': 'Group14',
'ike_version': 'v1',
'lifetime_units': 'seconds',
'lifetime_value': 60
}
expected = {u'priority-id': 222,
u'encryption': u'aes256',
u'hash': u'sha',
u'dhGroup': 14,
u'version': u'v1',
u'lifetime': 60}
policy_id = self.conn_info['cisco']['ike_policy_id']
policy_info = self.ipsec_conn.create_ike_policy_info(policy_id,
self.conn_info)
self.assertEqual(expected, policy_info)
def test_ipsec_policy_info(self):
"""Ensure that IPSec policy info is mapped/created correctly.
Note: Although the default for anti-replay-window-size on the
CSR is 64, we force it to 'disable' for OpenStack use.
"""
expected = {u'policy-id': 333,
u'protection-suite': {
u'esp-encryption': u'esp-aes',
u'esp-authentication': u'esp-sha-hmac',
u'ah': u'ah-sha-hmac'
},
u'lifetime-sec': 3600,
u'pfs': u'group5',
u'anti-replay-window-size': u'disable'}
ipsec_policy_id = self.conn_info['cisco']['ipsec_policy_id']
policy_info = self.ipsec_conn.create_ipsec_policy_info(ipsec_policy_id,
self.conn_info)
self.assertEqual(expected, policy_info)
def test_ipsec_policy_info_different_encryption(self):
"""Create IPSec policy with different settings."""
self.conn_info['ipsec_policy']['transform_protocol'] = 'ah-esp'
self.conn_info['ipsec_policy']['encryption_algorithm'] = 'aes-192'
expected = {u'policy-id': 333,
u'protection-suite': {
u'esp-encryption': u'esp-192-aes',
u'esp-authentication': u'esp-sha-hmac',
u'ah': u'ah-sha-hmac'
},
u'lifetime-sec': 3600,
u'pfs': u'group5',
u'anti-replay-window-size': u'disable'}
ipsec_policy_id = self.conn_info['cisco']['ipsec_policy_id']
policy_info = self.ipsec_conn.create_ipsec_policy_info(ipsec_policy_id,
self.conn_info)
self.assertEqual(expected, policy_info)
def test_ipsec_policy_info_non_defaults(self):
"""Create/map IPSec policy info with different values."""
self.conn_info['ipsec_policy'] = {'transform_protocol': 'esp',
'encryption_algorithm': '3des',
'auth_algorithm': 'sha1',
'pfs': 'group14',
'lifetime_units': 'seconds',
'lifetime_value': 120,
'anti-replay-window-size': 'disable'}
expected = {u'policy-id': 333,
u'protection-suite': {
u'esp-encryption': u'esp-3des',
u'esp-authentication': u'esp-sha-hmac'
},
u'lifetime-sec': 120,
u'pfs': u'group14',
u'anti-replay-window-size': u'disable'}
ipsec_policy_id = self.conn_info['cisco']['ipsec_policy_id']
policy_info = self.ipsec_conn.create_ipsec_policy_info(ipsec_policy_id,
self.conn_info)
self.assertEqual(expected, policy_info)
def test_site_connection_info(self):
"""Ensure site-to-site connection info is created/mapped correctly."""
expected = {u'vpn-interface-name': 'Tunnel0',
u'ipsec-policy-id': 333,
u'remote-device': {
u'tunnel-ip-address': '192.168.1.2'
},
u'mtu': 1500}
ipsec_policy_id = self.conn_info['cisco']['ipsec_policy_id']
site_conn_id = self.conn_info['cisco']['site_conn_id']
conn_info = self.ipsec_conn.create_site_connection_info(
site_conn_id, ipsec_policy_id, self.conn_info)
self.assertEqual(expected, conn_info)
def test_static_route_info(self):
"""Create static route info for peer CIDRs."""
expected = [('10.1.0.0_24_Tunnel0',
{u'destination-network': '10.1.0.0/24',
u'outgoing-interface': 'Tunnel0'}),
('10.2.0.0_24_Tunnel0',
{u'destination-network': '10.2.0.0/24',
u'outgoing-interface': 'Tunnel0'})]
# self.driver.csr.make_route_id.side_effect = ['10.1.0.0_24_Tunnel0',
# '10.2.0.0_24_Tunnel0']
site_conn_id = self.conn_info['cisco']['site_conn_id']
routes_info = self.ipsec_conn.create_routes_info(site_conn_id,
self.conn_info)
self.assertEqual(2, len(routes_info))
self.assertEqual(expected, routes_info)
class TestCiscoCsrIPsecDeviceDriverSyncStatuses(base.BaseTestCase):
"""Test status/state of services and connections, after sync."""
def setUp(self):
super(TestCiscoCsrIPsecDeviceDriverSyncStatuses, self).setUp()
for klass in ['neutron.common.rpc.create_connection',
'neutron.context.get_admin_context_without_session',
'oslo_service.loopingcall.FixedIntervalLoopingCall']:
mock.patch(klass).start()
self.context = context.Context('some_user', 'some_tenant')
self.agent = mock.Mock()
self.driver = ipsec_driver.CiscoCsrIPsecDriver(self.agent, FAKE_HOST)
self.driver.agent_rpc = mock.Mock()
self.conn_create = mock.patch.object(
ipsec_driver.CiscoCsrIPSecConnection,
'create_ipsec_site_connection').start()
self.conn_delete = mock.patch.object(
ipsec_driver.CiscoCsrIPSecConnection,
'delete_ipsec_site_connection').start()
self.admin_state = mock.patch.object(
ipsec_driver.CiscoCsrIPSecConnection,
'set_admin_state').start()
self.csr = mock.Mock()
self.router_info = {
u'router_info': {'rest_mgmt_ip': '2.2.2.2',
'tunnel_ip': '1.1.1.3',
'username': 'me',
'password': 'password',
'timeout': 120,
'outer_if_name': u'GigabitEthernet3.102',
'inner_if_name': u'GigabitEthernet3.101'}}
self.service123_data = {u'id': u'123',
u'status': constants.DOWN,
u'admin_state_up': False}
self.service123_data.update(self.router_info)
self.conn1_data = {u'id': u'1',
u'status': constants.ACTIVE,
u'admin_state_up': True,
u'mtu': 1500,
u'psk': u'secret',
u'peer_address': '192.168.1.2',
u'peer_cidrs': ['10.1.0.0/24', '10.2.0.0/24'],
u'ike_policy': {
u'auth_algorithm': u'sha1',
u'encryption_algorithm': u'aes-128',
u'pfs': u'Group5',
u'ike_version': u'v1',
u'lifetime_units': u'seconds',
u'lifetime_value': 3600},
u'ipsec_policy': {
u'transform_protocol': u'ah',
u'encryption_algorithm': u'aes-128',
u'auth_algorithm': u'sha1',
u'pfs': u'group5',
u'lifetime_units': u'seconds',
u'lifetime_value': 3600},
u'cisco': {u'site_conn_id': u'Tunnel0'}}
# NOTE: The sync operation has mark (trivial), update (tested),
# sweep (tested), and report (tested) phases.
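# The tests below drive those phases through the driver methods:
#   mark:   mark_existing_connections_as_dirty()
#   update: update_service() / update_connection()
#   sweep:  remove_unknown_connections()
#   report: build_report_for_service() / report_status()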
def test_update_ipsec_connection_create_notify(self):
"""Notified of connection create request - create."""
# Make the (existing) service
self.driver.create_vpn_service(self.service123_data)
conn_data = copy.deepcopy(self.conn1_data)
conn_data[u'status'] = constants.PENDING_CREATE
connection = self.driver.update_connection(self.context,
u'123', conn_data)
self.assertFalse(connection.is_dirty)
self.assertEqual(u'Tunnel0', connection.tunnel)
self.assertEqual(constants.PENDING_CREATE, connection.last_status)
self.assertEqual(1, self.conn_create.call_count)
def test_detect_no_change_to_ipsec_connection(self):
"""No change to IPSec connection - nop."""
# Make existing service, and connection that was active
vpn_service = self.driver.create_vpn_service(self.service123_data)
connection = vpn_service.create_connection(self.conn1_data)
self.assertFalse(connection.check_for_changes(self.conn1_data))
def test_detect_state_only_change_to_ipsec_connection(self):
"""Only IPSec connection state changed - update."""
# Make existing service, and connection that was active
vpn_service = self.driver.create_vpn_service(self.service123_data)
connection = vpn_service.create_connection(self.conn1_data)
conn_data = copy.deepcopy(self.conn1_data)
conn_data[u'admin_state_up'] = False
self.assertFalse(connection.check_for_changes(conn_data))
def test_detect_non_state_change_to_ipsec_connection(self):
"""Connection change instead of/in addition to state - update."""
# Make existing service, and connection that was active
vpn_service = self.driver.create_vpn_service(self.service123_data)
connection = vpn_service.create_connection(self.conn1_data)
conn_data = copy.deepcopy(self.conn1_data)
conn_data[u'ipsec_policy'][u'encryption_algorithm'] = u'aes-256'
self.assertTrue(connection.check_for_changes(conn_data))
def test_update_ipsec_connection_changed_admin_down(self):
"""Notified of connection state change - update.
For a connection that was previously created, expect the
connection to be forced down when only the admin state changes.
"""
# Make existing service, and connection that was active
vpn_service = self.driver.create_vpn_service(self.service123_data)
vpn_service.create_connection(self.conn1_data)
# Simulate that notification of connection update received
self.driver.mark_existing_connections_as_dirty()
# Modify the connection data for the 'sync'
conn_data = copy.deepcopy(self.conn1_data)
conn_data[u'admin_state_up'] = False
connection = self.driver.update_connection(self.context,
'123', conn_data)
self.assertFalse(connection.is_dirty)
self.assertEqual(u'Tunnel0', connection.tunnel)
self.assertEqual(constants.ACTIVE, connection.last_status)
self.assertFalse(self.conn_create.called)
self.assertFalse(connection.is_admin_up)
self.assertTrue(connection.forced_down)
self.assertEqual(1, self.admin_state.call_count)
def test_update_ipsec_connection_changed_config(self):
"""Notified of connection changing config - update.
The goal here is to verify that the connection is deleted and then
re-created when its configuration changes, not to check which
specific value changed, so an arbitrary value (MTU) is modified.
"""
# Make existing service, and connection that was active
vpn_service = self.driver.create_vpn_service(self.service123_data)
vpn_service.create_connection(self.conn1_data)
# Simulate that notification of connection update received
self.driver.mark_existing_connections_as_dirty()
# Modify the connection data for the 'sync'
conn_data = copy.deepcopy(self.conn1_data)
conn_data[u'mtu'] = 9200
connection = self.driver.update_connection(self.context,
'123', conn_data)
self.assertFalse(connection.is_dirty)
self.assertEqual(u'Tunnel0', connection.tunnel)
self.assertEqual(constants.ACTIVE, connection.last_status)
self.assertEqual(1, self.conn_create.call_count)
self.assertEqual(1, self.conn_delete.call_count)
self.assertTrue(connection.is_admin_up)
self.assertFalse(connection.forced_down)
self.assertFalse(self.admin_state.called)
def test_update_of_unknown_ipsec_connection(self):
"""Notified of update of unknown connection - create.
Occurs if the agent restarts and receives a notification of a
change to a connection, but has no previous record of that
connection. The result is that the connection is rebuilt.
"""
# Will have previously created service, but don't know of connection
self.driver.create_vpn_service(self.service123_data)
# Simulate that notification of connection update received
self.driver.mark_existing_connections_as_dirty()
conn_data = copy.deepcopy(self.conn1_data)
conn_data[u'status'] = constants.DOWN
connection = self.driver.update_connection(self.context,
u'123', conn_data)
self.assertFalse(connection.is_dirty)
self.assertEqual(u'Tunnel0', connection.tunnel)
self.assertEqual(constants.DOWN, connection.last_status)
self.assertEqual(1, self.conn_create.call_count)
self.assertTrue(connection.is_admin_up)
self.assertFalse(connection.forced_down)
self.assertFalse(self.admin_state.called)
def test_update_missing_connection_admin_down(self):
"""Connection not present is in admin down state - nop.
If the agent has restarted, and a sync notification occurs with
a connection that is in admin down state, recreate the connection,
but indicate that the connection is down.
"""
# Make existing service, but no connection
self.driver.create_vpn_service(self.service123_data)
conn_data = copy.deepcopy(self.conn1_data)
conn_data.update({u'status': constants.DOWN,
u'admin_state_up': False})
connection = self.driver.update_connection(self.context,
u'123', conn_data)
self.assertIsNotNone(connection)
self.assertFalse(connection.is_dirty)
self.assertEqual(1, self.conn_create.call_count)
self.assertFalse(connection.is_admin_up)
self.assertTrue(connection.forced_down)
self.assertEqual(1, self.admin_state.call_count)
def test_update_connection_admin_up(self):
"""Connection updated to admin up state - record."""
# Make existing service, and connection that was admin down
conn_data = copy.deepcopy(self.conn1_data)
conn_data.update({u'status': constants.DOWN, u'admin_state_up': False})
service_data = {u'id': u'123',
u'status': constants.DOWN,
u'admin_state_up': True,
u'ipsec_conns': [conn_data]}
service_data.update(self.router_info)
self.driver.update_service(self.context, service_data)
# Simulate that notification of connection update received
self.driver.mark_existing_connections_as_dirty()
# Now simulate that the notification shows the connection admin up
new_conn_data = copy.deepcopy(conn_data)
new_conn_data[u'admin_state_up'] = True
connection = self.driver.update_connection(self.context,
u'123', new_conn_data)
self.assertFalse(connection.is_dirty)
self.assertEqual(u'Tunnel0', connection.tunnel)
self.assertEqual(constants.DOWN, connection.last_status)
self.assertTrue(connection.is_admin_up)
self.assertFalse(connection.forced_down)
self.assertEqual(2, self.admin_state.call_count)
def test_update_for_vpn_service_create(self):
"""Creation of new IPSec connection on new VPN service - create.
The service will be created and marked as 'clean', and update
processing for the connection will occur (create).
"""
conn_data = copy.deepcopy(self.conn1_data)
conn_data[u'status'] = constants.PENDING_CREATE
service_data = {u'id': u'123',
u'status': constants.PENDING_CREATE,
u'admin_state_up': True,
u'ipsec_conns': [conn_data]}
service_data.update(self.router_info)
vpn_service = self.driver.update_service(self.context, service_data)
self.assertFalse(vpn_service.is_dirty)
self.assertEqual(constants.PENDING_CREATE, vpn_service.last_status)
connection = vpn_service.get_connection(u'1')
self.assertIsNotNone(connection)
self.assertFalse(connection.is_dirty)
self.assertEqual(u'Tunnel0', connection.tunnel)
self.assertEqual(constants.PENDING_CREATE, connection.last_status)
self.assertEqual(1, self.conn_create.call_count)
self.assertTrue(connection.is_admin_up)
self.assertFalse(connection.forced_down)
self.assertFalse(self.admin_state.called)
def test_update_for_new_connection_on_existing_service(self):
"""Creating a new IPSec connection on an existing service."""
# Create the service before testing, and mark it dirty
prev_vpn_service = self.driver.create_vpn_service(self.service123_data)
self.driver.mark_existing_connections_as_dirty()
conn_data = copy.deepcopy(self.conn1_data)
conn_data[u'status'] = constants.PENDING_CREATE
service_data = {u'id': u'123',
u'status': constants.ACTIVE,
u'admin_state_up': True,
u'ipsec_conns': [conn_data]}
service_data.update(self.router_info)
vpn_service = self.driver.update_service(self.context, service_data)
# Should reuse the entry and update the status
self.assertEqual(prev_vpn_service, vpn_service)
self.assertFalse(vpn_service.is_dirty)
self.assertEqual(constants.ACTIVE, vpn_service.last_status)
connection = vpn_service.get_connection(u'1')
self.assertIsNotNone(connection)
self.assertFalse(connection.is_dirty)
self.assertEqual(u'Tunnel0', connection.tunnel)
self.assertEqual(constants.PENDING_CREATE, connection.last_status)
self.assertEqual(1, self.conn_create.call_count)
def test_update_for_vpn_service_with_one_unchanged_connection(self):
"""Existing VPN service and IPSec connection without any changes - nop.
Service and connection will be marked clean. No processing for
either, as there are no changes.
"""
# Create a service and add in a connection that is active
prev_vpn_service = self.driver.create_vpn_service(self.service123_data)
prev_vpn_service.create_connection(self.conn1_data)
self.driver.mark_existing_connections_as_dirty()
# Create notification with conn unchanged and service already created
service_data = {u'id': u'123',
u'status': constants.ACTIVE,
u'admin_state_up': True,
u'ipsec_conns': [self.conn1_data]}
service_data.update(self.router_info)
vpn_service = self.driver.update_service(self.context, service_data)
# Should reuse the entry and update the status
self.assertEqual(prev_vpn_service, vpn_service)
self.assertFalse(vpn_service.is_dirty)
self.assertEqual(constants.ACTIVE, vpn_service.last_status)
connection = vpn_service.get_connection(u'1')
self.assertIsNotNone(connection)
self.assertFalse(connection.is_dirty)
self.assertEqual(u'Tunnel0', connection.tunnel)
self.assertEqual(constants.ACTIVE, connection.last_status)
self.assertFalse(self.conn_create.called)
def test_update_service_admin_down(self):
"""VPN service updated to admin down state - force all down.
If service is down, then all connections are forced down.
"""
# Create an "existing" service, prior to notification
prev_vpn_service = self.driver.create_vpn_service(self.service123_data)
self.driver.mark_existing_connections_as_dirty()
service_data = {u'id': u'123',
u'status': constants.DOWN,
u'admin_state_up': False,
u'ipsec_conns': [self.conn1_data]}
service_data.update(self.router_info)
vpn_service = self.driver.update_service(self.context, service_data)
self.assertEqual(prev_vpn_service, vpn_service)
self.assertFalse(vpn_service.is_dirty)
self.assertFalse(vpn_service.is_admin_up)
self.assertEqual(constants.DOWN, vpn_service.last_status)
conn = vpn_service.get_connection(u'1')
self.assertIsNotNone(conn)
self.assertFalse(conn.is_dirty)
self.assertTrue(conn.forced_down)
self.assertTrue(conn.is_admin_up)
def test_update_new_service_admin_down(self):
"""Unknown VPN service updated to admin down state - nop.
This can happen if the agent restarts and then gets its first
notification of a service that is in the admin down state.
Structures will be created, but the connections are forced down.
"""
service_data = {u'id': u'123',
u'status': constants.DOWN,
u'admin_state_up': False,
u'ipsec_conns': [self.conn1_data]}
service_data.update(self.router_info)
vpn_service = self.driver.update_service(self.context, service_data)
self.assertIsNotNone(vpn_service)
self.assertFalse(vpn_service.is_dirty)
self.assertFalse(vpn_service.is_admin_up)
self.assertEqual(constants.DOWN, vpn_service.last_status)
conn = vpn_service.get_connection(u'1')
self.assertIsNotNone(conn)
self.assertFalse(conn.is_dirty)
self.assertTrue(conn.forced_down)
self.assertTrue(conn.is_admin_up)
def test_update_service_admin_up(self):
"""VPN service updated to admin up state - restore.
If the service is now up, then connections that are admin up will
come up, and connections that are admin down will remain down.
"""
# Create an "existing" service, prior to notification
prev_vpn_service = self.driver.create_vpn_service(self.service123_data)
self.driver.mark_existing_connections_as_dirty()
conn_data1 = {u'id': u'1', u'status': constants.DOWN,
u'admin_state_up': False,
u'cisco': {u'site_conn_id': u'Tunnel0'}}
conn_data2 = {u'id': u'2', u'status': constants.ACTIVE,
u'admin_state_up': True,
u'cisco': {u'site_conn_id': u'Tunnel1'}}
service_data = {u'id': u'123',
u'status': constants.DOWN,
u'admin_state_up': True,
u'ipsec_conns': [conn_data1, conn_data2]}
service_data.update(self.router_info)
vpn_service = self.driver.update_service(self.context, service_data)
self.assertEqual(prev_vpn_service, vpn_service)
self.assertFalse(vpn_service.is_dirty)
self.assertTrue(vpn_service.is_admin_up)
self.assertEqual(constants.DOWN, vpn_service.last_status)
conn1 = vpn_service.get_connection(u'1')
self.assertIsNotNone(conn1)
self.assertFalse(conn1.is_dirty)
self.assertTrue(conn1.forced_down)
self.assertFalse(conn1.is_admin_up)
conn2 = vpn_service.get_connection(u'2')
self.assertIsNotNone(conn2)
self.assertFalse(conn2.is_dirty)
self.assertFalse(conn2.forced_down)
self.assertTrue(conn2.is_admin_up)
def test_update_of_unknown_service_create(self):
"""Create of VPN service that is currently unknown - record.
If agent is restarted or user changes VPN service to admin up, the
notification may contain a VPN service with an IPSec connection
that is not in PENDING_CREATE state.
"""
conn_data = {u'id': u'1', u'status': constants.DOWN,
u'admin_state_up': True,
u'cisco': {u'site_conn_id': u'Tunnel0'}}
service_data = {u'id': u'123',
u'status': constants.ACTIVE,
u'admin_state_up': True,
u'ipsec_conns': [conn_data]}
service_data.update(self.router_info)
vpn_service = self.driver.update_service(self.context, service_data)
self.assertFalse(vpn_service.is_dirty)
self.assertEqual(constants.ACTIVE, vpn_service.last_status)
connection = vpn_service.get_connection(u'1')
self.assertIsNotNone(connection)
self.assertFalse(connection.is_dirty)
self.assertEqual(u'Tunnel0', connection.tunnel)
self.assertEqual(constants.DOWN, connection.last_status)
self.assertEqual(1, self.conn_create.call_count)
def _check_connection_for_service(self, count, vpn_service):
"""Helper to check the connection information for a service."""
connection = vpn_service.get_connection(u'%d' % count)
self.assertIsNotNone(connection, "for connection %d" % count)
self.assertFalse(connection.is_dirty, "for connection %d" % count)
self.assertEqual(u'Tunnel%d' % count, connection.tunnel,
"for connection %d" % count)
self.assertEqual(constants.PENDING_CREATE, connection.last_status,
"for connection %d" % count)
return count + 1
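# The helper returns count + 1 so callers can chain checks across
# consecutive connections/tunnels (see
# test_create_two_connections_on_two_services below).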
def notification_for_two_services_with_two_conns(self):
"""Helper used by tests to create two services, each with two conns."""
conn1_data = {u'id': u'1', u'status': constants.PENDING_CREATE,
u'admin_state_up': True,
u'cisco': {u'site_conn_id': u'Tunnel1'}}
conn2_data = {u'id': u'2', u'status': constants.PENDING_CREATE,
u'admin_state_up': True,
u'cisco': {u'site_conn_id': u'Tunnel2'}}
service1_data = {u'id': u'123',
u'status': constants.PENDING_CREATE,
u'admin_state_up': True,
u'ipsec_conns': [conn1_data, conn2_data]}
service1_data.update(self.router_info)
conn3_data = {u'id': u'3', u'status': constants.PENDING_CREATE,
u'admin_state_up': True,
u'cisco': {u'site_conn_id': u'Tunnel3'}}
conn4_data = {u'id': u'4', u'status': constants.PENDING_CREATE,
u'admin_state_up': True,
u'cisco': {u'site_conn_id': u'Tunnel4'}}
service2_data = {u'id': u'456',
u'status': constants.PENDING_CREATE,
u'admin_state_up': True,
u'ipsec_conns': [conn3_data, conn4_data]}
service2_data.update(self.router_info)
return service1_data, service2_data
def test_create_two_connections_on_two_services(self):
"""High level test of multiple VPN services with connections."""
# Build notification message
(service1_data,
service2_data) = self.notification_for_two_services_with_two_conns()
# Simulate plugin returning notification, when requested
self.driver.agent_rpc.get_vpn_services_on_host.return_value = [
service1_data, service2_data]
vpn_services = self.driver.update_all_services_and_connections(
self.context)
self.assertEqual(2, len(vpn_services))
count = 1
for vpn_service in vpn_services:
self.assertFalse(vpn_service.is_dirty,
"for service %s" % vpn_service)
self.assertEqual(constants.PENDING_CREATE, vpn_service.last_status,
"for service %s" % vpn_service)
count = self._check_connection_for_service(count, vpn_service)
count = self._check_connection_for_service(count, vpn_service)
self.assertEqual(4, self.conn_create.call_count)
def test_sweep_connection_marked_as_clean(self):
"""Sync updated connection - no action."""
# Create a service and connection
vpn_service = self.driver.create_vpn_service(self.service123_data)
connection = vpn_service.create_connection(self.conn1_data)
self.driver.mark_existing_connections_as_dirty()
# Simulate that the update phase visited both of them
vpn_service.is_dirty = False
connection.is_dirty = False
self.driver.remove_unknown_connections(self.context)
vpn_service = self.driver.service_state.get(u'123')
self.assertIsNotNone(vpn_service)
self.assertFalse(vpn_service.is_dirty)
connection = vpn_service.get_connection(u'1')
self.assertIsNotNone(connection)
self.assertFalse(connection.is_dirty)
def test_sweep_connection_dirty(self):
"""Sync did not update connection - delete."""
# Create a service and connection
vpn_service = self.driver.create_vpn_service(self.service123_data)
vpn_service.create_connection(self.conn1_data)
self.driver.mark_existing_connections_as_dirty()
# Simulate that the update phase only visited the service
vpn_service.is_dirty = False
self.driver.remove_unknown_connections(self.context)
vpn_service = self.driver.service_state.get(u'123')
self.assertIsNotNone(vpn_service)
self.assertFalse(vpn_service.is_dirty)
connection = vpn_service.get_connection(u'1')
self.assertIsNone(connection)
self.assertEqual(1, self.conn_delete.call_count)
def test_sweep_service_dirty(self):
"""Sync did not update service - delete it and all conns."""
# Create a service and connection
vpn_service = self.driver.create_vpn_service(self.service123_data)
vpn_service.create_connection(self.conn1_data)
self.driver.mark_existing_connections_as_dirty()
# Both the service and the connection are still 'dirty'
self.driver.remove_unknown_connections(self.context)
self.assertIsNone(self.driver.service_state.get(u'123'))
self.assertEqual(1, self.conn_delete.call_count)
def test_sweep_multiple_services(self):
"""One service and conn updated, one service and conn not."""
# Create two services, each with a connection
vpn_service1 = self.driver.create_vpn_service(self.service123_data)
vpn_service1.create_connection(self.conn1_data)
service456_data = {u'id': u'456',
u'status': constants.ACTIVE,
u'admin_state_up': False}
service456_data.update(self.router_info)
conn2_data = {u'id': u'2', u'status': constants.ACTIVE,
u'admin_state_up': True,
u'cisco': {u'site_conn_id': u'Tunnel0'}}
prev_vpn_service2 = self.driver.create_vpn_service(service456_data)
prev_connection2 = prev_vpn_service2.create_connection(conn2_data)
self.driver.mark_existing_connections_as_dirty()
# Simulate that the update phase visited the first service and conn
prev_vpn_service2.is_dirty = False
prev_connection2.is_dirty = False
self.driver.remove_unknown_connections(self.context)
self.assertIsNone(self.driver.service_state.get(u'123'))
vpn_service2 = self.driver.service_state.get(u'456')
self.assertEqual(prev_vpn_service2, vpn_service2)
self.assertFalse(vpn_service2.is_dirty)
connection2 = vpn_service2.get_connection(u'2')
self.assertEqual(prev_connection2, connection2)
self.assertFalse(connection2.is_dirty)
self.assertEqual(1, self.conn_delete.call_count)
def simulate_mark_update_sweep_for_service_with_conn(self, service_state,
connection_state):
"""Create internal structures for single service with connection.
Creates a service and corresponding connection. Then, simulates
the mark/update/sweep operation by marking both the service and
connection as clean and updating their status. Override the REST
client created for the service, with a mock, so that all calls
can be mocked out.
"""
conn_data = {u'id': u'1', u'status': connection_state,
u'admin_state_up': True,
u'cisco': {u'site_conn_id': u'Tunnel0'}}
service_data = {u'id': u'123',
u'admin_state_up': True}
service_data.update(self.router_info)
# Create a service and connection
vpn_service = self.driver.create_vpn_service(service_data)
vpn_service.csr = self.csr # Mocked REST client
connection = vpn_service.create_connection(conn_data)
# Simulate that the update phase visited both of them
vpn_service.is_dirty = False
vpn_service.connections_removed = False
vpn_service.last_status = service_state
vpn_service.is_admin_up = True
connection.is_dirty = False
connection.last_status = connection_state
connection.is_admin_up = True
connection.forced_down = False
return vpn_service
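# Typical usage in the report tests below, e.g. for a newly created
# service and connection:
#   vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
#       constants.PENDING_CREATE, constants.PENDING_CREATE)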
def test_report_fragment_connection_created(self):
"""Generate report section for a created connection."""
# Prepare service and connection in PENDING_CREATE state
vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
constants.PENDING_CREATE, constants.PENDING_CREATE)
# Simulate that CSR has reported the connection is still up
self.csr.read_tunnel_statuses.return_value = [
(u'Tunnel0', u'UP-ACTIVE'), ]
# Get the statuses for connections existing on CSR
tunnels = vpn_service.get_ipsec_connections_status()
self.assertEqual({u'Tunnel0': constants.ACTIVE}, tunnels)
# Check that there is a status for this connection
connection = vpn_service.get_connection(u'1')
self.assertIsNotNone(connection)
current_status = connection.find_current_status_in(tunnels)
self.assertEqual(constants.ACTIVE, current_status)
# Create report fragment due to change
self.assertNotEqual(connection.last_status, current_status)
report_frag = connection.update_status_and_build_report(current_status)
self.assertEqual(current_status, connection.last_status)
expected = {'1': {'status': constants.ACTIVE,
'updated_pending_status': True}}
self.assertEqual(expected, report_frag)
def test_report_fragment_connection_unchanged_status(self):
"""No report section generated for a created connection."""
# Prepare service and connection in ACTIVE state
vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
constants.ACTIVE, constants.ACTIVE)
# Simulate that CSR has reported the connection is up
self.csr.read_tunnel_statuses.return_value = [
(u'Tunnel0', u'UP-IDLE'), ]
# Get the statuses for connections existing on CSR
tunnels = vpn_service.get_ipsec_connections_status()
self.assertEqual({u'Tunnel0': constants.ACTIVE}, tunnels)
# Check that there is a status for this connection
connection = vpn_service.get_connection(u'1')
self.assertIsNotNone(connection)
current_status = connection.find_current_status_in(tunnels)
self.assertEqual(constants.ACTIVE, current_status)
# Should be no report, as no change
self.assertEqual(connection.last_status, current_status)
report_frag = connection.update_status_and_build_report(current_status)
self.assertEqual(current_status, connection.last_status)
self.assertEqual({}, report_frag)
def test_report_fragment_connection_changed_status(self):
"""Generate report section for connection with changed state."""
# Prepare service in ACTIVE state and connection in DOWN state
vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
constants.ACTIVE, constants.DOWN)
# Simulate that CSR has reported the connection is still up
self.csr.read_tunnel_statuses.return_value = [
(u'Tunnel0', u'UP-NO-IKE'), ]
# Get the statuses for connections existing on CSR
tunnels = vpn_service.get_ipsec_connections_status()
self.assertEqual({u'Tunnel0': constants.ACTIVE}, tunnels)
# Check that there is a status for this connection
connection = vpn_service.get_connection(u'1')
self.assertIsNotNone(connection)
current_status = connection.find_current_status_in(tunnels)
self.assertEqual(constants.ACTIVE, current_status)
# Create report fragment due to change
self.assertNotEqual(connection.last_status, current_status)
report_frag = connection.update_status_and_build_report(current_status)
self.assertEqual(current_status, connection.last_status)
expected = {'1': {'status': constants.ACTIVE,
'updated_pending_status': False}}
self.assertEqual(expected, report_frag)
def test_report_fragment_connection_failed_create(self):
"""Failure test of report fragment for conn that failed creation.
Normally, without any status from the CSR, the connection report would
be skipped, but we need to report back failures.
"""
# Prepare service and connection in PENDING_CREATE state
vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
constants.PENDING_CREATE, constants.PENDING_CREATE)
# Simulate that CSR does NOT report the status (no tunnel)
self.csr.read_tunnel_statuses.return_value = []
# Get the statuses for connections existing on CSR
tunnels = vpn_service.get_ipsec_connections_status()
self.assertEqual({}, tunnels)
# Check that there is a status for this connection
connection = vpn_service.get_connection(u'1')
self.assertIsNotNone(connection)
current_status = connection.find_current_status_in(tunnels)
self.assertEqual(constants.ERROR, current_status)
# Create report fragment due to change
self.assertNotEqual(connection.last_status, current_status)
report_frag = connection.update_status_and_build_report(current_status)
self.assertEqual(current_status, connection.last_status)
expected = {'1': {'status': constants.ERROR,
'updated_pending_status': True}}
self.assertEqual(expected, report_frag)
def test_report_fragment_connection_admin_down(self):
"""Report for a connection that is in admin down state."""
# Prepare service and connection with previous status ACTIVE, but
# with connection admin down
conn_data = {u'id': u'1', u'status': constants.ACTIVE,
u'admin_state_up': False,
u'cisco': {u'site_conn_id': u'Tunnel0'}}
service_data = {u'id': u'123',
u'status': constants.ACTIVE,
u'admin_state_up': True,
u'ipsec_conns': [conn_data]}
service_data.update(self.router_info)
vpn_service = self.driver.update_service(self.context, service_data)
vpn_service.csr = self.csr # Mocked REST client
# Tunnel would have been deleted, so simulate no status
self.csr.read_tunnel_statuses.return_value = []
connection = vpn_service.get_connection(u'1')
self.assertIsNotNone(connection)
self.assertTrue(connection.forced_down)
self.assertEqual(constants.ACTIVE, connection.last_status)
# Create report fragment due to change
report_frag = self.driver.build_report_for_connections_on(vpn_service)
self.assertEqual(constants.DOWN, connection.last_status)
expected = {'1': {'status': constants.DOWN,
'updated_pending_status': False}}
self.assertEqual(expected, report_frag)
def test_report_fragment_two_connections(self):
"""Generate report fragment for two connections on a service."""
# Prepare service with two connections, one ACTIVE, one DOWN
conn1_data = {u'id': u'1', u'status': constants.DOWN,
u'admin_state_up': True,
u'cisco': {u'site_conn_id': u'Tunnel1'}}
conn2_data = {u'id': u'2', u'status': constants.ACTIVE,
u'admin_state_up': True,
u'cisco': {u'site_conn_id': u'Tunnel2'}}
service_data = {u'id': u'123',
u'status': constants.ACTIVE,
u'admin_state_up': True,
u'ipsec_conns': [conn1_data, conn2_data]}
service_data.update(self.router_info)
vpn_service = self.driver.update_service(self.context, service_data)
vpn_service.csr = self.csr # Mocked REST client
# Simulate that CSR has reported the connections with diff status
self.csr.read_tunnel_statuses.return_value = [
(u'Tunnel1', u'UP-IDLE'), (u'Tunnel2', u'DOWN-NEGOTIATING')]
# Get the report fragments for the connections
report_frag = self.driver.build_report_for_connections_on(vpn_service)
expected = {u'1': {u'status': constants.ACTIVE,
u'updated_pending_status': False},
u'2': {u'status': constants.DOWN,
u'updated_pending_status': False}}
self.assertEqual(expected, report_frag)
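# Note: as exercised by these report tests, CSR tunnel states beginning
# with 'UP' (UP-ACTIVE, UP-IDLE, UP-NO-IKE) map to constants.ACTIVE, and
# states beginning with 'DOWN' (DOWN, DOWN-NEGOTIATING) map to
# constants.DOWN.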
def test_report_service_create(self):
"""VPN service and IPSec connection created - report."""
# Simulate creation of the service and connection
vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
constants.PENDING_CREATE, constants.PENDING_CREATE)
# Simulate that the CSR has created the connection
self.csr.read_tunnel_statuses.return_value = [
(u'Tunnel0', u'UP-ACTIVE'), ]
report = self.driver.build_report_for_service(vpn_service)
expected_report = {
u'id': u'123',
u'updated_pending_status': True,
u'status': constants.ACTIVE,
u'ipsec_site_connections': {
u'1': {u'status': constants.ACTIVE,
u'updated_pending_status': True}
}
}
self.assertEqual(expected_report, report)
# Check that service and connection statuses are updated
self.assertEqual(constants.ACTIVE, vpn_service.last_status)
self.assertEqual(constants.ACTIVE,
vpn_service.get_connection(u'1').last_status)
def test_report_service_create_of_first_conn_fails(self):
"""VPN service and IPSec conn created, but conn failed - report.
Since this is the sole IPSec connection on the service, and the
create failed (connection in ERROR state), the VPN service's
status will be set to DOWN.
"""
# Simulate creation of the service and connection
vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
constants.PENDING_CREATE, constants.PENDING_CREATE)
# Simulate that the CSR has no info due to failed create
self.csr.read_tunnel_statuses.return_value = []
report = self.driver.build_report_for_service(vpn_service)
expected_report = {
u'id': u'123',
u'updated_pending_status': True,
u'status': constants.DOWN,
u'ipsec_site_connections': {
u'1': {u'status': constants.ERROR,
u'updated_pending_status': True}
}
}
self.assertEqual(expected_report, report)
# Check that service and connection statuses are updated
self.assertEqual(constants.DOWN, vpn_service.last_status)
self.assertEqual(constants.ERROR,
vpn_service.get_connection(u'1').last_status)
def test_report_connection_created_on_existing_service(self):
"""Creating connection on existing service - report."""
# Simulate existing service and connection create
vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
constants.ACTIVE, constants.PENDING_CREATE)
# Simulate that the CSR has created the connection
self.csr.read_tunnel_statuses.return_value = [
(u'Tunnel0', u'UP-IDLE'), ]
report = self.driver.build_report_for_service(vpn_service)
expected_report = {
u'id': u'123',
u'updated_pending_status': False,
u'status': constants.ACTIVE,
u'ipsec_site_connections': {
u'1': {u'status': constants.ACTIVE,
u'updated_pending_status': True}
}
}
self.assertEqual(expected_report, report)
# Check that service and connection statuses are updated
self.assertEqual(constants.ACTIVE, vpn_service.last_status)
self.assertEqual(constants.ACTIVE,
vpn_service.get_connection(u'1').last_status)
def test_no_report_no_changes(self):
"""VPN service with unchanged IPSec connection - no report.
Note: No report will be generated if the last connection on the
service is deleted. The service (and connection) objects will
have been removed by the sweep operation and thus not reported.
On the plugin, the service should be changed to DOWN. The same
applies if the service goes to the admin down state.
"""
# Simulate an existing service and connection that are ACTIVE
vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
constants.ACTIVE, constants.ACTIVE)
# Simulate that the CSR reports the connection still active
self.csr.read_tunnel_statuses.return_value = [
(u'Tunnel0', u'UP-ACTIVE'), ]
report = self.driver.build_report_for_service(vpn_service)
self.assertEqual({}, report)
# Check that service and connection statuses are still same
self.assertEqual(constants.ACTIVE, vpn_service.last_status)
self.assertEqual(constants.ACTIVE,
vpn_service.get_connection(u'1').last_status)
def test_report_sole_connection_goes_down(self):
"""Only connection on VPN service goes down - report.
In addition to reporting the status change and recording the new
state for the IPSec connection, the VPN service status will be
DOWN.
"""
# Simulate an existing service and connection that are ACTIVE
vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
constants.ACTIVE, constants.ACTIVE)
# Simulate that the CSR reports the connection went down
self.csr.read_tunnel_statuses.return_value = [
(u'Tunnel0', u'DOWN-NEGOTIATING'), ]
report = self.driver.build_report_for_service(vpn_service)
expected_report = {
u'id': u'123',
u'updated_pending_status': False,
u'status': constants.DOWN,
u'ipsec_site_connections': {
u'1': {u'status': constants.DOWN,
u'updated_pending_status': False}
}
}
self.assertEqual(expected_report, report)
# Check that service and connection statuses are updated
self.assertEqual(constants.DOWN, vpn_service.last_status)
self.assertEqual(constants.DOWN,
vpn_service.get_connection(u'1').last_status)
def test_report_sole_connection_comes_up(self):
"""Only connection on VPN service comes up - report.
In addition to reporting the status change and recording the new
state for the IPSec connection, the VPN service status will be
ACTIVE.
"""
# Simulate an existing service and connection that are DOWN
vpn_service = self.simulate_mark_update_sweep_for_service_with_conn(
constants.DOWN, constants.DOWN)
# Simulate that the CSR reports the connection came up
self.csr.read_tunnel_statuses.return_value = [
(u'Tunnel0', u'UP-NO-IKE'), ]
report = self.driver.build_report_for_service(vpn_service)
expected_report = {
u'id': u'123',
u'updated_pending_status': False,
u'status': constants.ACTIVE,
u'ipsec_site_connections': {
u'1': {u'status': constants.ACTIVE,
u'updated_pending_status': False}
}
}
self.assertEqual(expected_report, report)
# Check that service and connection statuses are updated
self.assertEqual(constants.ACTIVE, vpn_service.last_status)
self.assertEqual(constants.ACTIVE,
vpn_service.get_connection(u'1').last_status)
def test_report_service_with_two_connections_gone_down(self):
"""One service with two connections that went down - report.
This shows the case where all the connections are down, so the
service should report as DOWN as well.
"""
# Simulate one service with two ACTIVE connections
conn1_data = {u'id': u'1', u'status': constants.ACTIVE,
u'admin_state_up': True,
u'cisco': {u'site_conn_id': u'Tunnel1'}}
conn2_data = {u'id': u'2', u'status': constants.ACTIVE,
u'admin_state_up': True,
u'cisco': {u'site_conn_id': u'Tunnel2'}}
service_data = {u'id': u'123',
u'status': constants.ACTIVE,
u'admin_state_up': True,
u'ipsec_conns': [conn1_data, conn2_data]}
service_data.update(self.router_info)
vpn_service = self.driver.update_service(self.context, service_data)
vpn_service.csr = self.csr # Mocked REST client
# Simulate that the CSR has reported that the connections are DOWN
self.csr.read_tunnel_statuses.return_value = [
(u'Tunnel1', u'DOWN-NEGOTIATING'), (u'Tunnel2', u'DOWN')]
report = self.driver.build_report_for_service(vpn_service)
expected_report = {
u'id': u'123',
u'updated_pending_status': False,
u'status': constants.DOWN,
u'ipsec_site_connections': {
u'1': {u'status': constants.DOWN,
u'updated_pending_status': False},
u'2': {u'status': constants.DOWN,
u'updated_pending_status': False}}
}
self.assertEqual(expected_report, report)
# Check that service and connection statuses are updated
self.assertEqual(constants.DOWN, vpn_service.last_status)
self.assertEqual(constants.DOWN,
vpn_service.get_connection(u'1').last_status)
self.assertEqual(constants.DOWN,
vpn_service.get_connection(u'2').last_status)
def test_report_service_with_connection_removed(self):
"""One service with two connections where one is removed - report.
With a connection removed and the other connection unchanged,
normally there would be nothing to report for the connections, but
we need to report any possible change to the service state. In this
case, the service was ACTIVE, but since the only ACTIVE connection
is deleted and the remaining connection is DOWN, the service will
now be reported as DOWN.
"""
# Simulate one service with one connection up, one down
conn1_data = {u'id': u'1', u'status': constants.ACTIVE,
u'admin_state_up': True,
u'mtu': 1500,
u'psk': u'secret',
u'peer_address': '192.168.1.2',
u'peer_cidrs': ['10.1.0.0/24', '10.2.0.0/24'],
u'ike_policy': {u'auth_algorithm': u'sha1',
u'encryption_algorithm': u'aes-128',
u'pfs': u'Group5',
u'ike_version': u'v1',
u'lifetime_units': u'seconds',
u'lifetime_value': 3600},
u'ipsec_policy': {u'transform_protocol': u'ah',
u'encryption_algorithm': u'aes-128',
u'auth_algorithm': u'sha1',
u'pfs': u'group5',
u'lifetime_units': u'seconds',
u'lifetime_value': 3600},
u'cisco': {u'site_conn_id': u'Tunnel1'}}
conn2_data = {u'id': u'2', u'status': constants.DOWN,
u'admin_state_up': True,
u'mtu': 1500,
u'psk': u'secret',
u'peer_address': '192.168.1.2',
u'peer_cidrs': ['10.1.0.0/24', '10.2.0.0/24'],
u'ike_policy': {u'auth_algorithm': u'sha1',
u'encryption_algorithm': u'aes-128',
u'pfs': u'Group5',
u'ike_version': u'v1',
u'lifetime_units': u'seconds',
u'lifetime_value': 3600},
u'ipsec_policy': {u'transform_protocol': u'ah',
u'encryption_algorithm': u'aes-128',
u'auth_algorithm': u'sha1',
u'pfs': u'group5',
u'lifetime_units': u'seconds',
u'lifetime_value': 3600},
u'cisco': {u'site_conn_id': u'Tunnel2'}}
service_data = {u'id': u'123',
u'status': constants.ACTIVE,
u'admin_state_up': True,
u'ipsec_conns': [conn1_data, conn2_data]}
service_data.update(self.router_info)
vpn_service = self.driver.update_service(self.context, service_data)
self.assertEqual(constants.ACTIVE, vpn_service.last_status)
self.assertEqual(constants.ACTIVE,
vpn_service.get_connection(u'1').last_status)
self.assertEqual(constants.DOWN,
vpn_service.get_connection(u'2').last_status)
# Simulate that one is deleted
self.driver.mark_existing_connections_as_dirty()
service_data = {u'id': u'123',
u'status': constants.ACTIVE,
u'admin_state_up': True,
u'ipsec_conns': [conn2_data]}
service_data.update(self.router_info)
vpn_service = self.driver.update_service(self.context, service_data)
vpn_service.csr = self.csr # Mocked REST client
self.driver.remove_unknown_connections(self.context)
self.assertTrue(vpn_service.connections_removed)
self.assertEqual(constants.ACTIVE, vpn_service.last_status)
self.assertIsNone(vpn_service.get_connection(u'1'))
self.assertEqual(constants.DOWN,
vpn_service.get_connection(u'2').last_status)
# Simulate that only one connection reports and status is unchanged,
# so there will be NO connection info to report.
self.csr.read_tunnel_statuses.return_value = [(u'Tunnel2', u'DOWN')]
report = self.driver.build_report_for_service(vpn_service)
expected_report = {
u'id': u'123',
u'updated_pending_status': False,
u'status': constants.DOWN,
u'ipsec_site_connections': {}
}
self.assertEqual(expected_report, report)
# Check that service and connection statuses are updated
self.assertEqual(constants.DOWN, vpn_service.last_status)
self.assertEqual(constants.DOWN,
vpn_service.get_connection(u'2').last_status)
def test_report_service_admin_down_with_two_connections(self):
"""One service admin down, with two connections - report.
When the service is admin down, all the connections will report
as DOWN.
"""
# Simulate one service (admin down) with two ACTIVE connections
conn1_data = {u'id': u'1', u'status': constants.ACTIVE,
u'admin_state_up': True,
u'cisco': {u'site_conn_id': u'Tunnel1'}}
conn2_data = {u'id': u'2', u'status': constants.ACTIVE,
u'admin_state_up': True,
u'cisco': {u'site_conn_id': u'Tunnel2'}}
service_data = {u'id': u'123',
u'status': constants.ACTIVE,
u'admin_state_up': False,
u'ipsec_conns': [conn1_data, conn2_data]}
service_data.update(self.router_info)
vpn_service = self.driver.update_service(self.context, service_data)
vpn_service.csr = self.csr # Mocked REST client
# Since service admin down, connections will have been deleted
self.csr.read_tunnel_statuses.return_value = []
report = self.driver.build_report_for_service(vpn_service)
expected_report = {
u'id': u'123',
u'updated_pending_status': False,
u'status': constants.DOWN,
u'ipsec_site_connections': {
u'1': {u'status': constants.DOWN,
u'updated_pending_status': False},
u'2': {u'status': constants.DOWN,
u'updated_pending_status': False}}
}
self.assertEqual(expected_report, report)
# Check that service and connection statuses are updated
self.assertEqual(constants.DOWN, vpn_service.last_status)
self.assertEqual(constants.DOWN,
vpn_service.get_connection(u'1').last_status)
self.assertEqual(constants.DOWN,
vpn_service.get_connection(u'2').last_status)
def test_report_multiple_services(self):
"""Status changes for several services - report."""
# Simulate creation of the service and connection
(service1_data,
service2_data) = self.notification_for_two_services_with_two_conns()
vpn_service1 = self.driver.update_service(self.context, service1_data)
vpn_service2 = self.driver.update_service(self.context, service2_data)
# Simulate that the CSR has created the connections
vpn_service1.csr = vpn_service2.csr = self.csr # Mocked REST client
self.csr.read_tunnel_statuses.return_value = [
(u'Tunnel1', u'UP-ACTIVE'), (u'Tunnel2', u'DOWN'),
(u'Tunnel3', u'DOWN-NEGOTIATING'), (u'Tunnel4', u'UP-IDLE')]
report = self.driver.report_status(self.context)
expected_report = [{u'id': u'123',
u'updated_pending_status': True,
u'status': constants.ACTIVE,
u'ipsec_site_connections': {
u'1': {u'status': constants.ACTIVE,
u'updated_pending_status': True},
u'2': {u'status': constants.DOWN,
u'updated_pending_status': True}}
},
{u'id': u'456',
u'updated_pending_status': True,
u'status': constants.ACTIVE,
u'ipsec_site_connections': {
u'3': {u'status': constants.DOWN,
u'updated_pending_status': True},
u'4': {u'status': constants.ACTIVE,
u'updated_pending_status': True}}
}]
self.assertEqual(expected_report,
sorted(report, key=operator.itemgetter('id')))
# Check that service and connection statuses are updated
self.assertEqual(constants.ACTIVE, vpn_service1.last_status)
self.assertEqual(constants.ACTIVE,
vpn_service1.get_connection(u'1').last_status)
self.assertEqual(constants.DOWN,
vpn_service1.get_connection(u'2').last_status)
self.assertEqual(constants.ACTIVE, vpn_service2.last_status)
self.assertEqual(constants.DOWN,
vpn_service2.get_connection(u'3').last_status)
self.assertEqual(constants.ACTIVE,
vpn_service2.get_connection(u'4').last_status)
# TODO(pcm) FUTURE - UTs for update action, when supported.
def test_vpnservice_updated(self):
with mock.patch.object(self.driver, 'sync') as sync:
context = mock.Mock()
self.driver.vpnservice_updated(context)
sync.assert_called_once_with(context, [])
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/vpn/device_drivers/test_ipsec.py 0000664 0005670 0005671 00000170623 12701407726 033201 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import difflib
import io
import mock
import os
import socket
from neutron.agent.l3 import dvr_edge_router
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import legacy_router
from neutron.agent.linux import iptables_manager
from neutron.plugins.common import constants
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron_vpnaas.extensions import vpnaas
from neutron_vpnaas.services.vpn.device_drivers import fedora_strongswan_ipsec
from neutron_vpnaas.services.vpn.device_drivers import ipsec as openswan_ipsec
from neutron_vpnaas.services.vpn.device_drivers import libreswan_ipsec
from neutron_vpnaas.services.vpn.device_drivers import strongswan_ipsec
from neutron_vpnaas.tests import base
_uuid = uuidutils.generate_uuid
FAKE_HOST = 'fake_host'
FAKE_ROUTER_ID = _uuid()
FAKE_IPSEC_SITE_CONNECTION1_ID = _uuid()
FAKE_IPSEC_SITE_CONNECTION2_ID = _uuid()
FAKE_IKE_POLICY = {
'ike_version': 'v1',
'encryption_algorithm': 'aes-128',
'auth_algorithm': 'sha1',
'pfs': 'group5'
}
FAKE_IPSEC_POLICY = {
'encryption_algorithm': 'aes-128',
'auth_algorithm': 'sha1',
'pfs': 'group5',
'transform_protocol': 'esp'
}
FAKE_VPN_SERVICE = {
'id': _uuid(),
'router_id': FAKE_ROUTER_ID,
'name': 'myvpn',
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'external_ip': '60.0.0.4',
'ipsec_site_connections': [
{'peer_cidrs': ['20.0.0.0/24',
'30.0.0.0/24'],
'local_cidrs': ['10.0.0.0/24'],
'local_ip_vers': 4,
'admin_state_up': True,
'id': FAKE_IPSEC_SITE_CONNECTION1_ID,
'external_ip': '60.0.0.4',
'peer_address': '60.0.0.5',
'mtu': 1500,
'peer_id': '60.0.0.5',
'psk': 'password',
'initiator': 'bi-directional',
'ikepolicy': FAKE_IKE_POLICY,
'ipsecpolicy': FAKE_IPSEC_POLICY,
'status': constants.PENDING_CREATE},
{'peer_cidrs': ['40.0.0.0/24',
'50.0.0.0/24'],
'local_cidrs': ['11.0.0.0/24'],
'local_ip_vers': 4,
'admin_state_up': True,
'external_ip': '60.0.0.4',
'peer_address': '60.0.0.6',
'peer_id': '60.0.0.6',
'mtu': 1500,
'psk': 'password',
'id': FAKE_IPSEC_SITE_CONNECTION2_ID,
'initiator': 'bi-directional',
'ikepolicy': FAKE_IKE_POLICY,
'ipsecpolicy': FAKE_IPSEC_POLICY,
'status': constants.PENDING_CREATE}]
}
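# The FAKE_VPN_SERVICE fixture above defines two IPSec site connections
# with distinct local/peer CIDR pairs; the expected config templates and
# NAT-rule tests below are built around those values.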
AUTH_ESP = '''esp
# [encryption_algorithm]-[auth_algorithm]-[pfs]
phase2alg=aes128-sha1;modp1536'''
AUTH_AH = '''ah
# AH protocol does not support encryption
# [auth_algorithm]-[pfs]
phase2alg=sha1;modp1536'''
OPENSWAN_CONNECTION_DETAILS = '''# rightsubnet=networkA/netmaskA, networkB/netmaskB (IKEv2 only)
# [mtu]
mtu=1500
# [dpd_action]
dpdaction=
# [dpd_interval]
dpddelay=
# [dpd_timeout]
dpdtimeout=
# [auth_mode]
authby=secret
######################
# IKEPolicy params
######################
#ike version
ikev2=never
# [encryption_algorithm]-[auth_algorithm]-[pfs]
ike=aes128-sha1;modp1536
# [lifetime_value]
ikelifetime=s
# NOTE: it looks lifetime_units=kilobytes can't be enforced \
(could be seconds, hours, days...)
##########################
# IPsecPolicys params
##########################
# [transform_protocol]
auth=%(auth_mode)s
# [encapsulation_mode]
type=
# [lifetime_value]
lifetime=s
# lifebytes=100000 if lifetime_units=kilobytes (IKEv2 only)
'''
IPV4_NEXT_HOP = '''# NOTE: a default route is required for %defaultroute to work...
leftnexthop=%defaultroute
rightnexthop=%defaultroute'''
IPV6_NEXT_HOP = '''# To recognize the given IP addresses in this config
# as IPv6 addresses by pluto whack. Default is ipv4
connaddrfamily=ipv6
# openswan can't process defaultroute for ipv6.
# Assign gateway address as leftnexthop
leftnexthop=%s
# rightnexthop is not mandatory for ipsec, so no need in ipv6.'''
EXPECTED_OPENSWAN_CONF = """
# Configuration for myvpn
config setup
nat_traversal=yes
conn %%default
ikelifetime=480m
keylife=60m
keyingtries=%%forever
conn %(conn1_id)s
%(next_hop)s
left=%(left)s
leftid=%(left)s
auto=start
# NOTE:REQUIRED
# [subnet]
leftsubnet%(local_cidrs1)s
# [updown]
# What "updown" script to run to adjust routing and/or firewalling when
# the status of the connection changes (default "ipsec _updown").
# "--route yes" allows to specify such routing options as mtu and metric.
leftupdown="ipsec _updown --route yes"
######################
# ipsec_site_connections
######################
# [peer_address]
right=%(right1)s
# [peer_id]
rightid=%(right1)s
# [peer_cidrs]
rightsubnets={ %(peer_cidrs1)s }
%(conn_details)sconn %(conn2_id)s
%(next_hop)s
left=%(left)s
leftid=%(left)s
auto=start
# NOTE:REQUIRED
# [subnet]
leftsubnet%(local_cidrs2)s
# [updown]
# What "updown" script to run to adjust routing and/or firewalling when
# the status of the connection changes (default "ipsec _updown").
# "--route yes" allows to specify such routing options as mtu and metric.
leftupdown="ipsec _updown --route yes"
######################
# ipsec_site_connections
######################
# [peer_address]
right=%(right2)s
# [peer_id]
rightid=%(right2)s
# [peer_cidrs]
rightsubnets={ %(peer_cidrs2)s }
%(conn_details)s
"""
EXPECTED_IPSEC_OPENSWAN_SECRET_CONF = '''
# Configuration for myvpn
60.0.0.4 60.0.0.5 : PSK "password"
60.0.0.4 60.0.0.6 : PSK "password"'''
EXPECTED_IPSEC_STRONGSWAN_CONF = '''
# Configuration for myvpn
config setup
conn %%default
ikelifetime=60m
keylife=20m
rekeymargin=3m
keyingtries=1
authby=psk
mobike=no
conn %(conn1_id)s
keyexchange=ikev1
left=%(left)s
leftsubnet=%(local_cidrs1)s
leftid=%(left)s
leftfirewall=yes
right=%(right1)s
rightsubnet=%(peer_cidrs1)s
rightid=%(right1)s
auto=route
conn %(conn2_id)s
keyexchange=ikev1
left=%(left)s
leftsubnet=%(local_cidrs2)s
leftid=%(left)s
leftfirewall=yes
right=%(right2)s
rightsubnet=%(peer_cidrs2)s
rightid=%(right2)s
auto=route
'''
EXPECTED_STRONGSWAN_DEFAULT_CONF = '''
charon {
load_modular = yes
plugins {
include strongswan.d/charon/*.conf
}
}
include strongswan.d/*.conf
'''
EXPECTED_IPSEC_STRONGSWAN_SECRET_CONF = '''
# Configuration for myvpn
60.0.0.4 60.0.0.5 : PSK "password"
60.0.0.4 60.0.0.6 : PSK "password"
'''
PLUTO_ACTIVE_STATUS = """000 "%(conn_id)s/0x1": erouted;\n
000 #4: "%(conn_id)s/0x1":500 STATE_QUICK_R2 (IPsec SA established);""" % {
'conn_id': FAKE_IPSEC_SITE_CONNECTION2_ID}
PLUTO_ACTIVE_NO_IPSEC_SA_STATUS = """000 "%(conn_id)s/0x1": erouted;\n
000 #258: "%(conn_id)s/0x1":500 STATE_MAIN_R2 (sent MR2, expecting MI3);""" % {
'conn_id': FAKE_IPSEC_SITE_CONNECTION2_ID}
PLUTO_DOWN_STATUS = "000 \"%(conn_id)s/0x1\": unrouted;" % {'conn_id':
FAKE_IPSEC_SITE_CONNECTION2_ID}
CHARON_ACTIVE_STATUS = "%(conn_id)s{1}: INSTALLED, TUNNEL" % {'conn_id':
FAKE_IPSEC_SITE_CONNECTION2_ID}
CHARON_DOWN_STATUS = "%(conn_id)s{1}: ROUTED, TUNNEL" % {'conn_id':
FAKE_IPSEC_SITE_CONNECTION2_ID}
NOT_RUNNING_STATUS = "Command: ['ipsec', 'status'] Exit code: 3 Stdout:"
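# These status fixture strings emulate 'ipsec status' output for the
# pluto (OpenSwan/LibreSwan) and charon (StrongSwan) daemons; judging by
# their names, the *_ACTIVE_STATUS strings are treated as ACTIVE, the
# *_DOWN_STATUS strings as DOWN, and NOT_RUNNING_STATUS covers the case
# where the ipsec service is not running.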
class BaseIPsecDeviceDriver(base.BaseTestCase):
def setUp(self, driver=openswan_ipsec.OpenSwanDriver,
ipsec_process=openswan_ipsec.OpenSwanProcess,
vpnservice=FAKE_VPN_SERVICE):
super(BaseIPsecDeviceDriver, self).setUp()
for klass in [
'neutron.common.rpc.create_connection',
'oslo_service.loopingcall.FixedIntervalLoopingCall'
]:
mock.patch(klass).start()
self._execute = mock.patch.object(ipsec_process, '_execute').start()
self.agent = mock.Mock()
self.conf = cfg.CONF
self.agent.conf = self.conf
self.driver = driver(
self.agent,
FAKE_HOST)
self.driver.agent_rpc = mock.Mock()
self.ri_kwargs = {'router': {'id': FAKE_ROUTER_ID, 'ha': False},
'agent_conf': self.conf,
'interface_driver': mock.sentinel.interface_driver}
self.iptables = mock.Mock()
self.apply_mock = mock.Mock()
self.vpnservice = copy.deepcopy(vpnservice)
@staticmethod
def generate_diff(a, b):
"""Generates unified diff of a and b."""
by_lines = lambda x: x.splitlines(True)
a, b = list(by_lines(a)), list(by_lines(b))
diff = difflib.unified_diff(a, b, fromfile="expected",
tofile="actual")
return diff
def modify_config_for_test(self, overrides):
"""Revise service/connection settings to test variations.
The service must then be updated, so that dialect mappings occur
for any changes that are made.
"""
ipsec_auth_protocol = overrides.get('ipsec_auth')
if ipsec_auth_protocol:
auth_proto = {'transform_protocol': ipsec_auth_protocol}
for conn in self.vpnservice['ipsec_site_connections']:
conn['ipsecpolicy'].update(auth_proto)
local_cidrs = overrides.get('local_cidrs')
if local_cidrs:
for i, conn in enumerate(
self.vpnservice['ipsec_site_connections']):
conn['local_cidrs'] = local_cidrs[i]
local_ip_version = overrides.get('local_ip_vers', 4)
for conn in self.vpnservice['ipsec_site_connections']:
conn['local_ip_vers'] = local_ip_version
peer_cidrs = overrides.get('peer_cidrs')
if peer_cidrs:
for i, conn in enumerate(
self.vpnservice['ipsec_site_connections']):
conn['peer_cidrs'] = peer_cidrs[i]
peers = overrides.get('peers')
if peers:
for i, conn in enumerate(
self.vpnservice['ipsec_site_connections']):
conn['peer_id'] = peers[i]
conn['peer_address'] = peers[i]
local_ip = overrides.get('local')
if local_ip:
for conn in self.vpnservice['ipsec_site_connections']:
conn['external_ip'] = local_ip
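# Example overrides dict, as used by the multiple-local-subnet NAT test
# below; other supported keys are 'ipsec_auth', 'peer_cidrs', 'peers',
# 'local', and 'local_ip_vers':
#   {'local_cidrs': [['10.0.0.0/24', '11.0.0.0/24'],
#                    ['12.0.0.0/24', '13.0.0.0/24']]}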
def check_config_file(self, expected, actual):
expected = expected.strip()
actual = actual.strip()
res_diff = self.generate_diff(expected, actual)
self.assertEqual(expected, actual, message=''.join(res_diff))
def _test_ipsec_connection_config(self, info):
"""Check config file string for service/connection.
Calls a test-specific method to create (and override as needed) the
expected config file string, generates the config using the test's
IPSec template, and then compares the results.
"""
expected = self.build_ipsec_expected_config_for_test(info)
actual = self.process._gen_config_content(self.ipsec_template,
self.vpnservice)
self.check_config_file(expected, actual)
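# Subclasses are expected to provide self.process, self.ipsec_template
# and build_ipsec_expected_config_for_test() before calling the helper
# above (presumably in their setUp); none of those are defined in this
# base class.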
class IPSecDeviceLegacy(BaseIPsecDeviceDriver):
def setUp(self, driver=openswan_ipsec.OpenSwanDriver,
ipsec_process=openswan_ipsec.OpenSwanProcess):
super(IPSecDeviceLegacy, self).setUp(driver, ipsec_process)
self._make_router_info_for_test()
def _make_router_info_for_test(self):
self.router = legacy_router.LegacyRouter(FAKE_ROUTER_ID,
**self.ri_kwargs)
self.router.router['distributed'] = False
self.router.iptables_manager.ipv4['nat'] = self.iptables
self.router.iptables_manager.apply = self.apply_mock
self.driver.routers[FAKE_ROUTER_ID] = self.router
def _test_vpnservice_updated(self, expected_param, **kwargs):
with mock.patch.object(self.driver, 'sync') as sync:
context = mock.Mock()
self.driver.vpnservice_updated(context, **kwargs)
sync.assert_called_once_with(context, expected_param)
def test_vpnservice_updated(self):
self._test_vpnservice_updated([])
def test_vpnservice_updated_with_router_info(self):
router_info = {'id': FAKE_ROUTER_ID, 'ha': False}
kwargs = {'router': router_info}
self._test_vpnservice_updated([router_info], **kwargs)
def test_create_router(self):
process = mock.Mock(openswan_ipsec.OpenSwanProcess)
process.vpnservice = self.vpnservice
self.driver.processes = {
FAKE_ROUTER_ID: process}
self.driver.create_router(self.router)
self._test_add_nat_rule()
process.enable.assert_called_once_with()
def test_destroy_router(self):
process_id = _uuid()
process = mock.Mock()
process.vpnservice = self.vpnservice
self.driver.processes = {
process_id: process}
self.driver.destroy_router(process_id)
process.disable.assert_called_once_with()
self.assertNotIn(process_id, self.driver.processes)
def _test_add_nat_rule(self):
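# The fake VPN service has two connections; one POSTROUTING ACCEPT
# rule is expected for every (local CIDR, peer CIDR) pair of each
# connection.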
self.router.iptables_manager.ipv4['nat'].assert_has_calls([
mock.call.add_rule(
'POSTROUTING',
'-s 10.0.0.0/24 -d 20.0.0.0/24 -m policy '
'--dir out --pol ipsec -j ACCEPT ',
top=True),
mock.call.add_rule(
'POSTROUTING',
'-s 10.0.0.0/24 -d 30.0.0.0/24 -m policy '
'--dir out --pol ipsec -j ACCEPT ',
top=True),
mock.call.add_rule(
'POSTROUTING',
'-s 11.0.0.0/24 -d 40.0.0.0/24 -m policy '
'--dir out --pol ipsec -j ACCEPT ',
top=True),
mock.call.add_rule(
'POSTROUTING',
'-s 11.0.0.0/24 -d 50.0.0.0/24 -m policy '
'--dir out --pol ipsec -j ACCEPT ',
top=True)
])
self.router.iptables_manager.apply.assert_called_once_with()
def _test_add_nat_rule_with_multiple_locals(self):
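# With two local subnets per connection, every local CIDR is paired
# with every peer CIDR of that connection, so four rules are expected
# per connection (eight in total).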
self.router.iptables_manager.ipv4['nat'].assert_has_calls([
mock.call.add_rule(
'POSTROUTING',
'-s 10.0.0.0/24 -d 20.0.0.0/24 -m policy '
'--dir out --pol ipsec -j ACCEPT ',
top=True),
mock.call.add_rule(
'POSTROUTING',
'-s 10.0.0.0/24 -d 30.0.0.0/24 -m policy '
'--dir out --pol ipsec -j ACCEPT ',
top=True),
mock.call.add_rule(
'POSTROUTING',
'-s 11.0.0.0/24 -d 20.0.0.0/24 -m policy '
'--dir out --pol ipsec -j ACCEPT ',
top=True),
mock.call.add_rule(
'POSTROUTING',
'-s 11.0.0.0/24 -d 30.0.0.0/24 -m policy '
'--dir out --pol ipsec -j ACCEPT ',
top=True),
mock.call.add_rule(
'POSTROUTING',
'-s 12.0.0.0/24 -d 40.0.0.0/24 -m policy '
'--dir out --pol ipsec -j ACCEPT ',
top=True),
mock.call.add_rule(
'POSTROUTING',
'-s 12.0.0.0/24 -d 50.0.0.0/24 -m policy '
'--dir out --pol ipsec -j ACCEPT ',
top=True),
mock.call.add_rule(
'POSTROUTING',
'-s 13.0.0.0/24 -d 40.0.0.0/24 -m policy '
'--dir out --pol ipsec -j ACCEPT ',
top=True),
mock.call.add_rule(
'POSTROUTING',
'-s 13.0.0.0/24 -d 50.0.0.0/24 -m policy '
'--dir out --pol ipsec -j ACCEPT ',
top=True)
])
self.router.iptables_manager.apply.assert_called_once_with()
def test_sync(self):
fake_vpn_service = FAKE_VPN_SERVICE
self.driver.agent_rpc.get_vpn_services_on_host.return_value = [
fake_vpn_service]
context = mock.Mock()
self.driver._sync_vpn_processes = mock.Mock()
self.driver._delete_vpn_processes = mock.Mock()
self.driver._cleanup_stale_vpn_processes = mock.Mock()
sync_routers = [{'id': fake_vpn_service['router_id']}]
sync_router_ids = [fake_vpn_service['router_id']]
self.driver.sync(context, sync_routers)
self.driver._sync_vpn_processes.assert_called_once_with(
[fake_vpn_service], sync_router_ids)
self.driver._delete_vpn_processes.assert_called_once_with(
sync_router_ids, sync_router_ids)
self.driver._cleanup_stale_vpn_processes.assert_called_once_with(
sync_router_ids)
def test__sync_vpn_processes_new_vpn_service(self):
new_vpnservice = self.vpnservice
router_id = new_vpnservice['router_id']
self.driver.processes = {}
with mock.patch.object(self.driver, 'ensure_process') as ensure_p:
ensure_p.side_effect = self.fake_ensure_process
self.driver._sync_vpn_processes([new_vpnservice], router_id)
self._test_add_nat_rule()
self.driver.processes[router_id].update.assert_called_once_with()
def test_add_nat_rules_with_multiple_local_subnets(self):
"""Ensure that add nat rule combinations are correct."""
overrides = {'local_cidrs': [['10.0.0.0/24', '11.0.0.0/24'],
['12.0.0.0/24', '13.0.0.0/24']]}
self.modify_config_for_test(overrides)
self.driver._update_nat(self.vpnservice, self.driver.add_nat_rule)
self._test_add_nat_rule_with_multiple_locals()
def test__sync_vpn_processes_router_with_no_vpn(self):
"""Test _sync_vpn_processes with a router not hosting vpnservice.
This test case tests that when a router which doesn't host
vpn services is updated, sync_vpn_processes doesn't restart/update
the existing vpnservice processes.
"""
process = mock.Mock()
process.vpnservice = self.vpnservice
process.connection_status = {}
self.driver.processes = {
self.vpnservice['router_id']: process}
router_id_no_vpn = _uuid()
with mock.patch.object(self.driver, 'ensure_process') as ensure_p:
self.driver._sync_vpn_processes([self.vpnservice],
[router_id_no_vpn])
self.assertEqual(0, ensure_p.call_count)
def test__sync_vpn_processes_router_with_no_vpn_and_no_vpn_services(self):
"""No vpn services running and router not hosting vpn svc."""
router_id_no_vpn = _uuid()
self.driver.process_status_cache = {}
self.driver.processes = {}
with mock.patch.object(self.driver, 'ensure_process') as ensure_p:
ensure_p.side_effect = self.fake_ensure_process
self.driver._sync_vpn_processes([], [router_id_no_vpn])
self.assertEqual(0, ensure_p.call_count)
def test__sync_vpn_processes_router_with_no_vpn_agent_restarted(self):
"""Test for the router not hosting vpnservice and agent restarted.
This test case tests that when a non vpnservice hosted router
is updated, _sync_vpn_processes restart/update the existing vpnservices
which are not yet stored in driver.processes.
"""
router_id = FAKE_ROUTER_ID
self.driver.process_status_cache = {}
self.driver.processes = {}
with mock.patch.object(self.driver, 'ensure_process') as ensure_p:
ensure_p.side_effect = self.fake_ensure_process
self.driver._sync_vpn_processes([self.vpnservice], [router_id])
self._test_add_nat_rule()
self.driver.processes[router_id].update.assert_called_once_with()
def test_delete_vpn_processes(self):
router_id_no_vpn = _uuid()
vpn_service_router_id = _uuid()
with mock.patch.object(self.driver,
'destroy_process') as (fake_destroy_process):
self.driver._delete_vpn_processes([router_id_no_vpn],
[vpn_service_router_id])
fake_destroy_process.assert_has_calls(
[mock.call(router_id_no_vpn)])
# Test that _delete_vpn_processes doesn't delete the
# valid vpn processes.
with mock.patch.object(self.driver,
'destroy_process') as fake_destroy_process:
self.driver._delete_vpn_processes([vpn_service_router_id],
[vpn_service_router_id])
self.assertFalse(fake_destroy_process.called)
def test_cleanup_stale_vpn_processes(self):
stale_vpn_service = {'router_id': _uuid()}
active_vpn_service = {'router_id': _uuid()}
self.driver.processes = {
stale_vpn_service['router_id']: stale_vpn_service,
active_vpn_service['router_id']: active_vpn_service}
with mock.patch.object(self.driver, 'destroy_process') as destroy_p:
self.driver._cleanup_stale_vpn_processes(
[active_vpn_service['router_id']])
destroy_p.assert_has_calls(
[mock.call(stale_vpn_service['router_id'])])
def fake_ensure_process(self, process_id, vpnservice=None):
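# Stand-in for driver.ensure_process(): returns a cached mock per
# router, creating an ACTIVE mock on first use and forwarding a new
# vpnservice to update_vpnservice() when one is supplied.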
process = self.driver.processes.get(process_id)
if not process:
process = mock.Mock()
process.vpnservice = self.vpnservice
process.connection_status = {}
process.status = constants.ACTIVE
process.updated_pending_status = True
self.driver.processes[process_id] = process
elif vpnservice:
process.vpnservice = vpnservice
process.update_vpnservice(vpnservice)
return process
def fake_destroy_router(self, process_id):
process = self.driver.processes.get(process_id)
if process:
del self.driver.processes[process_id]
def test_sync_update_vpnservice(self):
with mock.patch.object(self.driver,
'ensure_process') as ensure_process:
ensure_process.side_effect = self.fake_ensure_process
new_vpn_service = self.vpnservice
updated_vpn_service = copy.deepcopy(new_vpn_service)
updated_vpn_service['ipsec_site_connections'][1].update(
{'peer_cidrs': ['60.0.0.0/24', '70.0.0.0/24']})
context = mock.Mock()
self.driver.process_status_cache = {}
self.driver.agent_rpc.get_vpn_services_on_host.return_value = [
new_vpn_service]
self.driver.sync(context, [{'id': FAKE_ROUTER_ID}])
process = self.driver.processes[FAKE_ROUTER_ID]
self.assertEqual(new_vpn_service, process.vpnservice)
self.driver.agent_rpc.get_vpn_services_on_host.return_value = [
updated_vpn_service]
self.driver.sync(context, [{'id': FAKE_ROUTER_ID}])
process = self.driver.processes[FAKE_ROUTER_ID]
process.update_vpnservice.assert_called_once_with(
updated_vpn_service)
self.assertEqual(updated_vpn_service, process.vpnservice)
def test_sync_removed(self):
self.driver.agent_rpc.get_vpn_services_on_host.return_value = []
context = mock.Mock()
process_id = _uuid()
process = mock.Mock()
process.vpnservice = self.vpnservice
self.driver.processes = {
process_id: process}
self.driver.sync(context, [])
process.disable.assert_called_once_with()
self.assertNotIn(process_id, self.driver.processes)
def test_sync_removed_router(self):
self.driver.agent_rpc.get_vpn_services_on_host.return_value = []
context = mock.Mock()
process_id = _uuid()
self.driver.sync(context, [{'id': process_id}])
self.assertNotIn(process_id, self.driver.processes)
def test_status_updated_on_connection_admin_down(self):
self.driver.process_status_cache = {
'1': {
'status': constants.ACTIVE,
'id': 123,
'updated_pending_status': False,
'ipsec_site_connections': {
'10': {
'status': constants.ACTIVE,
'updated_pending_status': False,
},
'20': {
'status': constants.ACTIVE,
'updated_pending_status': False,
}
}
}
}
# Simulate that there is no longer status for connection '20'
# e.g. connection admin down
new_status = {
'ipsec_site_connections': {
'10': {
'status': constants.ACTIVE,
'updated_pending_status': False
}
}
}
self.driver.update_downed_connections('1', new_status)
existing_conn = new_status['ipsec_site_connections'].get('10')
self.assertIsNotNone(existing_conn)
self.assertEqual(constants.ACTIVE, existing_conn['status'])
missing_conn = new_status['ipsec_site_connections'].get('20')
self.assertIsNotNone(missing_conn)
self.assertEqual(constants.DOWN, missing_conn['status'])
def test_status_updated_on_service_admin_down(self):
self.driver.process_status_cache = {
'1': {
'status': constants.ACTIVE,
'id': 123,
'updated_pending_status': False,
'ipsec_site_connections': {
'10': {
'status': constants.ACTIVE,
'updated_pending_status': False,
},
'20': {
'status': constants.ACTIVE,
'updated_pending_status': False,
}
}
}
}
# Simulate that there are no connections now
new_status = {
'ipsec_site_connections': {}
}
self.driver.update_downed_connections('1', new_status)
missing_conn = new_status['ipsec_site_connections'].get('10')
self.assertIsNotNone(missing_conn)
self.assertEqual(constants.DOWN, missing_conn['status'])
missing_conn = new_status['ipsec_site_connections'].get('20')
self.assertIsNotNone(missing_conn)
self.assertEqual(constants.DOWN, missing_conn['status'])
def _test_status_handling_for_downed_connection(self, down_status):
"""Test status handling for downed connection."""
router_id = self.router.router_id
connection_id = FAKE_IPSEC_SITE_CONNECTION2_ID
self.driver.ensure_process(router_id, self.vpnservice)
self._execute.return_value = down_status
self.driver.report_status(mock.Mock())
process_status = self.driver.process_status_cache[router_id]
ipsec_site_conn = process_status['ipsec_site_connections']
self.assertEqual(constants.ACTIVE, process_status['status'])
self.assertEqual(constants.DOWN,
ipsec_site_conn[connection_id]['status'])
def _test_status_handling_for_active_connection(self, active_status):
"""Test status handling for active connection."""
router_id = self.router.router_id
connection_id = FAKE_IPSEC_SITE_CONNECTION2_ID
self.driver.ensure_process(router_id, self.vpnservice)
self._execute.return_value = active_status
self.driver.report_status(mock.Mock())
process_status = self.driver.process_status_cache[
router_id]
ipsec_site_conn = process_status['ipsec_site_connections']
self.assertEqual(constants.ACTIVE, process_status['status'])
self.assertEqual(constants.ACTIVE,
ipsec_site_conn[connection_id]['status'])
def _test_status_handling_for_deleted_connection(self,
not_running_status):
"""Test status handling for deleted connection."""
router_id = self.router.router_id
self.driver.ensure_process(router_id, self.vpnservice)
self._execute.return_value = not_running_status
self.driver.report_status(mock.Mock())
process_status = self.driver.process_status_cache[router_id]
ipsec_site_conn = process_status['ipsec_site_connections']
self.assertEqual(constants.DOWN, process_status['status'])
self.assertFalse(ipsec_site_conn)
def _test_parse_connection_status(self, not_running_status,
active_status, down_status):
"""Test the status of ipsec-site-connection is parsed correctly."""
router_id = self.router.router_id
process = self.driver.ensure_process(router_id, self.vpnservice)
self._execute.return_value = not_running_status
self.assertFalse(process.active)
# An empty return value to simulate that the process
# does not have any status to report.
self._execute.return_value = ''
self.assertFalse(process.active)
self._execute.return_value = active_status
self.assertTrue(process.active)
self._execute.return_value = down_status
self.assertTrue(process.active)
def test_get_namespace_for_router(self):
namespace = self.driver.get_namespace(FAKE_ROUTER_ID)
self.assertEqual('qrouter-' + FAKE_ROUTER_ID, namespace)
def test_fail_getting_namespace_for_unknown_router(self):
self.assertFalse(self.driver.get_namespace('bogus_id'))
def test_add_nat_rule(self):
self.driver.add_nat_rule(FAKE_ROUTER_ID, 'fake_chain',
'fake_rule', True)
self.iptables.add_rule.assert_called_once_with(
'fake_chain', 'fake_rule', top=True)
def test_add_nat_rule_with_no_router(self):
self.driver.add_nat_rule(
'bogus_router_id',
'fake_chain',
'fake_rule',
True)
self.assertFalse(self.iptables.add_rule.called)
def test_remove_rule(self):
self.driver.remove_nat_rule(FAKE_ROUTER_ID, 'fake_chain',
'fake_rule', True)
self.iptables.remove_rule.assert_called_once_with(
'fake_chain', 'fake_rule', top=True)
def test_remove_rule_with_no_router(self):
self.driver.remove_nat_rule(
'bogus_router_id',
'fake_chain',
'fake_rule')
self.assertFalse(self.iptables.remove_rule.called)
def test_iptables_apply(self):
self.driver.iptables_apply(FAKE_ROUTER_ID)
self.apply_mock.assert_called_once_with()
def test_iptables_apply_with_no_router(self):
self.driver.iptables_apply('bogus_router_id')
self.assertFalse(self.apply_mock.called)
class IPSecDeviceDVR(BaseIPsecDeviceDriver):
def setUp(self, driver=openswan_ipsec.OpenSwanDriver,
ipsec_process=openswan_ipsec.OpenSwanProcess):
super(IPSecDeviceDVR, self).setUp(driver, ipsec_process)
mock.patch.object(dvr_snat_ns.SnatNamespace, 'create').start()
self._make_dvr_edge_router_info_for_test()
def _make_dvr_edge_router_info_for_test(self):
router = dvr_edge_router.DvrEdgeRouter(mock.sentinel.agent,
mock.sentinel.myhost,
FAKE_ROUTER_ID,
**self.ri_kwargs)
router.router['distributed'] = True
router.snat_namespace = dvr_snat_ns.SnatNamespace(router.router['id'],
mock.sentinel.agent,
self.driver,
mock.ANY)
router.snat_namespace.create()
router.snat_iptables_manager = iptables_manager.IptablesManager(
namespace='snat-' + FAKE_ROUTER_ID, use_ipv6=mock.ANY)
router.snat_iptables_manager.ipv4['nat'] = self.iptables
router.snat_iptables_manager.apply = self.apply_mock
self.driver.routers[FAKE_ROUTER_ID] = router
def test_get_namespace_for_dvr_edge_router(self):
namespace = self.driver.get_namespace(FAKE_ROUTER_ID)
self.assertEqual('snat-' + FAKE_ROUTER_ID, namespace)
def test_add_nat_rule_with_dvr_edge_router(self):
self.driver.add_nat_rule(FAKE_ROUTER_ID, 'fake_chain',
'fake_rule', True)
self.iptables.add_rule.assert_called_once_with(
'fake_chain', 'fake_rule', top=True)
def test_iptables_apply_with_dvr_edge_router(self):
self.driver.iptables_apply(FAKE_ROUTER_ID)
self.apply_mock.assert_called_once_with()
def test_remove_rule_with_dvr_edge_router(self):
self.driver.remove_nat_rule(FAKE_ROUTER_ID, 'fake_chain',
'fake_rule', True)
self.iptables.remove_rule.assert_called_once_with(
'fake_chain', 'fake_rule', top=True)
class TestOpenSwanConfigGeneration(BaseIPsecDeviceDriver):
"""Verify that configuration files are generated correctly.
Besides the normal translation of some settings, when creating the config
file, the generated file can also vary based on the following
special conditions:
- IPv6 versus IPv4
- Multiple left subnets versus a single left subnet
- IPSec policy using AH transform
The tests will focus on these variations.
"""
def setUp(self, driver=openswan_ipsec.OpenSwanDriver,
ipsec_process=openswan_ipsec.OpenSwanProcess):
super(TestOpenSwanConfigGeneration, self).setUp(
driver, ipsec_process, vpnservice=FAKE_VPN_SERVICE)
self.conf.register_opts(openswan_ipsec.openswan_opts, 'openswan')
self.conf.set_override('state_path', '/tmp')
self.ipsec_template = self.conf.openswan.ipsec_config_template
self.process = openswan_ipsec.OpenSwanProcess(self.conf,
'foo-process-id',
self.vpnservice,
mock.ANY)
def build_ipsec_expected_config_for_test(self, info):
"""Modify OpenSwan ipsec expected config files for test variations."""
auth_mode = info.get('ipsec_auth', AUTH_ESP)
conn_details = OPENSWAN_CONNECTION_DETAILS % {'auth_mode': auth_mode}
# Convert local CIDRs into assignment strings. If there is more than
# one, pluralize the attribute name and enclose the values in braces.
cidrs = info.get('local_cidrs', [['10.0.0.0/24'], ['11.0.0.0/24']])
local_cidrs = []
for cidr in cidrs:
if len(cidr) == 2:
local_cidrs.append("s={ %s }" % ' '.join(cidr))
else:
local_cidrs.append("=%s" % cidr[0])
# Convert peer CIDRs into space separated strings
cidrs = info.get('peer_cidrs', [['20.0.0.0/24', '30.0.0.0/24'],
['40.0.0.0/24', '50.0.0.0/24']])
peer_cidrs = [' '.join(cidr) for cidr in cidrs]
local_ip = info.get('local', '60.0.0.4')
version = info.get('local_ip_vers', 4)
next_hop = IPV4_NEXT_HOP if version == 4 else IPV6_NEXT_HOP % local_ip
peer_ips = info.get('peers', ['60.0.0.5', '60.0.0.6'])
return EXPECTED_OPENSWAN_CONF % {
'next_hop': next_hop,
'local_cidrs1': local_cidrs[0], 'local_cidrs2': local_cidrs[1],
'local_ver': version,
'peer_cidrs1': peer_cidrs[0], 'peer_cidrs2': peer_cidrs[1],
'left': local_ip,
'right1': peer_ips[0], 'right2': peer_ips[1],
'conn1_id': FAKE_IPSEC_SITE_CONNECTION1_ID,
'conn2_id': FAKE_IPSEC_SITE_CONNECTION2_ID,
'conn_details': conn_details}
def test_connections_with_esp_transform_protocol(self):
"""Test config file with IPSec policy using ESP."""
self._test_ipsec_connection_config({})
def test_connections_with_ah_transform_protocol(self):
"""Test config file with IPSec policy using ESP."""
overrides = {'ipsec_auth': 'ah'}
self.modify_config_for_test(overrides)
self.process.update_vpnservice(self.vpnservice)
info = {'ipsec_auth': AUTH_AH}
self._test_ipsec_connection_config(info)
def test_connections_with_multiple_left_subnets(self):
"""Test multiple local subnets.
The configuration uses the 'leftsubnets' attribute, instead of the
'leftsubnet' attribute.
"""
overrides = {'local_cidrs': [['10.0.0.0/24', '11.0.0.0/24'],
['12.0.0.0/24', '13.0.0.0/24']]}
self.modify_config_for_test(overrides)
self.process.update_vpnservice(self.vpnservice)
self._test_ipsec_connection_config(overrides)
def test_config_files_with_ipv6_addresses(self):
"""Test creating config files using IPv6 addressing."""
overrides = {'local_cidrs': [['2002:0a00::/48'], ['2002:0b00::/48']],
'local_ip_vers': 6,
'peer_cidrs': [['2002:1400::/48', '2002:1e00::/48'],
['2002:2800::/48', '2002:3200::/48']],
'local': '2002:3c00:0004::',
'peers': ['2002:3c00:0005::', '2002:3c00:0006::']}
self.modify_config_for_test(overrides)
self.process.update_vpnservice(self.vpnservice)
self._test_ipsec_connection_config(overrides)
def test_secrets_config_file(self):
expected = EXPECTED_IPSEC_OPENSWAN_SECRET_CONF
actual = self.process._gen_config_content(
self.conf.openswan.ipsec_secret_template, self.vpnservice)
self.check_config_file(expected, actual)
class IPsecStrongswanConfigGeneration(BaseIPsecDeviceDriver):
def setUp(self, driver=strongswan_ipsec.StrongSwanDriver,
ipsec_process=strongswan_ipsec.StrongSwanProcess):
super(IPsecStrongswanConfigGeneration, self).setUp(
driver, ipsec_process, vpnservice=FAKE_VPN_SERVICE)
self.conf.register_opts(strongswan_ipsec.strongswan_opts,
'strongswan')
self.conf.set_override('state_path', '/tmp')
self.ipsec_template = self.conf.strongswan.ipsec_config_template
self.process = strongswan_ipsec.StrongSwanProcess(self.conf,
'foo-process-id',
self.vpnservice,
mock.ANY)
def build_ipsec_expected_config_for_test(self, info):
cidrs = info.get('local_cidrs', [['10.0.0.0/24'], ['11.0.0.0/24']])
local_cidrs = [','.join(cidr) for cidr in cidrs]
cidrs = info.get('peer_cidrs', [['20.0.0.0/24', '30.0.0.0/24'],
['40.0.0.0/24', '50.0.0.0/24']])
peer_cidrs = [','.join(cidr) for cidr in cidrs]
local_ip = info.get('local', '60.0.0.4')
peer_ips = info.get('peers', ['60.0.0.5', '60.0.0.6'])
return EXPECTED_IPSEC_STRONGSWAN_CONF % {
'local_cidrs1': local_cidrs[0], 'local_cidrs2': local_cidrs[1],
'peer_cidrs1': peer_cidrs[0], 'peer_cidrs2': peer_cidrs[1],
'left': local_ip,
'right1': peer_ips[0], 'right2': peer_ips[1],
'conn1_id': FAKE_IPSEC_SITE_CONNECTION1_ID,
'conn2_id': FAKE_IPSEC_SITE_CONNECTION2_ID}
def test_ipsec_config_file(self):
self._test_ipsec_connection_config({})
def test_ipsec_config_file_for_v6(self):
overrides = {'local_cidrs': [['2002:0a00::/48'], ['2002:0b00::/48']],
'peer_cidrs': [['2002:1400::/48', '2002:1e00::/48'],
['2002:2800::/48', '2002:3200::/48']],
'local': '2002:3c00:0004::',
'peers': ['2002:3c00:0005::', '2002:3c00:0006::']}
self.modify_config_for_test(overrides)
self.process.update_vpnservice(self.vpnservice)
self._test_ipsec_connection_config(overrides)
def test_strongswan_default_config_file(self):
expected = EXPECTED_STRONGSWAN_DEFAULT_CONF
actual = self.process._gen_config_content(
self.conf.strongswan.strongswan_config_template, self.vpnservice)
self.check_config_file(expected, actual)
def test_secrets_config_file(self):
expected = EXPECTED_IPSEC_STRONGSWAN_SECRET_CONF
actual = self.process._gen_config_content(
self.conf.strongswan.ipsec_secret_template, self.vpnservice)
self.check_config_file(expected, actual)
class TestOpenSwanProcess(IPSecDeviceLegacy):
_test_timeout = 1
_test_backoff = 2
_test_retries = 5
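# With a 1 second timeout, a back-off factor of 2 and 5 retries, a
# restart that never sees the process stop should sleep for 1, 2, 4,
# 8 and 16 seconds (asserted in test_restart_process_doesnt_stop).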
def setUp(self, driver=openswan_ipsec.OpenSwanDriver,
ipsec_process=openswan_ipsec.OpenSwanProcess):
super(TestOpenSwanProcess, self).setUp(driver, ipsec_process)
self.conf.register_opts(openswan_ipsec.openswan_opts,
'openswan')
self.conf.set_override('state_path', '/tmp')
cfg.CONF.register_opts(openswan_ipsec.pluto_opts,
'pluto')
cfg.CONF.set_override('shutdown_check_timeout', self._test_timeout,
group='pluto')
cfg.CONF.set_override('shutdown_check_back_off', self._test_backoff,
group='pluto')
cfg.CONF.set_override('shutdown_check_retries', self._test_retries,
group='pluto')
self.addCleanup(cfg.CONF.reset)
self.os_remove = mock.patch('os.remove').start()
self.process = openswan_ipsec.OpenSwanProcess(self.conf,
'foo-process-id',
self.vpnservice,
mock.ANY)
def test__resolve_fqdn(self):
with mock.patch.object(socket, 'getaddrinfo') as mock_getaddr_info:
mock_getaddr_info.return_value = [(2, 1, 6, '',
('172.168.1.2', 0))]
resolved_ip_addr = self.process._resolve_fqdn('fqdn.foo.addr')
self.assertEqual('172.168.1.2', resolved_ip_addr)
def _test_get_nexthop_helper(self, address, _resolve_fqdn_side_effect,
expected_ip_cmd, expected_nexthop):
with mock.patch.object(self.process,
'_resolve_fqdn') as fake_resolve_fqdn:
fake_resolve_fqdn.side_effect = _resolve_fqdn_side_effect
returned_next_hop = self.process._get_nexthop(address,
'fake-conn-id')
_resolve_fqdn_expected_call_count = (
1 if _resolve_fqdn_side_effect else 0)
self.assertEqual(_resolve_fqdn_expected_call_count,
fake_resolve_fqdn.call_count)
self._execute.assert_called_once_with(expected_ip_cmd)
self.assertEqual(expected_nexthop, returned_next_hop)
def test__get_nexthop_peer_addr_is_ipaddr(self):
gw_addr = '10.0.0.1'
self._execute.return_value = '172.168.1.2 via %s' % gw_addr
peer_address = '172.168.1.2'
expected_ip_cmd = ['ip', 'route', 'get', peer_address]
self._test_get_nexthop_helper(peer_address, None,
expected_ip_cmd, gw_addr)
def test__get_nexthop_peer_addr_is_valid_fqdn(self):
peer_address = 'foo.peer.addr'
expected_ip_cmd = ['ip', 'route', 'get', '172.168.1.2']
gw_addr = '10.0.0.1'
self._execute.return_value = '172.168.1.2 via %s' % gw_addr
def _fake_resolve_fqdn(address):
return '172.168.1.2'
self._test_get_nexthop_helper(peer_address, _fake_resolve_fqdn,
expected_ip_cmd, gw_addr)
def test__get_nexthop_gw_not_present(self):
peer_address = '172.168.1.2'
expected_ip_cmd = ['ip', 'route', 'get', '172.168.1.2']
self._execute.return_value = ' '
self._test_get_nexthop_helper(peer_address, None,
expected_ip_cmd, peer_address)
def test__get_nexthop_fqdn_peer_addr_is_not_resolved(self):
self.process.connection_status = {}
expected_connection_status_dict = (
{'fake-conn-id': {'status': constants.ERROR,
'updated_pending_status': True}})
self.assertRaises(vpnaas.VPNPeerAddressNotResolved,
self.process._get_nexthop, 'foo.peer.addr',
'fake-conn-id')
self.assertEqual(expected_connection_status_dict,
self.process.connection_status)
self.process.connection_status = (
{'fake-conn-id': {'status': constants.PENDING_CREATE,
'updated_pending_status': False}})
self.assertRaises(vpnaas.VPNPeerAddressNotResolved,
self.process._get_nexthop, 'foo.peer.addr',
'fake-conn-id')
self.assertEqual(expected_connection_status_dict,
self.process.connection_status)
@mock.patch('neutron_vpnaas.services.vpn.device_drivers.'
'ipsec.OpenSwanProcess._get_nexthop',
return_value='172.168.1.2')
@mock.patch('neutron_vpnaas.services.vpn.device_drivers.'
'ipsec.OpenSwanProcess._cleanup_control_files')
def test_no_cleanups(self, cleanup_mock, hop_mock):
# Not an "awesome test" but more of a check box item. Basically,
# what happens if we didn't need to clean up any files.
with mock.patch.object(self.process,
'_process_running',
return_value=True) as query_mock:
self.process.start()
self.assertEqual(1, query_mock.call_count)
# This is really what is being tested here. If process is
# running, we shouldn't attempt a cleanup.
self.assertFalse(cleanup_mock.called)
@mock.patch('neutron_vpnaas.services.vpn.device_drivers.'
'ipsec.OpenSwanProcess._get_nexthop',
return_value='172.168.1.2')
@mock.patch('os.path.exists', return_value=True)
def test_cleanup_files(self, exists_mock, hop_mock):
# Only exercises the bare mechanics and is largely a check-box item;
# this really needs exercising through a higher-level test.
with mock.patch.object(self.process,
'_process_running',
return_value=False) as query_mock:
fake_path = '/fake/path/run'
self.process.pid_path = fake_path
self.process.pid_file = '%s.pid' % fake_path
self.process.start()
self.assertEqual(1, query_mock.call_count)
self.assertEqual(2, self.os_remove.call_count)
self.os_remove.assert_has_calls([mock.call('%s.pid' % fake_path),
mock.call('%s.ctl' % fake_path)])
@mock.patch('neutron_vpnaas.services.vpn.device_drivers.'
'ipsec.OpenSwanProcess._get_nexthop',
return_value='172.168.1.2')
@mock.patch('neutron_vpnaas.services.vpn.device_drivers.'
'ipsec.OpenSwanProcess._process_running',
return_value=False)
@mock.patch('neutron_vpnaas.services.vpn.device_drivers.'
'ipsec.OpenSwanProcess._cleanup_control_files')
@mock.patch('eventlet.sleep')
def test_restart_process_not_running(self, sleep_mock, cleanup_mock,
query_mock, hop_mock):
self.process.restart()
# Really what is being tested: the retry configuration exists and we
# do the right thing when the process check returns False.
self.assertTrue(query_mock.called)
self.assertTrue(cleanup_mock.called)
self.assertFalse(sleep_mock.called)
@mock.patch('neutron_vpnaas.services.vpn.device_drivers.'
'ipsec.OpenSwanProcess._get_nexthop',
return_value='172.168.1.2')
@mock.patch('neutron_vpnaas.services.vpn.device_drivers.'
'ipsec.OpenSwanProcess._process_running',
return_value=True)
@mock.patch('neutron_vpnaas.services.vpn.device_drivers.'
'ipsec.OpenSwanProcess._cleanup_control_files')
@mock.patch('eventlet.sleep')
def test_restart_process_doesnt_stop(self, sleep_mock, cleanup_mock,
query_mock, hop_mock):
self.process.restart()
# Really what is being tested: the retry configuration exists and we
# do the right thing when the process check returns True.
self.assertEqual(self._test_retries + 1, query_mock.call_count)
self.assertFalse(cleanup_mock.called)
self.assertEqual(self._test_retries, sleep_mock.call_count)
calls = [mock.call(1), mock.call(2), mock.call(4),
mock.call(8), mock.call(16)]
sleep_mock.assert_has_calls(calls)
@mock.patch('neutron_vpnaas.services.vpn.device_drivers.'
'ipsec.OpenSwanProcess._get_nexthop',
return_value='172.168.1.2')
@mock.patch('neutron_vpnaas.services.vpn.device_drivers.'
'ipsec.OpenSwanProcess._process_running',
side_effect=[True, True, False, False])
@mock.patch('neutron_vpnaas.services.vpn.device_drivers.'
'ipsec.OpenSwanProcess._cleanup_control_files')
@mock.patch('eventlet.sleep')
def test_restart_process_retry_until_stop(self, sleep_mock, cleanup_mock,
query_mock, hop_mock):
self.process.restart()
# Really what is being tested: the retry configuration exists and we
# do the right thing when the process check returns True a few times
# and then returns False.
self.assertEqual(4, query_mock.call_count)
self.assertTrue(cleanup_mock.called)
self.assertEqual(2, sleep_mock.call_count)
def test_process_running_no_pid(self):
with mock.patch('os.path.exists', return_value=False):
self.assertFalse(
self.process._process_running())
# open() is used elsewhere, so we need to inject a mocked open into the
# module to be tested.
@mock.patch('os.path.exists', return_value=True)
@mock.patch('neutron_vpnaas.services.vpn.device_drivers.ipsec.open',
create=True,
side_effect=IOError)
def test_process_running_open_failure(self, mock_open, mock_exists):
self.assertFalse(self.process._process_running())
self.assertTrue(mock_exists.called)
self.assertTrue(mock_open.called)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('neutron_vpnaas.services.vpn.device_drivers.ipsec.open',
create=True,
side_effect=[io.StringIO(u'invalid'),
IOError])
def test_process_running_bogus_pid(self, mock_open, mock_exists):
with mock.patch.object(openswan_ipsec.LOG, 'error'):
self.assertFalse(self.process._process_running())
self.assertTrue(mock_exists.called)
self.assertEqual(2, mock_open.call_count)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('neutron_vpnaas.services.vpn.device_drivers.ipsec.open',
create=True,
side_effect=[io.StringIO(u'134'), io.StringIO(u'')])
def test_process_running_no_cmdline(self, mock_open, mock_exists):
with mock.patch.object(openswan_ipsec.LOG, 'error') as log_mock:
self.assertFalse(self.process._process_running())
self.assertFalse(log_mock.called)
self.assertEqual(2, mock_open.call_count)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('neutron_vpnaas.services.vpn.device_drivers.ipsec.open',
create=True,
side_effect=[io.StringIO(u'134'), io.StringIO(u'ps ax')])
def test_process_running_cmdline_mismatch(self, mock_open, mock_exists):
with mock.patch.object(openswan_ipsec.LOG, 'error') as log_mock:
self.assertFalse(self.process._process_running())
self.assertFalse(log_mock.called)
self.assertEqual(2, mock_open.call_count)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('neutron_vpnaas.services.vpn.device_drivers.ipsec.open',
create=True,
side_effect=[io.StringIO(u'134'),
io.StringIO(u'/usr/libexec/ipsec/pluto -ctlbase'
'/some/foo/path')])
def test_process_running_cmdline_match(self, mock_open, mock_exists):
self.process.pid_path = '/some/foo/path'
with mock.patch.object(openswan_ipsec.LOG, 'error') as log_mock:
self.assertTrue(self.process._process_running())
self.assertTrue(log_mock.called)
def test_status_handling_for_downed_connection(self):
"""Test status handling for downed connection."""
self._test_status_handling_for_downed_connection(PLUTO_DOWN_STATUS)
def test_status_handling_for_connection_with_no_ipsec_sa(self):
"""Test status handling for downed connection."""
self._test_status_handling_for_downed_connection(
PLUTO_ACTIVE_NO_IPSEC_SA_STATUS)
def test_status_handling_for_active_connection(self):
"""Test status handling for active connection."""
self._test_status_handling_for_active_connection(PLUTO_ACTIVE_STATUS)
def test_status_handling_for_deleted_connection(self):
"""Test status handling for deleted connection."""
self._test_status_handling_for_deleted_connection(NOT_RUNNING_STATUS)
def test_parse_connection_status(self):
"""Test the status of ipsec-site-connection parsed correctly."""
self._test_parse_connection_status(NOT_RUNNING_STATUS,
PLUTO_ACTIVE_STATUS,
PLUTO_DOWN_STATUS)
class TestLibreSwanProcess(base.BaseTestCase):
def setUp(self):
super(TestLibreSwanProcess, self).setUp()
self.vpnservice = copy.deepcopy(FAKE_VPN_SERVICE)
self.ipsec_process = libreswan_ipsec.LibreSwanProcess(cfg.CONF,
'foo-process-id',
self.vpnservice,
mock.ANY)
@mock.patch('os.remove')
@mock.patch('os.path.exists', return_value=True)
def test_ensure_configs_on_restart(self, exists_mock, remove_mock):
openswan_ipsec.OpenSwanProcess.ensure_configs = mock.Mock()
with mock.patch.object(self.ipsec_process, '_execute') as fake_execute:
self.ipsec_process.ensure_configs()
expected = [mock.call(['chown', '--from=%s' % os.getuid(),
'root:root',
self.ipsec_process._get_config_filename(
'ipsec.secrets')]),
mock.call(['ipsec', '_stackmanager', 'start']),
mock.call(['ipsec', 'checknss',
self.ipsec_process.etc_dir])]
fake_execute.assert_has_calls(expected)
self.assertEqual(3, fake_execute.call_count)
self.assertTrue(exists_mock.called)
self.assertTrue(remove_mock.called)
@mock.patch('os.remove')
@mock.patch('os.path.exists', return_value=False)
def test_ensure_configs(self, exists_mock, remove_mock):
openswan_ipsec.OpenSwanProcess.ensure_configs = mock.Mock()
with mock.patch.object(self.ipsec_process, '_execute') as fake_execute:
self.ipsec_process.ensure_configs()
expected = [mock.call(['chown', '--from=%s' % os.getuid(),
'root:root',
self.ipsec_process._get_config_filename(
'ipsec.secrets')]),
mock.call(['ipsec', '_stackmanager', 'start']),
mock.call(['ipsec', 'checknss',
self.ipsec_process.etc_dir])]
fake_execute.assert_has_calls(expected)
self.assertEqual(3, fake_execute.call_count)
self.assertTrue(exists_mock.called)
self.assertFalse(remove_mock.called)
exists_mock.reset_mock()
remove_mock.reset_mock()
with mock.patch.object(self.ipsec_process, '_execute') as fake_execute:
fake_execute.side_effect = [None, None, RuntimeError, None]
self.ipsec_process.ensure_configs()
expected = [mock.call(['chown', '--from=%s' % os.getuid(),
'root:root',
self.ipsec_process._get_config_filename(
'ipsec.secrets')]),
mock.call(['ipsec', '_stackmanager', 'start']),
mock.call(['ipsec', 'checknss',
self.ipsec_process.etc_dir]),
mock.call(['ipsec', 'initnss',
self.ipsec_process.etc_dir])]
fake_execute.assert_has_calls(expected)
self.assertEqual(4, fake_execute.call_count)
self.assertTrue(exists_mock.called)
self.assertFalse(remove_mock.called)
class IPsecStrongswanDeviceDriverLegacy(IPSecDeviceLegacy):
def setUp(self, driver=strongswan_ipsec.StrongSwanDriver,
ipsec_process=strongswan_ipsec.StrongSwanProcess):
super(IPsecStrongswanDeviceDriverLegacy, self).setUp(driver,
ipsec_process)
self.conf.register_opts(strongswan_ipsec.strongswan_opts,
'strongswan')
self.conf.set_override('state_path', '/tmp')
self.driver.agent_rpc.get_vpn_services_on_host.return_value = [
self.vpnservice]
def test_status_handling_for_downed_connection(self):
"""Test status handling for downed connection."""
self._test_status_handling_for_downed_connection(CHARON_DOWN_STATUS)
def test_status_handling_for_active_connection(self):
"""Test status handling for active connection."""
self._test_status_handling_for_active_connection(CHARON_ACTIVE_STATUS)
def test_status_handling_for_deleted_connection(self):
"""Test status handling for deleted connection."""
self._test_status_handling_for_deleted_connection(NOT_RUNNING_STATUS)
def test_parse_connection_status(self):
"""Test the status of ipsec-site-connection parsed correctly."""
self._test_parse_connection_status(NOT_RUNNING_STATUS,
CHARON_ACTIVE_STATUS,
CHARON_DOWN_STATUS)
class IPsecStrongswanDeviceDriverDVR(IPSecDeviceDVR):
def setUp(self, driver=strongswan_ipsec.StrongSwanDriver,
ipsec_process=strongswan_ipsec.StrongSwanProcess):
super(IPsecStrongswanDeviceDriverDVR, self).setUp(driver,
ipsec_process)
class IPsecFedoraStrongswanDeviceDriverLegacy(
IPsecStrongswanDeviceDriverLegacy):
def setUp(self, driver=fedora_strongswan_ipsec.FedoraStrongSwanDriver,
ipsec_process=fedora_strongswan_ipsec.FedoraStrongSwanProcess):
super(IPsecFedoraStrongswanDeviceDriverLegacy,
self).setUp(driver, ipsec_process)
class IPsecFedoraStrongswanDeviceDriverDVR(IPSecDeviceDVR):
def setUp(self, driver=fedora_strongswan_ipsec.FedoraStrongSwanDriver,
ipsec_process=fedora_strongswan_ipsec.FedoraStrongSwanProcess):
super(IPsecFedoraStrongswanDeviceDriverDVR, self).setUp(driver,
ipsec_process)
././@LongLink 0000000 0000000 0000000 00000000151 00000000000 011212 L ustar 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/vpn/device_drivers/test_cisco_csr_rest_client.py neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/vpn/device_drivers/test_cisco_csr_rest_clien0000664 0005670 0005671 00000220513 12701407726 035617 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import random
import re
import requests
from requests import exceptions as r_exc
from requests_mock.contrib import fixture as mock_fixture
from neutron_vpnaas.services.vpn.device_drivers import (
cisco_csr_rest_client as csr_client)
from neutron_vpnaas.tests import base
dummy_policy_id = 'dummy-ipsec-policy-id-name'
TEST_VRF = 'nrouter-123456'
BASE_URL = 'https://%s:55443/api/v1/'
LOCAL_URL = 'https://localhost:55443/api/v1/'
URI_HOSTNAME = 'global/host-name'
URI_USERS = 'global/local-users'
URI_AUTH = 'auth/token-services'
URI_INTERFACE_GE1 = 'interfaces/GigabitEthernet1'
URI_PSK = 'vrf/' + TEST_VRF + '/vpn-svc/ike/keyrings'
URI_PSK_ID = URI_PSK + '/%s'
URI_IKE_POLICY = 'vpn-svc/ike/policies'
URI_IKE_POLICY_ID = URI_IKE_POLICY + '/%s'
URI_IPSEC_POLICY = 'vpn-svc/ipsec/policies'
URI_IPSEC_POLICY_ID = URI_IPSEC_POLICY + '/%s'
URI_IPSEC_CONN = 'vrf/' + TEST_VRF + '/vpn-svc/site-to-site'
URI_IPSEC_CONN_ID = URI_IPSEC_CONN + '/%s'
URI_KEEPALIVE = 'vpn-svc/ike/keepalive'
URI_ROUTES = 'vrf/' + TEST_VRF + '/routing-svc/static-routes'
URI_ROUTES_ID = URI_ROUTES + '/%s'
URI_SESSIONS = 'vrf/' + TEST_VRF + '/vpn-svc/site-to-site/active/sessions'
# Note: Helper functions to test reuse of IDs.
def generate_pre_shared_key_id():
return random.randint(100, 200)
def generate_ike_policy_id():
return random.randint(200, 300)
def generate_ipsec_policy_id():
return random.randint(300, 400)
class CiscoCsrBaseTestCase(base.BaseTestCase):
"""Helper methods to register mock intercepts - used by child classes."""
def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None):
super(CiscoCsrBaseTestCase, self).setUp()
self.base_url = BASE_URL % host
self.requests = self.useFixture(mock_fixture.Fixture())
info = {'rest_mgmt_ip': host, 'tunnel_ip': tunnel_ip,
'vrf': 'nrouter-123456',
'username': 'stack', 'password': 'cisco', 'timeout': timeout}
self.csr = csr_client.CsrRestClient(info)
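# All requests issued by the client are intercepted by the
# requests-mock fixture registered above, so no real CSR is contacted.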
def _register_local_get(self, uri, json=None,
result_code=requests.codes.OK):
self.requests.register_uri(
'GET',
LOCAL_URL + uri,
status_code=result_code,
json=json)
def _register_local_post(self, uri, resource_id,
result_code=requests.codes.CREATED):
self.requests.register_uri(
'POST',
LOCAL_URL + uri,
status_code=result_code,
headers={'location': LOCAL_URL + uri + '/' + str(resource_id)})
def _register_local_delete(self, uri, resource_id, json=None,
result_code=requests.codes.NO_CONTENT):
self.requests.register_uri(
'DELETE',
LOCAL_URL + uri + '/' + str(resource_id),
status_code=result_code,
json=json)
def _register_local_delete_by_id(self, resource_id,
result_code=requests.codes.NO_CONTENT):
local_resource_re = re.compile(LOCAL_URL + '.+%s$' % resource_id)
self.requests.register_uri(
'DELETE',
local_resource_re,
status_code=result_code)
def _register_local_put(self, uri, resource_id,
result_code=requests.codes.NO_CONTENT):
self.requests.register_uri('PUT',
LOCAL_URL + uri + '/' + resource_id,
status_code=result_code)
def _register_local_get_not_found(self, uri, resource_id,
result_code=requests.codes.NOT_FOUND):
self.requests.register_uri(
'GET',
LOCAL_URL + uri + '/' + str(resource_id),
status_code=result_code)
def _helper_register_auth_request(self):
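# The client authenticates by POSTing to URI_AUTH; the mocked reply
# below supplies the 'token-id' it stores for subsequent requests
# (exercised in TestCsrLoginRestApi).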
self.requests.register_uri('POST',
LOCAL_URL + URI_AUTH,
status_code=requests.codes.OK,
json={'token-id': 'dummy-token'})
def _helper_register_psk_post(self, psk_id):
self._register_local_post(URI_PSK, psk_id)
def _helper_register_ike_policy_post(self, policy_id):
self._register_local_post(URI_IKE_POLICY, policy_id)
def _helper_register_ipsec_policy_post(self, policy_id):
self._register_local_post(URI_IPSEC_POLICY, policy_id)
def _helper_register_tunnel_post(self, tunnel):
self._register_local_post(URI_IPSEC_CONN, tunnel)
class TestCsrLoginRestApi(CiscoCsrBaseTestCase):
"""Test logging into CSR to obtain token-id."""
def test_get_token(self):
"""Obtain the token and its expiration time."""
self._helper_register_auth_request()
self.assertTrue(self.csr.authenticate())
self.assertEqual(requests.codes.OK, self.csr.status)
self.assertIsNotNone(self.csr.token)
def test_unauthorized_token_request(self):
"""Negative test of invalid user/password."""
self.requests.register_uri('POST',
LOCAL_URL + URI_AUTH,
status_code=requests.codes.UNAUTHORIZED)
self.csr.auth = ('stack', 'bogus')
self.assertIsNone(self.csr.authenticate())
self.assertEqual(requests.codes.UNAUTHORIZED, self.csr.status)
def _simulate_wrong_host(self, request):
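# Matcher registered with requests-mock: any request aimed at
# 'wrong-host' raises ConnectionError, simulating an unreachable CSR.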
if 'wrong-host' in request.url:
raise r_exc.ConnectionError()
def test_non_existent_host(self):
"""Negative test of request to non-existent host."""
self.requests.add_matcher(self._simulate_wrong_host)
self.csr.host = 'wrong-host'
self.csr.token = 'Set by some previously successful access'
self.assertIsNone(self.csr.authenticate())
self.assertEqual(requests.codes.NOT_FOUND, self.csr.status)
self.assertIsNone(self.csr.token)
def _simulate_token_timeout(self, request):
raise r_exc.Timeout()
def test_timeout_on_token_access(self):
"""Negative test of a timeout on a request."""
self.requests.add_matcher(self._simulate_token_timeout)
self.assertIsNone(self.csr.authenticate())
self.assertEqual(requests.codes.REQUEST_TIMEOUT, self.csr.status)
self.assertIsNone(self.csr.token)
class TestCsrGetRestApi(CiscoCsrBaseTestCase):
"""Test CSR GET REST API."""
def test_valid_rest_gets(self):
"""Simple GET requests.
First request will do a post to get token (login). Assumes
that there are two interfaces on the CSR.
"""
self._helper_register_auth_request()
self._register_local_get(URI_HOSTNAME,
json={u'kind': u'object#host-name',
u'host-name': u'Router'})
self._register_local_get(URI_USERS,
json={u'kind': u'collection#local-user',
u'users': ['peter', 'paul', 'mary']})
actual = self.csr.get_request(URI_HOSTNAME)
self.assertEqual(requests.codes.OK, self.csr.status)
self.assertIn('host-name', actual)
self.assertIsNotNone(actual['host-name'])
actual = self.csr.get_request(URI_USERS)
self.assertEqual(requests.codes.OK, self.csr.status)
self.assertIn('users', actual)
class TestCsrPostRestApi(CiscoCsrBaseTestCase):
"""Test CSR POST REST API."""
def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None):
"""Setup for each test in this suite.
Each test case will have a normal authentication mock response
registered here, although they may replace it, as needed.
"""
super(TestCsrPostRestApi, self).setUp(host, tunnel_ip, timeout)
self._helper_register_auth_request()
def test_post_requests(self):
"""Simple POST requests (repeatable).
First request will do a post to get token (login). Assumes
that there are two interfaces (Ge1 and Ge2) on the CSR.
"""
interface_re = re.compile(r'https://localhost:55443/.*/interfaces/'
r'GigabitEthernet\d/statistics')
self.requests.register_uri('POST',
interface_re,
status_code=requests.codes.NO_CONTENT)
actual = self.csr.post_request(
'interfaces/GigabitEthernet1/statistics',
payload={'action': 'clear'})
self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
self.assertIsNone(actual)
actual = self.csr.post_request(
'interfaces/GigabitEthernet2/statistics',
payload={'action': 'clear'})
self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
self.assertIsNone(actual)
def test_post_with_location(self):
"""Create a user and verify that location returned."""
self.requests.register_uri(
'POST',
LOCAL_URL + URI_USERS,
status_code=requests.codes.CREATED,
headers={'location': LOCAL_URL + URI_USERS + '/test-user'})
location = self.csr.post_request(
URI_USERS,
payload={'username': 'test-user',
'password': 'pass12345',
'privilege': 15})
self.assertEqual(requests.codes.CREATED, self.csr.status)
self.assertIn(URI_USERS + '/test-user', location)
def test_post_missing_required_attribute(self):
"""Negative test of POST with missing mandatory info."""
self.requests.register_uri('POST',
LOCAL_URL + URI_USERS,
status_code=requests.codes.BAD_REQUEST)
self.csr.post_request(URI_USERS,
payload={'password': 'pass12345',
'privilege': 15})
self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status)
def test_post_invalid_attribute(self):
"""Negative test of POST with invalid info."""
self.requests.register_uri('POST',
LOCAL_URL + URI_USERS,
status_code=requests.codes.BAD_REQUEST)
self.csr.post_request(URI_USERS,
payload={'username': 'test-user',
'password': 'pass12345',
'privilege': 20})
self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status)
def test_post_already_exists(self):
"""Negative test of a duplicate POST.
Uses the lower level _do_request() API to just perform the POST and
obtain the response, without any error processing.
"""
self.requests.register_uri(
'POST',
LOCAL_URL + URI_USERS,
status_code=requests.codes.CREATED,
headers={'location': LOCAL_URL + URI_USERS + '/test-user'})
location = self.csr._do_request(
'POST',
URI_USERS,
payload={'username': 'test-user',
'password': 'pass12345',
'privilege': 15},
more_headers=csr_client.HEADER_CONTENT_TYPE_JSON)
self.assertEqual(requests.codes.CREATED, self.csr.status)
self.assertIn(URI_USERS + '/test-user', location)
self.csr.post_request(URI_USERS,
payload={'username': 'test-user',
'password': 'pass12345',
'privilege': 20})
self.requests.register_uri(
'POST',
LOCAL_URL + URI_USERS,
status_code=requests.codes.NOT_FOUND,
json={u'error-code': -1,
u'error-message': u'user test-user already exists'})
self.csr._do_request(
'POST',
URI_USERS,
payload={'username': 'test-user',
'password': 'pass12345',
'privilege': 15},
more_headers=csr_client.HEADER_CONTENT_TYPE_JSON)
# Note: For local-user, a 404 error is returned. For
# site-to-site connection a 400 is returned.
self.assertEqual(requests.codes.NOT_FOUND, self.csr.status)
def test_post_changing_value(self):
"""Negative test of a POST trying to change a value."""
self.requests.register_uri(
'POST',
LOCAL_URL + URI_USERS,
status_code=requests.codes.CREATED,
headers={'location': LOCAL_URL + URI_USERS + '/test-user'})
location = self.csr.post_request(
URI_USERS,
payload={'username': 'test-user',
'password': 'pass12345',
'privilege': 15})
self.assertEqual(requests.codes.CREATED, self.csr.status)
self.assertIn(URI_USERS + '/test-user', location)
self.requests.register_uri(
'POST',
LOCAL_URL + URI_USERS,
status_code=requests.codes.NOT_FOUND,
json={u'error-code': -1,
u'error-message': u'user test-user already exists'})
actual = self.csr.post_request(URI_USERS,
payload={'username': 'test-user',
'password': 'changed',
'privilege': 15})
self.assertEqual(requests.codes.NOT_FOUND, self.csr.status)
expected = {u'error-code': -1,
u'error-message': u'user test-user already exists'}
self.assertDictSupersetOf(expected, actual)
class TestCsrPutRestApi(CiscoCsrBaseTestCase):
"""Test CSR PUT REST API."""
def _save_resources(self):
self._register_local_get(URI_HOSTNAME,
json={u'kind': u'object#host-name',
u'host-name': u'Router'})
interface_info = {u'kind': u'object#interface',
u'description': u'Changed description',
u'if-name': 'interfaces/GigabitEthernet1',
u'proxy-arp': True,
u'subnet-mask': u'255.255.255.0',
u'icmp-unreachable': True,
u'nat-direction': u'',
u'icmp-redirects': True,
u'ip-address': u'192.168.200.1',
u'verify-unicast-source': False,
u'type': u'ethernet'}
self._register_local_get(URI_INTERFACE_GE1,
json=interface_info)
details = self.csr.get_request(URI_HOSTNAME)
if self.csr.status != requests.codes.OK:
self.fail("Unable to save original host name")
self.original_host = details['host-name']
details = self.csr.get_request(URI_INTERFACE_GE1)
if self.csr.status != requests.codes.OK:
self.fail("Unable to save interface Ge1 description")
self.original_if = details
self.csr.token = None
def _restore_resources(self, user, password):
"""Restore the host name and interface description.
The user and password must be restored first, so that an
authentication token can be obtained (some tests corrupt the auth
info). The token is also cleared, so that a fresh one is requested.
"""
self._register_local_put('global', 'host-name')
self._register_local_put('interfaces', 'GigabitEthernet1')
self.csr.auth = (user, password)
self.csr.token = None
payload = {'host-name': self.original_host}
self.csr.put_request(URI_HOSTNAME, payload=payload)
if self.csr.status != requests.codes.NO_CONTENT:
self.fail("Unable to restore host name after test")
payload = {'description': self.original_if['description'],
'if-name': self.original_if['if-name'],
'ip-address': self.original_if['ip-address'],
'subnet-mask': self.original_if['subnet-mask'],
'type': self.original_if['type']}
self.csr.put_request(URI_INTERFACE_GE1,
payload=payload)
if self.csr.status != requests.codes.NO_CONTENT:
self.fail("Unable to restore I/F Ge1 description after test")
def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None):
"""Setup for each test in this suite.
Each test case will have a normal authentication mock response
registered here, although they may replace it, as needed. In
addition, resources are saved, before each test is run, and
restored, after each test completes.
"""
super(TestCsrPutRestApi, self).setUp(host, tunnel_ip, timeout)
self._helper_register_auth_request()
self._save_resources()
self.addCleanup(self._restore_resources, 'stack', 'cisco')
def test_put_requests(self):
"""Simple PUT requests (repeatable).
First request will do a post to get token (login). Assumes
that there are two interfaces on the CSR (Ge1 and Ge2).
"""
self._register_local_put('interfaces', 'GigabitEthernet1')
self._register_local_put('global', 'host-name')
actual = self.csr.put_request(URI_HOSTNAME,
payload={'host-name': 'TestHost'})
self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
self.assertIsNone(actual)
actual = self.csr.put_request(URI_HOSTNAME,
payload={'host-name': 'TestHost2'})
self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
self.assertIsNone(actual)
def test_change_interface_description(self):
"""Test that interface description can be changed.
This was a problem with an earlier version of the CSR image and is
here to prevent regression.
"""
self._register_local_put('interfaces', 'GigabitEthernet1')
payload = {'description': u'Changed description',
'if-name': self.original_if['if-name'],
'ip-address': self.original_if['ip-address'],
'subnet-mask': self.original_if['subnet-mask'],
'type': self.original_if['type']}
actual = self.csr.put_request(URI_INTERFACE_GE1, payload=payload)
self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
self.assertIsNone(actual)
actual = self.csr.get_request(URI_INTERFACE_GE1)
self.assertEqual(requests.codes.OK, self.csr.status)
self.assertIn('description', actual)
self.assertEqual(u'Changed description',
actual['description'])
def ignore_test_change_to_empty_interface_description(self):
"""Test that interface description can be changed to empty string.
This is here to prevent regression, where the CSR was rejecting
an attempt to set the description to an empty string.
"""
self._register_local_put('interfaces', 'GigabitEthernet1')
payload = {'description': '',
'if-name': self.original_if['if-name'],
'ip-address': self.original_if['ip-address'],
'subnet-mask': self.original_if['subnet-mask'],
'type': self.original_if['type']}
actual = self.csr.put_request(URI_INTERFACE_GE1, payload=payload)
self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
self.assertIsNone(actual)
actual = self.csr.get_request(URI_INTERFACE_GE1)
self.assertEqual(requests.codes.OK, self.csr.status)
self.assertIn('description', actual)
self.assertEqual('', actual['description'])
class TestCsrDeleteRestApi(CiscoCsrBaseTestCase):
"""Test CSR DELETE REST API."""
def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None):
"""Setup for each test in this suite.
Each test case will have a normal authentication mock response
registered here, although they may replace it, as needed.
"""
super(TestCsrDeleteRestApi, self).setUp(host, tunnel_ip, timeout)
self._helper_register_auth_request()
def _make_dummy_user(self):
"""Create a user that will be later deleted."""
self.requests.register_uri(
'POST',
LOCAL_URL + URI_USERS,
status_code=requests.codes.CREATED,
headers={'location': LOCAL_URL + URI_USERS + '/dummy'})
self.csr.post_request(URI_USERS,
payload={'username': 'dummy',
'password': 'dummy',
'privilege': 15})
self.assertEqual(requests.codes.CREATED, self.csr.status)
def test_delete_requests(self):
"""Simple DELETE requests (creating entry first)."""
self._register_local_delete(URI_USERS, 'dummy')
self._make_dummy_user()
self.csr.token = None # Force login
self.csr.delete_request(URI_USERS + '/dummy')
self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
# Delete again, but without logging in this time
self._make_dummy_user()
self.csr.delete_request(URI_USERS + '/dummy')
self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
def test_delete_non_existent_entry(self):
"""Negative test of trying to delete a non-existent user."""
expected = {u'error-code': -1,
u'error-message': u'user unknown not found'}
self._register_local_delete(URI_USERS, 'unknown',
result_code=requests.codes.NOT_FOUND,
json=expected)
actual = self.csr.delete_request(URI_USERS + '/unknown')
self.assertEqual(requests.codes.NOT_FOUND, self.csr.status)
self.assertDictSupersetOf(expected, actual)
def test_delete_not_allowed(self):
"""Negative test of trying to delete the host-name."""
self._register_local_delete(
'global', 'host-name',
result_code=requests.codes.METHOD_NOT_ALLOWED)
self.csr.delete_request(URI_HOSTNAME)
self.assertEqual(requests.codes.METHOD_NOT_ALLOWED,
self.csr.status)
class TestCsrRestApiFailures(CiscoCsrBaseTestCase):
"""Test failure cases common for all REST APIs.
Uses the lower level _do_request() to just perform the operation and get
the result, without any error handling.
"""
def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=0.1):
"""Setup for each test in this suite.
Each test case will have a normal authentication mock response
registered here, although they may replace it, as needed.
"""
super(TestCsrRestApiFailures, self).setUp(host, tunnel_ip, timeout)
self._helper_register_auth_request()
def _simulate_timeout(self, request):
if URI_HOSTNAME in request.path_url:
raise r_exc.Timeout()
def test_request_for_non_existent_resource(self):
"""Negative test of non-existent resource on REST request."""
self.requests.register_uri('POST',
LOCAL_URL + 'no/such/request',
status_code=requests.codes.NOT_FOUND)
self.csr.post_request('no/such/request')
self.assertEqual(requests.codes.NOT_FOUND, self.csr.status)
# The result is HTTP 404 message, so no error content to check
def _simulate_get_timeout(self, request):
"""Will raise exception for any host request to this resource."""
if URI_HOSTNAME in request.path_url:
raise r_exc.Timeout()
def test_timeout_during_request(self):
"""Negative test of timeout during REST request."""
self.requests.add_matcher(self._simulate_get_timeout)
self.csr._do_request('GET', URI_HOSTNAME)
self.assertEqual(requests.codes.REQUEST_TIMEOUT, self.csr.status)
def _simulate_auth_failure(self, request):
"""First time auth POST is done, re-report unauthorized."""
if URI_AUTH in request.path_url and not self.called_once:
self.called_once = True
resp = requests.Response()
resp.status_code = requests.codes.UNAUTHORIZED
return resp
def test_token_expired_on_request(self):
"""Token expired before trying a REST request.
First, the token is set to a bogus value, to force it to
try to authenticate on the GET request. Second, a mock that
runs once, will simulate an auth failure. Third, the normal
auth mock will simulate success.
"""
self._register_local_get(URI_HOSTNAME,
json={u'kind': u'object#host-name',
u'host-name': u'Router'})
self.called_once = False
self.requests.add_matcher(self._simulate_auth_failure)
self.csr.token = '123' # Real tokens are 44 characters, so this won't match
actual = self.csr._do_request('GET', URI_HOSTNAME)
self.assertEqual(requests.codes.OK, self.csr.status)
self.assertIn('host-name', actual)
self.assertIsNotNone(actual['host-name'])
def test_failed_to_obtain_token_for_request(self):
"""Negative test of unauthorized user for REST request."""
self.csr.auth = ('stack', 'bogus')
self._register_local_get(URI_HOSTNAME,
result_code=requests.codes.UNAUTHORIZED)
self.csr._do_request('GET', URI_HOSTNAME)
self.assertEqual(requests.codes.UNAUTHORIZED, self.csr.status)
class TestCsrRestIkePolicyCreate(CiscoCsrBaseTestCase):
"""Test IKE policy create REST requests."""
def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None):
"""Setup for each test in this suite.
Each test case will have a normal authentication and post mock
response registered, although the test may replace them, if needed.
"""
super(TestCsrRestIkePolicyCreate, self).setUp(host, tunnel_ip, timeout)
self._helper_register_auth_request()
self._helper_register_ike_policy_post(2)
def _helper_register_ike_policy_get(self):
content = {u'kind': u'object#ike-policy',
u'priority-id': u'2',
u'version': u'v1',
u'local-auth-method': u'pre-share',
u'encryption': u'aes256',
u'hash': u'sha',
u'dhGroup': 5,
u'lifetime': 3600}
self._register_local_get(URI_IKE_POLICY_ID % '2', json=content)
def test_create_delete_ike_policy(self):
"""Create and then delete IKE policy."""
self._helper_register_ike_policy_get()
policy_info = {u'priority-id': u'2',
u'encryption': u'aes256',
u'hash': u'sha',
u'dhGroup': 5,
u'lifetime': 3600}
location = self.csr.create_ike_policy(policy_info)
self.assertEqual(requests.codes.CREATED, self.csr.status)
self.assertIn(URI_IKE_POLICY_ID % '2', location)
# Check the hard-coded items that get set as well...
actual = self.csr.get_request(location, full_url=True)
self.assertEqual(requests.codes.OK, self.csr.status)
expected_policy = {u'kind': u'object#ike-policy',
u'version': u'v1',
u'local-auth-method': u'pre-share'}
expected_policy.update(policy_info)
self.assertEqual(expected_policy, actual)
# Now delete and verify the IKE policy is gone
self._register_local_delete(URI_IKE_POLICY, 2)
self._register_local_get_not_found(URI_IKE_POLICY, 2)
self.csr.delete_ike_policy(2)
self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
actual = self.csr.get_request(location, full_url=True)
self.assertEqual(requests.codes.NOT_FOUND, self.csr.status)
def test_create_ike_policy_with_defaults(self):
"""Create IKE policy using defaults for all optional values."""
policy = {u'kind': u'object#ike-policy',
u'priority-id': u'2',
u'version': u'v1',
u'local-auth-method': u'pre-share',
u'encryption': u'des',
u'hash': u'sha',
u'dhGroup': 1,
u'lifetime': 86400}
self._register_local_get(URI_IKE_POLICY_ID % '2', json=policy)
policy_info = {u'priority-id': u'2'}
location = self.csr.create_ike_policy(policy_info)
self.assertEqual(requests.codes.CREATED, self.csr.status)
self.assertIn(URI_IKE_POLICY_ID % '2', location)
# Check the hard-coded items that get set as well...
actual = self.csr.get_request(location, full_url=True)
self.assertEqual(requests.codes.OK, self.csr.status)
expected_policy = {u'kind': u'object#ike-policy',
u'version': u'v1',
u'encryption': u'des',
u'hash': u'sha',
u'dhGroup': 1,
u'lifetime': 86400,
# Lower level sets this, but it is the default
u'local-auth-method': u'pre-share'}
expected_policy.update(policy_info)
self.assertEqual(expected_policy, actual)
def test_create_duplicate_ike_policy(self):
"""Negative test of trying to create a duplicate IKE policy."""
self._helper_register_ike_policy_get()
policy_info = {u'priority-id': u'2',
u'encryption': u'aes',
u'hash': u'sha',
u'dhGroup': 5,
u'lifetime': 3600}
location = self.csr.create_ike_policy(policy_info)
self.assertEqual(requests.codes.CREATED, self.csr.status)
self.assertIn(URI_IKE_POLICY_ID % '2', location)
self.requests.register_uri(
'POST',
LOCAL_URL + URI_IKE_POLICY,
status_code=requests.codes.BAD_REQUEST,
json={u'error-code': -1,
u'error-message': u'policy 2 exist, not allow to '
u'update policy using POST method'})
location = self.csr.create_ike_policy(policy_info)
self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status)
expected = {u'error-code': -1,
u'error-message': u'policy 2 exist, not allow to '
u'update policy using POST method'}
self.assertDictSupersetOf(expected, location)
class TestCsrRestIPSecPolicyCreate(CiscoCsrBaseTestCase):
"""Test IPSec policy create REST requests."""
def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None):
"""Set up for each test in this suite.
Each test case will have a normal authentication and post mock
response registered, although the test may replace them, if needed.
"""
super(TestCsrRestIPSecPolicyCreate, self).setUp(host,
tunnel_ip,
timeout)
self._helper_register_auth_request()
self._helper_register_ipsec_policy_post(123)
def _helper_register_ipsec_policy_get(self, override=None):
content = {u'kind': u'object#ipsec-policy',
u'mode': u'tunnel',
u'policy-id': u'123',
u'protection-suite': {
u'esp-encryption': u'esp-256-aes',
u'esp-authentication': u'esp-sha-hmac',
u'ah': u'ah-sha-hmac',
},
u'anti-replay-window-size': u'Disable',
u'lifetime-sec': 120,
u'pfs': u'group5',
u'lifetime-kb': 4608000,
u'idle-time': None}
if override:
content.update(override)
self._register_local_get(URI_IPSEC_POLICY + '/123', json=content)
def test_create_delete_ipsec_policy(self):
"""Create and then delete IPSec policy."""
policy_info = {
u'policy-id': u'123',
u'protection-suite': {
u'esp-encryption': u'esp-256-aes',
u'esp-authentication': u'esp-sha-hmac',
u'ah': u'ah-sha-hmac',
},
u'lifetime-sec': 120,
u'pfs': u'group5',
u'anti-replay-window-size': u'disable'
}
location = self.csr.create_ipsec_policy(policy_info)
self.assertEqual(requests.codes.CREATED, self.csr.status)
self.assertIn(URI_IPSEC_POLICY + '/123', location)
# Check the hard-coded items that get set as well...
self._helper_register_ipsec_policy_get()
actual = self.csr.get_request(location, full_url=True)
self.assertEqual(requests.codes.OK, self.csr.status)
expected_policy = {u'kind': u'object#ipsec-policy',
u'mode': u'tunnel',
u'lifetime-kb': 4608000,
u'idle-time': None}
expected_policy.update(policy_info)
# CSR will respond with capitalized value
expected_policy[u'anti-replay-window-size'] = u'Disable'
self.assertEqual(expected_policy, actual)
# Now delete and verify the IPSec policy is gone
self._register_local_delete(URI_IPSEC_POLICY, 123)
self._register_local_get_not_found(URI_IPSEC_POLICY, 123)
self.csr.delete_ipsec_policy('123')
self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
actual = self.csr.get_request(location, full_url=True)
self.assertEqual(requests.codes.NOT_FOUND, self.csr.status)
def test_create_ipsec_policy_with_defaults(self):
"""Create IPSec policy with default for all optional values."""
policy_info = {u'policy-id': u'123'}
location = self.csr.create_ipsec_policy(policy_info)
self.assertEqual(requests.codes.CREATED, self.csr.status)
self.assertIn(URI_IPSEC_POLICY + '/123', location)
# Check the hard-coded items that get set as well...
expected_policy = {u'kind': u'object#ipsec-policy',
u'mode': u'tunnel',
u'policy-id': u'123',
u'protection-suite': {},
u'lifetime-sec': 3600,
u'pfs': u'Disable',
u'anti-replay-window-size': u'None',
u'lifetime-kb': 4608000,
u'idle-time': None}
self._register_local_get(URI_IPSEC_POLICY + '/123',
json=expected_policy)
actual = self.csr.get_request(location, full_url=True)
self.assertEqual(requests.codes.OK, self.csr.status)
self.assertEqual(expected_policy, actual)
def test_create_ipsec_policy_with_uuid(self):
"""Create IPSec policy using UUID for id."""
# Override normal POST response w/one that has a different policy ID
self._helper_register_ipsec_policy_post(dummy_policy_id)
policy_info = {
u'policy-id': u'%s' % dummy_policy_id,
u'protection-suite': {
u'esp-encryption': u'esp-256-aes',
u'esp-authentication': u'esp-sha-hmac',
u'ah': u'ah-sha-hmac',
},
u'lifetime-sec': 120,
u'pfs': u'group5',
u'anti-replay-window-size': u'disable'
}
location = self.csr.create_ipsec_policy(policy_info)
self.assertEqual(requests.codes.CREATED, self.csr.status)
self.assertIn(URI_IPSEC_POLICY_ID % dummy_policy_id, location)
# Check the hard-coded items that get set as well...
expected_policy = {u'kind': u'object#ipsec-policy',
u'mode': u'tunnel',
u'lifetime-kb': 4608000,
u'idle-time': None}
expected_policy.update(policy_info)
# CSR will respond with capitalized value
expected_policy[u'anti-replay-window-size'] = u'Disable'
self._register_local_get(URI_IPSEC_POLICY_ID % dummy_policy_id,
json=expected_policy)
actual = self.csr.get_request(location, full_url=True)
self.assertEqual(requests.codes.OK, self.csr.status)
self.assertEqual(expected_policy, actual)
def test_create_ipsec_policy_without_ah(self):
"""Create IPSec policy."""
policy_info = {
u'policy-id': u'123',
u'protection-suite': {
u'esp-encryption': u'esp-aes',
u'esp-authentication': u'esp-sha-hmac',
},
u'lifetime-sec': 120,
u'pfs': u'group5',
u'anti-replay-window-size': u'128'
}
location = self.csr.create_ipsec_policy(policy_info)
self.assertEqual(requests.codes.CREATED, self.csr.status)
self.assertIn(URI_IPSEC_POLICY_ID % '123', location)
# Check the hard-coded items that get set as well...
self._helper_register_ipsec_policy_get(
override={u'anti-replay-window-size': u'128',
u'protection-suite': {
u'esp-encryption': u'esp-aes',
u'esp-authentication': u'esp-sha-hmac'}})
actual = self.csr.get_request(location, full_url=True)
self.assertEqual(requests.codes.OK, self.csr.status)
expected_policy = {u'kind': u'object#ipsec-policy',
u'mode': u'tunnel',
u'lifetime-kb': 4608000,
u'idle-time': None}
expected_policy.update(policy_info)
self.assertEqual(expected_policy, actual)
def test_invalid_ipsec_policy_lifetime(self):
"""Failure test of IPSec policy with unsupported lifetime."""
# Override normal POST response with one that indicates bad request
self.requests.register_uri('POST',
LOCAL_URL + URI_IPSEC_POLICY,
status_code=requests.codes.BAD_REQUEST)
policy_info = {
u'policy-id': u'123',
u'protection-suite': {
u'esp-encryption': u'esp-aes',
u'esp-authentication': u'esp-sha-hmac',
u'ah': u'ah-sha-hmac',
},
u'lifetime-sec': 119,
u'pfs': u'group5',
u'anti-replay-window-size': u'128'
}
self.csr.create_ipsec_policy(policy_info)
self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status)
def test_create_ipsec_policy_with_invalid_name(self):
"""Failure test of creating IPSec policy with name too long."""
# Override normal POST response with one that indicates bad request
self.requests.register_uri('POST',
LOCAL_URL + URI_IPSEC_POLICY,
status_code=requests.codes.BAD_REQUEST)
policy_info = {u'policy-id': u'policy-name-is-too-long-32-chars'}
self.csr.create_ipsec_policy(policy_info)
self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status)
class TestCsrRestPreSharedKeyCreate(CiscoCsrBaseTestCase):
"""Test Pre-shared key (PSK) create REST requests."""
def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None):
"""Set up for each test in this suite.
Each test case will have a normal authentication and post mock
response registered, although the test may replace them, if needed.
"""
super(TestCsrRestPreSharedKeyCreate, self).setUp(host,
tunnel_ip,
timeout)
self._helper_register_auth_request()
self._helper_register_psk_post(5)
def _helper_register_psk_get(self, override=None):
content = {u'kind': u'object#ike-keyring',
u'keyring-name': u'5',
u'pre-shared-key-list': [
{u'key': u'super-secret',
u'encrypted': False,
u'peer-address': u'10.10.10.20 255.255.255.0'}
]}
if override:
content.update(override)
self._register_local_get(URI_PSK_ID % '5', json=content)
def test_create_delete_pre_shared_key(self):
"""Create and then delete a keyring entry for pre-shared key."""
psk_info = {u'keyring-name': u'5',
u'pre-shared-key-list': [
{u'key': u'super-secret',
u'encrypted': False,
u'peer-address': u'10.10.10.20/24'}
]}
location = self.csr.create_pre_shared_key(psk_info)
self.assertEqual(requests.codes.CREATED, self.csr.status)
self.assertIn(URI_PSK_ID % '5', location)
# Check the hard-coded items that get set as well...
self._helper_register_psk_get()
content = self.csr.get_request(location, full_url=True)
self.assertEqual(requests.codes.OK, self.csr.status)
expected_policy = {u'kind': u'object#ike-keyring'}
expected_policy.update(psk_info)
# Note: the peer CIDR is returned as an IP and mask
expected_policy[u'pre-shared-key-list'][0][u'peer-address'] = (
u'10.10.10.20 255.255.255.0')
self.assertEqual(expected_policy, content)
# Now delete and verify pre-shared key is gone
self._register_local_delete(URI_PSK, 5)
self._register_local_get_not_found(URI_PSK, 5)
self.csr.delete_pre_shared_key('5')
self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
content = self.csr.get_request(location, full_url=True)
self.assertEqual(requests.codes.NOT_FOUND, self.csr.status)
def test_create_pre_shared_key_with_fqdn_peer(self):
"""Create pre-shared key using FQDN for peer address."""
psk_info = {u'keyring-name': u'5',
u'pre-shared-key-list': [
{u'key': u'super-secret',
u'encrypted': False,
u'peer-address': u'cisco.com'}
]}
location = self.csr.create_pre_shared_key(psk_info)
self.assertEqual(requests.codes.CREATED, self.csr.status)
self.assertIn(URI_PSK_ID % '5', location)
# Check the hard-coded items that get set as well...
self._helper_register_psk_get(
override={u'pre-shared-key-list': [
{u'key': u'super-secret',
u'encrypted': False,
u'peer-address': u'cisco.com'}
]}
)
content = self.csr.get_request(location, full_url=True)
self.assertEqual(requests.codes.OK, self.csr.status)
expected_policy = {u'kind': u'object#ike-keyring'}
expected_policy.update(psk_info)
self.assertEqual(expected_policy, content)
class TestCsrRestIPSecConnectionCreate(CiscoCsrBaseTestCase):
"""Test IPSec site-to-site connection REST requests.
This requires us to have first created an IKE policy, IPSec policy,
and pre-shared key, so it's more of an integration test, when used
with a real CSR (as we can't mock out these pre-conditions).
"""
def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None):
"""Setup for each test in this suite.
Each test case will have a normal authentication mock response
registered here, although they may replace it, as needed.
"""
super(TestCsrRestIPSecConnectionCreate, self).setUp(host,
tunnel_ip,
timeout)
self._helper_register_auth_request()
self.route_id = '10.1.0.0_24_GigabitEthernet1'
def _make_psk_for_test(self):
psk_id = generate_pre_shared_key_id()
self._remove_resource_for_test(self.csr.delete_pre_shared_key,
psk_id)
self._helper_register_psk_post(psk_id)
psk_info = {u'keyring-name': u'%d' % psk_id,
u'pre-shared-key-list': [
{u'key': u'super-secret',
u'encrypted': False,
u'peer-address': u'10.10.10.20/24'}
]}
self.csr.create_pre_shared_key(psk_info)
if self.csr.status != requests.codes.CREATED:
self.fail("Unable to create PSK for test case")
self.addCleanup(self._remove_resource_for_test,
self.csr.delete_pre_shared_key, psk_id)
return psk_id
def _make_ike_policy_for_test(self):
policy_id = generate_ike_policy_id()
self._remove_resource_for_test(self.csr.delete_ike_policy,
policy_id)
self._helper_register_ike_policy_post(policy_id)
policy_info = {u'priority-id': u'%d' % policy_id,
u'encryption': u'aes',
u'hash': u'sha',
u'dhGroup': 5,
u'lifetime': 3600}
self.csr.create_ike_policy(policy_info)
if self.csr.status != requests.codes.CREATED:
self.fail("Unable to create IKE policy for test case")
self.addCleanup(self._remove_resource_for_test,
self.csr.delete_ike_policy, policy_id)
return policy_id
def _make_ipsec_policy_for_test(self):
policy_id = generate_ipsec_policy_id()
self._remove_resource_for_test(self.csr.delete_ipsec_policy,
policy_id)
self._helper_register_ipsec_policy_post(policy_id)
policy_info = {
u'policy-id': u'%d' % policy_id,
u'protection-suite': {
u'esp-encryption': u'esp-aes',
u'esp-authentication': u'esp-sha-hmac',
u'ah': u'ah-sha-hmac',
},
u'lifetime-sec': 120,
u'pfs': u'group5',
u'anti-replay-window-size': u'disable'
}
self.csr.create_ipsec_policy(policy_info)
if self.csr.status != requests.codes.CREATED:
self.fail("Unable to create IPSec policy for test case")
self.addCleanup(self._remove_resource_for_test,
self.csr.delete_ipsec_policy, policy_id)
return policy_id
def _remove_resource_for_test(self, delete_resource, resource_id):
self._register_local_delete_by_id(resource_id)
delete_resource(resource_id)
def _prepare_for_site_conn_create(self, skip_psk=False, skip_ike=False,
skip_ipsec=False):
"""Create the policies and PSK so can then create site conn."""
if not skip_psk:
ike_policy_id = self._make_psk_for_test()
else:
ike_policy_id = generate_ike_policy_id()
if not skip_ike:
self._make_ike_policy_for_test()
if not skip_ipsec:
ipsec_policy_id = self._make_ipsec_policy_for_test()
else:
ipsec_policy_id = generate_ipsec_policy_id()
# Note: Use the same ID number for the tunnel and IPSec policy, so that
# when getting tunnel info, the mocks can infer the IPSec policy ID from
# the tunnel number.
return (ike_policy_id, ipsec_policy_id, ipsec_policy_id)
def _helper_register_ipsec_conn_get(self, tunnel, override=None):
# Use same number, to allow mock to generate IPSec policy ID
ipsec_policy_id = tunnel[6:]
content = {u'kind': u'object#vpn-site-to-site',
u'vpn-interface-name': u'%s' % tunnel,
u'ip-version': u'ipv4',
u'vpn-type': u'site-to-site',
u'ipsec-policy-id': u'%s' % ipsec_policy_id,
u'ike-profile-id': None,
u'mtu': 1500,
u'tunnel-vrf': TEST_VRF,
u'local-device': {
u'ip-address': '10.3.0.1/24',
u'tunnel-ip-address': '10.10.10.10'
},
u'remote-device': {
u'tunnel-ip-address': '10.10.10.20'
}}
if override:
content.update(override)
self._register_local_get(URI_IPSEC_CONN_ID % tunnel, json=content)
def test_create_delete_ipsec_connection(self):
"""Create and then delete an IPSec connection."""
ike_policy_id, ipsec_policy_id, tunnel_id = (
self._prepare_for_site_conn_create())
tunnel_name = u'Tunnel%s' % tunnel_id
self._helper_register_tunnel_post(tunnel_name)
self._register_local_post(URI_ROUTES, self.route_id)
connection_info = {
u'vpn-interface-name': tunnel_name,
u'ipsec-policy-id': u'%d' % ipsec_policy_id,
u'mtu': 1500,
u'local-device': {u'ip-address': u'10.3.0.1/24',
u'tunnel-ip-address': u'10.10.10.10'},
u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'}
}
expected_connection = {u'kind': u'object#vpn-site-to-site',
u'ike-profile-id': None,
u'vpn-type': u'site-to-site',
u'mtu': 1500,
u'tunnel-vrf': TEST_VRF,
u'ip-version': u'ipv4'}
expected_connection.update(connection_info)
location = self.csr.create_ipsec_connection(connection_info)
self.addCleanup(self._remove_resource_for_test,
self.csr.delete_ipsec_connection,
tunnel_name)
self.assertEqual(requests.codes.CREATED, self.csr.status)
self.assertIn(URI_IPSEC_CONN_ID % tunnel_name, location)
# Check the hard-coded items that get set as well...
self._helper_register_ipsec_conn_get(tunnel_name)
content = self.csr.get_request(location, full_url=True)
self.assertEqual(requests.codes.OK, self.csr.status)
self.assertEqual(expected_connection, content)
# Now delete and verify that site-to-site connection is gone
self._register_local_delete_by_id(tunnel_name)
self._register_local_delete_by_id(ipsec_policy_id)
self._register_local_delete_by_id(ike_policy_id)
self._register_local_get_not_found(URI_IPSEC_CONN,
tunnel_name)
# Only delete connection. Cleanup will take care of prerequisites
self.csr.delete_ipsec_connection(tunnel_name)
self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
content = self.csr.get_request(location, full_url=True)
self.assertEqual(requests.codes.NOT_FOUND, self.csr.status)
def test_create_ipsec_connection_with_no_tunnel_subnet(self):
"""Create an IPSec connection without an IP address on tunnel."""
_, ipsec_policy_id, tunnel_id = (
self._prepare_for_site_conn_create())
tunnel_name = u'Tunnel%s' % tunnel_id
self._helper_register_tunnel_post(tunnel_name)
self._register_local_post(URI_ROUTES, self.route_id)
connection_info = {
u'vpn-interface-name': tunnel_name,
u'ipsec-policy-id': u'%d' % ipsec_policy_id,
u'local-device': {u'ip-address': u'GigabitEthernet3',
u'tunnel-ip-address': u'10.10.10.10'},
u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'}
}
expected_connection = {u'kind': u'object#vpn-site-to-site',
u'ike-profile-id': None,
u'vpn-type': u'site-to-site',
u'mtu': 1500,
u'tunnel-vrf': TEST_VRF,
u'ip-version': u'ipv4'}
expected_connection.update(connection_info)
location = self.csr.create_ipsec_connection(connection_info)
self.addCleanup(self._remove_resource_for_test,
self.csr.delete_ipsec_connection,
tunnel_name)
self.assertEqual(requests.codes.CREATED, self.csr.status)
self.assertIn(URI_IPSEC_CONN_ID % tunnel_name, location)
# Check the hard-coded items that get set as well...
self._helper_register_ipsec_conn_get(tunnel_name, override={
u'local-device': {
u'ip-address': u'GigabitEthernet3',
u'tunnel-ip-address': u'10.10.10.10'
}})
content = self.csr.get_request(location, full_url=True)
self.assertEqual(requests.codes.OK, self.csr.status)
self.assertEqual(expected_connection, content)
def test_create_ipsec_connection_no_pre_shared_key(self):
"""Test of connection create without associated pre-shared key.
The CSR will create the connection, but will not be able to pass
traffic without the pre-shared key.
"""
_, ipsec_policy_id, tunnel_id = (
self._prepare_for_site_conn_create(skip_psk=True))
tunnel_name = u'Tunnel%s' % tunnel_id
self._helper_register_tunnel_post(tunnel_name)
self._register_local_post(URI_ROUTES, self.route_id)
connection_info = {
u'vpn-interface-name': tunnel_name,
u'ipsec-policy-id': u'%d' % ipsec_policy_id,
u'mtu': 1500,
u'local-device': {u'ip-address': u'10.3.0.1/24',
u'tunnel-ip-address': u'10.10.10.10'},
u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'}
}
expected_connection = {u'kind': u'object#vpn-site-to-site',
u'ike-profile-id': None,
u'tunnel-vrf': TEST_VRF,
u'vpn-type': u'site-to-site',
u'ip-version': u'ipv4'}
expected_connection.update(connection_info)
location = self.csr.create_ipsec_connection(connection_info)
self.addCleanup(self._remove_resource_for_test,
self.csr.delete_ipsec_connection,
tunnel_name)
self.assertEqual(requests.codes.CREATED, self.csr.status)
self.assertIn(URI_IPSEC_CONN_ID % tunnel_name, location)
# Check the hard-coded items that get set as well...
self._helper_register_ipsec_conn_get(tunnel_name)
content = self.csr.get_request(location, full_url=True)
self.assertEqual(requests.codes.OK, self.csr.status)
self.assertEqual(expected_connection, content)
def test_create_ipsec_connection_with_default_ike_policy(self):
"""Test of connection create without IKE policy (uses default).
Without an IKE policy, the CSR will use a built-in default IKE
policy setting for the connection.
"""
_, ipsec_policy_id, tunnel_id = (
self._prepare_for_site_conn_create(skip_ike=True))
tunnel_name = u'Tunnel%s' % tunnel_id
self._helper_register_tunnel_post(tunnel_name)
self._register_local_post(URI_ROUTES, self.route_id)
connection_info = {
u'vpn-interface-name': tunnel_name,
u'ipsec-policy-id': u'%d' % ipsec_policy_id,
u'mtu': 1500,
u'local-device': {u'ip-address': u'10.3.0.1/24',
u'tunnel-ip-address': u'10.10.10.10'},
u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'}
}
expected_connection = {u'kind': u'object#vpn-site-to-site',
u'ike-profile-id': None,
u'tunnel-vrf': TEST_VRF,
u'vpn-type': u'site-to-site',
u'ip-version': u'ipv4'}
expected_connection.update(connection_info)
location = self.csr.create_ipsec_connection(connection_info)
self.addCleanup(self._remove_resource_for_test,
self.csr.delete_ipsec_connection,
tunnel_name)
self.assertEqual(requests.codes.CREATED, self.csr.status)
self.assertIn(URI_IPSEC_CONN_ID % tunnel_name, location)
# Check the hard-coded items that get set as well...
self._helper_register_ipsec_conn_get(tunnel_name)
content = self.csr.get_request(location, full_url=True)
self.assertEqual(requests.codes.OK, self.csr.status)
self.assertEqual(expected_connection, content)
def test_set_ipsec_connection_admin_state_changes(self):
"""Create IPSec connection in admin down state."""
_, ipsec_policy_id, tunnel_id = (
self._prepare_for_site_conn_create())
tunnel_name = u'Tunnel%s' % tunnel_id
self._helper_register_tunnel_post(tunnel_name)
self._register_local_post(URI_ROUTES, self.route_id)
connection_info = {
u'vpn-interface-name': tunnel_name,
u'ipsec-policy-id': u'%d' % ipsec_policy_id,
u'mtu': 1500,
u'local-device': {u'ip-address': u'10.3.0.1/24',
u'tunnel-ip-address': u'10.10.10.10'},
u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'}
}
location = self.csr.create_ipsec_connection(connection_info)
self.addCleanup(self._remove_resource_for_test,
self.csr.delete_ipsec_connection,
tunnel_name)
self.assertEqual(requests.codes.CREATED, self.csr.status)
self.assertIn(URI_IPSEC_CONN_ID % tunnel_name, location)
state_url = location + "/state"
state_uri = URI_IPSEC_CONN_ID % tunnel_name + '/state'
# Note: When created, the tunnel will be in admin 'up' state
# Note: Line protocol state will be down, unless there is an active conn.
expected_state = {u'kind': u'object#vpn-site-to-site-state',
u'vpn-interface-name': tunnel_name,
u'line-protocol-state': u'down',
u'enabled': False}
self._register_local_put(URI_IPSEC_CONN_ID % tunnel_name, 'state')
self.csr.set_ipsec_connection_state(tunnel_name, admin_up=False)
self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
self._register_local_get(state_uri, json=expected_state)
content = self.csr.get_request(state_url, full_url=True)
self.assertEqual(requests.codes.OK, self.csr.status)
self.assertEqual(expected_state, content)
self.csr.set_ipsec_connection_state(tunnel_name, admin_up=True)
self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
expected_state = {u'kind': u'object#vpn-site-to-site-state',
u'vpn-interface-name': tunnel_name,
u'line-protocol-state': u'down',
u'enabled': True}
self._register_local_get(state_uri, json=expected_state)
content = self.csr.get_request(state_url, full_url=True)
self.assertEqual(requests.codes.OK, self.csr.status)
self.assertEqual(expected_state, content)
def test_create_ipsec_connection_missing_ipsec_policy(self):
"""Negative test of connection create without IPSec policy."""
_, ipsec_policy_id, tunnel_id = (
self._prepare_for_site_conn_create(skip_ipsec=True))
tunnel_name = u'Tunnel%s' % tunnel_id
self._register_local_post(URI_IPSEC_CONN, tunnel_name,
result_code=requests.codes.BAD_REQUEST)
connection_info = {
u'vpn-interface-name': tunnel_name,
u'ipsec-policy-id': u'%d' % ipsec_policy_id,
u'local-device': {u'ip-address': u'10.3.0.1/24',
u'tunnel-ip-address': u'10.10.10.10'},
u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'}
}
self.csr.create_ipsec_connection(connection_info)
self.addCleanup(self._remove_resource_for_test,
self.csr.delete_ipsec_connection,
'Tunnel%d' % tunnel_id)
self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status)
def _determine_conflicting_ip(self):
content = {u'kind': u'object#interface',
u'subnet-mask': u'255.255.255.0',
u'ip-address': u'10.5.0.2'}
self._register_local_get('interfaces/GigabitEthernet3', json=content)
details = self.csr.get_request('interfaces/GigabitEthernet3')
if self.csr.status != requests.codes.OK:
self.fail("Unable to obtain interface GigabitEthernet3's IP")
if_ip = details.get('ip-address')
if not if_ip:
self.fail("No IP address for GigabitEthernet3 interface")
return '.'.join(if_ip.split('.')[:3]) + '.10'
def test_create_ipsec_connection_conflicting_tunnel_ip(self):
"""Negative test of connection create with conflicting tunnel IP.
Find out the IP of a local interface (GigabitEthernet3) and create an
IP that is on the same subnet. Note: this interface needs to be up.
"""
conflicting_ip = self._determine_conflicting_ip()
_, ipsec_policy_id, tunnel_id = (
self._prepare_for_site_conn_create())
tunnel_name = u'Tunnel%s' % tunnel_id
self._register_local_post(URI_IPSEC_CONN, tunnel_name,
result_code=requests.codes.BAD_REQUEST)
connection_info = {
u'vpn-interface-name': tunnel_name,
u'ipsec-policy-id': u'%d' % ipsec_policy_id,
u'local-device': {u'ip-address': u'%s/24' % conflicting_ip,
u'tunnel-ip-address': u'10.10.10.10'},
u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'}
}
self.csr.create_ipsec_connection(connection_info)
self.addCleanup(self._remove_resource_for_test,
self.csr.delete_ipsec_connection,
tunnel_name)
self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status)
def test_create_ipsec_connection_with_max_mtu(self):
"""Create an IPSec connection with max MTU value."""
_, ipsec_policy_id, tunnel_id = (
self._prepare_for_site_conn_create())
tunnel_name = u'Tunnel%s' % tunnel_id
self._helper_register_tunnel_post(tunnel_name)
self._register_local_post(URI_ROUTES, self.route_id)
connection_info = {
u'vpn-interface-name': tunnel_name,
u'ipsec-policy-id': u'%d' % ipsec_policy_id,
u'mtu': 9192,
u'local-device': {u'ip-address': u'10.3.0.1/24',
u'tunnel-ip-address': u'10.10.10.10'},
u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'}
}
expected_connection = {u'kind': u'object#vpn-site-to-site',
u'ike-profile-id': None,
u'tunnel-vrf': TEST_VRF,
u'vpn-type': u'site-to-site',
u'ip-version': u'ipv4'}
expected_connection.update(connection_info)
location = self.csr.create_ipsec_connection(connection_info)
self.addCleanup(self._remove_resource_for_test,
self.csr.delete_ipsec_connection,
tunnel_name)
self.assertEqual(requests.codes.CREATED, self.csr.status)
self.assertIn(URI_IPSEC_CONN_ID % tunnel_name, location)
# Check the hard-coded items that get set as well...
self._helper_register_ipsec_conn_get(tunnel_name, override={
u'mtu': 9192})
content = self.csr.get_request(location, full_url=True)
self.assertEqual(requests.codes.OK, self.csr.status)
self.assertEqual(expected_connection, content)
def test_create_ipsec_connection_with_bad_mtu(self):
"""Negative test of connection create with unsupported MTU value."""
_, ipsec_policy_id, tunnel_id = (
self._prepare_for_site_conn_create())
tunnel_name = u'Tunnel%s' % tunnel_id
self._register_local_post(URI_IPSEC_CONN, tunnel_name,
result_code=requests.codes.BAD_REQUEST)
connection_info = {
u'vpn-interface-name': tunnel_name,
u'ipsec-policy-id': u'%d' % ipsec_policy_id,
u'mtu': 9193,
u'local-device': {u'ip-address': u'10.3.0.1/24',
u'tunnel-ip-address': u'10.10.10.10'},
u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'}
}
self.csr.create_ipsec_connection(connection_info)
self.addCleanup(self._remove_resource_for_test,
self.csr.delete_ipsec_connection,
tunnel_name)
self.assertEqual(requests.codes.BAD_REQUEST, self.csr.status)
def test_status_when_no_tunnels_exist(self):
"""Get status, when there are no tunnels."""
content = {u'kind': u'collection#vpn-active-sessions',
u'items': []}
self._register_local_get(URI_SESSIONS, json=content)
tunnels = self.csr.read_tunnel_statuses()
self.assertEqual(requests.codes.OK, self.csr.status)
self.assertEqual([], tunnels)
def test_status_for_one_tunnel(self):
"""Get status of one tunnel."""
# Create the IPsec site-to-site connection first
_, ipsec_policy_id, tunnel_id = (
self._prepare_for_site_conn_create())
tunnel_name = u'Tunnel%s' % tunnel_id
self._helper_register_tunnel_post(tunnel_name)
self._register_local_post(URI_ROUTES, self.route_id)
connection_info = {
u'vpn-interface-name': tunnel_name,
u'ipsec-policy-id': u'%d' % ipsec_policy_id,
u'local-device': {u'ip-address': u'10.3.0.1/24',
u'tunnel-ip-address': u'10.10.10.10'},
u'remote-device': {u'tunnel-ip-address': u'10.10.10.20'}
}
location = self.csr.create_ipsec_connection(connection_info)
self.addCleanup(self._remove_resource_for_test,
self.csr.delete_ipsec_connection,
tunnel_name)
self.assertEqual(requests.codes.CREATED, self.csr.status)
self.assertIn(URI_IPSEC_CONN_ID % tunnel_name, location)
# Now, check the status
content = {u'kind': u'collection#vpn-active-sessions',
u'items': [{u'status': u'DOWN-NEGOTIATING',
u'vpn-interface-name': tunnel_name}, ]}
self._register_local_get(URI_SESSIONS, json=content)
self._helper_register_ipsec_conn_get(tunnel_name)
tunnels = self.csr.read_tunnel_statuses()
self.assertEqual(requests.codes.OK, self.csr.status)
self.assertEqual([(tunnel_name, u'DOWN-NEGOTIATING'), ], tunnels)
class TestCsrRestIkeKeepaliveCreate(CiscoCsrBaseTestCase):
"""Test IKE keepalive REST requests.
Note: On the Cisco CSR, the IKE keepalive for v1 is a global configuration
that applies to all VPN tunnels and specifies Dead Peer Detection information.
As a result, this REST API is not used in the OpenStack device driver, and
the keepalive will default to zero (disabled).
"""
def _save_dpd_info(self):
details = self.csr.get_request(URI_KEEPALIVE)
if self.csr.status == requests.codes.OK:
self.dpd = details
self.addCleanup(self._restore_dpd_info)
elif self.csr.status != requests.codes.NOT_FOUND:
self.fail("Unable to save original DPD info")
def _restore_dpd_info(self):
payload = {'interval': self.dpd['interval'],
'retry': self.dpd['retry']}
self.csr.put_request(URI_KEEPALIVE, payload=payload)
if self.csr.status != requests.codes.NO_CONTENT:
self.fail("Unable to restore DPD info after test")
def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None):
"""Set up for each test in this suite.
Each test case will have a normal authentication, get, and put mock
responses registered, although the test may replace them, if needed.
Dead Peer Detection settings will be saved for each test, and
restored afterwards.
"""
super(TestCsrRestIkeKeepaliveCreate, self).setUp(host,
tunnel_ip,
timeout)
self._helper_register_auth_request()
self._helper_register_keepalive_get()
self._register_local_put('vpn-svc/ike', 'keepalive')
self._save_dpd_info()
self.csr.token = None
def _helper_register_keepalive_get(self, override=None):
content = {u'interval': 60,
u'retry': 4,
u'periodic': True}
if override:
content.update(override)
self._register_local_get(URI_KEEPALIVE, json=content)
def test_configure_ike_keepalive(self):
"""Set IKE keep-alive (aka Dead Peer Detection) for the CSR."""
keepalive_info = {'interval': 60, 'retry': 4}
self.csr.configure_ike_keepalive(keepalive_info)
self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
content = self.csr.get_request(URI_KEEPALIVE)
self.assertEqual(requests.codes.OK, self.csr.status)
expected = {'periodic': False}
expected.update(keepalive_info)
self.assertDictSupersetOf(expected, content)
def test_disable_ike_keepalive(self):
"""Disable IKE keep-alive (aka Dead Peer Detection) for the CSR."""
keepalive_info = {'interval': 0, 'retry': 4}
self.csr.configure_ike_keepalive(keepalive_info)
self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
class TestCsrRestStaticRoute(CiscoCsrBaseTestCase):
"""Test static route REST requests.
A static route is added for the peer's private network. A route would be
created for each of the peer CIDRs specified for the VPN connection.
"""
def setUp(self, host='localhost', tunnel_ip='10.10.10.10', timeout=None):
"""Set up for each test in this suite.
Each test case will have a normal authentication mock response
registered, although the test may replace it, if needed.
"""
super(TestCsrRestStaticRoute, self).setUp(host, tunnel_ip, timeout)
self._helper_register_auth_request()
def test_create_delete_static_route(self):
"""Create and then delete a static route for the tunnel."""
expected_id = '10.1.0.0_24_GigabitEthernet1'
self._register_local_post(URI_ROUTES, resource_id=expected_id)
cidr = u'10.1.0.0/24'
interface = u'GigabitEthernet1'
route_info = {u'destination-network': cidr,
u'outgoing-interface': interface}
location = self.csr.create_static_route(route_info)
self.assertEqual(requests.codes.CREATED, self.csr.status)
self.assertIn(URI_ROUTES_ID % expected_id, location)
# Check the hard-coded items that get set as well...
expected_route = {u'destination-network': u'10.1.0.0/24',
u'kind': u'object#static-route',
u'next-hop-router': None,
u'outgoing-interface': u'GigabitEthernet1',
u'admin-distance': 1}
self._register_local_get(URI_ROUTES_ID % expected_id,
json=expected_route)
content = self.csr.get_request(location, full_url=True)
self.assertEqual(requests.codes.OK, self.csr.status)
self.assertEqual(expected_route, content)
# Now delete and verify that static route is gone
self._register_local_delete(URI_ROUTES, expected_id)
self._register_local_get_not_found(URI_ROUTES, expected_id)
route_id = csr_client.make_route_id(cidr, interface)
self.csr.delete_static_route(route_id)
self.assertEqual(requests.codes.NO_CONTENT, self.csr.status)
content = self.csr.get_request(location, full_url=True)
self.assertEqual(requests.codes.NOT_FOUND, self.csr.status)
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/vpn/device_drivers/test_vyatta_ipsec.py 0000664 0005670 0005671 00000015420 12701407726 034562 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2015 Brocade Communications System, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
import mock
from neutron.agent.l3 import legacy_router
from oslo_utils import uuidutils
from neutron_vpnaas.tests import base
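# The networking_brocade packages are an optional dependency; stub them out
# in sys.modules so that vyatta_ipsec (which imports them at module import
# time) can be imported without the real Brocade driver installed.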
with mock.patch.dict(sys.modules, {
'networking_brocade': mock.Mock(),
'networking_brocade.vyatta': mock.Mock(),
'networking_brocade.vyatta.common': mock.Mock(),
'networking_brocade.vyatta.vrouter': mock.Mock(),
'networking_brocade.vyatta.vpn': mock.Mock(),
}):
from networking_brocade.vyatta.common import vrouter_config
from networking_brocade.vyatta.vpn import config as vyatta_vpn_config
from neutron_vpnaas.services.vpn.device_drivers import vyatta_ipsec
_uuid = uuidutils.generate_uuid
FAKE_HOST = 'fake_host'
class TestNeutronServerAPI(base.BaseTestCase):
def setUp(self):
super(TestNeutronServerAPI, self).setUp()
get_client_mock = mock.patch(
'neutron.common.rpc.get_client').start()
self.client = get_client_mock.return_value
self.api = vyatta_ipsec.NeutronServerAPI('fake-topic')
def test_get_vpn_services_on_host(self):
fake_context = mock.Mock()
svc_connections = [
self._make_svc_connection(),
self._make_svc_connection()
]
vpn_services_on_host = [{
vyatta_ipsec._KEY_CONNECTIONS: svc_connections
}]
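# The RPC client returns a prepared call context from prepare(); stubbing
# its call() return value makes get_vpn_services_on_host see the fake
# service list defined above.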
cctxt = self.client.prepare.return_value
cctxt.call.return_value = vpn_services_on_host
vpn_services = self.api.get_vpn_services_on_host(
fake_context, FAKE_HOST)
cctxt.call.assert_called_with(
fake_context, 'get_vpn_services_on_host', host=FAKE_HOST)
validate_func = vyatta_vpn_config.validate_svc_connection
for connection in svc_connections:
validate_func.assert_any_call(connection)
self.assertEqual(len(vpn_services_on_host), len(vpn_services))
def test_update_status(self):
context = mock.Mock()
fake_status = 'fake-status'
cctxt = self.client.prepare.return_value
self.api.update_status(context, fake_status)
cctxt.cast.assert_called_once_with(
context, 'update_status', status=fake_status)
@staticmethod
def _make_svc_connection():
return {
vyatta_ipsec._KEY_IKEPOLICY: {
'encryption_algorithm': 'aes-256',
'lifetime_units': 'seconds',
},
vyatta_ipsec._KEY_ESPPOLICY: {
'encryption_algorithm': 'aes-256',
'lifetime_units': 'seconds',
'transform_protocol': 'esp',
'pfs': 'dh-group2',
'encapsulation_mode': 'tunnel'
},
'dpd_action': 'hold',
}
class TestVyattaDeviceDriver(base.BaseTestCase):
def setUp(self):
super(TestVyattaDeviceDriver, self).setUp()
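# Patch the periodic looping call so the driver does not start a real
# background sync loop during these unit tests.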
mock.patch('oslo_service.loopingcall.DynamicLoopingCall').start()
self.server_api = mock.patch(
'neutron_vpnaas.services.vpn.device_drivers'
'.vyatta_ipsec.NeutronServerAPI').start()
self.agent = mock.Mock()
self.driver = vyatta_ipsec.VyattaIPSecDriver(self.agent, FAKE_HOST)
def test_create_router(self):
router_id = _uuid()
router = mock.Mock(legacy_router.LegacyRouter)
router.router_id = router_id
vrouter_svc_list = [self._make_vrouter_svc()]
parse_vrouter_config = mock.Mock()
parse_vrouter_config.return_value = vrouter_svc_list
with mock.patch.object(vrouter_config, 'parse_config'), \
mock.patch.object(vyatta_vpn_config, 'parse_vrouter_config',
parse_vrouter_config), \
mock.patch.object(self.driver, 'get_router_resources',
mock.MagicMock()):
self.driver.create_router(router)
svc_cache = self.driver._svc_cache
self.assertEqual(1, len(svc_cache))
self.assertEqual(router_id, svc_cache[0]['router_id'])
ipsec_connections = svc_cache[0]['ipsec_site_connections']
self.assertEqual(
'172.24.4.234',
ipsec_connections[0]['peer_address'])
def test_destroy_router(self):
router_id = _uuid()
get_router_resources = mock.Mock()
vrouter_svc = self._make_vrouter_svc()
vrouter_svc['router_id'] = router_id
svc_cache = [vrouter_svc]
svc_delete = mock.Mock()
with mock.patch.object(self.driver, 'get_router_resources',
get_router_resources), \
mock.patch.object(self.driver, '_svc_delete', svc_delete), \
mock.patch.object(self.driver, '_svc_cache', svc_cache):
self.driver.destroy_router(router_id)
self.assertNotIn(vrouter_svc, svc_cache)
svc_delete.assert_called_with(vrouter_svc, mock.ANY)
def test_sync(self):
router_id = _uuid()
self.agent.router_info = {
router_id: mock.Mock()
}
to_del = [self._make_svc()]
to_change = [
(self._make_svc(), self._make_svc()),
]
to_add = [self._make_svc()]
svc_diff = mock.Mock()
svc_diff.return_value = (
to_del,
to_change,
to_add,
)
svc_delete = mock.Mock()
svc_add = mock.Mock()
with mock.patch.object(self.driver, '_svc_diff', svc_diff), \
mock.patch.object(self.driver, '_svc_delete', svc_delete), \
mock.patch.object(self.driver, '_svc_add', svc_add):
self.driver.sync(mock.Mock(), None)
for svc in to_add:
svc_add.assert_any_call(svc, mock.ANY)
for svc in to_del:
svc_delete.assert_any_call(svc, mock.ANY)
for old, new in to_change:
svc_delete.assert_any_call(old, mock.ANY)
svc_add.assert_any_call(new, mock.ANY)
@staticmethod
def _make_vrouter_svc():
return {
'id': _uuid(),
vyatta_ipsec._KEY_CONNECTIONS: [{
'peer_address': '172.24.4.234',
}]
}
@staticmethod
def _make_svc():
return {
'router_id': _uuid()
}
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/services/__init__.py 0000664 0005670 0005671 00000000000 12701407726 026753 0 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/extensions/ 0000775 0005670 0005671 00000000000 12701410103 025207 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/extensions/test_vpn_endpoint_groups.py 0000664 0005670 0005671 00000020474 12701407726 032752 0 ustar jenkins jenkins 0000000 0000000 # (c) Copyright 2015 NEC Corporation, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_utils import uuidutils
from webob import exc
from neutron.plugins.common import constants as nconstants
from neutron.tests.unit.api.v2 import test_base as test_api_v2
from neutron_vpnaas.extensions import vpn_endpoint_groups
from neutron_vpnaas.extensions import vpnaas
from neutron_vpnaas.services.vpn.common import constants
from neutron_vpnaas.tests import base
_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path
class VpnEndpointGroupsTestPlugin(
vpnaas.VPNPluginBase,
vpn_endpoint_groups.VPNEndpointGroupsPluginBase):
pass
class VpnEndpointGroupsTestCase(base.ExtensionTestCase):
fmt = 'json'
def setUp(self):
super(VpnEndpointGroupsTestCase, self).setUp()
plural_mappings = {'endpoint_group': 'endpoint-groups'}
self._setUpExtension(
'neutron_vpnaas.tests.unit.extensions.test_vpn_endpoint_groups.'
'VpnEndpointGroupsTestPlugin',
nconstants.VPN,
vpn_endpoint_groups.RESOURCE_ATTRIBUTE_MAP,
vpn_endpoint_groups.Vpn_endpoint_groups,
'vpn', plural_mappings=plural_mappings,
use_quota=True)
def helper_test_endpoint_group_create(self, data):
"""Check that the endpoint_group_create works.
Uses the passed-in endpoint group information, which specifies an
endpoint type and values.
"""
data['endpoint_group'].update({'tenant_id': _uuid(),
'name': 'my endpoint group',
'description': 'my description'})
return_value = copy.copy(data['endpoint_group'])
return_value.update({'id': _uuid()})
instance = self.plugin.return_value
instance.create_endpoint_group.return_value = return_value
res = self.api.post(_get_path('vpn/endpoint-groups', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_endpoint_group.assert_called_with(
mock.ANY, endpoint_group=data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('endpoint_group', res)
self.assertEqual(res['endpoint_group'], return_value)
def test_create_cidr_endpoint_group_create(self):
"""Test creation of CIDR type endpoint group."""
data = {'endpoint_group':
{'type': constants.CIDR_ENDPOINT,
'endpoints': ['10.10.10.0/24', '20.20.20.0/24']}}
self.helper_test_endpoint_group_create(data)
def test_create_subnet_endpoint_group_create(self):
"""Test creation of subnet type endpoint group."""
data = {'endpoint_group':
{'type': constants.SUBNET_ENDPOINT,
'endpoints': [_uuid(), _uuid()]}}
self.helper_test_endpoint_group_create(data)
def test_create_vlan_endpoint_group_create(self):
"""Test creation of VLAN type endpoint group."""
data = {'endpoint_group':
{'type': constants.VLAN_ENDPOINT,
'endpoints': ['100', '200', '300', '400']}}
self.helper_test_endpoint_group_create(data)
def test_get_endpoint_group(self):
"""Test show for endpoint group."""
endpoint_group_id = _uuid()
return_value = {'id': endpoint_group_id,
'tenant_id': _uuid(),
'name': 'my-endpoint-group',
'description': 'my endpoint group',
'type': constants.CIDR_ENDPOINT,
'endpoints': ['10.10.10.0/24']}
instance = self.plugin.return_value
instance.get_endpoint_group.return_value = return_value
res = self.api.get(_get_path('vpn/endpoint-groups',
id=endpoint_group_id,
fmt=self.fmt))
instance.get_endpoint_group.assert_called_with(mock.ANY,
endpoint_group_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('endpoint_group', res)
self.assertEqual(res['endpoint_group'], return_value)
def test_endpoint_group_list(self):
"""Test listing all endpoint groups."""
return_value = [{'id': _uuid(),
'tenant_id': _uuid(),
'name': 'my-endpoint-group',
'description': 'my endpoint group',
'type': constants.CIDR_ENDPOINT,
'endpoints': ['10.10.10.0/24']},
{'id': _uuid(),
'tenant_id': _uuid(),
'name': 'another-endpoint-group',
'description': 'second endpoint group',
'type': constants.VLAN_ENDPOINT,
'endpoints': ['100', '200', '300']}]
instance = self.plugin.return_value
instance.get_endpoint_groups.return_value = return_value
res = self.api.get(_get_path('vpn/endpoint-groups', fmt=self.fmt))
instance.get_endpoint_groups.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_endpoint_group_delete(self):
"""Test deleting an endpoint group."""
self._test_entity_delete('endpoint_group')
def test_endpoint_group_update(self):
"""Test updating endpoint_group."""
endpoint_group_id = _uuid()
update_data = {'endpoint_group': {'description': 'new description'}}
return_value = {'id': endpoint_group_id,
'tenant_id': _uuid(),
'name': 'my-endpoint-group',
'description': 'new_description',
'type': constants.CIDR_ENDPOINT,
'endpoints': ['10.10.10.0/24']}
instance = self.plugin.return_value
instance.update_endpoint_group.return_value = return_value
res = self.api.put(_get_path('vpn/endpoint-groups',
id=endpoint_group_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_endpoint_group.assert_called_with(
mock.ANY, endpoint_group_id, endpoint_group=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('endpoint_group', res)
self.assertEqual(res['endpoint_group'], return_value)
def test_fail_updating_endpoints_in_endpoint_group(self):
"""Test fails to update the endpoints in an endpoint group.
This documents that we are not allowing endpoints to be updated
(currently), as doing so implies that the connection using the
enclosing endpoint group would also need to be updated. For now,
a new endpoint group can be created, and the connection can be
updated to point to the new endpoint group.
"""
endpoint_group_id = _uuid()
update_data = {'endpoint_group': {'endpoints': ['10.10.10.0/24']}}
res = self.api.put(_get_path('vpn/endpoint-groups',
id=endpoint_group_id,
fmt=self.fmt),
params=self.serialize(update_data),
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/extensions/__init__.py 0000664 0005670 0005671 00000000000 12701407726 027327 0 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/extensions/test_vpnaas.py 0000664 0005670 0005671 00000062025 12701407726 030136 0 ustar jenkins jenkins 0000000 0000000 # (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from neutron.plugins.common import constants as nconstants
from neutron.tests.unit.api.v2 import test_base as test_api_v2
from oslo_utils import uuidutils
from webob import exc
from neutron_vpnaas.extensions import vpnaas
from neutron_vpnaas.tests import base
_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path
class VpnaasExtensionTestCase(base.ExtensionTestCase):
fmt = 'json'
def setUp(self):
super(VpnaasExtensionTestCase, self).setUp()
plural_mappings = {'ipsecpolicy': 'ipsecpolicies',
'ikepolicy': 'ikepolicies',
'ipsec_site_connection': 'ipsec-site-connections',
'endpoint_group': 'endpoint-groups'}
self._setUpExtension(
'neutron_vpnaas.extensions.vpnaas.VPNPluginBase', nconstants.VPN,
vpnaas.RESOURCE_ATTRIBUTE_MAP, vpnaas.Vpnaas,
'vpn', plural_mappings=plural_mappings,
use_quota=True)
def test_ikepolicy_create(self):
"""Test case to create an ikepolicy."""
ikepolicy_id = _uuid()
data = {'ikepolicy': {'name': 'ikepolicy1',
'description': 'myikepolicy1',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'phase1_negotiation_mode': 'main',
'lifetime': {
'units': 'seconds',
'value': 3600},
'ike_version': 'v1',
'pfs': 'group5',
'tenant_id': _uuid()}}
return_value = copy.copy(data['ikepolicy'])
return_value.update({'id': ikepolicy_id})
instance = self.plugin.return_value
instance.create_ikepolicy.return_value = return_value
res = self.api.post(_get_path('vpn/ikepolicies', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_ikepolicy.assert_called_with(mock.ANY,
ikepolicy=data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('ikepolicy', res)
self.assertEqual(return_value, res['ikepolicy'])
def test_ikepolicy_list(self):
"""Test case to list all ikepolicies."""
ikepolicy_id = _uuid()
return_value = [{'name': 'ikepolicy1',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'pfs': 'group5',
'ike_version': 'v1',
'id': ikepolicy_id}]
instance = self.plugin.return_value
instance.get_ikepolicies.return_value = return_value
res = self.api.get(_get_path('vpn/ikepolicies', fmt=self.fmt))
instance.get_ikepolicies.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
def test_ikepolicy_update(self):
"""Test case to update an ikepolicy."""
ikepolicy_id = _uuid()
update_data = {'ikepolicy': {'name': 'ikepolicy1',
'encryption_algorithm': 'aes-256'}}
return_value = {'name': 'ikepolicy1',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-256',
'phase1_negotiation_mode': 'main',
'lifetime': {
'units': 'seconds',
'value': 3600},
'ike_version': 'v1',
'pfs': 'group5',
'tenant_id': _uuid(),
'id': ikepolicy_id}
instance = self.plugin.return_value
instance.update_ikepolicy.return_value = return_value
res = self.api.put(_get_path('vpn/ikepolicies', id=ikepolicy_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_ikepolicy.assert_called_with(mock.ANY, ikepolicy_id,
ikepolicy=update_data)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('ikepolicy', res)
self.assertEqual(return_value, res['ikepolicy'])
def test_ikepolicy_get(self):
"""Test case to get or show an ikepolicy."""
ikepolicy_id = _uuid()
return_value = {'name': 'ikepolicy1',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'phase1_negotiation_mode': 'main',
'lifetime': {
'units': 'seconds',
'value': 3600},
'ike_version': 'v1',
'pfs': 'group5',
'tenant_id': _uuid(),
'id': ikepolicy_id}
instance = self.plugin.return_value
instance.get_ikepolicy.return_value = return_value
res = self.api.get(_get_path('vpn/ikepolicies', id=ikepolicy_id,
fmt=self.fmt))
instance.get_ikepolicy.assert_called_with(mock.ANY,
ikepolicy_id,
fields=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('ikepolicy', res)
self.assertEqual(return_value, res['ikepolicy'])
def test_ikepolicy_delete(self):
"""Test case to delete an ikepolicy."""
self._test_entity_delete('ikepolicy')
def test_ipsecpolicy_create(self):
"""Test case to create an ipsecpolicy."""
ipsecpolicy_id = _uuid()
data = {'ipsecpolicy': {'name': 'ipsecpolicy1',
'description': 'myipsecpolicy1',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'encapsulation_mode': 'tunnel',
'lifetime': {
'units': 'seconds',
'value': 3600},
'transform_protocol': 'esp',
'pfs': 'group5',
'tenant_id': _uuid()}}
return_value = copy.copy(data['ipsecpolicy'])
return_value.update({'id': ipsecpolicy_id})
instance = self.plugin.return_value
instance.create_ipsecpolicy.return_value = return_value
res = self.api.post(_get_path('vpn/ipsecpolicies', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_ipsecpolicy.assert_called_with(mock.ANY,
ipsecpolicy=data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('ipsecpolicy', res)
self.assertEqual(return_value, res['ipsecpolicy'])
def test_ipsecpolicy_list(self):
"""Test case to list an ipsecpolicy."""
ipsecpolicy_id = _uuid()
return_value = [{'name': 'ipsecpolicy1',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'pfs': 'group5',
'id': ipsecpolicy_id}]
instance = self.plugin.return_value
instance.get_ipsecpolicies.return_value = return_value
res = self.api.get(_get_path('vpn/ipsecpolicies', fmt=self.fmt))
instance.get_ipsecpolicies.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
def test_ipsecpolicy_update(self):
"""Test case to update an ipsecpolicy."""
ipsecpolicy_id = _uuid()
update_data = {'ipsecpolicy': {'name': 'ipsecpolicy1',
'encryption_algorithm': 'aes-256'}}
return_value = {'name': 'ipsecpolicy1',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'encapsulation_mode': 'tunnel',
'lifetime': {
'units': 'seconds',
'value': 3600},
'transform_protocol': 'esp',
'pfs': 'group5',
'tenant_id': _uuid(),
'id': ipsecpolicy_id}
instance = self.plugin.return_value
instance.update_ipsecpolicy.return_value = return_value
res = self.api.put(_get_path('vpn/ipsecpolicies',
id=ipsecpolicy_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_ipsecpolicy.assert_called_with(mock.ANY,
ipsecpolicy_id,
ipsecpolicy=update_data)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('ipsecpolicy', res)
self.assertEqual(return_value, res['ipsecpolicy'])
def test_ipsecpolicy_get(self):
"""Test case to get or show an ipsecpolicy."""
ipsecpolicy_id = _uuid()
return_value = {'name': 'ipsecpolicy1',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'encapsulation_mode': 'tunnel',
'lifetime': {
'units': 'seconds',
'value': 3600},
'transform_protocol': 'esp',
'pfs': 'group5',
'tenant_id': _uuid(),
'id': ipsecpolicy_id}
instance = self.plugin.return_value
instance.get_ipsecpolicy.return_value = return_value
res = self.api.get(_get_path('vpn/ipsecpolicies',
id=ipsecpolicy_id,
fmt=self.fmt))
instance.get_ipsecpolicy.assert_called_with(mock.ANY,
ipsecpolicy_id,
fields=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('ipsecpolicy', res)
self.assertEqual(return_value, res['ipsecpolicy'])
def test_ipsecpolicy_delete(self):
"""Test case to delete an ipsecpolicy."""
self._test_entity_delete('ipsecpolicy')
def _test_vpnservice_create(self, more_args, defaulted_args):
"""Helper to test VPN service creation.
Allows additional args to be specified for different test cases.
Includes the expected args for the case where optional args are not
specified and the API applies defaults.
"""
data = {'vpnservice': {'name': 'vpnservice1',
'description': 'descr_vpn1',
'router_id': _uuid(),
'admin_state_up': True,
'tenant_id': _uuid()}}
data['vpnservice'].update(more_args)
# Add in any default values for args that were not provided
actual_args = copy.copy(data)
actual_args['vpnservice'].update(defaulted_args)
return_value = copy.copy(data['vpnservice'])
return_value.update({'status': "ACTIVE", 'id': _uuid()})
return_value.update(defaulted_args)
instance = self.plugin.return_value
instance.create_vpnservice.return_value = return_value
res = self.api.post(_get_path('vpn/vpnservices', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_vpnservice.assert_called_with(mock.ANY,
vpnservice=actual_args)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('vpnservice', res)
self.assertEqual(return_value, res['vpnservice'])
def test_vpnservice_create(self):
"""Create VPN service using subnet (older API)."""
subnet = {'subnet_id': _uuid()}
self._test_vpnservice_create(more_args=subnet, defaulted_args={})
def test_vpnservice_create_no_subnet(self):
"""Test case to create a vpnservice w/o subnet (newer API)."""
no_subnet = {'subnet_id': None}
self._test_vpnservice_create(more_args={}, defaulted_args=no_subnet)
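# NOTE: Illustrative request shapes only (not executed by these tests): the
# older API scopes the VPN service to a subnet, while the newer API sends
# subnet_id=None and relies on per-connection endpoint groups instead.
#
#   {'vpnservice': {'router_id': ROUTER_ID, 'subnet_id': SUBNET_ID, ...}}
#   {'vpnservice': {'router_id': ROUTER_ID, 'subnet_id': None, ...}}
#
# ROUTER_ID and SUBNET_ID stand in for UUIDs generated with _uuid().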
def test_vpnservice_list(self):
"""Test case to list all vpnservices."""
vpnservice_id = _uuid()
return_value = [{'name': 'vpnservice1',
'tenant_id': _uuid(),
'status': 'ACTIVE',
'id': vpnservice_id}]
instance = self.plugin.return_value
instance.get_vpnservices.return_value = return_value
res = self.api.get(_get_path('vpn/vpnservices', fmt=self.fmt))
instance.get_vpnservices.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
def test_vpnservice_update(self):
"""Test case to update a vpnservice."""
vpnservice_id = _uuid()
update_data = {'vpnservice': {'admin_state_up': False}}
return_value = {'name': 'vpnservice1',
'admin_state_up': False,
'subnet_id': _uuid(),
'router_id': _uuid(),
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': vpnservice_id}
instance = self.plugin.return_value
instance.update_vpnservice.return_value = return_value
res = self.api.put(_get_path('vpn/vpnservices',
id=vpnservice_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_vpnservice.assert_called_with(mock.ANY,
vpnservice_id,
vpnservice=update_data)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('vpnservice', res)
self.assertEqual(return_value, res['vpnservice'])
def test_vpnservice_get(self):
"""Test case to get or show a vpnservice."""
vpnservice_id = _uuid()
return_value = {'name': 'vpnservice1',
'admin_state_up': True,
'subnet_id': _uuid(),
'router_id': _uuid(),
'tenant_id': _uuid(),
'status': "ACTIVE",
'id': vpnservice_id}
instance = self.plugin.return_value
instance.get_vpnservice.return_value = return_value
res = self.api.get(_get_path('vpn/vpnservices',
id=vpnservice_id,
fmt=self.fmt))
instance.get_vpnservice.assert_called_with(mock.ANY,
vpnservice_id,
fields=mock.ANY)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('vpnservice', res)
self.assertEqual(return_value, res['vpnservice'])
def test_vpnservice_delete(self):
"""Test case to delete a vpnservice."""
self._test_entity_delete('vpnservice')
def _test_ipsec_site_connection_create(self, more_args, defaulted_args):
"""Helper to test creating IPSec connection."""
ipsecsite_con_id = _uuid()
ikepolicy_id = _uuid()
ipsecpolicy_id = _uuid()
data = {
'ipsec_site_connection': {'name': 'connection1',
'description': 'Remote-connection1',
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'mtu': 1500,
'psk': 'abcd',
'initiator': 'bi-directional',
'dpd': {
'action': 'hold',
'interval': 30,
'timeout': 120},
'ikepolicy_id': ikepolicy_id,
'ipsecpolicy_id': ipsecpolicy_id,
'vpnservice_id': _uuid(),
'admin_state_up': True,
'tenant_id': _uuid()}
}
data['ipsec_site_connection'].update(more_args)
# Add in any default values for args that were not provided
actual_args = copy.copy(data)
actual_args['ipsec_site_connection'].update(defaulted_args)
return_value = copy.copy(data['ipsec_site_connection'])
return_value.update({'status': "ACTIVE", 'id': ipsecsite_con_id})
return_value.update(defaulted_args)
instance = self.plugin.return_value
instance.create_ipsec_site_connection.return_value = return_value
res = self.api.post(_get_path('vpn/ipsec-site-connections',
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_ipsec_site_connection.assert_called_with(
mock.ANY, ipsec_site_connection=actual_args)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('ipsec_site_connection', res)
self.assertEqual(return_value, res['ipsec_site_connection'])
def test_ipsec_site_connection_create(self):
"""Create an IPSec connection with peer CIDRs (old API)."""
peer_cidrs = {'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24']}
no_endpoint_groups = {'local_ep_group_id': None,
'peer_ep_group_id': None}
self._test_ipsec_site_connection_create(
more_args=peer_cidrs, defaulted_args=no_endpoint_groups)
def test_ipsec_site_connection_create_with_endpoints(self):
"""Create an IPSec connection with endpoint groups (new API)."""
endpoint_groups = {'local_ep_group_id': _uuid(),
'peer_ep_group_id': _uuid()}
no_peer_cidrs = {'peer_cidrs': []}
self._test_ipsec_site_connection_create(more_args=endpoint_groups,
defaulted_args=no_peer_cidrs)
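# NOTE: Illustrative contrast only: the legacy API carries peer_cidrs on the
# connection itself, while the newer API leaves peer_cidrs empty and points
# at endpoint groups.
#
#   legacy:          {'peer_cidrs': ['192.168.2.0/24'],
#                     'local_ep_group_id': None, 'peer_ep_group_id': None}
#   endpoint groups: {'peer_cidrs': [],
#                     'local_ep_group_id': LOCAL_EPG,
#                     'peer_ep_group_id': PEER_EPG}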
def test_ipsec_site_connection_list(self):
"""Test case to list all ipsec_site_connections."""
ipsecsite_con_id = _uuid()
return_value = [{'name': 'connection1',
'peer_address': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
'route_mode': 'static',
'auth_mode': 'psk',
'local_ep_group_id': None,
'peer_ep_group_id': None,
'tenant_id': _uuid(),
'status': 'ACTIVE',
'id': ipsecsite_con_id}]
instance = self.plugin.return_value
instance.get_ipsec_site_connections.return_value = return_value
res = self.api.get(
_get_path('vpn/ipsec-site-connections', fmt=self.fmt))
instance.get_ipsec_site_connections.assert_called_with(
mock.ANY, fields=mock.ANY, filters=mock.ANY
)
self.assertEqual(exc.HTTPOk.code, res.status_int)
def test_ipsec_site_connection_update(self):
"""Test case to update a ipsec_site_connection."""
ipsecsite_con_id = _uuid()
update_data = {'ipsec_site_connection': {'admin_state_up': False}}
return_value = {'name': 'connection1',
'description': 'Remote-connection1',
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
'mtu': 1500,
'psk': 'abcd',
'initiator': 'bi-directional',
'dpd': {
'action': 'hold',
'interval': 30,
'timeout': 120},
'ikepolicy_id': _uuid(),
'ipsecpolicy_id': _uuid(),
'vpnservice_id': _uuid(),
'admin_state_up': False,
'local_ep_group_id': None,
'peer_ep_group_id': None,
'tenant_id': _uuid(),
'status': 'ACTIVE',
'id': ipsecsite_con_id}
instance = self.plugin.return_value
instance.update_ipsec_site_connection.return_value = return_value
res = self.api.put(_get_path('vpn/ipsec-site-connections',
id=ipsecsite_con_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_ipsec_site_connection.assert_called_with(
mock.ANY, ipsecsite_con_id, ipsec_site_connection=update_data
)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('ipsec_site_connection', res)
self.assertEqual(return_value, res['ipsec_site_connection'])
def test_ipsec_site_connection_get(self):
"""Test case to get or show a ipsec_site_connection."""
ipsecsite_con_id = _uuid()
return_value = {'name': 'connection1',
'description': 'Remote-connection1',
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24',
'192.168.3.0/24'],
'mtu': 1500,
'psk': 'abcd',
'initiator': 'bi-directional',
'dpd': {
'action': 'hold',
'interval': 30,
'timeout': 120},
'ikepolicy_id': _uuid(),
'ipsecpolicy_id': _uuid(),
'vpnservice_id': _uuid(),
'admin_state_up': True,
'tenant_id': _uuid(),
'local_ep_group_id': None,
'peer_ep_group_id': None,
'status': 'ACTIVE',
'id': ipsecsite_con_id}
instance = self.plugin.return_value
instance.get_ipsec_site_connection.return_value = return_value
res = self.api.get(_get_path('vpn/ipsec-site-connections',
id=ipsecsite_con_id,
fmt=self.fmt))
instance.get_ipsec_site_connection.assert_called_with(
mock.ANY, ipsecsite_con_id, fields=mock.ANY
)
self.assertEqual(exc.HTTPOk.code, res.status_int)
res = self.deserialize(res)
self.assertIn('ipsec_site_connection', res)
self.assertEqual(return_value, res['ipsec_site_connection'])
def test_ipsec_site_connection_delete(self):
"""Test case to delete a ipsec_site_connection."""
self._test_entity_delete('ipsec_site_connection')
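# NOTE: Rough summary of the REST-to-plugin mapping exercised above (the
# paths are the suffixes handed to _get_path(); the test API fixture adds
# the actual prefix). Shown for ipsec-site-connections; the other resources
# follow the same pattern:
#
#   POST /vpn/ipsec-site-connections
#       -> create_ipsec_site_connection(ctx, ipsec_site_connection=body)
#   GET  /vpn/ipsec-site-connections
#       -> get_ipsec_site_connections(ctx, fields=..., filters=...)
#   PUT  /vpn/ipsec-site-connections/<id>
#       -> update_ipsec_site_connection(ctx, <id>, ipsec_site_connection=body)
#   GET  /vpn/ipsec-site-connections/<id>
#       -> get_ipsec_site_connection(ctx, <id>, fields=...)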
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/db/ 0000775 0005670 0005671 00000000000 12701410103 023375 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/db/vpn/ 0000775 0005670 0005671 00000000000 12701410103 024200 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/db/vpn/test_vpn_validator.py 0000664 0005670 0005671 00000062362 12701407726 030513 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2015, Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
import mock
from neutron import context as n_ctx
from neutron.db import l3_db
from neutron.db import servicetype_db as st_db
from neutron.plugins.common import constants as nconstants
from neutron_lib import exceptions as nexception
from oslo_utils import uuidutils
from sqlalchemy.orm import query
from neutron_vpnaas.extensions import vpnaas
from neutron_vpnaas.services.vpn.common import constants as v_constants
from neutron_vpnaas.services.vpn import plugin as vpn_plugin
from neutron_vpnaas.services.vpn.service_drivers \
import ipsec_validator as vpn_validator
from neutron_vpnaas.tests import base
_uuid = uuidutils.generate_uuid
FAKE_ROUTER_ID = _uuid()
FAKE_ROUTER = {l3_db.EXTERNAL_GW_INFO: FAKE_ROUTER_ID}
FAKE_SUBNET_ID = _uuid()
IPV4 = 4
IPV6 = 6
IPSEC_SERVICE_DRIVER = ('neutron_vpnaas.services.vpn.service_drivers.'
'ipsec.IPsecVPNDriver')
class TestValidatorSelection(base.BaseTestCase):
def setUp(self):
super(TestValidatorSelection, self).setUp()
vpnaas_provider = [{
'service_type': nconstants.VPN,
'name': 'vpnaas',
'driver': IPSEC_SERVICE_DRIVER,
'default': True
}]
# override the default service provider
self.service_providers = (
mock.patch.object(st_db.ServiceTypeManager,
'get_service_providers').start())
self.service_providers.return_value = vpnaas_provider
mock.patch('neutron.common.rpc.create_connection').start()
stm = st_db.ServiceTypeManager()
mock.patch('neutron.db.servicetype_db.ServiceTypeManager.get_instance',
return_value=stm).start()
self.vpn_plugin = vpn_plugin.VPNDriverPlugin()
def test_reference_driver_used(self):
self.assertIsInstance(self.vpn_plugin._get_validator(),
vpn_validator.IpsecVpnValidator)
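# NOTE: The vpnaas_provider dict above mirrors a colon-separated
# service_provider entry of the form (an assumption about the config syntax,
# matching how VPNPluginDbTestCase.setUp in test_vpn_db.py parses it):
#
#   service_provider = <service_type>:<name>:<driver>[:default]
#
# i.e. service_type=VPN, name='vpnaas', driver=IPSEC_SERVICE_DRIVER,
# default=True.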
class TestIPsecDriverValidation(base.BaseTestCase):
def setUp(self):
super(TestIPsecDriverValidation, self).setUp()
self.l3_plugin = mock.Mock()
mock.patch(
'neutron.manager.NeutronManager.get_service_plugins',
return_value={nconstants.L3_ROUTER_NAT: self.l3_plugin}).start()
self.core_plugin = mock.Mock()
mock.patch('neutron.manager.NeutronManager.get_plugin',
return_value=self.core_plugin).start()
self.context = n_ctx.Context('some_user', 'some_tenant')
self.service_plugin = mock.Mock()
self.validator = vpn_validator.IpsecVpnValidator(self.service_plugin)
self.router = mock.Mock()
self.router.gw_port = {'fixed_ips': [{'ip_address': '10.0.0.99'}]}
def test_non_public_router_for_vpn_service(self):
"""Failure test of service validate, when router missing ext. I/F."""
self.l3_plugin.get_router.return_value = {} # No external gateway
vpnservice = {'router_id': 123, 'subnet_id': 456}
self.assertRaises(vpnaas.RouterIsNotExternal,
self.validator.validate_vpnservice,
self.context, vpnservice)
def test_subnet_not_connected_for_vpn_service(self):
"""Failure test of service validate, when subnet not on router."""
self.l3_plugin.get_router.return_value = FAKE_ROUTER
self.core_plugin.get_ports.return_value = None
vpnservice = {'router_id': FAKE_ROUTER_ID, 'subnet_id': FAKE_SUBNET_ID}
self.assertRaises(vpnaas.SubnetIsNotConnectedToRouter,
self.validator.validate_vpnservice,
self.context, vpnservice)
def test_defaults_for_ipsec_site_connections_on_create(self):
"""Check that defaults are applied correctly.
MTU has a default and will always be present on create.
However, the DPD settings do not have defaults, so the
database create method will assign default values for any
that are missing. In addition, the DPD dict will be flattened
for storage in the database, so we do that as part of
assigning defaults.
"""
ipsec_sitecon = {}
self.validator.assign_sensible_ipsec_sitecon_defaults(ipsec_sitecon)
expected = {
'dpd_action': 'hold',
'dpd_timeout': 120,
'dpd_interval': 30
}
self.assertEqual(expected, ipsec_sitecon)
ipsec_sitecon = {'dpd': {'interval': 50}}
self.validator.assign_sensible_ipsec_sitecon_defaults(ipsec_sitecon)
expected = {
'dpd': {'interval': 50},
'dpd_action': 'hold',
'dpd_timeout': 120,
'dpd_interval': 50
}
self.assertEqual(expected, ipsec_sitecon)
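# NOTE: A minimal, hypothetical sketch of the flattening described in the
# docstring above (not the actual validator code): DPD sub-dict values are
# copied into flat dpd_* keys, with hold/30/120 used when nothing is given.
#
#   def _flatten_dpd_with_defaults(ipsec_sitecon):
#       dpd = ipsec_sitecon.get('dpd', {})
#       ipsec_sitecon.setdefault('dpd_action', dpd.get('action', 'hold'))
#       ipsec_sitecon.setdefault('dpd_interval', dpd.get('interval', 30))
#       ipsec_sitecon.setdefault('dpd_timeout', dpd.get('timeout', 120))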
def test_resolve_peer_address_with_ipaddress(self):
ipsec_sitecon = {'peer_address': '10.0.0.9'}
self.validator._validate_peer_address = mock.Mock()
self.validator.resolve_peer_address(ipsec_sitecon, self.router)
self.assertEqual('10.0.0.9', ipsec_sitecon['peer_address'])
self.validator._validate_peer_address.assert_called_once_with(
IPV4, self.router)
def test_resolve_peer_address_with_fqdn(self):
with mock.patch.object(socket, 'getaddrinfo') as mock_getaddr_info:
mock_getaddr_info.return_value = [(2, 1, 6, '',
('10.0.0.9', 0))]
ipsec_sitecon = {'peer_address': 'fqdn.peer.addr'}
self.validator._validate_peer_address = mock.Mock()
self.validator.resolve_peer_address(ipsec_sitecon, self.router)
self.assertEqual('10.0.0.9', ipsec_sitecon['peer_address'])
self.validator._validate_peer_address.assert_called_once_with(
IPV4, self.router)
def test_resolve_peer_address_with_invalid_fqdn(self):
with mock.patch.object(socket, 'getaddrinfo') as mock_getaddr_info:
def getaddr_info_failer(*args, **kwargs):
raise socket.gaierror()
mock_getaddr_info.side_effect = getaddr_info_failer
ipsec_sitecon = {'peer_address': 'fqdn.invalid'}
self.assertRaises(vpnaas.VPNPeerAddressNotResolved,
self.validator.resolve_peer_address,
ipsec_sitecon, self.router)
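# NOTE: A hypothetical sketch of the resolution behaviour exercised above
# (the real driver code may differ): socket.getaddrinfo() is consulted, the
# first resolved address replaces the FQDN, and a socket.gaierror becomes
# VPNPeerAddressNotResolved.
#
#   def _resolve_peer(peer_address):
#       try:
#           addrinfo = socket.getaddrinfo(peer_address, None)[0]
#       except socket.gaierror:
#           raise vpnaas.VPNPeerAddressNotResolved(
#               peer_address=peer_address)   # exception kwargs assumed
#       return addrinfo[-1][0]               # e.g. '10.0.0.9'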
def helper_validate_peer_address(self, fixed_ips, ip_version,
expected_exception=False):
self.router.id = FAKE_ROUTER_ID
self.router.gw_port = {'fixed_ips': fixed_ips}
try:
self.validator._validate_peer_address(ip_version, self.router)
if expected_exception:
self.fail("No exception raised for invalid peer address")
except vpnaas.ExternalNetworkHasNoSubnet:
if not expected_exception:
self.fail("exception for valid peer address raised")
def test_validate_peer_address(self):
# validate ipv4 peer_address with ipv4 gateway
fixed_ips = [{'ip_address': '10.0.0.99'}]
self.helper_validate_peer_address(fixed_ips, IPV4)
# validate ipv6 peer_address with ipv6 gateway
fixed_ips = [{'ip_address': '2001::1'}]
self.helper_validate_peer_address(fixed_ips, IPV6)
# validate ipv6 peer_address with both ipv4 and ipv6 gateways
fixed_ips = [{'ip_address': '2001::1'}, {'ip_address': '10.0.0.99'}]
self.helper_validate_peer_address(fixed_ips, IPV6)
# validate ipv4 peer_address with both ipv4 and ipv6 gateways
fixed_ips = [{'ip_address': '2001::1'}, {'ip_address': '10.0.0.99'}]
self.helper_validate_peer_address(fixed_ips, IPV4)
# validate ipv4 peer_address with ipv6 gateway
fixed_ips = [{'ip_address': '2001::1'}]
self.helper_validate_peer_address(fixed_ips, IPV4,
expected_exception=True)
# validate ipv6 peer_address with ipv4 gateway
fixed_ips = [{'ip_address': '10.0.0.99'}]
self.helper_validate_peer_address(fixed_ips, IPV6,
expected_exception=True)
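# NOTE: Illustrative sketch of the rule the cases above pin down (assumed
# logic, not the actual _validate_peer_address body): the peer address IP
# version must match at least one fixed IP on the router's gateway port.
#
#   import netaddr
#   def _gateway_has_version(router, ip_version):
#       return any(
#           netaddr.IPAddress(ip['ip_address']).version == ip_version
#           for ip in router.gw_port['fixed_ips'])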
def test_validate_ipsec_policy(self):
ipsec_policy = {'transform_protocol': 'ah-esp'}
self.assertRaises(vpn_validator.IpsecValidationFailure,
self.validator.validate_ipsec_policy,
self.context, ipsec_policy)
def test_defaults_for_ipsec_site_connections_on_update(self):
"""Check that defaults are used for any values not specified."""
ipsec_sitecon = {}
prev_connection = {'peer_cidrs': [{'cidr': '10.0.0.0/24'},
{'cidr': '20.0.0.0/24'}],
'local_ep_group_id': None,
'peer_ep_group_id': None,
'dpd_action': 'clear',
'dpd_timeout': 500,
'dpd_interval': 250}
self.validator.assign_sensible_ipsec_sitecon_defaults(ipsec_sitecon,
prev_connection)
expected = {
'peer_cidrs': ['10.0.0.0/24', '20.0.0.0/24'],
'local_ep_group_id': None,
'peer_ep_group_id': None,
'dpd_action': 'clear',
'dpd_timeout': 500,
'dpd_interval': 250
}
self.assertEqual(expected, ipsec_sitecon)
ipsec_sitecon = {'dpd': {'timeout': 200}}
local_epg_id = _uuid()
peer_epg_id = _uuid()
prev_connection = {'peer_cidrs': [],
'local_ep_group_id': local_epg_id,
'peer_ep_group_id': peer_epg_id,
'dpd_action': 'clear',
'dpd_timeout': 500,
'dpd_interval': 100}
self.validator.assign_sensible_ipsec_sitecon_defaults(ipsec_sitecon,
prev_connection)
expected = {
'peer_cidrs': [],
'local_ep_group_id': local_epg_id,
'peer_ep_group_id': peer_epg_id,
'dpd': {'timeout': 200},
'dpd_action': 'clear',
'dpd_timeout': 200,
'dpd_interval': 100
}
self.assertEqual(expected, ipsec_sitecon)
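# NOTE: Illustrative only (assumed update semantics, matching the expected
# dicts above): values omitted from the update are backfilled from the
# previous connection, and a supplied 'dpd' sub-dict overrides only the
# corresponding flattened dpd_* keys.
#
#   dpd = ipsec_sitecon.get('dpd', {})
#   ipsec_sitecon.setdefault(
#       'peer_cidrs', [c['cidr'] for c in prev_connection['peer_cidrs']])
#   for field in ('local_ep_group_id', 'peer_ep_group_id',
#                 'dpd_action', 'dpd_interval', 'dpd_timeout'):
#       ipsec_sitecon.setdefault(field, prev_connection[field])
#   for key in ('action', 'interval', 'timeout'):
#       if key in dpd:
#           ipsec_sitecon['dpd_' + key] = dpd[key]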
def test_bad_dpd_settings_on_create(self):
"""Failure tests of DPD settings for IPSec conn during create."""
ipsec_sitecon = {'dpd_action': 'hold', 'dpd_interval': 100,
'dpd_timeout': 100}
self.assertRaises(vpnaas.IPsecSiteConnectionDpdIntervalValueError,
self.validator._check_dpd, ipsec_sitecon)
ipsec_sitecon = {'dpd_action': 'hold', 'dpd_interval': 100,
'dpd_timeout': 99}
self.assertRaises(vpnaas.IPsecSiteConnectionDpdIntervalValueError,
self.validator._check_dpd, ipsec_sitecon)
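# NOTE: The two failure cases above pin down the DPD invariant: the timeout
# must be strictly greater than the interval. A sketch of the assumed check
# (not the literal _check_dpd implementation):
#
#   if ipsec_sitecon['dpd_timeout'] <= ipsec_sitecon['dpd_interval']:
#       raise vpnaas.IPsecSiteConnectionDpdIntervalValueError(
#           dpd_timeout=ipsec_sitecon['dpd_timeout'])  # kwargs assumed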
def test_bad_mtu_for_ipsec_connection(self):
"""Failure test of invalid MTU values for IPSec conn create/update."""
ip_version_limits = vpn_validator.IpsecVpnValidator.IP_MIN_MTU
for version, limit in ip_version_limits.items():
ipsec_sitecon = {'mtu': limit - 1}
self.assertRaises(
vpnaas.IPsecSiteConnectionMtuError,
self.validator._check_mtu,
self.context, ipsec_sitecon['mtu'], version)
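# NOTE: IP_MIN_MTU maps an IP version to the smallest MTU the validator
# accepts; anything below the per-version minimum raises
# IPsecSiteConnectionMtuError. Sketch only, assuming the conventional
# protocol minima of 68 (IPv4) and 1280 (IPv6):
#
#   IP_MIN_MTU = {4: 68, 6: 1280}             # assumed values
#   def _check_mtu(mtu, ip_version):
#       if mtu < IP_MIN_MTU[ip_version]:
#           raise vpnaas.IPsecSiteConnectionMtuError(
#               mtu=mtu, version=ip_version)  # kwargs assumed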
def test_endpoints_all_cidrs_in_endpoint_group(self):
"""All endpoints in the endpoint group are valid CIDRs."""
endpoint_group = {'type': v_constants.CIDR_ENDPOINT,
'endpoints': ['10.10.10.0/24', '20.20.20.0/24']}
try:
self.validator.validate_endpoint_group(self.context,
endpoint_group)
except Exception:
self.fail("All CIDRs in endpoint_group should be valid")
def test_endpoints_all_subnets_in_endpoint_group(self):
"""All endpoints in the endpoint group are valid subnets."""
endpoint_group = {'type': v_constants.SUBNET_ENDPOINT,
'endpoints': [_uuid(), _uuid()]}
try:
self.validator.validate_endpoint_group(self.context,
endpoint_group)
except Exception:
self.fail("All subnets in endpoint_group should be valid")
def test_mixed_endpoint_types_in_endpoint_group(self):
"""Fail when mixing types of endpoints in endpoint group."""
endpoint_group = {'type': v_constants.CIDR_ENDPOINT,
'endpoints': ['10.10.10.0/24', _uuid()]}
self.assertRaises(vpnaas.InvalidEndpointInEndpointGroup,
self.validator.validate_endpoint_group,
self.context, endpoint_group)
endpoint_group = {'type': v_constants.SUBNET_ENDPOINT,
'endpoints': [_uuid(), '10.10.10.0/24']}
self.assertRaises(vpnaas.InvalidEndpointInEndpointGroup,
self.validator.validate_endpoint_group,
self.context, endpoint_group)
def test_missing_endpoints_for_endpoint_group(self):
endpoint_group = {'type': v_constants.CIDR_ENDPOINT,
'endpoints': []}
self.assertRaises(vpnaas.MissingEndpointForEndpointGroup,
self.validator.validate_endpoint_group,
self.context, endpoint_group)
def test_fail_bad_cidr_in_endpoint_group(self):
"""Testing catches bad CIDR.
Just check one case, as CIDR validator used has good test coverage.
"""
endpoint_group = {'type': v_constants.CIDR_ENDPOINT,
'endpoints': ['10.10.10.10/24', '20.20.20.1']}
self.assertRaises(vpnaas.InvalidEndpointInEndpointGroup,
self.validator.validate_endpoint_group,
self.context, endpoint_group)
def test_unknown_subnet_in_endpoint_group(self):
subnet_id = _uuid()
self.core_plugin.get_subnet.side_effect = nexception.SubnetNotFound(
subnet_id=subnet_id)
endpoint_group = {'type': v_constants.SUBNET_ENDPOINT,
'endpoints': [subnet_id]}
self.assertRaises(vpnaas.NonExistingSubnetInEndpointGroup,
self.validator.validate_endpoint_group,
self.context, endpoint_group)
def test_fail_subnets_not_on_same_router_for_endpoint_group(self):
"""Detect when local endpoints not on the same router."""
subnet1 = {'id': _uuid(), 'ip_version': 4}
subnet2 = {'id': _uuid(), 'ip_version': 4}
router = _uuid()
multiple_subnets = [subnet1, subnet2]
port_mock = mock.patch.object(self.core_plugin, "get_ports").start()
port_mock.side_effect = ['dummy info', None]
self.assertRaises(vpnaas.SubnetIsNotConnectedToRouter,
self.validator._check_local_subnets_on_router,
self.context, router, multiple_subnets)
def test_ipsec_conn_local_endpoints_same_ip_version(self):
"""Check local endpoint subnets all have same IP version."""
endpoint_group_id = _uuid()
subnet1 = {'ip_version': 4}
subnet2 = {'ip_version': 4}
single_subnet = [subnet1]
version = self.validator._check_local_endpoint_ip_versions(
endpoint_group_id, single_subnet)
self.assertEqual(4, version)
multiple_subnets = [subnet1, subnet2]
version = self.validator._check_local_endpoint_ip_versions(
endpoint_group_id, multiple_subnets)
self.assertEqual(4, version)
def test_fail_ipsec_conn_local_endpoints_mixed_ip_version(self):
"""Fail when mixed IP versions in local endpoints."""
endpoint_group_id = _uuid()
subnet1 = {'ip_version': 6}
subnet2 = {'ip_version': 4}
mixed_subnets = [subnet1, subnet2]
self.assertRaises(vpnaas.MixedIPVersionsForIPSecEndpoints,
self.validator._check_local_endpoint_ip_versions,
endpoint_group_id, mixed_subnets)
def test_ipsec_conn_peer_endpoints_same_ip_version(self):
"""Check all CIDRs have the same IP version."""
endpoint_group_id = _uuid()
one_cidr = ['2002:0a00::/48']
version = self.validator._check_peer_endpoint_ip_versions(
endpoint_group_id, one_cidr)
self.assertEqual(6, version)
multiple_cidr = ['10.10.10.0/24', '20.20.20.0/24']
version = self.validator._check_peer_endpoint_ip_versions(
endpoint_group_id, multiple_cidr)
self.assertEqual(4, version)
def test_fail_ipsec_conn_peer_endpoints_mixed_ip_version(self):
"""Fail when mixed IP versions in peer endpoints."""
endpoint_group_id = _uuid()
mixed_cidrs = ['10.10.10.0/24', '2002:1400::/48']
self.assertRaises(vpnaas.MixedIPVersionsForIPSecEndpoints,
self.validator._check_peer_endpoint_ip_versions,
endpoint_group_id, mixed_cidrs)
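# NOTE: A hypothetical sketch of the peer-endpoint version check exercised
# above: every CIDR in the group must share a single IP version, which is
# then returned for the local/peer compatibility comparison.
#
#   import netaddr
#   def _check_peer_endpoint_ip_versions(group_id, cidrs):
#       versions = {netaddr.IPNetwork(cidr).version for cidr in cidrs}
#       if len(versions) != 1:
#           raise vpnaas.MixedIPVersionsForIPSecEndpoints(
#               group=group_id)               # kwargs assumed
#       return versions.pop()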
def test_fail_ipsec_conn_locals_and_peers_different_ip_version(self):
"""Ensure catch when local and peer IP versions are not the same."""
self.assertRaises(vpnaas.MixedIPVersionsForIPSecConnection,
self.validator._validate_compatible_ip_versions,
4, 6)
def test_fail_ipsec_conn_no_subnet_requiring_endpoint_groups(self):
"""When no subnet, connection must use endpoints.
This means both endpoint groups must be present, and peer_cidrs
cannot be used.
"""
subnet = None
ipsec_sitecon = {'peer_cidrs': ['10.0.0.0/24'],
'local_ep_group_id': 'local-epg-id',
'peer_ep_group_id': 'peer-epg-id'}
self.assertRaises(vpnaas.PeerCidrsInvalid,
self.validator.validate_ipsec_conn_optional_args,
ipsec_sitecon, subnet)
ipsec_sitecon = {'peer_cidrs': [],
'local_ep_group_id': None,
'peer_ep_group_id': 'peer-epg-id'}
self.assertRaises(vpnaas.MissingRequiredEndpointGroup,
self.validator.validate_ipsec_conn_optional_args,
ipsec_sitecon, subnet)
ipsec_sitecon = {'peer_cidrs': [],
'local_ep_group_id': 'local-epg-id',
'peer_ep_group_id': None}
self.assertRaises(vpnaas.MissingRequiredEndpointGroup,
self.validator.validate_ipsec_conn_optional_args,
ipsec_sitecon, subnet)
ipsec_sitecon = {'peer_cidrs': [],
'local_ep_group_id': None,
'peer_ep_group_id': None}
self.assertRaises(vpnaas.MissingRequiredEndpointGroup,
self.validator.validate_ipsec_conn_optional_args,
ipsec_sitecon, subnet)
def test_fail_ipsec_conn_subnet_requiring_peer_cidrs(self):
"""When legacy mode, no endpoint groups.
This means neither endpoint group can be specified, and the peer_cidrs
must be present.
"""
subnet = {'id': FAKE_SUBNET_ID}
ipsec_sitecon = {'peer_cidrs': [],
'local_ep_group_id': None,
'peer_ep_group_id': None}
self.assertRaises(vpnaas.MissingPeerCidrs,
self.validator.validate_ipsec_conn_optional_args,
ipsec_sitecon, subnet)
ipsec_sitecon = {'peer_cidrs': ['10.0.0.0/24'],
'local_ep_group_id': 'local-epg-id',
'peer_ep_group_id': None}
self.assertRaises(vpnaas.InvalidEndpointGroup,
self.validator.validate_ipsec_conn_optional_args,
ipsec_sitecon, subnet)
ipsec_sitecon = {'peer_cidrs': ['10.0.0.0/24'],
'local_ep_group_id': None,
'peer_ep_group_id': 'peer-epg-id'}
self.assertRaises(vpnaas.InvalidEndpointGroup,
self.validator.validate_ipsec_conn_optional_args,
ipsec_sitecon, subnet)
ipsec_sitecon = {'peer_cidrs': ['10.0.0.0/24'],
'local_ep_group_id': 'local-epg-id',
'peer_ep_group_id': 'peer-epg-id'}
self.assertRaises(vpnaas.InvalidEndpointGroup,
self.validator.validate_ipsec_conn_optional_args,
ipsec_sitecon, subnet)
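# NOTE: Taken together, the two tests above define the optional-args matrix.
# A sketch of the assumed rule (not the literal validator code; exception
# kwargs omitted):
#
#   if subnet:   # legacy mode: service has a subnet
#       if conn['local_ep_group_id'] or conn['peer_ep_group_id']:
#           raise vpnaas.InvalidEndpointGroup()
#       if not conn['peer_cidrs']:
#           raise vpnaas.MissingPeerCidrs()
#   else:        # endpoint-group mode
#       if conn['peer_cidrs']:
#           raise vpnaas.PeerCidrsInvalid()
#       if not (conn['local_ep_group_id'] and conn['peer_ep_group_id']):
#           raise vpnaas.MissingRequiredEndpointGroup()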
def test_ipsec_conn_get_local_subnets(self):
subnet1 = _uuid()
subnet2 = _uuid()
expected_subnets = [subnet1, subnet2]
local_epg = {'id': _uuid(),
'type': v_constants.SUBNET_ENDPOINT,
'endpoints': expected_subnets}
query_mock = mock.patch.object(query.Query, 'all').start()
query_mock.return_value = expected_subnets
subnets = self.validator._get_local_subnets(self.context, local_epg)
self.assertEqual(expected_subnets, subnets)
def test_ipsec_conn_get_peer_cidrs(self):
expected_cidrs = ['10.10.10.10/24', '20.20.20.20/24']
peer_epg = {'id': 'should-be-cidrs',
'type': v_constants.CIDR_ENDPOINT,
'endpoints': expected_cidrs}
cidrs = self.validator._get_peer_cidrs(peer_epg)
self.assertEqual(expected_cidrs, cidrs)
def test_fail_ipsec_conn_endpoint_group_types(self):
local_epg = {'id': 'should-be-subnets',
'type': v_constants.CIDR_ENDPOINT,
'endpoints': ['10.10.10.10/24', '20.20.20.20/24']}
self.assertRaises(vpnaas.WrongEndpointGroupType,
self.validator._get_local_subnets,
self.context, local_epg)
peer_epg = {'id': 'should-be-cidrs',
'type': v_constants.SUBNET_ENDPOINT,
'endpoints': [_uuid(), _uuid()]}
self.assertRaises(vpnaas.WrongEndpointGroupType,
self.validator._get_peer_cidrs,
peer_epg)
def test_validate_ipsec_conn_for_endpoints(self):
"""Check upper-level validation method for endpoint groups.
Tests the happy path for doing general validation of the IPSec
connection, calling all the sub-checks for an endpoint group case.
"""
subnet1 = {'id': _uuid(), 'ip_version': 4}
subnet2 = {'id': _uuid(), 'ip_version': 4}
local_subnets = [subnet1, subnet2]
local_epg_id = _uuid()
local_epg = {'id': local_epg_id,
'type': v_constants.SUBNET_ENDPOINT,
'endpoints': local_subnets}
# Mock getting the subnets from the IDs
query_mock = mock.patch.object(query.Query, 'all').start()
query_mock.return_value = local_subnets
# Mock that subnet is on router
port_mock = mock.patch.object(self.core_plugin, "get_ports").start()
port_mock.side_effect = ['dummy info', 'more dummy info']
peer_epg_id = _uuid()
peer_cidrs = ['10.10.10.10/24', '20.20.20.20/24']
peer_epg = {'id': peer_epg_id,
'type': v_constants.CIDR_ENDPOINT,
'endpoints': peer_cidrs}
ipsec_sitecon = {'local_ep_group_id': local_epg_id,
'local_epg_subnets': local_epg,
'peer_ep_group_id': peer_epg_id,
'peer_epg_cidrs': peer_epg,
'mtu': 2000,
'dpd_action': 'hold',
'dpd_interval': 30,
'dpd_timeout': 120}
local_version = None
vpnservice = {'router_id': _uuid()}
self.validator.validate_ipsec_site_connection(
self.context, ipsec_sitecon, local_version, vpnservice)
# NOTE: Following are tests for the older API, providing some additional
# coverage.
def test_ipsec_conn_peer_cidrs_same_ip_version(self):
"""Check legacy peer_cidrs have same IP version."""
one_cidr = ['2002:0a00::/48']
version = self.validator._check_peer_cidrs_ip_versions(one_cidr)
self.assertEqual(6, version)
multiple_cidrs = ['10.10.10.0/24', '20.20.20.0/24']
version = self.validator._check_peer_cidrs_ip_versions(multiple_cidrs)
self.assertEqual(4, version)
def test_fail_ipsec_conn_peer_cidrs_mixed_ip_version(self):
mixed_cidrs = ['2002:0a00::/48', '20.20.20.0/24']
self.assertRaises(vpnaas.MixedIPVersionsForPeerCidrs,
self.validator._check_peer_cidrs_ip_versions,
mixed_cidrs)
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/db/vpn/__init__.py 0000664 0005670 0005671 00000000000 12701407726 026320 0 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/db/vpn/test_vpn_db.py 0000664 0005670 0005671 00000313630 12701407726 027110 0 ustar jenkins jenkins 0000000 0000000 # (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# (c) Copyright 2015 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import os
import mock
from neutron.api import extensions as api_extensions
from neutron.api.v2 import attributes
from neutron.common import config
from neutron.common import constants as l3_constants
from neutron import context
from neutron.db import agentschedulers_db
from neutron.db import l3_agentschedulers_db
from neutron.db import servicetype_db as sdb
from neutron import extensions as nextensions
from neutron.extensions import l3 as l3_exception
from neutron import manager
from neutron.plugins.common import constants as nconstants
from neutron.scheduler import l3_agent_scheduler
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_plugin
from neutron.tests.unit.extensions import test_l3 as test_l3_plugin
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
import six
import webob.exc
from neutron_vpnaas.db.vpn import vpn_db
from neutron_vpnaas.db.vpn import vpn_models
from neutron_vpnaas.services.vpn.common import constants
from neutron_vpnaas.services.vpn import plugin as vpn_plugin
from neutron_vpnaas.tests import base
from neutron_vpnaas import extensions
from neutron_vpnaas.extensions import vpnaas
DB_CORE_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
DB_VPN_PLUGIN_KLASS = "neutron_vpnaas.services.vpn.plugin.VPNPlugin"
ROOTDIR = os.path.normpath(os.path.join(
os.path.dirname(__file__),
'..', '..', '..', '..'))
extensions_path = ':'.join(extensions.__path__ + nextensions.__path__)
_uuid = uuidutils.generate_uuid
class TestVpnCorePlugin(test_l3_plugin.TestL3NatIntPlugin,
l3_agentschedulers_db.L3AgentSchedulerDbMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin):
def __init__(self, configfile=None):
super(TestVpnCorePlugin, self).__init__()
self.router_scheduler = l3_agent_scheduler.ChanceScheduler()
class VPNTestMixin(object):
resource_prefix_map = dict(
(k.replace('_', '-'),
"/vpn")
for k in vpnaas.RESOURCE_ATTRIBUTE_MAP
)
def _create_ikepolicy(self, fmt,
name='ikepolicy1',
auth_algorithm='sha1',
encryption_algorithm='aes-128',
phase1_negotiation_mode='main',
lifetime_units='seconds',
lifetime_value=3600,
ike_version='v1',
pfs='group5',
expected_res_status=None, **kwargs):
data = {'ikepolicy': {
'name': name,
'auth_algorithm': auth_algorithm,
'encryption_algorithm': encryption_algorithm,
'phase1_negotiation_mode': phase1_negotiation_mode,
'lifetime': {
'units': lifetime_units,
'value': lifetime_value},
'ike_version': ike_version,
'pfs': pfs,
'tenant_id': self._tenant_id
}}
if kwargs.get('description') is not None:
data['ikepolicy']['description'] = kwargs['description']
ikepolicy_req = self.new_create_request('ikepolicies', data, fmt)
ikepolicy_res = ikepolicy_req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(ikepolicy_res.status_int, expected_res_status)
return ikepolicy_res
@contextlib.contextmanager
def ikepolicy(self, fmt=None,
name='ikepolicy1',
auth_algorithm='sha1',
encryption_algorithm='aes-128',
phase1_negotiation_mode='main',
lifetime_units='seconds',
lifetime_value=3600,
ike_version='v1',
pfs='group5',
do_delete=True,
**kwargs):
if not fmt:
fmt = self.fmt
res = self._create_ikepolicy(fmt,
name,
auth_algorithm,
encryption_algorithm,
phase1_negotiation_mode,
lifetime_units,
lifetime_value,
ike_version,
pfs,
**kwargs)
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
ikepolicy = self.deserialize(fmt or self.fmt, res)
yield ikepolicy
if do_delete:
self._delete('ikepolicies', ikepolicy['ikepolicy']['id'])
def _create_ipsecpolicy(self, fmt,
name='ipsecpolicy1',
auth_algorithm='sha1',
encryption_algorithm='aes-128',
encapsulation_mode='tunnel',
transform_protocol='esp',
lifetime_units='seconds',
lifetime_value=3600,
pfs='group5',
expected_res_status=None,
**kwargs):
data = {'ipsecpolicy': {'name': name,
'auth_algorithm': auth_algorithm,
'encryption_algorithm': encryption_algorithm,
'encapsulation_mode': encapsulation_mode,
'transform_protocol': transform_protocol,
'lifetime': {'units': lifetime_units,
'value': lifetime_value},
'pfs': pfs,
'tenant_id': self._tenant_id}}
if kwargs.get('description') is not None:
data['ipsecpolicy']['description'] = kwargs['description']
ipsecpolicy_req = self.new_create_request('ipsecpolicies', data, fmt)
ipsecpolicy_res = ipsecpolicy_req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(ipsecpolicy_res.status_int, expected_res_status)
return ipsecpolicy_res
@contextlib.contextmanager
def ipsecpolicy(self, fmt=None,
name='ipsecpolicy1',
auth_algorithm='sha1',
encryption_algorithm='aes-128',
encapsulation_mode='tunnel',
transform_protocol='esp',
lifetime_units='seconds',
lifetime_value=3600,
pfs='group5',
do_delete=True, **kwargs):
if not fmt:
fmt = self.fmt
res = self._create_ipsecpolicy(fmt,
name,
auth_algorithm,
encryption_algorithm,
encapsulation_mode,
transform_protocol,
lifetime_units,
lifetime_value,
pfs,
**kwargs)
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
ipsecpolicy = self.deserialize(fmt or self.fmt, res)
yield ipsecpolicy
if do_delete:
self._delete('ipsecpolicies', ipsecpolicy['ipsecpolicy']['id'])
def _create_vpnservice(self, fmt, name,
admin_state_up,
router_id, subnet_id,
expected_res_status=None, **kwargs):
tenant_id = kwargs.get('tenant_id', self._tenant_id)
data = {'vpnservice': {'name': name,
'subnet_id': subnet_id,
'router_id': router_id,
'admin_state_up': admin_state_up,
'tenant_id': tenant_id}}
if kwargs.get('description') is not None:
data['vpnservice']['description'] = kwargs['description']
vpnservice_req = self.new_create_request('vpnservices', data, fmt)
if (kwargs.get('set_context') and
'tenant_id' in kwargs):
# create a specific auth context for this request
vpnservice_req.environ['neutron.context'] = context.Context(
'', kwargs['tenant_id'])
vpnservice_res = vpnservice_req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(vpnservice_res.status_int, expected_res_status)
return vpnservice_res
@contextlib.contextmanager
def vpnservice(self, fmt=None, name='vpnservice1',
subnet=None,
router=None,
admin_state_up=True,
do_delete=True,
plug_subnet=True,
external_subnet_cidr='192.168.100.0/24',
external_router=True,
**kwargs):
if not fmt:
fmt = self.fmt
with test_db_plugin.optional_ctx(subnet, self.subnet) as tmp_subnet, \
test_db_plugin.optional_ctx(router,
self.router) as tmp_router, \
self.subnet(cidr=external_subnet_cidr) as public_sub:
if external_router:
self._set_net_external(
public_sub['subnet']['network_id'])
self._add_external_gateway_to_router(
tmp_router['router']['id'],
public_sub['subnet']['network_id'])
tmp_router['router']['external_gateway_info'] = {
'network_id': public_sub['subnet']['network_id']}
if plug_subnet:
self._router_interface_action(
'add',
tmp_router['router']['id'],
tmp_subnet['subnet']['id'], None)
res = self._create_vpnservice(fmt,
name,
admin_state_up,
router_id=(tmp_router['router']
['id']),
subnet_id=(tmp_subnet['subnet']
['id']),
**kwargs)
vpnservice = self.deserialize(fmt or self.fmt, res)
if res.status_int < 400:
yield vpnservice
if do_delete and vpnservice.get('vpnservice'):
self._delete('vpnservices',
vpnservice['vpnservice']['id'])
if plug_subnet:
self._router_interface_action(
'remove',
tmp_router['router']['id'],
tmp_subnet['subnet']['id'], None)
if external_router:
external_gateway = tmp_router['router'].get(
'external_gateway_info')
if external_gateway:
network_id = external_gateway['network_id']
self._remove_external_gateway_from_router(
tmp_router['router']['id'], network_id)
if res.status_int >= 400:
raise webob.exc.HTTPClientError(
code=res.status_int, detail=vpnservice)
self._delete('subnets', public_sub['subnet']['id'])
if not subnet:
self._delete('subnets', tmp_subnet['subnet']['id'])
def _create_ipsec_site_connection(self, fmt, name='test',
peer_address='192.168.1.10',
peer_id='192.168.1.10',
peer_cidrs=None,
mtu=1500,
psk='abcdefg',
initiator='bi-directional',
dpd_action='hold',
dpd_interval=30,
dpd_timeout=120,
vpnservice_id='fake_id',
ikepolicy_id='fake_id',
ipsecpolicy_id='fake_id',
admin_state_up=True,
local_ep_group_id=None,
peer_ep_group_id=None,
expected_res_status=None, **kwargs):
data = {
'ipsec_site_connection': {'name': name,
'peer_address': peer_address,
'peer_id': peer_id,
'peer_cidrs': peer_cidrs,
'mtu': mtu,
'psk': psk,
'initiator': initiator,
'dpd': {
'action': dpd_action,
'interval': dpd_interval,
'timeout': dpd_timeout,
},
'vpnservice_id': vpnservice_id,
'ikepolicy_id': ikepolicy_id,
'ipsecpolicy_id': ipsecpolicy_id,
'admin_state_up': admin_state_up,
'tenant_id': self._tenant_id,
'local_ep_group_id': local_ep_group_id,
'peer_ep_group_id': peer_ep_group_id}
}
if kwargs.get('description') is not None:
data['ipsec_site_connection'][
'description'] = kwargs['description']
ipsec_site_connection_req = self.new_create_request(
'ipsec-site-connections', data, fmt
)
ipsec_site_connection_res = ipsec_site_connection_req.get_response(
self.ext_api
)
if expected_res_status:
self.assertEqual(
ipsec_site_connection_res.status_int, expected_res_status
)
return ipsec_site_connection_res
@contextlib.contextmanager
def ipsec_site_connection(self, fmt=None, name='ipsec_site_connection1',
peer_address='192.168.1.10',
peer_id='192.168.1.10',
peer_cidrs=None,
mtu=1500,
psk='abcdefg',
initiator='bi-directional',
dpd_action='hold',
dpd_interval=30,
dpd_timeout=120,
vpnservice=None,
ikepolicy=None,
ipsecpolicy=None,
admin_state_up=True, do_delete=True,
local_ep_group_id=None,
peer_ep_group_id=None,
**kwargs):
if not fmt:
fmt = self.fmt
with test_db_plugin.optional_ctx(vpnservice, self.vpnservice
) as tmp_vpnservice, \
test_db_plugin.optional_ctx(ikepolicy, self.ikepolicy
) as tmp_ikepolicy, \
test_db_plugin.optional_ctx(ipsecpolicy, self.ipsecpolicy
) as tmp_ipsecpolicy:
vpnservice_id = tmp_vpnservice['vpnservice']['id']
ikepolicy_id = tmp_ikepolicy['ikepolicy']['id']
ipsecpolicy_id = tmp_ipsecpolicy['ipsecpolicy']['id']
if not peer_cidrs and not local_ep_group_id:
# Must be legacy usage - pick default to use
peer_cidrs = ['10.0.0.0/24']
res = self._create_ipsec_site_connection(fmt,
name,
peer_address,
peer_id,
peer_cidrs,
mtu,
psk,
initiator,
dpd_action,
dpd_interval,
dpd_timeout,
vpnservice_id,
ikepolicy_id,
ipsecpolicy_id,
admin_state_up,
local_ep_group_id,
peer_ep_group_id,
**kwargs)
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
ipsec_site_connection = self.deserialize(
fmt or self.fmt, res
)
yield ipsec_site_connection
if do_delete:
self._delete(
'ipsec-site-connections',
ipsec_site_connection[
'ipsec_site_connection']['id']
)
def _check_ipsec_site_connection(self, ipsec_site_connection, keys, dpd):
self.assertEqual(
keys,
dict((k, v) for k, v
in ipsec_site_connection.items()
if k in keys))
self.assertEqual(
dpd,
dict((k, v) for k, v
in ipsec_site_connection['dpd'].items()
if k in dpd))
def _set_active(self, model, resource_id):
service_plugin = manager.NeutronManager.get_service_plugins()[
nconstants.VPN]
adminContext = context.get_admin_context()
with adminContext.session.begin(subtransactions=True):
resource_db = service_plugin._get_resource(
adminContext,
model,
resource_id)
resource_db.status = nconstants.ACTIVE
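# NOTE: Helper used by the update tests further down: resources created via
# the API start out in PENDING_CREATE (see test_create_vpnservice), and the
# plugin only accepts updates once the row has been flipped to ACTIVE, so
# tests poke the status directly in the database. The negative case is
# test_update_vpnservice_with_invalid_state.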
class VPNPluginDbTestCase(VPNTestMixin,
test_l3_plugin.L3NatTestCaseMixin,
base.NeutronDbPluginV2TestCase):
def setUp(self, core_plugin=None, vpnaas_plugin=DB_VPN_PLUGIN_KLASS,
vpnaas_provider=None):
if not vpnaas_provider:
vpnaas_provider = (
nconstants.VPN +
':vpnaas:neutron_vpnaas.services.vpn.'
'service_drivers.ipsec.IPsecVPNDriver:default')
bits = vpnaas_provider.split(':')
vpnaas_provider = {
'service_type': bits[0],
'name': bits[1],
'driver': bits[2]
}
if len(bits) == 4:
vpnaas_provider['default'] = True
# override the default service provider
self.service_providers = (
mock.patch.object(sdb.ServiceTypeManager,
'get_service_providers').start())
self.service_providers.return_value = [vpnaas_provider]
# force service type manager to reload configuration:
sdb.ServiceTypeManager._instance = None
service_plugins = {'vpnaas_plugin': vpnaas_plugin}
plugin_str = ('neutron_vpnaas.tests.unit.db.vpn.'
'test_vpn_db.TestVpnCorePlugin')
super(VPNPluginDbTestCase, self).setUp(
plugin_str,
service_plugins=service_plugins
)
self._subnet_id = _uuid()
self.core_plugin = TestVpnCorePlugin()
self.plugin = vpn_plugin.VPNPlugin()
ext_mgr = api_extensions.PluginAwareExtensionManager(
extensions_path,
{nconstants.CORE: self.core_plugin,
nconstants.VPN: self.plugin}
)
app = config.load_paste_app('extensions_test_app')
self.ext_api = api_extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
class TestVpnaas(VPNPluginDbTestCase):
def setUp(self, **kwargs):
# TODO(armax): this is far from being a unit test case, as it tests
# that multiple parties (core + vpn) are integrated properly and
# should be replaced by API tests that do not rely on so much mocking.
# NOTE(armax): make sure that the callbacks needed by this test are
# registered, as they may get wiped out depending by the order in
# which imports, subscriptions and mocks occur.
super(TestVpnaas, self).setUp(**kwargs)
vpn_db.subscribe()
def _check_policy(self, policy, keys, lifetime):
for k, v in keys:
self.assertEqual(policy[k], v)
for k, v in six.iteritems(lifetime):
self.assertEqual(policy['lifetime'][k], v)
def test_create_ikepolicy(self):
"""Test case to create an ikepolicy."""
name = "ikepolicy1"
description = 'ipsec-ikepolicy'
keys = [('name', name),
('description', 'ipsec-ikepolicy'),
('auth_algorithm', 'sha1'),
('encryption_algorithm', 'aes-128'),
('phase1_negotiation_mode', 'main'),
('ike_version', 'v1'),
('pfs', 'group5'),
('tenant_id', self._tenant_id)]
lifetime = {
'units': 'seconds',
'value': 3600}
with self.ikepolicy(name=name, description=description) as ikepolicy:
self._check_policy(ikepolicy['ikepolicy'], keys, lifetime)
def test_delete_ikepolicy(self):
"""Test case to delete an ikepolicy."""
with self.ikepolicy(do_delete=False) as ikepolicy:
req = self.new_delete_request('ikepolicies',
ikepolicy['ikepolicy']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, 204)
def test_show_ikepolicy(self):
"""Test case to show or get an ikepolicy."""
name = "ikepolicy1"
description = 'ipsec-ikepolicy'
keys = [('name', name),
('auth_algorithm', 'sha1'),
('encryption_algorithm', 'aes-128'),
('phase1_negotiation_mode', 'main'),
('ike_version', 'v1'),
('pfs', 'group5'),
('tenant_id', self._tenant_id)]
lifetime = {
'units': 'seconds',
'value': 3600}
with self.ikepolicy(name=name, description=description) as ikepolicy:
req = self.new_show_request('ikepolicies',
ikepolicy['ikepolicy']['id'],
fmt=self.fmt)
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self._check_policy(res['ikepolicy'], keys, lifetime)
def test_list_ikepolicies(self):
"""Test case to list all ikepolicies."""
name = "ikepolicy_list"
keys = [('name', name),
('auth_algorithm', 'sha1'),
('encryption_algorithm', 'aes-128'),
('phase1_negotiation_mode', 'main'),
('ike_version', 'v1'),
('pfs', 'group5'),
('tenant_id', self._tenant_id)]
lifetime = {
'units': 'seconds',
'value': 3600}
with self.ikepolicy(name=name) as ikepolicy:
keys.append(('id', ikepolicy['ikepolicy']['id']))
req = self.new_list_request('ikepolicies')
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(len(res), 1)
for k, v in keys:
self.assertEqual(res['ikepolicies'][0][k], v)
for k, v in six.iteritems(lifetime):
self.assertEqual(res['ikepolicies'][0]['lifetime'][k], v)
def test_list_ikepolicies_with_sort_emulated(self):
"""Test case to list all ikepolicies."""
with self.ikepolicy(name='ikepolicy1') as ikepolicy1, \
self.ikepolicy(name='ikepolicy2') as ikepolicy2, \
self.ikepolicy(name='ikepolicy3') as ikepolicy3:
self._test_list_with_sort('ikepolicy', (ikepolicy3,
ikepolicy2,
ikepolicy1),
[('name', 'desc')],
'ikepolicies')
def test_list_ikepolicies_with_pagination_emulated(self):
"""Test case to list all ikepolicies with pagination."""
with self.ikepolicy(name='ikepolicy1') as ikepolicy1, \
self.ikepolicy(name='ikepolicy2') as ikepolicy2, \
self.ikepolicy(name='ikepolicy3') as ikepolicy3:
self._test_list_with_pagination('ikepolicy',
(ikepolicy1,
ikepolicy2,
ikepolicy3),
('name', 'asc'), 2, 2,
'ikepolicies')
def test_list_ikepolicies_with_pagination_reverse_emulated(self):
"""Test case to list all ikepolicies with reverse pagination."""
with self.ikepolicy(name='ikepolicy1') as ikepolicy1, \
self.ikepolicy(name='ikepolicy2') as ikepolicy2, \
self.ikepolicy(name='ikepolicy3') as ikepolicy3:
self._test_list_with_pagination_reverse('ikepolicy',
(ikepolicy1,
ikepolicy2,
ikepolicy3),
('name', 'asc'), 2, 2,
'ikepolicies')
def test_update_ikepolicy(self):
"""Test case to update an ikepolicy."""
name = "new_ikepolicy1"
keys = [('name', name),
('auth_algorithm', 'sha1'),
('encryption_algorithm', 'aes-128'),
('phase1_negotiation_mode', 'main'),
('ike_version', 'v1'),
('pfs', 'group5'),
('tenant_id', self._tenant_id),
('lifetime', {'units': 'seconds',
'value': 60})]
with self.ikepolicy(name=name) as ikepolicy:
data = {'ikepolicy': {'name': name,
'lifetime': {'units': 'seconds',
'value': 60}}}
req = self.new_update_request("ikepolicies",
data,
ikepolicy['ikepolicy']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
for k, v in keys:
self.assertEqual(res['ikepolicy'][k], v)
def test_create_ikepolicy_with_invalid_values(self):
"""Test case to test invalid values."""
name = 'ikepolicy1'
self._create_ikepolicy(name=name,
fmt=self.fmt,
auth_algorithm='md5',
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
auth_algorithm=200,
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
encryption_algorithm='des',
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
encryption_algorithm=100,
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
phase1_negotiation_mode='aggressive',
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
phase1_negotiation_mode=-100,
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
ike_version='v6',
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
ike_version=500,
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
pfs='group1',
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
pfs=120,
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
lifetime_units='Megabytes',
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
lifetime_units=20000,
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
lifetime_value=-20,
expected_res_status=400)
self._create_ikepolicy(name=name,
fmt=self.fmt,
lifetime_value='Megabytes',
expected_res_status=400)
def test_create_ipsecpolicy(self):
"""Test case to create an ipsecpolicy."""
name = "ipsecpolicy1"
description = 'my-ipsecpolicy'
keys = [('name', name),
('description', 'my-ipsecpolicy'),
('auth_algorithm', 'sha1'),
('encryption_algorithm', 'aes-128'),
('encapsulation_mode', 'tunnel'),
('transform_protocol', 'esp'),
('pfs', 'group5'),
('tenant_id', self._tenant_id)]
lifetime = {
'units': 'seconds',
'value': 3600}
with self.ipsecpolicy(name=name,
description=description) as ipsecpolicy:
self._check_policy(ipsecpolicy['ipsecpolicy'], keys, lifetime)
def test_delete_ipsecpolicy(self):
"""Test case to delete an ipsecpolicy."""
with self.ipsecpolicy(do_delete=False) as ipsecpolicy:
req = self.new_delete_request('ipsecpolicies',
ipsecpolicy['ipsecpolicy']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, 204)
def test_show_ipsecpolicy(self):
"""Test case to show or get an ipsecpolicy."""
name = "ipsecpolicy1"
keys = [('name', name),
('auth_algorithm', 'sha1'),
('encryption_algorithm', 'aes-128'),
('encapsulation_mode', 'tunnel'),
('transform_protocol', 'esp'),
('pfs', 'group5'),
('tenant_id', self._tenant_id)]
lifetime = {
'units': 'seconds',
'value': 3600}
with self.ipsecpolicy(name=name) as ipsecpolicy:
req = self.new_show_request('ipsecpolicies',
ipsecpolicy['ipsecpolicy']['id'],
fmt=self.fmt)
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self._check_policy(res['ipsecpolicy'], keys, lifetime)
def test_list_ipsecpolicies(self):
"""Test case to list all ipsecpolicies."""
name = "ipsecpolicy_list"
keys = [('name', name),
('auth_algorithm', 'sha1'),
('encryption_algorithm', 'aes-128'),
('encapsulation_mode', 'tunnel'),
('transform_protocol', 'esp'),
('pfs', 'group5'),
('tenant_id', self._tenant_id)]
lifetime = {
'units': 'seconds',
'value': 3600}
with self.ipsecpolicy(name=name) as ipsecpolicy:
keys.append(('id', ipsecpolicy['ipsecpolicy']['id']))
req = self.new_list_request('ipsecpolicies')
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(len(res), 1)
self._check_policy(res['ipsecpolicies'][0], keys, lifetime)
def test_list_ipsecpolicies_with_sort_emulated(self):
"""Test case to list all ipsecpolicies."""
with self.ipsecpolicy(name='ipsecpolicy1') as ipsecpolicy1, \
self.ipsecpolicy(name='ipsecpolicy2') as ipsecpolicy2, \
self.ipsecpolicy(name='ipsecpolicy3') as ipsecpolicy3:
self._test_list_with_sort('ipsecpolicy', (ipsecpolicy3,
ipsecpolicy2,
ipsecpolicy1),
[('name', 'desc')],
'ipsecpolicies')
def test_list_ipsecpolicies_with_pagination_emulated(self):
"""Test case to list all ipsecpolicies with pagination."""
with self.ipsecpolicy(name='ipsecpolicy1') as ipsecpolicy1, \
self.ipsecpolicy(name='ipsecpolicy2') as ipsecpolicy2, \
self.ipsecpolicy(name='ipsecpolicy3') as ipsecpolicy3:
self._test_list_with_pagination('ipsecpolicy',
(ipsecpolicy1,
ipsecpolicy2,
ipsecpolicy3),
('name', 'asc'), 2, 2,
'ipsecpolicies')
def test_list_ipsecpolicies_with_pagination_reverse_emulated(self):
"""Test case to list all ipsecpolicies with reverse pagination."""
with self.ipsecpolicy(name='ipsecpolicy1') as ipsecpolicy1, \
self.ipsecpolicy(name='ipsecpolicy2') as ipsecpolicy2, \
self.ipsecpolicy(name='ipsecpolicy3') as ipsecpolicy3:
self._test_list_with_pagination_reverse('ipsecpolicy',
(ipsecpolicy1,
ipsecpolicy2,
ipsecpolicy3),
('name', 'asc'), 2, 2,
'ipsecpolicies')
def test_update_ipsecpolicy(self):
"""Test case to update an ipsecpolicy."""
name = "new_ipsecpolicy1"
keys = [('name', name),
('auth_algorithm', 'sha1'),
('encryption_algorithm', 'aes-128'),
('encapsulation_mode', 'tunnel'),
('transform_protocol', 'esp'),
('pfs', 'group5'),
('tenant_id', self._tenant_id),
('lifetime', {'units': 'seconds',
'value': 60})]
with self.ipsecpolicy(name=name) as ipsecpolicy:
data = {'ipsecpolicy': {'name': name,
'lifetime': {'units': 'seconds',
'value': 60}}}
req = self.new_update_request("ipsecpolicies",
data,
ipsecpolicy['ipsecpolicy']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
for k, v in keys:
self.assertEqual(res['ipsecpolicy'][k], v)
def test_update_ipsecpolicy_lifetime(self):
with self.ipsecpolicy() as ipsecpolicy:
data = {'ipsecpolicy': {'lifetime': {'units': 'seconds'}}}
req = self.new_update_request("ipsecpolicies",
data,
ipsecpolicy['ipsecpolicy']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(res['ipsecpolicy']['lifetime']['units'],
'seconds')
data = {'ipsecpolicy': {'lifetime': {'value': 60}}}
req = self.new_update_request("ipsecpolicies",
data,
ipsecpolicy['ipsecpolicy']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(res['ipsecpolicy']['lifetime']['value'], 60)
def test_create_ipsecpolicy_with_invalid_values(self):
"""Test case to test invalid values."""
name = 'ipsecpolicy1'
self._create_ipsecpolicy(
fmt=self.fmt,
name=name, auth_algorithm='md5', expected_res_status=400)
self._create_ipsecpolicy(
fmt=self.fmt,
name=name, auth_algorithm=100, expected_res_status=400)
self._create_ipsecpolicy(
fmt=self.fmt,
name=name, encryption_algorithm='des', expected_res_status=400)
self._create_ipsecpolicy(
fmt=self.fmt,
name=name, encryption_algorithm=200, expected_res_status=400)
self._create_ipsecpolicy(
fmt=self.fmt,
name=name, transform_protocol='abcd', expected_res_status=400)
self._create_ipsecpolicy(
fmt=self.fmt,
name=name, transform_protocol=500, expected_res_status=400)
self._create_ipsecpolicy(
fmt=self.fmt,
name=name,
encapsulation_mode='unsupported', expected_res_status=400)
self._create_ipsecpolicy(name=name,
fmt=self.fmt,
encapsulation_mode=100,
expected_res_status=400)
self._create_ipsecpolicy(name=name,
fmt=self.fmt,
pfs='group9', expected_res_status=400)
self._create_ipsecpolicy(
fmt=self.fmt, name=name, pfs=-1, expected_res_status=400)
self._create_ipsecpolicy(
fmt=self.fmt, name=name, lifetime_units='minutes',
expected_res_status=400)
self._create_ipsecpolicy(fmt=self.fmt, name=name, lifetime_units=100,
expected_res_status=400)
self._create_ipsecpolicy(fmt=self.fmt, name=name,
lifetime_value=-800, expected_res_status=400)
self._create_ipsecpolicy(fmt=self.fmt, name=name,
lifetime_value='Megabytes',
expected_res_status=400)
def test_create_vpnservice(self, **extras):
"""Test case to create a vpnservice."""
description = 'my-vpn-service'
expected = {'name': 'vpnservice1',
'description': 'my-vpn-service',
'admin_state_up': True,
'status': 'PENDING_CREATE',
'tenant_id': self._tenant_id, }
expected.update(extras)
with self.subnet(cidr='10.2.0.0/24') as subnet:
with self.router() as router:
expected['router_id'] = router['router']['id']
expected['subnet_id'] = subnet['subnet']['id']
name = expected['name']
with self.vpnservice(name=name,
subnet=subnet,
router=router,
description=description,
**extras) as vpnservice:
self.assertEqual(dict((k, v) for k, v in
vpnservice['vpnservice'].items()
if k in expected),
expected)
def test_delete_router_interface_in_use_by_vpnservice(self):
"""Test delete router interface in use by vpn service."""
with self.subnet(cidr='10.2.0.0/24') as subnet:
with self.router() as router:
with self.vpnservice(subnet=subnet,
router=router):
self._router_interface_action('remove',
router['router']['id'],
subnet['subnet']['id'],
None,
expected_code=webob.exc.
HTTPConflict.code)
def test_delete_external_gateway_interface_in_use_by_vpnservice(self):
"""Test delete external gateway interface in use by vpn service."""
with self.subnet(cidr='10.2.0.0/24') as subnet:
with self.router() as router:
with self.subnet(cidr='11.0.0.0/24') as public_sub:
self._set_net_external(
public_sub['subnet']['network_id'])
self._add_external_gateway_to_router(
router['router']['id'],
public_sub['subnet']['network_id'])
with self.vpnservice(subnet=subnet,
router=router):
self._remove_external_gateway_from_router(
router['router']['id'],
public_sub['subnet']['network_id'],
expected_code=webob.exc.HTTPConflict.code)
def test_router_update_after_ipsec_site_connection(self):
"""Test case to update router after vpn connection."""
rname1 = "router_one"
rname2 = "router_two"
with self.subnet(cidr='10.2.0.0/24') as subnet:
with self.router(name=rname1) as r:
with self.vpnservice(subnet=subnet,
router=r
) as vpnservice:
self.ipsec_site_connection(
name='connection1', vpnservice=vpnservice
)
body = self._show('routers', r['router']['id'])
self.assertEqual(body['router']['name'], rname1)
body = self._update('routers', r['router']['id'],
{'router': {'name': rname2}})
body = self._show('routers', r['router']['id'])
self.assertEqual(body['router']['name'], rname2)
def test_update_vpnservice(self):
"""Test case to update a vpnservice."""
name = 'new_vpnservice1'
keys = [('name', name)]
with self.subnet(cidr='10.2.0.0/24') as subnet, \
self.router() as router:
with self.vpnservice(name=name,
subnet=subnet,
router=router) as vpnservice:
keys.append(('subnet_id',
vpnservice['vpnservice']['subnet_id']))
keys.append(('router_id',
vpnservice['vpnservice']['router_id']))
data = {'vpnservice': {'name': name}}
self._set_active(vpn_models.VPNService,
vpnservice['vpnservice']['id'])
req = self.new_update_request(
'vpnservices',
data,
vpnservice['vpnservice']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.ext_api))
for k, v in keys:
self.assertEqual(res['vpnservice'][k], v)
def test_update_vpnservice_with_invalid_state(self):
"""Test case to update a vpnservice in invalid state ."""
name = 'new_vpnservice1'
keys = [('name', name)]
with self.subnet(cidr='10.2.0.0/24') as subnet, \
self.router() as router:
with self.vpnservice(name=name,
subnet=subnet,
router=router) as vpnservice:
keys.append(('subnet_id',
vpnservice['vpnservice']['subnet_id']))
keys.append(('router_id',
vpnservice['vpnservice']['router_id']))
data = {'vpnservice': {'name': name}}
req = self.new_update_request(
'vpnservices',
data,
vpnservice['vpnservice']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(400, res.status_int)
res = self.deserialize(self.fmt, res)
self.assertIn(vpnservice['vpnservice']['id'],
res['NeutronError']['message'])
def test_delete_vpnservice(self):
"""Test case to delete a vpnservice."""
with self.vpnservice(name='vpnserver',
do_delete=False) as vpnservice:
req = self.new_delete_request('vpnservices',
vpnservice['vpnservice']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, 204)
def test_show_vpnservice(self):
"""Test case to show or get a vpnservice."""
name = "vpnservice1"
keys = [('name', name),
('description', ''),
('admin_state_up', True),
('status', 'PENDING_CREATE')]
with self.vpnservice(name=name) as vpnservice:
req = self.new_show_request('vpnservices',
vpnservice['vpnservice']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
for k, v in keys:
self.assertEqual(res['vpnservice'][k], v)
def test_list_vpnservices(self):
"""Test case to list all vpnservices."""
name = "vpnservice_list"
keys = [('name', name),
('description', ''),
('admin_state_up', True),
('status', 'PENDING_CREATE')]
with self.vpnservice(name=name) as vpnservice:
keys.append(('subnet_id', vpnservice['vpnservice']['subnet_id']))
keys.append(('router_id', vpnservice['vpnservice']['router_id']))
req = self.new_list_request('vpnservices')
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(len(res['vpnservices']), 1)
for k, v in keys:
self.assertEqual(res['vpnservices'][0][k], v)
def test_list_vpnservices_with_sort_emulated(self):
"""Test case to list all vpnservices with sorting."""
with self.subnet() as subnet:
with self.router() as router:
with self.vpnservice(name='vpnservice1',
subnet=subnet,
router=router,
external_subnet_cidr='192.168.10.0/24'
) as vpnservice1, \
self.vpnservice(name='vpnservice2',
subnet=subnet,
router=router,
plug_subnet=False,
external_router=False,
external_subnet_cidr='192.168.11.0/24'
) as vpnservice2, \
self.vpnservice(name='vpnservice3',
subnet=subnet,
router=router,
plug_subnet=False,
external_router=False,
external_subnet_cidr='192.168.13.0/24'
) as vpnservice3:
self._test_list_with_sort('vpnservice', (vpnservice3,
vpnservice2,
vpnservice1),
[('name', 'desc')])
def test_list_vpnservice_with_pagination_emulated(self):
"""Test case to list all vpnservices with pagination."""
with self.subnet() as subnet:
with self.router() as router:
with self.vpnservice(name='vpnservice1',
subnet=subnet,
router=router,
external_subnet_cidr='192.168.10.0/24'
) as vpnservice1, \
self.vpnservice(name='vpnservice2',
subnet=subnet,
router=router,
plug_subnet=False,
external_subnet_cidr='192.168.20.0/24',
external_router=False
) as vpnservice2, \
self.vpnservice(name='vpnservice3',
subnet=subnet,
router=router,
plug_subnet=False,
external_subnet_cidr='192.168.30.0/24',
external_router=False
) as vpnservice3:
self._test_list_with_pagination('vpnservice',
(vpnservice1,
vpnservice2,
vpnservice3),
('name', 'asc'), 2, 2)
def test_list_vpnservice_with_pagination_reverse_emulated(self):
"""Test case to list all vpnservices with reverse pagination."""
with self.subnet() as subnet:
with self.router() as router:
with self.vpnservice(name='vpnservice1',
subnet=subnet,
router=router,
external_subnet_cidr='192.168.10.0/24'
) as vpnservice1, \
self.vpnservice(name='vpnservice2',
subnet=subnet,
router=router,
plug_subnet=False,
external_subnet_cidr='192.168.11.0/24',
external_router=False
) as vpnservice2, \
self.vpnservice(name='vpnservice3',
subnet=subnet,
router=router,
plug_subnet=False,
external_subnet_cidr='192.168.12.0/24',
external_router=False
) as vpnservice3:
self._test_list_with_pagination_reverse('vpnservice',
(vpnservice1,
vpnservice2,
vpnservice3),
('name', 'asc'),
2, 2)
def test_create_ipsec_site_connection_with_invalid_values(self):
"""Test case to create an ipsec_site_connection with invalid values."""
name = 'connection1'
self._create_ipsec_site_connection(
fmt=self.fmt,
name=name, peer_cidrs='myname', expected_status_int=400)
self._create_ipsec_site_connection(
fmt=self.fmt,
name=name, mtu=-100, expected_status_int=400)
self._create_ipsec_site_connection(
fmt=self.fmt,
name=name, dpd_action='unsupported', expected_status_int=400)
self._create_ipsec_site_connection(
fmt=self.fmt,
name=name, dpd_interval=-1, expected_status_int=400)
self._create_ipsec_site_connection(
fmt=self.fmt,
name=name, dpd_timeout=-200, expected_status_int=400)
self._create_ipsec_site_connection(
fmt=self.fmt,
name=name, initiator='unsupported', expected_status_int=400)
def _test_create_ipsec_site_connection(self, key_overrides=None,
setup_overrides=None,
expected_status_int=200):
"""Create ipsec_site_connection and check results."""
params = {'ikename': 'ikepolicy1',
'ipsecname': 'ipsecpolicy1',
'vpnsname': 'vpnservice1',
'subnet_cidr': '10.2.0.0/24',
'subnet_version': 4}
if setup_overrides is not None:
params.update(setup_overrides)
keys = {'name': 'connection1',
'description': 'my-ipsec-connection',
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
'initiator': 'bi-directional',
'mtu': 1500,
'tenant_id': self._tenant_id,
'psk': 'abcd',
'status': 'PENDING_CREATE',
'admin_state_up': True}
if key_overrides is not None:
keys.update(key_overrides)
dpd = {'action': 'hold',
'interval': 40,
'timeout': 120}
with self.ikepolicy(name=params['ikename']) as ikepolicy, \
self.ipsecpolicy(name=params['ipsecname']) as ipsecpolicy, \
self.subnet(cidr=params['subnet_cidr'],
ip_version=params['subnet_version']) as subnet, \
self.router() as router:
with self.vpnservice(name=params['vpnsname'], subnet=subnet,
router=router) as vpnservice1:
keys['ikepolicy_id'] = ikepolicy['ikepolicy']['id']
keys['ipsecpolicy_id'] = ipsecpolicy['ipsecpolicy']['id']
keys['vpnservice_id'] = vpnservice1['vpnservice']['id']
try:
with self.ipsec_site_connection(
self.fmt,
keys['name'],
keys['peer_address'],
keys['peer_id'],
keys['peer_cidrs'],
keys['mtu'],
keys['psk'],
keys['initiator'],
dpd['action'],
dpd['interval'],
dpd['timeout'],
vpnservice1,
ikepolicy,
ipsecpolicy,
keys['admin_state_up'],
description=keys['description']
) as ipsec_site_connection:
if expected_status_int != 200:
self.fail("Expected failure on create")
self._check_ipsec_site_connection(
ipsec_site_connection['ipsec_site_connection'],
keys,
dpd)
except webob.exc.HTTPClientError as ce:
self.assertEqual(ce.code, expected_status_int)
self._delete('subnets', subnet['subnet']['id'])
def test_create_ipsec_site_connection(self, **extras):
"""Test case to create an ipsec_site_connection."""
self._test_create_ipsec_site_connection(key_overrides=extras)
def test_delete_ipsec_site_connection(self):
"""Test case to delete a ipsec_site_connection."""
with self.ipsec_site_connection(
do_delete=False) as ipsec_site_connection:
req = self.new_delete_request(
'ipsec-site-connections',
ipsec_site_connection['ipsec_site_connection']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, 204)
def test_update_ipsec_site_connection(self):
"""Test case for valid updates to IPSec site connection."""
dpd = {'action': 'hold',
'interval': 40,
'timeout': 120}
self._test_update_ipsec_site_connection(update={'dpd': dpd})
self._test_update_ipsec_site_connection(update={'mtu': 2000})
ipv6_settings = {
'peer_address': 'fe80::c0a8:10a',
'peer_id': 'fe80::c0a8:10a',
'peer_cidrs': ['fe80::c0a8:200/120', 'fe80::c0a8:300/120'],
'subnet_cidr': 'fe80::a02:0/120',
'subnet_version': 6}
self._test_update_ipsec_site_connection(update={'mtu': 2000},
overrides=ipv6_settings)
def test_update_ipsec_site_connection_with_invalid_state(self):
"""Test updating an ipsec_site_connection in invalid state."""
self._test_update_ipsec_site_connection(
overrides={'make_active': False},
expected_status_int=400)
def test_update_ipsec_site_connection_peer_cidrs(self):
"""Test updating an ipsec_site_connection for peer_cidrs."""
new_peers = {'peer_cidrs': ['192.168.4.0/24',
'192.168.5.0/24']}
self._test_update_ipsec_site_connection(
update=new_peers)
def _test_update_ipsec_site_connection(self,
update={'name': 'new name'},
overrides=None,
expected_status_int=200):
"""Creates and then updates ipsec_site_connection."""
keys = {'name': 'new_ipsec_site_connection',
'ikename': 'ikepolicy1',
'ipsecname': 'ipsecpolicy1',
'vpnsname': 'vpnservice1',
'description': 'my-ipsec-connection',
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
'initiator': 'bi-directional',
'mtu': 1500,
'tenant_id': self._tenant_id,
'psk': 'abcd',
'status': 'ACTIVE',
'admin_state_up': True,
'action': 'hold',
'interval': 40,
'timeout': 120,
'subnet_cidr': '10.2.0.0/24',
'subnet_version': 4,
'make_active': True}
if overrides is not None:
keys.update(overrides)
with self.ikepolicy(name=keys['ikename']) as ikepolicy, \
self.ipsecpolicy(name=keys['ipsecname']) as ipsecpolicy, \
self.subnet(cidr=keys['subnet_cidr'],
ip_version=keys['subnet_version']) as subnet, \
self.router() as router:
with self.vpnservice(name=keys['vpnsname'], subnet=subnet,
router=router) as vpnservice1:
ext_gw = router['router']['external_gateway_info']
if ext_gw:
self._create_subnet(self.fmt,
net_id=ext_gw['network_id'],
ip_version=6, cidr='2001:db8::/32')
keys['vpnservice_id'] = vpnservice1['vpnservice']['id']
keys['ikepolicy_id'] = ikepolicy['ikepolicy']['id']
keys['ipsecpolicy_id'] = ipsecpolicy['ipsecpolicy']['id']
with self.ipsec_site_connection(
self.fmt,
keys['name'],
keys['peer_address'],
keys['peer_id'],
keys['peer_cidrs'],
keys['mtu'],
keys['psk'],
keys['initiator'],
keys['action'],
keys['interval'],
keys['timeout'],
vpnservice1,
ikepolicy,
ipsecpolicy,
keys['admin_state_up'],
description=keys['description']
) as ipsec_site_connection:
data = {'ipsec_site_connection': update}
if keys.get('make_active', None):
self._set_active(
vpn_models.IPsecSiteConnection,
(ipsec_site_connection['ipsec_site_connection']
['id']))
req = self.new_update_request(
'ipsec-site-connections',
data,
ipsec_site_connection['ipsec_site_connection']['id'])
res = req.get_response(self.ext_api)
self.assertEqual(expected_status_int, res.status_int)
if expected_status_int == 200:
res_dict = self.deserialize(self.fmt, res)
actual = res_dict['ipsec_site_connection']
for k, v in update.items():
# Sort lists before checking equality
if isinstance(actual[k], list):
self.assertEqual(sorted(v), sorted(actual[k]))
else:
self.assertEqual(v, actual[k])
self._delete('networks', subnet['subnet']['network_id'])
def test_show_ipsec_site_connection(self):
"""Test case to show a ipsec_site_connection."""
ikename = "ikepolicy1"
ipsecname = "ipsecpolicy1"
vpnsname = "vpnservice1"
name = "connection1"
description = "my-ipsec-connection"
keys = {'name': name,
'description': "my-ipsec-connection",
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
'initiator': 'bi-directional',
'mtu': 1500,
'tenant_id': self._tenant_id,
'psk': 'abcd',
'status': 'PENDING_CREATE',
'admin_state_up': True}
dpd = {'action': 'hold',
'interval': 40,
'timeout': 120}
with self.ikepolicy(name=ikename) as ikepolicy, \
self.ipsecpolicy(name=ipsecname) as ipsecpolicy, \
self.subnet() as subnet, \
self.router() as router:
with self.vpnservice(name=vpnsname, subnet=subnet,
router=router) as vpnservice1:
keys['ikepolicy_id'] = ikepolicy['ikepolicy']['id']
keys['ipsecpolicy_id'] = ipsecpolicy['ipsecpolicy']['id']
keys['vpnservice_id'] = vpnservice1['vpnservice']['id']
with self.ipsec_site_connection(
self.fmt,
name,
keys['peer_address'],
keys['peer_id'],
keys['peer_cidrs'],
keys['mtu'],
keys['psk'],
keys['initiator'],
dpd['action'],
dpd['interval'],
dpd['timeout'],
vpnservice1,
ikepolicy,
ipsecpolicy,
keys['admin_state_up'],
description=description,
) as ipsec_site_connection:
req = self.new_show_request(
'ipsec-site-connections',
ipsec_site_connection[
'ipsec_site_connection']['id'],
fmt=self.fmt
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
self._check_ipsec_site_connection(
res['ipsec_site_connection'],
keys,
dpd)
def test_list_ipsec_site_connections_with_sort_emulated(self):
"""Test case to list all ipsec_site_connections with sort."""
with self.subnet(cidr='10.2.0.0/24') as subnet:
with self.router() as router:
with self.vpnservice(subnet=subnet,
router=router
) as vpnservice:
with self.ipsec_site_connection(name='connection1',
vpnservice=vpnservice
) as conn1, \
self.ipsec_site_connection(name='connection2',
vpnservice=vpnservice
) as conn2, \
self.ipsec_site_connection(name='connection3',
vpnservice=vpnservice
) as conn3:
self._test_list_with_sort('ipsec-site-connection',
(conn3, conn2, conn1),
[('name', 'desc')])
def test_list_ipsec_site_connections_with_pagination_emulated(self):
"""Test case to list all ipsec_site_connections with pagination."""
with self.subnet(cidr='10.2.0.0/24') as subnet:
with self.router() as router:
with self.vpnservice(subnet=subnet,
router=router
) as vpnservice:
with self.ipsec_site_connection(
name='ipsec_site_connection1',
vpnservice=vpnservice) as conn1, \
self.ipsec_site_connection(
name='ipsec_site_connection2',
vpnservice=vpnservice) as conn2, \
self.ipsec_site_connection(
name='ipsec_site_connection3',
vpnservice=vpnservice) as conn3:
self._test_list_with_pagination(
'ipsec-site-connection',
(conn1, conn2, conn3),
('name', 'asc'), 2, 2)
def test_list_ipsec_site_conns_with_pagination_reverse_emulated(self):
"""Test to list all ipsec_site_connections with reverse pagination."""
with self.subnet(cidr='10.2.0.0/24') as subnet:
with self.router() as router:
with self.vpnservice(subnet=subnet,
router=router
) as vpnservice:
with self.ipsec_site_connection(name='connection1',
vpnservice=vpnservice
) as conn1, \
self.ipsec_site_connection(name='connection2',
vpnservice=vpnservice
) as conn2, \
self.ipsec_site_connection(name='connection3',
vpnservice=vpnservice
) as conn3:
self._test_list_with_pagination_reverse(
'ipsec-site-connection',
(conn1, conn2, conn3),
('name', 'asc'), 2, 2
)
def test_create_vpn(self):
"""Test case to create a vpn."""
vpns_name = "vpnservice1"
ike_name = "ikepolicy1"
ipsec_name = "ipsecpolicy1"
name1 = "ipsec_site_connection1"
with self.ikepolicy(name=ike_name) as ikepolicy, \
self.ipsecpolicy(name=ipsec_name) as ipsecpolicy, \
self.vpnservice(name=vpns_name) as vpnservice:
vpnservice_id = vpnservice['vpnservice']['id']
ikepolicy_id = ikepolicy['ikepolicy']['id']
ipsecpolicy_id = ipsecpolicy['ipsecpolicy']['id']
with self.ipsec_site_connection(
self.fmt,
name1,
'192.168.1.10',
'192.168.1.10',
['192.168.2.0/24',
'192.168.3.0/24'],
1500,
'abcdef',
'bi-directional',
'hold',
30,
120,
vpnservice,
ikepolicy,
ipsecpolicy,
True
) as vpnconn1:
vpnservice_req = self.new_show_request(
'vpnservices',
vpnservice_id,
fmt=self.fmt)
vpnservice_updated = self.deserialize(
self.fmt,
vpnservice_req.get_response(self.ext_api)
)
self.assertEqual(
vpnservice_updated['vpnservice']['id'],
vpnconn1['ipsec_site_connection']['vpnservice_id']
)
ikepolicy_req = self.new_show_request('ikepolicies',
ikepolicy_id,
fmt=self.fmt)
ikepolicy_res = self.deserialize(
self.fmt,
ikepolicy_req.get_response(self.ext_api)
)
self.assertEqual(
ikepolicy_res['ikepolicy']['id'],
vpnconn1['ipsec_site_connection']['ikepolicy_id'])
ipsecpolicy_req = self.new_show_request(
'ipsecpolicies',
ipsecpolicy_id,
fmt=self.fmt)
ipsecpolicy_res = self.deserialize(
self.fmt,
ipsecpolicy_req.get_response(self.ext_api)
)
self.assertEqual(
ipsecpolicy_res['ipsecpolicy']['id'],
vpnconn1['ipsec_site_connection']['ipsecpolicy_id']
)
def test_delete_ikepolicy_inuse(self):
"""Test case to delete an ikepolicy, that is in use."""
vpns_name = "vpnservice1"
ike_name = "ikepolicy1"
ipsec_name = "ipsecpolicy1"
name1 = "ipsec_site_connection1"
with self.ikepolicy(name=ike_name) as ikepolicy:
with self.ipsecpolicy(name=ipsec_name) as ipsecpolicy:
with self.vpnservice(name=vpns_name) as vpnservice:
with self.ipsec_site_connection(
self.fmt,
name1,
'192.168.1.10',
'192.168.1.10',
['192.168.2.0/24',
'192.168.3.0/24'],
1500,
'abcdef',
'bi-directional',
'hold',
30,
120,
vpnservice,
ikepolicy,
ipsecpolicy,
True
):
delete_req = self.new_delete_request(
'ikepolicies',
ikepolicy['ikepolicy']['id']
)
delete_res = delete_req.get_response(self.ext_api)
self.assertEqual(409, delete_res.status_int)
def test_delete_ipsecpolicy_inuse(self):
"""Test case to delete an ipsecpolicy, that is in use."""
vpns_name = "vpnservice1"
ike_name = "ikepolicy1"
ipsec_name = "ipsecpolicy1"
name1 = "ipsec_site_connection1"
with self.ikepolicy(name=ike_name) as ikepolicy:
with self.ipsecpolicy(name=ipsec_name) as ipsecpolicy:
with self.vpnservice(name=vpns_name) as vpnservice:
with self.ipsec_site_connection(
self.fmt,
name1,
'192.168.1.10',
'192.168.1.10',
['192.168.2.0/24',
'192.168.3.0/24'],
1500,
'abcdef',
'bi-directional',
'hold',
30,
120,
vpnservice,
ikepolicy,
ipsecpolicy,
True
):
delete_req = self.new_delete_request(
'ipsecpolicies',
ipsecpolicy['ipsecpolicy']['id']
)
delete_res = delete_req.get_response(self.ext_api)
self.assertEqual(409, delete_res.status_int)
def test_router_in_use_by_vpnaas(self):
"""Check that exception raised, if router in use by VPNaaS."""
with self.subnet(cidr='10.2.0.0/24') as subnet, \
self.router() as router:
with self.vpnservice(subnet=subnet,
router=router):
self.assertRaises(l3_exception.RouterInUse,
self.plugin.check_router_in_use,
context.get_admin_context(),
router['router']['id'])
def test_subnet_in_use_by_vpnaas(self):
"""Check that exception raised, if subnet in use by VPNaaS."""
with self.subnet(cidr='10.2.0.0/24') as subnet, \
self.router() as router:
with self.vpnservice(subnet=subnet,
router=router):
self.assertRaises(vpnaas.SubnetInUseByVPNService,
self.plugin.check_subnet_in_use,
context.get_admin_context(),
subnet['subnet']['id'])
def test_check_router_has_no_vpn(self):
with mock.patch.object(
manager.NeutronManager, 'get_service_plugins') as sp:
vpn_plugin = mock.Mock()
sp.return_value = {'VPN': vpn_plugin}
kwargs = {'context': mock.ANY, 'router': {'id': 'foo_id'}}
self.assertTrue(vpn_db.migration_callback(
mock.ANY, mock.ANY, mock.ANY, **kwargs))
vpn_plugin.check_router_in_use.assert_called_once_with(
mock.ANY, 'foo_id')
# Note: Below are new database related tests that only exercise the database
# instead of going through the client API. The intent here is to (eventually)
# convert all the database tests to this method, for faster, more granular
# tests.
# TODO(pcm): Put helpers in another module for sharing
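# For illustration (a sketch only, not an additional test): where the API-level
# tests above build a request with self.new_create_request(...) and send it
# through self.ext_api, the database-level tests below call the plugin layer
# directly, e.g.
#
#     plugin = vpn_db.VPNPluginDb()
#     service = plugin.create_vpnservice(context.get_admin_context(),
#                                        {'vpnservice': {...}})
#
# which skips the WSGI/extension stack entirely.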
class NeutronResourcesMixin(object):
def create_network(self, overrides=None):
"""Create database entry for network."""
network_info = {'network': {'name': 'my-net',
'tenant_id': self.tenant_id,
'admin_state_up': True,
'shared': False}}
if overrides:
network_info['network'].update(overrides)
return self.core_plugin.create_network(self.context, network_info)
def create_subnet(self, overrides=None):
"""Create database entry for subnet."""
subnet_info = {'subnet': {'name': 'my-subnet',
'tenant_id': self.tenant_id,
'ip_version': 4,
'enable_dhcp': True,
'dns_nameservers': None,
'host_routes': None,
'allocation_pools': None}}
if overrides:
subnet_info['subnet'].update(overrides)
return self.core_plugin.create_subnet(self.context, subnet_info)
def create_router(self, overrides=None, gw=None):
"""Create database entry for router with optional gateway."""
router_info = {
'router': {
'name': 'my-router',
'tenant_id': self.tenant_id,
'admin_state_up': True,
}
}
if overrides:
router_info['router'].update(overrides)
if gw:
gw_info = {
'external_gateway_info': {
'network_id': gw['net_id'],
'external_fixed_ips': [{'subnet_id': gw['subnet_id'],
'ip_address': gw['ip']}],
}
}
router_info['router'].update(gw_info)
return self.l3_plugin.create_router(self.context, router_info)
def create_router_port_for_subnet(self, router, subnet):
"""Creates port on router for subnet specified."""
port = {'port': {
'tenant_id': self.tenant_id,
'network_id': subnet['network_id'],
'fixed_ips': [
{'ip_address': subnet['gateway_ip'],
'subnet_id': subnet['id']}
],
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'admin_state_up': True,
'device_id': router['id'],
'device_owner': l3_constants.DEVICE_OWNER_ROUTER_INTF,
'name': ''
}}
return self.core_plugin.create_port(self.context, port)
def create_basic_topology(self):
"""Setup networks, subnets, and a router for testing VPN."""
public_net = self.create_network(overrides={'name': 'public',
'router:external': True})
private_net = self.create_network(overrides={'name': 'private'})
overrides = {'name': 'private-subnet',
'cidr': '10.2.0.0/24',
'gateway_ip': '10.2.0.1',
'network_id': private_net['id']}
private_subnet = self.create_subnet(overrides=overrides)
overrides = {'name': 'public-subnet',
'cidr': '192.168.100.0/24',
'gateway_ip': '192.168.100.1',
'allocation_pools': [{'start': '192.168.100.2',
'end': '192.168.100.254'}],
'network_id': public_net['id']}
public_subnet = self.create_subnet(overrides=overrides)
gw_info = {'net_id': public_net['id'],
'subnet_id': public_subnet['id'],
'ip': '192.168.100.5'}
router = self.create_router(gw=gw_info)
self.create_router_port_for_subnet(router, private_subnet)
return (private_subnet, router)
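# Rough sketch of the topology built above (addresses taken from the code):
#
#   private-subnet 10.2.0.0/24 -- my-router -- public-subnet 192.168.100.0/24
#     (gateway 10.2.0.1)          (external gateway IP 192.168.100.5)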
class TestVpnDatabase(base.NeutronDbPluginV2TestCase, NeutronResourcesMixin):
def setUp(self):
# Setup the core plugin
self.plugin_str = ('neutron_vpnaas.tests.unit.db.vpn.'
'test_vpn_db.TestVpnCorePlugin')
super(TestVpnDatabase, self).setUp(self.plugin_str)
# Get the plugins
self.core_plugin = manager.NeutronManager.get_plugin()
self.l3_plugin = manager.NeutronManager.get_service_plugins().get(
nconstants.L3_ROUTER_NAT)
# Create VPN database instance
self.plugin = vpn_db.VPNPluginDb()
self.tenant_id = _uuid()
self.context = context.get_admin_context()
def prepare_service_info(self, private_subnet, router):
subnet_id = private_subnet['id'] if private_subnet else None
return {'vpnservice': {'tenant_id': self.tenant_id,
'name': 'my-service',
'description': 'new service',
'subnet_id': subnet_id,
'router_id': router['id'],
'admin_state_up': True}}
def test_create_vpnservice(self):
private_subnet, router = self.create_basic_topology()
info = self.prepare_service_info(private_subnet, router)
expected = {'admin_state_up': True,
'external_v4_ip': None,
'external_v6_ip': None,
'status': 'PENDING_CREATE'}
expected.update(info['vpnservice'])
new_service = self.plugin.create_vpnservice(self.context, info)
self.assertDictSupersetOf(expected, new_service)
def test_create_vpn_service_without_subnet(self):
"""Create service w/o subnet (will use endpoint groups for conn)."""
private_subnet, router = self.create_basic_topology()
info = self.prepare_service_info(private_subnet=None, router=router)
expected = {'admin_state_up': True,
'external_v4_ip': None,
'external_v6_ip': None,
'status': 'PENDING_CREATE'}
expected.update(info['vpnservice'])
new_service = self.plugin.create_vpnservice(self.context, info)
self.assertDictSupersetOf(expected, new_service)
def test_update_external_tunnel_ips(self):
"""Verify that external tunnel IPs can be set."""
private_subnet, router = self.create_basic_topology()
info = self.prepare_service_info(private_subnet, router)
expected = {'admin_state_up': True,
'external_v4_ip': None,
'external_v6_ip': None,
'status': 'PENDING_CREATE'}
expected.update(info['vpnservice'])
new_service = self.plugin.create_vpnservice(self.context, info)
self.assertDictSupersetOf(expected, new_service)
external_v4_ip = '192.168.100.5'
external_v6_ip = 'fd00:1000::4'
expected.update({'external_v4_ip': external_v4_ip,
'external_v6_ip': external_v6_ip})
mod_service = self.plugin.set_external_tunnel_ips(self.context,
new_service['id'],
v4_ip=external_v4_ip,
v6_ip=external_v6_ip)
self.assertDictSupersetOf(expected, mod_service)
def prepare_endpoint_info(self, group_type, endpoints):
return {'endpoint_group': {'tenant_id': self.tenant_id,
'name': 'my endpoint group',
'description': 'my description',
'type': group_type,
'endpoints': endpoints}}
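# For reference, the helper above produces request bodies shaped like the
# following (tenant_id, name, and description omitted for brevity):
#
#     {'endpoint_group': {'type': constants.CIDR_ENDPOINT,
#                         'endpoints': ['10.10.10.0/24', '20.20.20.0/24']}}
#     {'endpoint_group': {'type': constants.SUBNET_ENDPOINT,
#                         'endpoints': [<subnet UUID>, ...]}}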
def test_endpoint_group_create_with_cidrs(self):
"""Verify create endpoint group using CIDRs."""
info = self.prepare_endpoint_info(constants.CIDR_ENDPOINT,
['10.10.10.0/24', '20.20.20.0/24'])
expected = info['endpoint_group']
new_endpoint_group = self.plugin.create_endpoint_group(self.context,
info)
self._compare_groups(expected, new_endpoint_group)
def test_endpoint_group_create_with_subnets(self):
"""Verify create endpoint group using subnets."""
# Skip validation for subnets, as validation is checked in other tests
mock.patch.object(self.l3_plugin, "get_subnet").start()
private_subnet, router = self.create_basic_topology()
private_net2 = self.create_network(overrides={'name': 'private2'})
overrides = {'name': 'private-subnet2',
'cidr': '10.1.0.0/24',
'gateway_ip': '10.1.0.1',
'network_id': private_net2['id']}
private_subnet2 = self.create_subnet(overrides=overrides)
self.create_router_port_for_subnet(router, private_subnet2)
info = self.prepare_endpoint_info(constants.SUBNET_ENDPOINT,
[private_subnet['id'],
private_subnet2['id']])
expected = info['endpoint_group']
new_endpoint_group = self.plugin.create_endpoint_group(self.context,
info)
self._compare_groups(expected, new_endpoint_group)
def test_endpoint_group_create_with_vlans(self):
"""Verify endpoint group using VLANs."""
info = self.prepare_endpoint_info(constants.VLAN_ENDPOINT,
['100', '200', '300'])
expected = info['endpoint_group']
new_endpoint_group = self.plugin.create_endpoint_group(self.context,
info)
self._compare_groups(expected, new_endpoint_group)
def _compare_groups(self, expected_group, actual_group):
# Callers may want to reuse passed dicts later
expected_group = copy.deepcopy(expected_group)
actual_group = copy.deepcopy(actual_group)
# We need to compare endpoints separately because their order is
# not defined
check_endpoints = 'endpoints' in expected_group
expected_endpoints = set(expected_group.pop('endpoints', []))
actual_endpoints = set(actual_group.pop('endpoints', []))
self.assertDictSupersetOf(expected_group, actual_group)
if check_endpoints:
self.assertEqual(expected_endpoints, actual_endpoints)
def helper_create_endpoint_group(self, info):
"""Create endpoint group database entry and verify OK."""
group = info['endpoint_group']
try:
actual = self.plugin.create_endpoint_group(self.context, info)
except db_exc.DBError as e:
self.fail("Endpoint create in prep for test failed: %s" % e)
self._compare_groups(group, actual)
self.assertIn('id', actual)
return actual['id']
def check_endpoint_group_entry(self, endpoint_group_id, expected_info,
should_exist=True):
try:
endpoint_group = self.plugin.get_endpoint_group(self.context,
endpoint_group_id)
is_found = True
except vpnaas.VPNEndpointGroupNotFound:
is_found = False
except Exception as e:
self.fail("Unexpected exception getting endpoint group: %s" % e)
if should_exist != is_found:
self.fail("Endpoint group should%(expected)s exist, but "
"did%(actual)s exist" %
{'expected': '' if should_exist else ' not',
'actual': '' if is_found else ' not'})
if is_found:
self._compare_groups(expected_info, endpoint_group)
def test_delete_endpoint_group(self):
"""Test that endpoint group is deleted."""
info = self.prepare_endpoint_info(constants.CIDR_ENDPOINT,
['10.10.10.0/24', '20.20.20.0/24'])
expected = info['endpoint_group']
group_id = self.helper_create_endpoint_group(info)
self.check_endpoint_group_entry(group_id, expected, should_exist=True)
self.plugin.delete_endpoint_group(self.context, group_id)
self.check_endpoint_group_entry(group_id, expected, should_exist=False)
self.assertRaises(vpnaas.VPNEndpointGroupNotFound,
self.plugin.delete_endpoint_group,
self.context, group_id)
def test_show_endpoint_group(self):
"""Test showing a single endpoint group."""
info = self.prepare_endpoint_info(constants.CIDR_ENDPOINT,
['10.10.10.0/24', '20.20.20.0/24'])
expected = info['endpoint_group']
group_id = self.helper_create_endpoint_group(info)
self.check_endpoint_group_entry(group_id, expected, should_exist=True)
actual = self.plugin.get_endpoint_group(self.context, group_id)
self._compare_groups(expected, actual)
def test_fail_showing_non_existent_endpoint_group(self):
"""Test failure to show non-existent endpoint group."""
self.assertRaises(vpnaas.VPNEndpointGroupNotFound,
self.plugin.get_endpoint_group,
self.context, uuidutils.generate_uuid())
def test_list_endpoint_groups(self):
"""Test listing multiple endpoint groups."""
# Skip validation for subnets, as validation is checked in other tests
mock.patch.object(self.l3_plugin, "get_subnet").start()
info1 = self.prepare_endpoint_info(constants.CIDR_ENDPOINT,
['10.10.10.0/24', '20.20.20.0/24'])
expected1 = info1['endpoint_group']
group_id1 = self.helper_create_endpoint_group(info1)
self.check_endpoint_group_entry(group_id1, expected1,
should_exist=True)
info2 = self.prepare_endpoint_info(constants.SUBNET_ENDPOINT,
[uuidutils.generate_uuid(),
uuidutils.generate_uuid()])
expected2 = info2['endpoint_group']
group_id2 = self.helper_create_endpoint_group(info2)
self.check_endpoint_group_entry(group_id2, expected2,
should_exist=True)
expected1.update({'id': group_id1})
expected2.update({'id': group_id2})
expected_groups = [expected1, expected2]
actual_groups = self.plugin.get_endpoint_groups(self.context,
fields=('type', 'tenant_id', 'endpoints',
'name', 'description', 'id'))
for expected_group, actual_group in zip(expected_groups,
actual_groups):
self._compare_groups(expected_group, actual_group)
def test_update_endpoint_group(self):
"""Test updating endpoint group information."""
info = self.prepare_endpoint_info(constants.CIDR_ENDPOINT,
['10.10.10.0/24', '20.20.20.0/24'])
expected = info['endpoint_group']
group_id = self.helper_create_endpoint_group(info)
self.check_endpoint_group_entry(group_id, expected, should_exist=True)
group_updates = {'endpoint_group': {'name': 'new name',
'description': 'new description'}}
updated_group = self.plugin.update_endpoint_group(self.context,
group_id,
group_updates)
# Check what was returned, and what is stored in database
self._compare_groups(group_updates['endpoint_group'], updated_group)
expected.update(group_updates['endpoint_group'])
self.check_endpoint_group_entry(group_id, expected,
should_exist=True)
def test_fail_updating_non_existent_group(self):
"""Test fail updating a non-existent group."""
group_updates = {'endpoint_group': {'name': 'new name'}}
self.assertRaises(
vpnaas.VPNEndpointGroupNotFound,
self.plugin.update_endpoint_group,
self.context, _uuid(), group_updates)
def prepare_ike_policy_info(self):
return {'ikepolicy': {'tenant_id': self.tenant_id,
'name': 'ike policy',
'description': 'my ike policy',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'phase1_negotiation_mode': 'main',
'lifetime': {'units': 'seconds', 'value': 3600},
'ike_version': 'v1',
'pfs': 'group5'}}
def test_create_ike_policy(self):
"""Create IKE policy with all settings specified."""
info = self.prepare_ike_policy_info()
expected = info['ikepolicy']
new_ike_policy = self.plugin.create_ikepolicy(self.context, info)
self.assertDictSupersetOf(expected, new_ike_policy)
def prepare_ipsec_policy_info(self):
return {'ipsecpolicy': {'tenant_id': self.tenant_id,
'name': 'ipsec policy',
'description': 'my ipsec policy',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'encapsulation_mode': 'tunnel',
'transform_protocol': 'esp',
'lifetime': {'units': 'seconds',
'value': 3600},
'pfs': 'group5'}}
def test_create_ipsec_policy(self):
"""Create IPsec policy with all settings specified."""
info = self.prepare_ipsec_policy_info()
expected = info['ipsecpolicy']
new_ipsec_policy = self.plugin.create_ipsecpolicy(self.context, info)
self.assertDictSupersetOf(expected, new_ipsec_policy)
def create_vpn_service(self, with_subnet=True):
private_subnet, router = self.create_basic_topology()
if not with_subnet:
private_subnet = None
info = self.prepare_service_info(private_subnet, router)
return self.plugin.create_vpnservice(self.context, info)
def create_ike_policy(self):
info = self.prepare_ike_policy_info()
return self.plugin.create_ikepolicy(self.context, info)
def create_ipsec_policy(self):
info = self.prepare_ipsec_policy_info()
return self.plugin.create_ipsecpolicy(self.context, info)
def create_endpoint_group(self, group_type, endpoints):
info = self.prepare_endpoint_info(group_type=group_type,
endpoints=endpoints)
return self.plugin.create_endpoint_group(self.context, info)
def prepare_connection_info(self, service_id, ike_policy_id,
ipsec_policy_id):
"""Creates connection request dictionary.
The peer_cidrs, local_ep_group_id, and peer_ep_group_id are set to
defaults. The caller must then fill in either CIDRs or endpoint groups
before creating a connection (see the usage sketch below).
"""
return {'ipsec_site_connection': {'name': 'my connection',
'description': 'my description',
'peer_id': '192.168.1.10',
'peer_address': '192.168.1.10',
'peer_cidrs': [],
'mtu': 1500,
'psk': 'shhhh!!!',
'initiator': 'bi-directional',
'dpd_action': 'hold',
'dpd_interval': 30,
'dpd_timeout': 120,
'vpnservice_id': service_id,
'ikepolicy_id': ike_policy_id,
'ipsecpolicy_id': ipsec_policy_id,
'admin_state_up': True,
'tenant_id': self._tenant_id,
'local_ep_group_id': None,
'peer_ep_group_id': None}}
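# Illustrative usage of the helper above, mirroring the connection tests that
# follow: the caller supplies either peer CIDRs (older API) or endpoint group
# IDs (newer API) before creating the connection, e.g.
#
#     info = self.prepare_connection_info(service_id, ike_id, ipsec_id)
#     info['ipsec_site_connection']['peer_cidrs'] = ['10.1.0.0/24']
#     conn = self.plugin.create_ipsec_site_connection(self.context, info)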
def build_expected_connection_result(self, info):
"""Create the expected IPsec connection dict from the request info.
The DPD information is converted from the individual request fields into
a nested 'dpd' dict, and the status is set to PENDING_CREATE.
"""
expected = copy.copy(info['ipsec_site_connection'])
expected['dpd'] = {'action': expected['dpd_action'],
'interval': expected['dpd_interval'],
'timeout': expected['dpd_timeout']}
del expected['dpd_action']
del expected['dpd_interval']
del expected['dpd_timeout']
expected['status'] = 'PENDING_CREATE'
return expected
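# For example, the request fields {'dpd_action': 'hold', 'dpd_interval': 30,
# 'dpd_timeout': 120} become {'dpd': {'action': 'hold', 'interval': 30,
# 'timeout': 120}} in the expected result, and 'status' is set to
# 'PENDING_CREATE'.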
def prepare_for_ipsec_connection_create(self, with_subnet=True):
service = self.create_vpn_service(with_subnet)
ike_policy = self.create_ike_policy()
ipsec_policy = self.create_ipsec_policy()
return self.prepare_connection_info(service['id'],
ike_policy['id'],
ipsec_policy['id'])
def test_create_ipsec_site_connection_with_peer_cidrs(self):
"""Create connection using old API with peer CIDRs specified."""
info = self.prepare_for_ipsec_connection_create()
info['ipsec_site_connection']['peer_cidrs'] = ['10.1.0.0/24',
'10.2.0.0/24']
expected = self.build_expected_connection_result(info)
new_conn = self.plugin.create_ipsec_site_connection(self.context,
info)
self.assertDictSupersetOf(expected, new_conn)
def test_create_ipsec_site_connection_with_endpoint_groups(self):
"""Create connection using new API with endpoint groups."""
# Skip validation, which is tested separately
mock.patch.object(self.plugin, '_get_validator').start()
local_net = self.create_network(overrides={'name': 'local'})
overrides = {'name': 'local-subnet',
'cidr': '30.0.0.0/24',
'gateway_ip': '30.0.0.1',
'network_id': local_net['id']}
local_subnet = self.create_subnet(overrides=overrides)
info = self.prepare_for_ipsec_connection_create(with_subnet=False)
local_ep_group = self.create_endpoint_group(
group_type='subnet', endpoints=[local_subnet['id']])
peer_ep_group = self.create_endpoint_group(
group_type='cidr', endpoints=['20.1.0.0/24', '20.2.0.0/24'])
info['ipsec_site_connection'].update(
{'local_ep_group_id': local_ep_group['id'],
'peer_ep_group_id': peer_ep_group['id']})
expected = self.build_expected_connection_result(info)
new_conn = self.plugin.create_ipsec_site_connection(self.context,
info)
self.assertDictSupersetOf(expected, new_conn)
def test_fail_endpoint_group_delete_when_in_use_by_ipsec_conn(self):
"""Ensure endpoint group is not deleted from under IPSec connection."""
# Skip validation, which is tested separately
mock.patch.object(self.plugin, '_get_validator').start()
local_net = self.create_network(overrides={'name': 'local'})
overrides = {'name': 'local-subnet',
'cidr': '30.0.0.0/24',
'gateway_ip': '30.0.0.1',
'network_id': local_net['id']}
local_subnet = self.create_subnet(overrides=overrides)
info = self.prepare_for_ipsec_connection_create(with_subnet=False)
local_ep_group = self.create_endpoint_group(
group_type='subnet', endpoints=[local_subnet['id']])
peer_ep_group = self.create_endpoint_group(
group_type='cidr', endpoints=['20.1.0.0/24', '20.2.0.0/24'])
info['ipsec_site_connection'].update(
{'local_ep_group_id': local_ep_group['id'],
'peer_ep_group_id': peer_ep_group['id']})
self.plugin.create_ipsec_site_connection(self.context, info)
self.assertRaises(vpnaas.EndpointGroupInUse,
self.plugin.delete_endpoint_group,
self.context,
local_ep_group['id'])
self.assertRaises(vpnaas.EndpointGroupInUse,
self.plugin.delete_endpoint_group,
self.context,
peer_ep_group['id'])
unused_ep_group = self.create_endpoint_group(
group_type=constants.CIDR_ENDPOINT, endpoints=['30.0.0.0/24'])
self.plugin.delete_endpoint_group(self.context, unused_ep_group['id'])
def test_fail_subnet_delete_when_in_use_by_endpoint_group(self):
"""Ensure don't delete subnet from under endpoint group."""
# mock.patch.object(self.plugin, '_get_validator').start()
local_net = self.create_network(overrides={'name': 'local'})
overrides = {'name': 'local-subnet',
'cidr': '30.0.0.0/24',
'gateway_ip': '30.0.0.1',
'network_id': local_net['id']}
local_subnet = self.create_subnet(overrides=overrides)
self.create_endpoint_group(group_type='subnet',
endpoints=[local_subnet['id']])
self.assertRaises(vpnaas.SubnetInUseByEndpointGroup,
self.plugin.check_subnet_in_use_by_endpoint_group,
self.context, local_subnet['id'])
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/unit/db/__init__.py 0000664 0005670 0005671 00000000000 12701407726 025515 0 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/api/ 0000775 0005670 0005671 00000000000 12701410103 022602 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/api/clients.py 0000664 0005670 0005671 00000005410 12701407726 024636 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2012 OpenStack Foundation
# Copyright 2016 Hewlett Packard Enterprise Development Company
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest import manager
from tempest.services.identity.v2.json.tenants_client import \
TenantsClient
from neutron.tests.tempest import config
from neutron.tests.tempest.services.network.json.network_client import \
NetworkClientJSON
CONF = config.CONF
LOG = logging.getLogger(__name__)
class Manager(manager.Manager):
"""
Top level manager for OpenStack tempest clients
"""
default_params = {
'disable_ssl_certificate_validation':
CONF.identity.disable_ssl_certificate_validation,
'ca_certs': CONF.identity.ca_certificates_file,
'trace_requests': CONF.debug.trace_requests
}
# NOTE: Tempest uses the compute API timeout values if project-specific
# timeout values don't exist.
default_params_with_timeout_values = {
'build_interval': CONF.compute.build_interval,
'build_timeout': CONF.compute.build_timeout
}
default_params_with_timeout_values.update(default_params)
def __init__(self, credentials=None, service=None):
super(Manager, self).__init__(credentials=credentials)
self._set_identity_clients()
self.network_client = NetworkClientJSON(
self.auth_provider,
CONF.network.catalog_type,
CONF.network.region or CONF.identity.region,
endpoint_type=CONF.network.endpoint_type,
build_interval=CONF.network.build_interval,
build_timeout=CONF.network.build_timeout,
**self.default_params)
def _set_identity_clients(self):
params = {
'service': CONF.identity.catalog_type,
'region': CONF.identity.region,
'endpoint_type': 'adminURL'
}
params.update(self.default_params_with_timeout_values)
params_v2_admin = params.copy()
params_v2_admin['endpoint_type'] = CONF.identity.v2_admin_endpoint_type
# Client uses admin endpoint type of Keystone API v2
self.tenants_client = TenantsClient(self.auth_provider,
**params_v2_admin)
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/api/__init__.py 0000664 0005670 0005671 00000000000 12701407726 024722 0 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/tests/api/base.py 0000664 0005670 0005671 00000023673 12701407726 024122 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2012 OpenStack Foundation
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_log import log as logging
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest import test
from neutron.tests.tempest import config
from neutron_vpnaas.tests.api import clients
CONF = config.CONF
LOG = logging.getLogger(__name__)
class BaseNetworkTest(test.BaseTestCase):
"""
Base class for the Neutron tests that use the Tempest Neutron REST client
Per the Neutron API Guide, API v1.x was removed from the source code tree
(docs.openstack.org/api/openstack-network/2.0/content/Overview-d1e71.html)
Therefore, v2.x of the Neutron API is assumed. It is also assumed that the
following options are defined in the [network] section of etc/tempest.conf:
tenant_network_cidr with a block of CIDRs from which smaller blocks
can be allocated for tenant networks
tenant_network_mask_bits with the mask bits to be used to partition the
block defined by tenant_network_cidr
Finally, it is assumed that the following option is defined in the
[service_available] section of etc/tempest.conf
neutron as True
"""
force_tenant_isolation = False
credentials = ['primary']
# Default to ipv4.
_ip_version = 4
@classmethod
def get_client_manager(cls, credential_type=None, roles=None,
force_new=None):
manager = test.BaseTestCase.get_client_manager(
credential_type=credential_type,
roles=roles,
force_new=force_new)
# Neutron uses a different clients manager than the one in Tempest
return clients.Manager(manager.credentials)
@classmethod
def skip_checks(cls):
super(BaseNetworkTest, cls).skip_checks()
# Create no network resources for these tests.
if not CONF.service_available.neutron:
raise cls.skipException("Neutron support is required")
if cls._ip_version == 6 and not CONF.network_feature_enabled.ipv6:
raise cls.skipException("IPv6 Tests are disabled.")
@classmethod
def setup_clients(cls):
super(BaseNetworkTest, cls).setup_clients()
cls.client = cls.os.network_client
@classmethod
def resource_setup(cls):
super(BaseNetworkTest, cls).resource_setup()
cls.network_cfg = CONF.network
cls.networks = []
cls.shared_networks = []
cls.subnets = []
cls.ports = []
cls.routers = []
cls.vpnservices = []
cls.ikepolicies = []
cls.ipsecpolicies = []
@classmethod
def resource_cleanup(cls):
if CONF.service_available.neutron:
# Clean up ipsec policies
for ipsecpolicy in cls.ipsecpolicies:
cls._try_delete_resource(cls.client.delete_ipsecpolicy,
ipsecpolicy['id'])
# Clean up ike policies
for ikepolicy in cls.ikepolicies:
cls._try_delete_resource(cls.client.delete_ikepolicy,
ikepolicy['id'])
# Clean up vpn services
for vpnservice in cls.vpnservices:
cls._try_delete_resource(cls.client.delete_vpnservice,
vpnservice['id'])
@classmethod
def _try_delete_resource(cls, delete_callable, *args, **kwargs):
"""Cleanup resources in case of test-failure
Some resources are explicitly deleted by the test.
If the test failed to delete a resource, this method will execute
the appropriate delete methods. Otherwise, the method ignores NotFound
exceptions thrown for resources that were correctly deleted by the
test.
:param delete_callable: delete method
:param args: arguments for delete method
:param kwargs: keyword arguments for delete method
"""
try:
delete_callable(*args, **kwargs)
# if resource is not found, this means it was deleted in the test
except lib_exc.NotFound:
pass
@classmethod
def create_network(cls, network_name=None, **kwargs):
"""Wrapper utility that returns a test network."""
network_name = network_name or data_utils.rand_name('test-network-')
body = cls.client.create_network(name=network_name, **kwargs)
network = body['network']
cls.networks.append(network)
return network
@classmethod
def create_shared_network(cls, network_name=None):
network_name = network_name or data_utils.rand_name('sharednetwork-')
post_body = {'name': network_name, 'shared': True}
body = cls.admin_client.create_network(**post_body)
network = body['network']
cls.shared_networks.append(network)
return network
@classmethod
def create_router_interface(cls, router_id, subnet_id):
"""Wrapper utility that returns a router interface."""
interface = cls.client.add_router_interface_with_subnet_id(
router_id, subnet_id)
return interface
@classmethod
def create_subnet(cls, network, gateway='', cidr=None, mask_bits=None,
ip_version=None, client=None, **kwargs):
"""Wrapper utility that returns a test subnet."""
# allow tests to use admin client
if not client:
client = cls.client
# The cidr and mask_bits depend on the ip version.
ip_version = ip_version if ip_version is not None else cls._ip_version
gateway_not_set = gateway == ''
if ip_version == 4:
cidr = cidr or netaddr.IPNetwork(CONF.network.tenant_network_cidr)
mask_bits = mask_bits or CONF.network.tenant_network_mask_bits
elif ip_version == 6:
cidr = (
cidr or netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr))
mask_bits = mask_bits or CONF.network.tenant_network_v6_mask_bits
# Find a cidr that is not in use yet and create a subnet with it
for subnet_cidr in cidr.subnet(mask_bits):
if gateway_not_set:
gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1)
else:
gateway_ip = gateway
try:
body = client.create_subnet(
network_id=network['id'],
cidr=str(subnet_cidr),
ip_version=ip_version,
gateway_ip=gateway_ip,
**kwargs)
break
except lib_exc.BadRequest as e:
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
if not is_overlapping_cidr:
raise
else:
message = 'Available CIDR for subnet creation could not be found'
raise ValueError(message)
subnet = body['subnet']
cls.subnets.append(subnet)
return subnet
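# For example (illustrative values): with tenant_network_cidr = 10.100.0.0/16
# and tenant_network_mask_bits = 28, the loop above tries 10.100.0.0/28,
# 10.100.0.16/28, ... in order, defaulting the gateway to the first host
# address of each block (e.g. 10.100.0.1), and stops at the first CIDR whose
# creation does not fail with an "overlaps with another subnet" error.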
@classmethod
def create_port(cls, network, **kwargs):
"""Wrapper utility that returns a test port."""
body = cls.client.create_port(network_id=network['id'],
**kwargs)
port = body['port']
cls.ports.append(port)
return port
@classmethod
def update_port(cls, port, **kwargs):
"""Wrapper utility that updates a test port."""
body = cls.client.update_port(port['id'],
**kwargs)
return body['port']
@classmethod
def create_router(cls, router_name=None, admin_state_up=False,
external_network_id=None, enable_snat=None,
**kwargs):
ext_gw_info = {}
if external_network_id:
ext_gw_info['network_id'] = external_network_id
if enable_snat:
ext_gw_info['enable_snat'] = enable_snat
body = cls.client.create_router(
router_name, external_gateway_info=ext_gw_info,
admin_state_up=admin_state_up, **kwargs)
router = body['router']
cls.routers.append(router)
return router
@classmethod
def create_vpnservice(cls, subnet_id, router_id):
"""Wrapper utility that returns a test vpn service."""
body = cls.client.create_vpnservice(
subnet_id=subnet_id, router_id=router_id, admin_state_up=True,
name=data_utils.rand_name("vpnservice-"))
vpnservice = body['vpnservice']
cls.vpnservices.append(vpnservice)
return vpnservice
@classmethod
def create_ikepolicy(cls, name):
"""Wrapper utility that returns a test ike policy."""
body = cls.client.create_ikepolicy(name=name)
ikepolicy = body['ikepolicy']
cls.ikepolicies.append(ikepolicy)
return ikepolicy
@classmethod
def create_ipsecpolicy(cls, name):
"""Wrapper utility that returns a test ipsec policy."""
body = cls.client.create_ipsecpolicy(name=name)
ipsecpolicy = body['ipsecpolicy']
cls.ipsecpolicies.append(ipsecpolicy)
return ipsecpolicy
class BaseAdminNetworkTest(BaseNetworkTest):
credentials = ['primary', 'admin']
@classmethod
def setup_clients(cls):
super(BaseAdminNetworkTest, cls).setup_clients()
cls.admin_client = cls.os_adm.network_client
cls.identity_admin_client = cls.os_adm.tenants_client
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/api/test_vpnaas.py 0000664 0005670 0005671 00000034232 12701407726 025530 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest import test
from neutron.tests.tempest import config
from neutron_vpnaas.tests.api import base
CONF = config.CONF
class VPNaaSTestJSON(base.BaseAdminNetworkTest):
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
List, Show, Create, Delete, and Update VPN Service
List, Show, Create, Delete, and Update IKE policy
List, Show, Create, Delete, and Update IPSec policy
"""
@classmethod
def resource_setup(cls):
if not test.is_extension_enabled('vpnaas', 'network'):
msg = "vpnaas extension not enabled."
raise cls.skipException(msg)
super(VPNaaSTestJSON, cls).resource_setup()
cls.ext_net_id = CONF.network.public_network_id
network_name = data_utils.rand_name('network-')
cls.network = cls.create_network(network_name)
cls.subnet = cls.create_subnet(cls.network)
cls.router = cls.create_router(
data_utils.rand_name("router"),
external_network_id=CONF.network.public_network_id)
cls.create_router_interface(cls.router['id'], cls.subnet['id'])
cls.vpnservice = cls.create_vpnservice(cls.subnet['id'],
cls.router['id'])
cls.ikepolicy = cls.create_ikepolicy(
data_utils.rand_name("ike-policy-"))
cls.ipsecpolicy = cls.create_ipsecpolicy(
data_utils.rand_name("ipsec-policy-"))
def _delete_ike_policy(self, ike_policy_id):
# Deletes an IKE policy and verifies that it is deleted
ike_list = list()
all_ike = self.client.list_ikepolicies()
for ike in all_ike['ikepolicies']:
ike_list.append(ike['id'])
if ike_policy_id in ike_list:
self.client.delete_ikepolicy(ike_policy_id)
# Asserting that the policy is not found in list after deletion
ikepolicies = self.client.list_ikepolicies()
ike_id_list = list()
for i in ikepolicies['ikepolicies']:
ike_id_list.append(i['id'])
self.assertNotIn(ike_policy_id, ike_id_list)
def _delete_ipsec_policy(self, ipsec_policy_id):
# Deletes an IPsec policy if it exists
try:
self.client.delete_ipsecpolicy(ipsec_policy_id)
except lib_exc.NotFound:
pass
def _assertExpected(self, expected, actual):
# Check that the expected keys/values exist in the actual response body
for key, value in six.iteritems(expected):
self.assertIn(key, actual)
self.assertEqual(value, actual[key])
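# For example, _assertExpected({'name': 'vpn1'}, body['vpnservice']) passes
# only if the response body contains the key 'name' with the value 'vpn1';
# extra keys in the actual response are ignored.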
def _delete_vpn_service(self, vpn_service_id):
self.client.delete_vpnservice(vpn_service_id)
# Assert that the vpn service is not found in the list after deletion
body = self.client.list_vpnservices()
vpn_services = [vs['id'] for vs in body['vpnservices']]
self.assertNotIn(vpn_service_id, vpn_services)
def _get_tenant_id(self):
"""
Returns the tenant_id of the client's current user
"""
# TODO(jroovers) This is a temporary workaround to get the tenant_id
# of the current client. Replace this once tenant_isolation for
# neutron is fixed.
body = self.client.show_network(self.network['id'])
return body['network']['tenant_id']
@test.attr(type='smoke')
def test_admin_create_ipsec_policy_for_tenant(self):
tenant_id = self._get_tenant_id()
# Create IPSec policy for the newly created tenant
name = data_utils.rand_name('ipsec-policy')
body = (self.admin_client.
create_ipsecpolicy(name=name, tenant_id=tenant_id))
ipsecpolicy = body['ipsecpolicy']
self.assertIsNotNone(ipsecpolicy['id'])
self.addCleanup(self.admin_client.delete_ipsecpolicy,
ipsecpolicy['id'])
# Assert that created ipsec policy is found in API list call
body = self.client.list_ipsecpolicies()
ipsecpolicies = [policy['id'] for policy in body['ipsecpolicies']]
self.assertIn(ipsecpolicy['id'], ipsecpolicies)
@test.attr(type='smoke')
def test_admin_create_vpn_service_for_tenant(self):
tenant_id = self._get_tenant_id()
# Create vpn service for the newly created tenant
network2 = self.create_network()
subnet2 = self.create_subnet(network2)
router2 = self.create_router(data_utils.rand_name('router-'),
external_network_id=self.ext_net_id)
self.create_router_interface(router2['id'], subnet2['id'])
name = data_utils.rand_name('vpn-service')
body = self.admin_client.create_vpnservice(
subnet_id=subnet2['id'],
router_id=router2['id'],
name=name,
admin_state_up=True,
tenant_id=tenant_id)
vpnservice = body['vpnservice']
self.assertIsNotNone(vpnservice['id'])
self.addCleanup(self.admin_client.delete_vpnservice, vpnservice['id'])
# Assert that created vpnservice is found in API list call
body = self.client.list_vpnservices()
vpn_services = [vs['id'] for vs in body['vpnservices']]
self.assertIn(vpnservice['id'], vpn_services)
@test.attr(type='smoke')
def test_admin_create_ike_policy_for_tenant(self):
tenant_id = self._get_tenant_id()
# Create IKE policy for the newly created tenant
name = data_utils.rand_name('ike-policy')
body = (self.admin_client.
create_ikepolicy(name=name, ike_version="v1",
encryption_algorithm="aes-128",
auth_algorithm="sha1",
tenant_id=tenant_id))
ikepolicy = body['ikepolicy']
self.assertIsNotNone(ikepolicy['id'])
self.addCleanup(self.admin_client.delete_ikepolicy, ikepolicy['id'])
# Assert that created ike policy is found in API list call
body = self.client.list_ikepolicies()
ikepolicies = [ikp['id'] for ikp in body['ikepolicies']]
self.assertIn(ikepolicy['id'], ikepolicies)
@test.attr(type='smoke')
def test_list_vpn_services(self):
# Verify the VPN service exists in the list of all VPN services
body = self.client.list_vpnservices()
vpnservices = body['vpnservices']
self.assertIn(self.vpnservice['id'], [v['id'] for v in vpnservices])
@test.attr(type='smoke')
def test_create_update_delete_vpn_service(self):
# Creates a VPN service and sets up deletion
network1 = self.create_network()
subnet1 = self.create_subnet(network1)
router1 = self.create_router(data_utils.rand_name('router-'),
external_network_id=self.ext_net_id)
self.create_router_interface(router1['id'], subnet1['id'])
name = data_utils.rand_name('vpn-service1')
body = self.client.create_vpnservice(subnet_id=subnet1['id'],
router_id=router1['id'],
name=name,
admin_state_up=True)
vpnservice = body['vpnservice']
self.addCleanup(self._delete_vpn_service, vpnservice['id'])
# Assert that the created vpnservice is found in the vpnservices list
body = self.client.list_vpnservices()
vpn_services = [vs['id'] for vs in body['vpnservices']]
self.assertIsNotNone(vpnservice['id'])
self.assertIn(vpnservice['id'], vpn_services)
# TODO(raies): implement logic to update vpnservice.
# The VPNaaS client function to update is implemented, but the
# precondition is that the current state of the vpnservice must be
# "ACTIVE", not "PENDING*".
@test.attr(type='smoke')
def test_show_vpn_service(self):
# Verifies the details of a vpn service
body = self.client.show_vpnservice(self.vpnservice['id'])
vpnservice = body['vpnservice']
self.assertEqual(self.vpnservice['id'], vpnservice['id'])
self.assertEqual(self.vpnservice['name'], vpnservice['name'])
self.assertEqual(self.vpnservice['description'],
vpnservice['description'])
self.assertEqual(self.vpnservice['router_id'], vpnservice['router_id'])
self.assertEqual(self.vpnservice['subnet_id'], vpnservice['subnet_id'])
self.assertEqual(self.vpnservice['tenant_id'], vpnservice['tenant_id'])
valid_status = ["ACTIVE", "DOWN", "BUILD", "ERROR", "PENDING_CREATE",
"PENDING_UPDATE", "PENDING_DELETE"]
self.assertIn(vpnservice['status'], valid_status)
@test.attr(type='smoke')
def test_list_ike_policies(self):
# Verify the ike policy exists in the list of all IKE policies
body = self.client.list_ikepolicies()
ikepolicies = body['ikepolicies']
self.assertIn(self.ikepolicy['id'], [i['id'] for i in ikepolicies])
@test.attr(type='smoke')
def test_create_update_delete_ike_policy(self):
# Creates an IKE policy
name = data_utils.rand_name('ike-policy')
body = (self.client.create_ikepolicy(
name=name,
ike_version="v1",
encryption_algorithm="aes-128",
auth_algorithm="sha1"))
ikepolicy = body['ikepolicy']
self.assertIsNotNone(ikepolicy['id'])
self.addCleanup(self._delete_ike_policy, ikepolicy['id'])
# Update IKE Policy
new_ike = {'name': data_utils.rand_name("New-IKE"),
'description': "Updated ike policy",
'encryption_algorithm': "aes-256",
'ike_version': "v2",
'pfs': "group14",
'lifetime': {'units': "seconds", 'value': 2000}}
self.client.update_ikepolicy(ikepolicy['id'], **new_ike)
# Confirm that update was successful by verifying using 'show'
body = self.client.show_ikepolicy(ikepolicy['id'])
ike_policy = body['ikepolicy']
for key, value in six.iteritems(new_ike):
self.assertIn(key, ike_policy)
self.assertEqual(value, ike_policy[key])
# Verification of ike policy delete
self.client.delete_ikepolicy(ikepolicy['id'])
body = self.client.list_ikepolicies()
ikepolicies = [ikp['id'] for ikp in body['ikepolicies']]
self.assertNotIn(ike_policy['id'], ikepolicies)
@test.attr(type='smoke')
def test_show_ike_policy(self):
# Verifies the details of an IKE policy
body = self.client.show_ikepolicy(self.ikepolicy['id'])
ikepolicy = body['ikepolicy']
self.assertEqual(self.ikepolicy['id'], ikepolicy['id'])
self.assertEqual(self.ikepolicy['name'], ikepolicy['name'])
self.assertEqual(self.ikepolicy['description'],
ikepolicy['description'])
self.assertEqual(self.ikepolicy['encryption_algorithm'],
ikepolicy['encryption_algorithm'])
self.assertEqual(self.ikepolicy['auth_algorithm'],
ikepolicy['auth_algorithm'])
self.assertEqual(self.ikepolicy['tenant_id'],
ikepolicy['tenant_id'])
self.assertEqual(self.ikepolicy['pfs'],
ikepolicy['pfs'])
self.assertEqual(self.ikepolicy['phase1_negotiation_mode'],
ikepolicy['phase1_negotiation_mode'])
self.assertEqual(self.ikepolicy['ike_version'],
ikepolicy['ike_version'])
@test.attr(type='smoke')
def test_list_ipsec_policies(self):
# Verify the ipsec policy exists in the list of all ipsec policies
body = self.client.list_ipsecpolicies()
ipsecpolicies = body['ipsecpolicies']
self.assertIn(self.ipsecpolicy['id'], [i['id'] for i in ipsecpolicies])
@test.attr(type='smoke')
def test_create_update_delete_ipsec_policy(self):
# Creates an ipsec policy
ipsec_policy_body = {'name': data_utils.rand_name('ipsec-policy'),
'pfs': 'group5',
'encryption_algorithm': "aes-128",
'auth_algorithm': 'sha1'}
resp_body = self.client.create_ipsecpolicy(**ipsec_policy_body)
ipsecpolicy = resp_body['ipsecpolicy']
self.addCleanup(self._delete_ipsec_policy, ipsecpolicy['id'])
self._assertExpected(ipsec_policy_body, ipsecpolicy)
# Verification of ipsec policy update
new_ipsec = {'description': 'Updated ipsec policy',
'pfs': 'group2',
'name': data_utils.rand_name("New-IPSec"),
'encryption_algorithm': "aes-256",
'lifetime': {'units': "seconds", 'value': '2000'}}
body = self.client.update_ipsecpolicy(ipsecpolicy['id'],
**new_ipsec)
updated_ipsec_policy = body['ipsecpolicy']
self._assertExpected(new_ipsec, updated_ipsec_policy)
# Verification of ipsec policy delete
self.client.delete_ipsecpolicy(ipsecpolicy['id'])
self.assertRaises(lib_exc.NotFound,
self.client.delete_ipsecpolicy, ipsecpolicy['id'])
@test.attr(type='smoke')
def test_show_ipsec_policy(self):
# Verifies the details of an ipsec policy
body = self.client.show_ipsecpolicy(self.ipsecpolicy['id'])
ipsecpolicy = body['ipsecpolicy']
self._assertExpected(self.ipsecpolicy, ipsecpolicy)
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/functional/strongswan/test_netns_wrapper.py
# Copyright (c) 2015 Canonical, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from neutron.agent.common import config
from neutron.agent.linux import utils
from neutron.tests.common import net_helpers
from neutron.tests.functional import base
WRAPPER_SCRIPT = 'neutron-vpn-netns-wrapper'
STATUS_PATTERN = re.compile('Command:.*ip.*addr.*show.*Exit code: 0')
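# STATUS_PATTERN matches the summary line that neutron-vpn-netns-wrapper
# writes to stdout for each command it runs ("Command: ... Exit code: ..."),
# so a match containing "Exit code: 0" means the wrapped 'ip addr show'
# succeeded inside the namespace.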
class TestNetnsWrapper(base.BaseSudoTestCase):
def setUp(self):
super(TestNetnsWrapper, self).setUp()
config.setup_logging()
self.fake_ns = 'func-8f1b728c-6eca-4042-9b6b-6ef66ab9352a'
self.mount_paths = ('--mount_paths=/etc:/var/lib/neutron'
'/vpnaas/%(ns)s/etc,/var/run:/var/lib'
'/neutron/vpnaas/%(ns)s/var/run')
self.fake_pth = self.mount_paths % {'ns': self.fake_ns}
def test_netns_wrap_success(self):
client_ns = self.useFixture(net_helpers.NamespaceFixture()).ip_wrapper
ns = client_ns.namespace
pth = self.mount_paths % {'ns': ns}
cmd = WRAPPER_SCRIPT, pth, '--cmd=ip,addr,show'
output = client_ns.netns.execute(cmd)
self.assertTrue(STATUS_PATTERN.search(output))
def test_netns_wrap_fail_without_netns(self):
cmd = [WRAPPER_SCRIPT, self.fake_pth,
'--cmd=ip,addr,show']
self.assertRaises(RuntimeError, utils.execute, cmd=cmd,
run_as_root=True)
def test_netns_wrap_unauthorized_command(self):
cmd = [WRAPPER_SCRIPT, self.fake_pth,
'--cmd=nofiltercommand']
self.assertRaises(RuntimeError, utils.execute, cmd=cmd,
run_as_root=True)
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/functional/strongswan/README
This area holds tests for the StrongSwan implementation (only). It will also
run tests in neutron_vpnaas/tests/functional/common.
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/functional/strongswan/test_strongswan_driver.py
# Copyright (c) 2015 Canonical, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
from neutron.agent.l3 import config as l3_config
from neutron.agent.l3 import legacy_router
from neutron.plugins.common import constants
from neutron.tests.functional import base
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron_vpnaas.services.vpn.device_drivers import ipsec
from neutron_vpnaas.services.vpn.device_drivers import strongswan_ipsec
_uuid = uuidutils.generate_uuid
FAKE_ROUTER_ID = _uuid()
FAKE_IPSEC_SITE_CONNECTION1_ID = _uuid()
FAKE_IPSEC_SITE_CONNECTION2_ID = _uuid()
FAKE_IKE_POLICY = {
'ike_version': 'v1',
'encryption_algorithm': 'aes-128',
'auth_algorithm': 'sha1',
'pfs': 'group5'
}
FAKE_IPSEC_POLICY = {
'encryption_algorithm': 'aes-128',
'auth_algorithm': 'sha1',
'pfs': 'group5'
}
FAKE_VPN_SERVICE = {
'id': _uuid(),
'router_id': FAKE_ROUTER_ID,
'name': 'myvpn',
'admin_state_up': True,
'status': constants.PENDING_CREATE,
'external_ip': '50.0.0.4',
'subnet': {'cidr': '10.0.0.0/24'},
'ipsec_site_connections': [
{'peer_cidrs': ['20.0.0.0/24',
'30.0.0.0/24'],
'id': FAKE_IPSEC_SITE_CONNECTION1_ID,
'external_ip': '50.0.0.4',
'peer_address': '30.0.0.5',
'peer_id': '30.0.0.5',
'psk': 'password',
'initiator': 'bi-directional',
'ikepolicy': FAKE_IKE_POLICY,
'ipsecpolicy': FAKE_IPSEC_POLICY,
'status': constants.PENDING_CREATE},
{'peer_cidrs': ['40.0.0.0/24',
'50.0.0.0/24'],
'external_ip': '50.0.0.4',
'peer_address': '50.0.0.5',
'peer_id': '50.0.0.5',
'psk': 'password',
'id': FAKE_IPSEC_SITE_CONNECTION2_ID,
'initiator': 'bi-directional',
'ikepolicy': FAKE_IKE_POLICY,
'ipsecpolicy': FAKE_IPSEC_POLICY,
'status': constants.PENDING_CREATE}]
}
DESIRED_CONN_STATUS = {FAKE_IPSEC_SITE_CONNECTION1_ID:
{'status': 'DOWN',
'updated_pending_status': False},
FAKE_IPSEC_SITE_CONNECTION2_ID:
{'status': 'DOWN',
'updated_pending_status': False}}
class TestStrongSwanDeviceDriver(base.BaseSudoTestCase):
"""Test the StrongSwan reference implementation of the device driver."""
def setUp(self):
super(TestStrongSwanDeviceDriver, self).setUp()
self.conf = cfg.CONF
self.conf.register_opts(l3_config.OPTS)
self.conf.register_opts(ipsec.ipsec_opts, 'ipsec')
self.conf.register_opts(strongswan_ipsec.strongswan_opts,
'strongswan')
self.conf.set_override('state_path', '/tmp')
ri_kwargs = {'router': {'id': FAKE_ROUTER_ID},
'agent_conf': self.conf,
'interface_driver': mock.sentinel.interface_driver}
self.router = legacy_router.LegacyRouter(FAKE_ROUTER_ID, **ri_kwargs)
self.router.router['distributed'] = False
self.router_id = FAKE_VPN_SERVICE['router_id']
looping_call_p = mock.patch(
'oslo_service.loopingcall.FixedIntervalLoopingCall')
looping_call_p.start()
vpn_service = mock.Mock()
vpn_service.conf = self.conf
self.driver = strongswan_ipsec.StrongSwanDriver(
vpn_service, host=mock.sentinel.host)
self.driver.routers[FAKE_ROUTER_ID] = self.router
self.driver.agent_rpc = mock.Mock()
self.driver._update_nat = mock.Mock()
self.driver.agent_rpc.get_vpn_services_on_host.return_value = [
FAKE_VPN_SERVICE]
self.addCleanup(self.driver.destroy_router, self.router_id)
self.router.router_namespace.create()
self.addCleanup(self.router.router_namespace.delete)
def test_process_lifecycle(self):
"""
Lifecycle test that validates that the strongswan process could be
launched, that a connection could be successfully initiated through
it, and that it could then be terminated and cleaned up after itself.
"""
process = self.driver.ensure_process(self.router_id,
FAKE_VPN_SERVICE)
process.enable()
self.assertTrue(process.active)
self.assertIn(self.router_id, self.driver.processes)
self.assertEqual(DESIRED_CONN_STATUS, process.connection_status)
self.assertIsNotNone(process.namespace)
conf_dir = os.path.join(self.conf.ipsec.config_base_dir,
self.router_id)
self.assertTrue(os.path.exists(conf_dir))
process.disable()
self.assertFalse(process.active)
self.assertFalse(process.connection_status)
self.assertFalse(os.path.exists(conf_dir))
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/functional/strongswan/__init__.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
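# This is the standard unittest load_tests protocol: besides the
# strongswan-specific tests in this directory, it also pulls in the shared
# tests from ../common (see the README files in these directories).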
def load_tests(loader, tests, pattern):
this_dir = os.path.dirname(__file__)
strongswan_tests = loader.discover(start_dir=this_dir, pattern=pattern)
tests.addTests(strongswan_tests)
common_dir = os.path.join(this_dir, "../common")
common_tests = loader.discover(start_dir=common_dir, pattern=pattern)
tests.addTests(common_tests)
return tests
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/functional/__init__.py
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/functional/openswan/README
This area holds tests for the OpenSwan implementation (only). It will also
run tests in neutron_vpnaas/tests/functional/common.
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/functional/openswan/__init__.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
def load_tests(loader, tests, pattern):
this_dir = os.path.dirname(__file__)
openswan_tests = loader.discover(start_dir=this_dir, pattern=pattern)
tests.addTests(openswan_tests)
common_dir = os.path.abspath(os.path.join(this_dir, "../common"))
common_tests = loader.discover(start_dir=common_dir, pattern=pattern)
tests.addTests(common_tests)
return tests
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/functional/openswan/test_openswan_driver.py
# Copyright (c) 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils as linux_utils
from neutron_vpnaas.tests.functional.common import test_scenario
class TestOpenSwanDeviceDriver(test_scenario.TestIPSecBase):
"""Test the OpenSwan reference implementation of the device driver."""
# NOTE: Tests may be added/removed/changed when this is fleshed out
# in future commits.
def _ping_mtu(self, from_site, to_site, size, instance=0):
"""Pings ip address using packets of given size and with DF=1.
In order to ping it uses following cli command:
ip netns exec ping -c 4 -M do -s
"""
namespace = from_site.vm[instance].namespace
ip = to_site.vm[instance].port_ip
try:
cmd = ['ping', '-c', 4, '-M', 'do', '-s', size, ip]
cmd = ip_lib.add_namespace_to_cmd(cmd, namespace)
linux_utils.execute(cmd, run_as_root=True)
return True
except RuntimeError:
return False
def test_process_created_on_ipsec_connection_create(self):
"""Check that pluto process is running."""
pass
def test_connection_status_with_one_side_of_ipsec_connection(self):
"""Check status of connection, with only one end created.
Expect that the status will indicate that the connection is down.
"""
pass
def test_process_gone_on_ipsec_connection_delete(self):
"""Verify that there is no longer a process, upon deletion."""
pass
def test_cached_status_on_create_and_delete(self):
"""Test that the status is cached."""
pass
def test_status_reporting(self):
"""Test status reported correctly to agent."""
pass
def _override_mtu_for_site(self, site, mtu):
ipsec_connection = site.vpn_service['ipsec_site_connections'][0]
ipsec_connection['mtu'] = mtu
def test_ipsec_site_connections_mtu_enforcement(self):
"""Test that mtu of ipsec site connections is enforced."""
site1 = self.create_site(test_scenario.PUBLIC_NET[4],
[self.private_nets[1]])
site2 = self.create_site(test_scenario.PUBLIC_NET[5],
[self.private_nets[2]])
self.check_ping(site1, site2, success=False)
self.check_ping(site2, site1, success=False)
self.prepare_ipsec_site_connections(site1, site2)
# Set up non-default mtu value
self._override_mtu_for_site(site1, 1200)
self._override_mtu_for_site(site2, 1200)
self.sync_to_create_ipsec_connections(site1, site2)
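# ping's -s option sets the ICMP payload size; the resulting IPv4 packet
# is payload + 28 bytes (20-byte IPv4 header + 8-byte ICMP header), so a
# 1172-byte payload produces a 1200-byte packet that just fits the MTU.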
# Validate that ip packets with 1172 (1200) bytes of data pass
self.assertTrue(self._ping_mtu(site1, site2, 1172))
self.assertTrue(self._ping_mtu(site2, site1, 1172))
# Validate that ip packets with 1173 (1201) bytes of data are dropped
self.assertFalse(self._ping_mtu(site1, site2, 1173))
self.assertFalse(self._ping_mtu(site2, site1, 1173))
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/functional/requirements.txt
# Additional requirements for functional tests
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
psutil>=1.1.1,<2.0.0
psycopg2
PyMySQL>=0.6.2 # MIT License
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/functional/common/README
This area holds tests that are run for all functional jobs. The load_tests()
method in each implementation specific area will include tests in this area.
Do not place a test module in this area if it is specific to one particular
implementation.
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/functional/common/test_migrations_sync.py
# Copyright 2015 Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from neutron.db.migration.alembic_migrations import external
from neutron.db.migration import cli as migration
from neutron.tests.common import base
from neutron.tests.functional.db import test_migrations
from neutron_vpnaas.db.migration import alembic_migrations
from neutron_vpnaas.db.models import head
EXTERNAL_TABLES = set(external.TABLES) - set(external.VPNAAS_TABLES)
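# Tables owned by other sub-projects are excluded from the comparison,
# while the VPNaaS tables themselves are removed from the external set so
# that they are still compared against the models.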
class _TestModelsMigrationsVPNAAS(test_migrations._TestModelsMigrations):
def db_sync(self, engine):
cfg.CONF.set_override('connection', engine.url, group='database')
for conf in migration.get_alembic_configs():
self.alembic_config = conf
self.alembic_config.neutron_config = cfg.CONF
migration.do_alembic_command(conf, 'upgrade', 'heads')
def get_metadata(self):
return head.get_metadata()
def include_object(self, object_, name, type_, reflected, compare_to):
if type_ == 'table' and (
name == alembic_migrations.VPNAAS_VERSION_TABLE or
name in EXTERNAL_TABLES):
return False
else:
return True
def get_engine(self):
return self.engine
class TestModelsMigrationsMysql(_TestModelsMigrationsVPNAAS,
base.MySQLTestCase):
pass
class TestModelsMigrationsPsql(_TestModelsMigrationsVPNAAS,
base.PostgreSQLTestCase):
pass
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/functional/common/__init__.py
neutron-vpnaas-8.0.0/neutron_vpnaas/tests/functional/common/test_scenario.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import functools
import mock
import netaddr
import os
from neutron.agent.common import config as agent_config
from neutron.agent.common import ovs_lib
from neutron.agent.l3 import namespaces as n_namespaces
from neutron.agent.l3 import router_info
from neutron.agent import l3_agent as l3_agent_main
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils as linux_utils
from neutron.common import config as common_config
from neutron.common import constants as l3_constants
from neutron.common import utils as common_utils
from neutron.plugins.common import constants
from neutron.services.provider_configuration import serviceprovider_opts
from neutron.tests.common import l3_test_common
from neutron.tests.common import net_helpers
from neutron.tests.functional import base
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
from neutron_vpnaas.services.vpn import agent as vpn_agent
from neutron_vpnaas.services.vpn.agent import vpn_agent_opts
from neutron_vpnaas.services.vpn.device_drivers import ipsec
_uuid = uuidutils.generate_uuid
FAKE_IKE_POLICY = {
'auth_algorithm': 'sha1',
"ike_version": "v1",
'encryption_algorithm': 'aes-128',
'pfs': 'group5',
'phase1_negotiation_mode': 'main',
'lifetime_units': 'seconds',
'lifetime_value': 3600
}
FAKE_IPSEC_POLICY = {
"encapsulation_mode": "tunnel",
"encryption_algorithm": "aes-128",
"pfs": "group5",
"lifetime_units": "seconds",
"lifetime_value": 3600,
"transform_protocol": "esp",
"auth_algorithm": "sha1",
}
FAKE_VPN_SERVICE = {
"id": _uuid(),
"router_id": _uuid(),
"status": constants.PENDING_CREATE,
"admin_state_up": True,
'external_ip': "172.24.4.8"
}
FAKE_IPSEC_CONNECTION = {
"vpnservice_id": _uuid(),
"status": "PENDING_CREATE",
"psk": "969022489",
"initiator": "bi-directional",
"admin_state_up": True,
"auth_mode": "psk",
'external_ip': "172.24.4.8",
"peer_cidrs": ["10.100.255.224/28"],
"mtu": 1500,
"dpd_action": "hold",
"dpd_interval": 30,
"dpd_timeout": 120,
"route_mode": "static",
"ikepolicy": FAKE_IKE_POLICY,
"ipsecpolicy": FAKE_IPSEC_POLICY,
"peer_address": "172.24.4.8",
"peer_id": "172.24.4.8",
"id": _uuid()
}
PUBLIC_NET = netaddr.IPNetwork('19.4.4.0/24')
PRIVATE_NET = netaddr.IPNetwork('35.4.0.0/16')
FAKE_PUBLIC_SUBNET_ID = _uuid()
FAKE_PRIVATE_SUBNET_ID = _uuid()
MAC_BASE = cfg.CONF.base_mac.split(':')
FAKE_ROUTER = {
'enable_snat': True,
'gw_port': {
'network_id': _uuid(),
'subnets': [
{
'cidr': str(PUBLIC_NET),
'gateway_ip': str(PUBLIC_NET[1]),
'id': FAKE_PUBLIC_SUBNET_ID
}
],
'fixed_ips': [
{
'subnet_id': FAKE_PUBLIC_SUBNET_ID,
'prefixlen': PUBLIC_NET.prefixlen,
}
],
},
'distributed': False,
'_floatingips': [],
'routes': []
}
def get_ovs_bridge(br_name):
return ovs_lib.OVSBridge(br_name)
Vm = collections.namedtuple('Vm', ['namespace', 'port_ip'])
class SiteInfo(object):
"""Holds info on the router, ports, service, and connection."""
def __init__(self, public_net, private_nets):
self.public_net = public_net
self.private_nets = private_nets
self.generate_router_info()
self._prepare_vpn_service_info()
def _generate_private_interface_for_router(self, subnet):
subnet_id = _uuid()
return {
'id': _uuid(),
'admin_state_up': True,
'network_id': _uuid(),
'mac_address': common_utils.get_random_mac(MAC_BASE),
'subnets': [
{
'ipv6_ra_mode': None,
'cidr': str(subnet),
'gateway_ip': str(subnet[1]),
'id': subnet_id,
'ipv6_address_mode': None
}
],
'fixed_ips': [
{
'subnet_id': subnet_id,
'prefixlen': 24,
'ip_address': str(subnet[4])
}
]
}
def generate_router_info(self):
self.info = copy.deepcopy(FAKE_ROUTER)
self.info['id'] = _uuid()
self.info['_interfaces'] = [
self._generate_private_interface_for_router(subnet)
for subnet in self.private_nets]
self.info['gw_port']['id'] = _uuid()
self.info['gw_port']['fixed_ips'][0]['ip_address'] = str(
self.public_net)
self.info['gw_port']['mac_address'] = (
common_utils.get_random_mac(MAC_BASE))
self.info['ha'] = False
def _prepare_vpn_service_info(self):
self.vpn_service = copy.deepcopy(FAKE_VPN_SERVICE)
self.vpn_service.update({'id': _uuid(),
'router_id': self.info['id'],
'external_ip': str(self.public_net)})
def prepare_ipsec_conn_info(self, peer):
ipsec_connection = copy.deepcopy(FAKE_IPSEC_CONNECTION)
local_cidrs = [str(s) for s in self.private_nets]
peer_cidrs = [str(s) for s in peer.private_nets]
ipsec_connection.update({
'id': _uuid(),
'vpnservice_id': self.vpn_service['id'],
'external_ip': self.vpn_service['external_ip'],
'peer_cidrs': peer_cidrs,
'peer_address': peer.vpn_service['external_ip'],
'peer_id': peer.vpn_service['external_ip'],
'local_cidrs': local_cidrs,
'local_ip_vers': 4
})
self.vpn_service['ipsec_site_connections'] = [ipsec_connection]
class SiteInfoWithHaRouter(SiteInfo):
def __init__(self, public_net, private_nets, host, failover_host):
self.host = host
self.failover_host = failover_host
self.get_ns_name = mock.patch.object(n_namespaces.RouterNamespace,
'_get_ns_name').start()
super(SiteInfoWithHaRouter, self).__init__(public_net, private_nets)
def generate_router_info(self):
super(SiteInfoWithHaRouter, self).generate_router_info()
self.info['ha'] = True
self.info['ha_vr_id'] = 1
self.info[l3_constants.HA_INTERFACE_KEY] = (
l3_test_common.get_ha_interface())
# Mock router namespace name, for when router is created
self.get_ns_name.return_value = "qrouter-{0}-{1}".format(
self.info['id'], self.host)
def generate_backup_router_info(self):
# Clone router info, using different HA interface (using same ID)
info = copy.deepcopy(self.info)
info[l3_constants.HA_INTERFACE_KEY] = (
l3_test_common.get_ha_interface(ip='169.254.192.2',
mac='22:22:22:22:22:22'))
# Mock router namespace name, for when router is created
self.get_ns_name.return_value = "qrouter-{0}-{1}".format(
info['id'], self.failover_host)
return info
class TestIPSecBase(base.BaseSudoTestCase):
vpn_agent_ini = os.environ.get('VPN_AGENT_INI',
'/etc/neutron/vpn_agent.ini')
NESTED_NAMESPACE_SEPARATOR = '@'
def setUp(self):
super(TestIPSecBase, self).setUp()
mock.patch('neutron.agent.l3.agent.L3PluginApi').start()
# avoid report_status running periodically
mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall').start()
# Both the vpn agents try to use execute_rootwrap_daemon's socket
# simultaneously during test cleanup, but execute_rootwrap_daemon has
# limitations with simultaneous reads. So avoid using
# root_helper_daemon and instead use root_helper
# https://bugs.launchpad.net/neutron/+bug/1482622
cfg.CONF.set_override('root_helper_daemon', None, group='AGENT')
# Mock the method below because it causes Exception:
# RuntimeError: Second simultaneous read on fileno 5 detected.
# Unless you really know what you're doing, make sure that only
# one greenthread can read any particular socket. Consider using
# a pools.Pool. If you do know what you're doing and want to disable
# this error, call eventlet.debug.hub_prevent_multiple_readers(False)
# Can reproduce the exception in the test only
ip_lib.send_ip_addr_adv_notif = mock.Mock()
self.vpn_agent = self._configure_agent('agent1')
self.driver = self.vpn_agent.device_drivers[0]
self.driver.agent_rpc.get_vpn_services_on_host = mock.Mock(
return_value=[])
self.driver.report_status = mock.Mock()
self.private_nets = list(PRIVATE_NET.subnet(24))
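# PRIVATE_NET (35.4.0.0/16) is carved into /24 subnets here; each test
# site is given one or more of them as its private networks.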
def _connect_agents(self, agent1, agent2):
"""Simulate both agents in the same host.
For packet flow between resources connected to these two agents,
agent's ovs bridges are connected through patch ports.
"""
br_int_1 = get_ovs_bridge(agent1.conf.ovs_integration_bridge)
br_int_2 = get_ovs_bridge(agent2.conf.ovs_integration_bridge)
net_helpers.create_patch_ports(br_int_1, br_int_2)
br_ex_1 = get_ovs_bridge(agent1.conf.external_network_bridge)
br_ex_2 = get_ovs_bridge(agent2.conf.external_network_bridge)
net_helpers.create_patch_ports(br_ex_1, br_ex_2)
def _get_config_opts(self):
"""Register default config options"""
config = cfg.ConfigOpts()
config.register_opts(common_config.core_opts)
config.register_opts(common_config.core_cli_opts)
config.register_opts(serviceprovider_opts, 'service_providers')
config.register_opts(vpn_agent_opts, 'vpnagent')
config.register_opts(ipsec.ipsec_opts, 'ipsec')
config.register_opts(ipsec.openswan_opts, 'openswan')
logging.register_options(config)
agent_config.register_process_monitor_opts(config)
return config
def _configure_agent(self, host):
"""Override specific config options"""
config = self._get_config_opts()
l3_agent_main.register_opts(config)
cfg.CONF.set_override('debug', True)
agent_config.setup_logging()
config.set_override(
'interface_driver',
'neutron.agent.linux.interface.OVSInterfaceDriver')
br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
br_ex = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
config.set_override('ovs_integration_bridge', br_int.br_name)
config.set_override('external_network_bridge', br_ex.br_name)
temp_dir = self.get_new_temp_dir()
get_temp_file_path = functools.partial(self.get_temp_file_path,
root=temp_dir)
config.set_override('state_path', temp_dir.path)
config.set_override('metadata_proxy_socket',
get_temp_file_path('metadata_proxy'))
config.set_override('ha_confs_path',
get_temp_file_path('ha_confs'))
config.set_override('external_pids',
get_temp_file_path('external/pids'))
config.set_override('host', host)
ipsec_config_base_dir = '%s/%s' % (temp_dir.path, 'ipsec')
config.set_override('config_base_dir',
ipsec_config_base_dir, group='ipsec')
config(['--config-file', self.vpn_agent_ini])
# Assign ip address to br-ex port because it is a gateway
ex_port = ip_lib.IPDevice(br_ex.br_name)
ex_port.addr.add(str(PUBLIC_NET[1]))
return vpn_agent.VPNAgent(host, config)
def _setup_failover_agent(self):
self.failover_agent = self._configure_agent('agent2')
self._connect_agents(self.vpn_agent, self.failover_agent)
self.failover_driver = self.failover_agent.device_drivers[0]
self.failover_driver.agent_rpc.get_vpn_services_on_host = (
mock.Mock(return_value=[]))
self.failover_driver.report_status = mock.Mock()
def create_router(self, agent, info):
"""Create router for agent from router info."""
self.addCleanup(agent._safe_router_removed, info['id'])
# Generate unique internal and external router device names using the
# agent's hostname. This is to allow multiple HA router replicas to
# co-exist on the same machine, otherwise they'd all use the same
# device names and OVS would freak out(OVS won't allow a port with
# same name connected to two bridges).
def _append_suffix(dev_name):
# If dev_name = 'xyz123' and the suffix is 'agent2' then the result
# will be 'xy-nt2'
return "{0}-{1}".format(dev_name[:-4], agent.host[-3:])
def get_internal_device_name(port_id):
return _append_suffix(
(n_namespaces.INTERNAL_DEV_PREFIX + port_id)
[:interface.LinuxInterfaceDriver.DEV_NAME_LEN])
def get_external_device_name(port_id):
return _append_suffix(
(n_namespaces.EXTERNAL_DEV_PREFIX + port_id)
[:interface.LinuxInterfaceDriver.DEV_NAME_LEN])
mock_get_internal_device_name = mock.patch.object(
router_info.RouterInfo, 'get_internal_device_name').start()
mock_get_internal_device_name.side_effect = get_internal_device_name
mock_get_external_device_name = mock.patch.object(
router_info.RouterInfo, 'get_external_device_name').start()
mock_get_external_device_name.side_effect = get_external_device_name
agent._process_added_router(info)
return agent.router_info[info['id']]
def _port_first_ip_cidr(self, port):
fixed_ip = port['fixed_ips'][0]
return common_utils.ip_to_cidr(fixed_ip['ip_address'],
fixed_ip['prefixlen'])
def create_ports_for(self, site):
"""Creates namespaces and ports for simulated VM.
There will be a unique namespace for each port, which is representing
a VM for the test.
"""
bridge = get_ovs_bridge(self.vpn_agent.conf.ovs_integration_bridge)
site.vm = []
for internal_port in site.router.internal_ports:
router_ip_cidr = self._port_first_ip_cidr(internal_port)
port_ip_cidr = net_helpers.increment_ip_cidr(router_ip_cidr, 1)
client_ns = self.useFixture(
net_helpers.NamespaceFixture()).ip_wrapper
namespace = client_ns.namespace
port = self.useFixture(
net_helpers.OVSPortFixture(bridge, namespace)).port
port.addr.add(port_ip_cidr)
port.route.add_gateway(router_ip_cidr.partition('/')[0])
site.vm.append(Vm(namespace, port_ip_cidr.partition('/')[0]))
def create_site(self, public_net, private_nets, l3ha=False):
"""Build router(s), namespaces, and ports for a site.
For HA, we'll create a backup router and wait for both routers
to be ready, so that we can test pings after failover.
"""
if l3ha:
site = SiteInfoWithHaRouter(public_net, private_nets,
self.vpn_agent.host,
self.failover_agent.host)
else:
site = SiteInfo(public_net, private_nets)
site.router = self.create_router(self.vpn_agent, site.info)
if l3ha:
backup_info = site.generate_backup_router_info()
site.backup_router = self.create_router(self.failover_agent,
backup_info)
linux_utils.wait_until_true(
lambda: site.router.ha_state == 'master')
linux_utils.wait_until_true(
lambda: site.backup_router.ha_state == 'backup')
self.create_ports_for(site)
return site
def prepare_ipsec_site_connections(self, site1, site2):
"""Builds info for connections in both directions in prep for sync."""
site1.prepare_ipsec_conn_info(site2)
site2.prepare_ipsec_conn_info(site1)
def sync_to_create_ipsec_connections(self, site1, site2):
"""Perform a sync, so that connections are created."""
# Provide service info to sync
self.driver.agent_rpc.get_vpn_services_on_host = mock.Mock(
return_value=[site1.vpn_service, site2.vpn_service])
local_router_id = site1.router.router_id
peer_router_id = site2.router.router_id
self.driver.sync(mock.Mock(), [{'id': local_router_id},
{'id': peer_router_id}])
self.addCleanup(self.driver._delete_vpn_processes,
[local_router_id, peer_router_id], [])
def sync_failover_agent(self, site):
"""Perform a sync on failover agent associated w/backup router."""
self.failover_driver.agent_rpc.get_vpn_services_on_host = mock.Mock(
return_value=[site.vpn_service])
self.failover_driver.sync(mock.Mock(),
[{'id': site.backup_router.router_id}])
def check_ping(self, from_site, to_site, instance=0, success=True):
if success:
net_helpers.assert_ping(from_site.vm[instance].namespace,
to_site.vm[instance].port_ip,
timeout=8, count=4)
else:
net_helpers.assert_no_ping(from_site.vm[instance].namespace,
to_site.vm[instance].port_ip,
timeout=8, count=4)
def _failover_ha_router(self, router1, router2):
"""Cause a failover of HA router.
Fail agent1's HA router. Agent1's HA router will transition
to backup and agent2's HA router will become master. Wait for
the failover to complete.
"""
device_name = router1.get_ha_device_name()
ha_device = ip_lib.IPDevice(device_name, router1.ns_name)
ha_device.link.set_down()
linux_utils.wait_until_true(lambda: router2.ha_state == 'master')
linux_utils.wait_until_true(lambda: router1.ha_state == 'backup')
def _ipsec_process_exists(self, conf, router, pid_files):
"""Check if *Swan process has started up."""
for pid_file in pid_files:
pm = external_process.ProcessManager(
conf,
"ipsec",
router.ns_name, pid_file=pid_file)
if pm.active:
break
return pm.active
def _wait_for_ipsec_startup(self, router, driver, conf, should_run=True):
"""Wait for new IPSec process on failover agent to start up."""
# check for both strongswan and openswan processes
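# (charon is strongSwan's IKE daemon and pluto is OpenSwan's/LibreSwan's,
# so checking both pid files covers whichever driver is configured)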
path = driver.processes[router.router_id].config_dir
pid_files = ['%s/var/run/charon.pid' % path,
'%s/var/run/pluto.pid' % path]
linux_utils.wait_until_true(
lambda: should_run == self._ipsec_process_exists(
conf, router, pid_files))
@staticmethod
def _update_vpnservice(site, **kwargs):
site.vpn_service.update(kwargs)
@staticmethod
def _update_ipsec_connection(site, **kwargs):
ipsec_connection = site.vpn_service['ipsec_site_connections'][0]
ipsec_connection.update(kwargs)
class TestIPSecScenario(TestIPSecBase):
def test_single_ipsec_connection(self):
site1 = self.create_site(PUBLIC_NET[4], [self.private_nets[1]])
site2 = self.create_site(PUBLIC_NET[5], [self.private_nets[2]])
self.check_ping(site1, site2, success=False)
self.check_ping(site2, site1, success=False)
self.prepare_ipsec_site_connections(site1, site2)
self.sync_to_create_ipsec_connections(site1, site2)
self.check_ping(site1, site2)
self.check_ping(site2, site1)
def test_ipsec_site_connections_with_mulitple_subnets(self):
"""Check with a pair of subnets on each end of connection."""
site1 = self.create_site(PUBLIC_NET[4], self.private_nets[1:3])
site2 = self.create_site(PUBLIC_NET[5], self.private_nets[3:5])
# Just check from each VM, not every combination
for i in [0, 1]:
self.check_ping(site1, site2, instance=i, success=False)
self.check_ping(site2, site1, instance=i, success=False)
self.prepare_ipsec_site_connections(site1, site2)
self.sync_to_create_ipsec_connections(site1, site2)
for i in [0, 1]:
self.check_ping(site1, site2, instance=i)
self.check_ping(site2, site1, instance=i)
def test_ipsec_site_connections_with_l3ha_routers(self):
"""Test ipsec site connection with HA routers.
This test creates two agents. The first agent will have legacy and HA
routers. The second agent will host only an HA router. We set up an
ipsec connection between the legacy and HA routers.
When the HA router is created, agent1 will have the master router and
agent2 will have the backup router. The ipsec connection will be
established between the legacy router and agent1's master HA router.
Then we fail agent1's master HA router. Agent1's HA router will
transition to backup and agent2's HA router will become master.
Now the ipsec connection will be established between the legacy
router and agent2's master HA router.
"""
self._setup_failover_agent()
site1 = self.create_site(PUBLIC_NET[4], [self.private_nets[1]])
site2 = self.create_site(PUBLIC_NET[5], [self.private_nets[2]],
l3ha=True)
# No ipsec connection between legacy router and HA routers
self.check_ping(site1, site2, 0, success=False)
self.check_ping(site2, site1, 0, success=False)
self.prepare_ipsec_site_connections(site1, site2)
self.sync_to_create_ipsec_connections(site1, site2)
self.sync_failover_agent(site2)
# Test ipsec connection between legacy router and agent2's HA router
self.check_ping(site1, site2, 0)
self.check_ping(site2, site1, 0)
self._failover_ha_router(site2.router, site2.backup_router)
self._wait_for_ipsec_startup(site2.backup_router,
self.failover_driver,
self.failover_agent.conf)
# Test ipsec connection between legacy router and agent2's HA router
self.check_ping(site1, site2, 0)
self.check_ping(site2, site1, 0)
def _test_admin_state_up(self, update_method):
# Create ipsec connection between two sites
site1 = self.create_site(PUBLIC_NET[4], [self.private_nets[1]])
site2 = self.create_site(PUBLIC_NET[5], [self.private_nets[2]])
self.prepare_ipsec_site_connections(site1, site2)
self.sync_to_create_ipsec_connections(site1, site2)
self.check_ping(site1, site2)
self.check_ping(site2, site1)
# Disable resource on one of the sites and check that
# ping no longer passes.
update_method(site1, admin_state_up=False)
self.sync_to_create_ipsec_connections(site1, site2)
self.check_ping(site1, site2, 0, success=False)
self.check_ping(site2, site1, 0, success=False)
# Validate that ipsec process for the disabled site was terminated.
self._wait_for_ipsec_startup(site1.router, self.driver,
self.vpn_agent.conf,
should_run=False)
# Change admin_state_up of the disabled resource back to True and
# check that everything works again.
update_method(site1, admin_state_up=True)
self.sync_to_create_ipsec_connections(site1, site2)
self.check_ping(site1, site2)
self.check_ping(site2, site1)
def test_ipsec_site_connections_update_admin_state_up(self):
"""Test updating admin_state_up of ipsec site connections."""
self._test_admin_state_up(self._update_ipsec_connection)
def test_vpnservice_update_admin_state_up(self):
"""Test updating admin_state_up of a vpn service."""
self._test_admin_state_up(self._update_vpnservice)
neutron-vpnaas-8.0.0/neutron_vpnaas/opts.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import neutron.services.provider_configuration
import neutron_vpnaas.services.vpn.agent
import neutron_vpnaas.services.vpn.device_drivers.ipsec
import neutron_vpnaas.services.vpn.device_drivers.strongswan_ipsec
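# These list_*_opts() functions are typically exported via 'oslo.config.opts'
# entry points (declared in the packaging metadata) so that
# oslo-config-generator can build the sample configuration files; the exact
# entry-point names are an assumption here and are not defined in this module.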
def list_agent_opts():
return [
('vpnagent',
neutron_vpnaas.services.vpn.agent.vpn_agent_opts),
('ipsec',
neutron_vpnaas.services.vpn.device_drivers.ipsec.ipsec_opts),
('strongswan',
neutron_vpnaas.services.vpn.device_drivers.strongswan_ipsec.
strongswan_opts),
('pluto',
neutron_vpnaas.services.vpn.device_drivers.ipsec.pluto_opts)
]
def list_opts():
return [
('service_providers',
neutron.services.provider_configuration.serviceprovider_opts)
]
neutron-vpnaas-8.0.0/neutron_vpnaas/__init__.py
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext
import six
if six.PY2:
gettext.install('neutron', unicode=1)
else:
gettext.install('neutron')
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/vyatta_agent.py
# Copyright 2015 Brocade Communications System, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from networking_brocade.vyatta.common import l3_agent as vyatta_l3
from neutron.agent import l3_agent as entry
from oslo_config import cfg
from neutron_vpnaas._i18n import _
from neutron_vpnaas.services.vpn import vyatta_vpn_service
vpn_agent_opts = [
cfg.MultiStrOpt(
'vpn_device_driver',
default=['neutron_vpnaas.services.vpn.device_drivers.'
'vyatta_ipsec.VyattaIPSecDriver'],
help=_("The vpn device drivers Neutron will use")),
]
cfg.CONF.register_opts(vpn_agent_opts, 'vpnagent')
class VyattaVPNAgent(vyatta_l3.L3AgentMiddleware):
def __init__(self, host, conf=None):
super(VyattaVPNAgent, self).__init__(host, conf)
self.service = vyatta_vpn_service.VyattaVPNService(self)
self.device_drivers = self.service.load_device_drivers(host)
def main():
entry.main(
manager='neutron_vpnaas.services.vpn.vyatta_agent.VyattaVPNAgent')
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/plugin.py
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db import servicetype_db as st_db
from neutron.plugins.common import constants
from neutron.services import provider_configuration as pconf
from neutron.services import service_base
from oslo_log import log as logging
from neutron_vpnaas._i18n import _LI
from neutron_vpnaas.db.vpn import vpn_db
LOG = logging.getLogger(__name__)
def add_provider_configuration(type_manager, service_type):
type_manager.add_provider_configuration(
service_type,
pconf.ProviderConfiguration('neutron_vpnaas'))
class VPNPlugin(vpn_db.VPNPluginDb):
"""Implementation of the VPN Service Plugin.
This class manages the workflow of VPNaaS request/response.
Most DB-related work is implemented in the class
vpn_db.VPNPluginDb.
"""
supported_extension_aliases = ["vpnaas",
"vpn-endpoint-groups",
"service-type"]
path_prefix = "/vpn"
class VPNDriverPlugin(VPNPlugin, vpn_db.VPNPluginRpcDbMixin):
"""VpnPlugin which supports VPN Service Drivers."""
#TODO(nati) handle ikepolicy and ipsecpolicy update usecase
def __init__(self):
super(VPNDriverPlugin, self).__init__()
self.service_type_manager = st_db.ServiceTypeManager.get_instance()
add_provider_configuration(self.service_type_manager, constants.VPN)
# Load the service driver from neutron.conf.
drivers, default_provider = service_base.load_drivers(
constants.VPN, self)
LOG.info(_LI("VPN plugin using service driver: %s"), default_provider)
self.ipsec_driver = drivers[default_provider]
vpn_db.subscribe()
def _get_driver_for_vpnservice(self, vpnservice):
return self.ipsec_driver
def _get_driver_for_ipsec_site_connection(self, context,
ipsec_site_connection):
#TODO(nati) get vpnservice when we support service type framework
vpnservice = None
return self._get_driver_for_vpnservice(vpnservice)
def _get_validator(self):
return self.ipsec_driver.validator
def create_ipsec_site_connection(self, context, ipsec_site_connection):
ipsec_site_connection = super(
VPNDriverPlugin, self).create_ipsec_site_connection(
context, ipsec_site_connection)
driver = self._get_driver_for_ipsec_site_connection(
context, ipsec_site_connection)
driver.create_ipsec_site_connection(context, ipsec_site_connection)
return ipsec_site_connection
def delete_ipsec_site_connection(self, context, ipsec_conn_id):
ipsec_site_connection = self.get_ipsec_site_connection(
context, ipsec_conn_id)
super(VPNDriverPlugin, self).delete_ipsec_site_connection(
context, ipsec_conn_id)
driver = self._get_driver_for_ipsec_site_connection(
context, ipsec_site_connection)
driver.delete_ipsec_site_connection(context, ipsec_site_connection)
def update_ipsec_site_connection(
self, context,
ipsec_conn_id, ipsec_site_connection):
old_ipsec_site_connection = self.get_ipsec_site_connection(
context, ipsec_conn_id)
ipsec_site_connection = super(
VPNDriverPlugin, self).update_ipsec_site_connection(
context,
ipsec_conn_id,
ipsec_site_connection)
driver = self._get_driver_for_ipsec_site_connection(
context, ipsec_site_connection)
driver.update_ipsec_site_connection(
context, old_ipsec_site_connection, ipsec_site_connection)
return ipsec_site_connection
def create_vpnservice(self, context, vpnservice):
vpnservice = super(
VPNDriverPlugin, self).create_vpnservice(context, vpnservice)
driver = self._get_driver_for_vpnservice(vpnservice)
driver.create_vpnservice(context, vpnservice)
return vpnservice
def update_vpnservice(self, context, vpnservice_id, vpnservice):
old_vpn_service = self.get_vpnservice(context, vpnservice_id)
new_vpn_service = super(
VPNDriverPlugin, self).update_vpnservice(context, vpnservice_id,
vpnservice)
driver = self._get_driver_for_vpnservice(old_vpn_service)
driver.update_vpnservice(context, old_vpn_service, new_vpn_service)
return new_vpn_service
def delete_vpnservice(self, context, vpnservice_id):
vpnservice = self._get_vpnservice(context, vpnservice_id)
super(VPNDriverPlugin, self).delete_vpnservice(context, vpnservice_id)
driver = self._get_driver_for_vpnservice(vpnservice)
driver.delete_vpnservice(context, vpnservice)
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/__init__.py
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/vyatta_vpn_service.py
# Copyright 2015 Brocade Communications System, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_vpnaas.services.vpn import vpn_service
class VyattaVPNService(vpn_service.VPNService):
"""Vyatta VPN Service handler."""
def __init__(self, l3_agent):
"""Creates a Vyatta VPN Service instance.
NOTE: Directly accessing l3_agent here is an interim solution
until we move to have a router object given down to device drivers
to access router-related methods.
"""
super(VyattaVPNService, self).__init__(l3_agent)
self.l3_agent = l3_agent
def get_router_client(self, router_id):
"""
Get the router REST API client
"""
return self.l3_agent.get_router_client(router_id)
def get_router(self, router_id):
"""
Get Router Object
"""
return self.l3_agent.get_router(router_id)
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/agent.py
# Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.agent.l3 import agent as l3_agent
from neutron.agent import l3_agent as entry
from oslo_config import cfg
from neutron_vpnaas._i18n import _
from neutron_vpnaas.services.vpn import vpn_service
vpn_agent_opts = [
cfg.MultiStrOpt(
'vpn_device_driver',
default=['neutron_vpnaas.services.vpn.device_drivers.'
'ipsec.OpenSwanDriver'],
sample_default=['neutron_vpnaas.services.vpn.device_drivers.ipsec.'
'OpenSwanDriver, '
'neutron_vpnaas.services.vpn.device_drivers.'
'cisco_ipsec.CiscoCsrIPsecDriver, '
'neutron_vpnaas.services.vpn.device_drivers.'
'vyatta_ipsec.VyattaIPSecDriver, '
'neutron_vpnaas.services.vpn.device_drivers.'
'strongswan_ipsec.StrongSwanDriver, '
'neutron_vpnaas.services.vpn.device_drivers.'
'fedora_strongswan_ipsec.FedoraStrongSwanDriver, '
'neutron_vpnaas.services.vpn.device_drivers.'
'libreswan_ipsec.LibreSwanDriver'],
help=_("The vpn device drivers Neutron will use")),
]
cfg.CONF.register_opts(vpn_agent_opts, 'vpnagent')
class VPNAgent(l3_agent.L3NATAgentWithStateReport):
"""VPNAgent class which can handle vpn service drivers."""
def __init__(self, host, conf=None):
super(VPNAgent, self).__init__(host=host, conf=conf)
self.agent_state['binary'] = 'neutron-vpn-agent'
self.service = vpn_service.VPNService(self)
self.device_drivers = self.service.load_device_drivers(host)
def process_state_change(self, router_id, state):
"""Enable the vpn process when router transitioned to master.
And disable vpn process for backup router.
"""
for device_driver in self.device_drivers:
if router_id in device_driver.processes:
process = device_driver.processes[router_id]
if state == 'master':
process.enable()
else:
process.disable()
def enqueue_state_change(self, router_id, state):
"""Handle HA router state changes for vpn process"""
self.process_state_change(router_id, state)
super(VPNAgent, self).enqueue_state_change(router_id, state)
def main():
entry.main(manager='neutron_vpnaas.services.vpn.agent.VPNAgent')
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/common/constants.py
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Endpoint group types
SUBNET_ENDPOINT = 'subnet'
CIDR_ENDPOINT = 'cidr'
VLAN_ENDPOINT = 'vlan'
NETWORK_ENDPOINT = 'network'
ROUTER_ENDPOINT = 'router'
# NOTE: Type usage...
# IPSec local endpoints - subnet, IPSec peer endpoints - cidr
# BGP VPN local endpoints - network
# Direct connect style endpoints - vlan
# IMPORTANT: The ordering of these is important, as it is used in an enum
# for the database (and migration script). Only add to this list.
VPN_SUPPORTED_ENDPOINT_TYPES = [
SUBNET_ENDPOINT, CIDR_ENDPOINT, NETWORK_ENDPOINT,
VLAN_ENDPOINT, ROUTER_ENDPOINT,
]
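# Example (illustrative sketch; the dict layout is an assumption and the
# UUID/CIDR values are hypothetical): per the NOTE above, an IPSec
# connection's local endpoint group would carry subnet entries while its
# peer endpoint group carries cidr entries:
#   local endpoint group: {'type': SUBNET_ENDPOINT, 'endpoints': ['<subnet-uuid>']}
#   peer endpoint group:  {'type': CIDR_ENDPOINT, 'endpoints': ['192.168.1.0/24']}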
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/common/__init__.py 0000664 0005670 0005671 00000000000 12701407726 026725 0 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/common/netns_wrapper.py 0000664 0005670 0005671 00000013401 12701407726 030066 0 ustar jenkins jenkins 0000000 0000000 # Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import sys
from eventlet.green import subprocess
from neutron.common import config
from neutron.common import utils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_rootwrap import wrapper
import six
from neutron_vpnaas._i18n import _, _LE, _LI
if six.PY3:
import configparser as ConfigParser
else:
import ConfigParser
LOG = logging.getLogger(__name__)
def setup_conf():
cli_opts = [
cfg.DictOpt('mount_paths',
required=True,
help=_('Dict of paths to bind-mount (source:target) '
'prior to launching the subprocess.')),
cfg.ListOpt(
'cmd',
required=True,
help=_('Command line to execute as a subprocess, '
'provided as a comma-separated list of arguments.')),
cfg.StrOpt('rootwrap_config', default='/etc/neutron/rootwrap.conf',
help=_('Rootwrap configuration file.')),
]
conf = cfg.CONF
conf.register_cli_opts(cli_opts)
return conf
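# Example invocation (sketch only; the console-script name, the mount
# targets and the wrapped command are assumptions, not taken from this
# module):
#
#   neutron-vpn-netns-wrapper \
#       --mount_paths=/etc:<config-dir>/etc,/var/run:<config-dir>/var/run \
#       --cmd=ipsec,<args...>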
def execute(cmd):
if not cmd:
return
# Materialize as a list so the command can be logged and passed to
# Popen consistently on both Python 2 and 3.
cmd = list(map(str, cmd))
LOG.debug("Running command: %s", cmd)
env = os.environ.copy()
obj = utils.subprocess_popen(cmd, shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
_stdout, _stderr = obj.communicate()
msg = ('Command: %(cmd)s Exit code: %(returncode)s '
'Stdout: %(stdout)s Stderr: %(stderr)s' %
{'cmd': cmd,
'returncode': obj.returncode,
'stdout': _stdout,
'stderr': _stderr})
LOG.debug(msg)
obj.stdin.close()
# Pass the output to calling process
sys.stdout.write(msg)
sys.stdout.flush()
return obj.returncode
def filter_command(command, rootwrap_config):
# Load rootwrap configuration
try:
rawconfig = ConfigParser.RawConfigParser()
rawconfig.read(rootwrap_config)
rw_config = wrapper.RootwrapConfig(rawconfig)
except ValueError as exc:
LOG.error(_LE('Incorrect value in %(config)s: %(exc)s'),
{'config': rootwrap_config, 'exc': str(exc)})
sys.exit(errno.EINVAL)
except ConfigParser.Error:
LOG.error(_LE('Incorrect configuration file: %(config)s'),
{'config': rootwrap_config})
sys.exit(errno.EINVAL)
# Check if command matches any of the loaded filters
filters = wrapper.load_filters(rw_config.filters_path)
try:
wrapper.match_filter(filters, command, exec_dirs=rw_config.exec_dirs)
except wrapper.FilterMatchNotExecutable as exc:
LOG.error(_LE('Command %(command)s is not executable: '
'%(path)s (filter match = %(name)s)'),
{'command': command,
'path': exc.match.exec_path,
'name': exc.match.name})
sys.exit(errno.EINVAL)
except wrapper.NoFilterMatched:
LOG.error(_LE('Unauthorized command: %(cmd)s (no filter matched)'),
{'cmd': command})
sys.exit(errno.EPERM)
def execute_with_mount():
conf = setup_conf()
conf()
config.setup_logging()
if not conf.cmd:
LOG.error(_LE('No command provided, exiting'))
return errno.EINVAL
if not conf.mount_paths:
LOG.error(_LE('No mount path provided, exiting'))
return errno.EINVAL
# Neither sudoers nor rootwrap.conf will exist under /etc after the
# bind-mount, so we can't use utils.execute(conf.cmd,
# run_as_root=True). That's why we have to check here whether cmd
# matches a CommandFilter.
filter_command(conf.cmd, conf.rootwrap_config)
# Make sure the process is running in a net namespace created by
# 'ip netns exec', by comparing /proc/[pid]/ns/net (available since
# Linux 3.0); we can't check the mount namespace (/proc/[pid]/ns/mnt),
# which is only available since Linux 3.8. For more detail see
# http://man7.org/linux/man-pages/man7/namespaces.7.html
if os.path.samefile(os.path.join('/proc/1/ns/net'),
os.path.join('/proc', str(os.getpid()), 'ns/net')):
LOG.error(_LE('Cannot run without netns, exiting'))
return errno.EINVAL
for path, new_path in six.iteritems(conf.mount_paths):
if not os.path.isdir(new_path):
# Sometimes not all directories are ready yet
LOG.debug('%s is not a directory', new_path)
continue
if os.path.isdir(path) and os.path.isabs(path):
return_code = execute(['mount', '--bind', new_path, path])
if return_code == 0:
LOG.info(_LI('%(new_path)s has been '
'bind-mounted in %(path)s'),
{'new_path': new_path, 'path': path})
else:
LOG.error(_LE('Failed to bind-mount '
'%(new_path)s in %(path)s'),
{'new_path': new_path, 'path': path})
return execute(conf.cmd)
def main():
sys.exit(execute_with_mount())
if __name__ == "__main__":
main()
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/common/topics.py 0000664 0005670 0005671 00000001642 12701407726 026504 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
IPSEC_DRIVER_TOPIC = 'ipsec_driver'
IPSEC_AGENT_TOPIC = 'ipsec_agent'
CISCO_IPSEC_DRIVER_TOPIC = 'cisco_csr_ipsec_driver'
CISCO_IPSEC_AGENT_TOPIC = 'cisco_csr_ipsec_agent'
BROCADE_IPSEC_DRIVER_TOPIC = 'brocade_vyatta_ipsec_driver'
BROCADE_IPSEC_AGENT_TOPIC = 'brocade_vyatta_ipsec_agent'
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/service_drivers/ 0000775 0005670 0005671 00000000000 12701410103 026513 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/service_drivers/cisco_ipsec.py 0000664 0005670 0005671 00000022106 12701407726 031372 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import rpc as n_rpc
from oslo_log import log as logging
import oslo_messaging
from neutron_vpnaas.db.vpn import vpn_models
from neutron_vpnaas.services.vpn.common import topics
from neutron_vpnaas.services.vpn import service_drivers
from neutron_vpnaas.services.vpn.service_drivers import base_ipsec
from neutron_vpnaas.services.vpn.service_drivers \
import cisco_csr_db as csr_id_map
from neutron_vpnaas.services.vpn.service_drivers import cisco_validator
LOG = logging.getLogger(__name__)
IPSEC = 'ipsec'
BASE_IPSEC_VERSION = '1.0'
LIFETIME_LIMITS = {'IKE Policy': {'min': 60, 'max': 86400},
'IPSec Policy': {'min': 120, 'max': 2592000}}
MIN_CSR_MTU = 1500
MAX_CSR_MTU = 9192
VRF_SUFFIX_LEN = 6
T2_PORT_NAME = 't2_p:'
class CiscoCsrIPsecVpnDriverCallBack(object):
"""Handler for agent to plugin RPC messaging."""
# history
# 1.0 Initial version
target = oslo_messaging.Target(version=BASE_IPSEC_VERSION)
def __init__(self, driver):
super(CiscoCsrIPsecVpnDriverCallBack, self).__init__()
self.driver = driver
def create_rpc_dispatcher(self):
return n_rpc.PluginRpcDispatcher([self])
def get_vpn_services_using(self, context, router_id):
query = context.session.query(vpn_models.VPNService)
query = query.join(vpn_models.IPsecSiteConnection)
query = query.join(vpn_models.IKEPolicy)
query = query.join(vpn_models.IPsecPolicy)
query = query.join(vpn_models.IPsecPeerCidr)
query = query.filter(vpn_models.VPNService.router_id == router_id)
return query.all()
def get_vpn_services_on_host(self, context, host=None):
"""Returns info on the VPN services on the host."""
routers = self.driver.l3_plugin.get_active_routers_for_host(context,
host)
host_vpn_services = []
for router in routers:
vpn_services = self.get_vpn_services_using(context, router['id'])
for vpn_service in vpn_services:
host_vpn_services.append(
self.driver.make_vpnservice_dict(context, vpn_service,
router))
return host_vpn_services
def update_status(self, context, status):
"""Update status of all vpnservices."""
plugin = self.driver.service_plugin
plugin.update_status_by_agent(context, status)
class CiscoCsrIPsecVpnAgentApi(service_drivers.BaseIPsecVpnAgentApi):
"""API and handler for Cisco IPSec plugin to agent RPC messaging."""
target = oslo_messaging.Target(version=BASE_IPSEC_VERSION)
def __init__(self, topic, default_version, driver):
super(CiscoCsrIPsecVpnAgentApi, self).__init__(
topic, default_version, driver)
def _agent_notification(self, context, method, router_id,
version=None, **kwargs):
"""Notify update for the agent.
Find the host for the router being notified and then
dispatch a notification to the VPN device driver.
"""
admin_context = context if context.is_admin else context.elevated()
if not version:
version = self.target.version
host = self.driver.l3_plugin.get_host_for_router(admin_context,
router_id)
LOG.debug('Notify agent at %(topic)s.%(host)s the message '
'%(method)s %(args)s for router %(router)s',
{'topic': self.topic,
'host': host,
'method': method,
'args': kwargs,
'router': router_id})
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(context, method, **kwargs)
class CiscoCsrIPsecVPNDriver(base_ipsec.BaseIPsecVPNDriver):
"""Cisco CSR VPN Service Driver class for IPsec."""
def __init__(self, service_plugin):
super(CiscoCsrIPsecVPNDriver, self).__init__(
service_plugin,
cisco_validator.CiscoCsrVpnValidator(service_plugin))
def create_rpc_conn(self):
self.endpoints = [CiscoCsrIPsecVpnDriverCallBack(self)]
self.conn = n_rpc.create_connection()
self.conn.create_consumer(
topics.CISCO_IPSEC_DRIVER_TOPIC, self.endpoints, fanout=False)
self.conn.consume_in_threads()
self.agent_rpc = CiscoCsrIPsecVpnAgentApi(
topics.CISCO_IPSEC_AGENT_TOPIC, BASE_IPSEC_VERSION, self)
def create_ipsec_site_connection(self, context, ipsec_site_connection):
vpnservice = self.service_plugin._get_vpnservice(
context, ipsec_site_connection['vpnservice_id'])
csr_id_map.create_tunnel_mapping(context, ipsec_site_connection)
self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'],
reason='ipsec-conn-create')
def update_ipsec_site_connection(
self, context, old_ipsec_site_connection, ipsec_site_connection):
vpnservice = self.service_plugin._get_vpnservice(
context, ipsec_site_connection['vpnservice_id'])
self.agent_rpc.vpnservice_updated(
context, vpnservice['router_id'],
reason='ipsec-conn-update')
def delete_ipsec_site_connection(self, context, ipsec_site_connection):
vpnservice = self.service_plugin._get_vpnservice(
context, ipsec_site_connection['vpnservice_id'])
self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'],
reason='ipsec-conn-delete')
def update_vpnservice(self, context, old_vpnservice, vpnservice):
self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'],
reason='vpn-service-update')
def delete_vpnservice(self, context, vpnservice):
self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'],
reason='vpn-service-delete')
def get_cisco_connection_mappings(self, conn_id, context):
"""Obtain persisted mappings for IDs related to connection."""
tunnel_id, ike_id, ipsec_id = csr_id_map.get_tunnel_mapping_for(
conn_id, context.session)
return {'site_conn_id': u'Tunnel%d' % tunnel_id,
'ike_policy_id': u'%d' % ike_id,
'ipsec_policy_id': u'%s' % ipsec_id}
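# Example (hypothetical IDs): for tunnel_id=5, ike_id=2, ipsec_id=8 the
# method above returns
#   {'site_conn_id': u'Tunnel5',
#    'ike_policy_id': u'2',
#    'ipsec_policy_id': u'8'}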
def _create_interface(self, interface_info):
hosting_info = interface_info['hosting_info']
vlan = hosting_info['segmentation_id']
# Port name "currently" is t{1,2}_p:1, as only one router per CSR,
# but will keep a semi-generic algorithm
port_name = hosting_info['hosting_port_name']
name, sep, num = port_name.partition(':')
offset = 1 if name in T2_PORT_NAME else 0
if_num = int(num) * 2 + offset
return 'GigabitEthernet%d.%d' % (if_num, vlan)
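# Examples (derived from the algorithm above; VLAN values are hypothetical):
#   hosting_port_name 't1_p:1', segmentation_id 100 -> 'GigabitEthernet2.100'
#   hosting_port_name 't2_p:1', segmentation_id 100 -> 'GigabitEthernet3.100'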
def _get_router_info(self, router_info):
hosting_device = router_info['hosting_device']
return {'rest_mgmt_ip': hosting_device['management_ip_address'],
'username': hosting_device['credentials']['username'],
'password': hosting_device['credentials']['password'],
'inner_if_name': self._create_interface(
router_info['_interfaces'][0]),
'outer_if_name': self._create_interface(
router_info['gw_port']),
'vrf': 'nrouter-' + router_info['id'][:VRF_SUFFIX_LEN],
'timeout': 30} # Hard-coded for now
def make_vpnservice_dict(self, context, vpnservice, router_info):
"""Collect all service info, including Cisco info for IPSec conn."""
vpnservice_dict = dict(vpnservice)
vpnservice_dict['ipsec_conns'] = []
vpnservice_dict['subnet'] = dict(vpnservice.subnet)
vpnservice_dict['router_info'] = self._get_router_info(router_info)
for ipsec_conn in vpnservice.ipsec_site_connections:
ipsec_conn_dict = dict(ipsec_conn)
ipsec_conn_dict['ike_policy'] = dict(ipsec_conn.ikepolicy)
ipsec_conn_dict['ipsec_policy'] = dict(ipsec_conn.ipsecpolicy)
ipsec_conn_dict['peer_cidrs'] = [
peer_cidr.cidr for peer_cidr in ipsec_conn.peer_cidrs]
ipsec_conn_dict['cisco'] = self.get_cisco_connection_mappings(
ipsec_conn['id'], context)
vpnservice_dict['ipsec_conns'].append(ipsec_conn_dict)
return vpnservice_dict
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/service_drivers/cisco_validator.py 0000664 0005670 0005671 00000012320 12701407726 032251 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from netaddr import core as net_exc
from neutron_lib import exceptions as nexception
from oslo_log import log as logging
from neutron_vpnaas._i18n import _
from neutron_vpnaas.db.vpn import vpn_validator
LIFETIME_LIMITS = {'IKE Policy': {'min': 60, 'max': 86400},
'IPSec Policy': {'min': 120, 'max': 2592000}}
MIN_CSR_MTU = 1500
MAX_CSR_MTU = 9192
LOG = logging.getLogger(__name__)
class CsrValidationFailure(nexception.BadRequest):
message = _("Cisco CSR does not support %(resource)s attribute %(key)s "
"with value '%(value)s'")
class CiscoCsrVpnValidator(vpn_validator.VpnReferenceValidator):
"""Validator methods for the Cisco CSR."""
def __init__(self, service_plugin):
self.service_plugin = service_plugin
super(CiscoCsrVpnValidator, self).__init__()
def validate_lifetime(self, for_policy, policy_info):
"""Ensure lifetime in secs and value is supported, based on policy."""
units = policy_info['lifetime']['units']
if units != 'seconds':
raise CsrValidationFailure(resource=for_policy,
key='lifetime:units',
value=units)
value = policy_info['lifetime']['value']
if (value < LIFETIME_LIMITS[for_policy]['min'] or
value > LIFETIME_LIMITS[for_policy]['max']):
raise CsrValidationFailure(resource=for_policy,
key='lifetime:value',
value=value)
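# Example (illustrative policy_info fragments): given the limits above,
#   {'lifetime': {'units': 'seconds', 'value': 3600}}
# passes for both policy types, while 'units' other than 'seconds', or an
# IKE Policy value below 60 / above 86400, raises CsrValidationFailure.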
def validate_ike_version(self, policy_info):
"""Ensure IKE policy is v1 for current REST API."""
version = policy_info['ike_version']
if version != 'v1':
raise CsrValidationFailure(resource='IKE Policy',
key='ike_version',
value=version)
def validate_mtu(self, conn_info):
"""Ensure the MTU value is supported."""
mtu = conn_info['mtu']
if mtu < MIN_CSR_MTU or mtu > MAX_CSR_MTU:
raise CsrValidationFailure(resource='IPSec Connection',
key='mtu',
value=mtu)
def validate_public_ip_present(self, router):
"""Ensure there is one gateway IP specified for the router used."""
gw_port = router.gw_port
if not gw_port or len(gw_port.fixed_ips) != 1:
raise CsrValidationFailure(resource='IPSec Connection',
key='router:gw_port:ip_address',
value='missing')
def validate_peer_id(self, ipsec_conn):
"""Ensure that an IP address is specified for peer ID."""
# TODO(pcm) Should we check peer_address too?
peer_id = ipsec_conn['peer_id']
try:
netaddr.IPAddress(peer_id)
except net_exc.AddrFormatError:
raise CsrValidationFailure(resource='IPSec Connection',
key='peer_id', value=peer_id)
def validate_ipsec_encap_mode(self, ipsec_policy):
"""Ensure IPSec policy encap mode is tunnel for current REST API."""
mode = ipsec_policy['encapsulation_mode']
if mode != 'tunnel':
raise CsrValidationFailure(resource='IPsec Policy',
key='encapsulation_mode',
value=mode)
def validate_ipsec_site_connection(self, context, ipsec_sitecon,
ip_version):
"""Validate IPSec site connection for Cisco CSR.
After doing reference validation, do additional checks that relate
to the Cisco CSR.
"""
super(CiscoCsrVpnValidator, self)._check_dpd(ipsec_sitecon)
ike_policy = self.service_plugin.get_ikepolicy(
context, ipsec_sitecon['ikepolicy_id'])
ipsec_policy = self.service_plugin.get_ipsecpolicy(
context, ipsec_sitecon['ipsecpolicy_id'])
vpn_service = self.service_plugin.get_vpnservice(
context, ipsec_sitecon['vpnservice_id'])
router = self.l3_plugin._get_router(context, vpn_service['router_id'])
self.validate_lifetime('IKE Policy', ike_policy)
self.validate_lifetime('IPSec Policy', ipsec_policy)
self.validate_ike_version(ike_policy)
self.validate_mtu(ipsec_sitecon)
self.validate_public_ip_present(router)
self.validate_peer_id(ipsec_sitecon)
self.validate_ipsec_encap_mode(ipsec_policy)
LOG.debug("IPSec connection validated for Cisco CSR")
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/service_drivers/__init__.py 0000664 0005670 0005671 00000007236 12701407726 030655 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron.common import rpc as n_rpc
from neutron import manager
from neutron.plugins.common import constants
from oslo_log import log as logging
import oslo_messaging
import six
from neutron_vpnaas.db.vpn import vpn_validator
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class VpnDriver(object):
def __init__(self, service_plugin, validator=None):
self.service_plugin = service_plugin
if validator is None:
validator = vpn_validator.VpnReferenceValidator()
self.validator = validator
@property
def l3_plugin(self):
return manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
@property
def service_type(self):
pass
@abc.abstractmethod
def create_vpnservice(self, context, vpnservice):
pass
@abc.abstractmethod
def update_vpnservice(
self, context, old_vpnservice, vpnservice):
pass
@abc.abstractmethod
def delete_vpnservice(self, context, vpnservice):
pass
@abc.abstractmethod
def create_ipsec_site_connection(self, context, ipsec_site_connection):
pass
@abc.abstractmethod
def update_ipsec_site_connection(self, context, old_ipsec_site_connection,
ipsec_site_connection):
pass
@abc.abstractmethod
def delete_ipsec_site_connection(self, context, ipsec_site_connection):
pass
class BaseIPsecVpnAgentApi(object):
"""Base class for IPSec API to agent."""
def __init__(self, topic, default_version, driver):
self.topic = topic
self.driver = driver
target = oslo_messaging.Target(topic=topic, version=default_version)
self.client = n_rpc.get_client(target)
def _agent_notification(self, context, method, router_id,
version=None, **kwargs):
"""Notify update for the agent.
This method finds the L3 agents hosting the router and
dispatches a notification to each of them.
"""
admin_context = context if context.is_admin else context.elevated()
if not version:
version = self.target.version
l3_agents = self.driver.l3_plugin.get_l3_agents_hosting_routers(
admin_context, [router_id],
admin_state_up=True,
active=True)
for l3_agent in l3_agents:
LOG.debug('Notify agent at %(topic)s.%(host)s the message '
'%(method)s %(args)s',
{'topic': self.topic,
'host': l3_agent.host,
'method': method,
'args': kwargs})
cctxt = self.client.prepare(server=l3_agent.host, version=version)
cctxt.cast(context, method, **kwargs)
def vpnservice_updated(self, context, router_id, **kwargs):
"""Send update event of vpnservices."""
kwargs['router'] = {'id': router_id}
self._agent_notification(context, 'vpnservice_updated', router_id,
**kwargs)
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/service_drivers/cisco_csr_db.py 0000664 0005670 0005671 00000023647 12701407726 031536 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db import model_base
from neutron_lib import exceptions as nexception
from oslo_db import exception as db_exc
from oslo_log import log as logging
import sqlalchemy as sa
from sqlalchemy.orm import exc as sql_exc
from neutron_vpnaas._i18n import _, _LI
from neutron_vpnaas.db.vpn import vpn_models
LOG = logging.getLogger(__name__)
# Note: Artificially limit these to reduce mapping table size and improve
# performance.
# Tunnel can be 0..7FFFFFFF, IKE policy can be 1..10000, IPSec policy can be
# 1..31 characters long.
MAX_CSR_TUNNELS = 10000
MAX_CSR_IKE_POLICIES = 2000
MAX_CSR_IPSEC_POLICIES = 2000
TUNNEL = 'Tunnel'
IKE_POLICY = 'IKE Policy'
IPSEC_POLICY = 'IPSec Policy'
MAPPING_LIMITS = {TUNNEL: (0, MAX_CSR_TUNNELS),
IKE_POLICY: (1, MAX_CSR_IKE_POLICIES),
IPSEC_POLICY: (1, MAX_CSR_IPSEC_POLICIES)}
class CsrInternalError(nexception.NeutronException):
message = _("Fatal - %(reason)s")
class IdentifierMap(model_base.BASEV2):
"""Maps OpenStack IDs to compatible numbers for Cisco CSR."""
__tablename__ = 'cisco_csr_identifier_map'
ipsec_site_conn_id = sa.Column(sa.String(36),
sa.ForeignKey('ipsec_site_connections.id',
ondelete="CASCADE"),
primary_key=True)
csr_tunnel_id = sa.Column(sa.Integer, nullable=False)
csr_ike_policy_id = sa.Column(sa.Integer, nullable=False)
csr_ipsec_policy_id = sa.Column(sa.Integer, nullable=False)
def get_next_available_id(session, table_field, id_type):
"""Find first unused id for the specified field in IdentifierMap table.
As entries are removed, find the first "hole" and return that as the
next available ID. To improve performance, artificially limit
the number of entries to a smaller range. Currently, these IDs are
globally unique. Could enhance in the future to be unique per router
(CSR).
"""
min_value = MAPPING_LIMITS[id_type][0]
max_value = MAPPING_LIMITS[id_type][1]
rows = session.query(table_field).order_by(table_field)
used_ids = set([row[0] for row in rows])
all_ids = set(range(min_value, max_value + min_value))
available_ids = all_ids - used_ids
if not available_ids:
msg = _("No available Cisco CSR %(type)s IDs from "
"%(min)d..%(max)d") % {'type': id_type,
'min': min_value,
'max': max_value}
LOG.error(msg)
raise IndexError(msg)
return available_ids.pop()
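# Example (hypothetical state): with the TUNNEL limits (0, 10000) and tunnel
# IDs {0, 1, 3} already mapped, the available set is {2, 4, 5, ...} and an
# unused ID such as 2 is handed out; once the whole range is in use,
# IndexError is raised.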
def get_next_available_tunnel_id(session):
"""Find first available tunnel ID from 0..MAX_CSR_TUNNELS-1."""
return get_next_available_id(session, IdentifierMap.csr_tunnel_id,
TUNNEL)
def get_next_available_ike_policy_id(session):
"""Find first available IKE Policy ID from 1..MAX_CSR_IKE_POLICIES."""
return get_next_available_id(session, IdentifierMap.csr_ike_policy_id,
IKE_POLICY)
def get_next_available_ipsec_policy_id(session):
"""Find first available IPSec Policy ID from 1..MAX_CSR_IKE_POLICIES."""
return get_next_available_id(session, IdentifierMap.csr_ipsec_policy_id,
IPSEC_POLICY)
def find_conn_with_policy(policy_field, policy_id, conn_id, session):
"""Return ID of another connection (if any) that uses same policy ID."""
qry = session.query(vpn_models.IPsecSiteConnection.id)
match = qry.filter(
policy_field == policy_id,
vpn_models.IPsecSiteConnection.id != conn_id).first()
if match:
return match[0]
def find_connection_using_ike_policy(ike_policy_id, conn_id, session):
"""Return ID of another connection that uses same IKE policy ID."""
return find_conn_with_policy(vpn_models.IPsecSiteConnection.ikepolicy_id,
ike_policy_id, conn_id, session)
def find_connection_using_ipsec_policy(ipsec_policy_id, conn_id, session):
"""Return ID of another connection that uses same IPSec policy ID."""
return find_conn_with_policy(vpn_models.IPsecSiteConnection.ipsecpolicy_id,
ipsec_policy_id, conn_id, session)
def lookup_policy(policy_type, policy_field, conn_id, session):
"""Obtain specified policy's mapping from other connection."""
try:
return session.query(policy_field).filter_by(
ipsec_site_conn_id=conn_id).one()[0]
except sql_exc.NoResultFound:
msg = _("Database inconsistency between IPSec connection and "
"Cisco CSR mapping table (%s)") % policy_type
raise CsrInternalError(reason=msg)
def lookup_ike_policy_id_for(conn_id, session):
"""Obtain existing Cisco CSR IKE policy ID from another connection."""
return lookup_policy(IKE_POLICY, IdentifierMap.csr_ike_policy_id,
conn_id, session)
def lookup_ipsec_policy_id_for(conn_id, session):
"""Obtain existing Cisco CSR IPSec policy ID from another connection."""
return lookup_policy(IPSEC_POLICY, IdentifierMap.csr_ipsec_policy_id,
conn_id, session)
def determine_csr_policy_id(policy_type, conn_policy_field, map_policy_field,
policy_id, conn_id, session):
"""Use existing or reserve a new policy ID for Cisco CSR use.
TODO(pcm) FUTURE: Once device driver adds support for IKE/IPSec policy
ID sharing, add call to find_conn_with_policy() to find used ID and
then call lookup_policy() to find the current mapping for that ID.
"""
csr_id = get_next_available_id(session, map_policy_field, policy_type)
LOG.debug("Reserved new CSR ID %(csr_id)d for %(policy)s "
"ID %(policy_id)s", {'csr_id': csr_id,
'policy': policy_type,
'policy_id': policy_id})
return csr_id
def determine_csr_ike_policy_id(ike_policy_id, conn_id, session):
"""Use existing, or reserve a new IKE policy ID for Cisco CSR."""
return determine_csr_policy_id(IKE_POLICY,
vpn_models.IPsecSiteConnection.ikepolicy_id,
IdentifierMap.csr_ike_policy_id,
ike_policy_id, conn_id, session)
def determine_csr_ipsec_policy_id(ipsec_policy_id, conn_id, session):
"""Use existing, or reserve a new IPSec policy ID for Cisco CSR."""
return determine_csr_policy_id(
IPSEC_POLICY,
vpn_models.IPsecSiteConnection.ipsecpolicy_id,
IdentifierMap.csr_ipsec_policy_id,
ipsec_policy_id, conn_id, session)
def get_tunnel_mapping_for(conn_id, session):
try:
entry = session.query(IdentifierMap).filter_by(
ipsec_site_conn_id=conn_id).one()
LOG.debug("Mappings for IPSec connection %(conn)s - "
"tunnel=%(tunnel)s ike_policy=%(csr_ike)d "
"ipsec_policy=%(csr_ipsec)d",
{'conn': conn_id, 'tunnel': entry.csr_tunnel_id,
'csr_ike': entry.csr_ike_policy_id,
'csr_ipsec': entry.csr_ipsec_policy_id})
return (entry.csr_tunnel_id, entry.csr_ike_policy_id,
entry.csr_ipsec_policy_id)
except sql_exc.NoResultFound:
msg = _("Existing entry for IPSec connection %s not found in Cisco "
"CSR mapping table") % conn_id
raise CsrInternalError(reason=msg)
def create_tunnel_mapping(context, conn_info):
"""Create Cisco CSR IDs, using mapping table and OpenStack UUIDs."""
conn_id = conn_info['id']
ike_policy_id = conn_info['ikepolicy_id']
ipsec_policy_id = conn_info['ipsecpolicy_id']
tenant_id = conn_info['tenant_id']
with context.session.begin():
csr_tunnel_id = get_next_available_tunnel_id(context.session)
csr_ike_id = determine_csr_ike_policy_id(ike_policy_id, conn_id,
context.session)
csr_ipsec_id = determine_csr_ipsec_policy_id(ipsec_policy_id, conn_id,
context.session)
map_entry = IdentifierMap(tenant_id=tenant_id,
ipsec_site_conn_id=conn_id,
csr_tunnel_id=csr_tunnel_id,
csr_ike_policy_id=csr_ike_id,
csr_ipsec_policy_id=csr_ipsec_id)
try:
context.session.add(map_entry)
# Force committing to database
context.session.flush()
except db_exc.DBDuplicateEntry:
msg = _("Attempt to create duplicate entry in Cisco CSR "
"mapping table for connection %s") % conn_id
raise CsrInternalError(reason=msg)
LOG.info(_LI("Mapped connection %(conn_id)s to Tunnel%(tunnel_id)d "
"using IKE policy ID %(ike_id)d and IPSec policy "
"ID %(ipsec_id)d"),
{'conn_id': conn_id, 'tunnel_id': csr_tunnel_id,
'ike_id': csr_ike_id, 'ipsec_id': csr_ipsec_id})
def delete_tunnel_mapping(context, conn_info):
conn_id = conn_info['id']
with context.session.begin():
sess_qry = context.session.query(IdentifierMap)
sess_qry.filter_by(ipsec_site_conn_id=conn_id).delete()
LOG.info(_LI("Removed mapping for connection %s"), conn_id)
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/service_drivers/ipsec_validator.py 0000664 0005670 0005671 00000003354 12701407726 032263 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2015 Awcloud Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import exceptions as nexception
from neutron_vpnaas._i18n import _
from neutron_vpnaas.db.vpn import vpn_validator
class IpsecValidationFailure(nexception.BadRequest):
message = _("IPSec does not support %(resource)s attribute %(key)s "
"with value '%(value)s'")
class IpsecVpnValidator(vpn_validator.VpnReferenceValidator):
"""Validator methods for the Openswan, Strongswan and Libreswan."""
def __init__(self, service_plugin):
self.service_plugin = service_plugin
super(IpsecVpnValidator, self).__init__()
def validate_ipsec_policy(self, context, ipsec_policy):
"""Restrict selecting ah-esp as IPSec Policy transform protocol.
For those *Swan implementations, the 'ah-esp' transform protocol
is not supported and therefore the request should be rejected.
"""
transform_protocol = ipsec_policy.get('transform_protocol')
if transform_protocol == "ah-esp":
raise IpsecValidationFailure(
resource='IPsec Policy',
key='transform_protocol',
value=transform_protocol)
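# Example (illustrative): an ipsec_policy containing
#   {'transform_protocol': 'ah-esp', ...}
# is rejected with IpsecValidationFailure, while other values such as 'esp'
# pass this check.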
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/service_drivers/ipsec.py 0000664 0005670 0005671 00000003105 12701407726 030210 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2015, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import rpc as n_rpc
from neutron_vpnaas.services.vpn.common import topics
from neutron_vpnaas.services.vpn.service_drivers import base_ipsec
from neutron_vpnaas.services.vpn.service_drivers import ipsec_validator
IPSEC = 'ipsec'
BASE_IPSEC_VERSION = '1.0'
class IPsecVPNDriver(base_ipsec.BaseIPsecVPNDriver):
"""VPN Service Driver class for IPsec."""
def __init__(self, service_plugin):
super(IPsecVPNDriver, self).__init__(
service_plugin,
ipsec_validator.IpsecVpnValidator(service_plugin))
def create_rpc_conn(self):
self.endpoints = [base_ipsec.IPsecVpnDriverCallBack(self)]
self.conn = n_rpc.create_connection()
self.conn.create_consumer(
topics.IPSEC_DRIVER_TOPIC, self.endpoints, fanout=False)
self.conn.consume_in_threads()
self.agent_rpc = base_ipsec.IPsecVpnAgentApi(
topics.IPSEC_AGENT_TOPIC, BASE_IPSEC_VERSION, self)
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/service_drivers/vyatta_ipsec.py 0000664 0005670 0005671 00000002644 12701407726 031607 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2015 Brocade Communications System, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron.common import rpc as n_rpc
from neutron_vpnaas.services.vpn.common import topics
from neutron_vpnaas.services.vpn.service_drivers import base_ipsec
IPSEC = 'ipsec'
BASE_IPSEC_VERSION = '1.0'
class VyattaIPsecDriver(base_ipsec.BaseIPsecVPNDriver):
def __init__(self, service_plugin):
super(VyattaIPsecDriver, self).__init__(service_plugin)
def create_rpc_conn(self):
self.endpoints = [base_ipsec.IPsecVpnDriverCallBack(self)]
self.conn = n_rpc.create_connection()
self.conn.create_consumer(
topics.BROCADE_IPSEC_DRIVER_TOPIC, self.endpoints, fanout=False)
self.conn.consume_in_threads()
self.agent_rpc = base_ipsec.IPsecVpnAgentApi(
topics.BROCADE_IPSEC_AGENT_TOPIC, BASE_IPSEC_VERSION, self)
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/service_drivers/base_ipsec.py 0000664 0005670 0005671 00000021651 12701407726 031210 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2015, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import netaddr
import oslo_messaging
import six
from neutron_vpnaas.services.vpn import service_drivers
IPSEC = 'ipsec'
BASE_IPSEC_VERSION = '1.0'
class IPsecVpnDriverCallBack(object):
"""Callback for IPSecVpnDriver rpc."""
# history
# 1.0 Initial version
target = oslo_messaging.Target(version=BASE_IPSEC_VERSION)
def __init__(self, driver):
super(IPsecVpnDriverCallBack, self).__init__()
self.driver = driver
def get_vpn_services_on_host(self, context, host=None):
"""Returns the vpnservices on the host."""
plugin = self.driver.service_plugin
vpnservices = plugin._get_agent_hosting_vpn_services(
context, host)
local_cidr_map = plugin._build_local_subnet_cidr_map(context)
return [self.driver.make_vpnservice_dict(vpnservice, local_cidr_map)
for vpnservice in vpnservices]
def update_status(self, context, status):
"""Update status of vpnservices."""
plugin = self.driver.service_plugin
plugin.update_status_by_agent(context, status)
class IPsecVpnAgentApi(service_drivers.BaseIPsecVpnAgentApi):
"""Agent RPC API for IPsecVPNAgent."""
target = oslo_messaging.Target(version=BASE_IPSEC_VERSION)
def __init__(self, topic, default_version, driver):
super(IPsecVpnAgentApi, self).__init__(
topic, default_version, driver)
@six.add_metaclass(abc.ABCMeta)
class BaseIPsecVPNDriver(service_drivers.VpnDriver):
"""Base VPN Service Driver class."""
def __init__(self, service_plugin, validator=None):
super(BaseIPsecVPNDriver, self).__init__(service_plugin, validator)
self.create_rpc_conn()
@property
def service_type(self):
return IPSEC
@abc.abstractmethod
def create_rpc_conn(self):
pass
def create_ipsec_site_connection(self, context, ipsec_site_connection):
vpnservice = self.service_plugin._get_vpnservice(
context, ipsec_site_connection['vpnservice_id'])
self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'])
def update_ipsec_site_connection(
self, context, old_ipsec_site_connection, ipsec_site_connection):
vpnservice = self.service_plugin._get_vpnservice(
context, ipsec_site_connection['vpnservice_id'])
self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'])
def delete_ipsec_site_connection(self, context, ipsec_site_connection):
vpnservice = self.service_plugin._get_vpnservice(
context, ipsec_site_connection['vpnservice_id'])
self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'])
def create_ikepolicy(self, context, ikepolicy):
pass
def delete_ikepolicy(self, context, ikepolicy):
pass
def update_ikepolicy(self, context, old_ikepolicy, ikepolicy):
pass
def create_ipsecpolicy(self, context, ipsecpolicy):
pass
def delete_ipsecpolicy(self, context, ipsecpolicy):
pass
def update_ipsecpolicy(self, context, old_ipsec_policy, ipsecpolicy):
pass
def _get_gateway_ips(self, router):
"""Obtain the IPv4 and/or IPv6 GW IP for the router.
If there are multiples, (arbitrarily) use the first one.
"""
v4_ip = v6_ip = None
for fixed_ip in router.gw_port['fixed_ips']:
addr = fixed_ip['ip_address']
vers = netaddr.IPAddress(addr).version
if vers == 4:
if v4_ip is None:
v4_ip = addr
elif v6_ip is None:
v6_ip = addr
return v4_ip, v6_ip
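# Example (hypothetical gw_port fixed_ips): for
#   [{'ip_address': '172.24.4.5'}, {'ip_address': '2001:db8::5'}]
# the method above returns ('172.24.4.5', '2001:db8::5'); when only one
# address family is present, the other element is None.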
def create_vpnservice(self, context, vpnservice_dict):
"""Get the gateway IP(s) and save for later use.
For the reference implementation, this side's tunnel IP (external_ip)
will be the router's GW IP. IPSec connections will use a GW IP of
the same version, as is used for the peer, so we will collect the
first IP for each version (if they exist) and save them.
"""
vpnservice = self.service_plugin._get_vpnservice(context,
vpnservice_dict['id'])
v4_ip, v6_ip = self._get_gateway_ips(vpnservice.router)
vpnservice_dict['external_v4_ip'] = v4_ip
vpnservice_dict['external_v6_ip'] = v6_ip
self.service_plugin.set_external_tunnel_ips(context,
vpnservice_dict['id'],
v4_ip=v4_ip, v6_ip=v6_ip)
def update_vpnservice(self, context, old_vpnservice, vpnservice):
self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'])
def delete_vpnservice(self, context, vpnservice):
self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'])
def get_external_ip_based_on_peer(self, vpnservice, ipsec_site_con):
"""Use service's external IP, based on peer IP version."""
vers = netaddr.IPAddress(ipsec_site_con['peer_address']).version
if vers == 4:
ip_to_use = vpnservice.external_v4_ip
else:
ip_to_use = vpnservice.external_v6_ip
# TODO(pcm): Add validator to check that connection's peer address has
# a version that is available in service table, so can fail early and
# don't need a check here.
return ip_to_use
def make_vpnservice_dict(self, vpnservice, local_cidr_map):
"""Convert vpnservice information for vpn agent.
also converting parameter name for vpn agent driver
"""
vpnservice_dict = dict(vpnservice)
vpnservice_dict['ipsec_site_connections'] = []
if vpnservice.subnet:
vpnservice_dict['subnet'] = dict(vpnservice.subnet)
else:
vpnservice_dict['subnet'] = None
# NOTE: The following is used for rolling upgrades, where the agent may
# be at version N and the server at N+1. We need to populate the subnet
# with (only) the CIDR from the first connection's local endpoint
# group.
subnet_cidr = None
# Not removing external_ip from vpnservice_dict, as some providers
# may still be using it from vpnservice_dict. Will use whichever IP
# is specified.
vpnservice_dict['external_ip'] = (
vpnservice.external_v4_ip or vpnservice.external_v6_ip)
for ipsec_site_connection in vpnservice.ipsec_site_connections:
ipsec_site_connection_dict = dict(ipsec_site_connection)
try:
netaddr.IPAddress(ipsec_site_connection_dict['peer_id'])
except netaddr.core.AddrFormatError:
ipsec_site_connection_dict['peer_id'] = (
'@' + ipsec_site_connection_dict['peer_id'])
ipsec_site_connection_dict['ikepolicy'] = dict(
ipsec_site_connection.ikepolicy)
ipsec_site_connection_dict['ipsecpolicy'] = dict(
ipsec_site_connection.ipsecpolicy)
vpnservice_dict['ipsec_site_connections'].append(
ipsec_site_connection_dict)
if vpnservice.subnet:
local_cidrs = [vpnservice.subnet.cidr]
peer_cidrs = [
peer_cidr.cidr
for peer_cidr in ipsec_site_connection.peer_cidrs]
else:
local_cidrs = [local_cidr_map[ep.endpoint]
for ep in ipsec_site_connection.local_ep_group.endpoints]
peer_cidrs = [
ep.endpoint
for ep in ipsec_site_connection.peer_ep_group.endpoints]
if not subnet_cidr:
epg = ipsec_site_connection.local_ep_group
subnet_cidr = local_cidr_map[epg.endpoints[0].endpoint]
ipsec_site_connection_dict['peer_cidrs'] = peer_cidrs
ipsec_site_connection_dict['local_cidrs'] = local_cidrs
ipsec_site_connection_dict['local_ip_vers'] = netaddr.IPNetwork(
local_cidrs[0]).version
ipsec_site_connection_dict['external_ip'] = (
self.get_external_ip_based_on_peer(vpnservice,
ipsec_site_connection_dict))
if not vpnservice.subnet:
vpnservice_dict['subnet'] = {'cidr': subnet_cidr}
return vpnservice_dict
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/vpn_service.py 0000664 0005670 0005671 00000006243 12701407726 026240 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.services import provider_configuration as provconfig
from oslo_log import log as logging
from oslo_utils import importutils
from neutron_vpnaas.extensions import vpnaas
LOG = logging.getLogger(__name__)
DEVICE_DRIVERS = 'device_drivers'
class VPNService(object):
"""VPN Service observer."""
def __init__(self, l3_agent):
"""Creates a VPN Service instance with context."""
# TODO(pc_m): Replace l3_agent argument with config, once none of the
# device driver implementations need the L3 agent.
self.conf = l3_agent.conf
registry.subscribe(
router_added_actions, resources.ROUTER, events.AFTER_CREATE)
registry.subscribe(
router_removed_actions, resources.ROUTER, events.AFTER_DELETE)
registry.subscribe(
router_updated_actions, resources.ROUTER, events.AFTER_UPDATE)
def load_device_drivers(self, host):
"""Loads one or more device drivers for VPNaaS."""
drivers = []
for device_driver in self.conf.vpnagent.vpn_device_driver:
device_driver = provconfig.get_provider_driver_class(
device_driver, DEVICE_DRIVERS)
try:
drivers.append(importutils.import_object(device_driver,
self,
host))
LOG.debug('Loaded VPNaaS device driver: %s', device_driver)
except ImportError:
raise vpnaas.DeviceDriverImportError(
device_driver=device_driver)
return drivers
def router_added_actions(resource, event, l3_agent, **kwargs):
"""Create the router and sync for each loaded device driver."""
router = kwargs['router']
for device_driver in l3_agent.device_drivers:
device_driver.create_router(router)
device_driver.sync(l3_agent.context, [router.router])
def router_removed_actions(resource, event, l3_agent, **kwargs):
"""Remove the router from each loaded device driver."""
router = kwargs['router']
for device_driver in l3_agent.device_drivers:
device_driver.destroy_router(router.router_id)
def router_updated_actions(resource, event, l3_agent, **kwargs):
"""Perform a sync on each loaded device driver."""
router = kwargs['router']
for device_driver in l3_agent.device_drivers:
device_driver.sync(l3_agent.context, [router.router])
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/ 0000775 0005670 0005671 00000000000 12701410103 026312 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/libreswan_ipsec.py 0000664 0005670 0005671 00000005272 12701407726 032064 0 ustar jenkins jenkins 0000000 0000000 # Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import os.path
from neutron_vpnaas.services.vpn.device_drivers import ipsec
class LibreSwanProcess(ipsec.OpenSwanProcess):
"""Libreswan Process manager class.
Libreswan needs the nssdb initialised before running the pluto daemon.
"""
def __init__(self, conf, process_id, vpnservice, namespace):
super(LibreSwanProcess, self).__init__(conf, process_id,
vpnservice, namespace)
def ensure_configs(self):
"""Generate config files which are needed for Libreswan.
Initialise the nssdb, otherwise pluto daemon will fail to run.
"""
# Since we set ipsec.secrets to be owned by root, the standard
# mechanisms for setting up the config files will get a permission
# problem when attempting to overwrite the file, so we need to
# remove it first.
secrets_file = self._get_config_filename('ipsec.secrets')
if os.path.exists(secrets_file):
os.remove(secrets_file)
super(LibreSwanProcess, self).ensure_configs()
# LibreSwan uses the capabilities library to restrict access to
# ipsec.secrets to users that have explicit access. Since pluto is
# running as root and the file has 0600 perms, we must set the
# owner of the file to root.
self._execute(['chown', '--from=%s' % os.getuid(), 'root:root',
secrets_file])
# Load the ipsec kernel module if not loaded
self._execute([self.binary, '_stackmanager', 'start'])
# checknss creates nssdb only if it is missing
# It is added in Libreswan version v3.10
# For prior versions use initnss
try:
self._execute([self.binary, 'checknss', self.etc_dir])
except RuntimeError:
self._execute([self.binary, 'initnss', self.etc_dir])
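# Roughly equivalent shell steps for ensure_configs() above (sketch only;
# assumes self.binary resolves to the 'ipsec' command as in the parent
# OpenSwan driver, and <config-dir> is the per-process config directory):
#   chown --from=<current-uid> root:root <config-dir>/etc/ipsec.secrets
#   ipsec _stackmanager start
#   ipsec checknss <config-dir>/etc   # 'ipsec initnss' on Libreswan < v3.10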
class LibreSwanDriver(ipsec.IPsecDriver):
def create_process(self, process_id, vpnservice, namespace):
return LibreSwanProcess(
self.conf,
process_id,
vpnservice,
namespace)
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/cisco_csr_rest_client.py 0000664 0005670 0005671 00000030660 12701407726 033254 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import netaddr
from oslo_log import log as logging
from oslo_serialization import jsonutils
import requests
from requests import exceptions as r_exc
from neutron_vpnaas._i18n import _LE, _LW
TIMEOUT = 20.0
LOG = logging.getLogger(__name__)
HEADER_CONTENT_TYPE_JSON = {'content-type': 'application/json'}
URL_BASE = 'https://%(host)s/api/v1/%(resource)s'
# CSR REST API URIs
URI_VPN_IPSEC_POLICIES = 'vpn-svc/ipsec/policies'
URI_VPN_IPSEC_POLICIES_ID = URI_VPN_IPSEC_POLICIES + '/%s'
URI_VPN_IKE_POLICIES = 'vpn-svc/ike/policies'
URI_VPN_IKE_POLICIES_ID = URI_VPN_IKE_POLICIES + '/%s'
URI_VPN_IKE_KEYRINGS = 'vpn-svc/ike/keyrings'
URI_VPN_IKE_KEYRINGS_ID = URI_VPN_IKE_KEYRINGS + '/%s'
URI_VPN_IKE_KEEPALIVE = 'vpn-svc/ike/keepalive'
URI_VPN_SITE_TO_SITE = 'vpn-svc/site-to-site'
URI_VPN_SITE_TO_SITE_ID = URI_VPN_SITE_TO_SITE + '/%s'
URI_VPN_SITE_TO_SITE_STATE = URI_VPN_SITE_TO_SITE + '/%s/state'
URI_VPN_SITE_ACTIVE_SESSIONS = URI_VPN_SITE_TO_SITE + '/active/sessions'
URI_ROUTING_STATIC_ROUTES = 'routing-svc/static-routes'
URI_ROUTING_STATIC_ROUTES_ID = URI_ROUTING_STATIC_ROUTES + '/%s'
def make_route_id(cidr, interface):
"""Build ID that will be used to identify route for later deletion."""
net = netaddr.IPNetwork(cidr)
return '%(network)s_%(prefix)s_%(interface)s' % {
'network': net.network,
'prefix': net.prefixlen,
'interface': interface}
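# Example (hypothetical arguments):
#   make_route_id('10.1.0.0/24', 'GigabitEthernet2.100')
#   returns '10.1.0.0_24_GigabitEthernet2.100'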
class CsrRestClient(object):
"""REST CsrRestClient for accessing the Cisco Cloud Services Router."""
def __init__(self, settings):
self.port = str(settings.get('protocol_port', 55443))
self.host = ':'.join([settings.get('rest_mgmt_ip', ''), self.port])
self.auth = (settings['username'], settings['password'])
self.inner_if_name = settings.get('inner_if_name', '')
self.outer_if_name = settings.get('outer_if_name', '')
self.token = None
self.vrf = settings.get('vrf', '')
self.vrf_prefix = 'vrf/%s/' % self.vrf if self.vrf else ""
self.status = requests.codes.OK
self.timeout = settings.get('timeout')
self.max_tries = 5
self.session = requests.Session()
def _response_info_for(self, response, method):
"""Return contents or location from response.
For a POST or GET with a 200 response, the response content
is returned.
For a POST with a 201 response, return the header's location,
which contains the identifier for the created resource.
If there is an error, return the response content, so that
it can be used in error processing ('error-code', 'error-message',
and 'detail' fields).
"""
if method in ('POST', 'GET') and self.status == requests.codes.OK:
LOG.debug('RESPONSE: %s', response.json())
return response.json()
if method == 'POST' and self.status == requests.codes.CREATED:
return response.headers.get('location', '')
if self.status >= requests.codes.BAD_REQUEST and response.content:
if b'error-code' in response.content:
content = jsonutils.loads(response.content)
LOG.debug("Error response content %s", content)
return content
def _request(self, method, url, **kwargs):
"""Perform REST request and save response info."""
try:
LOG.debug("%(method)s: Request for %(resource)s payload: "
"%(payload)s",
{'method': method.upper(), 'resource': url,
'payload': kwargs.get('data')})
start_time = time.time()
response = self.session.request(method, url, verify=False,
timeout=self.timeout, **kwargs)
LOG.debug("%(method)s Took %(time).2f seconds to process",
{'method': method.upper(),
'time': time.time() - start_time})
except (r_exc.Timeout, r_exc.SSLError) as te:
# Should never see SSLError, unless requests package is old (<2.0)
timeout_val = 0.0 if self.timeout is None else self.timeout
LOG.warning(_LW("%(method)s: Request timeout%(ssl)s "
"(%(timeout).3f sec) for CSR(%(host)s)"),
{'method': method,
'timeout': timeout_val,
'ssl': '(SSLError)'
if isinstance(te, r_exc.SSLError) else '',
'host': self.host})
self.status = requests.codes.REQUEST_TIMEOUT
except r_exc.ConnectionError:
LOG.exception(_LE("%(method)s: Unable to connect to "
"CSR(%(host)s)"),
{'method': method, 'host': self.host})
self.status = requests.codes.NOT_FOUND
except Exception as e:
LOG.error(_LE("%(method)s: Unexpected error for CSR (%(host)s): "
"%(error)s"),
{'method': method, 'host': self.host, 'error': e})
self.status = requests.codes.INTERNAL_SERVER_ERROR
else:
self.status = response.status_code
LOG.debug("%(method)s: Completed [%(status)s]",
{'method': method, 'status': self.status})
return self._response_info_for(response, method)
def authenticate(self):
"""Obtain a token to use for subsequent CSR REST requests.
This is called when there is no token yet, or if the token has expired
and attempts to use it resulted in an UNAUTHORIZED REST response.
"""
url = URL_BASE % {'host': self.host, 'resource': 'auth/token-services'}
headers = {'Content-Length': '0',
'Accept': 'application/json'}
headers.update(HEADER_CONTENT_TYPE_JSON)
LOG.debug("%(auth)s with CSR %(host)s",
{'auth': 'Authenticating' if self.token is None
else 'Reauthenticating', 'host': self.host})
self.token = None
response = self._request("POST", url, headers=headers, auth=self.auth)
if response:
self.token = response['token-id']
LOG.debug("Successfully authenticated with CSR %s", self.host)
return True
LOG.error(_LE("Failed authentication with CSR %(host)s [%(status)s]"),
{'host': self.host, 'status': self.status})
def _do_request(self, method, resource, payload=None, more_headers=None,
full_url=False):
"""Perform a REST request to a CSR resource.
If this is the first time interacting with the CSR, a token will
be obtained. If the request fails, due to an expired token, the
token will be obtained and the request will be retried once more.
"""
if self.token is None:
if not self.authenticate():
return
if full_url:
url = resource
else:
url = ('https://%(host)s/api/v1/%(resource)s' %
{'host': self.host, 'resource': resource})
headers = {'Accept': 'application/json', 'X-auth-token': self.token}
if more_headers:
headers.update(more_headers)
if payload:
payload = jsonutils.dumps(payload)
response = self._request(method, url, data=payload, headers=headers)
if self.status == requests.codes.UNAUTHORIZED:
if not self.authenticate():
return
headers['X-auth-token'] = self.token
response = self._request(method, url, data=payload,
headers=headers)
if self.status != requests.codes.REQUEST_TIMEOUT:
return response
LOG.error(_LE("%(method)s: Request timeout for CSR(%(host)s)"),
{'method': method, 'host': self.host})
def get_request(self, resource, full_url=False):
"""Perform a REST GET requests for a CSR resource."""
return self._do_request('GET', resource, full_url=full_url)
def post_request(self, resource, payload=None):
"""Perform a POST request to a CSR resource."""
return self._do_request('POST', resource, payload=payload,
more_headers=HEADER_CONTENT_TYPE_JSON)
def put_request(self, resource, payload=None):
"""Perform a PUT request to a CSR resource."""
return self._do_request('PUT', resource, payload=payload,
more_headers=HEADER_CONTENT_TYPE_JSON)
def delete_request(self, resource):
"""Perform a DELETE request on a CSR resource."""
return self._do_request('DELETE', resource,
more_headers=HEADER_CONTENT_TYPE_JSON)
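# A minimal usage sketch of the generic helpers above; the resource path
# and surrounding calls are hypothetical and only illustrate the flow
# (authentication and a single retry on an expired token happen inside
# _do_request, so callers just inspect self.status afterwards):
#
#     client = CsrRestClient(settings)            # settings dict, as elsewhere
#     info = client.get_request('some/resource')  # hypothetical resource path
#     if client.status == requests.codes.OK:
#         ...use info...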
# VPN Specific APIs
def create_ike_policy(self, policy_info):
base_ike_policy_info = {u'version': u'v1',
u'local-auth-method': u'pre-share'}
base_ike_policy_info.update(policy_info)
return self.post_request(URI_VPN_IKE_POLICIES,
payload=base_ike_policy_info)
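# Illustrative caller-supplied payload for create_ike_policy() (values are
# examples only); it is merged with the base v1/pre-share settings above,
# and the keys mirror those built by the Cisco device driver:
#
#     {u'priority-id': u'2', u'encryption': u'aes', u'hash': u'sha',
#      u'dhGroup': 5, u'lifetime': 3600}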
def create_ipsec_policy(self, policy_info):
base_ipsec_policy_info = {u'mode': u'tunnel'}
base_ipsec_policy_info.update(policy_info)
return self.post_request(URI_VPN_IPSEC_POLICIES,
payload=base_ipsec_policy_info)
def create_pre_shared_key(self, psk_info):
return self.post_request(self.vrf_prefix + URI_VPN_IKE_KEYRINGS,
payload=psk_info)
def create_ipsec_connection(self, connection_info):
base_conn_info = {
u'vpn-type': u'site-to-site',
u'ip-version': u'ipv4',
u'local-device': {
u'tunnel-ip-address': self.outer_if_name,
u'ip-address': self.inner_if_name
}
}
connection_info.update(base_conn_info)
if self.vrf:
connection_info[u'tunnel-vrf'] = self.vrf
return self.post_request(self.vrf_prefix + URI_VPN_SITE_TO_SITE,
payload=connection_info)
def configure_ike_keepalive(self, keepalive_info):
base_keepalive_info = {u'periodic': True}
keepalive_info.update(base_keepalive_info)
return self.put_request(URI_VPN_IKE_KEEPALIVE, keepalive_info)
def create_static_route(self, route_info):
return self.post_request(self.vrf_prefix + URI_ROUTING_STATIC_ROUTES,
payload=route_info)
def delete_static_route(self, route_id):
return self.delete_request(
self.vrf_prefix + URI_ROUTING_STATIC_ROUTES_ID % route_id)
def set_ipsec_connection_state(self, tunnel, admin_up=True):
"""Set the IPSec site-to-site connection (tunnel) admin state.
Note: When a tunnel is created, it will be admin up.
"""
info = {u'vpn-interface-name': tunnel, u'enabled': admin_up}
return self.put_request(
self.vrf_prefix + URI_VPN_SITE_TO_SITE_STATE % tunnel, info)
def delete_ipsec_connection(self, conn_id):
return self.delete_request(
self.vrf_prefix + URI_VPN_SITE_TO_SITE_ID % conn_id)
def delete_ipsec_policy(self, policy_id):
return self.delete_request(URI_VPN_IPSEC_POLICIES_ID % policy_id)
def delete_ike_policy(self, policy_id):
return self.delete_request(URI_VPN_IKE_POLICIES_ID % policy_id)
def delete_pre_shared_key(self, key_id):
return self.delete_request(
self.vrf_prefix + URI_VPN_IKE_KEYRINGS_ID % key_id)
def read_tunnel_statuses(self):
results = self.get_request(self.vrf_prefix +
URI_VPN_SITE_ACTIVE_SESSIONS)
if self.status != requests.codes.OK or not results:
return []
tunnels = [(t[u'vpn-interface-name'], t[u'status'])
for t in results['items']]
return tunnels
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/cisco_ipsec.py 0000664 0005670 0005671 00000100465 12701407726 031176 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import requests
from neutron.common import rpc as n_rpc
from neutron import context as ctx
from neutron.plugins.common import constants
from neutron.plugins.common import utils as plugin_utils
from neutron_lib import exceptions as nexception
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from neutron_vpnaas._i18n import _, _LE, _LI, _LW
from neutron_vpnaas.services.vpn.common import topics
from neutron_vpnaas.services.vpn import device_drivers
from neutron_vpnaas.services.vpn.device_drivers import (
cisco_csr_rest_client as csr_client)
ipsec_opts = [
cfg.IntOpt('status_check_interval',
default=60,
help=_("Status check interval for Cisco CSR IPSec connections"))
]
cfg.CONF.register_opts(ipsec_opts, 'cisco_csr_ipsec')
LOG = logging.getLogger(__name__)
RollbackStep = collections.namedtuple('RollbackStep',
['action', 'resource_id', 'title'])
class CsrResourceCreateFailure(nexception.NeutronException):
message = _("Cisco CSR failed to create %(resource)s (%(which)s)")
class CsrAdminStateChangeFailure(nexception.NeutronException):
message = _("Cisco CSR failed to change %(tunnel)s admin state to "
"%(state)s")
class CsrDriverMismatchError(nexception.NeutronException):
message = _("Required %(resource)s attribute %(attr)s mapping for Cisco "
"CSR is missing in device driver")
class CsrUnknownMappingError(nexception.NeutronException):
message = _("Device driver does not have a mapping of '%(value)s for "
"attribute %(attr)s of %(resource)s")
class CiscoCsrIPsecVpnDriverApi(object):
"""RPC API for agent to plugin messaging."""
def __init__(self, topic):
target = oslo_messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
def get_vpn_services_on_host(self, context, host):
"""Get list of vpnservices on this host.
The vpnservices include the related ipsec_site_connection,
ikepolicy, ipsecpolicy, and Cisco info on this host.
"""
cctxt = self.client.prepare()
return cctxt.call(context, 'get_vpn_services_on_host', host=host)
def update_status(self, context, status):
"""Update status for all VPN services and connections."""
cctxt = self.client.prepare()
return cctxt.call(context, 'update_status', status=status)
class CiscoCsrIPsecDriver(device_drivers.DeviceDriver):
"""Cisco CSR VPN Device Driver for IPSec.
This class is designed for use with the L3 agent for now.
However, this driver may be used with another agent in the future,
so the use of "Router" is kept minimal for now.
Instead of router_id, we are using process_id in this code.
"""
# history
# 1.0 Initial version
target = oslo_messaging.Target(version='1.0')
def __init__(self, vpn_service, host):
# TODO(pc_m): Once all driver implementations no longer need
# vpn_service argument, replace with just config argument.
self.host = host
self.conn = n_rpc.create_connection()
context = ctx.get_admin_context_without_session()
node_topic = '%s.%s' % (topics.CISCO_IPSEC_AGENT_TOPIC, self.host)
self.service_state = {}
self.endpoints = [self]
self.conn.create_consumer(node_topic, self.endpoints, fanout=False)
self.conn.consume_in_threads()
self.agent_rpc = (
CiscoCsrIPsecVpnDriverApi(topics.CISCO_IPSEC_DRIVER_TOPIC))
self.periodic_report = loopingcall.FixedIntervalLoopingCall(
self.report_status, context)
self.periodic_report.start(
interval=vpn_service.conf.cisco_csr_ipsec.status_check_interval)
LOG.debug("Device driver initialized for %s", node_topic)
def vpnservice_updated(self, context, **kwargs):
"""Handle VPNaaS service driver change notifications."""
LOG.debug("Handling VPN service update notification '%s'",
kwargs.get('reason', ''))
self.sync(context, [])
def create_vpn_service(self, service_data):
"""Create new entry to track VPN service and its connections."""
csr = csr_client.CsrRestClient(service_data['router_info'])
vpn_service_id = service_data['id']
self.service_state[vpn_service_id] = CiscoCsrVpnService(
service_data, csr)
return self.service_state[vpn_service_id]
def update_connection(self, context, vpn_service_id, conn_data):
"""Handle notification for a single IPSec connection."""
vpn_service = self.service_state[vpn_service_id]
conn_id = conn_data['id']
conn_is_admin_up = conn_data[u'admin_state_up']
if conn_id in vpn_service.conn_state: # Existing connection...
ipsec_conn = vpn_service.conn_state[conn_id]
config_changed = ipsec_conn.check_for_changes(conn_data)
if config_changed:
LOG.debug("Update: Existing connection %s changed", conn_id)
ipsec_conn.delete_ipsec_site_connection(context, conn_id)
ipsec_conn.create_ipsec_site_connection(context, conn_data)
ipsec_conn.conn_info = conn_data
if ipsec_conn.forced_down:
if vpn_service.is_admin_up and conn_is_admin_up:
LOG.debug("Update: Connection %s no longer admin down",
conn_id)
ipsec_conn.set_admin_state(is_up=True)
ipsec_conn.forced_down = False
else:
if not vpn_service.is_admin_up or not conn_is_admin_up:
LOG.debug("Update: Connection %s forced to admin down",
conn_id)
ipsec_conn.set_admin_state(is_up=False)
ipsec_conn.forced_down = True
else: # New connection...
ipsec_conn = vpn_service.create_connection(conn_data)
ipsec_conn.create_ipsec_site_connection(context, conn_data)
if not vpn_service.is_admin_up or not conn_is_admin_up:
LOG.debug("Update: Created new connection %s in admin down "
"state", conn_id)
ipsec_conn.set_admin_state(is_up=False)
ipsec_conn.forced_down = True
else:
LOG.debug("Update: Created new connection %s", conn_id)
ipsec_conn.is_dirty = False
ipsec_conn.last_status = conn_data['status']
ipsec_conn.is_admin_up = conn_is_admin_up
return ipsec_conn
def update_service(self, context, service_data):
"""Handle notification for a single VPN Service and its connections."""
vpn_service_id = service_data['id']
if vpn_service_id in self.service_state:
LOG.debug("Update: Existing VPN service %s detected",
vpn_service_id)
vpn_service = self.service_state[vpn_service_id]
else:
LOG.debug("Update: New VPN service %s detected", vpn_service_id)
vpn_service = self.create_vpn_service(service_data)
if not vpn_service:
return
vpn_service.is_dirty = False
vpn_service.connections_removed = False
vpn_service.last_status = service_data['status']
vpn_service.is_admin_up = service_data[u'admin_state_up']
for conn_data in service_data['ipsec_conns']:
self.update_connection(context, vpn_service_id, conn_data)
LOG.debug("Update: Completed update processing")
return vpn_service
def update_all_services_and_connections(self, context):
"""Update services and connections based on plugin info.
Perform any create and update operations and then update status.
Mark every visited connection as no longer "dirty" so it will
not be deleted at the end of sync processing.
"""
services_data = self.agent_rpc.get_vpn_services_on_host(context,
self.host)
LOG.debug("Sync updating for %d VPN services", len(services_data))
vpn_services = []
for service_data in services_data:
vpn_service = self.update_service(context, service_data)
if vpn_service:
vpn_services.append(vpn_service)
return vpn_services
def mark_existing_connections_as_dirty(self):
"""Mark all existing connections as "dirty" for sync."""
service_count = 0
connection_count = 0
for service_state in self.service_state.values():
service_state.is_dirty = True
service_count += 1
for conn_id in service_state.conn_state:
service_state.conn_state[conn_id].is_dirty = True
connection_count += 1
LOG.debug("Mark: %(service)d VPN services and %(conn)d IPSec "
"connections marked dirty", {'service': service_count,
'conn': connection_count})
def remove_unknown_connections(self, context):
"""Remove connections that are not known by service driver."""
service_count = 0
connection_count = 0
for vpn_service_id, vpn_service in list(self.service_state.items()):
dirty = [c_id for c_id, c in vpn_service.conn_state.items()
if c.is_dirty]
vpn_service.connections_removed = len(dirty) > 0
for conn_id in dirty:
conn_state = vpn_service.conn_state[conn_id]
conn_state.delete_ipsec_site_connection(context, conn_id)
connection_count += 1
del vpn_service.conn_state[conn_id]
if vpn_service.is_dirty:
service_count += 1
del self.service_state[vpn_service_id]
elif dirty:
self.connections_removed = True
LOG.debug("Sweep: Removed %(service)d dirty VPN service%(splural)s "
"and %(conn)d dirty IPSec connection%(cplural)s",
{'service': service_count, 'conn': connection_count,
'splural': 's'[service_count == 1:],
'cplural': 's'[connection_count == 1:]})
def build_report_for_connections_on(self, vpn_service):
"""Create the report fragment for IPSec connections on a service.
Collect the current status from the Cisco CSR and use that to update
the status and generate a report fragment for each connection on the
service. If there is no status information, or no change, then no
report info will be created for the connection. The combined report
data is returned.
"""
LOG.debug("Report: Collecting status for IPSec connections on VPN "
"service %s", vpn_service.service_id)
tunnels = vpn_service.get_ipsec_connections_status()
report = {}
for connection in vpn_service.conn_state.values():
if connection.forced_down:
LOG.debug("Connection %s forced down", connection.conn_id)
current_status = constants.DOWN
else:
current_status = connection.find_current_status_in(tunnels)
LOG.debug("Connection %(conn)s reported %(status)s",
{'conn': connection.conn_id,
'status': current_status})
frag = connection.update_status_and_build_report(current_status)
if frag:
LOG.debug("Report: Adding info for IPSec connection %s",
connection.conn_id)
report.update(frag)
return report
def build_report_for_service(self, vpn_service):
"""Create the report info for a VPN service and its IPSec connections.
Get the report info for the connections on the service, and include
it in the report info for the VPN service. If there is no report
info for the connection, then no change has occurred and no report
will be generated. If there is only one connection for the service,
we'll set the service state to match the connection (with ERROR seen
as DOWN).
"""
conn_report = self.build_report_for_connections_on(vpn_service)
if conn_report or vpn_service.connections_removed:
pending_handled = plugin_utils.in_pending_status(
vpn_service.last_status)
vpn_service.update_last_status()
LOG.debug("Report: Adding info for VPN service %s",
vpn_service.service_id)
return {u'id': vpn_service.service_id,
u'status': vpn_service.last_status,
u'updated_pending_status': pending_handled,
u'ipsec_site_connections': conn_report}
else:
return {}
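# Illustrative shape of the report returned above (IDs abbreviated); this
# is what gets sent to the plugin via update_status():
#
#     {u'id': u'<vpn-service-id>',
#      u'status': constants.ACTIVE,
#      u'updated_pending_status': False,
#      u'ipsec_site_connections': {
#          u'<conn-id>': {'status': constants.DOWN,
#                         'updated_pending_status': True}}}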
@lockutils.synchronized('vpn-agent', 'neutron-')
def report_status(self, context):
"""Report status of all VPN services and IPSec connections to plugin.
This is called periodically by the agent, to push up changes in
status. Use a lock to serialize access to (and changing of)
running state.
"""
return self.report_status_internal(context)
def report_status_internal(self, context):
"""Generate report and send to plugin, if anything changed."""
service_report = []
LOG.debug("Report: Starting status report processing")
for vpn_service_id, vpn_service in self.service_state.items():
LOG.debug("Report: Collecting status for VPN service %s",
vpn_service_id)
report = self.build_report_for_service(vpn_service)
if report:
service_report.append(report)
if service_report:
LOG.info(_LI("Sending status report update to plugin"))
self.agent_rpc.update_status(context, service_report)
LOG.debug("Report: Completed status report processing")
return service_report
@lockutils.synchronized('vpn-agent', 'neutron-')
def sync(self, context, routers):
"""Synchronize with plugin and report current status.
Mark all "known" services/connections as dirty, update them based on
information from the plugin, remove (sweep) any connections that are
not updated (dirty), and report updates, if any, back to plugin.
Called when a service is updated/deleted, a connection is
created/updated/deleted (vpnservice_updated message), or a router
changes (_process_routers).
Use lock to serialize access (and changes) to running state for VPN
service and IPsec connections.
"""
self.mark_existing_connections_as_dirty()
self.update_all_services_and_connections(context)
self.remove_unknown_connections(context)
self.report_status_internal(context)
def create_router(self, router):
"""Actions taken when router created."""
# Note: Since Cisco CSR is running out-of-band, nothing to do here
pass
def destroy_router(self, process_id):
"""Actions taken when router deleted."""
# Note: Since Cisco CSR is running out-of-band, nothing to do here
pass
class CiscoCsrVpnService(object):
"""Maintains state/status information for a service and its connections."""
def __init__(self, service_data, csr):
self.service_id = service_data['id']
self.conn_state = {}
self.csr = csr
self.is_admin_up = True
# TODO(pcm) FUTURE - handle sharing of policies
def create_connection(self, conn_data):
conn_id = conn_data['id']
self.conn_state[conn_id] = CiscoCsrIPSecConnection(conn_data, self.csr)
return self.conn_state[conn_id]
def get_connection(self, conn_id):
return self.conn_state.get(conn_id)
def conn_status(self, conn_id):
conn_state = self.get_connection(conn_id)
if conn_state:
return conn_state.last_status
def snapshot_conn_state(self, ipsec_conn):
"""Create/obtain connection state and save current status."""
conn_state = self.conn_state.setdefault(
ipsec_conn['id'], CiscoCsrIPSecConnection(ipsec_conn, self.csr))
conn_state.last_status = ipsec_conn['status']
conn_state.is_dirty = False
return conn_state
STATUS_MAP = {'ERROR': constants.ERROR,
'UP-ACTIVE': constants.ACTIVE,
'UP-IDLE': constants.ACTIVE,
'UP-NO-IKE': constants.ACTIVE,
'DOWN': constants.DOWN,
'DOWN-NEGOTIATING': constants.DOWN}
def get_ipsec_connections_status(self):
"""Obtain current status of all tunnels on a Cisco CSR.
Convert them to OpenStack status values.
"""
tunnels = self.csr.read_tunnel_statuses()
for tunnel in tunnels:
LOG.debug("CSR Reports %(tunnel)s status '%(status)s'",
{'tunnel': tunnel[0], 'status': tunnel[1]})
return dict(map(lambda x: (x[0], self.STATUS_MAP[x[1]]), tunnels))
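# For example (tunnel names are illustrative), if the CSR reports
# [('Tunnel0', 'UP-ACTIVE'), ('Tunnel1', 'DOWN')], this method returns
# {'Tunnel0': constants.ACTIVE, 'Tunnel1': constants.DOWN}.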
def find_matching_connection(self, tunnel_id):
"""Find IPSec connection using Cisco CSR tunnel specified, if any."""
for connection in self.conn_state.values():
if connection.tunnel == tunnel_id:
return connection.conn_id
def no_connections_up(self):
return not any(c.last_status == 'ACTIVE'
for c in self.conn_state.values())
def update_last_status(self):
if not self.is_admin_up or self.no_connections_up():
self.last_status = constants.DOWN
else:
self.last_status = constants.ACTIVE
class CiscoCsrIPSecConnection(object):
"""State and actions for IPSec site-to-site connections."""
def __init__(self, conn_info, csr):
self.conn_info = conn_info
self.csr = csr
self.steps = []
self.forced_down = False
self.changed = False
@property
def conn_id(self):
return self.conn_info['id']
@property
def is_admin_up(self):
return self.conn_info['admin_state_up']
@is_admin_up.setter
def is_admin_up(self, is_up):
self.conn_info['admin_state_up'] = is_up
@property
def tunnel(self):
return self.conn_info['cisco']['site_conn_id']
def check_for_changes(self, curr_conn):
return not all([self.conn_info[attr] == curr_conn[attr]
for attr in ('mtu', 'psk', 'peer_address',
'peer_cidrs', 'ike_policy',
'ipsec_policy', 'cisco')])
def find_current_status_in(self, statuses):
if self.tunnel in statuses:
return statuses[self.tunnel]
else:
return constants.ERROR
def update_status_and_build_report(self, current_status):
if current_status != self.last_status:
pending_handled = plugin_utils.in_pending_status(self.last_status)
self.last_status = current_status
return {self.conn_id: {'status': current_status,
'updated_pending_status': pending_handled}}
else:
return {}
DIALECT_MAP = {'ike_policy': {'name': 'IKE Policy',
'v1': u'v1',
# auth_algorithm -> hash
'sha1': u'sha',
# encryption_algorithm -> encryption
'3des': u'3des',
'aes-128': u'aes',
'aes-192': u'aes192',
'aes-256': u'aes256',
# pfs -> dhGroup
'group2': 2,
'group5': 5,
'group14': 14},
'ipsec_policy': {'name': 'IPSec Policy',
# auth_algorithm -> esp-authentication
'sha1': u'esp-sha-hmac',
# transform_protocol -> ah
'esp': None,
'ah': u'ah-sha-hmac',
'ah-esp': u'ah-sha-hmac',
# encryption_algorithm -> esp-encryption
'3des': u'esp-3des',
'aes-128': u'esp-aes',
'aes-192': u'esp-192-aes',
'aes-256': u'esp-256-aes',
# pfs -> pfs
'group2': u'group2',
'group5': u'group5',
'group14': u'group14'}}
def translate_dialect(self, resource, attribute, info):
"""Map VPNaaS attributes values to CSR values for a resource."""
name = self.DIALECT_MAP[resource]['name']
if attribute not in info:
raise CsrDriverMismatchError(resource=name, attr=attribute)
value = info[attribute].lower()
if value in self.DIALECT_MAP[resource]:
return self.DIALECT_MAP[resource][value]
raise CsrUnknownMappingError(resource=name, attr=attribute,
value=value)
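# For example, using the mappings above,
#     self.translate_dialect('ike_policy', 'encryption_algorithm',
#                            {'encryption_algorithm': 'aes-128'})
# returns u'aes', while an unmapped value raises CsrUnknownMappingError
# and a missing attribute raises CsrDriverMismatchError.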
def create_psk_info(self, psk_id, conn_info):
"""Collect/create attributes needed for pre-shared key."""
return {u'keyring-name': psk_id,
u'pre-shared-key-list': [
{u'key': conn_info['psk'],
u'encrypted': False,
u'peer-address': conn_info['peer_address']}]}
def create_ike_policy_info(self, ike_policy_id, conn_info):
"""Collect/create/map attributes needed for IKE policy."""
for_ike = 'ike_policy'
policy_info = conn_info[for_ike]
version = self.translate_dialect(for_ike,
'ike_version',
policy_info)
encrypt_algorithm = self.translate_dialect(for_ike,
'encryption_algorithm',
policy_info)
auth_algorithm = self.translate_dialect(for_ike,
'auth_algorithm',
policy_info)
group = self.translate_dialect(for_ike,
'pfs',
policy_info)
lifetime = policy_info['lifetime_value']
return {u'version': version,
u'priority-id': ike_policy_id,
u'encryption': encrypt_algorithm,
u'hash': auth_algorithm,
u'dhGroup': group,
u'lifetime': lifetime}
def create_ipsec_policy_info(self, ipsec_policy_id, info):
"""Collect/create attributes needed for IPSec policy.
Note: OpenStack will provide a default encryption algorithm, if one is
not provided, so an authentication-only configuration of (ah, sha1),
which maps to the ah-sha-hmac transform protocol, cannot be selected.
As a result, we'll always configure the encryption algorithm, and
will select ah-sha-hmac for the transform protocol.
"""
for_ipsec = 'ipsec_policy'
policy_info = info[for_ipsec]
transform_protocol = self.translate_dialect(for_ipsec,
'transform_protocol',
policy_info)
auth_algorithm = self.translate_dialect(for_ipsec,
'auth_algorithm',
policy_info)
encrypt_algorithm = self.translate_dialect(for_ipsec,
'encryption_algorithm',
policy_info)
group = self.translate_dialect(for_ipsec, 'pfs', policy_info)
lifetime = policy_info['lifetime_value']
settings = {u'policy-id': ipsec_policy_id,
u'protection-suite': {
u'esp-encryption': encrypt_algorithm,
u'esp-authentication': auth_algorithm},
u'lifetime-sec': lifetime,
u'pfs': group,
u'anti-replay-window-size': u'disable'}
if transform_protocol:
settings[u'protection-suite'][u'ah'] = transform_protocol
return settings
def create_site_connection_info(self, site_conn_id, ipsec_policy_id,
conn_info):
"""Collect/create attributes needed for the IPSec connection."""
mtu = conn_info['mtu']
return {
u'vpn-interface-name': site_conn_id,
u'ipsec-policy-id': ipsec_policy_id,
u'remote-device': {
u'tunnel-ip-address': conn_info['peer_address']
},
u'mtu': mtu
}
def create_routes_info(self, site_conn_id, conn_info):
"""Collect/create attributes for static routes."""
routes_info = []
for peer_cidr in conn_info.get('peer_cidrs', []):
route = {u'destination-network': peer_cidr,
u'outgoing-interface': site_conn_id}
route_id = csr_client.make_route_id(peer_cidr, site_conn_id)
routes_info.append((route_id, route))
return routes_info
def _check_create(self, resource, which):
"""Determine if REST create request was successful."""
if self.csr.status == requests.codes.CREATED:
LOG.debug("%(resource)s %(which)s is configured",
{'resource': resource, 'which': which})
return
LOG.error(_LE("Unable to create %(resource)s %(which)s: "
"%(status)d"),
{'resource': resource, 'which': which,
'status': self.csr.status})
# ToDO(pcm): Set state to error
raise CsrResourceCreateFailure(resource=resource, which=which)
def do_create_action(self, action_suffix, info, resource_id, title):
"""Perform a single REST step for IPSec site connection create."""
create_action = 'create_%s' % action_suffix
try:
getattr(self.csr, create_action)(info)
except AttributeError:
LOG.exception(_LE("Internal error - '%s' is not defined"),
create_action)
raise CsrResourceCreateFailure(resource=title,
which=resource_id)
self._check_create(title, resource_id)
self.steps.append(RollbackStep(action_suffix, resource_id, title))
def _verify_deleted(self, status, resource, which):
"""Determine if REST delete request was successful."""
if status in (requests.codes.NO_CONTENT, requests.codes.NOT_FOUND):
LOG.debug("%(resource)s configuration %(which)s was removed",
{'resource': resource, 'which': which})
else:
LOG.warning(_LW("Unable to delete %(resource)s %(which)s: "
"%(status)d"), {'resource': resource,
'which': which,
'status': status})
def do_rollback(self):
"""Undo create steps that were completed successfully."""
for step in reversed(self.steps):
delete_action = 'delete_%s' % step.action
LOG.debug("Performing rollback action %(action)s for "
"resource %(resource)s", {'action': delete_action,
'resource': step.title})
try:
getattr(self.csr, delete_action)(step.resource_id)
except AttributeError:
LOG.exception(_LE("Internal error - '%s' is not defined"),
delete_action)
raise CsrResourceCreateFailure(resource=step.title,
which=step.resource_id)
self._verify_deleted(self.csr.status, step.title, step.resource_id)
self.steps = []
def create_ipsec_site_connection(self, context, conn_info):
"""Creates an IPSec site-to-site connection on CSR.
Create the PSK, IKE policy, IPSec policy, connection, static route,
and (future) DPD.
"""
# Get all the IDs
conn_id = conn_info['id']
psk_id = conn_id
site_conn_id = conn_info['cisco']['site_conn_id']
ike_policy_id = conn_info['cisco']['ike_policy_id']
ipsec_policy_id = conn_info['cisco']['ipsec_policy_id']
LOG.debug('Creating IPSec connection %s', conn_id)
# Get all the attributes needed to create
try:
psk_info = self.create_psk_info(psk_id, conn_info)
ike_policy_info = self.create_ike_policy_info(ike_policy_id,
conn_info)
ipsec_policy_info = self.create_ipsec_policy_info(ipsec_policy_id,
conn_info)
connection_info = self.create_site_connection_info(site_conn_id,
ipsec_policy_id,
conn_info)
routes_info = self.create_routes_info(site_conn_id, conn_info)
except (CsrUnknownMappingError, CsrDriverMismatchError) as e:
LOG.exception(e)
return
try:
self.do_create_action('pre_shared_key', psk_info,
conn_id, 'Pre-Shared Key')
self.do_create_action('ike_policy', ike_policy_info,
ike_policy_id, 'IKE Policy')
self.do_create_action('ipsec_policy', ipsec_policy_info,
ipsec_policy_id, 'IPSec Policy')
self.do_create_action('ipsec_connection', connection_info,
site_conn_id, 'IPSec Connection')
# TODO(pcm): FUTURE - Do DPD for v1 and handle if >1 connection
# and different DPD settings
for route_id, route_info in routes_info:
self.do_create_action('static_route', route_info,
route_id, 'Static Route')
except CsrResourceCreateFailure:
self.do_rollback()
LOG.info(_LI("FAILED: Create of IPSec site-to-site connection %s"),
conn_id)
else:
LOG.info(_LI("SUCCESS: Created IPSec site-to-site connection %s"),
conn_id)
def delete_ipsec_site_connection(self, context, conn_id):
"""Delete the site-to-site IPSec connection.
This is best effort and will continue even if there are
failures.
"""
LOG.debug('Deleting IPSec connection %s', conn_id)
if not self.steps:
LOG.warning(_LW('Unable to find connection %s'), conn_id)
else:
self.do_rollback()
LOG.info(_LI("SUCCESS: Deleted IPSec site-to-site connection %s"),
conn_id)
def set_admin_state(self, is_up):
"""Change the admin state for the IPSec connection."""
self.csr.set_ipsec_connection_state(self.tunnel, admin_up=is_up)
if self.csr.status != requests.codes.NO_CONTENT:
state = "UP" if is_up else "DOWN"
LOG.error(_LE("Unable to change %(tunnel)s admin state to "
"%(state)s"), {'tunnel': self.tunnel,
'state': state})
raise CsrAdminStateChangeFailure(tunnel=self.tunnel, state=state)
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/__init__.py 0000664 0005670 0005671 00000001765 12701407726 030455 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class DeviceDriver(object):
def __init__(self, agent, host):
pass
@abc.abstractmethod
def sync(self, context, processes):
pass
@abc.abstractmethod
def create_router(self, process_id):
pass
@abc.abstractmethod
def destroy_router(self, process_id):
pass
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/template/ 0000775 0005670 0005671 00000000000 12701410103 030125 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/template/strongswan/ 0000775 0005670 0005671 00000000000 12701410103 032332 5 ustar jenkins jenkins 0000000 0000000 ././@LongLink 0000000 0000000 0000000 00000000150 00000000000 011211 L ustar 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/template/strongswan/ipsec.conf.template neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/template/strongswan/ipsec.conf.templ0000664 0005670 0005671 00000001334 12701407726 035446 0 ustar jenkins jenkins 0000000 0000000 # Configuration for {{vpnservice.name}}
config setup
conn %default
ikelifetime=60m
keylife=20m
rekeymargin=3m
keyingtries=1
authby=psk
mobike=no
{% for ipsec_site_connection in vpnservice.ipsec_site_connections%}
conn {{ipsec_site_connection.id}}
keyexchange={{ipsec_site_connection.ikepolicy.ike_version}}
left={{ipsec_site_connection.external_ip}}
leftsubnet={{ipsec_site_connection['local_cidrs']|join(',')}}
leftid={{ipsec_site_connection.external_ip}}
leftfirewall=yes
right={{ipsec_site_connection.peer_address}}
rightsubnet={{ipsec_site_connection['peer_cidrs']|join(',')}}
rightid={{ipsec_site_connection.peer_id}}
auto=route
{% endfor %}
././@LongLink 0000000 0000000 0000000 00000000155 00000000000 011216 L ustar 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/template/strongswan/strongswan.conf.template neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/template/strongswan/strongswan.conf.0000664 0005670 0005671 00000000222 12701407726 035501 0 ustar jenkins jenkins 0000000 0000000 charon {
load_modular = yes
plugins {
include strongswan.d/charon/*.conf
}
}
include strongswan.d/*.conf
././@LongLink 0000000 0000000 0000000 00000000152 00000000000 011213 L ustar 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/template/strongswan/ipsec.secret.template neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/template/strongswan/ipsec.secret.tem0000664 0005670 0005671 00000000347 12701407726 035455 0 ustar jenkins jenkins 0000000 0000000 # Configuration for {{vpnservice.name}}{% for ipsec_site_connection in vpnservice.ipsec_site_connections %}
{{ipsec_site_connection.external_ip}} {{ipsec_site_connection.peer_id}} : PSK "{{ipsec_site_connection.psk}}"
{% endfor %}
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/template/openswan/ 0000775 0005670 0005671 00000000000 12701410103 031757 5 ustar jenkins jenkins 0000000 0000000 ././@LongLink 0000000 0000000 0000000 00000000146 00000000000 011216 L ustar 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/template/openswan/ipsec.conf.template neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/template/openswan/ipsec.conf.templat0000664 0005670 0005671 00000007227 12701407726 035427 0 ustar jenkins jenkins 0000000 0000000 # Configuration for {{vpnservice.name}}
config setup
nat_traversal=yes
conn %default
ikelifetime=480m
keylife=60m
keyingtries=%forever
{% for ipsec_site_connection in vpnservice.ipsec_site_connections if ipsec_site_connection.admin_state_up
-%}
conn {{ipsec_site_connection.id}}
{% if ipsec_site_connection['local_ip_vers'] == 6 -%}
# Make pluto whack recognize the given IP addresses in this config
# as IPv6 addresses. The default is ipv4.
connaddrfamily=ipv6
# openswan can't process defaultroute for ipv6.
# Assign gateway address as leftnexthop
leftnexthop={{ipsec_site_connection.external_ip}}
# rightnexthop is not mandatory for ipsec, so it is not needed for ipv6.
{% else -%}
# NOTE: a default route is required for %defaultroute to work...
leftnexthop=%defaultroute
rightnexthop=%defaultroute
{% endif -%}
left={{ipsec_site_connection.external_ip}}
leftid={{ipsec_site_connection.external_ip}}
auto={{ipsec_site_connection.initiator}}
# NOTE:REQUIRED
# [subnet]
{% if ipsec_site_connection['local_cidrs']|length == 1 -%}
leftsubnet={{ipsec_site_connection['local_cidrs'][0]}}
{% else -%}
leftsubnets={ {{ipsec_site_connection['local_cidrs']|join(' ')}} }
{% endif -%}
# [updown]
# What "updown" script to run to adjust routing and/or firewalling when
# the status of the connection changes (default "ipsec _updown").
# "--route yes" allows to specify such routing options as mtu and metric.
leftupdown="ipsec _updown --route yes"
######################
# ipsec_site_connections
######################
# [peer_address]
right={{ipsec_site_connection.peer_address}}
# [peer_id]
rightid={{ipsec_site_connection.peer_id}}
# [peer_cidrs]
rightsubnets={ {{ipsec_site_connection['peer_cidrs']|join(' ')}} }
# rightsubnet=networkA/netmaskA, networkB/netmaskB (IKEv2 only)
# [mtu]
mtu={{ipsec_site_connection.mtu}}
# [dpd_action]
dpdaction={{ipsec_site_connection.dpd_action}}
# [dpd_interval]
dpddelay={{ipsec_site_connection.dpd_interval}}
# [dpd_timeout]
dpdtimeout={{ipsec_site_connection.dpd_timeout}}
# [auth_mode]
authby=secret
######################
# IKEPolicy params
######################
#ike version
ikev2={{ipsec_site_connection.ikepolicy.ike_version}}
# [encryption_algorithm]-[auth_algorithm]-[pfs]
ike={{ipsec_site_connection.ikepolicy.encryption_algorithm}}-{{ipsec_site_connection.ikepolicy.auth_algorithm}};{{ipsec_site_connection.ikepolicy.pfs}}
# [lifetime_value]
ikelifetime={{ipsec_site_connection.ikepolicy.lifetime_value}}s
# NOTE: it looks like lifetime_units=kilobytes can't be enforced (could be seconds, hours, days...)
##########################
# IPsecPolicys params
##########################
# [transform_protocol]
auth={{ipsec_site_connection.ipsecpolicy.transform_protocol}}
{% if ipsec_site_connection.ipsecpolicy.transform_protocol == "ah" -%}
# AH protocol does not support encryption
# [auth_algorithm]-[pfs]
phase2alg={{ipsec_site_connection.ipsecpolicy.auth_algorithm}};{{ipsec_site_connection.ipsecpolicy.pfs}}
{% else -%}
# [encryption_algorithm]-[auth_algorithm]-[pfs]
phase2alg={{ipsec_site_connection.ipsecpolicy.encryption_algorithm}}-{{ipsec_site_connection.ipsecpolicy.auth_algorithm}};{{ipsec_site_connection.ipsecpolicy.pfs}}
{% endif -%}
# [encapsulation_mode]
type={{ipsec_site_connection.ipsecpolicy.encapsulation_mode}}
# [lifetime_value]
lifetime={{ipsec_site_connection.ipsecpolicy.lifetime_value}}s
# lifebytes=100000 if lifetime_units=kilobytes (IKEv2 only)
{% endfor -%}
././@LongLink 0000000 0000000 0000000 00000000150 00000000000 011211 L ustar 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/template/openswan/ipsec.secret.template neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/template/openswan/ipsec.secret.templ0000664 0005670 0005671 00000000336 12701407726 035434 0 ustar jenkins jenkins 0000000 0000000 # Configuration for {{vpnservice.name}}
{% for ipsec_site_connection in vpnservice.ipsec_site_connections -%}
{{vpnservice.external_ip}} {{ipsec_site_connection.peer_id}} : PSK "{{ipsec_site_connection.psk}}"
{% endfor %}
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/ipsec.py 0000664 0005670 0005671 00000113710 12701407726 030013 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import copy
import os
import re
import shutil
import six
import socket
import eventlet
import jinja2
import netaddr
from neutron.agent.linux import ip_lib
from neutron.api.v2 import attributes
from neutron.common import rpc as n_rpc
from neutron.common import utils as n_utils
from neutron import context
from neutron.plugins.common import constants
from neutron.plugins.common import utils as plugin_utils
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from neutron_vpnaas._i18n import _, _LE, _LI, _LW
from neutron_vpnaas.extensions import vpnaas
from neutron_vpnaas.services.vpn.common import topics
from neutron_vpnaas.services.vpn import device_drivers
LOG = logging.getLogger(__name__)
TEMPLATE_PATH = os.path.dirname(os.path.abspath(__file__))
ipsec_opts = [
cfg.StrOpt(
'config_base_dir',
default='$state_path/ipsec',
help=_('Location to store ipsec server config files')),
cfg.IntOpt('ipsec_status_check_interval',
default=60,
help=_("Interval for checking ipsec status")),
cfg.BoolOpt('enable_detailed_logging',
default=False,
help=_("Enable detail logging for ipsec pluto process. "
"If the flag set to True, the detailed logging will "
"be written into config_base_dir//log. "
"Note: This setting applies to OpenSwan and LibreSwan "
"only. StrongSwan logs to syslog.")),
]
cfg.CONF.register_opts(ipsec_opts, 'ipsec')
openswan_opts = [
cfg.StrOpt(
'ipsec_config_template',
default=os.path.join(
TEMPLATE_PATH,
'template/openswan/ipsec.conf.template'),
help=_('Template file for ipsec configuration')),
cfg.StrOpt(
'ipsec_secret_template',
default=os.path.join(
TEMPLATE_PATH,
'template/openswan/ipsec.secret.template'),
help=_('Template file for ipsec secret configuration'))
]
cfg.CONF.register_opts(openswan_opts, 'openswan')
pluto_opts = [
cfg.IntOpt('shutdown_check_timeout',
default=1,
help=_('Initial interval in seconds for checking if the pluto '
'daemon has shut down'),
deprecated_group='libreswan'),
cfg.IntOpt('shutdown_check_retries',
default=5,
help=_('The maximum number of retries for checking for '
'pluto daemon shutdown'),
deprecated_group='libreswan'),
cfg.FloatOpt('shutdown_check_back_off',
default=1.5,
help=_('A factor to increase the retry interval for '
'each retry'),
deprecated_group='libreswan')
]
cfg.CONF.register_opts(pluto_opts, 'pluto')
JINJA_ENV = None
IPSEC_CONNS = 'ipsec_site_connections'
def _get_template(template_file):
global JINJA_ENV
if not JINJA_ENV:
templateLoader = jinja2.FileSystemLoader(searchpath="/")
JINJA_ENV = jinja2.Environment(loader=templateLoader, autoescape=True)
return JINJA_ENV.get_template(template_file)
@six.add_metaclass(abc.ABCMeta)
class BaseSwanProcess(object):
"""Swan Family Process Manager
This class manages starting/restarting/stopping the ipsec process.
It also creates/deletes the config templates.
"""
binary = "ipsec"
CONFIG_DIRS = [
'var/run',
'log',
'etc',
'etc/ipsec.d/aacerts',
'etc/ipsec.d/acerts',
'etc/ipsec.d/cacerts',
'etc/ipsec.d/certs',
'etc/ipsec.d/crls',
'etc/ipsec.d/ocspcerts',
'etc/ipsec.d/policies',
'etc/ipsec.d/private',
'etc/ipsec.d/reqs',
'etc/pki/nssdb/'
]
DIALECT_MAP = {
"3des": "3des",
"aes-128": "aes128",
"aes-256": "aes256",
"aes-192": "aes192",
"group2": "modp1024",
"group5": "modp1536",
"group14": "modp2048",
"group15": "modp3072",
"bi-directional": "start",
"response-only": "add",
"v2": "insist",
"v1": "never"
}
STATUS_DICT = {
'erouted': constants.ACTIVE,
'unrouted': constants.DOWN
}
STATUS_RE = '\d\d\d "([a-f0-9\-]+).* (unrouted|erouted);'
STATUS_NOT_RUNNING_RE = 'Command:.*ipsec.*status.*Exit code: [1|3]$'
STATUS_IPSEC_SA_ESTABLISHED_RE = (
'\d{3} #\d+: "([a-f0-9\-]+).*IPsec SA established.*')
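# Illustrative lines the patterns above are meant to match (simplified
# sketches, not verbatim pluto/ipsec output):
#
#     STATUS_IPSEC_SA_ESTABLISHED_RE:
#         000 #3: "<conn-uuid>" ... IPsec SA established ...   -> ACTIVE
#     STATUS_RE:
#         000 "<conn-uuid>" ... unrouted;                      -> DOWN
#     STATUS_NOT_RUNNING_RE:
#         Command: ... ipsec ... status ... Exit code: 3       -> not running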
def __init__(self, conf, process_id, vpnservice, namespace):
self.conf = conf
self.id = process_id
self.updated_pending_status = False
self.namespace = namespace
self.connection_status = {}
self.config_dir = os.path.join(
self.conf.ipsec.config_base_dir, self.id)
self.etc_dir = os.path.join(self.config_dir, 'etc')
self.log_dir = os.path.join(self.config_dir, 'log')
self.update_vpnservice(vpnservice)
self.STATUS_PATTERN = re.compile(self.STATUS_RE)
self.STATUS_NOT_RUNNING_PATTERN = re.compile(
self.STATUS_NOT_RUNNING_RE)
self.STATUS_IPSEC_SA_ESTABLISHED_PATTERN = re.compile(
self.STATUS_IPSEC_SA_ESTABLISHED_RE)
self.STATUS_MAP = self.STATUS_DICT
def translate_dialect(self):
if not self.vpnservice:
return
for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
self._dialect(ipsec_site_conn, 'initiator')
self._dialect(ipsec_site_conn['ikepolicy'], 'ike_version')
for key in ['encryption_algorithm',
'auth_algorithm',
'pfs']:
self._dialect(ipsec_site_conn['ikepolicy'], key)
self._dialect(ipsec_site_conn['ipsecpolicy'], key)
def update_vpnservice(self, vpnservice):
self.vpnservice = vpnservice
self.translate_dialect()
def _dialect(self, obj, key):
obj[key] = self.DIALECT_MAP.get(obj[key], obj[key])
@abc.abstractmethod
def ensure_configs(self):
pass
def ensure_config_file(self, kind, template, vpnservice, file_mode=None):
"""Update config file, based on current settings for service."""
config_str = self._gen_config_content(template, vpnservice)
config_file_name = self._get_config_filename(kind)
if file_mode is None:
n_utils.replace_file(config_file_name, config_str)
else:
n_utils.replace_file(config_file_name, config_str, file_mode)
def remove_config(self):
"""Remove whole config file."""
shutil.rmtree(self.config_dir, ignore_errors=True)
def _get_config_filename(self, kind):
config_dir = self.etc_dir
return os.path.join(config_dir, kind)
def ensure_config_dir(self, vpnservice):
"""Create config directory if it does not exist."""
n_utils.ensure_dir(self.config_dir)
for subdir in self.CONFIG_DIRS:
dir_path = os.path.join(self.config_dir, subdir)
n_utils.ensure_dir(dir_path)
def _gen_config_content(self, template_file, vpnservice):
template = _get_template(template_file)
return template.render(
{'vpnservice': vpnservice,
'state_path': self.conf.state_path})
@abc.abstractmethod
def get_status(self):
pass
@property
def status(self):
if self.active:
return constants.ACTIVE
return constants.DOWN
@property
def active(self):
"""Check if the process is active or not."""
if not self.namespace:
return False
try:
status = self.get_status()
self._extract_and_record_connection_status(status)
if not self.connection_status:
return False
except RuntimeError:
return False
return True
def update(self):
"""Update Status based on vpnservice configuration."""
# Disable the process if a vpnservice is disabled or it has no
# enabled IPSec site connections.
vpnservice_has_active_ipsec_site_conns = any(
[ipsec_site_conn['admin_state_up']
for ipsec_site_conn in self.vpnservice['ipsec_site_connections']])
if (not self.vpnservice['admin_state_up'] or
not vpnservice_has_active_ipsec_site_conns):
self.disable()
else:
self.enable()
if plugin_utils.in_pending_status(self.vpnservice['status']):
self.updated_pending_status = True
self.vpnservice['status'] = self.status
for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
if plugin_utils.in_pending_status(ipsec_site_conn['status']):
conn_id = ipsec_site_conn['id']
conn_status = self.connection_status.get(conn_id)
if not conn_status:
continue
conn_status['updated_pending_status'] = True
ipsec_site_conn['status'] = conn_status['status']
def enable(self):
"""Enabling the process."""
try:
self.ensure_configs()
if self.active:
self.restart()
else:
self.start()
except RuntimeError:
LOG.exception(
_LE("Failed to enable vpn process on router %s"),
self.id)
def disable(self):
"""Disabling the process."""
try:
if self.active:
self.stop()
self.remove_config()
except RuntimeError:
LOG.exception(
_LE("Failed to disable vpn process on router %s"),
self.id)
@abc.abstractmethod
def restart(self):
"""Restart process."""
@abc.abstractmethod
def start(self):
"""Start process."""
@abc.abstractmethod
def stop(self):
"""Stop process."""
def _check_status_line(self, line):
"""Parse a line and search for status information.
If a connection has an established Security Association,
it will be considered ACTIVE. Otherwise, even if a status
line shows that a connection is active, it will be marked
as DOWN-ed.
"""
# pluto is not running so just exit
if self.STATUS_NOT_RUNNING_PATTERN.search(line):
self.connection_status = {}
raise StopIteration()
m = self.STATUS_IPSEC_SA_ESTABLISHED_PATTERN.search(line)
if m:
connection_id = m.group(1)
return connection_id, constants.ACTIVE
else:
m = self.STATUS_PATTERN.search(line)
if m:
connection_id = m.group(1)
return connection_id, constants.DOWN
return None, None
def _extract_and_record_connection_status(self, status_output):
if not status_output:
self.connection_status = {}
return
for line in status_output.split('\n'):
try:
conn_id, conn_status = self._check_status_line(line)
except StopIteration:
break
if conn_id:
self._record_connection_status(conn_id, conn_status)
def _record_connection_status(self, connection_id, status,
force_status_update=False):
conn_info = self.connection_status.get(connection_id)
if not conn_info:
self.connection_status[connection_id] = {
'status': status,
'updated_pending_status': force_status_update
}
else:
conn_info['status'] = status
if force_status_update:
conn_info['updated_pending_status'] = True
class OpenSwanProcess(BaseSwanProcess):
"""OpenSwan Process manager class.
This process class uses three commands:
(1) ipsec pluto: IPsec IKE keying daemon
(2) ipsec addconn: adds a new connection specification
(3) ipsec whack: control interface for the IPsec keying daemon
"""
def __init__(self, conf, process_id, vpnservice, namespace):
super(OpenSwanProcess, self).__init__(conf, process_id,
vpnservice, namespace)
self.secrets_file = os.path.join(
self.etc_dir, 'ipsec.secrets')
self.config_file = os.path.join(
self.etc_dir, 'ipsec.conf')
self.pid_path = os.path.join(
self.config_dir, 'var', 'run', 'pluto')
self.pid_file = '%s.pid' % self.pid_path
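# With the paths assembled above, the per-router layout is roughly
# (config_base_dir defaults to $state_path/ipsec):
#
#     <config_base_dir>/<router id>/etc/ipsec.conf
#     <config_base_dir>/<router id>/etc/ipsec.secrets
#     <config_base_dir>/<router id>/var/run/pluto.pid (and pluto.ctl)
#     <config_base_dir>/<router id>/log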
def _execute(self, cmd, check_exit_code=True, extra_ok_codes=None):
"""Execute command on namespace."""
ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
return ip_wrapper.netns.execute(cmd, check_exit_code=check_exit_code,
extra_ok_codes=extra_ok_codes)
def ensure_configs(self):
"""Generate config files which are needed for OpenSwan.
If the config directories do not exist, this function will
create them.
"""
self.ensure_config_dir(self.vpnservice)
self.ensure_config_file(
'ipsec.conf',
self.conf.openswan.ipsec_config_template,
self.vpnservice)
self.ensure_config_file(
'ipsec.secrets',
self.conf.openswan.ipsec_secret_template,
self.vpnservice,
0o600)
def _process_running(self):
"""Checks if process is still running."""
# If no PID file, we assume the process is not running.
if not os.path.exists(self.pid_file):
return False
try:
# We take an ask-forgiveness-not-permission approach and rely
# on throwing to tell us something. If the pid file exists,
# delve into the process information and check if it matches
# our expected command line.
with open(self.pid_file, 'r') as f:
pid = f.readline().strip()
with open('/proc/%s/cmdline' % pid) as cmd_line_file:
cmd_line = cmd_line_file.readline()
if self.pid_path in cmd_line and 'pluto' in cmd_line:
# Okay the process is probably a pluto process
# and it contains the pid_path in the command
# line... could be a race. Log to error and return
# that it is *NOT* okay to clean up files. We are
# logging to error instead of debug because it
# indicates something bad has happened and this is
# valuable information for figuring it out.
LOG.error(_LE('Process %(pid)s exists with command '
'line %(cmd_line)s.') %
{'pid': pid, 'cmd_line': cmd_line})
return True
except IOError as e:
# This is logged as "info" instead of error because it simply
# means that we couldn't find the files to check on them.
LOG.info(_LI('Unable to find control files on startup for '
'router %(router)s: %(msg)s'),
{'router': self.id, 'msg': e})
return False
def _cleanup_control_files(self):
try:
ctl_file = '%s.ctl' % self.pid_path
LOG.debug('Removing %(pidfile)s and %(ctlfile)s',
{'pidfile': self.pid_file,
'ctlfile': ctl_file})
if os.path.exists(self.pid_file):
os.remove(self.pid_file)
if os.path.exists(ctl_file):
os.remove(ctl_file)
except OSError as e:
LOG.error(_LE('Unable to remove pluto control '
'files for router %(router)s. %(msg)s'),
{'router': self.id, 'msg': e})
def get_status(self):
return self._execute([self.binary,
'whack',
'--ctlbase',
self.pid_path,
'--status'], extra_ok_codes=[1, 3])
def restart(self):
"""Restart the process."""
# stop() followed immediately by a start() runs the risk that the
# current pluto daemon has not had a chance to shutdown. We check
# the current process information to see if the daemon is still
# running and if so, wait a short interval and retry.
self.stop()
wait_interval = cfg.CONF.pluto.shutdown_check_timeout
for i in range(cfg.CONF.pluto.shutdown_check_retries):
if not self._process_running():
self._cleanup_control_files()
break
eventlet.sleep(wait_interval)
wait_interval *= cfg.CONF.pluto.shutdown_check_back_off
else:
LOG.warning(_LW('Server appears to still be running, restart '
'of router %s may fail'), self.id)
self.start()
return
def _resolve_fqdn(self, fqdn):
# The first addrinfo member from the list returned by
# socket.getaddrinfo is used for the address resolution.
# The code doesn't filter for ipv4 or ipv6 address.
try:
addrinfo = socket.getaddrinfo(fqdn, None)[0]
return addrinfo[-1][0]
except socket.gaierror:
LOG.exception(_LE("Peer address %s cannot be resolved"), fqdn)
def _get_nexthop(self, address, connection_id):
# check if address is an ip address or fqdn
invalid_ip_address = attributes._validate_ip_address(address)
if invalid_ip_address:
ip_addr = self._resolve_fqdn(address)
if not ip_addr:
self._record_connection_status(connection_id, constants.ERROR,
force_status_update=True)
raise vpnaas.VPNPeerAddressNotResolved(peer_address=address)
else:
ip_addr = address
routes = self._execute(['ip', 'route', 'get', ip_addr])
if routes.find('via') >= 0:
return routes.split(' ')[2]
return address
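# Illustrative parsing (addresses are made up): "ip route get 172.24.4.226"
# may print "172.24.4.226 via 10.0.0.1 dev qg-xxxx ...", in which case the
# split above yields "10.0.0.1" as the nexthop; when no "via" is present,
# the original peer address is returned unchanged.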
def _virtual_privates(self):
"""Returns line of virtual_privates.
virtual_private contains the networks
that are allowed as subnet for the remote client.
"""
virtual_privates = []
nets = []
for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
nets += ipsec_site_conn['local_cidrs']
nets += ipsec_site_conn['peer_cidrs']
for net in nets:
version = netaddr.IPNetwork(net).version
virtual_privates.append('%%v%s:%s' % (version, net))
return ','.join(virtual_privates)
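# For example, with local/peer CIDRs 10.1.0.0/24 and fd00:abcd::/64
# (illustrative values), this returns "%v4:10.1.0.0/24,%v6:fd00:abcd::/64".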
def start(self):
"""Start the process.
Note: if there is no namespace yet,
just do nothing and wait for the next event.
"""
if not self.namespace:
return
# NOTE: The restart operation calls the parent's start() instead of
# this one to avoid having to special case the startup file check.
# If anything is added to this method that needs to run whenever
# a restart occurs, it should be either added to the restart()
# override or things refactored to special-case start() when
# called from restart().
# If, by any reason, ctl and pid files weren't cleaned up, pluto
# won't be able to rewrite them and will fail to start. So we check
# to see if the process is running and if not, attempt a cleanup.
# In either case we fall through to allow the pluto process to
# start or fail in the usual way.
if not self._process_running():
self._cleanup_control_files()
virtual_private = self._virtual_privates()
#start pluto IKE keying daemon
cmd = [self.binary,
'pluto',
'--ctlbase', self.pid_path,
'--ipsecdir', self.etc_dir,
'--use-netkey',
'--uniqueids',
'--nat_traversal',
'--secretsfile', self.secrets_file,
'--virtual_private', virtual_private]
if self.conf.ipsec.enable_detailed_logging:
cmd += ['--perpeerlog', '--perpeerlogbase', self.log_dir]
self._execute(cmd)
#add connections
for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
# Don't add a connection if its admin state is down
if not ipsec_site_conn['admin_state_up']:
continue
nexthop = self._get_nexthop(ipsec_site_conn['peer_address'],
ipsec_site_conn['id'])
self._execute([self.binary,
'addconn',
'--ctlbase', '%s.ctl' % self.pid_path,
'--defaultroutenexthop', nexthop,
'--config', self.config_file,
ipsec_site_conn['id']
])
#TODO(nati) fix this when openswan is fixed
#Due to an openswan bug, this command always exits with 3
#start whack ipsec keying daemon
self._execute([self.binary,
'whack',
'--ctlbase', self.pid_path,
'--listen',
], check_exit_code=False)
for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
if (not ipsec_site_conn['initiator'] == 'start' or
not ipsec_site_conn['admin_state_up']):
continue
#initiate ipsec connection
self._execute([self.binary,
'whack',
'--ctlbase', self.pid_path,
'--name', ipsec_site_conn['id'],
'--asynchronous',
'--initiate'
])
def disconnect(self):
if not self.namespace:
return
if not self.vpnservice:
return
for conn_id in self.connection_status:
self._execute([self.binary,
'whack',
'--ctlbase', self.pid_path,
'--name', '%s/0x1' % conn_id,
'--terminate'
])
def stop(self):
#Stop process using whack
#Note this will also stop pluto
self.disconnect()
self._execute([self.binary,
'whack',
'--ctlbase', self.pid_path,
'--shutdown',
])
self.connection_status = {}
class IPsecVpnDriverApi(object):
"""IPSecVpnDriver RPC api."""
def __init__(self, topic):
target = oslo_messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
def get_vpn_services_on_host(self, context, host):
"""Get list of vpnservices.
The vpnservices include the related ipsec_site_connection,
ikepolicy and ipsecpolicy on this host.
"""
cctxt = self.client.prepare()
return cctxt.call(context, 'get_vpn_services_on_host', host=host)
def update_status(self, context, status):
"""Update local status.
This method updates the status attribute of
VPNServices.
"""
cctxt = self.client.prepare()
return cctxt.call(context, 'update_status', status=status)
@six.add_metaclass(abc.ABCMeta)
class IPsecDriver(device_drivers.DeviceDriver):
"""VPN Device Driver for IPSec.
This class is designed for use with the L3 agent for now.
However, this driver may be used with another agent in the future,
so the use of "Router" is kept minimal for now.
Instead of router_id, we are using process_id in this code.
"""
# history
# 1.0 Initial version
target = oslo_messaging.Target(version='1.0')
def __init__(self, vpn_service, host):
# TODO(pc_m) Replace vpn_service with config arg, once all driver
# implementations no longer need vpn_service.
self.conf = vpn_service.conf
self.host = host
self.conn = n_rpc.create_connection()
self.context = context.get_admin_context_without_session()
self.topic = topics.IPSEC_AGENT_TOPIC
node_topic = '%s.%s' % (self.topic, self.host)
self.processes = {}
self.routers = {}
self.process_status_cache = {}
self.endpoints = [self]
self.conn.create_consumer(node_topic, self.endpoints, fanout=False)
self.conn.consume_in_threads()
self.agent_rpc = IPsecVpnDriverApi(topics.IPSEC_DRIVER_TOPIC)
self.process_status_cache_check = loopingcall.FixedIntervalLoopingCall(
self.report_status, self.context)
self.process_status_cache_check.start(
interval=self.conf.ipsec.ipsec_status_check_interval)
def get_namespace(self, router_id):
"""Get namespace of router.
:param router_id: router_id
:returns: namespace string.
Note: If the router is a DVR, then the SNAT namespace will be
provided. If the router does not exist, return None.
"""
router = self.routers.get(router_id)
if not router:
return
# For DVR, use SNAT namespace
# TODO(pcm): Use router object method to tell if DVR, when available
if router.router['distributed']:
return router.snat_namespace.name
else:
return router.ns_name
def get_router_based_iptables_manager(self, router):
"""Returns router based iptables manager
In DVR routers the IPsec VPN service should run inside
the snat namespace. So the iptables manager used for
snat namespace is different from the iptables manager
used for the qr namespace in a non dvr based router.
This function will check the router type and then will
return the right iptables manager. If DVR enabled router
it will return the snat_iptables_manager otherwise it will
return the legacy iptables_manager.
"""
# TODO(pcm): Use router object method to tell if DVR, when available
if router.router['distributed']:
return router.snat_iptables_manager
else:
return router.iptables_manager
def add_nat_rule(self, router_id, chain, rule, top=False):
"""Add nat rule in namespace.
:param router_id: router_id
:param chain: a string of chain name
:param rule: a string of rule
:param top: if top is true, the rule
will be placed on the top of chain
Note if there is no router, this method does nothing
"""
router = self.routers.get(router_id)
if not router:
return
iptables_manager = self.get_router_based_iptables_manager(router)
iptables_manager.ipv4['nat'].add_rule(chain, rule, top=top)
def remove_nat_rule(self, router_id, chain, rule, top=False):
"""Remove nat rule in namespace.
:param router_id: router_id
:param chain: a string of chain name
:param rule: a string of rule
:param top: unused
needed to have same argument with add_nat_rule
"""
router = self.routers.get(router_id)
if not router:
return
iptables_manager = self.get_router_based_iptables_manager(router)
iptables_manager.ipv4['nat'].remove_rule(chain, rule, top=top)
def iptables_apply(self, router_id):
"""Apply IPtables.
:param router_id: router_id
This method do nothing if there is no router
"""
router = self.routers.get(router_id)
if not router:
return
iptables_manager = self.get_router_based_iptables_manager(router)
iptables_manager.apply()
def _update_nat(self, vpnservice, func):
"""Setting up nat rule in iptables.
We need to setup nat rule for ipsec packet.
:param vpnservice: vpnservices
:param func: self.add_nat_rule or self.remove_nat_rule
"""
router_id = vpnservice['router_id']
for ipsec_site_connection in vpnservice['ipsec_site_connections']:
for local_cidr in ipsec_site_connection['local_cidrs']:
# This ipsec rule is not needed for ipv6.
if netaddr.IPNetwork(local_cidr).version == 6:
continue
for peer_cidr in ipsec_site_connection['peer_cidrs']:
func(router_id,
'POSTROUTING',
'-s %s -d %s -m policy '
'--dir out --pol ipsec '
'-j ACCEPT ' % (local_cidr, peer_cidr),
top=True)
self.iptables_apply(router_id)
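# As an example, for a hypothetical local CIDR 10.1.0.0/24 and peer CIDR
# 10.2.0.0/24, the rule added to the POSTROUTING chain above would read:
#
#   -s 10.1.0.0/24 -d 10.2.0.0/24 -m policy --dir out --pol ipsec -j ACCEPT
#
# so traffic that will be IPsec-protected is accepted before any SNAT
# rules can rewrite it.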
def vpnservice_updated(self, context, **kwargs):
"""Vpnservice updated rpc handler
VPN Service Driver will call this method
when vpnservices updated.
Then this method start sync with server.
"""
router = kwargs.get('router', None)
self.sync(context, [router] if router else [])
@abc.abstractmethod
def create_process(self, process_id, vpnservice, namespace):
pass
def ensure_process(self, process_id, vpnservice=None):
"""Ensuring process.
If the process doesn't exist, it will create process
and store it in self.process
"""
process = self.processes.get(process_id)
if not process or not process.namespace:
namespace = self.get_namespace(process_id)
process = self.create_process(
process_id,
vpnservice,
namespace)
self.processes[process_id] = process
elif vpnservice:
process.update_vpnservice(vpnservice)
return process
def create_router(self, router):
"""Handling create router event.
Agent calls this method, when the process namespace is ready.
Note: process_id == router_id == vpnservice_id
"""
process_id = router.router_id
self.routers[process_id] = router
if process_id in self.processes:
# In case the vpnservice was created
# before the router's namespace
process = self.processes[process_id]
self._update_nat(process.vpnservice, self.add_nat_rule)
# Don't run ipsec process for backup HA router
if router.router['ha'] and router.ha_state == 'backup':
return
process.enable()
def destroy_process(self, process_id):
"""Destroy process.
Disable the process, remove the nat rule, and remove the process
manager for the processes that no longer are running vpn service.
"""
if process_id in self.processes:
process = self.processes[process_id]
process.disable()
vpnservice = process.vpnservice
if vpnservice:
self._update_nat(vpnservice, self.remove_nat_rule)
del self.processes[process_id]
def destroy_router(self, process_id):
"""Handling destroy_router event.
Agent calls this method, when the process namespace
is deleted.
"""
self.destroy_process(process_id)
if process_id in self.routers:
del self.routers[process_id]
def get_process_status_cache(self, process):
if not self.process_status_cache.get(process.id):
self.process_status_cache[process.id] = {
'status': None,
'id': process.vpnservice['id'],
'updated_pending_status': False,
'ipsec_site_connections': {}}
return self.process_status_cache[process.id]
def is_status_updated(self, process, previous_status):
if process.updated_pending_status:
return True
if process.status != previous_status['status']:
return True
if (process.connection_status !=
previous_status['ipsec_site_connections']):
return True
def unset_updated_pending_status(self, process):
process.updated_pending_status = False
for connection_status in process.connection_status.values():
connection_status['updated_pending_status'] = False
def copy_process_status(self, process):
return {
'id': process.vpnservice['id'],
'status': process.status,
'updated_pending_status': process.updated_pending_status,
'ipsec_site_connections': copy.deepcopy(process.connection_status)
}
def update_downed_connections(self, process_id, new_status):
"""Update info to be reported, if connections just went down.
If there is no longer any information for a connection, because it
has been removed (e.g. due to an admin down of VPN service or IPSec
connection), but there was previous status information for the
connection, mark the connection as down for reporting purposes.
"""
if process_id in self.process_status_cache:
for conn in self.process_status_cache[process_id][IPSEC_CONNS]:
if conn not in new_status[IPSEC_CONNS]:
new_status[IPSEC_CONNS][conn] = {
'status': constants.DOWN,
'updated_pending_status': True
}
def should_be_reported(self, context, process):
if (context.is_admin or
process.vpnservice["tenant_id"] == context.tenant_id):
return True
def report_status(self, context):
status_changed_vpn_services = []
for process in self.processes.values():
if not self.should_be_reported(context, process):
continue
previous_status = self.get_process_status_cache(process)
if self.is_status_updated(process, previous_status):
new_status = self.copy_process_status(process)
self.update_downed_connections(process.id, new_status)
status_changed_vpn_services.append(new_status)
self.process_status_cache[process.id] = (
self.copy_process_status(process))
# We need to unset updated_pending_status after it
# has been reported to the server side
self.unset_updated_pending_status(process)
if status_changed_vpn_services:
self.agent_rpc.update_status(
context,
status_changed_vpn_services)
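# For illustration, each entry appended to status_changed_vpn_services
# has the shape produced by copy_process_status() and
# update_downed_connections() above (the ids are hypothetical):
#
#   {'id': '<vpnservice-id>',
#    'status': 'ACTIVE',
#    'updated_pending_status': True,
#    'ipsec_site_connections': {
#        '<connection-id>': {'status': 'DOWN',
#                            'updated_pending_status': True}}}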
@lockutils.synchronized('vpn-agent', 'neutron-')
def sync(self, context, routers):
"""Sync status with server side.
:param context: context object for RPC call
:param routers: Router objects which is created in this sync event
There could be many failure cases should be
considered including the followings.
1) Agent class restarted
2) Failure on process creation
3) VpnService is deleted during agent down
4) RPC failure
In order to handle, these failure cases,
This driver takes simple sync strategies.
"""
vpnservices = self.agent_rpc.get_vpn_services_on_host(
context, self.host)
router_ids = [vpnservice['router_id'] for vpnservice in vpnservices]
sync_router_ids = [router['id'] for router in routers]
self._sync_vpn_processes(vpnservices, sync_router_ids)
self._delete_vpn_processes(sync_router_ids, router_ids)
self._cleanup_stale_vpn_processes(router_ids)
self.report_status(context)
def _sync_vpn_processes(self, vpnservices, sync_router_ids):
# Ensure the ipsec process is enabled only for
# - the vpn services which are not yet in self.processes
# - vpn services whose router id is in 'sync_router_ids'
for vpnservice in vpnservices:
if vpnservice['router_id'] not in self.processes or (
vpnservice['router_id'] in sync_router_ids):
process = self.ensure_process(vpnservice['router_id'],
vpnservice=vpnservice)
self._update_nat(vpnservice, self.add_nat_rule)
router = self.routers.get(vpnservice['router_id'])
if not router:
continue
# For HA router, spawn vpn process on master router
# and terminate vpn process on backup router
if router.router['ha'] and router.ha_state == 'backup':
process.disable()
else:
process.update()
def _delete_vpn_processes(self, sync_router_ids, vpn_router_ids):
# Delete any IPSec processes that are
# associated with routers, but are not running the VPN service.
for process_id in sync_router_ids:
if process_id not in vpn_router_ids:
self.destroy_process(process_id)
def _cleanup_stale_vpn_processes(self, vpn_router_ids):
# Delete any IPSec processes running
# VPN that do not have an associated router.
process_ids = [pid for pid in self.processes
if pid not in vpn_router_ids]
for process_id in process_ids:
self.destroy_process(process_id)
class OpenSwanDriver(IPsecDriver):
def create_process(self, process_id, vpnservice, namespace):
return OpenSwanProcess(
self.conf,
process_id,
vpnservice,
namespace)
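# To select this driver, the VPN agent configuration points
# vpn_device_driver at the class above, e.g. (illustrative fragment):
#
#   [vpnagent]
#   vpn_device_driver = neutron_vpnaas.services.vpn.device_drivers.ipsec.OpenSwanDriver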
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/strongswan_ipsec.py 0000664 0005670 0005671 00000014403 12701407726 032277 0 ustar jenkins jenkins 0000000 0000000 # Copyright (c) 2015 Canonical, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
from oslo_config import cfg
from neutron.agent.linux import ip_lib
from neutron.plugins.common import constants
from neutron_vpnaas._i18n import _
from neutron_vpnaas.services.vpn.device_drivers import ipsec
TEMPLATE_PATH = os.path.dirname(os.path.abspath(__file__))
strongswan_opts = [
cfg.StrOpt(
'ipsec_config_template',
default=os.path.join(
TEMPLATE_PATH,
'template/strongswan/ipsec.conf.template'),
help=_('Template file for ipsec configuration.')),
cfg.StrOpt(
'strongswan_config_template',
default=os.path.join(
TEMPLATE_PATH,
'template/strongswan/strongswan.conf.template'),
help=_('Template file for strongswan configuration.')),
cfg.StrOpt(
'ipsec_secret_template',
default=os.path.join(
TEMPLATE_PATH,
'template/strongswan/ipsec.secret.template'),
help=_('Template file for ipsec secret configuration.')),
cfg.StrOpt(
'default_config_area',
default=os.path.join(
TEMPLATE_PATH,
'/etc/strongswan.d'),
help=_('The area where default StrongSwan configuration '
'files are located.'))
]
cfg.CONF.register_opts(strongswan_opts, 'strongswan')
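# The options registered above can be overridden in the agent's config
# file. An illustrative fragment (the template path is a hypothetical
# example, not a default asserted by this module):
#
#   [strongswan]
#   ipsec_config_template = /etc/neutron/my-ipsec.conf.template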
NS_WRAPPER = 'neutron-vpn-netns-wrapper'
class StrongSwanProcess(ipsec.BaseSwanProcess):
# ROUTED means route created. (only for auto=route mode)
# CONNECTING means route created, connection tunnel is negotiating.
# INSTALLED means route created,
# also connection tunnel installed. (traffic can pass)
DIALECT_MAP = dict(ipsec.BaseSwanProcess.DIALECT_MAP)
STATUS_DICT = {
'ROUTED': constants.DOWN,
'CONNECTING': constants.DOWN,
'INSTALLED': constants.ACTIVE
}
STATUS_RE = '([a-f0-9\-]+).* (ROUTED|CONNECTING|INSTALLED)'
STATUS_NOT_RUNNING_RE = 'Command:.*ipsec.*status.*Exit code: [1|3] '
def __init__(self, conf, process_id, vpnservice, namespace):
self.DIALECT_MAP['v1'] = 'ikev1'
self.DIALECT_MAP['v2'] = 'ikev2'
super(StrongSwanProcess, self).__init__(conf, process_id,
vpnservice, namespace)
def _check_status_line(self, line):
"""Parse a line and search for status information.
If a given line contains status information for a connection,
extract the status and mark the connection as ACTIVE or DOWN
according to the STATUS_MAP.
"""
m = self.STATUS_PATTERN.search(line)
if m:
connection_id = m.group(1)
status = self.STATUS_MAP[m.group(2)]
return connection_id, status
return None, None
def _execute(self, cmd, check_exit_code=True, extra_ok_codes=None):
"""Execute command on namespace.
This execute is wrapped by namespace wrapper.
The namespace wrapper will bind /etc/ and /var/run
"""
ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
return ip_wrapper.netns.execute(
[NS_WRAPPER,
'--mount_paths=/etc:%s/etc,/var/run:%s/var/run' % (
self.config_dir, self.config_dir),
'--cmd=%s' % ','.join(cmd)],
check_exit_code=check_exit_code,
extra_ok_codes=extra_ok_codes)
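# For reference, a call such as self._execute(['ipsec', 'status']) is
# roughly expanded into the following command inside the router's
# namespace (the config_dir value shown is a hypothetical example):
#
#   neutron-vpn-netns-wrapper \
#       --mount_paths=/etc:<config_dir>/etc,/var/run:<config_dir>/var/run \
#       --cmd=ipsec,status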
def copy_and_overwrite(self, from_path, to_path):
if os.path.exists(to_path):
shutil.rmtree(to_path)
shutil.copytree(from_path, to_path)
def ensure_configs(self):
"""Generate config files which are needed for StrongSwan.
If there is no directory, this function will create
dirs.
"""
self.ensure_config_dir(self.vpnservice)
self.ensure_config_file(
'ipsec.conf',
cfg.CONF.strongswan.ipsec_config_template,
self.vpnservice)
self.ensure_config_file(
'strongswan.conf',
cfg.CONF.strongswan.strongswan_config_template,
self.vpnservice)
self.ensure_config_file(
'ipsec.secrets',
cfg.CONF.strongswan.ipsec_secret_template,
self.vpnservice,
0o600)
self.copy_and_overwrite(cfg.CONF.strongswan.default_config_area,
self._get_config_filename('strongswan.d'))
def get_status(self):
return self._execute([self.binary, 'status'],
extra_ok_codes=[1, 3])
def restart(self):
"""Restart the process."""
self.reload()
def reload(self):
"""Reload the process.
Sends a USR1 signal to ipsec starter which in turn reloads the whole
configuration on the running IKE daemon charon based on the actual
ipsec.conf. Currently established connections are not affected by
configuration changes.
"""
self._execute([self.binary, 'reload'])
def start(self):
"""Start the process for only auto=route mode now.
Note: if there is no namespace yet,
just do nothing, and wait next event.
"""
if not self.namespace:
return
self._execute([self.binary, 'start'])
# initiate ipsec connection
for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
self._execute([self.binary, 'up', ipsec_site_conn['id']])
def stop(self):
self._execute([self.binary, 'stop'])
self.connection_status = {}
class StrongSwanDriver(ipsec.IPsecDriver):
def create_process(self, process_id, vpnservice, namespace):
return StrongSwanProcess(
self.conf,
process_id,
vpnservice,
namespace)
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/vyatta_ipsec.py 0000664 0005670 0005671 00000025203 12701407726 031402 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2015 Brocade Communications System, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import pprint
from networking_brocade.vyatta.common import exceptions as v_exc
from networking_brocade.vyatta.common import vrouter_config
from networking_brocade.vyatta.vpn import config as vyatta_vpn_config
from neutron.common import rpc as n_rpc
from neutron import context as n_ctx
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_service import loopingcall
from oslo_service import periodic_task
import six
from neutron_vpnaas._i18n import _, _LE, _LW
from neutron_vpnaas.services.vpn.common import topics
from neutron_vpnaas.services.vpn import device_drivers
LOG = logging.getLogger(__name__)
_KEY_CONNECTIONS = 'ipsec_site_connections'
_KEY_IKEPOLICY = 'ikepolicy'
_KEY_ESPPOLICY = 'ipsecpolicy'
class _DriverRPCEndpoint(object):
"""
VPN device driver RPC endpoint (server > agent)
history
1.0 Initial version
"""
target = messaging.Target(version='1.0')
def __init__(self, driver):
self.driver = driver
def vpnservice_updated(self, context, **kwargs):
self.driver.sync(context, [])
class NeutronServerAPI(object):
"""
VPN service driver RPC endpoint (agent > server)
"""
def __init__(self, topic):
target = messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
def get_vpn_services_on_host(self, context, host):
# make RPC call to neutron server
cctxt = self.client.prepare()
data = cctxt.call(context, 'get_vpn_services_on_host', host=host)
vpn_services = list()
for svc in data:
try:
for conn in svc[_KEY_CONNECTIONS]:
vyatta_vpn_config.validate_svc_connection(conn)
except v_exc.InvalidVPNServiceError:
LOG.error(_LE('Invalid or incomplete VPN service data: '
'id={id}').format(id=svc.get('id')))
continue
vpn_services.append(svc)
# return transformed data to caller
return vpn_services
def update_status(self, context, status):
cctxt = self.client.prepare()
cctxt.cast(context, 'update_status', status=status)
class VyattaIPSecDriver(device_drivers.DeviceDriver):
"""
Vyatta VPN device driver
"""
rpc_endpoint_factory = _DriverRPCEndpoint
def __init__(self, vpn_service, host):
super(VyattaIPSecDriver, self).__init__(vpn_service, host)
self.vpn_service = vpn_service
self.host = host
# register RPC endpoint
conn = n_rpc.create_connection()
node_topic = '%s.%s' % (topics.BROCADE_IPSEC_AGENT_TOPIC,
self.host)
endpoints = [self.rpc_endpoint_factory(self)]
conn.create_consumer(node_topic, endpoints, fanout=False)
conn.consume_in_threads()
# initialize agent to server RPC link
self.server_api = NeutronServerAPI(
topics.BROCADE_IPSEC_DRIVER_TOPIC)
# initialize VPN service cache (to keep service state)
self._svc_cache = list()
self._router_resources_cache = dict()
# Set up the periodic task. All periodic tasks require a fully
# configured device driver. It will be called asynchronously, and
# soon, so it must be done last, once all configuration is complete.
self._periodic_tasks = periodic = _VyattaPeriodicTasks(self)
loop = loopingcall.DynamicLoopingCall(periodic)
loop.start(initial_delay=5)
def sync(self, context, processes):
"""
Called by _DriverRPCEndpoint instance.
"""
svc_update = self.server_api.get_vpn_services_on_host(
context, self.host)
to_del, to_change, to_add = self._svc_diff(
self._svc_cache, svc_update)
for svc in to_del:
resources = self.get_router_resources(svc['router_id'])
self._svc_delete(svc, resources)
for old, new in to_change:
resources = self.get_router_resources(old['router_id'])
self._svc_delete(old, resources)
self._svc_add(new, resources)
for svc in to_add:
resources = self.get_router_resources(svc['router_id'])
self._svc_add(svc, resources)
self._svc_cache = svc_update
def create_router(self, router):
router_id = router.router_id
vrouter = self.vpn_service.get_router_client(router_id)
config_raw = vrouter.get_vrouter_configuration()
resources = self.get_router_resources(router_id)
with resources.make_patch() as patch:
vrouter_svc = vyatta_vpn_config.parse_vrouter_config(
vrouter_config.parse_config(config_raw), patch)
for svc in vrouter_svc:
svc['router_id'] = router_id
self._svc_cache.extend(vrouter_svc)
def destroy_router(self, router_id):
to_del = list()
for idx, svc in enumerate(self._svc_cache):
if svc['router_id'] != router_id:
continue
resources = self.get_router_resources(svc['router_id'])
self._svc_delete(svc, resources)
to_del.insert(0, idx)
for idx in to_del:
del self._svc_cache[idx]
def _svc_add(self, svc, resources):
vrouter = self.vpn_service.get_router_client(svc['router_id'])
for conn in svc[_KEY_CONNECTIONS]:
with resources.make_patch() as patch:
iface = self._get_router_gw_iface(vrouter, svc['router_id'])
batch = vyatta_vpn_config.connect_setup_commands(
vrouter, iface, svc, conn, patch)
vrouter.exec_cmd_batch(batch)
def _svc_delete(self, svc, resources):
vrouter = self.vpn_service.get_router_client(svc['router_id'])
for conn in svc[_KEY_CONNECTIONS]:
with resources.make_patch() as patch:
iface = self._get_router_gw_iface(vrouter, svc['router_id'])
batch = vyatta_vpn_config.connect_remove_commands(
vrouter, iface, svc, conn, patch)
vrouter.exec_cmd_batch(batch)
def _svc_diff(self, svc_old, svc_new):
state_key = 'admin_state_up'
old_idnr = set(x['id'] for x in svc_old)
new_idnr = set(x['id'] for x in svc_new if x[state_key])
to_del = old_idnr - new_idnr
to_add = new_idnr - old_idnr
possible_change = old_idnr & new_idnr
svc_old = dict((x['id'], x) for x in svc_old)
svc_new = dict((x['id'], x) for x in svc_new)
to_del = [svc_old[x] for x in to_del]
to_add = [svc_new[x] for x in to_add]
to_change = list()
for idnr in possible_change:
old = svc_old[idnr]
new = svc_new[idnr]
assert old['router_id'] == new['router_id']
vrouter = self.vpn_service.get_router_client(old['router_id'])
gw_iface = self._get_router_gw_iface(vrouter, old['router_id'])
if vyatta_vpn_config.compare_vpn_services(
vrouter, gw_iface, old, new):
continue
to_change.append((old, new))
return to_del, to_change, to_add
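# A small illustrative example of the diff semantics above (ids are
# hypothetical): with svc_old = {'a', 'b'} and svc_new = {'b', 'c'}
# (all admin-up), the method returns to_del = [service 'a'],
# to_add = [service 'c'], and service 'b' lands in to_change only if
# compare_vpn_services() reports that its configuration differs.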
def get_active_services(self):
return tuple(self._svc_cache)
def get_router_resources(self, router_id):
try:
res = self._router_resources_cache[router_id]
except KeyError:
res = vyatta_vpn_config.RouterResources(router_id)
self._router_resources_cache[router_id] = res
return res
def update_status(self, ctx, stat):
LOG.debug('STAT: %s', pprint.pformat(stat))
self.server_api.update_status(ctx, stat)
def _get_router_gw_iface(self, vrouter, router_id):
router = self.vpn_service.get_router(router_id)
try:
gw_interface = vrouter.get_ethernet_if_id(
router['gw_port']['mac_address'])
except KeyError:
raise v_exc.InvalidL3AgentStateError(description=_(
'Router id={0} has no external gateway.').format(
router['id']))
return gw_interface
class _VyattaPeriodicTasks(periodic_task.PeriodicTasks):
def __init__(self, driver):
super(_VyattaPeriodicTasks, self).__init__(cfg.CONF)
self.driver = driver
def __call__(self):
ctx_admin = n_ctx.get_admin_context()
return self.run_periodic_tasks(ctx_admin)
@periodic_task.periodic_task(spacing=5)
def grab_vpn_status(self, ctx):
LOG.debug('VPN device driver periodic task: grab_vpn_status.')
svc_by_vrouter = collections.defaultdict(list)
for svc in self.driver.get_active_services():
svc_by_vrouter[svc['router_id']].append(svc)
status = list()
for router_id, svc_set in six.iteritems(svc_by_vrouter):
vrouter = self.driver.vpn_service.get_router_client(router_id)
resources = self.driver.get_router_resources(router_id)
try:
ipsec_sa = vrouter.get_vpn_ipsec_sa()
except v_exc.VRouterOperationError as e:
LOG.warning(_LW('Failed to fetch tunnel stats from router '
'{0}: {1}').format(router_id, unicode(e)))
continue
conn_ok = vyatta_vpn_config.parse_vpn_connections(
ipsec_sa, resources)
for svc in svc_set:
svc_ok = True
conn_stat = dict()
for conn in svc[_KEY_CONNECTIONS]:
ok = conn['id'] in conn_ok
svc_ok = svc_ok and ok
conn_stat[conn['id']] = {
'status': 'ACTIVE' if ok else 'DOWN',
'updated_pending_status': True
}
status.append({
'id': svc['id'],
'status': 'ACTIVE' if svc_ok else 'DOWN',
'updated_pending_status': True,
'ipsec_site_connections': conn_stat
})
self.driver.update_status(ctx, status)
neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/device_drivers/fedora_strongswan_ipsec.py 0000664 0005670 0005671 00000007610 12701407726 033621 0 ustar jenkins jenkins 0000000 0000000 # Copyright (c) 2015 IBM, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
from neutron_vpnaas.services.vpn.device_drivers import ipsec
from neutron_vpnaas.services.vpn.device_drivers import strongswan_ipsec
TEMPLATE_PATH = os.path.dirname(os.path.abspath(__file__))
cfg.CONF.set_default(name='default_config_area',
default=os.path.join(
TEMPLATE_PATH,
'/usr/share/strongswan/templates/'
'config/strongswan.d'),
group='strongswan')
class FedoraStrongSwanProcess(strongswan_ipsec.StrongSwanProcess):
binary = 'strongswan'
CONFIG_DIRS = [
'var/run',
'log',
'etc',
'etc/strongswan/ipsec.d/aacerts',
'etc/strongswan/ipsec.d/acerts',
'etc/strongswan/ipsec.d/cacerts',
'etc/strongswan/ipsec.d/certs',
'etc/strongswan/ipsec.d/crls',
'etc/strongswan/ipsec.d/ocspcerts',
'etc/strongswan/ipsec.d/policies',
'etc/strongswan/ipsec.d/private',
'etc/strongswan/ipsec.d/reqs',
'etc/pki/nssdb/'
]
STATUS_NOT_RUNNING_RE = ('Command:.*[ipsec|strongswan].*status.*'
'Exit code: [1|3] ')
def __init__(self, conf, process_id, vpnservice, namespace):
super(FedoraStrongSwanProcess, self).__init__(conf, process_id,
vpnservice, namespace)
def ensure_configs(self):
"""Generate config files which are needed for StrongSwan.
If there is no directory, this function will create
dirs.
"""
self.ensure_config_dir(self.vpnservice)
self.ensure_config_file(
'ipsec.conf',
cfg.CONF.strongswan.ipsec_config_template,
self.vpnservice)
self.ensure_config_file(
'strongswan.conf',
cfg.CONF.strongswan.strongswan_config_template,
self.vpnservice)
self.ensure_config_file(
'ipsec.secrets',
cfg.CONF.strongswan.ipsec_secret_template,
self.vpnservice,
0o600)
self.copy_and_overwrite(cfg.CONF.strongswan.default_config_area,
self._get_config_filename('strongswan.d'))
# Fedora uses /usr/share/strongswan/templates/config/ as strongswan
# template directory. But /usr/share/strongswan/templates/config/
# strongswan.d does not include charon. Those configuration files
# are in /usr/share/strongswan/templates/config/plugins directory.
charon_dir = os.path.join(
cfg.CONF.strongswan.default_config_area,
'charon')
if not os.path.exists(charon_dir):
plugins_dir = os.path.join(
cfg.CONF.strongswan.default_config_area, '../plugins')
self.copy_and_overwrite(
plugins_dir,
self._get_config_filename('strongswan.d/charon'))
def _get_config_filename(self, kind):
config_dir = '%s/strongswan' % self.etc_dir
return os.path.join(config_dir, kind)
class FedoraStrongSwanDriver(ipsec.IPsecDriver):
def create_process(self, process_id, vpnservice, namespace):
return FedoraStrongSwanProcess(
self.conf,
process_id,
vpnservice,
namespace)
neutron-vpnaas-8.0.0/neutron_vpnaas/services/__init__.py 0000664 0005670 0005671 00000000000 12701407726 024632 0 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/cmd/ 0000775 0005670 0005671 00000000000 12701410103 021432 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/cmd/__init__.py 0000664 0005670 0005671 00000000000 12701407726 023552 0 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/cmd/eventlet/ 0000775 0005670 0005671 00000000000 12701410103 023260 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/cmd/eventlet/vyatta_agent.py 0000664 0005670 0005671 00000001350 12701407726 026340 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2015 Brocade Communications System, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron_vpnaas.services.vpn import vyatta_agent
def main():
vyatta_agent.main()
neutron-vpnaas-8.0.0/neutron_vpnaas/cmd/eventlet/__init__.py 0000664 0005670 0005671 00000001145 12701407726 025413 0 ustar jenkins jenkins 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
eventlet.monkey_patch()
neutron-vpnaas-8.0.0/neutron_vpnaas/cmd/eventlet/agent.py 0000664 0005670 0005671 00000001212 12701407726 024745 0 ustar jenkins jenkins 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_vpnaas.services.vpn import agent
def main():
agent.main()
neutron-vpnaas-8.0.0/neutron_vpnaas/_i18n.py 0000664 0005670 0005671 00000002522 12701407726 022201 0 ustar jenkins jenkins 0000000 0000000 # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_i18n
DOMAIN = "neutron_vpnaas"
_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
# The primary translation function using the well-known name "_"
_ = _translators.primary
# The contextual translation function using the name "_C"
_C = _translators.contextual_form
# The plural translation function using the name "_P"
_P = _translators.plural_form
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
def get_available_languages():
return oslo_i18n.get_available_languages(DOMAIN)
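# Illustrative usage sketch (not part of the original module; assumes a
# module-level LOG as in the device drivers above):
#
#   from neutron_vpnaas._i18n import _, _LW
#   msg = _('Peer address cannot be resolved')          # user-facing text
#   LOG.warning(_LW('Failed to fetch tunnel stats'))    # warning-level log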
neutron-vpnaas-8.0.0/neutron_vpnaas/extensions/ 0000775 0005670 0005671 00000000000 12701410103 023066 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/extensions/vpnaas.py 0000664 0005670 0005671 00000052214 12701407726 024755 0 ustar jenkins jenkins 0000000 0000000 # (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron.plugins.common import constants as nconstants
from neutron.services import service_base
from neutron_lib import exceptions as nexception
from neutron_vpnaas._i18n import _
class VPNServiceNotFound(nexception.NotFound):
message = _("VPNService %(vpnservice_id)s could not be found")
class IPsecSiteConnectionNotFound(nexception.NotFound):
message = _("ipsec_site_connection %(ipsec_site_conn_id)s not found")
class IPsecSiteConnectionDpdIntervalValueError(nexception.InvalidInput):
message = _("ipsec_site_connection %(attr)s is "
"equal to or less than dpd_interval")
class IPsecSiteConnectionMtuError(nexception.InvalidInput):
message = _("ipsec_site_connection MTU %(mtu)d is too small "
"for ipv%(version)s")
class IKEPolicyNotFound(nexception.NotFound):
message = _("IKEPolicy %(ikepolicy_id)s could not be found")
class IPsecPolicyNotFound(nexception.NotFound):
message = _("IPsecPolicy %(ipsecpolicy_id)s could not be found")
class IKEPolicyInUse(nexception.InUse):
message = _("IKEPolicy %(ikepolicy_id)s is in use by existing "
"IPsecSiteConnection and can't be updated or deleted")
class VPNServiceInUse(nexception.InUse):
message = _("VPNService %(vpnservice_id)s is still in use")
class SubnetInUseByVPNService(nexception.InUse):
message = _("Subnet %(subnet_id)s is used by VPNService %(vpnservice_id)s")
class SubnetInUseByEndpointGroup(nexception.InUse):
message = _("Subnet %(subnet_id)s is used by endpoint group %(group_id)s")
class VPNStateInvalidToUpdate(nexception.BadRequest):
message = _("Invalid state %(state)s of vpnaas resource %(id)s"
" for updating")
class IPsecPolicyInUse(nexception.InUse):
message = _("IPsecPolicy %(ipsecpolicy_id)s is in use by existing "
"IPsecSiteConnection and can't be updated or deleted")
class DeviceDriverImportError(nexception.NeutronException):
message = _("Can not load driver :%(device_driver)s")
class SubnetIsNotConnectedToRouter(nexception.BadRequest):
message = _("Subnet %(subnet_id)s is not "
"connected to Router %(router_id)s")
class RouterIsNotExternal(nexception.BadRequest):
message = _("Router %(router_id)s has no external network gateway set")
class VPNPeerAddressNotResolved(nexception.InvalidInput):
message = _("Peer address %(peer_address)s cannot be resolved")
class ExternalNetworkHasNoSubnet(nexception.BadRequest):
message = _("Router's %(router_id)s external network has "
"no %(ip_version)s subnet")
class VPNEndpointGroupNotFound(nexception.NotFound):
message = _("Endpoint group %(endpoint_group_id)s could not be found")
class InvalidEndpointInEndpointGroup(nexception.InvalidInput):
message = _("Endpoint '%(endpoint)s' is invalid for group "
"type '%(group_type)s': %(why)s")
class MissingEndpointForEndpointGroup(nexception.BadRequest):
message = _("No endpoints specified for endpoint group '%(group)s'")
class NonExistingSubnetInEndpointGroup(nexception.InvalidInput):
message = _("Subnet %(subnet)s in endpoint group does not exist")
class MixedIPVersionsForIPSecEndpoints(nexception.BadRequest):
message = _("Endpoints in group %(group)s do not have the same IP "
"version, as required for IPSec site-to-site connection")
class MixedIPVersionsForPeerCidrs(nexception.BadRequest):
message = _("Peer CIDRs do not have the same IP version, as required "
"for IPSec site-to-site connection")
class MixedIPVersionsForIPSecConnection(nexception.BadRequest):
message = _("IP versions are not compatible between peer and local "
"endpoints")
class InvalidEndpointGroup(nexception.BadRequest):
message = _("Endpoint group%(suffix)s %(which)s cannot be specified, "
"when VPN Service has subnet specified")
class WrongEndpointGroupType(nexception.BadRequest):
message = _("Endpoint group %(which)s type is '%(group_type)s' and "
"should be '%(expected)s'")
class PeerCidrsInvalid(nexception.BadRequest):
message = _("Peer CIDRs cannot be specified, when using endpoint "
"groups")
class MissingPeerCidrs(nexception.BadRequest):
message = _("Missing peer CIDRs for IPsec site-to-site connection")
class MissingRequiredEndpointGroup(nexception.BadRequest):
message = _("Missing endpoint group%(suffix)s %(which)s for IPSec "
"site-to-site connection")
class EndpointGroupInUse(nexception.BadRequest):
message = _("Endpoint group %(group_id)s is in use and cannot be deleted")
def _validate_subnet_list_or_none(data, key_specs=None):
if data is not None:
attr._validate_subnet_list(data, key_specs)
attr.validators['type:subnet_list_or_none'] = _validate_subnet_list_or_none
vpn_supported_initiators = ['bi-directional', 'response-only']
vpn_supported_encryption_algorithms = ['3des', 'aes-128',
'aes-192', 'aes-256']
vpn_dpd_supported_actions = [
'hold', 'clear', 'restart', 'restart-by-peer', 'disabled'
]
vpn_supported_transform_protocols = ['esp', 'ah', 'ah-esp']
vpn_supported_encapsulation_mode = ['tunnel', 'transport']
# TODO(nati): add kilobytes when we support it
vpn_supported_lifetime_units = ['seconds']
vpn_supported_pfs = ['group2', 'group5', 'group14']
vpn_supported_ike_versions = ['v1', 'v2']
vpn_supported_auth_mode = ['psk']
vpn_supported_auth_algorithms = ['sha1']
vpn_supported_phase1_negotiation_mode = ['main']
vpn_lifetime_limits = (60, attr.UNLIMITED)
positive_int = (0, attr.UNLIMITED)
RESOURCE_ATTRIBUTE_MAP = {
'vpnservices': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'subnet_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None},
'router_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': attr.convert_to_boolean,
'is_visible': True},
'external_v4_ip': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'external_v6_ip': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True}
},
'ipsec_site_connections': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'peer_address': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True},
'peer_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True},
'peer_cidrs': {'allow_post': True, 'allow_put': True,
'convert_to': attr.convert_to_list,
'validate': {'type:subnet_list_or_none': None},
'is_visible': True,
'default': None},
'local_ep_group_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None},
'peer_ep_group_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None},
'route_mode': {'allow_post': False, 'allow_put': False,
'default': 'static',
'is_visible': True},
'mtu': {'allow_post': True, 'allow_put': True,
'default': '1500',
'validate': {'type:range': positive_int},
'convert_to': attr.convert_to_int,
'is_visible': True},
'initiator': {'allow_post': True, 'allow_put': True,
'default': 'bi-directional',
'validate': {'type:values': vpn_supported_initiators},
'is_visible': True},
'auth_mode': {'allow_post': False, 'allow_put': False,
'default': 'psk',
'validate': {'type:values': vpn_supported_auth_mode},
'is_visible': True},
'psk': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True},
'dpd': {'allow_post': True, 'allow_put': True,
'convert_to': attr.convert_none_to_empty_dict,
'is_visible': True,
'default': {},
'validate': {
'type:dict_or_empty': {
'actions': {
'type:values': vpn_dpd_supported_actions,
},
'interval': {
'type:range': positive_int
},
'timeout': {
'type:range': positive_int
}}}},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': attr.convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'vpnservice_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'ikepolicy_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'ipsecpolicy_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True}
},
'ipsecpolicies': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'transform_protocol': {
'allow_post': True,
'allow_put': True,
'default': 'esp',
'validate': {
'type:values': vpn_supported_transform_protocols},
'is_visible': True},
'auth_algorithm': {
'allow_post': True,
'allow_put': True,
'default': 'sha1',
'validate': {
'type:values': vpn_supported_auth_algorithms
},
'is_visible': True},
'encryption_algorithm': {
'allow_post': True,
'allow_put': True,
'default': 'aes-128',
'validate': {
'type:values': vpn_supported_encryption_algorithms
},
'is_visible': True},
'encapsulation_mode': {
'allow_post': True,
'allow_put': True,
'default': 'tunnel',
'validate': {
'type:values': vpn_supported_encapsulation_mode
},
'is_visible': True},
'lifetime': {'allow_post': True, 'allow_put': True,
'convert_to': attr.convert_none_to_empty_dict,
'default': {},
'validate': {
'type:dict_or_empty': {
'units': {
'type:values': vpn_supported_lifetime_units,
},
'value': {
'type:range': vpn_lifetime_limits
}}},
'is_visible': True},
'pfs': {'allow_post': True, 'allow_put': True,
'default': 'group5',
'validate': {'type:values': vpn_supported_pfs},
'is_visible': True}
},
'ikepolicies': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'auth_algorithm': {'allow_post': True, 'allow_put': True,
'default': 'sha1',
'validate': {
'type:values': vpn_supported_auth_algorithms},
'is_visible': True},
'encryption_algorithm': {
'allow_post': True, 'allow_put': True,
'default': 'aes-128',
'validate': {'type:values': vpn_supported_encryption_algorithms},
'is_visible': True},
'phase1_negotiation_mode': {
'allow_post': True, 'allow_put': True,
'default': 'main',
'validate': {
'type:values': vpn_supported_phase1_negotiation_mode
},
'is_visible': True},
'lifetime': {'allow_post': True, 'allow_put': True,
'convert_to': attr.convert_none_to_empty_dict,
'default': {},
'validate': {
'type:dict_or_empty': {
'units': {
'type:values': vpn_supported_lifetime_units,
},
'value': {
'type:range': vpn_lifetime_limits,
}}},
'is_visible': True},
'ike_version': {'allow_post': True, 'allow_put': True,
'default': 'v1',
'validate': {
'type:values': vpn_supported_ike_versions},
'is_visible': True},
'pfs': {'allow_post': True, 'allow_put': True,
'default': 'group5',
'validate': {'type:values': vpn_supported_pfs},
'is_visible': True}
},
}
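# For illustration, a create request body that satisfies the
# 'vpnservices' attribute map above could look like this (all ids are
# hypothetical):
#
#   {'vpnservice': {'tenant_id': '<tenant-id>',
#                   'name': 'myvpn',
#                   'description': '',
#                   'router_id': '<router-id>',
#                   'subnet_id': None,
#                   'admin_state_up': True}}
#
# id, status, external_v4_ip and external_v6_ip have allow_post and
# allow_put set to False, so they are read-only and filled in by the
# plugin.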
class Vpnaas(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "VPN service"
@classmethod
def get_alias(cls):
return "vpnaas"
@classmethod
def get_description(cls):
return "Extension for VPN service"
@classmethod
def get_namespace(cls):
return "https://wiki.openstack.org/Neutron/VPNaaS"
@classmethod
def get_updated(cls):
return "2013-05-29T10:00:00-00:00"
@classmethod
def get_resources(cls):
special_mappings = {'ikepolicies': 'ikepolicy',
'ipsecpolicies': 'ipsecpolicy'}
plural_mappings = resource_helper.build_plural_mappings(
special_mappings, RESOURCE_ATTRIBUTE_MAP)
plural_mappings['peer_cidrs'] = 'peer_cidr'
attr.PLURALS.update(plural_mappings)
return resource_helper.build_resource_info(plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
nconstants.VPN,
register_quota=True,
translate_name=True)
@classmethod
def get_plugin_interface(cls):
return VPNPluginBase
def update_attributes_map(self, attributes):
super(Vpnaas, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
@six.add_metaclass(abc.ABCMeta)
class VPNPluginBase(service_base.ServicePluginBase):
def get_plugin_name(self):
return nconstants.VPN
def get_plugin_type(self):
return nconstants.VPN
def get_plugin_description(self):
return 'VPN service plugin'
@abc.abstractmethod
def get_vpnservices(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def get_vpnservice(self, context, vpnservice_id, fields=None):
pass
@abc.abstractmethod
def create_vpnservice(self, context, vpnservice):
pass
@abc.abstractmethod
def update_vpnservice(self, context, vpnservice_id, vpnservice):
pass
@abc.abstractmethod
def delete_vpnservice(self, context, vpnservice_id):
pass
@abc.abstractmethod
def get_ipsec_site_connections(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def get_ipsec_site_connection(self, context,
ipsecsite_conn_id, fields=None):
pass
@abc.abstractmethod
def create_ipsec_site_connection(self, context, ipsec_site_connection):
pass
@abc.abstractmethod
def update_ipsec_site_connection(self, context,
ipsecsite_conn_id, ipsec_site_connection):
pass
@abc.abstractmethod
def delete_ipsec_site_connection(self, context, ipsecsite_conn_id):
pass
@abc.abstractmethod
def get_ikepolicy(self, context, ikepolicy_id, fields=None):
pass
@abc.abstractmethod
def get_ikepolicies(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def create_ikepolicy(self, context, ikepolicy):
pass
@abc.abstractmethod
def update_ikepolicy(self, context, ikepolicy_id, ikepolicy):
pass
@abc.abstractmethod
def delete_ikepolicy(self, context, ikepolicy_id):
pass
@abc.abstractmethod
def get_ipsecpolicies(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def get_ipsecpolicy(self, context, ipsecpolicy_id, fields=None):
pass
@abc.abstractmethod
def create_ipsecpolicy(self, context, ipsecpolicy):
pass
@abc.abstractmethod
def update_ipsecpolicy(self, context, ipsecpolicy_id, ipsecpolicy):
pass
@abc.abstractmethod
def delete_ipsecpolicy(self, context, ipsecpolicy_id):
pass
neutron-vpnaas-8.0.0/neutron_vpnaas/extensions/vpn_endpoint_groups.py 0000664 0005670 0005671 00000010047 12701407726 027565 0 ustar jenkins jenkins 0000000 0000000 # (c) Copyright 2015 NEC Corporation, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron.plugins.common import constants as nconstants
from neutron_vpnaas.services.vpn.common import constants
RESOURCE_ATTRIBUTE_MAP = {
'endpoint_groups': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': attr.NAME_MAX_LEN},
'is_visible': True, 'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': attr.DESCRIPTION_MAX_LEN},
'is_visible': True, 'default': ''},
'type': {'allow_post': True, 'allow_put': False,
'validate': {
'type:values': constants.VPN_SUPPORTED_ENDPOINT_TYPES,
},
'is_visible': True},
'endpoints': {'allow_post': True, 'allow_put': False,
'convert_to': attr.convert_to_list,
'is_visible': True},
},
}
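# For illustration, an endpoint group create request matching the map
# above could look like the following. The 'type' value must be one of
# constants.VPN_SUPPORTED_ENDPOINT_TYPES; 'cidr' is shown here only as an
# assumed example, and the tenant id is hypothetical:
#
#   {'endpoint_group': {'tenant_id': '<tenant-id>',
#                       'name': 'peers',
#                       'description': '',
#                       'type': 'cidr',
#                       'endpoints': ['10.2.0.0/24', '10.3.0.0/24']}}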
class Vpn_endpoint_groups(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "VPN Endpoint Groups"
@classmethod
def get_alias(cls):
return "vpn-endpoint-groups"
@classmethod
def get_description(cls):
return "VPN endpoint groups support"
@classmethod
def get_updated(cls):
return "2015-08-04T10:00:00-00:00"
@classmethod
def get_resources(cls):
plural_mappings = resource_helper.build_plural_mappings(
{}, RESOURCE_ATTRIBUTE_MAP)
attr.PLURALS.update(plural_mappings)
return resource_helper.build_resource_info(plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
nconstants.VPN,
register_quota=True,
translate_name=True)
def get_required_extensions(self):
return ["vpnaas"]
def update_attributes_map(self, attributes):
super(Vpn_endpoint_groups, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
@six.add_metaclass(abc.ABCMeta)
class VPNEndpointGroupsPluginBase(object):
@abc.abstractmethod
def create_endpoint_group(self, context, endpoint_group):
pass
@abc.abstractmethod
def update_endpoint_group(self, context, endpoint_group_id,
endpoint_group):
pass
@abc.abstractmethod
def delete_endpoint_group(self, context, endpoint_group_id):
pass
@abc.abstractmethod
def get_endpoint_group(self, context, endpoint_group_id, fields=None):
pass
@abc.abstractmethod
def get_endpoint_groups(self, context, filters=None, fields=None):
pass
neutron-vpnaas-8.0.0/neutron_vpnaas/extensions/__init__.py 0000664 0005670 0005671 00000000000 12701407726 025206 0 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/version.py 0000664 0005670 0005671 00000001265 12701407726 022753 0 ustar jenkins jenkins 0000000 0000000 # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
version_info = pbr.version.VersionInfo('neutron-vpnaas')
neutron-vpnaas-8.0.0/neutron_vpnaas/db/ 0000775 0005670 0005671 00000000000 12701410103 021254 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/db/vpn/ 0000775 0005670 0005671 00000000000 12701410103 022057 5 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/db/vpn/__init__.py 0000664 0005670 0005671 00000000000 12701407726 024177 0 ustar jenkins jenkins 0000000 0000000 neutron-vpnaas-8.0.0/neutron_vpnaas/db/vpn/vpn_db.py 0000664 0005670 0005671 00000106755 12701407726 023740 0 ustar jenkins jenkins 0000000 0000000 # (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# (c) Copyright 2015 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as n_constants
from neutron.db import common_db_mixin as base_db
from neutron.db import l3_agentschedulers_db as l3_agent_db
from neutron.db import models_v2
from neutron.extensions import l3 as l3_exception
from neutron import manager
from neutron.plugins.common import constants as p_constants
from neutron.plugins.common import utils
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy.orm import exc
from neutron_vpnaas._i18n import _LW
from neutron_vpnaas.db.vpn import vpn_models
from neutron_vpnaas.db.vpn import vpn_validator
from neutron_vpnaas.extensions import vpn_endpoint_groups
from neutron_vpnaas.extensions import vpnaas
from neutron_vpnaas.services.vpn.common import constants as v_constants
LOG = logging.getLogger(__name__)
class VPNPluginDb(vpnaas.VPNPluginBase,
vpn_endpoint_groups.VPNEndpointGroupsPluginBase,
base_db.CommonDbMixin):
"""VPN plugin database class using SQLAlchemy models."""
def _get_validator(self):
"""Obtain validator to use for attribute validation.
Subclasses may override this with a different validator, as needed.
Note: some UTs will directly create a VPNPluginDb object and then
call its methods, instead of creating a VPNDriverPlugin, which
will have a service driver associated that will provide a
validator object. As a result, we use the reference validator here.
"""
return vpn_validator.VpnReferenceValidator()
def update_status(self, context, model, v_id, status):
with context.session.begin(subtransactions=True):
v_db = self._get_resource(context, model, v_id)
v_db.update({'status': status})
def _get_resource(self, context, model, v_id):
try:
r = self._get_by_id(context, model, v_id)
except exc.NoResultFound:
with excutils.save_and_reraise_exception(reraise=False) as ctx:
if issubclass(model, vpn_models.IPsecSiteConnection):
raise vpnaas.IPsecSiteConnectionNotFound(
ipsec_site_conn_id=v_id
)
elif issubclass(model, vpn_models.IKEPolicy):
raise vpnaas.IKEPolicyNotFound(ikepolicy_id=v_id)
elif issubclass(model, vpn_models.IPsecPolicy):
raise vpnaas.IPsecPolicyNotFound(ipsecpolicy_id=v_id)
elif issubclass(model, vpn_models.VPNService):
raise vpnaas.VPNServiceNotFound(vpnservice_id=v_id)
elif issubclass(model, vpn_models.VPNEndpointGroup):
raise vpnaas.VPNEndpointGroupNotFound(
endpoint_group_id=v_id)
ctx.reraise = True
return r
def assert_update_allowed(self, obj):
status = getattr(obj, 'status', None)
_id = getattr(obj, 'id', None)
if utils.in_pending_status(status):
raise vpnaas.VPNStateInvalidToUpdate(id=_id, state=status)
def _make_ipsec_site_connection_dict(self, ipsec_site_conn, fields=None):
res = {'id': ipsec_site_conn['id'],
'tenant_id': ipsec_site_conn['tenant_id'],
'name': ipsec_site_conn['name'],
'description': ipsec_site_conn['description'],
'peer_address': ipsec_site_conn['peer_address'],
'peer_id': ipsec_site_conn['peer_id'],
'route_mode': ipsec_site_conn['route_mode'],
'mtu': ipsec_site_conn['mtu'],
'auth_mode': ipsec_site_conn['auth_mode'],
'psk': ipsec_site_conn['psk'],
'initiator': ipsec_site_conn['initiator'],
'dpd': {
'action': ipsec_site_conn['dpd_action'],
'interval': ipsec_site_conn['dpd_interval'],
'timeout': ipsec_site_conn['dpd_timeout']
},
'admin_state_up': ipsec_site_conn['admin_state_up'],
'status': ipsec_site_conn['status'],
'vpnservice_id': ipsec_site_conn['vpnservice_id'],
'ikepolicy_id': ipsec_site_conn['ikepolicy_id'],
'ipsecpolicy_id': ipsec_site_conn['ipsecpolicy_id'],
'peer_cidrs': [pcidr['cidr']
for pcidr in ipsec_site_conn['peer_cidrs']],
'local_ep_group_id': ipsec_site_conn['local_ep_group_id'],
'peer_ep_group_id': ipsec_site_conn['peer_ep_group_id'],
}
return self._fields(res, fields)
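# For illustration, the dict built above nests the three dpd_* columns
# under a single 'dpd' key, e.g. (values are hypothetical):
#
#   'dpd': {'action': 'hold', 'interval': 30, 'timeout': 120}
#
# while 'peer_cidrs' is flattened from the IPsecPeerCidr rows into a
# plain list of CIDR strings.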
def get_endpoint_info(self, context, ipsec_sitecon):
"""Obtain all endpoint info, and store in connection for validation."""
ipsec_sitecon['local_epg_subnets'] = self.get_endpoint_group(
context, ipsec_sitecon['local_ep_group_id'])
ipsec_sitecon['peer_epg_cidrs'] = self.get_endpoint_group(
context, ipsec_sitecon['peer_ep_group_id'])
def validate_connection_info(self, context, validator, ipsec_sitecon,
vpnservice):
"""Collect info and validate connection.
        If endpoint groups are used (the default), collect the group info and
do not specify the IP version (as it will come from endpoints).
Otherwise, get the IP version from the (legacy) subnet for
validation purposes.
NOTE: Once the deprecated subnet is removed, the caller can just
call get_endpoint_info() and validate_ipsec_site_connection().
"""
if ipsec_sitecon['local_ep_group_id']:
self.get_endpoint_info(context, ipsec_sitecon)
ip_version = None
else:
ip_version = vpnservice.subnet.ip_version
validator.validate_ipsec_site_connection(context, ipsec_sitecon,
ip_version, vpnservice)
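    # Illustrative sketch (not part of the original module) of the two modes
    # handled by validate_connection_info() above; the IDs are hypothetical.
    #
    #   # Endpoint-group mode: the IP version comes from the groups, so the
    #   # validator is called with ip_version=None.
    #   ipsec_sitecon = {'local_ep_group_id': 'LOCAL-GROUP-UUID',
    #                    'peer_ep_group_id': 'PEER-GROUP-UUID'}
    #
    #   # Legacy mode: no endpoint groups, so the (deprecated) service subnet
    #   # supplies the IP version used for validation.
    #   ipsec_sitecon = {'local_ep_group_id': None, 'peer_ep_group_id': None}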
def create_ipsec_site_connection(self, context, ipsec_site_connection):
ipsec_sitecon = ipsec_site_connection['ipsec_site_connection']
validator = self._get_validator()
validator.assign_sensible_ipsec_sitecon_defaults(ipsec_sitecon)
with context.session.begin(subtransactions=True):
            # Check permissions
vpnservice_id = ipsec_sitecon['vpnservice_id']
self._get_resource(context, vpn_models.VPNService, vpnservice_id)
self._get_resource(context, vpn_models.IKEPolicy,
ipsec_sitecon['ikepolicy_id'])
self._get_resource(context, vpn_models.IPsecPolicy,
ipsec_sitecon['ipsecpolicy_id'])
vpnservice = self._get_vpnservice(context, vpnservice_id)
validator.validate_ipsec_conn_optional_args(ipsec_sitecon,
vpnservice.subnet)
self.validate_connection_info(context, validator, ipsec_sitecon,
vpnservice)
validator.resolve_peer_address(ipsec_sitecon, vpnservice.router)
ipsec_site_conn_db = vpn_models.IPsecSiteConnection(
id=uuidutils.generate_uuid(),
tenant_id=ipsec_sitecon['tenant_id'],
name=ipsec_sitecon['name'],
description=ipsec_sitecon['description'],
peer_address=ipsec_sitecon['peer_address'],
peer_id=ipsec_sitecon['peer_id'],
route_mode='static',
mtu=ipsec_sitecon['mtu'],
auth_mode='psk',
psk=ipsec_sitecon['psk'],
initiator=ipsec_sitecon['initiator'],
dpd_action=ipsec_sitecon['dpd_action'],
dpd_interval=ipsec_sitecon['dpd_interval'],
dpd_timeout=ipsec_sitecon['dpd_timeout'],
admin_state_up=ipsec_sitecon['admin_state_up'],
status=p_constants.PENDING_CREATE,
vpnservice_id=vpnservice_id,
ikepolicy_id=ipsec_sitecon['ikepolicy_id'],
ipsecpolicy_id=ipsec_sitecon['ipsecpolicy_id'],
local_ep_group_id=ipsec_sitecon['local_ep_group_id'],
peer_ep_group_id=ipsec_sitecon['peer_ep_group_id']
)
context.session.add(ipsec_site_conn_db)
for cidr in ipsec_sitecon['peer_cidrs']:
peer_cidr_db = vpn_models.IPsecPeerCidr(
cidr=cidr,
ipsec_site_connection_id=ipsec_site_conn_db['id']
)
context.session.add(peer_cidr_db)
return self._make_ipsec_site_connection_dict(ipsec_site_conn_db)
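    # Illustrative sketch (not from the original source): the shape of a
    # minimal ipsec_site_connection request body as this method consumes it,
    # after assign_sensible_ipsec_sitecon_defaults() has flattened the DPD
    # settings. All identifiers and addresses are hypothetical.
    #
    #   {'ipsec_site_connection': {
    #       'tenant_id': 'TENANT-UUID', 'name': 'site-a-to-site-b',
    #       'description': '', 'peer_address': '203.0.113.10',
    #       'peer_id': '203.0.113.10', 'peer_cidrs': ['192.0.2.0/24'],
    #       'mtu': 1500, 'psk': 'secret', 'initiator': 'bi-directional',
    #       'dpd': {'action': 'hold', 'interval': 30, 'timeout': 120},
    #       'admin_state_up': True, 'vpnservice_id': 'VPNSERVICE-UUID',
    #       'ikepolicy_id': 'IKEPOLICY-UUID',
    #       'ipsecpolicy_id': 'IPSECPOLICY-UUID',
    #       'local_ep_group_id': None, 'peer_ep_group_id': None}}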
def update_ipsec_site_connection(
self, context,
ipsec_site_conn_id, ipsec_site_connection):
ipsec_sitecon = ipsec_site_connection['ipsec_site_connection']
changed_peer_cidrs = False
validator = self._get_validator()
with context.session.begin(subtransactions=True):
ipsec_site_conn_db = self._get_resource(
context, vpn_models.IPsecSiteConnection, ipsec_site_conn_id)
vpnservice_id = ipsec_site_conn_db['vpnservice_id']
vpnservice = self._get_vpnservice(context, vpnservice_id)
validator.assign_sensible_ipsec_sitecon_defaults(
ipsec_sitecon, ipsec_site_conn_db)
validator.validate_ipsec_conn_optional_args(ipsec_sitecon,
vpnservice.subnet)
self.validate_connection_info(context, validator, ipsec_sitecon,
vpnservice)
if 'peer_address' in ipsec_sitecon:
validator.resolve_peer_address(ipsec_sitecon,
vpnservice.router)
self.assert_update_allowed(ipsec_site_conn_db)
if "peer_cidrs" in ipsec_sitecon:
changed_peer_cidrs = True
old_peer_cidr_list = ipsec_site_conn_db['peer_cidrs']
old_peer_cidr_dict = dict(
(peer_cidr['cidr'], peer_cidr)
for peer_cidr in old_peer_cidr_list)
new_peer_cidr_set = set(ipsec_sitecon["peer_cidrs"])
old_peer_cidr_set = set(old_peer_cidr_dict)
new_peer_cidrs = list(new_peer_cidr_set)
for peer_cidr in old_peer_cidr_set - new_peer_cidr_set:
context.session.delete(old_peer_cidr_dict[peer_cidr])
for peer_cidr in new_peer_cidr_set - old_peer_cidr_set:
pcidr = vpn_models.IPsecPeerCidr(
cidr=peer_cidr,
ipsec_site_connection_id=ipsec_site_conn_id)
context.session.add(pcidr)
            # Note: Unconditionally remove peer_cidrs here; they were set to
            # the previous values (when unchanged) only so that the
            # validation above could run.
del ipsec_sitecon["peer_cidrs"]
if ipsec_sitecon:
ipsec_site_conn_db.update(ipsec_sitecon)
result = self._make_ipsec_site_connection_dict(ipsec_site_conn_db)
if changed_peer_cidrs:
result['peer_cidrs'] = new_peer_cidrs
return result
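    # Illustrative sketch (not part of the original module) of the peer_cidrs
    # reconciliation above: only the set difference is written to the DB.
    #
    #   old_peer_cidr_set = {'192.0.2.0/24', '198.51.100.0/24'}  # hypothetical
    #   new_peer_cidr_set = {'192.0.2.0/24', '203.0.113.0/24'}
    #   # deleted: '198.51.100.0/24'; added: '203.0.113.0/24';
    #   # '192.0.2.0/24' is left untouched.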
def delete_ipsec_site_connection(self, context, ipsec_site_conn_id):
with context.session.begin(subtransactions=True):
ipsec_site_conn_db = self._get_resource(
context, vpn_models.IPsecSiteConnection, ipsec_site_conn_id)
context.session.delete(ipsec_site_conn_db)
def _get_ipsec_site_connection(
self, context, ipsec_site_conn_id):
return self._get_resource(
context, vpn_models.IPsecSiteConnection, ipsec_site_conn_id)
def get_ipsec_site_connection(self, context,
ipsec_site_conn_id, fields=None):
ipsec_site_conn_db = self._get_ipsec_site_connection(
context, ipsec_site_conn_id)
return self._make_ipsec_site_connection_dict(
ipsec_site_conn_db, fields)
def get_ipsec_site_connections(self, context, filters=None, fields=None):
return self._get_collection(context, vpn_models.IPsecSiteConnection,
self._make_ipsec_site_connection_dict,
filters=filters, fields=fields)
def update_ipsec_site_conn_status(self, context, conn_id, new_status):
with context.session.begin():
self._update_connection_status(context, conn_id, new_status, True)
def _update_connection_status(self, context, conn_id, new_status,
updated_pending):
"""Update the connection status, if changed.
        If the connection is not in a pending state, unconditionally update
        the status. If it is in a pending state, only update the status when
        the agent indicates that the pending operation has been processed.
"""
try:
conn_db = self._get_ipsec_site_connection(context, conn_id)
except vpnaas.IPsecSiteConnectionNotFound:
return
if not utils.in_pending_status(conn_db.status) or updated_pending:
conn_db.status = new_status
def _make_ikepolicy_dict(self, ikepolicy, fields=None):
res = {'id': ikepolicy['id'],
'tenant_id': ikepolicy['tenant_id'],
'name': ikepolicy['name'],
'description': ikepolicy['description'],
'auth_algorithm': ikepolicy['auth_algorithm'],
'encryption_algorithm': ikepolicy['encryption_algorithm'],
'phase1_negotiation_mode': ikepolicy['phase1_negotiation_mode'],
'lifetime': {
'units': ikepolicy['lifetime_units'],
'value': ikepolicy['lifetime_value'],
},
'ike_version': ikepolicy['ike_version'],
'pfs': ikepolicy['pfs']
}
return self._fields(res, fields)
def create_ikepolicy(self, context, ikepolicy):
ike = ikepolicy['ikepolicy']
lifetime_info = ike['lifetime']
lifetime_units = lifetime_info.get('units', 'seconds')
lifetime_value = lifetime_info.get('value', 3600)
with context.session.begin(subtransactions=True):
ike_db = vpn_models.IKEPolicy(
id=uuidutils.generate_uuid(),
tenant_id=ike['tenant_id'],
name=ike['name'],
description=ike['description'],
auth_algorithm=ike['auth_algorithm'],
encryption_algorithm=ike['encryption_algorithm'],
phase1_negotiation_mode=ike['phase1_negotiation_mode'],
lifetime_units=lifetime_units,
lifetime_value=lifetime_value,
ike_version=ike['ike_version'],
pfs=ike['pfs']
)
context.session.add(ike_db)
return self._make_ikepolicy_dict(ike_db)
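    # Illustrative sketch (not from the original source): if the request
    # omits parts of 'lifetime', the defaults applied above take effect.
    #
    #   ikepolicy = {'ikepolicy': {..., 'lifetime': {}}}  # hypothetical
    #   # -> lifetime_units = 'seconds', lifetime_value = 3600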
def update_ikepolicy(self, context, ikepolicy_id, ikepolicy):
ike = ikepolicy['ikepolicy']
with context.session.begin(subtransactions=True):
if context.session.query(vpn_models.IPsecSiteConnection).filter_by(
ikepolicy_id=ikepolicy_id).first():
raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id)
ike_db = self._get_resource(
context, vpn_models.IKEPolicy, ikepolicy_id)
if ike:
lifetime_info = ike.get('lifetime')
if lifetime_info:
if lifetime_info.get('units'):
ike['lifetime_units'] = lifetime_info['units']
if lifetime_info.get('value'):
ike['lifetime_value'] = lifetime_info['value']
ike_db.update(ike)
return self._make_ikepolicy_dict(ike_db)
def delete_ikepolicy(self, context, ikepolicy_id):
with context.session.begin(subtransactions=True):
if context.session.query(vpn_models.IPsecSiteConnection).filter_by(
ikepolicy_id=ikepolicy_id).first():
raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id)
ike_db = self._get_resource(
context, vpn_models.IKEPolicy, ikepolicy_id)
context.session.delete(ike_db)
def get_ikepolicy(self, context, ikepolicy_id, fields=None):
ike_db = self._get_resource(
context, vpn_models.IKEPolicy, ikepolicy_id)
return self._make_ikepolicy_dict(ike_db, fields)
def get_ikepolicies(self, context, filters=None, fields=None):
return self._get_collection(context, vpn_models.IKEPolicy,
self._make_ikepolicy_dict,
filters=filters, fields=fields)
def _make_ipsecpolicy_dict(self, ipsecpolicy, fields=None):
res = {'id': ipsecpolicy['id'],
'tenant_id': ipsecpolicy['tenant_id'],
'name': ipsecpolicy['name'],
'description': ipsecpolicy['description'],
'transform_protocol': ipsecpolicy['transform_protocol'],
'auth_algorithm': ipsecpolicy['auth_algorithm'],
'encryption_algorithm': ipsecpolicy['encryption_algorithm'],
'encapsulation_mode': ipsecpolicy['encapsulation_mode'],
'lifetime': {
'units': ipsecpolicy['lifetime_units'],
'value': ipsecpolicy['lifetime_value'],
},
'pfs': ipsecpolicy['pfs']
}
return self._fields(res, fields)
def create_ipsecpolicy(self, context, ipsecpolicy):
ipsecp = ipsecpolicy['ipsecpolicy']
validator = self._get_validator()
lifetime_info = ipsecp['lifetime']
lifetime_units = lifetime_info.get('units', 'seconds')
lifetime_value = lifetime_info.get('value', 3600)
with context.session.begin(subtransactions=True):
validator.validate_ipsec_policy(context, ipsecp)
ipsecp_db = vpn_models.IPsecPolicy(
id=uuidutils.generate_uuid(),
tenant_id=ipsecp['tenant_id'],
name=ipsecp['name'],
description=ipsecp['description'],
transform_protocol=ipsecp['transform_protocol'],
auth_algorithm=ipsecp['auth_algorithm'],
encryption_algorithm=ipsecp['encryption_algorithm'],
encapsulation_mode=ipsecp['encapsulation_mode'],
lifetime_units=lifetime_units,
lifetime_value=lifetime_value,
pfs=ipsecp['pfs'])
context.session.add(ipsecp_db)
return self._make_ipsecpolicy_dict(ipsecp_db)
def update_ipsecpolicy(self, context, ipsecpolicy_id, ipsecpolicy):
ipsecp = ipsecpolicy['ipsecpolicy']
validator = self._get_validator()
with context.session.begin(subtransactions=True):
validator.validate_ipsec_policy(context, ipsecp)
if context.session.query(vpn_models.IPsecSiteConnection).filter_by(
ipsecpolicy_id=ipsecpolicy_id).first():
raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id)
ipsecp_db = self._get_resource(
context, vpn_models.IPsecPolicy, ipsecpolicy_id)
if ipsecp:
lifetime_info = ipsecp.get('lifetime')
if lifetime_info:
if lifetime_info.get('units'):
ipsecp['lifetime_units'] = lifetime_info['units']
if lifetime_info.get('value'):
ipsecp['lifetime_value'] = lifetime_info['value']
ipsecp_db.update(ipsecp)
return self._make_ipsecpolicy_dict(ipsecp_db)
def delete_ipsecpolicy(self, context, ipsecpolicy_id):
with context.session.begin(subtransactions=True):
if context.session.query(vpn_models.IPsecSiteConnection).filter_by(
ipsecpolicy_id=ipsecpolicy_id).first():
raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id)
ipsec_db = self._get_resource(
context, vpn_models.IPsecPolicy, ipsecpolicy_id)
context.session.delete(ipsec_db)
def get_ipsecpolicy(self, context, ipsecpolicy_id, fields=None):
ipsec_db = self._get_resource(
context, vpn_models.IPsecPolicy, ipsecpolicy_id)
return self._make_ipsecpolicy_dict(ipsec_db, fields)
def get_ipsecpolicies(self, context, filters=None, fields=None):
return self._get_collection(context, vpn_models.IPsecPolicy,
self._make_ipsecpolicy_dict,
filters=filters, fields=fields)
def _make_vpnservice_dict(self, vpnservice, fields=None):
res = {'id': vpnservice['id'],
'name': vpnservice['name'],
'description': vpnservice['description'],
'tenant_id': vpnservice['tenant_id'],
'subnet_id': vpnservice['subnet_id'],
'router_id': vpnservice['router_id'],
'admin_state_up': vpnservice['admin_state_up'],
'external_v4_ip': vpnservice['external_v4_ip'],
'external_v6_ip': vpnservice['external_v6_ip'],
'status': vpnservice['status']}
return self._fields(res, fields)
def create_vpnservice(self, context, vpnservice):
vpns = vpnservice['vpnservice']
validator = self._get_validator()
with context.session.begin(subtransactions=True):
validator.validate_vpnservice(context, vpns)
vpnservice_db = vpn_models.VPNService(
id=uuidutils.generate_uuid(),
tenant_id=vpns['tenant_id'],
name=vpns['name'],
description=vpns['description'],
subnet_id=vpns['subnet_id'],
router_id=vpns['router_id'],
admin_state_up=vpns['admin_state_up'],
status=p_constants.PENDING_CREATE)
context.session.add(vpnservice_db)
return self._make_vpnservice_dict(vpnservice_db)
def set_external_tunnel_ips(self, context, vpnservice_id, v4_ip=None,
v6_ip=None):
"""Update the external tunnel IP(s) for service."""
vpns = {'external_v4_ip': v4_ip, 'external_v6_ip': v6_ip}
with context.session.begin(subtransactions=True):
vpns_db = self._get_resource(context, vpn_models.VPNService,
vpnservice_id)
vpns_db.update(vpns)
return self._make_vpnservice_dict(vpns_db)
def update_vpnservice(self, context, vpnservice_id, vpnservice):
vpns = vpnservice['vpnservice']
with context.session.begin(subtransactions=True):
vpns_db = self._get_resource(context, vpn_models.VPNService,
vpnservice_id)
self.assert_update_allowed(vpns_db)
if vpns:
vpns_db.update(vpns)
return self._make_vpnservice_dict(vpns_db)
def delete_vpnservice(self, context, vpnservice_id):
with context.session.begin(subtransactions=True):
if context.session.query(vpn_models.IPsecSiteConnection).filter_by(
vpnservice_id=vpnservice_id
).first():
raise vpnaas.VPNServiceInUse(vpnservice_id=vpnservice_id)
vpns_db = self._get_resource(context, vpn_models.VPNService,
vpnservice_id)
context.session.delete(vpns_db)
def _get_vpnservice(self, context, vpnservice_id):
return self._get_resource(context, vpn_models.VPNService,
vpnservice_id)
def get_vpnservice(self, context, vpnservice_id, fields=None):
vpns_db = self._get_resource(context, vpn_models.VPNService,
vpnservice_id)
return self._make_vpnservice_dict(vpns_db, fields)
def get_vpnservices(self, context, filters=None, fields=None):
return self._get_collection(context, vpn_models.VPNService,
self._make_vpnservice_dict,
filters=filters, fields=fields)
def check_router_in_use(self, context, router_id):
vpnservices = self.get_vpnservices(
context, filters={'router_id': [router_id]})
if vpnservices:
plural = "s" if len(vpnservices) > 1 else ""
services = ",".join([v['id'] for v in vpnservices])
raise l3_exception.RouterInUse(
router_id=router_id,
reason="is currently used by VPN service%(plural)s "
"(%(services)s)" % {'plural': plural,
'services': services})
def check_subnet_in_use(self, context, subnet_id):
with context.session.begin(subtransactions=True):
vpnservices = context.session.query(
vpn_models.VPNService).filter_by(subnet_id=subnet_id).first()
if vpnservices:
raise vpnaas.SubnetInUseByVPNService(
subnet_id=subnet_id,
vpnservice_id=vpnservices['id'])
def check_subnet_in_use_by_endpoint_group(self, context, subnet_id):
with context.session.begin(subtransactions=True):
query = context.session.query(vpn_models.VPNEndpointGroup)
query = query.filter(vpn_models.VPNEndpointGroup.endpoint_type ==
v_constants.SUBNET_ENDPOINT)
query = query.join(
vpn_models.VPNEndpoint,
sa.and_(vpn_models.VPNEndpoint.endpoint_group_id ==
vpn_models.VPNEndpointGroup.id,
vpn_models.VPNEndpoint.endpoint == subnet_id))
group = query.first()
if group:
raise vpnaas.SubnetInUseByEndpointGroup(
subnet_id=subnet_id, group_id=group['id'])
def _make_endpoint_group_dict(self, endpoint_group, fields=None):
res = {'id': endpoint_group['id'],
'tenant_id': endpoint_group['tenant_id'],
'name': endpoint_group['name'],
'description': endpoint_group['description'],
'type': endpoint_group['endpoint_type'],
'endpoints': [ep['endpoint']
for ep in endpoint_group['endpoints']]}
return self._fields(res, fields)
def create_endpoint_group(self, context, endpoint_group):
group = endpoint_group['endpoint_group']
validator = self._get_validator()
with context.session.begin(subtransactions=True):
validator.validate_endpoint_group(context, group)
endpoint_group_db = vpn_models.VPNEndpointGroup(
id=uuidutils.generate_uuid(),
tenant_id=group['tenant_id'],
name=group['name'],
description=group['description'],
endpoint_type=group['type'])
context.session.add(endpoint_group_db)
for endpoint in group['endpoints']:
endpoint_db = vpn_models.VPNEndpoint(
endpoint=endpoint,
endpoint_group_id=endpoint_group_db['id']
)
context.session.add(endpoint_db)
return self._make_endpoint_group_dict(endpoint_group_db)
def update_endpoint_group(self, context, endpoint_group_id,
endpoint_group):
group_changes = endpoint_group['endpoint_group']
        # Note: Endpoints cannot be changed, so no validation is done here.
with context.session.begin(subtransactions=True):
endpoint_group_db = self._get_resource(context,
vpn_models.VPNEndpointGroup,
endpoint_group_id)
endpoint_group_db.update(group_changes)
return self._make_endpoint_group_dict(endpoint_group_db)
def delete_endpoint_group(self, context, endpoint_group_id):
with context.session.begin(subtransactions=True):
self.check_endpoint_group_not_in_use(context, endpoint_group_id)
endpoint_group_db = self._get_resource(
context, vpn_models.VPNEndpointGroup, endpoint_group_id)
context.session.delete(endpoint_group_db)
def get_endpoint_group(self, context, endpoint_group_id, fields=None):
endpoint_group_db = self._get_resource(
context, vpn_models.VPNEndpointGroup, endpoint_group_id)
return self._make_endpoint_group_dict(endpoint_group_db, fields)
def get_endpoint_groups(self, context, filters=None, fields=None):
return self._get_collection(context, vpn_models.VPNEndpointGroup,
self._make_endpoint_group_dict,
filters=filters, fields=fields)
def check_endpoint_group_not_in_use(self, context, group_id):
query = context.session.query(vpn_models.IPsecSiteConnection)
query = query.filter(
sa.or_(
vpn_models.IPsecSiteConnection.local_ep_group_id == group_id,
vpn_models.IPsecSiteConnection.peer_ep_group_id == group_id)
)
if query.first():
raise vpnaas.EndpointGroupInUse(group_id=group_id)
class VPNPluginRpcDbMixin(object):
def _get_agent_hosting_vpn_services(self, context, host):
plugin = manager.NeutronManager.get_plugin()
agent = plugin._get_agent_by_type_and_host(
context, n_constants.AGENT_TYPE_L3, host)
agent_conf = plugin.get_configuration_dict(agent)
        # Retrieve the agent_mode to check whether this is the right
        # agent on which to deploy the VPN service. In the case of
        # distributed routers (DVR), the VPN service should reside
        # only on a dvr_snat node.
agent_mode = agent_conf.get('agent_mode', 'legacy')
if not agent.admin_state_up or agent_mode == 'dvr':
return []
query = context.session.query(vpn_models.VPNService)
query = query.join(vpn_models.IPsecSiteConnection)
query = query.join(l3_agent_db.RouterL3AgentBinding,
l3_agent_db.RouterL3AgentBinding.router_id ==
vpn_models.VPNService.router_id)
query = query.filter(
l3_agent_db.RouterL3AgentBinding.l3_agent_id == agent.id)
return query
def _build_local_subnet_cidr_map(self, context):
"""Build a dict of all local endpoint subnets, with list of CIDRs."""
query = context.session.query(models_v2.Subnet.id,
models_v2.Subnet.cidr)
query = query.join(vpn_models.VPNEndpoint,
vpn_models.VPNEndpoint.endpoint ==
models_v2.Subnet.id)
query = query.join(vpn_models.VPNEndpointGroup,
vpn_models.VPNEndpointGroup.id ==
vpn_models.VPNEndpoint.endpoint_group_id)
query = query.join(vpn_models.IPsecSiteConnection,
vpn_models.IPsecSiteConnection.local_ep_group_id ==
vpn_models.VPNEndpointGroup.id)
return {sn.id: sn.cidr for sn in query.all()}
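    # Illustrative sketch (not part of the original module) of the mapping
    # returned above, keyed by subnet ID (values are hypothetical).
    #
    #   {'SUBNET-UUID-1': '10.0.0.0/24', 'SUBNET-UUID-2': 'fd00:0:0:1::/64'}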
def update_status_by_agent(self, context, service_status_info_list):
"""Updating vpnservice and vpnconnection status.
:param context: context variable
:param service_status_info_list: list of status
The structure is
[{id: vpnservice_id,
status: ACTIVE|DOWN|ERROR,
updated_pending_status: True|False
ipsec_site_connections: {
ipsec_site_connection_id: {
status: ACTIVE|DOWN|ERROR,
updated_pending_status: True|False
}
}]
The agent will set updated_pending_status as True,
when agent updates any pending status.
"""
with context.session.begin(subtransactions=True):
for vpnservice in service_status_info_list:
try:
vpnservice_db = self._get_vpnservice(
context, vpnservice['id'])
except vpnaas.VPNServiceNotFound:
LOG.warning(_LW('vpnservice %s in db is already deleted'),
vpnservice['id'])
continue
if (not utils.in_pending_status(vpnservice_db.status)
or vpnservice['updated_pending_status']):
vpnservice_db.status = vpnservice['status']
for conn_id, conn in vpnservice[
'ipsec_site_connections'].items():
self._update_connection_status(
context, conn_id, conn['status'],
conn['updated_pending_status'])
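    # Illustrative sketch (not from the original source): a status report as
    # an agent might send it to update_status_by_agent(). IDs are
    # hypothetical.
    #
    #   service_status_info_list = [{
    #       'id': 'VPNSERVICE-UUID',
    #       'status': 'ACTIVE',
    #       'updated_pending_status': True,
    #       'ipsec_site_connections': {
    #           'CONNECTION-UUID': {'status': 'DOWN',
    #                               'updated_pending_status': True}}}]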
def vpn_callback(resource, event, trigger, **kwargs):
vpn_plugin = manager.NeutronManager.get_service_plugins().get(
p_constants.VPN)
if vpn_plugin:
context = kwargs.get('context')
if resource == resources.ROUTER_GATEWAY:
check_func = vpn_plugin.check_router_in_use
resource_id = kwargs.get('router_id')
elif resource == resources.ROUTER_INTERFACE:
check_func = vpn_plugin.check_subnet_in_use
resource_id = kwargs.get('subnet_id')
check_func(context, resource_id)
def migration_callback(resource, event, trigger, **kwargs):
context = kwargs['context']
router = kwargs['router']
vpn_plugin = manager.NeutronManager.get_service_plugins().get(
p_constants.VPN)
if vpn_plugin:
vpn_plugin.check_router_in_use(context, router['id'])
return True
def subnet_callback(resource, event, trigger, **kwargs):
"""Respond to subnet based notifications - see if subnet in use."""
context = kwargs['context']
subnet_id = kwargs['subnet_id']
vpn_plugin = manager.NeutronManager.get_service_plugins().get(
p_constants.VPN)
if vpn_plugin:
vpn_plugin.check_subnet_in_use_by_endpoint_group(context, subnet_id)
def subscribe():
registry.subscribe(
vpn_callback, resources.ROUTER_GATEWAY, events.BEFORE_DELETE)
registry.subscribe(
vpn_callback, resources.ROUTER_INTERFACE, events.BEFORE_DELETE)
registry.subscribe(
migration_callback, resources.ROUTER, events.BEFORE_UPDATE)
registry.subscribe(
subnet_callback, resources.SUBNET, events.BEFORE_DELETE)
# NOTE(armax): multiple VPN service plugins (potentially out of tree) may
# inherit from vpn_db and may need the callbacks to be processed. Having an
# implicit subscription (through the module import) preserves the existing
# behavior, and at the same time it avoids fixing it manually in each and
# every vpn plugin outta there. That said, The subscription is also made
# explicitly in the reference vpn plugin. The subscription operation is
# idempotent so there is no harm in registering the same callback multiple
# times.
subscribe()
neutron-vpnaas-8.0.0/neutron_vpnaas/db/vpn/vpn_validator.py
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import socket
from neutron.api.v2 import attributes
from neutron.db import l3_db
from neutron.db import models_v2
from neutron import manager
from neutron.plugins.common import constants as nconstants
from neutron_lib import exceptions as nexception
from neutron_vpnaas._i18n import _
from neutron_vpnaas.extensions import vpnaas
from neutron_vpnaas.services.vpn.common import constants
class VpnReferenceValidator(object):
"""Baseline validation routines for VPN resources."""
IP_MIN_MTU = {4: 68, 6: 1280}
@property
def l3_plugin(self):
try:
return self._l3_plugin
except AttributeError:
self._l3_plugin = manager.NeutronManager.get_service_plugins().get(
nconstants.L3_ROUTER_NAT)
return self._l3_plugin
@property
def core_plugin(self):
try:
return self._core_plugin
except AttributeError:
self._core_plugin = manager.NeutronManager.get_plugin()
return self._core_plugin
def _check_dpd(self, ipsec_sitecon):
"""Ensure that DPD timeout is greater than DPD interval."""
if ipsec_sitecon['dpd_timeout'] <= ipsec_sitecon['dpd_interval']:
raise vpnaas.IPsecSiteConnectionDpdIntervalValueError(
attr='dpd_timeout')
def _check_mtu(self, context, mtu, ip_version):
if mtu < VpnReferenceValidator.IP_MIN_MTU[ip_version]:
raise vpnaas.IPsecSiteConnectionMtuError(mtu=mtu,
version=ip_version)
def _validate_peer_address(self, ip_version, router):
# NOTE: peer_address ip version should match with
# at least one external gateway address ip version.
        # IPsec won't work with an IPv6 LLA, nor with a GUA that neutron
        # is not aware of. So, to support VPNaaS with IPv6, the external
        # network must have an IPv6 subnet.
for fixed_ip in router.gw_port['fixed_ips']:
addr = fixed_ip['ip_address']
if ip_version == netaddr.IPAddress(addr).version:
return
raise vpnaas.ExternalNetworkHasNoSubnet(
router_id=router.id,
ip_version="IPv6" if ip_version == 6 else "IPv4")
def resolve_peer_address(self, ipsec_sitecon, router):
address = ipsec_sitecon['peer_address']
# check if address is an ip address or fqdn
invalid_ip_address = attributes._validate_ip_address(address)
if invalid_ip_address:
# resolve fqdn
try:
addrinfo = socket.getaddrinfo(address, None)[0]
ipsec_sitecon['peer_address'] = addrinfo[-1][0]
except socket.gaierror:
raise vpnaas.VPNPeerAddressNotResolved(peer_address=address)
ip_version = netaddr.IPAddress(ipsec_sitecon['peer_address']).version
self._validate_peer_address(ip_version, router)
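    # Illustrative sketch (not part of the original module): when
    # peer_address is an FQDN, it is replaced in place with the resolved IP
    # before the IP version check (names and addresses are hypothetical).
    #
    #   ipsec_sitecon = {'peer_address': 'vpn.example.com'}
    #   # after resolve_peer_address(): {'peer_address': '203.0.113.10'}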
def _get_local_subnets(self, context, endpoint_group):
if endpoint_group['type'] != constants.SUBNET_ENDPOINT:
raise vpnaas.WrongEndpointGroupType(
group_type=endpoint_group['type'], which=endpoint_group['id'],
expected=constants.SUBNET_ENDPOINT)
subnet_ids = endpoint_group['endpoints']
return context.session.query(models_v2.Subnet).filter(
models_v2.Subnet.id.in_(subnet_ids)).all()
def _get_peer_cidrs(self, endpoint_group):
if endpoint_group['type'] != constants.CIDR_ENDPOINT:
raise vpnaas.WrongEndpointGroupType(
group_type=endpoint_group['type'], which=endpoint_group['id'],
expected=constants.CIDR_ENDPOINT)
return endpoint_group['endpoints']
def _check_local_endpoint_ip_versions(self, group_id, local_subnets):
"""Ensure all subnets in endpoint group have the same IP version.
Will return the IP version, so it can be used for inter-group testing.
"""
if len(local_subnets) == 1:
return local_subnets[0]['ip_version']
ip_versions = set([subnet['ip_version'] for subnet in local_subnets])
if len(ip_versions) > 1:
raise vpnaas.MixedIPVersionsForIPSecEndpoints(group=group_id)
return ip_versions.pop()
def _check_peer_endpoint_ip_versions(self, group_id, peer_cidrs):
"""Ensure all CIDRs in endpoint group have the same IP version.
Will return the IP version, so it can be used for inter-group testing.
"""
if len(peer_cidrs) == 1:
return netaddr.IPNetwork(peer_cidrs[0]).version
ip_versions = set([netaddr.IPNetwork(pc).version for pc in peer_cidrs])
if len(ip_versions) > 1:
raise vpnaas.MixedIPVersionsForIPSecEndpoints(group=group_id)
return ip_versions.pop()
def _check_peer_cidrs_ip_versions(self, peer_cidrs):
"""Ensure all CIDRs have the same IP version."""
if len(peer_cidrs) == 1:
return netaddr.IPNetwork(peer_cidrs[0]).version
ip_versions = set([netaddr.IPNetwork(pc).version for pc in peer_cidrs])
if len(ip_versions) > 1:
raise vpnaas.MixedIPVersionsForPeerCidrs()
return ip_versions.pop()
def _check_local_subnets_on_router(self, context, router, local_subnets):
for subnet in local_subnets:
self._check_subnet_id(context, router, subnet['id'])
def _validate_compatible_ip_versions(self, local_ip_version,
peer_ip_version):
if local_ip_version != peer_ip_version:
raise vpnaas.MixedIPVersionsForIPSecConnection()
def validate_ipsec_conn_optional_args(self, ipsec_sitecon, subnet):
"""Ensure that proper combinations of optional args are used.
When VPN service has a subnet, then we must have peer_cidrs, and
cannot have any endpoint groups. If no subnet for the service, then
we must have both endpoint groups, and no peer_cidrs. Method will
form a string indicating which endpoints are incorrect, for any
exception raised.
"""
local_epg_id = ipsec_sitecon.get('local_ep_group_id')
peer_epg_id = ipsec_sitecon.get('peer_ep_group_id')
peer_cidrs = ipsec_sitecon.get('peer_cidrs')
if subnet:
if not peer_cidrs:
raise vpnaas.MissingPeerCidrs()
epgs = []
if local_epg_id:
epgs.append('local')
if peer_epg_id:
epgs.append('peer')
if epgs:
which = ' and '.join(epgs)
suffix = 's' if len(epgs) > 1 else ''
raise vpnaas.InvalidEndpointGroup(which=which, suffix=suffix)
else:
if peer_cidrs:
raise vpnaas.PeerCidrsInvalid()
epgs = []
if not local_epg_id:
epgs.append('local')
if not peer_epg_id:
epgs.append('peer')
if epgs:
which = ' and '.join(epgs)
suffix = 's' if len(epgs) > 1 else ''
raise vpnaas.MissingRequiredEndpointGroup(which=which,
suffix=suffix)
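    # Illustrative summary (not from the original source) of the combinations
    # accepted by validate_ipsec_conn_optional_args() above:
    #
    #   service subnet | peer_cidrs | endpoint groups | result
    #   ---------------+------------+-----------------+----------------------------
    #   present        | present    | none            | OK (legacy mode)
    #   present        | missing    | any             | MissingPeerCidrs
    #   present        | present    | any             | InvalidEndpointGroup
    #   none           | none       | local and peer  | OK (endpoint-group mode)
    #   none           | present    | any             | PeerCidrsInvalid
    #   none           | none       | one or neither  | MissingRequiredEndpointGroup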
def assign_sensible_ipsec_sitecon_defaults(self, ipsec_sitecon,
prev_conn=None):
"""Provide defaults for optional items, if missing.
        With the endpoint groups capability, the peer_cidrs (legacy mode)
        and endpoint group IDs (new mode) are optional. For updating,
we need to provide the previous values for any missing values,
so that we can detect if the update request is attempting to
mix modes.
Flatten the nested DPD information, and set default values for
any missing information. For connection updates, the previous
values will be used as defaults for any missing items.
"""
if prev_conn:
ipsec_sitecon.setdefault(
'peer_cidrs', [pc['cidr'] for pc in prev_conn['peer_cidrs']])
ipsec_sitecon.setdefault('local_ep_group_id',
prev_conn['local_ep_group_id'])
ipsec_sitecon.setdefault('peer_ep_group_id',
prev_conn['peer_ep_group_id'])
else:
prev_conn = {'dpd_action': 'hold',
'dpd_interval': 30,
'dpd_timeout': 120}
dpd = ipsec_sitecon.get('dpd', {})
ipsec_sitecon['dpd_action'] = dpd.get('action',
prev_conn['dpd_action'])
ipsec_sitecon['dpd_interval'] = dpd.get('interval',
prev_conn['dpd_interval'])
ipsec_sitecon['dpd_timeout'] = dpd.get('timeout',
prev_conn['dpd_timeout'])
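    # Illustrative sketch (not part of the original module): the flattening
    # performed above on a create request that omits some DPD settings.
    #
    #   ipsec_sitecon = {'dpd': {'interval': 60}}  # hypothetical input
    #   # after the call: dpd_action='hold', dpd_interval=60, dpd_timeout=120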
def validate_ipsec_site_connection(self, context, ipsec_sitecon,
local_ip_version, vpnservice=None):
"""Reference implementation of validation for IPSec connection.
This makes sure that IP versions are the same. For endpoint groups,
we use the local subnet(s) IP versions, and peer CIDR(s) IP versions.
For legacy mode, we use the (sole) subnet IP version, and the peer
CIDR(s). All IP versions must be the same.
This method also checks MTU (based on the local IP version), and
DPD settings.
"""
if not local_ip_version:
# Using endpoint groups
local_subnets = self._get_local_subnets(
context, ipsec_sitecon['local_epg_subnets'])
self._check_local_subnets_on_router(
context, vpnservice['router_id'], local_subnets)
local_ip_version = self._check_local_endpoint_ip_versions(
ipsec_sitecon['local_ep_group_id'], local_subnets)
peer_cidrs = self._get_peer_cidrs(ipsec_sitecon['peer_epg_cidrs'])
peer_ip_version = self._check_peer_endpoint_ip_versions(
ipsec_sitecon['peer_ep_group_id'], peer_cidrs)
else:
peer_ip_version = self._check_peer_cidrs_ip_versions(
ipsec_sitecon['peer_cidrs'])
self._validate_compatible_ip_versions(local_ip_version,
peer_ip_version)
self._check_dpd(ipsec_sitecon)
mtu = ipsec_sitecon.get('mtu')
if mtu:
self._check_mtu(context, mtu, local_ip_version)
def _check_router(self, context, router_id):
router = self.l3_plugin.get_router(context, router_id)
if not router.get(l3_db.EXTERNAL_GW_INFO):
raise vpnaas.RouterIsNotExternal(router_id=router_id)
def _check_subnet_id(self, context, router_id, subnet_id):
ports = self.core_plugin.get_ports(
context,
filters={
'fixed_ips': {'subnet_id': [subnet_id]},
'device_id': [router_id]})
if not ports:
raise vpnaas.SubnetIsNotConnectedToRouter(
subnet_id=subnet_id,
router_id=router_id)
def validate_vpnservice(self, context, vpnservice):
self._check_router(context, vpnservice['router_id'])
if vpnservice['subnet_id'] is not None:
self._check_subnet_id(context, vpnservice['router_id'],
vpnservice['subnet_id'])
def validate_ipsec_policy(self, context, ipsec_policy):
"""Reference implementation of validation for IPSec Policy.
Service driver can override and implement specific logic
for IPSec Policy validation.
"""
pass
def _validate_cidrs(self, cidrs):
"""Ensure valid IPv4/6 CIDRs."""
for cidr in cidrs:
msg = attributes._validate_subnet(cidr)
if msg:
raise vpnaas.InvalidEndpointInEndpointGroup(
group_type=constants.CIDR_ENDPOINT, endpoint=cidr,
why=_("Invalid CIDR"))
def _validate_subnets(self, context, subnet_ids):
"""Ensure UUIDs OK and subnets exist."""
for subnet_id in subnet_ids:
msg = attributes._validate_uuid(subnet_id)
if msg:
raise vpnaas.InvalidEndpointInEndpointGroup(
group_type=constants.SUBNET_ENDPOINT, endpoint=subnet_id,
why=_('Invalid UUID'))
try:
self.core_plugin.get_subnet(context, subnet_id)
except nexception.SubnetNotFound:
raise vpnaas.NonExistingSubnetInEndpointGroup(
subnet=subnet_id)
def validate_endpoint_group(self, context, endpoint_group):
"""Reference validator for endpoint group.
Ensures that there is at least one endpoint, all the endpoints in the
group are of the same type, and that the endpoints are "valid".
Note: Only called for create, as endpoints cannot be changed.
"""
endpoints = endpoint_group['endpoints']
if not endpoints:
raise vpnaas.MissingEndpointForEndpointGroup(group=endpoint_group)
group_type = endpoint_group['type']
if group_type == constants.CIDR_ENDPOINT:
self._validate_cidrs(endpoints)
elif group_type == constants.SUBNET_ENDPOINT:
self._validate_subnets(context, endpoints)
neutron-vpnaas-8.0.0/neutron_vpnaas/db/vpn/vpn_models.py
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api.v2 import attributes as attr
from neutron.db import l3_db
from neutron.db import model_base
from neutron.db import models_v2
import sqlalchemy as sa
from sqlalchemy import orm
from neutron_vpnaas.services.vpn.common import constants
class IPsecPeerCidr(model_base.BASEV2):
"""Internal representation of a IPsec Peer Cidrs."""
cidr = sa.Column(sa.String(32), nullable=False, primary_key=True)
ipsec_site_connection_id = sa.Column(
sa.String(36),
sa.ForeignKey('ipsec_site_connections.id',
ondelete="CASCADE"),
primary_key=True)
class IPsecPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 IPsecPolicy Object."""
__tablename__ = 'ipsecpolicies'
name = sa.Column(sa.String(attr.NAME_MAX_LEN))
description = sa.Column(sa.String(attr.DESCRIPTION_MAX_LEN))
transform_protocol = sa.Column(sa.Enum("esp", "ah", "ah-esp",
name="ipsec_transform_protocols"),
nullable=False)
auth_algorithm = sa.Column(sa.Enum("sha1",
name="vpn_auth_algorithms"),
nullable=False)
encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128",
"aes-256", "aes-192",
name="vpn_encrypt_algorithms"),
nullable=False)
encapsulation_mode = sa.Column(sa.Enum("tunnel", "transport",
name="ipsec_encapsulations"),
nullable=False)
lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes",
name="vpn_lifetime_units"),
nullable=False)
lifetime_value = sa.Column(sa.Integer, nullable=False)
pfs = sa.Column(sa.Enum("group2", "group5", "group14",
name="vpn_pfs"), nullable=False)
class IKEPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 IKEPolicy Object."""
__tablename__ = 'ikepolicies'
name = sa.Column(sa.String(attr.NAME_MAX_LEN))
description = sa.Column(sa.String(attr.DESCRIPTION_MAX_LEN))
auth_algorithm = sa.Column(sa.Enum("sha1",
name="vpn_auth_algorithms"),
nullable=False)
encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128",
"aes-256", "aes-192",
name="vpn_encrypt_algorithms"),
nullable=False)
phase1_negotiation_mode = sa.Column(sa.Enum("main",
name="ike_phase1_mode"),
nullable=False)
lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes",
name="vpn_lifetime_units"),
nullable=False)
lifetime_value = sa.Column(sa.Integer, nullable=False)
ike_version = sa.Column(sa.Enum("v1", "v2", name="ike_versions"),
nullable=False)
pfs = sa.Column(sa.Enum("group2", "group5", "group14",
name="vpn_pfs"), nullable=False)
class IPsecSiteConnection(model_base.BASEV2,
models_v2.HasId, models_v2.HasTenant):
"""Represents a IPsecSiteConnection Object."""
__tablename__ = 'ipsec_site_connections'
name = sa.Column(sa.String(attr.NAME_MAX_LEN))
description = sa.Column(sa.String(attr.DESCRIPTION_MAX_LEN))
peer_address = sa.Column(sa.String(255), nullable=False)
peer_id = sa.Column(sa.String(255), nullable=False)
route_mode = sa.Column(sa.String(8), nullable=False)
mtu = sa.Column(sa.Integer, nullable=False)
initiator = sa.Column(sa.Enum("bi-directional", "response-only",
name="vpn_initiators"), nullable=False)
auth_mode = sa.Column(sa.String(16), nullable=False)
psk = sa.Column(sa.String(255), nullable=False)
dpd_action = sa.Column(sa.Enum("hold", "clear",
"restart", "disabled",
"restart-by-peer", name="vpn_dpd_actions"),
nullable=False)
dpd_interval = sa.Column(sa.Integer, nullable=False)
dpd_timeout = sa.Column(sa.Integer, nullable=False)
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
vpnservice_id = sa.Column(sa.String(36),
sa.ForeignKey('vpnservices.id'),
nullable=False)
ipsecpolicy_id = sa.Column(sa.String(36),
sa.ForeignKey('ipsecpolicies.id'),
nullable=False)
ikepolicy_id = sa.Column(sa.String(36),
sa.ForeignKey('ikepolicies.id'),
nullable=False)
ipsecpolicy = orm.relationship(
IPsecPolicy, backref='ipsec_site_connection')
ikepolicy = orm.relationship(IKEPolicy, backref='ipsec_site_connection')
peer_cidrs = orm.relationship(IPsecPeerCidr,
backref='ipsec_site_connection',
lazy='joined',
cascade='all, delete, delete-orphan')
local_ep_group_id = sa.Column(sa.String(36),
sa.ForeignKey('vpn_endpoint_groups.id'))
peer_ep_group_id = sa.Column(sa.String(36),
sa.ForeignKey('vpn_endpoint_groups.id'))
local_ep_group = orm.relationship("VPNEndpointGroup",
foreign_keys=local_ep_group_id)
peer_ep_group = orm.relationship("VPNEndpointGroup",
foreign_keys=peer_ep_group_id)
class VPNService(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 VPNService Object."""
name = sa.Column(sa.String(attr.NAME_MAX_LEN))
description = sa.Column(sa.String(attr.DESCRIPTION_MAX_LEN))
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
external_v4_ip = sa.Column(sa.String(16))
external_v6_ip = sa.Column(sa.String(64))
subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id'))
router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'),
nullable=False)
subnet = orm.relationship(models_v2.Subnet)
router = orm.relationship(l3_db.Router)
ipsec_site_connections = orm.relationship(
IPsecSiteConnection,
backref='vpnservice',
cascade="all, delete-orphan")
class VPNEndpoint(model_base.BASEV2):
"""Endpoints used in VPN connections.
All endpoints in a group must be of the same type. Note: the endpoint
is an 'opaque' field used to hold different endpoint types, and be
flexible enough to use for future types.
"""
__tablename__ = 'vpn_endpoints'
endpoint = sa.Column(sa.String(255), nullable=False, primary_key=True)
endpoint_group_id = sa.Column(sa.String(36),
sa.ForeignKey('vpn_endpoint_groups.id',
ondelete="CASCADE"),
primary_key=True)
class VPNEndpointGroup(model_base.BASEV2, models_v2.HasId,
models_v2.HasTenant):
"""Collection of endpoints of a specific type, for VPN connections."""
__tablename__ = 'vpn_endpoint_groups'
name = sa.Column(sa.String(attr.NAME_MAX_LEN))
description = sa.Column(sa.String(attr.DESCRIPTION_MAX_LEN))
endpoint_type = sa.Column(sa.Enum(*constants.VPN_SUPPORTED_ENDPOINT_TYPES,
name="vpn_endpoint_type"),
nullable=False)
endpoints = orm.relationship(VPNEndpoint,
backref='endpoint_group',
lazy='joined',
cascade='all, delete, delete-orphan')
neutron-vpnaas-8.0.0/neutron_vpnaas/db/__init__.py
neutron-vpnaas-8.0.0/neutron_vpnaas/db/models/
neutron-vpnaas-8.0.0/neutron_vpnaas/db/models/__init__.py
neutron-vpnaas-8.0.0/neutron_vpnaas/db/models/head.py
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This module provides all database models at the current HEAD.
Its purpose is to create metadata that can be compared with the current
database schema. Based on this comparison, the database can be healed with
a healing migration.
"""
from neutron.db.migration.models import head
from neutron_vpnaas.db.vpn import vpn_db # noqa
from neutron_vpnaas.services.vpn.service_drivers import cisco_csr_db # noqa
def get_metadata():
return head.model_base.BASEV2.metadata
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/__init__.py
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/README
Generic single-database configuration.
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/env.py
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from alembic import context
from logging import config as logging_config
from oslo_config import cfg
from oslo_db.sqlalchemy import session
import sqlalchemy as sa
from sqlalchemy import event
from neutron.db import model_base
from neutron_vpnaas.db.migration import alembic_migrations
MYSQL_ENGINE = None
config = context.config
neutron_config = config.neutron_config
logging_config.fileConfig(config.config_file_name)
target_metadata = model_base.BASEV2.metadata
def set_mysql_engine():
try:
mysql_engine = neutron_config.command.mysql_engine
except cfg.NoSuchOptError:
mysql_engine = None
global MYSQL_ENGINE
MYSQL_ENGINE = (mysql_engine or
model_base.BASEV2.__table_args__['mysql_engine'])
def run_migrations_offline():
set_mysql_engine()
kwargs = dict()
if neutron_config.database.connection:
kwargs['url'] = neutron_config.database.connection
else:
kwargs['dialect_name'] = neutron_config.database.engine
kwargs['version_table'] = alembic_migrations.VPNAAS_VERSION_TABLE
context.configure(**kwargs)
with context.begin_transaction():
context.run_migrations()
@event.listens_for(sa.Table, 'after_parent_attach')
def set_storage_engine(target, parent):
if MYSQL_ENGINE:
target.kwargs['mysql_engine'] = MYSQL_ENGINE
def run_migrations_online():
set_mysql_engine()
engine = session.create_engine(neutron_config.database.connection)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata,
version_table=alembic_migrations.VPNAAS_VERSION_TABLE
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
engine.dispose()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
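# Illustrative note (not part of the original module): this env.py is driven
# by alembic through neutron-db-manage, for example:
#
#   neutron-db-manage --subproject neutron-vpnaas upgrade head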
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/versions/
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/versions/kilo_release.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""kilo
Revision ID: kilo
Revises: 3ea02b2a773e
Create Date: 2015-04-16 00:00:00.000000
"""
# revision identifiers, used by Alembic.
revision = 'kilo'
down_revision = '3ea02b2a773e'
def upgrade():
"""A no-op migration for marking the Kilo release."""
pass
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/versions/mitaka/
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/versions/mitaka/contract/
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/versions/mitaka/contract/2cb4ee992b41_multiple_local_subnets.py
# (c) Copyright 2015 Cisco Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Multiple local subnets
Revision ID: 2cb4ee992b41
Revises: 2c82e782d734
Create Date: 2015-09-09 20:32:54.254267
"""
from alembic import op
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy.sql import expression as sa_expr
from neutron.api.v2 import attributes as attr
from neutron.db import migration
from neutron_vpnaas.services.vpn.common import constants as v_constants
# revision identifiers, used by Alembic.
revision = '2cb4ee992b41'
down_revision = '2c82e782d734'
depends_on = ('28ee739a7e4b',)
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.MITAKA]
vpnservices = sa.Table(
'vpnservices', sa.MetaData(),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('tenant_id', sa.String(length=255), nullable=False),
sa.Column('name', sa.String(attr.NAME_MAX_LEN)),
sa.Column('description', sa.String(attr.DESCRIPTION_MAX_LEN)),
sa.Column('status', sa.String(16), nullable=False),
sa.Column('admin_state_up', sa.Boolean(), nullable=False),
sa.Column('external_v4_ip', sa.String(16)),
sa.Column('external_v6_ip', sa.String(64)),
sa.Column('subnet_id', sa.String(36)),
sa.Column('router_id', sa.String(36), nullable=False))
ipsec_site_conns = sa.Table(
'ipsec_site_connections', sa.MetaData(),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('tenant_id', sa.String(length=255), nullable=False),
sa.Column('name', sa.String(attr.NAME_MAX_LEN)),
sa.Column('description', sa.String(attr.DESCRIPTION_MAX_LEN)),
sa.Column('peer_address', sa.String(255), nullable=False),
sa.Column('peer_id', sa.String(255), nullable=False),
sa.Column('route_mode', sa.String(8), nullable=False),
sa.Column('mtu', sa.Integer, nullable=False),
sa.Column('initiator', sa.Enum("bi-directional", "response-only",
name="vpn_initiators"), nullable=False),
sa.Column('auth_mode', sa.String(16), nullable=False),
sa.Column('psk', sa.String(255), nullable=False),
sa.Column('dpd_action', sa.Enum("hold", "clear", "restart", "disabled",
"restart-by-peer", name="vpn_dpd_actions"),
nullable=False),
sa.Column('dpd_interval', sa.Integer, nullable=False),
sa.Column('dpd_timeout', sa.Integer, nullable=False),
sa.Column('status', sa.String(16), nullable=False),
sa.Column('admin_state_up', sa.Boolean(), nullable=False),
sa.Column('vpnservice_id', sa.String(36), nullable=False),
sa.Column('ipsecpolicy_id', sa.String(36), nullable=False),
sa.Column('ikepolicy_id', sa.String(36), nullable=False),
sa.Column('local_ep_group_id', sa.String(36)),
sa.Column('peer_ep_group_id', sa.String(36)))
ipsecpeercidrs = sa.Table(
'ipsecpeercidrs', sa.MetaData(),
sa.Column('cidr', sa.String(32), nullable=False, primary_key=True),
sa.Column('ipsec_site_connection_id', sa.String(36), primary_key=True))
def _make_endpoint_groups(new_groups, new_endpoints):
"""Create endpoint groups and their corresponding endpoints."""
md = sa.MetaData()
engine = op.get_bind()
sa.Table('vpn_endpoint_groups', md, autoload=True, autoload_with=engine)
op.bulk_insert(md.tables['vpn_endpoint_groups'], new_groups)
sa.Table('vpn_endpoints', md, autoload=True, autoload_with=engine)
op.bulk_insert(md.tables['vpn_endpoints'], new_endpoints)
def _update_connections(connection_map):
"""Store the endpoint group IDs in the connections."""
for conn_id, mapping in connection_map.items():
stmt = ipsec_site_conns.update().where(
ipsec_site_conns.c.id == conn_id).values(
local_ep_group_id=mapping['local'],
peer_ep_group_id=mapping['peer'])
op.execute(stmt)
def upgrade():
new_groups = []
new_endpoints = []
service_map = {}
session = sa.orm.Session(bind=op.get_bind())
    vpn_services = session.query(vpnservices).filter(
        vpnservices.c.subnet_id.isnot(None)).all()
for vpn_service in vpn_services:
subnet_id = vpn_service.subnet_id
if subnet_id is None:
continue # Skip new service entries
# Define the subnet group
group_id = uuidutils.generate_uuid()
group = {'id': group_id,
'name': '',
'description': '',
'tenant_id': vpn_service.tenant_id,
'endpoint_type': v_constants.SUBNET_ENDPOINT}
new_groups.append(group)
# Define the (sole) endpoint
endpoint = {'endpoint_group_id': group_id,
'endpoint': subnet_id}
new_endpoints.append(endpoint)
# Save info to use for connections
service_map[vpn_service.id] = group_id
connection_map = {}
ipsec_conns = session.query(ipsec_site_conns).all()
for connection in ipsec_conns:
peer_cidrs = session.query(ipsecpeercidrs.c.cidr).filter(
ipsecpeercidrs.c.ipsec_site_connection_id == connection.id).all()
if not peer_cidrs:
continue # Skip new style connections
# Define the CIDR group
group_id = uuidutils.generate_uuid()
group = {'id': group_id,
'name': '',
'description': '',
'tenant_id': connection.tenant_id,
'endpoint_type': v_constants.CIDR_ENDPOINT}
new_groups.append(group)
# Define the endpoint(s)
for peer_cidr in peer_cidrs:
endpoint = {'endpoint_group_id': group_id,
'endpoint': peer_cidr[0]}
new_endpoints.append(endpoint)
# Save the endpoint group ID info for the connection
vpn_service = connection.vpnservice_id
connection_map[connection.id] = {'local': service_map[vpn_service],
'peer': group_id}
# Create all the defined endpoint groups and their endpoints
_make_endpoint_groups(new_groups, new_endpoints)
# Refer to new groups, in the IPSec connections
_update_connections(connection_map)
# Remove the peer_cidrs from IPSec connections
op.execute(sa_expr.table('ipsecpeercidrs').delete())
# Remove the subnets from VPN services
    stmt = vpnservices.update().where(
        vpnservices.c.subnet_id.isnot(None)).values(
        subnet_id=None)
op.execute(stmt)
session.commit()
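# Illustrative sketch (not from the original source) of the data migration
# performed by upgrade() above, with hypothetical IDs:
#
#   before: vpnservices.subnet_id = 'SUBNET-UUID'
#           ipsecpeercidrs contains ('192.0.2.0/24', 'CONN-UUID')
#   after:  a SUBNET-type endpoint group holds 'SUBNET-UUID', a CIDR-type
#           group holds '192.0.2.0/24', the connection's local_ep_group_id /
#           peer_ep_group_id point at those groups, vpnservices.subnet_id is
#           cleared, and the ipsecpeercidrs rows are removed.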
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/versions/mitaka/expand/
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/versions/mitaka/expand/41b509d10b5e_vpnaas_endpoint_groups.py
# (c) Copyright 2015 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""VPNaaS endpoint groups
Revision ID: 41b509d10b5e
Revises: 24f28869838b
Create Date: 2015-08-06 18:21:03.241664
"""
# revision identifiers, used by Alembic.
revision = '41b509d10b5e'
down_revision = '24f28869838b'
from alembic import op
import sqlalchemy as sa
from neutron.api.v2 import attributes as attr
from neutron_vpnaas.services.vpn.common import constants
def upgrade():
op.create_table(
'vpn_endpoint_groups',
sa.Column('id', sa.String(length=36), nullable=False,
primary_key=True),
sa.Column('tenant_id', sa.String(length=attr.TENANT_ID_MAX_LEN),
index=True),
sa.Column('name', sa.String(length=attr.NAME_MAX_LEN)),
sa.Column('description', sa.String(length=attr.DESCRIPTION_MAX_LEN)),
sa.Column('endpoint_type',
sa.Enum(constants.SUBNET_ENDPOINT, constants.CIDR_ENDPOINT,
constants.VLAN_ENDPOINT, constants.NETWORK_ENDPOINT,
constants.ROUTER_ENDPOINT,
name='endpoint_type'),
nullable=False),
)
op.create_table(
'vpn_endpoints',
sa.Column('endpoint', sa.String(length=255), nullable=False),
sa.Column('endpoint_group_id', sa.String(36), nullable=False),
sa.ForeignKeyConstraint(['endpoint_group_id'],
['vpn_endpoint_groups.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('endpoint', 'endpoint_group_id'),
)
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/versions/mitaka/expand/28ee739a7e4b_multiple_local_subnets.py
# (c) Copyright 2015 Cisco Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Multiple local subnets
Revision ID: 28ee739a7e4b
Revises: 41b509d10b5e
Create Date: 2015-09-09 20:32:54.231765
"""
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
# revision identifiers, used by Alembic.
revision = '28ee739a7e4b'
down_revision = '41b509d10b5e'
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.MITAKA]
def upgrade():
op.add_column('ipsec_site_connections',
sa.Column('local_ep_group_id',
sa.String(length=36),
nullable=True))
op.add_column('ipsec_site_connections',
sa.Column('peer_ep_group_id',
sa.String(length=36),
nullable=True))
op.create_foreign_key(constraint_name=None,
source_table='ipsec_site_connections',
referent_table='vpn_endpoint_groups',
local_cols=['local_ep_group_id'],
remote_cols=['id'])
op.create_foreign_key(constraint_name=None,
source_table='ipsec_site_connections',
referent_table='vpn_endpoint_groups',
local_cols=['peer_ep_group_id'],
remote_cols=['id'])
op.alter_column('vpnservices', 'subnet_id',
existing_type=sa.String(length=36), nullable=True)
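# Editor's note: a hedged verification sketch, not part of the migration.
# Once this revision has been applied to a deployment's Neutron database, the
# new nullable columns and their foreign keys can be inspected as below; the
# connection URL is a placeholder and must point at the real database.
if __name__ == '__main__':
    import sqlalchemy as _sa

    _engine = _sa.create_engine('mysql+pymysql://user:pass@localhost/neutron')
    _inspector = _sa.inspect(_engine)
    _columns = {c['name']
                for c in _inspector.get_columns('ipsec_site_connections')}
    assert {'local_ep_group_id', 'peer_ep_group_id'} <= _columns
    _fks = _inspector.get_foreign_keys('ipsec_site_connections')
    print([fk for fk in _fks
           if fk['referred_table'] == 'vpn_endpoint_groups'])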
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/versions/CONTRACT_HEAD
2cb4ee992b41
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/versions/EXPAND_HEAD
28ee739a7e4b
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/versions/liberty/contract/2c82e782d734_drop_tenant_id_in_cisco_csr_identifier_.py
# Copyright 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""drop_tenant_id_in_cisco_csr_identifier_map
Revision ID: 2c82e782d734
Revises: 333dfd6afaa2
Create Date: 2015-08-20 15:17:09.897944
"""
from alembic import op
from neutron.db import migration
# revision identifiers, used by Alembic.
revision = '2c82e782d734'
down_revision = '333dfd6afaa2'
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.LIBERTY]
def upgrade():
op.drop_column('cisco_csr_identifier_map', 'tenant_id')
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/versions/liberty/contract/56893333aa52_fix_identifier_map_fk.py
# Copyright(c) 2015, Oracle and/or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""fix identifier map fk
Revision ID: 56893333aa52
Revises: kilo
Create Date: 2015-06-11 12:09:01.263253
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import column
from sqlalchemy.sql import expression as expr
from sqlalchemy.sql import func
from sqlalchemy.sql import table
from neutron.db import migration
from neutron.db.migration import cli
# revision identifiers, used by Alembic.
revision = '56893333aa52'
down_revision = 'kilo'
branch_labels = (cli.CONTRACT_BRANCH,)
def upgrade():
# re-size existing data if necessary
identifier_map = table('cisco_csr_identifier_map',
column('ipsec_site_conn_id', sa.String(36)))
ipsec_site_conn_id = identifier_map.columns['ipsec_site_conn_id']
op.execute(identifier_map.update(values={
ipsec_site_conn_id: expr.case([(func.length(ipsec_site_conn_id) > 36,
func.substr(ipsec_site_conn_id, 1, 36))],
else_=ipsec_site_conn_id)}))
# Need to drop foreign key constraint before mysql will allow changes
with migration.remove_fks_from_table('cisco_csr_identifier_map'):
op.alter_column(table_name='cisco_csr_identifier_map',
column_name='ipsec_site_conn_id',
type_=sa.String(36),
existing_nullable=True)
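# Editor's note: the UPDATE above trims any over-long ipsec_site_conn_id
# values in SQL before the column is narrowed to String(36). A plain-Python
# rendering of the same rule, guarded so Alembic never runs it:
if __name__ == '__main__':
    def _fit_to_36(conn_id):
        # CASE WHEN length(id) > 36 THEN substr(id, 1, 36) ELSE id END
        return conn_id[:36] if len(conn_id) > 36 else conn_id

    assert _fit_to_36('a' * 40) == 'a' * 36
    assert _fit_to_36('short-id') == 'short-id'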
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/versions/liberty/contract/333dfd6afaa2_populate_vpn_service_table_fields.py
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Populate VPN service table fields
Revision ID: 333dfd6afaa2
Revises: 56893333aa52
Create Date: 2015-07-27 16:43:59.123456
"""
# revision identifiers, used by Alembic.
revision = '333dfd6afaa2'
down_revision = '56893333aa52'
depends_on = '24f28869838b'
from alembic import op
import netaddr
import sqlalchemy as sa
VPNService = sa.Table('vpnservices', sa.MetaData(),
sa.Column('router_id', sa.String(36), nullable=False),
sa.Column('external_v4_ip', sa.String(16)),
sa.Column('external_v6_ip', sa.String(64)),
sa.Column('id', sa.String(36), nullable=False,
primary_key=True))
Router = sa.Table('routers', sa.MetaData(),
sa.Column('gw_port_id', sa.String(36)),
sa.Column('id', sa.String(36), nullable=False,
primary_key=True))
Port = sa.Table('ports', sa.MetaData(),
sa.Column('id', sa.String(36), nullable=False,
primary_key=True))
IPAllocation = sa.Table('ipallocations', sa.MetaData(),
sa.Column('ip_address', sa.String(64),
nullable=False, primary_key=True),
sa.Column('port_id', sa.String(36)))
def _migrate_external_ips(engine):
"""Use router external IPs to populate external_v*_ip entries.
For each service, look through the associated router's
gw_port['fixed_ips'] list and store any IPv4 and/or IPv6
addresses into the new fields. If there are multiple
addresses for an IP version, then only the first one will
be stored (the same as the reference driver does).
"""
session = sa.orm.Session(bind=engine.connect())
services = session.query(VPNService).all()
for service in services:
addresses = session.query(IPAllocation.c.ip_address).filter(
service.router_id == Router.c.id,
Router.c.gw_port_id == Port.c.id,
Port.c.id == IPAllocation.c.port_id).all()
have_version = []
for address in addresses:
version = netaddr.IPAddress(address[0]).version
if version in have_version:
continue
have_version.append(version)
update = {'external_v%s_ip' % version: address[0]}
op.execute(VPNService.update().where(
VPNService.c.id == service.id).values(update))
session.commit()
def upgrade():
# Use the router to populate the fields
for_engine = op.get_bind()
_migrate_external_ips(for_engine)
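# Editor's note: a self-contained rendering (guarded so Alembic never runs
# it) of the selection rule in _migrate_external_ips(): keep only the first
# address seen for each IP version. The sample addresses are made up.
if __name__ == '__main__':
    import netaddr

    _fixed_ips = ['172.24.4.3', '172.24.4.7', '2001:db8::5']
    _first_by_version = {}
    for _ip in _fixed_ips:
        _version = netaddr.IPAddress(_ip).version   # 4 or 6
        _first_by_version.setdefault(_version, _ip)
    # {4: '172.24.4.3', 6: '2001:db8::5'} -> external_v4_ip / external_v6_ip
    print(_first_by_version)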
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/versions/liberty/expand/24f28869838b_add_fields_to_vpn_service_table.py
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add fields to VPN service table
Revision ID: 24f28869838b
Revises: 30018084ed99
Create Date: 2015-07-06 14:52:24.339246
"""
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
# revision identifiers, used by Alembic.
revision = '24f28869838b'
down_revision = '30018084ed99'
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.LIBERTY]
def upgrade():
op.add_column('vpnservices',
sa.Column('external_v4_ip', sa.String(16), nullable=True))
op.add_column('vpnservices',
sa.Column('external_v6_ip', sa.String(64), nullable=True))
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/versions/liberty/expand/30018084ed99_initial.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Initial no-op Liberty expand rule.
Revision ID: 30018084ed99
Revises: kilo
Create Date: 2015-07-16 00:00:00.000000
"""
from neutron.db.migration import cli
# revision identifiers, used by Alembic.
revision = '30018084ed99'
down_revision = 'kilo'
branch_labels = (cli.EXPAND_BRANCH,)
def upgrade():
pass
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/versions/start_neutron_vpnaas.py
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""start neutron-vpnaas chain
Revision ID: start_neutron_vpnaas
Revises: None
Create Date: 2014-12-09 18:50:01.946832
"""
# revision identifiers, used by Alembic.
revision = 'start_neutron_vpnaas'
down_revision = None
def upgrade():
pass
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/versions/3ea02b2a773e_add_index_tenant_id.py
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add_index_tenant_id
Revision ID: 3ea02b2a773e
Revises: start_neutron_vpnaas
Create Date: 2015-02-10 17:51:10.752504
"""
# revision identifiers, used by Alembic.
revision = '3ea02b2a773e'
down_revision = 'start_neutron_vpnaas'
from alembic import op
TABLES = ['ipsecpolicies', 'ikepolicies', 'ipsec_site_connections',
'vpnservices']
def upgrade():
for table in TABLES:
op.create_index(op.f('ix_%s_tenant_id' % table),
table, ['tenant_id'], unique=False)
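# Editor's note: a small sketch (guarded so Alembic never runs it) of the
# index names the loop above creates; op.f() only marks each name as already
# fully constructed so Alembic's naming conventions leave it untouched.
if __name__ == '__main__':
    for _table in TABLES:
        print('ix_%s_tenant_id' % _table)   # e.g. ix_vpnservices_tenant_id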
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/__init__.py
# Copyright 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
VPNAAS_VERSION_TABLE = 'alembic_version_vpnaas'
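# Editor's note: a hedged sketch of how a dedicated version table like this
# is typically handed to Alembic; the in-memory SQLite engine is a
# placeholder, and the project's real env.py wiring may differ.
if __name__ == '__main__':
    import sqlalchemy as _sa
    from alembic.migration import MigrationContext

    _engine = _sa.create_engine('sqlite://')
    with _engine.connect() as _conn:
        _ctx = MigrationContext.configure(
            _conn, opts={'version_table': VPNAAS_VERSION_TABLE})
        # None on a fresh database, the current revision id otherwise.
        print(_ctx.get_current_revision())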
neutron-vpnaas-8.0.0/neutron_vpnaas/db/migration/alembic_migrations/script.py.mako
# Copyright ${create_date.year}